使用cudaMemcpy3D传输***指针

use*_*579 2 cuda

我正在尝试使用 cudaMemcpy3D 来传输动态分配的 3D 矩阵(张量)。张量被分配为连续的内存块(参见下面的代码)。我尝试了 cudaExtent 和 cudaMemcpy3DParms 的各种组合,但是元素的顺序被弄乱了。我创建了以下示例来演示该问题:

#include <stdio.h>

// Allocates an Nx x Ny x Nz tensor addressable as tensor[i][j][k].
// The data itself (tensor[0][0]) is ONE contiguous block of Nx*Ny*Nz ints;
// the two upper levels are pointer tables wired to point into that block,
// so the whole payload can be copied with a single memcpy-style transfer.
// NOTE(review): no NULL checks on the three mallocs.
int ***alloc_tensor(int Nx, int Ny, int Nz) {
   int i, j;
   int ***tensor;

   // One pointer table per indirection level; all element data in one slab.
   tensor = (int ***) malloc((size_t) (Nx * sizeof(int **)));
   tensor[0] = (int **) malloc((size_t) (Nx * Ny * sizeof(int *)));
   tensor[0][0] = (int *) malloc((size_t) (Nx * Ny * Nz * sizeof(int)));

   // Row pointers for the first slice: rows are Nz ints apart.
   for(j = 1; j < Ny; j++)
      tensor[0][j] = tensor[0][j-1] + Nz;
   // Slice pointers and row pointers for the remaining slices:
   // slices are Ny*Nz ints apart in the data slab.
   for(i = 1; i < Nx; i++) {
      tensor[i] = tensor[i - 1] + Ny;
      tensor[i][0] = tensor[i - 1][0] + Ny * Nz;
      for(j = 1; j < Ny; j++)
         tensor[i][j] = tensor[i][j - 1] + Nz;
   }

   return tensor;
}

// Debug kernel (launched <<<1,1>>>): prints every element of the pitched
// device allocation.
// NOTE(review): this is the buggy version the question is about. The slice
// stride used here is pitch * Nx, but the allocation on the host side used
// extent (Nx*sizeof(int), Ny, Nz), whose slice height is Ny rows — and k
// (the fastest-varying host subscript) is used as the slice index while j
// indexes within the row. This mismatch with the host fill order is what
// produces the scrambled d_tensor output shown below.
__global__ void kernel(cudaPitchedPtr tensor, int Nx, int Ny, int Nz) {
   int i, j, k;
   char *tensorslice;   // byte pointer: pitch arithmetic must be done in bytes
   int *tensorrow;

   for (i = 0; i < Nx; i++) {
      for (j = 0; j < Ny; j++) {
         for (k = 0; k < Nz; k++) {
            tensorslice = ((char *)tensor.ptr) + k * tensor.pitch * Nx;
            tensorrow = (int *)(tensorslice + i * tensor.pitch);
            printf("d_tensor[%d][%d][%d] = %d\n", i, j, k, tensorrow[j]);
         }
      }
   }   
}

// Question's reproducer: fill a host tensor, copy it to a pitched 3D device
// allocation with cudaMemcpy3D, print both sides.
// NOTE(review): three problems, per the accepted answer below:
//  1) no CUDA call is error-checked, so the failing cudaMemcpy3D is silent;
//  2) make_cudaPitchedPtr is called with (ptr, pitch, Ny, Nz) — its last two
//     arguments are width-in-elements and height (rows), not extra extents;
//  3) the host fill order does not match the device layout (x must be the
//     fast dimension). Also: no sync before main returns, so device printf
//     output may never be flushed, and nothing is freed.
int main() {
   int i, j, k, value = 0;
   int Nx = 2, Ny = 6, Nz = 4;

   int ***h_tensor;
   struct cudaPitchedPtr d_tensor;

   h_tensor = alloc_tensor(Nx, Ny, Nz);
   // Extent width is in BYTES: Nx ints per row, Ny rows, Nz slices.
   cudaMalloc3D(&d_tensor, make_cudaExtent(Nx * sizeof(int), Ny, Nz));

   for(i = 0; i < Nx; i++) {
      for(j = 0; j < Ny; j++) {
         for(k = 0; k < Nz; k++) {
            h_tensor[i][j][k] = value++;
            printf("h_tensor[%d][%d][%d] = %d\n", i, j, k, h_tensor[i][j][k]);
         }
      }
   }

   cudaMemcpy3DParms cpy = { 0 };
   cpy.srcPtr = make_cudaPitchedPtr(h_tensor[0][0], Nx * sizeof(int), Ny, Nz);
   cpy.dstPtr = d_tensor;
   cpy.extent = make_cudaExtent(Nx * sizeof(int), Ny, Nz);
   cpy.kind = cudaMemcpyHostToDevice;

   cudaMemcpy3D(&cpy);

   kernel<<<1, 1>>>(d_tensor, Nx, Ny, Nz);

   // ... clean-up
}
Run Code Online (Sandbox Code Playgroud)

主变量 ( h_tensor) 和设备 ( d_tensor) 的输出不同,看起来像

h_tensor[0][0][0] = 0
h_tensor[0][0][1] = 1
h_tensor[0][0][2] = 2
h_tensor[0][0][3] = 3
h_tensor[0][1][0] = 4
h_tensor[0][1][1] = 5
h_tensor[0][1][2] = 6
...

d_tensor[0][0][0] = 0
d_tensor[0][0][1] = 12
d_tensor[0][0][2] = 24
d_tensor[0][0][3] = 36
d_tensor[0][1][0] = 1
d_tensor[0][1][1] = 13
d_tensor[0][1][2] = 25
...
Run Code Online (Sandbox Code Playgroud)

我究竟做错了什么?cudaMemcpy3D 的正确使用方法是什么?

Rob*_*lla 6

  1. 每当您在使用 cuda 代码时遇到问题时,最好进行适当的 cuda 错误检查。至少,您在此处发布的代码对我来说无法正确运行 - 调用 cudaMemcpy3D 的那一行会引发错误。这是由于下面第 2 项造成的。(我怀疑您用来生成输出的代码与您在此处显示的代码不同,但这只是一个猜测。)
  2. 您对 make_cudaPitchedPtr 的用法不正确:

    cpy.srcPtr = make_cudaPitchedPtr(h_tensor[0][0], Nx * sizeof(int), Ny, Nz);
    
    Run Code Online (Sandbox Code Playgroud)

    查看 API 文档。以这种方式创建 CUDA pitched(倾斜)指针时,2D 和 3D 的做法没有区别。因此,像您所做的那样传递 3 个不同的维度是没有意义的。相反,应这样做:

    cpy.srcPtr = make_cudaPitchedPtr(h_tensor[0][0], Nx * sizeof(int), Nx, Ny);
    
    Run Code Online (Sandbox Code Playgroud)
  3. 我发现的其余问题归因于对 C 中 3 维的错误理解。多下标数组的最后一个下标是快速变化的维度,即内存中相邻值占据相邻索引值的维度。因此,你在第三维度中对 Z 的使用让我感到困惑。您的主机分配在第一个下标位置使用Nx,但您的设备索引不匹配。显然有多种方法可以处理这个问题。如果您不喜欢我的安排,您可以更改它,但主机和设备索引必须匹配。

无论如何,以下代码修改对我有用:

#include <stdio.h>

/* Builds an Nx x Ny x Nz tensor usable as t[i][j][k]. The payload starting
   at t[0][0] is a single contiguous run of Nx*Ny*Nz ints; the outer two
   levels are pointer tables aimed into that run, which is what lets the
   whole tensor be handed to cudaMemcpy3D as one block. */
int ***alloc_tensor(int Nx, int Ny, int Nz) {
   int ***t = (int ***) malloc((size_t) (Nx * sizeof(int **)));
   t[0] = (int **) malloc((size_t) (Nx * Ny * sizeof(int *)));
   t[0][0] = (int *) malloc((size_t) (Nx * Ny * Nz * sizeof(int)));

   /* Slice 0: consecutive rows sit Nz ints apart in the payload. */
   for (int row = 1; row < Ny; row++)
      t[0][row] = t[0][row - 1] + Nz;

   /* Remaining slices: each slice owns Ny row pointers and Ny*Nz ints. */
   for (int slice = 1; slice < Nx; slice++) {
      t[slice] = t[slice - 1] + Ny;
      t[slice][0] = t[slice - 1][0] + Ny * Nz;
      for (int row = 1; row < Ny; row++)
         t[slice][row] = t[slice][row - 1] + Nz;
   }

   return t;
}

// Debug kernel (launched <<<1,1>>>): prints every element of the pitched
// device tensor. The allocation extent was (Nx*sizeof(int), Ny, Nz), so
// each slice holds Ny rows of `pitch` bytes: slice k starts k*pitch*Ny
// bytes past the base pointer, row j lies j*pitch bytes into the slice,
// and i indexes ints within the row — x is the fast dimension, matching
// the corrected host layout h_tensor[k][j][i].
__global__ void kernel(cudaPitchedPtr tensor, int Nx, int Ny, int Nz) {
   int i, j, k;
   char *tensorslice;   // byte pointer: pitch arithmetic is done in bytes
   int *tensorrow;

   for (i = 0; i < Nx; i++) {
      for (j = 0; j < Ny; j++) {
         for (k = 0; k < Nz; k++) {
            tensorslice = ((char *)tensor.ptr) + k * tensor.pitch * Ny;
            tensorrow = (int *)(tensorslice + j * tensor.pitch);
            printf("d_tensor[%d][%d][%d] = %d\n", i, j, k, tensorrow[i]);
         }
      }
   }
}

// Corrected reproducer: the host tensor is allocated as [Nz][Ny][Nx] so its
// last (fast) subscript matches the device x dimension, and is indexed as
// h_tensor[k][j][i]. Every CUDA call is error-checked, as point 1 of this
// answer recommends, and all allocations are released before returning.
int main() {
   int i, j, k, value = 0;
   int Nx = 2, Ny = 6, Nz = 4;

   int ***h_tensor;
   struct cudaPitchedPtr d_tensor;
   cudaError_t err;

   // Host side: Nz slices of Ny rows of Nx ints (x fastest-varying).
   h_tensor = alloc_tensor(Nz, Ny, Nx);
   if (h_tensor == NULL) {
      fprintf(stderr, "host allocation failed\n");
      return 1;
   }

   // Device side: extent width is in BYTES.
   err = cudaMalloc3D(&d_tensor, make_cudaExtent(Nx * sizeof(int), Ny, Nz));
   if (err != cudaSuccess) {
      fprintf(stderr, "cudaMalloc3D: %s\n", cudaGetErrorString(err));
      return 1;
   }

   // Fill, then print, in (i,j,k) order so the output is directly
   // comparable with the kernel's output.
   for (i = 0; i < Nx; i++)
      for (j = 0; j < Ny; j++)
         for (k = 0; k < Nz; k++)
            h_tensor[k][j][i] = value++;
   for (i = 0; i < Nx; i++)
      for (j = 0; j < Ny; j++)
         for (k = 0; k < Nz; k++)
            printf("h_tensor[%d][%d][%d] = %d\n", i, j, k, h_tensor[k][j][i]);

   // srcPtr = (pointer, row pitch in bytes, row width in ELEMENTS, rows).
   cudaMemcpy3DParms cpy = { 0 };
   cpy.srcPtr = make_cudaPitchedPtr(h_tensor[0][0], Nx * sizeof(int), Nx, Ny);
   cpy.dstPtr = d_tensor;
   cpy.extent = make_cudaExtent(Nx * sizeof(int), Ny, Nz);
   cpy.kind = cudaMemcpyHostToDevice;

   err = cudaMemcpy3D(&cpy);
   if (err != cudaSuccess) {
      fprintf(stderr, "cudaMemcpy3D: %s\n", cudaGetErrorString(err));
      return 1;
   }

   kernel<<<1, 1>>>(d_tensor, Nx, Ny, Nz);
   err = cudaGetLastError();        // catches launch-configuration errors
   if (err != cudaSuccess) {
      fprintf(stderr, "kernel launch: %s\n", cudaGetErrorString(err));
      return 1;
   }
   err = cudaDeviceSynchronize();   // flushes device printf, surfaces async errors
   if (err != cudaSuccess) {
      fprintf(stderr, "kernel execution: %s\n", cudaGetErrorString(err));
      return 1;
   }

   // Clean-up: device allocation, then the three host levels.
   cudaFree(d_tensor.ptr);
   free(h_tensor[0][0]);
   free(h_tensor[0]);
   free(h_tensor);
   return 0;
}
Run Code Online (Sandbox Code Playgroud)