Why is my CUDA implementation just as fast as my CPU implementation?

Her*_*ert 4 c++ cuda nvidia convolution

I wrote some code in plain C++ and in CUDA that performs a 2D convolution of a 1300x1300 grayscale image with a 15x15 kernel. The two versions:

CPU:

#include <iostream>
#include <exception>

#define N 1300
#define K 15
#define K2 ((K - 1) / 2)


template<int mx, int my>
inline int index(int x, int y)
{
  return x*my + y;
}

int main() {
  double *image  = new double[N * N];
  double *kernel = new double[K * K];
  double *result = new double[N * N];

  for (int x=0; x<N; ++x)
  for (int y=0; y<N; ++y)
  {
    double r = 0;
    for(int i=0; i<K; ++i)
    for(int j=0; j<K; ++j)
    {
      if (x + i - K2 >= 0 and
          x + i - K2 < N  and
          y + j - K2 >= 0 and
          y + j - K2 < N)
      {
        r +=  kernel[index<K,K>(i,j)] * image[index<N,N>(x+i-K2, y+j-K2)];
      }
    }
    result[index<N,N>(x, y)] = r;
  }

  delete[] image;
  delete[] kernel;
  delete[] result;
}

GPU:

#include <iostream>
#include <exception>

// ignore, just for error handling
struct ErrorHandler {
  int d_line;
  char const *d_file;
  ErrorHandler(int line, char const *file) : d_line(line), d_file(file) {};
};

#define EH ErrorHandler(__LINE__, __FILE__)

ErrorHandler operator<<(ErrorHandler eh, cudaError_t err)
{
  if (err != cudaSuccess)
  {
    std::cerr << cudaGetErrorString( err ) << " in " << eh.d_file << " at line " << eh.d_line << '\n';
    throw std::exception();
  }
  return eh;
}
// end.

#define N 1300
#define K 15
#define K2 ((K - 1) / 2)


template<int mx, int my>
__device__ inline int index(int x, int y)
{
  return x*my + y;
}

__global__ void kernelkernel(double *image, double *kernel, double *result)
{
  int x = blockIdx.x;
  int y = blockIdx.y; // becomes: int y = threadIdx.x;

  double r = 0;
  for(int i=0; i<K; ++i)
  for(int j=0; j<K; ++j)
  {
    if (x + i - K2 >= 0 and
        x + i - K2 < N  and
        y + j - K2 >= 0 and
        y + j - K2 < N)
    {
      r +=  kernel[index<K,K>(i,j)] * image[index<N,N>(x+i-K2, y+j-K2)];
    }
  }
  result[index<N,N>(x, y)] = r;
}

int main() {
  double *image  = new double[N * N];
  double *kernel = new double[K * K];
  double *result = new double[N * N];

  double *image_cuda;
  double *kernel_cuda;
  double *result_cuda;
  EH << cudaMalloc((void **) &image_cuda,  N*N*sizeof(double));
  EH << cudaMalloc((void **) &kernel_cuda, K*K*sizeof(double));
  EH << cudaMalloc((void **) &result_cuda, N*N*sizeof(double));

  EH << cudaMemcpy(image_cuda,     image,     N*N*sizeof(double), cudaMemcpyHostToDevice);
  EH << cudaMemcpy(kernel_cuda,    kernel,    K*K*sizeof(double), cudaMemcpyHostToDevice);

  dim3 grid   ( N, N );
  kernelkernel<<<grid, 1>>>(image_cuda, kernel_cuda, result_cuda);
  // replace previous 2 statements with: 
  // kernelkernel<<<N, N>>>(image_cuda, kernel_cuda, result_cuda);
  EH << cudaMemcpy(result, result_cuda, N*N*sizeof(double), cudaMemcpyDeviceToHost);

  cudaFree( image_cuda );
  cudaFree( kernel_cuda );
  cudaFree( result_cuda );

  delete[] image;
  delete[] kernel;
  delete[] result;
}

I expected the CUDA code to be faster, however:

$ nvprof ./gpuversion
==17806== NVPROF is profiling process 17806, command: ./gpuversion
==17806== Profiling application: ./gpuversion
==17806== Profiling result:
Time(%)      Time     Calls       Avg       Min       Max  Name
99.89%  3.83149s         1  3.83149s  3.83149s  3.83149s  kernelkernel(double*, double*, double*)
  0.07%  2.6420ms         1  2.6420ms  2.6420ms  2.6420ms  [CUDA memcpy DtoH]
  0.04%  1.5111ms         2  755.54us     736ns  1.5103ms  [CUDA memcpy HtoD]

and:

$ time ./cpuversion
real    0m3.382s
user    0m3.371s
sys     0m0.012s

The difference between them is statistically insignificant. The CUDA kernel takes roughly 3-4 seconds, so why isn't it faster? Is my code even running in parallel?

PS: I'm new to CUDA, so I may well be missing something trivial.

What I found out is that CUDA does not let you access memory willy-nilly from within blocks. I guess the general strategy of CUDA programming is (a small sketch of the idea follows after this list):

  • allocate memory on the GPU and copy data over from RAM using cudaMalloc and cudaMemcpy
  • divide the workload over blocks and threads such that the memory accessed by different blocks overlaps as little as possible.
  • if the memory used by different blocks does overlap, start each block by copying that memory into a shared array. Note that:
    • the size of this array must be known at compile time
    • its size is limited
    • this memory is shared by every thread in one block, so __shared__ double foo[10] allocates 10 doubles per BLOCK.
  • copy the memory a block needs into the shared variables inside the kernel. Naturally, you use the different threads to do this "efficiently"
  • synchronize the threads, so that all data is present before it is used.
  • process the data, and write the result to the kernel's output array
  • synchronize again; I don't know why, but everyone on the internet seems to do it :S
  • copy the GPU memory back to RAM
  • clean up the GPU memory.
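
As a reference before the real code, here is a minimal sketch of this tiling strategy applied to the original 1300x1300 / 15x15 convolution from the question. The BLOCK and TILE sizes and the name conv_tiled are choices I made purely for illustration; the cooperative load just lets the BLOCK*BLOCK threads of a block stride over the (BLOCK + K - 1)^2 halo region. It reuses the N, K and K2 macros from the question.

#define BLOCK 16                 // assumption: 16x16 threads per block
#define TILE  (BLOCK + K - 1)    // 30x30 input tile including the halo

__global__ void conv_tiled(const double *image, const double *kernel, double *result)
{
  __shared__ double tile[TILE][TILE];

  int out_x = blockIdx.x * BLOCK + threadIdx.x;  // output pixel owned by this thread
  int out_y = blockIdx.y * BLOCK + threadIdx.y;

  // Cooperative load: the BLOCK*BLOCK threads stride over the TILE*TILE
  // elements, writing 0 for pixels that fall outside the image.
  for (int idx = threadIdx.y * BLOCK + threadIdx.x; idx < TILE * TILE; idx += BLOCK * BLOCK)
  {
    int tx = idx / TILE, ty = idx % TILE;
    int src_x = blockIdx.x * BLOCK + tx - K2;
    int src_y = blockIdx.y * BLOCK + ty - K2;
    tile[tx][ty] = (src_x >= 0 && src_x < N && src_y >= 0 && src_y < N)
                 ? image[src_x * N + src_y] : 0.0;
  }
  __syncthreads();  // make sure the whole tile is loaded before anyone reads it

  if (out_x < N && out_y < N)
  {
    double r = 0;
    for (int i = 0; i < K; ++i)
      for (int j = 0; j < K; ++j)
        r += kernel[i * K + j] * tile[threadIdx.x + i][threadIdx.y + j];
    result[out_x * N + out_y] = r;
  }
}

// launched as:
// dim3 grid((N + BLOCK - 1) / BLOCK, (N + BLOCK - 1) / BLOCK);
// dim3 block(BLOCK, BLOCK);
// conv_tiled<<<grid, block>>>(image_cuda, kernel_cuda, result_cuda);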

Applying this strategy to my actual problem gives the following code. It is mex code for computing structural similarity in Matlab, which also works with a sliding kernel, but over 2 images and with an aggregation different from the dot product.

// author: Herbert Kruitbosch, CC: be nice, include my name in documentation/papers/publications when used
#include <matrix.h>
#include <mex.h>

#include <cmath>
#include <iostream>
#include <fstream>

#include <iostream>
#include <stdio.h>

static void HandleError(
  cudaError_t err,
  const char *file,
  int line )
{
  if (err != cudaSuccess)
  {
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
    exit( EXIT_FAILURE );
  }
}

#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define TILE_WIDTH 31

__device__ inline double sim(double v0, double v1, double c)
{
  return (c + 2*v0*v1) / (c + v1*v1 + v0*v0);
}

__device__ inline int index(int rows, int cols, int row, int col)
{
  return row + col*rows;
}

__global__ void ssimkernel(double *test, double *reference, const double * __restrict__ kernel, double *ssim, int k, int rows, int cols, int tile_batches_needed)
{
  int radius = k / 2;
  int block_width = TILE_WIDTH - k + 1;
  __shared__ double tile_test     [TILE_WIDTH][TILE_WIDTH];
  __shared__ double tile_reference[TILE_WIDTH][TILE_WIDTH];



  for(int offset=0; offset < tile_batches_needed; ++offset)
  {
    int dest = block_width*block_width*offset + threadIdx.y * block_width + threadIdx.x;
    int destRow = dest / TILE_WIDTH;
    int destCol = dest % TILE_WIDTH;
    int srcRow = blockIdx.y * block_width + destRow - radius;
    int srcCol = blockIdx.x * block_width + destCol - radius;
    int src  = srcCol * rows + srcRow;
    if (destRow < TILE_WIDTH)
    {
      if (srcRow >= 0 and srcRow < rows and
          srcCol >= 0 and srcCol < cols)
      {
        tile_test     [destRow][destCol] = test     [src];
        tile_reference[destRow][destCol] = reference[src];
      }
      else
      {
        tile_test     [destRow][destCol] = 0;
        tile_reference[destRow][destCol] = 0;
      }
    }
  }
  __syncthreads();

  double mean_test = 0;
  double mean_reference = 0;
  for(int i=0; i<k; ++i)
  for(int j=0; j<k; ++j)
  {
    double w = kernel[i * k + j];
    mean_test      +=  w * tile_test     [threadIdx.y+i][threadIdx.x+j];
    mean_reference +=  w * tile_reference[threadIdx.y+i][threadIdx.x+j];
  }

  double var_test = 0;
  double var_reference = 0;
  double correlation = 0;
  for(int i=0; i<k; ++i)
  for(int j=0; j<k; ++j)
  {
    double w = kernel[i * k + j];
    double a = (tile_test     [threadIdx.y+i][threadIdx.x+j] - mean_test     );
    double b = (tile_reference[threadIdx.y+i][threadIdx.x+j] - mean_reference);
    var_test      += w * a * a;
    var_reference += w * b * b;
    correlation   += w * a * b;
  }

  int destRow = blockIdx.y * block_width + threadIdx.y;
  int destCol = blockIdx.x * block_width + threadIdx.x;
  if (destRow < rows and destCol < cols)
    ssim[destCol * rows + destRow] = sim(mean_test, mean_reference, 0.01) * (0.03 + 2*correlation) / (0.03 + var_test + var_reference);

  __syncthreads();
}


template<typename T>
inline T sim(T v0, T v1, T c)
{
  return (c + 2*v0*v1) / (c + v1*v1 + v0*v0);
}

inline int upperdiv(int a, int b) {
  return (a + b - 1) / b;
}

void mexFunction(int nargout, mxArray *argout[], int nargin, const mxArray *argin[])
{
  mwSize rows = mxGetDimensions(argin[0])[0];
  mwSize cols = mxGetDimensions(argin[0])[1];
  mwSize k    = mxGetDimensions(argin[2])[0];
  mwSize channels = mxGetNumberOfDimensions(argin[0]) <= 2 ? 1 : mxGetDimensions(argin[0])[2];
  int dims[] = {rows, cols, channels};
  argout[0] = mxCreateNumericArray(3, dims, mxDOUBLE_CLASS, mxREAL);

  double *test      = (double *)mxGetData(argin[0]);
  double *reference = (double *)mxGetData(argin[1]);
  double *gaussian  = (double *)mxGetData(argin[2]);
  double *ssim      = (double *)mxGetData(argout[0]);

  double *test_cuda;
  double *reference_cuda;
  double *gaussian_cuda;
  double *ssim_cuda;
  HANDLE_ERROR( cudaMalloc((void **) &test_cuda,      rows*cols*sizeof(double)) );
  HANDLE_ERROR( cudaMalloc((void **) &reference_cuda, rows*cols*sizeof(double)) );
  HANDLE_ERROR( cudaMalloc((void **) &gaussian_cuda,  k*k*sizeof(double)) );
  HANDLE_ERROR( cudaMalloc((void **) &ssim_cuda,      rows*cols*sizeof(double)) );
  HANDLE_ERROR( cudaMemcpy(gaussian_cuda,  gaussian,  k*k*sizeof(double), cudaMemcpyHostToDevice) );

  int block_width = TILE_WIDTH - k + 1;
  int tile_batches_needed = upperdiv(TILE_WIDTH*TILE_WIDTH, block_width*block_width);

  for(int c=0; c<channels; ++c)
  {
    HANDLE_ERROR( cudaMemcpy(test_cuda,      test      + rows*cols*c, rows*cols*sizeof(double), cudaMemcpyHostToDevice) );
    HANDLE_ERROR( cudaMemcpy(reference_cuda, reference + rows*cols*c, rows*cols*sizeof(double), cudaMemcpyHostToDevice) );
    dim3 dimGrid(upperdiv(cols, block_width), upperdiv(rows, block_width), 1);
    dim3 dimBlock(block_width, block_width, 1);

    ssimkernel<<<dimGrid, dimBlock>>>(test_cuda, reference_cuda, gaussian_cuda, ssim_cuda, k, rows, cols, tile_batches_needed);

    HANDLE_ERROR( cudaMemcpy(ssim + rows*cols*c, ssim_cuda, rows*cols*sizeof(double), cudaMemcpyDeviceToHost) );
  }
  cudaFree( test_cuda );
  cudaFree( reference_cuda );
  cudaFree( gaussian_cuda );
  cudaFree( ssim_cuda );
}

小智 9

kernelkernel<<<grid, 1>>>

This is a significant issue; threads on an NVIDIA GPU work in warps of 32 threads. But you have only allocated a single thread per block, which means 31 of those 32 threads sit idle while one thread does all the work. And usually, for kernels where you have the flexibility, you will typically want several warps per block rather than just one.

You would get an immediate speedup by using N blocks with N threads per block, rather than N^2 blocks with a single thread each.
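
Concretely, this is the change the question's own comments already hint at; a minimal sketch, with the kernel body unchanged and only the index computation and the launch modified:

__global__ void kernelkernel(double *image, double *kernel, double *result)
{
  int x = blockIdx.x;    // one block per row
  int y = threadIdx.x;   // one thread per column

  double r = 0;
  for (int i = 0; i < K; ++i)
  for (int j = 0; j < K; ++j)
  {
    if (x + i - K2 >= 0 and x + i - K2 < N and
        y + j - K2 >= 0 and y + j - K2 < N)
    {
      r += kernel[index<K,K>(i,j)] * image[index<N,N>(x+i-K2, y+j-K2)];
    }
  }
  result[index<N,N>(x, y)] = r;
}

// launched with N blocks of N threads instead of an N x N grid of single-thread blocks:
// kernelkernel<<<N, N>>>(image_cuda, kernel_cuda, result_cuda);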

In practice N may be too large, though, since there is an upper limit on the number of threads per block. In that case you can pick a suitable M and use N/M threads per block with N*M blocks.

In fact, you will probably get the best results here by picking some M (I'm guessing 256 is likely close to optimal) and launching with L = ceiling(N*N/M) blocks and M threads per block. Each thread then reconstructs an index in [0, M*L) from its block and thread IDs, and the threads whose index falls in [0, N*N) go on to split that index into an x and a y coordinate and do the work.
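
A minimal sketch of that launch scheme, reusing the question's kernel body, macros and index<> helper; M = 256 and the name kernelkernel_flat are my own picks for illustration:

#define M 256   // threads per block (assumed to be near optimal)

__global__ void kernelkernel_flat(double *image, double *kernel, double *result)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;  // flat index in [0, M*L)
  if (idx >= N * N)          // threads past the last pixel have nothing to do
    return;

  int x = idx / N;           // split the flat index into image coordinates
  int y = idx % N;

  double r = 0;
  for (int i = 0; i < K; ++i)
  for (int j = 0; j < K; ++j)
  {
    if (x + i - K2 >= 0 and x + i - K2 < N and
        y + j - K2 >= 0 and y + j - K2 < N)
    {
      r += kernel[index<K,K>(i,j)] * image[index<N,N>(x+i-K2, y+j-K2)];
    }
  }
  result[index<N,N>(x, y)] = r;
}

// launched as:
// int L = (N * N + M - 1) / M;   // ceiling(N*N / M)
// kernelkernel_flat<<<L, M>>>(image_cuda, kernel_cuda, result_cuda);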