在 CUDA 中使用较少线程时数组复制速度更快

Tat*_*ore 3 cuda

我测试了两种不同的方法来在 CUDA 内核中复制 2D 数组。

第一个启动 TILE_DIM x TILE_DIM 线程块。每个块复制数组的一个图块,为每个元素分配一个线程:

// Naive copy kernel: one thread per matrix element.
// Expected launch: grid of (N/TILE_DIM, N/TILE_DIM) blocks, TILE_DIM x TILE_DIM threads each.
__global__ void simple_copy(float *outdata, const float *indata){

// Global 2D coordinates of the single element this thread copies.
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;

// Matrix width recovered from the grid size; assumes the grid exactly tiles the matrix.
int width = gridDim.x * TILE_DIM;

// Coalesced load and store: consecutive threadIdx.x values touch consecutive addresses.
outdata[y*width + x] = indata[y*width + x];

}
Run Code Online (Sandbox Code Playground)

第二个摘自NVIDIA 博客。它与之前的内核类似,但每个块使用 TILE_DIM x BLOCK_ROWS 线程。每个线程循环遍历矩阵的多个元素:

// Copy kernel from the NVIDIA transpose blog post: each thread copies
// TILE_DIM/BLOCK_ROWS elements, stepping down the tile in BLOCK_ROWS-row strides.
// Expected launch: TILE_DIM x BLOCK_ROWS threads per block (fewer threads,
// more work per thread than simple_copy).
__global__ void fast_copy(float *outdata, const float *indata)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;

// Compile-time trip count (TILE_DIM/BLOCK_ROWS iterations) lets the compiler fully unroll.
for (int k = 0 ; k < TILE_DIM ; k += BLOCK_ROWS)
    outdata[(y+k)*width + x] = indata[(y+k)*width + x];
}
Run Code Online (Sandbox Code Playground)

我进行了一个测试来比较这两种方法。两个内核都对全局内存执行合并访问,但第二个内核似乎明显更快。

NVIDIA 视觉分析器证实了这一测试。

那么第二个内核如何实现更快的复制呢?

这是我用来测试内核的完整代码:

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <conio.h>

#define TILE_DIM 32
#define BLOCK_ROWS 8

/* KERNELS */

// Naive copy kernel: one thread per matrix element.
// Expected launch: grid of (N/TILE_DIM, N/TILE_DIM) blocks, TILE_DIM x TILE_DIM threads each.
__global__ void simple_copy(float *outdata, const float *indata){

// Global 2D coordinates of the single element this thread copies.
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;

// Matrix width recovered from the grid size; assumes the grid exactly tiles the matrix.
int width = gridDim.x * TILE_DIM;

// Coalesced load and store: consecutive threadIdx.x values touch consecutive addresses.
outdata[y*width + x] = indata[y*width + x];

}
//###########################################################################

// Copy kernel from the NVIDIA transpose blog post: each thread copies
// TILE_DIM/BLOCK_ROWS elements, stepping down the tile in BLOCK_ROWS-row strides.
// Expected launch: TILE_DIM x BLOCK_ROWS threads per block (fewer threads,
// more work per thread than simple_copy).
__global__ void fast_copy(float *outdata, const float *indata)
{
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
int width = gridDim.x * TILE_DIM;

// Compile-time trip count (TILE_DIM/BLOCK_ROWS iterations) lets the compiler fully unroll.
for (int k = 0 ; k < TILE_DIM ; k += BLOCK_ROWS)
    outdata[(y+k)*width + x] = indata[(y+k)*width + x];
}
//###########################################################################

/* MAIN */

/* Abort with a message if a CUDA runtime call failed. */
static void cudaCheck(cudaError_t err, const char *what){
    if (err != cudaSuccess){
        fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

/*
 * Benchmark driver: times n_iter launches of simple_copy vs fast_copy on an
 * N x N float matrix, then compares the two device results element-wise.
 * Returns 0 on success; exits with a diagnostic on any CUDA or allocation error.
 */
int main(){

float *indata,*dev_indata,*outdata1,*dev_outdata1,*outdata2,*dev_outdata2;
cudaEvent_t start, stop;
float time1,time2;
int i,j,k;

int n_iter = 100;   /* kernel launches per timed loop */

int N = 2048;       /* matrix is N x N; must be a multiple of TILE_DIM */

cudaCheck(cudaEventCreate(&start), "cudaEventCreate(start)");
cudaCheck(cudaEventCreate(&stop), "cudaEventCreate(stop)");


dim3 grid(N/TILE_DIM, N/TILE_DIM);
dim3 threads1(TILE_DIM,TILE_DIM);      /* one thread per element */
dim3 threads2(TILE_DIM,BLOCK_ROWS);   /* TILE_DIM/BLOCK_ROWS elements per thread */

// Allocations

indata = (float *)malloc(N*N*sizeof(float));
outdata1 = (float *)malloc(N*N*sizeof(float));
outdata2 = (float *)malloc(N*N*sizeof(float));
if (indata == NULL || outdata1 == NULL || outdata2 == NULL){
    fprintf(stderr, "host allocation failed\n");
    return EXIT_FAILURE;
}

cudaCheck(cudaMalloc( (void**)&dev_indata,N*N*sizeof(float) ), "cudaMalloc(dev_indata)");
cudaCheck(cudaMalloc( (void**)&dev_outdata1,N*N*sizeof(float) ), "cudaMalloc(dev_outdata1)");
cudaCheck(cudaMalloc( (void**)&dev_outdata2,N*N*sizeof(float) ), "cudaMalloc(dev_outdata2)");

// Initialisation

for(j=0 ; j<N ; j++){
        for(i=0 ; i<N ; i++){
            indata[i + N*j] = i + N*j;
        }
}

// Transfer to Device
cudaCheck(cudaMemcpy( dev_indata, indata, N*N*sizeof(float),cudaMemcpyHostToDevice ),
          "cudaMemcpy H2D");

// Warm-up launches: without these, lazy context/module initialisation would be
// charged entirely to the first timed kernel, skewing the comparison.
simple_copy<<<grid, threads1>>>(dev_outdata1,dev_indata);
fast_copy<<<grid, threads2>>>(dev_outdata2,dev_indata);
cudaCheck(cudaGetLastError(), "warm-up launch");
cudaCheck(cudaDeviceSynchronize(), "warm-up sync");

// Simple copy
cudaCheck(cudaEventRecord( start, 0 ), "record start (simple)");
for(k=0 ; k<n_iter ; k++){
    simple_copy<<<grid, threads1>>>(dev_outdata1,dev_indata);
}
cudaCheck(cudaGetLastError(), "simple_copy launch");
cudaCheck(cudaEventRecord( stop, 0 ), "record stop (simple)");

cudaCheck(cudaEventSynchronize( stop ), "sync stop (simple)");
cudaCheck(cudaEventElapsedTime( &time1, start, stop ), "elapsed (simple)");
printf("Elapsed time with simple copy: %f\n",time1);

// Fast copy
cudaCheck(cudaEventRecord( start, 0 ), "record start (fast)");
for(k=0 ; k<n_iter ; k++){
    fast_copy<<<grid, threads2>>>(dev_outdata2,dev_indata);
}
cudaCheck(cudaGetLastError(), "fast_copy launch");
cudaCheck(cudaEventRecord( stop, 0 ), "record stop (fast)");

cudaCheck(cudaEventSynchronize( stop ), "sync stop (fast)");
cudaCheck(cudaEventElapsedTime( &time2, start, stop ), "elapsed (fast)");
printf("Elapsed time with fast copy: %f\n",time2);

// Transfer to Host

cudaCheck(cudaMemcpy( outdata1, dev_outdata1, N*N*sizeof(float),cudaMemcpyDeviceToHost ),
          "cudaMemcpy D2H (outdata1)");
cudaCheck(cudaMemcpy( outdata2, dev_outdata2, N*N*sizeof(float),cudaMemcpyDeviceToHost ),
          "cudaMemcpy D2H (outdata2)");

// Check for error
// Accumulate ABSOLUTE differences: a signed sum would let opposite-sign
// mismatches cancel and report zero error for differing outputs.
float error = 0;
for(j=0 ; j<N ; j++){
        for(i=0 ; i<N ; i++){
            error += fabsf(outdata1[i + N*j] - outdata2[i + N*j]);
        }
}
printf("error: %f\n",error);

/*// Print the copied matrix
printf("Copy\n");
for(j=0 ; j<N ; j++){
        for(i=0 ; i<N ; i++){
            printf("%f\t",outdata1[i + N*j]);
        }
        printf("\n");
}*/

cudaCheck(cudaEventDestroy( start ), "destroy start");
cudaCheck(cudaEventDestroy( stop ), "destroy stop");

free(indata);
free(outdata1);
free(outdata2);

cudaCheck(cudaFree(dev_indata), "cudaFree(dev_indata)");
cudaCheck(cudaFree(dev_outdata1), "cudaFree(dev_outdata1)");
cudaCheck(cudaFree(dev_outdata2), "cudaFree(dev_outdata2)");

cudaCheck(cudaDeviceReset(), "cudaDeviceReset");

getch();   /* conio.h, Windows-only: keep the console window open */

return 0;
 }

//###########################################################################
Run Code Online (Sandbox Code Playground)

Arc*_*are 5

我想你会通过比较两个内核的微码找到答案。

当我为 SM 3.0 编译这些内核时,编译器完全展开了第二个内核中的循环(因为它知道循环将恰好迭代 4 次:TILE_DIM/BLOCK_ROWS = 32/8 = 4)。这可能解释了性能差异——CUDA 硬件可以利用多出的寄存器来隐藏内存延迟和指令延迟。Vasily Volkov 几年前就该主题做了一次精彩的演讲《低占用率下的更好性能》(https://www.nvidia.com/content/GTC-2010/pdfs/2238_GTC2010.pdf)。