I am trying to develop a Sobel example using cudaStream. Here is the program:
void SobelStream(void)
{
    cv::Mat imageGrayL2 = cv::imread("/home/xavier/Bureau/Image1.png",0);

    u_int8_t *u8_PtImageHost;
    u_int8_t *u8_PtImageDevice;
    u_int8_t *u8_ptDataOutHost;
    u_int8_t *u8_ptDataOutDevice;
    u_int8_t u8_Used[NB_STREAM];

    // Host buffers come from plain malloc (pageable memory)
    u8_ptDataOutHost = (u_int8_t *)malloc(WIDTH*HEIGHT*sizeof(u_int8_t));
    checkCudaErrors(cudaMalloc((void**)&u8_ptDataOutDevice,WIDTH*HEIGHT*sizeof(u_int8_t)));

    u8_PtImageHost = (u_int8_t *)malloc(WIDTH*HEIGHT*sizeof(u_int8_t));
    checkCudaErrors(cudaMalloc((void**)&u8_PtImageDevice,WIDTH*HEIGHT*sizeof(u_int8_t)));

    cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<unsigned char>();
    checkCudaErrors(cudaMallocArray(&Array_PatchsMaxDevice, &channelDesc, WIDTH, HEIGHT));
    checkCudaErrors(cudaBindTextureToArray(Image, Array_PatchsMaxDevice));

    dim3 threads(BLOC_X,BLOC_Y);
    dim3 blocks(ceil((float)WIDTH/BLOC_X),ceil((float)HEIGHT/BLOC_Y));
    ClearKernel<<<blocks,threads>>>(u8_ptDataOutDevice,WIDTH,HEIGHT);

    int blockh = HEIGHT/NB_STREAM;   // height of the band handled by each stream

    Stream = (cudaStream_t *)malloc(NB_STREAM * sizeof(cudaStream_t));
    for (int i = 0; i < NB_STREAM; i++)
    {
        checkCudaErrors(cudaStreamCreate(&(Stream[i])));
    }

    // for(int i=0;i<NB_STREAM;i++)
    // {
    //     cudaSetDevice(0);
    //     cudaStreamCreate(&Stream[i]);
    // }

    cudaEvent_t Start;
    cudaEvent_t Stop;
    cudaEventCreate(&Start);
    cudaEventCreate(&Stop);

    cudaEventRecord(Start, 0);

    // H->D copies, one band per stream (the i==0 branch also launches a kernel)
    for (int i = 0; i < NB_STREAM; i++)
    {
        if (i == 0)
        {
            int localHeight = blockh;
            checkCudaErrors(cudaMemcpy2DToArrayAsync(Array_PatchsMaxDevice,
                                                     0,
                                                     0,
                                                     imageGrayL2.data, //u8_PtImageDevice,
                                                     WIDTH,
                                                     WIDTH,
                                                     blockh,
                                                     cudaMemcpyHostToDevice,
                                                     Stream[i]));

            dim3 threads(BLOC_X,BLOC_Y);
            dim3 blocks(ceil((float)WIDTH/BLOC_X),ceil((float)localHeight/BLOC_Y));
            SobelKernel<<<blocks,threads,0,Stream[i]>>>(u8_ptDataOutDevice,0,WIDTH,localHeight-1);
            checkCudaErrors(cudaGetLastError());

            u8_Used[i] = 1;
        }
        else
        {
            int ioffsetImage = WIDTH*(HEIGHT/NB_STREAM);
            int hoffset = HEIGHT/NB_STREAM *i;
            int hoffsetkernel = HEIGHT/NB_STREAM -1 + HEIGHT/NB_STREAM*(i-1);
            int localHeight = min(HEIGHT - (blockh*i),blockh);
            //printf("hoffset: %d hoffsetkernel %d localHeight %d rest %d ioffsetImage %d \n",hoffset,hoffsetkernel,localHeight,HEIGHT - (blockh +1 +blockh*(i-1)),ioffsetImage*i/WIDTH);

            checkCudaErrors(cudaMemcpy2DToArrayAsync(Array_PatchsMaxDevice,
                                                     0,
                                                     hoffset,
                                                     &imageGrayL2.data[ioffsetImage*i], //&u8_PtImageDevice[ioffset*i],
                                                     WIDTH,
                                                     WIDTH,
                                                     localHeight,
                                                     cudaMemcpyHostToDevice,
                                                     Stream[i]));

            u8_Used[i] = 1;
            if (HEIGHT - (blockh +1 +blockh*(i-1)) <= 0)
            {
                break;
            }
        }
    }

    // Kernel launches, one band per stream
    for (int i = 0; i < NB_STREAM; i++)
    {
        if (i == 0)
        {
            int localHeight = blockh;
            dim3 threads(BLOC_X,BLOC_Y);
            dim3 blocks(1,1);
            SobelKernel<<<blocks,threads,0,Stream[i]>>>(u8_ptDataOutDevice,0,WIDTH,localHeight-1);
            checkCudaErrors(cudaGetLastError());
            u8_Used[i] = 1;
        }
        else
        {
            int ioffsetImage = WIDTH*(HEIGHT/NB_STREAM);
            int hoffset = HEIGHT/NB_STREAM *i;
            int hoffsetkernel = HEIGHT/NB_STREAM -1 + HEIGHT/NB_STREAM*(i-1);
            int localHeight = min(HEIGHT - (blockh*i),blockh);
            dim3 threads(BLOC_X,BLOC_Y);
            dim3 blocks(1,1);
            SobelKernel<<<blocks,threads,0,Stream[i]>>>(u8_ptDataOutDevice,hoffsetkernel,WIDTH,localHeight);
            checkCudaErrors(cudaGetLastError());
            u8_Used[i] = 1;
            if (HEIGHT - (blockh +1 +blockh*(i-1)) <= 0)
            {
                break;
            }
        }
    }

    // D->H copies of the results, one band per stream
    for (int i = 0; i < NB_STREAM; i++)
    {
        if (i == 0)
        {
            int localHeight = blockh;
            checkCudaErrors(cudaMemcpyAsync(u8_ptDataOutHost,u8_ptDataOutDevice,WIDTH*(localHeight-1)*sizeof(u_int8_t),cudaMemcpyDeviceToHost,Stream[i]));
            u8_Used[i] = 1;
        }
        else
        {
            int ioffsetImage = WIDTH*(HEIGHT/NB_STREAM);
            int hoffset = HEIGHT/NB_STREAM *i;
            int hoffsetkernel = HEIGHT/NB_STREAM -1 + HEIGHT/NB_STREAM*(i-1);
            int localHeight = min(HEIGHT - (blockh*i),blockh);
            checkCudaErrors(cudaMemcpyAsync(&u8_ptDataOutHost[hoffsetkernel*WIDTH],&u8_ptDataOutDevice[hoffsetkernel*WIDTH],WIDTH*localHeight*sizeof(u_int8_t),cudaMemcpyDeviceToHost,Stream[i]));
            u8_Used[i] = 1;
            if (HEIGHT - (blockh +1 +blockh*(i-1)) <= 0)
            {
                break;
            }
        }
    }

    for (int i = 0; i < NB_STREAM; i++)
    {
        cudaStreamSynchronize(Stream[i]);
    }

    cudaEventRecord(Stop, 0);
    cudaEventSynchronize(Start);
    cudaEventSynchronize(Stop);

    float dt_ms;
    cudaEventElapsedTime(&dt_ms, Start, Stop);
    printf("dt_ms %f \n",dt_ms);
}
The behavior when I execute the program is very strange. I decided to profile my example, and this is what I got: [profiler timeline screenshot]

I don't understand it; each stream seems to be waiting for the others. Can someone help me?
First of all, in the future please provide a complete code. I am also working from your cross-posting here to fill in some details, such as the kernel sizes.

You have two issues to address:
First, any time you wish to use cudaMemcpyAsync, you will most likely want to be working with pinned host allocations. If you use allocations created with, e.g., malloc, you will not get the expected behavior from cudaMemcpyAsync as far as asynchronous concurrent execution is concerned. This necessity is covered in the programming guide:

"If host memory is involved in the copy, it must be page-locked."
So the first change to make to your code is to convert this:
u8_PtImageHost = (u_int8_t *)malloc(WIDTH*HEIGHT*sizeof(u_int8_t));
u8_ptDataOutHost = (u_int8_t *)malloc(WIDTH*HEIGHT*sizeof(u_int8_t));
to this:
checkCudaErrors(cudaHostAlloc(&u8_PtImageHost, WIDTH*HEIGHT*sizeof(u_int8_t), cudaHostAllocDefault));
checkCudaErrors(cudaHostAlloc(&u8_ptDataOutHost, WIDTH*HEIGHT*sizeof(u_int8_t), cudaHostAllocDefault));
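A small related detail: buffers allocated with cudaHostAlloc must later be released with cudaFreeHost rather than free. The posted code never frees these buffers, so this only matters once cleanup is added:

cudaFreeHost(u8_PtImageHost);
cudaFreeHost(u8_ptDataOutHost);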
According to my testing, that change alone brings your execution time down from about 21 ms to about 7 ms. The reason is that without the change, we get no overlap whatsoever: [profiler timeline screenshot]
With the change, the copy activity can overlap with itself (H->D with D->H) and with kernel execution: [profiler timeline screenshot]
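For reference, here is a minimal sketch of the chunked copy/compute/copy pattern that produces this kind of overlap. All names here (chunkedPipeline, dummyKernel) are illustrative rather than taken from the original program, a plain linear device buffer stands in for the texture/cudaArray path, and error checking is omitted for brevity:

#include <cuda_runtime.h>

// Stand-in for the real filtering kernel.
__global__ void dummyKernel(unsigned char *dst, const unsigned char *src, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) dst[i] = 255 - src[i];
}

void chunkedPipeline(int width, int height, int nbStream)  // assume nbStream <= 16
{
    size_t total = (size_t)width * height;
    unsigned char *hIn, *hOut, *dIn, *dOut;
    cudaHostAlloc((void**)&hIn,  total, cudaHostAllocDefault);  // pinned host buffers:
    cudaHostAlloc((void**)&hOut, total, cudaHostAllocDefault);  // required for true async copies
    cudaMalloc((void**)&dIn,  total);
    cudaMalloc((void**)&dOut, total);

    cudaStream_t streams[16];
    for (int i = 0; i < nbStream; i++) cudaStreamCreate(&streams[i]);

    size_t chunk = total / nbStream;   // assume nbStream divides the image evenly
    for (int i = 0; i < nbStream; i++) {
        size_t off = i * chunk;
        int grid = (int)((chunk + 255) / 256);
        // All three operations for chunk i go into stream i, so chunk i+1's
        // H->D copy can overlap chunk i's kernel and D->H copy.
        cudaMemcpyAsync(dIn + off, hIn + off, chunk, cudaMemcpyHostToDevice, streams[i]);
        dummyKernel<<<grid, 256, 0, streams[i]>>>(dOut + off, dIn + off, (int)chunk);
        cudaMemcpyAsync(hOut + off, dOut + off, chunk, cudaMemcpyDeviceToHost, streams[i]);
    }
    cudaDeviceSynchronize();

    for (int i = 0; i < nbStream; i++) cudaStreamDestroy(streams[i]);
    cudaFreeHost(hIn); cudaFreeHost(hOut);
    cudaFree(dIn); cudaFree(dOut);
}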
The second problem you face in achieving concurrent kernel execution is that your kernels are simply too large (too many blocks/threads):
#define WIDTH 6400
#define HEIGHT 4800
#define NB_STREAM 10
#define BLOC_X 32
#define BLOC_Y 32
dim3 threads(BLOC_X,BLOC_Y);
dim3 blocks(ceil((float)WIDTH/BLOC_X),ceil((float)HEIGHT/BLOC_Y));
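To make the point concrete: with these numbers, a full-image launch comes to ceil(6400/32) × ceil(4800/32) = 200 × 150 = 30,000 blocks of 32 × 32 = 1,024 threads each, and even the per-stream band launches in the code above are 200 × 15 = 3,000 blocks. Any one of these launches is enough to saturate the GPU by itself, leaving no room for another kernel's blocks to be scheduled alongside.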
My suggestion would be that if these are the kernel sizes you need to run, there probably isn't much benefit in trying to arrange for kernel overlap: each kernel launches enough blocks to "fill" the GPU, so you have already exposed enough parallelism to keep the GPU busy. But if you are desperate to witness kernel concurrency, you could make your kernels use a smaller number of blocks while making each kernel spend more time executing. We could do this by launching 1 block and having the threads of that block perform the image filtering, as in the sketch below.
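Here is a minimal sketch of that idea. bandKernel and its body are hypothetical stand-ins for the actual SobelKernel (which reads through a texture); the point is only the launch shape: a single block whose threads stride over the whole band.

// A single block of BLOC_X x BLOC_Y threads strides over an entire band of
// the image, so each of the NB_STREAM kernels occupies only one block's
// worth of resources and several of them can be resident at the same time.
__global__ void bandKernel(u_int8_t *dst, const u_int8_t *src,
                           int width, int bandHeight)
{
    for (int row = threadIdx.y; row < bandHeight; row += blockDim.y)
        for (int col = threadIdx.x; col < width; col += blockDim.x)
            dst[row * width + col] = src[row * width + col]; // real filtering would go here
}

// Launched with one block per stream, e.g.:
//   bandKernel<<<dim3(1,1), dim3(BLOC_X,BLOC_Y), 0, Stream[i]>>>(
//       outBand, inBand, WIDTH, HEIGHT/NB_STREAM);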