CUDA 7.5 experimental __host__ __device__ lambdas

Asked by hav*_*ogt (score 5). Tags: c++, lambda, cuda, c++11

I played around a bit with the experimental device lambdas that were introduced in CUDA 7.5 and promoted in Mark Harris's blog post.

For the following example I removed a lot of stuff that is not needed to show my problem (my actual implementation looks a bit nicer...).

I tried to write a foreach function that can operate on vectors either on the device (one thread per element) or on the host (serially), depending on a template parameter. With this foreach function I can easily implement BLAS functions. As an example, I use assigning a scalar to each component of a vector (I attach the complete code at the end):

template<bool onDevice> void assignScalar( size_t size, double* vector, double a )
{
    auto assign = [=] __host__ __device__ ( size_t index ) { vector[index] = a; };
    if( onDevice )
    {
        foreachDevice( size, assign );
    }
    else
    {
        foreachHost( size, assign );
    }
}

However, this code produces a compiler error because of the __host__ __device__ lambda:

lambda("lambda - > void")的闭包类型不能在__global__函数模板实例化的模板参数类型中使用,除非lambda在__device__或__global__函数中定义

If I remove the __device__ from the lambda expression, I get the same error, and if I remove the __host__ (leaving a __device__-only lambda), there is no compile error, but in that case the host part is not executed...

If I define the lambdas as either __host__ or __device__ separately, the code compiles and runs as expected.

template<bool onDevice> void assignScalar2( size_t size, double* vector, double a )
{
    if( onDevice )
    {
        auto assign = [=] __device__ ( size_t index ) { vector[index] = a; };
        foreachDevice( size, assign );
    }
    else
    {
        auto assign = [=] __host__ ( size_t index ) { vector[index] = a; };
        foreachHost( size, assign );
    }
}

However, this introduces code duplication and in fact makes the whole idea of using lambdas useless for this example.

Is there a way to accomplish what I am trying to do, or is this a bug in the experimental feature? Defining a __host__ __device__ lambda is explicitly mentioned in the first example of the programming guide. Even for that simpler example (just returning a constant value from the lambda), I could not find a way to use the lambda expression on both host and device.
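For reference, that guide-style pattern boils down to something like the following sketch (names and setup are illustrative, not copied from the guide verbatim); the kernel instantiation in the last lines is what triggers the error in CUDA 7.5:

#include <cstdio>

template<typename F> __global__ void callOnDevice( F f )
{
    printf( "device: %d\n", f() );
}

int main()
{
    // a __host__ __device__ lambda that just returns a constant value
    auto one = [] __host__ __device__ () { return 1; };
    printf( "host: %d\n", one() );    // calling it on the host works
    callOnDevice<<<1,1>>>( one );     // in CUDA 7.5 this instantiation hits the error quoted above
    cudaDeviceSynchronize();
}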

Here is the full code, compiled with the options -std=c++11 --expt-extended-lambda:

#include <iostream>
using namespace std;

template<typename Operation> void foreachHost( size_t size, Operation o )
{
    for( size_t i = 0; i < size; ++i )
    {
        o( i );
    }
}

template<typename Operation> __global__ void kernel_foreach( Operation o )
{
    size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    o( index );
}

template<typename Operation> void foreachDevice( size_t size, Operation o )
{
    size_t blocksize = 32;
    size_t gridsize = size/32;
    kernel_foreach<<<gridsize,blocksize>>>( o );
}

__global__ void printFirstElementOnDevice( double* vector )
{
    printf( "dVector[0] = %f\n", vector[0] );
}

void assignScalarHost( size_t size, double* vector, double a )
{
    auto assign = [=] ( size_t index ) { vector[index] = a; };
    foreachHost( size, assign );
}

void assignScalarDevice( size_t size, double* vector, double a )
{
    auto assign = [=] __device__ ( size_t index ) { vector[index] = a; };
    foreachDevice( size, assign );
}

// compile error:
template<bool onDevice> void assignScalar( size_t size, double* vector, double a )
{
    auto assign = [=]  __host__ __device__ ( size_t index ) { vector[index] = a; };
    if( onDevice )
    {
        foreachDevice( size, assign );
    }
    else
    {
        foreachHost( size, assign );
    }
}

// works:
template<bool onDevice> void assignScalar2( size_t size, double* vector, double a )
{
    if( onDevice )
    {
        auto assign = [=] __device__ ( size_t index ) { vector[index] = a; };
        foreachDevice( size, assign );
    }
    else
    {
        auto assign = [=] __host__ ( size_t index ) { vector[index] = a; };
        foreachHost( size, assign );
    }
}

int main()
{
    size_t SIZE = 32;

    double* hVector = new double[SIZE];
    double* dVector;
    cudaMalloc( &dVector, SIZE*sizeof(double) );

    // clear memory
    for( size_t i = 0; i < SIZE; ++i )
    {
        hVector[i] = 0;
    }
    cudaMemcpy( dVector, hVector, SIZE*sizeof(double), cudaMemcpyHostToDevice );

    assignScalarHost( SIZE, hVector, 1.0 );
    cout << "hVector[0] = " << hVector[0] << endl;

    assignScalarDevice( SIZE, dVector, 2.0 );
    printFirstElementOnDevice<<<1,1>>>( dVector );
    cudaDeviceSynchronize();

    assignScalar2<false>( SIZE, hVector, 3.0 );
    cout << "hVector[0] = " << hVector[0] << endl;

    assignScalar2<true>( SIZE, dVector, 4.0 );
    printFirstElementOnDevice<<<1,1>>>( dVector );
    cudaDeviceSynchronize();

//  assignScalar<false>( SIZE, hVector, 5.0 );
//  cout << "hVector[0] = " << hVector[0] << endl;
//
//  assignScalar<true>( SIZE, dVector, 6.0 );
//  printFirstElementOnDevice<<<1,1>>>( dVector );
//  cudaDeviceSynchronize();

    cudaError_t error = cudaGetLastError();
    if(error!=cudaSuccess)
    {
        cout << "ERROR: " << cudaGetErrorString(error);
    }
}

I used the production release of CUDA 7.5.

Update

I tried a third version of the assignScalar function:

template<bool onDevice> void assignScalar3( size_t size, double* vector, double a )
{
#ifdef __CUDA_ARCH__
#define LAMBDA_HOST_DEVICE __device__
#else
#define LAMBDA_HOST_DEVICE __host__
#endif

    auto assign = [=] LAMBDA_HOST_DEVICE ( size_t index ) { vector[index] = a; };
    if( onDevice )
    {
        foreachDevice( size, assign );
    }
    else
    {
        foreachHost( size, assign );
    }
}

It compiles and runs without errors, but the device version (assignScalar3<true>) is not executed. Actually, I expected __CUDA_ARCH__ to always be undefined here (since the function is not __device__), but I checked explicitly that there is a compile path where it is defined.
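As an aside, a minimal sketch (my own illustration, not part of the original question) makes the behaviour visible: __CUDA_ARCH__ distinguishes nvcc's compilation passes, not runtime branches. The translation unit is compiled once for the host (macro undefined) and once per device architecture (macro defined), so an #ifdef inside a host function always takes the host branch in the code that actually runs:

#include <cstdio>

__host__ __device__ void whereAmI()
{
#ifdef __CUDA_ARCH__
    printf( "compiled for the device\n" );    // device pass: __CUDA_ARCH__ is defined
#else
    printf( "compiled for the host\n" );      // host pass: __CUDA_ARCH__ is undefined
#endif
}

__global__ void kernel_whereAmI() { whereAmI(); }

int main()
{
    whereAmI();                    // prints the host message
    kernel_whereAmI<<<1,1>>>();    // prints the device message
    cudaDeviceSynchronize();
}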

Answer by hav*_*ogt (score 3)

What I tried to accomplish with the example provided in the question is not possible with CUDA 7.5, although it is not explicitly excluded from the allowed cases for the experimental lambda support.

According to the blog post "CUDA 8 Features Revealed", NVIDIA announced that the CUDA Toolkit 8.0 will support __host__ __device__ lambdas as an experimental feature.

I verified that my example works with the CUDA 8 Release Candidate (Cuda compilation tools, release 8.0, V8.0.26).

Here is the code I finally used, compiled with nvcc -std=c++11 --expt-extended-lambda:

#include <iostream>
using namespace std;

template<typename Operation> __global__ void kernel_foreach( Operation o )
{
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    o( i );
}

template<bool onDevice, typename Operation> void foreach( size_t size, Operation o )
{
    if( onDevice )
    {
        size_t blocksize = 32;
        size_t gridsize = size/32;
        kernel_foreach<<<gridsize,blocksize>>>( o );
    }
    else
    {
        for( size_t i = 0; i < size; ++i )
        {
            o( i );
        }
    }
}

__global__ void printFirstElementOnDevice( double* vector )
{
    printf( "dVector[0] = %f\n", vector[0] );
}

template<bool onDevice> void assignScalar( size_t size, double* vector, double a )
{
    auto assign = [=]  __host__ __device__ ( size_t i ) { vector[i] = a; };
    foreach<onDevice>( size, assign );
}

int main()
{
    size_t SIZE = 32;

    double* hVector = new double[SIZE];
    double* dVector;
    cudaMalloc( &dVector, SIZE*sizeof(double) );

    // clear memory
    for( size_t i = 0; i < SIZE; ++i )
    {
        hVector[i] = 0;
    }
    cudaMemcpy( dVector, hVector, SIZE*sizeof(double), cudaMemcpyHostToDevice );

    assignScalar<false>( SIZE, hVector, 3.0 );
    cout << "hVector[0] = " << hVector[0] << endl;

    assignScalar<true>( SIZE, dVector, 4.0 );
    printFirstElementOnDevice<<<1,1>>>( dVector );
    cudaDeviceSynchronize();

    cudaError_t error = cudaGetLastError();
    if(error!=cudaSuccess)
    {
        cout << "ERROR: " << cudaGetErrorString(error);
    }
}
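One caveat about this code: gridsize = size/32 only covers every element when size is a multiple of the block size, and kernel_foreach performs no bounds check. For arbitrary sizes, a rounded-up, guarded variant could look like the following sketch (kernel_foreach_bounded and foreach_bounded are illustrative names, not part of the original answer):

template<typename Operation> __global__ void kernel_foreach_bounded( size_t size, Operation o )
{
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if( i < size )    // guard the threads of the last, partially filled block
    {
        o( i );
    }
}

template<bool onDevice, typename Operation> void foreach_bounded( size_t size, Operation o )
{
    if( onDevice )
    {
        size_t blocksize = 32;
        size_t gridsize = ( size + blocksize - 1 ) / blocksize;    // round up instead of truncating
        kernel_foreach_bounded<<<gridsize,blocksize>>>( size, o );
    }
    else
    {
        for( size_t i = 0; i < size; ++i )
        {
            o( i );
        }
    }
}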