Solving tridiagonal linear systems in CUDA

pen*_*jun 2 cuda

I am trying to implement a tridiagonal system solver based on the Cyclic Reduction method on my GTS450.

Cyclic Reduction is explained in this paper:

Y. Zhang, J. Cohen, J. D. Owens, "Fast Tridiagonal Solvers on the GPU"

However, whatever I do, my CUDA code is far slower than the sequential counterpart. My result for a 512 x 512 system is about 7 ms, whereas it takes 5 ms on my i7 3.4 GHz. The GPU is not accelerating at all!

What could be the problem?

#include "cutrid.cuh"
 __global__ void cutrid_RC_1b(double *a,double *b,double *c,double *d,double *x)
{
 int idx_global=blockIdx.x*blockDim.x+threadIdx.x;
 int idx=threadIdx.x;

 __shared__ double asub[512];
 __shared__ double bsub[512];
 __shared__ double csub[512];
 __shared__ double dsub[512];

 double at=0;
 double bt=0;
 double ct=0;
 double dt=0;

 asub[idx]=a[idx_global];
 bsub[idx]=b[idx_global];
 csub[idx]=c[idx_global];
 dsub[idx]=d[idx_global];


 for(int stride=1;stride<N;stride*=2)
  {
    int margin_left,margin_right;
    margin_left=idx-stride;
    margin_right=idx+stride;


    at=(margin_left>=0)?(-csub[idx-stride]*asub[idx]/bsub[idx-stride]):0.f; 

    bt=bsub[idx]+((margin_left>=0)?(-csub[idx-stride]*asub[idx]/bsub[idx-stride]):0.f)
    -((margin_right<512)?asub[idx+stride]*csub[idx]/bsub[idx+stride]:0.f); 

    ct=(margin_right<512)?(-csub[idx+stride]*asub[idx]/bsub[idx+stride]):0.f; 

    dt=dsub[idx]+((margin_left>=0)?(-dsub[idx-stride]*asub[idx]/bsub[idx-stride]):0.f)
    -((margin_right<512)?dsub[idx+stride]*csub[idx]/bsub[idx+stride]:0.f); 

    __syncthreads();
    asub[idx]=at;
    bsub[idx]=bt;
    csub[idx]=ct;
    dsub[idx]=dt;
    __syncthreads();
  }


x[idx_global]=dsub[idx]/bsub[idx];

}/*}}}*/

I launch this kernel as cutrid_RC_1b<<<512,512>>>(d_a,d_b,d_c,d_d,d_x) and reach 100% device occupancy. This result has baffled me for days.
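For reference, the launch and timing can be reproduced with a simplified host-side sketch like the following (error checking, host-to-device copies and cleanup are omitted; the kernel above is assumed to be in the same .cu file):

#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    const int numSystems = 512, systemSize = 512;            // 512 independent systems of 512 unknowns
    const size_t bytes = (size_t)numSystems * systemSize * sizeof(double);

    double *d_a, *d_b, *d_c, *d_d, *d_x;
    cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes);
    cudaMalloc(&d_d, bytes); cudaMalloc(&d_x, bytes);
    // ... fill the device arrays with cudaMemcpy here ...

    cudaEvent_t start, stop;
    cudaEventCreate(&start); cudaEventCreate(&stop);

    cudaEventRecord(start);
    cutrid_RC_1b<<<numSystems, systemSize>>>(d_a, d_b, d_c, d_d, d_x);   // one block per system
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("kernel time: %f ms\n", ms);

    return 0;
}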

Here is an improved version of my code:

    #include "cutrid.cuh"
    __global__ void cutrid_RC_1b(float *a,float *b,float *c,float *d,float *x)
    {/*{{{*/
     int idx_global=blockIdx.x*blockDim.x+threadIdx.x;
     int idx=threadIdx.x;

     __shared__ float asub[512];
     __shared__ float bsub[512];
     __shared__ float csub[512];
     __shared__ float dsub[512];

    asub[idx]=a[idx_global];
    bsub[idx]=b[idx_global];
    csub[idx]=c[idx_global];
    dsub[idx]=d[idx_global];
 __syncthreads();
   //Reduction  
    for(int stride=1;stride<512;stride*=2)
    {
        int margin_left=(idx-stride);
        int margin_right=(idx+stride);
        if(margin_left<0) margin_left=0;
        if(margin_right>=512) margin_right=511;
        float tmp1 = asub[idx] / bsub[margin_left];
        float tmp2 = csub[idx] / bsub[margin_right];
        float tmp3 = dsub[margin_right];
        float tmp4 = dsub[margin_left];
        __syncthreads();

        dsub[idx] = dsub[idx] - tmp4*tmp1-tmp3*tmp2;
        bsub[idx] = bsub[idx]-csub[margin_left]*tmp1-asub[margin_right]*tmp2;

        tmp3 = -csub[margin_right]; 
        tmp4 = -asub[margin_left];

        __syncthreads();
        asub[idx] = tmp3*tmp1;
        csub[idx] = tmp4*tmp2;
        __syncthreads();
     }

        x[idx_global]=dsub[idx]/bsub[idx];

    }/*}}}*/

Its speed improved to 0.73 ms on a Quadro K4000 for a 512 x 512 system, whereas the code from the paper mentioned above runs in 0.5 ms on a GTX 280.

Jac*_*ern 6

Solving a tridiagonal system of equations is a challenging parallel problem, since the classical solution scheme, i.e., Gaussian elimination, is inherently sequential.

Cyclic Reduction consists of two phases:

  1. Forward reduction. The original system is split into two independent tridiagonal systems for two sets of unknowns, those with odd index and those with even index. Each such system can be solved independently, and this step can be seen as the first of a divide et impera scheme. The two smaller systems are split again in the same way into two subsystems each, and the process is repeated until a system of only 2 equations is reached (a sequential sketch of this phase follows the list).
  2. Backward substitution. The system of 2 equations is solved first. Then, the divide et impera structure is climbed back up by solving the subsystems independently on different cores.
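To make the forward-reduction recurrence concrete, here is a minimal sequential sketch of phase 1 in plain C++ (my own illustration, not code from the paper). It assumes the system size is a power of two and the usual tridiagonal storage convention a[0] = 0 and c[n-1] = 0:

#include <vector>
#include <algorithm>

// Forward-reduction phase of Cyclic Reduction (sequential reference sketch).
// a: sub-diagonal, b: diagonal, c: super-diagonal, d: right-hand side.
// At each level, the equations with index i = stride-1, 2*stride-1, ... are rewritten
// in terms of their neighbours at distance stride/2, so that the surviving equations
// form a tridiagonal system of half the size; after the last level only 2 equations remain.
void cr_forward(std::vector<double>& a, std::vector<double>& b,
                std::vector<double>& c, std::vector<double>& d)
{
    const int n = static_cast<int>(b.size());        // power of two, with a[0] == 0 and c[n-1] == 0
    for (int stride = 2; stride <= n / 2; stride *= 2) {
        const int delta = stride / 2;
        for (int i = stride - 1; i < n; i += stride) {
            const int iLeft  = i - delta;
            const int iRight = std::min(i + delta, n - 1);    // clamp at the right boundary
            const double tmp1 = a[i] / b[iLeft];              // eliminate the left neighbour
            const double tmp2 = c[i] / b[iRight];             // eliminate the right neighbour
            b[i] -= c[iLeft] * tmp1 + a[iRight] * tmp2;
            d[i] -= d[iLeft] * tmp1 + d[iRight] * tmp2;
            a[i]  = -a[iLeft] * tmp1;                         // now couples equation i with x[i-stride]
            c[i]  = -c[iRight] * tmp2;                        // now couples equation i with x[i+stride]
        }
    }
}

This is the same per-equation update performed in parallel, one level per loop iteration, by the kernel quoted below.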

I am not sure (but correct me if I am wrong) that your code returns consistent results. N does not appear to be defined. Also, you are accessing csub[idx-stride], but I do not see what happens when idx==0 and stride>1. Furthermore, you are using several conditional statements, essentially for boundary checking. Finally, your code lacks a proper thread structure capable of handling the divide et impera scheme mentioned above, which is conceptually very similar to the one used in the CUDA SDK reduction samples.

As I mentioned in one of my comments above, I remembered that at tridiagonalsolvers you can find an implementation of the Cyclic Reduction scheme for solving tridiagonal systems of equations. Browsing the related Google Code pages, it seems to me that the code is maintained by the first author of the above paper (Yao Zhang). The code is copied and pasted below. Note that the boundary check is done only once (if (iRight >= systemSize) iRight = systemSize - 1;), thus limiting the number of conditional statements involved. Note also the thread structure capable of handling the divide et impera scheme.

The code by Zhang, Cohen and Owens

template <class T>   // T is the floating-point type (float or double)
__global__ void crKernel(T *d_a, T *d_b, T *d_c, T *d_d, T *d_x)
{
   int thid = threadIdx.x;
   int blid = blockIdx.x;

   int stride = 1;

   int numThreads = blockDim.x;
   const unsigned int systemSize = blockDim.x * 2;

   int iteration = (int)log2(T(systemSize/2));
   #ifdef GPU_PRINTF 
    if (thid == 0 && blid == 0) printf("iteration = %d\n", iteration);
   #endif

   __syncthreads();

   extern __shared__ char shared[];

   T* a = (T*)shared;
   T* b = (T*)&a[systemSize];
   T* c = (T*)&b[systemSize];
   T* d = (T*)&c[systemSize];
   T* x = (T*)&d[systemSize];

   a[thid] = d_a[thid + blid * systemSize];
   a[thid + blockDim.x] = d_a[thid + blockDim.x + blid * systemSize];

   b[thid] = d_b[thid + blid * systemSize];
   b[thid + blockDim.x] = d_b[thid + blockDim.x + blid * systemSize];

   c[thid] = d_c[thid + blid * systemSize];
   c[thid + blockDim.x] = d_c[thid + blockDim.x + blid * systemSize];

   d[thid] = d_d[thid + blid * systemSize];
   d[thid + blockDim.x] = d_d[thid + blockDim.x + blid * systemSize];

   __syncthreads();

   //forward elimination
   for (int j = 0; j <iteration; j++)
   {
       __syncthreads();
       stride *= 2;
       int delta = stride/2;

    if (threadIdx.x < numThreads)
    {
        int i = stride * threadIdx.x + stride - 1;
        int iLeft = i - delta;
        int iRight = i + delta;
        if (iRight >= systemSize) iRight = systemSize - 1;
        T tmp1 = a[i] / b[iLeft];
        T tmp2 = c[i] / b[iRight];
        b[i] = b[i] - c[iLeft] * tmp1 - a[iRight] * tmp2;
        d[i] = d[i] - d[iLeft] * tmp1 - d[iRight] * tmp2;
        a[i] = -a[iLeft] * tmp1;
        c[i] = -c[iRight] * tmp2;
    }
       numThreads /= 2;
   }

   if (thid < 2)
   {
     int addr1 = stride - 1;
     int addr2 = 2 * stride - 1;
     T tmp3 = b[addr2]*b[addr1]-c[addr1]*a[addr2];
     x[addr1] = (b[addr2]*d[addr1]-c[addr1]*d[addr2])/tmp3;
     x[addr2] = (d[addr2]*b[addr1]-d[addr1]*a[addr2])/tmp3;
   }

   // backward substitution
   numThreads = 2;
   for (int j = 0; j <iteration; j++)
   {
       int delta = stride/2;
       __syncthreads();
       if (thid < numThreads)
       {
           int i = stride * thid + stride/2 - 1;
           if(i == delta - 1)
                 x[i] = (d[i] - c[i]*x[i+delta])/b[i];
           else
                 x[i] = (d[i] - a[i]*x[i-delta] - c[i]*x[i+delta])/b[i];
        }
        stride /= 2;
        numThreads *= 2;
     }

   __syncthreads();

   d_x[thid + blid * systemSize] = x[thid];
   d_x[thid + blockDim.x + blid * systemSize] = x[thid + blockDim.x];

}
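A note on launching this kernel: from the indexing above, each block solves one system, each thread loads two elements, and the five shared arrays each hold systemSize entries. A hedged host-side sketch (deduced from the kernel, not taken from the original host code, with a hypothetical helper name) would therefore look like this:

// Hypothetical launch helper: one block per tridiagonal system, blockDim.x = systemSize / 2
// threads, and dynamic shared memory for the five arrays a, b, c, d, x of systemSize elements each.
void solveTridiagonalSystems(float *d_a, float *d_b, float *d_c, float *d_d, float *d_x,
                             int numSystems, int systemSize)   // systemSize must be a power of two
{
    dim3 grid(numSystems);
    dim3 block(systemSize / 2);                                   // blockDim.x * 2 == systemSize
    size_t sharedBytes = 5 * (size_t)systemSize * sizeof(float);  // a, b, c, d, x
    crKernel<float><<<grid, block, sharedBytes>>>(d_a, d_b, d_c, d_d, d_x);
}

For the 512 x 512 case above, this amounts to a grid of 512 blocks of 256 threads each, with 10240 bytes of shared memory per block.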