On a Fermi-generation GPU, single-precision arithmetic should be roughly twice as fast as double precision. However, after rewriting every 'double' declaration as 'float', I get no speedup at all. Is there something wrong with what I'm doing, e.g. the compile options?
GPU: Tesla C2075; OS: Windows 7 Pro; compiler: VS2013 (nvcc); CUDA: v7.5; command line: nvcc test.cu
Here is the test code I wrote:
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#include<conio.h>
#include<cuda_runtime.h>
#include<cuda_profiler_api.h>
#include<device_functions.h>
#include<device_launch_parameters.h>
#define DOUBLE 1   // 1: run the double kernel, 0: run the float kernel
#define MAXI 10
__global__ void Kernel_double(double*a,int nthreadx)
{
double b=1.e0;
int i;
i = blockIdx.x * nthreadx + threadIdx.x + 0;
a[i] *= b;
}
__global__ void Kernel_float(float*a,int nthreadx)
{
float b=1.0F;
int i;
i = blockIdx.x * nthreadx + threadIdx.x + 0;
a[i] *= b;
}
int main()
{
#if DOUBLE
double a[10];
for(int i=0;i<MAXI;++i){
a[i]=1.e0;
}
double*d_a;
cudaMalloc((void**)&d_a, sizeof(double)*(MAXI));
cudaMemcpy(d_a, a, sizeof(double)*(MAXI), cudaMemcpyHostToDevice);
#else
float a[10];
for(int i=0;i<MAXI;++i){
a[i]=1.0F;
}
float*d_a;
cudaMalloc((void**)&d_a, sizeof(float)*(MAXI));
cudaMemcpy(d_a, a, sizeof(float)*(MAXI), cudaMemcpyHostToDevice);
#endif
dim3 grid(2, 2, 1);
dim3 block(2, 2, 1);
clock_t start_clock, end_clock;
double sec_clock;
printf("[%d] start\n", __LINE__);
start_clock = clock();
for (int i = 1; i <= 100000; ++i){
#if DOUBLE
Kernel_double<<<grid, block>>>(d_a, 2);
cudaMemcpy(a, d_a, sizeof(double)*(MAXI), cudaMemcpyDeviceToHost);
#else
Kernel_float<<<grid, block>>>(d_a, 2);
cudaMemcpy(a, d_a, sizeof(float)*(MAXI), cudaMemcpyDeviceToHost);
#endif
}
end_clock = clock();
sec_clock = (end_clock - start_clock) / (double)CLOCKS_PER_SEC;
printf("[%d] %f[s]\n", __LINE__, sec_clock);
printf("[%d] end\n", __LINE__);
return 0;
}
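(Side note, not part of the original question: the loop above measures 100,000 kernel launches plus a device-to-host copy on every iteration with the host clock(), so launch and copy overhead can easily swamp any float/double difference. Below is a minimal sketch of timing only the kernel with CUDA events, assuming the same d_a, grid and block as above; compiling with an explicit Fermi architecture flag, e.g. nvcc -arch=sm_20 -O2 test.cu, is also worth trying.)
cudaEvent_t ev_start, ev_stop;                 // sketch only; not from the original post
cudaEventCreate(&ev_start);
cudaEventCreate(&ev_stop);
cudaEventRecord(ev_start);
for (int i = 1; i <= 100000; ++i){
    Kernel_double<<<grid, block>>>(d_a, 2);    // no memcpy inside the timed loop
}
cudaEventRecord(ev_stop);
cudaEventSynchronize(ev_stop);                 // wait for all launched kernels to finish
float ms = 0.0f;
cudaEventElapsedTime(&ms, ev_start, ev_stop);  // elapsed GPU time in milliseconds
printf("kernel time: %f ms\n", ms);
cudaEventDestroy(ev_start);
cudaEventDestroy(ev_stop);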
Well, after some investigation: that is because the kernel only multiplies by the constant 1, so the compiler optimizes the operation into a "do nothing" in the generated binary (the emitted device code, which you can inspect with cuobjdump, contains no actual multiply).
If you instead square the array elements (which prevents this trivial optimization), the compiler has to emit the real floating-point multiply instructions.
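(Another way to keep the multiply alive, shown here only as a sketch and not taken from the original answer: pass the factor in as a kernel argument, so its value is known only at launch time and cannot be constant-folded.)
// Sketch (assumption): a runtime factor also defeats the constant folding.
__global__ void Kernel_double_scaled(double *a, int nthreadx, double b)
{
    int i = blockIdx.x * nthreadx + threadIdx.x;
    a[i] *= b;   // b is not a compile-time constant, so a real FP64 multiply is emitted
}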
The performance gain then reappears with the (simplified) snippet below, in which I changed a few things.
Here is the code:
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#include<conio.h>
#include<cuda_runtime.h>
#include<cuda_profiler_api.h>
#include<device_functions.h>
#include<device_launch_parameters.h>
#define DOUBLE float   // element type under test: change to double for the FP64 run
#define ITER 10
#define MAXI 100000000
__global__ void kernel(DOUBLE*a)
{
    // Grid-stride loop: each thread handles elements i, i+stride, i+2*stride, ...
    for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < MAXI; i += blockDim.x * gridDim.x)
    {
        a[i] *= a[i];   // square in place, so the multiply cannot be optimized away
    }
}
int main()
{
DOUBLE* a = (DOUBLE*) malloc(MAXI*sizeof(DOUBLE));
for(int i=0;i<MAXI;++i)
{
a[i]=(DOUBLE)1.0;
}
DOUBLE* d_a;
cudaMalloc((void**)&d_a, sizeof(DOUBLE)*(MAXI));
cudaMemcpy(d_a, a, sizeof(DOUBLE)*(MAXI), cudaMemcpyHostToDevice);
clock_t start_clock, end_clock;
double sec_clock;
printf("[%d] start\n", __LINE__);
start_clock = clock();
for (int i = 1; i <= ITER; ++i){
kernel <<< 32, 256>>> (d_a);
}
cudaDeviceSynchronize();
end_clock = clock();
cudaMemcpy(a, d_a, sizeof(DOUBLE)*(MAXI), cudaMemcpyDeviceToHost);
sec_clock = (end_clock - start_clock) / (double)CLOCKS_PER_SEC;
printf("[%d] %f/%d[s]\n", __LINE__, sec_clock, CLOCKS_PER_SEC);
printf("[%d] end\n", __LINE__);
return 0;
}
(You will notice that I allocate an array of 100M elements so that the run time becomes measurable.)
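(One more optional tweak, not part of the original answer: instead of editing the #define DOUBLE line by hand, the type macro could be guarded so that both builds can be produced straight from the nvcc command line.)
// Sketch (assumption): allow the element type to be overridden at compile time, e.g.
//   nvcc -arch=sm_20 -O2 -DDOUBLE=float  test.cu -o test_float
//   nvcc -arch=sm_20 -O2 -DDOUBLE=double test.cu -o test_double
#ifndef DOUBLE
#define DOUBLE float   // default element type when nothing is passed to nvcc
#endif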