2015-06-21

Problem with matrix multiplication in CUDA

I am having trouble with matrix multiplication in CUDA C. After checking for a long time, I found that the problem was that I was using dim3 incorrectly. After correcting it, I got the right results. But when I increased the dimensions of the matrices, the answers became incorrect, and I can no longer start CUDA debugging with Nsight, even though it ran fine before I increased the dimensions.

Kernel code:

__global__ void multiKernal(float* Md, float* Nd, float* Pd, int width)
{
    // Each thread computes one element of the output matrix Pd.
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    float Pvalue = 0;

    // Dot product of one row of Md with one column of Nd.
    for (int k = 0; k < width; ++k) {
        Pvalue += Md[row*width + k] * Nd[col + width*k];
    }
    Pd[row*width + col] = Pvalue;
}

Host code (host-to-device transfer and kernel launch):

void matrixmutiplacation(float*hostM, float*hostN, float*hostP, int width) 
{ 
    int size = width*width*sizeof(float); 
    float* Md; float* Nd; float* Pd; 
    dim3 dimGrid(4, 4, 1); 
    dim3 dimBlock(128, 128, 1); 
    cudaError_t error; 
    cudaEvent_t start; 
    error = cudaEventCreate(&start); 
    cudaEvent_t stop; 
    error = cudaEventCreate(&stop); 

    cudaMalloc((void**)&Md, size); 
    cudaMalloc((void**)&Nd, size); 
    cudaMalloc((void**)&Pd, size); 
    cudaMemcpy(Md, hostM, size, cudaMemcpyHostToDevice); 
    cudaMemcpy(Nd, hostN, size, cudaMemcpyHostToDevice); 
    cudaMemcpy(Pd, hostP, size, cudaMemcpyHostToDevice); 

    error = cudaEventRecord(start, NULL); 
    multiKernal<<<dimGrid, dimBlock>>>(Md, Nd, Pd, width); 
    error = cudaEventRecord(stop, NULL); 
    error = cudaEventSynchronize(stop); 
    float msecTotal = 0.0f; 
    error = cudaEventElapsedTime(&msecTotal, start, stop); 
    float msecPerMatrixMul = msecTotal; 
    printf("running time:%.3f msec", msecPerMatrixMul); 

    cudaMemcpy(hostP, Pd, size, cudaMemcpyDeviceToHost); 
    cudaFree(Md); cudaFree(Nd); cudaFree(Pd); 
    return; 
} 

Main:

int main() 
{ 
    int M = 512 * 512; 
    int N = 512 * 512; 
    int P = 512 * 512; 
    int width = 512; 
    int c[512]; 
    float* hostM = (float*)malloc(sizeof(float)*M); 
    float* hostN = (float*)malloc(sizeof(float)*N); 
    float* hostP = (float*)malloc(sizeof(float)*P); 

    for (int i = 0; i < P; ++i) 
        hostP[i] = 0; 

    for (int i = 0; i < width; i++) 
        c[i] = i + 1; 

    for (int i = 0; i < width; i++) { 
        for (int j = 0; j < width; j++) { 
            hostM[i*width + j] = c[j] + i; 
            hostN[i*width + j] = c[j] + i; 
        } 
    } 

    matrixmutiplacation(hostM, hostN, hostP, width); 

    //for (int i = 0; i <width; i++){ 
    //for (int j = 0; j <width; j++){ 
    // printf("%f\t", hostP[i*width + j]); 
    //} 
    // printf("\n"); 
    //} 

    free(hostM); 
    free(hostN); 
    free(hostP); 

    return 0; 

} 

Answer


Your thread block size is 128x128x1 = 16K threads, but the maximum thread block size is 1024 threads, so the kernel simply never runs. Try running the application under cuda-memcheck; it will probably tell you what is wrong with the code. It is also good practice to check the error codes returned by the CUDA runtime API functions.
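As a minimal sketch (not part of the original answer) of a legal launch configuration plus the suggested error checking, assuming width = 512 and that width is a multiple of the block edge, and reusing the variable names from the question:

// 16 x 16 = 256 threads per block, well under the 1024-thread limit.
dim3 dimBlock(16, 16, 1);
// 32 x 32 blocks cover a 512 x 512 matrix (width assumed divisible by 16).
dim3 dimGrid(width / dimBlock.x, width / dimBlock.y, 1);

multiKernal<<<dimGrid, dimBlock>>>(Md, Nd, Pd, width);

// Check for launch errors and for errors raised while the kernel runs.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
    printf("launch failed: %s\n", cudaGetErrorString(err));
err = cudaDeviceSynchronize();
if (err != cudaSuccess)
    printf("kernel failed: %s\n", cudaGetErrorString(err));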


I thought "1024 * 1024 * 64" was the number of threads allowed per block. Thanks! –


@CalvinLouBME: the maximum *dimensions* of a block and the maximum number of threads per block are different things, and both act as limits on what constitutes a legal block size – talonmies
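To illustrate that distinction, here is a small sketch (assuming device 0 and the CUDA runtime API) that queries both limits:

cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);   // device 0 assumed
// maxThreadsDim limits each block dimension separately (e.g. 1024 x 1024 x 64);
// maxThreadsPerBlock caps the product of all three (e.g. 1024).
printf("max block dims: %d x %d x %d\n",
       prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("max threads per block: %d\n", prop.maxThreadsPerBlock);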