
Invalid argument error in cudaMemcpy from device to host

I am new to CUDA/GPU programming and I am having trouble copying data from the device back to the host. I am working on a Jetson TK1 with CUDA Toolkit 6.5. The code builds successfully, but the error occurs at runtime. My code is as follows:

//main.cu
void allocate(double* const d_inputCurrent, double* signal, double* const d_outputCurrent, const size_t size);

int main() {
    int data_length = 1024000;
    const int length = 512;
    const size_t size = length;

    // v_ia, v_ib, v_ic: host input arrays defined elsewhere (not shown in this snippet)
    double signalA[length], signalB[length], signalC[length];

    for (int i = 0; i < data_length; i++)
    {
        double *d_inputCurrentIa, *d_inputCurrentIb, *d_inputCurrentIc;
        double *d_outputCurrentIa, *d_outputCurrentIb, *d_outputCurrentIc;

        if (i == 0)
        {
            for (int k = 0; k < length; k++)
            {
                signalA[k] = v_ia[k];
                signalB[k] = v_ib[k];
                signalC[k] = v_ic[k];
            }
            i = length - 1;
        }
        else
        {
            //allocate memory in GPU and kernel call for phase A
            allocate(d_inputCurrentIa, signalA, d_outputCurrentIa, size);
            cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

            checkCudaErrors(cudaMemcpy(signalA, d_outputCurrentIa, sizeof(double) * size, cudaMemcpyDeviceToHost));
            signalA[length-1] = v_ia[i];

            //allocate memory in GPU and kernel call for phase B
            allocate(d_inputCurrentIb, signalB, d_outputCurrentIb, size);
            cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

            checkCudaErrors(cudaMemcpy(signalB, d_outputCurrentIb, sizeof(double) * size, cudaMemcpyDeviceToHost));
            signalB[length-1] = v_ib[i];

            //allocate memory in GPU and kernel call for phase C
            allocate(d_inputCurrentIc, signalC, d_outputCurrentIc, size);
            cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

            checkCudaErrors(cudaMemcpy(signalC, d_outputCurrentIc, sizeof(double) * size, cudaMemcpyDeviceToHost));
            signalC[length-1] = v_ic[i];

            //memory cleaning
            checkCudaErrors(cudaFree(d_inputCurrentIa));
            checkCudaErrors(cudaFree(d_inputCurrentIb));
            checkCudaErrors(cudaFree(d_inputCurrentIc));
            checkCudaErrors(cudaFree(d_outputCurrentIa));
            checkCudaErrors(cudaFree(d_outputCurrentIb));
            checkCudaErrors(cudaFree(d_outputCurrentIc));
        }
    } // end of data loop
}

My kernel and the wrapper function are quite simple; they just shift the array elements one position to the left each time:

__global__ void allocate_kernel(double* const d_in, double* const d_out, const size_t size) {

    __shared__ double shared[512];

    int tid = threadIdx.x;

    // stage the input in shared memory
    if (tid < size)
        shared[tid] = d_in[tid];
    __syncthreads();

    // shift every element one position to the left
    if (tid < size - 1)
        d_out[tid] = shared[tid + 1];
    __syncthreads();
}


void allocate(double* const d_inputCurrent, double* signal, double* const d_outputCurrent, const size_t size) { 

    const dim3 blockSize(512); 
    const dim3 gridSize(1); 

    // cudaFree(0) only forces CUDA context initialization; it does not free anything
    checkCudaErrors(cudaFree(0));

    checkCudaErrors(cudaMalloc((void **)&d_inputCurrent, sizeof(double) * size)); 
    checkCudaErrors(cudaMalloc((void **)&d_outputCurrent, sizeof(double) * size)); 

    checkCudaErrors(cudaMemset(d_outputCurrent, 0, sizeof(double) * size)); 

    checkCudaErrors(cudaMemcpy(d_inputCurrent, signal, sizeof(double) * size, cudaMemcpyHostToDevice)); 

    allocate_kernel<<<gridSize, blockSize>>>(d_inputCurrent, d_outputCurrent, size); 
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); 
} 

This is a small part of my PhD thesis; I am practicing CUDA with this code, and I know it does not make much sense yet, but I cannot move forward because I am very confused by this problem. Any help would be greatly appreciated; thanks in advance.

Answer


In C, you cannot pass a pointer to a function by value, have that function modify the pointer, and then expect the modification to show up in the calling environment:

double *d_inputCurrentIa, *d_inputCurrentIb, *d_inputCurrentIc; 
double *d_outputCurrentIa, *d_outputCurrentIb, *d_outputCurrentIc; 

... 
    //allocate memory in GPU and kernel call for phase A 

// at this point, d_inputCurrentIa and d_outputCurrentIa are pointing to nothing 
    allocate(d_inputCurrentIa, signalA, d_outputCurrentIa, size); 
// allocate modified those pointers internally, but the modified values don't show up here 
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); 

    checkCudaErrors(cudaMemcpy(signalA, d_outputCurrentIa, sizeof(double) * size, cudaMemcpyDeviceToHost)); 
// therefore you will get an error here, because d_outputCurrentIa still points to nothing 
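
To make the rule concrete, here is a minimal, self-contained sketch (hypothetical names, not taken from the question) showing that a cudaMalloc performed on a by-value pointer parameter never reaches the caller:

#include <cstdio>
#include <cuda_runtime.h>

// 'd_ptr' is a local copy of the caller's pointer: cudaMalloc writes the
// device address into that copy only, so the caller never sees it
// (and the allocation itself is leaked).
void alloc_by_value(double *d_ptr, size_t n) {
    cudaMalloc((void **)&d_ptr, n * sizeof(double));
}

int main() {
    double *d_data = NULL;
    alloc_by_value(d_data, 512);
    // d_data is still NULL here, so a cudaMemcpy that uses it
    // fails with "invalid argument"
    printf("d_data = %p\n", (void *)d_data);
    return 0;
}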

There are a number of ways to make this work. One approach is to pass the addresses of the pointers you want modified, and use those addresses inside the function:

void allocate(double** d_inputCurrent, double* signal, double **d_outputCurrent, const size_t size); 

double *d_inputCurrentIa, *d_inputCurrentIb, *d_inputCurrentIc; 
double *d_outputCurrentIa, *d_outputCurrentIb, *d_outputCurrentIc; 

... 
    //allocate memory in GPU and kernel call for phase A 
    allocate(&d_inputCurrentIa, signalA, &d_outputCurrentIa, size); 
... 
void allocate(double** d_inputCurrent, double* signal, double** d_outputCurrent, const size_t size) { 

    const dim3 blockSize(512); 
    const dim3 gridSize(1); 

    checkCudaErrors(cudaFree(0)); 

    checkCudaErrors(cudaMalloc((void **)d_inputCurrent, sizeof(double) * size)); 
    checkCudaErrors(cudaMalloc((void **)d_outputCurrent, sizeof(double) * size)); 

    checkCudaErrors(cudaMemset(*d_outputCurrent, 0, sizeof(double) * size)); 

    checkCudaErrors(cudaMemcpy(*d_inputCurrent, signal, sizeof(double) * size, cudaMemcpyHostToDevice)); 

    allocate_kernel<<<gridSize, blockSize>>>(*d_inputCurrent, *d_outputCurrent, size); 
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); 
} 

Notes:

  1. Not sure why you would mark these pointers const. They are not const in any way (the function modifies both the pointers and the data they point to).

  2. Coded in browser. You may need to fix up a few other things. Since you haven't provided a complete code to work with, I haven't provided one either, but this should be a roadmap.

  3. Allocating inside a function like this may be a memory leak waiting to happen. You may want to give that some thought. Be sure to have a plan to free those pointers if you are going to reuse or re-create them; see the sketch below for one way to arrange that.
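
For note 3, here is one possible restructuring, shown as a minimal sketch handling only phase A and reusing the names and kernel from the question: allocate the device buffers once before the data loop and free them once afterwards.

// assumes signalA has already been filled from v_ia, as in the question's i == 0 branch
double *d_in = NULL, *d_out = NULL;
checkCudaErrors(cudaMalloc((void **)&d_in, sizeof(double) * length));
checkCudaErrors(cudaMalloc((void **)&d_out, sizeof(double) * length));
checkCudaErrors(cudaMemset(d_out, 0, sizeof(double) * length));

for (int i = length; i < data_length; i++) {
    // copy the current window up, run the shift kernel, copy the result back
    checkCudaErrors(cudaMemcpy(d_in, signalA, sizeof(double) * length, cudaMemcpyHostToDevice));

    allocate_kernel<<<1, 512>>>(d_in, d_out, length);
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaDeviceSynchronize());

    checkCudaErrors(cudaMemcpy(signalA, d_out, sizeof(double) * length, cudaMemcpyDeviceToHost));
    signalA[length - 1] = v_ia[i];
}

// free once, after the loop
checkCudaErrors(cudaFree(d_in));
checkCudaErrors(cudaFree(d_out));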


Thanks for the quick reply @Robert. Clearly I am not very experienced with coding (especially _pointers_ :)) and am just trying to pick up some GPU programming in a short amount of time. I followed your steps and it worked, but then I got a "bus error" at runtime. Everything you said was right, so I removed the 'const', allocated everything in the main function, and freed the pointers at the end. Now it works fine. I can post the improved answer if anyone is interested. – schloxy
