
CUDA Zero Copy vs. CudaMemcpy on the Jetson TK1

My question: I am looking for someone either to point out a mistake in the way I am trying to use zero-copy in CUDA, or to explain what is going on 'behind the scenes' that keeps the zero-copy approach from being faster than the memcpy approach. For what it's worth, I am running my tests on NVIDIA's TK1 processor under Ubuntu.

My question concerns the efficient use of the NVIDIA TK1's (physically) unified memory architecture with CUDA. NVIDIA provides two methods of abstracting GPU/CPU memory transfers (a bare-bones sketch of both follows the list):

  1. The 'unified memory' abstraction (using cudaHostAlloc & cudaHostGetDevicePointer)
  2. Explicit copies to and from the device (using cudaMalloc() & cudaMemcpy)
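
Sketch of the two patterns, for reference only (the buffer size is illustrative, and error checking and includes are omitted):

//Method 1: zero-copy (mapped) host memory -- the kernel works directly on host data 
const size_t N = 640 * 480; 
float *h_buf, *d_buf; 
cudaSetDeviceFlags(cudaDeviceMapHost); 
cudaHostAlloc((void **)&h_buf, N * sizeof(float), cudaHostAllocMapped); 
cudaHostGetDevicePointer((void **)&d_buf, (void *)h_buf, 0); //pass d_buf to the kernel 

//Method 2: explicit device allocation plus copies before and after the kernel 
float *h_src = (float *)malloc(N * sizeof(float)); 
float *d_src; 
cudaMalloc((void **)&d_src, N * sizeof(float)); 
cudaMemcpy(d_src, h_src, N * sizeof(float), cudaMemcpyHostToDevice); 
//... launch kernel on d_src ... 
cudaMemcpy(h_src, d_src, N * sizeof(float), cudaMemcpyDeviceToHost); 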

A short description of my test code: I test the same CUDA kernel using both method 1 and method 2. I expected method 1 to be faster, since no source data is copied to the device and no result data is copied back from it. However, the results contradicted my assumption (method #1 is 50% slower). Here is my code for this test:

#include <libfreenect/libfreenect.hpp> 
#include <iostream> 
#include <vector> 
#include <cmath> 
#include <pthread.h> 
#include <cxcore.h> 
#include <time.h> 
#include <sys/time.h> 
#include <memory.h> 
///CUDA/// 
#include <cuda.h> 
#include <cuda_runtime.h> 

///OpenCV 2.4 
#include <highgui.h> 
#include <cv.h> 
#include <opencv2/gpu/gpu.hpp> 

using namespace cv; 
using namespace std; 

///The Test Kernel/// 
__global__ void cudaCalcXYZ(float *dst, float *src, float *M, int height, int width, float scaleFactor, int minDistance) 
{ 
    float nx,ny,nz, nzpminD, jFactor; 
    int heightCenter = height/2; 
    int widthCenter = width/2; 
    //int j = blockIdx.x; //Represents which row we are in 
    int index = blockIdx.x*width; 
    jFactor = (blockIdx.x - heightCenter)*scaleFactor; 
    for(int i= 0; i < width; i++) 
    { 
     nz = src[index]; 
     nzpminD = nz + minDistance; 
     nx = (i - widthCenter)*(nzpminD)*scaleFactor;  
     ny = (jFactor)*(nzpminD); 
     //Solve for only Y matrix (height values)   
     dst[index++] = nx*M[4] + ny*M[5] + nz*M[6]; 
     //dst[index++] = 1 + 2 + 3; 
    } 
} 

//Function fwd declarations 
double getMillis(); 
double getMicros(); 
void runCudaTestZeroCopy(int iter, int cols, int rows); 
void runCudaTestDeviceCopy(int iter, int cols, int rows); 

int main(int argc, char **argv) { 

    //ZERO COPY FLAG (allows runCudaTestZeroCopy to run without fail) 
    cudaSetDeviceFlags(cudaDeviceMapHost); 

    //Runs kernel using explicit data copy to 'device' and back from 'device' 
    runCudaTestDeviceCopy(20, 640,480); 
    //Uses 'unified memory' cuda abstraction so device can directly work from host data 
    runCudaTestZeroCopy(20,640, 480); 

    std::cout << "Stopping test" << std::endl; 

    return 0; 
} 

void runCudaTestZeroCopy(int iter, int cols, int rows) 
{ 
    cout << "CUDA Test::ZEROCOPY" << endl; 
     int src_rows = rows; 
     int src_cols = cols; 
     int m_rows = 4; 
     int m_cols = 4; 
     int dst_rows = src_rows; 
     int dst_cols = src_cols; 
     //Create and allocate memory for host mats pointers 
     float *psrcMat; 
     float *pmMat; 
     float *pdstMat; 
     cudaHostAlloc((void **)&psrcMat, src_rows*src_cols*sizeof(float), cudaHostAllocMapped); 
     cudaHostAlloc((void **)&pmMat, m_rows*m_cols*sizeof(float), cudaHostAllocMapped); 
     cudaHostAlloc((void **)&pdstMat, dst_rows*dst_cols*sizeof(float), cudaHostAllocMapped); 
     //Create mats using host pointers 
     Mat src_mat = Mat(cvSize(src_cols, src_rows), CV_32FC1, psrcMat); 
     Mat m_mat = Mat(cvSize(m_cols, m_rows), CV_32FC1, pmMat); 
     Mat dst_mat = Mat(cvSize(dst_cols, dst_rows), CV_32FC1, pdstMat); 

     //configure src and m mats 
     for(int i = 0; i < src_rows*src_cols; i++) 
     { 
      psrcMat[i] = (float)i; 
     } 
     for(int i = 0; i < m_rows*m_cols; i++) 
     { 
      pmMat[i] = 0.1234; 
     } 
     //Create pointers to dev mats 
     float *d_psrcMat; 
     float *d_pmMat; 
     float *d_pdstMat; 
     //Map device to host pointers 
     cudaHostGetDevicePointer((void **)&d_psrcMat, (void *)psrcMat, 0); 
     //cudaHostGetDevicePointer((void **)&d_pmMat, (void *)pmMat, 0); 
     cudaHostGetDevicePointer((void **)&d_pdstMat, (void *)pdstMat, 0); 
     //Copy matrix M to device 
     cudaMalloc((void **)&d_pmMat, sizeof(float)*4*4); //4x4 matrix 
     cudaMemcpy(d_pmMat, pmMat, sizeof(float)*m_rows*m_cols, cudaMemcpyHostToDevice); 

     //Additional Variables for kernels 
     float scaleFactor = 0.0021; 
     int minDistance = -10; 

     //Run kernel! //cudaSimpleMult(float *dst, float *src, float *M, int width, int height) 
     int blocks = src_rows; 
     const int numTests = iter; 
     double perfStart = getMillis(); 

     for(int i = 0; i < numTests; i++) 
     {   
      //cudaSimpleMult<<<blocks,1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_cols, src_rows); 
      cudaCalcXYZ<<<blocks,1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_rows, src_cols, scaleFactor, minDistance); 
      cudaDeviceSynchronize(); 
     } 
     double perfStop = getMillis(); 
     double perfDelta = perfStop - perfStart; 
     cout << "Ran " << numTests << " iterations totaling " << perfDelta << "ms" << endl; 
     cout << " Average time per iteration: " << (perfDelta/(float)numTests) << "ms" << endl; 

     //Copy result back to host 
     //cudaMemcpy(pdstMat, d_pdstMat, sizeof(float)*src_rows*src_cols, cudaMemcpyDeviceToHost); 
     //cout << "Printing results" << endl; 
     //for(int i = 0; i < 16*16; i++) 
     //{ 
     // cout << "src[" << i << "]= " << psrcMat[i] << " dst[" << i << "]= " << pdstMat[i] << endl; 
     //} 

     cudaFree(d_psrcMat); 
     cudaFree(d_pmMat); 
     cudaFree(d_pdstMat); 
     cudaFreeHost(psrcMat); 
     cudaFreeHost(pmMat); 
     cudaFreeHost(pdstMat); 
} 

void runCudaTestDeviceCopy(int iter, int cols, int rows) 
{ 
     cout << "CUDA Test::DEVICE COPY" << endl; 
     int src_rows = rows; 
     int src_cols = cols; 
     int m_rows = 4; 
     int m_cols = 4; 
     int dst_rows = src_rows; 
     int dst_cols = src_cols; 
     //Create and allocate memory for host mats pointers 
     float *psrcMat; 
     float *pmMat; 
     float *pdstMat; 
     cudaHostAlloc((void **)&psrcMat, src_rows*src_cols*sizeof(float), cudaHostAllocMapped); 
     cudaHostAlloc((void **)&pmMat, m_rows*m_cols*sizeof(float), cudaHostAllocMapped); 
     cudaHostAlloc((void **)&pdstMat, dst_rows*dst_cols*sizeof(float), cudaHostAllocMapped); 
     //Create pointers to dev mats 
     float *d_psrcMat; 
     float *d_pmMat; 
     float *d_pdstMat; 
     cudaMalloc((void **)&d_psrcMat, sizeof(float)*src_rows*src_cols); 
     cudaMalloc((void **)&d_pdstMat, sizeof(float)*src_rows*src_cols); 
     cudaMalloc((void **)&d_pmMat, sizeof(float)*4*4); //4x4 matrix 
     //Create mats using host pointers 
     Mat src_mat = Mat(cvSize(src_cols, src_rows), CV_32FC1, psrcMat); 
     Mat m_mat = Mat(cvSize(m_cols, m_rows), CV_32FC1, pmMat); 
     Mat dst_mat = Mat(cvSize(dst_cols, dst_rows), CV_32FC1, pdstMat); 

     //configure src and m mats 
     for(int i = 0; i < src_rows*src_cols; i++) 
     { 
      psrcMat[i] = (float)i; 
     } 
     for(int i = 0; i < m_rows*m_cols; i++) 
     { 
      pmMat[i] = 0.1234; 
     } 

     //Additional Variables for kernels 
     float scaleFactor = 0.0021; 
     int minDistance = -10; 

     //Run kernel! //cudaSimpleMult(float *dst, float *src, float *M, int width, int height) 
     int blocks = src_rows; 

     double perfStart = getMillis(); 
     for(int i = 0; i < iter; i++) 
     {   
      //Copy from host to device 
      cudaMemcpy(d_psrcMat, psrcMat, sizeof(float)*src_rows*src_cols, cudaMemcpyHostToDevice); 
      cudaMemcpy(d_pmMat, pmMat, sizeof(float)*m_rows*m_cols, cudaMemcpyHostToDevice); 
      //Run Kernel 
      //cudaSimpleMult<<<blocks,1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_cols, src_rows); 
      cudaCalcXYZ<<<blocks,1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_rows, src_cols, scaleFactor, minDistance); 
      //Copy from device to host 
      cudaMemcpy(pdstMat, d_pdstMat, sizeof(float)*src_rows*src_cols, cudaMemcpyDeviceToHost); 
     } 
     double perfStop = getMillis(); 
     double perfDelta = perfStop - perfStart; 
     cout << "Ran " << iter << " iterations totaling " << perfDelta << "ms" << endl; 
     cout << " Average time per iteration: " << (perfDelta/(float)iter) << "ms" << endl; 

     cudaFree(d_psrcMat); 
     cudaFree(d_pmMat); 
     cudaFree(d_pdstMat); 
     cudaFreeHost(psrcMat); 
     cudaFreeHost(pmMat); 
     cudaFreeHost(pdstMat); 
} 

//Timing functions for performance measurements 
double getMicros() 
{ 
    timespec ts; 
    //double t_ns, t_s; 
    long t_ns; 
    double t_s; 
    clock_gettime(CLOCK_MONOTONIC, &ts); 
    t_s = (double)ts.tv_sec; 
    t_ns = ts.tv_nsec; 
    //Return full microseconds (seconds plus the nanosecond remainder) 
    return ((t_s * 1000.0 * 1000.0) + ((double)t_ns/1000.0)); 
} 

double getMillis() 
{ 
    timespec ts; 
    double t_ns, t_s; 
    clock_gettime(CLOCK_MONOTONIC, &ts); 
    t_s = (double)ts.tv_sec; 
    t_ns = (double)ts.tv_nsec; 
    return((t_s * 1000.0) + (t_ns/1000000.0)); 
} 

I have already seen the post Cuda zero-copy performance, but I feel it does not apply here, for the following reason: the GPU and CPU have a physically unified memory architecture.

Thanks


[SO] is not a forum, and this question as asked does not belong here. If you have a specific, self-contained problem, edit a short, complete code example that illustrates it into your question. Putting a Google Drive link to the code is counter-productive: if the link breaks, the question becomes useless. Questions and answers exist as a permanent record to help you and future visitors with the same problem or question. I am voting to close this question. – talonmies


Thanks for the advice. I will remove the 'discussion' request and state the bottom-line question more explicitly, since my question is specifically "How do I make efficient use of zero-copy on a physically unified memory architecture?" given the 2 methods I showed. – dwyer2bp

Answers


When you use zero-copy, a memory read goes through a path in which the memory unit is queried to fetch the data from system memory. That operation has some latency.

When using direct access to memory, the memory unit gathers the data from global memory, which has a different access pattern and different latency.

Actually seeing this difference would require some form of profiling.

Nevertheless, your call to the global function uses a single thread per block:

cudaCalcXYZ<<< blocks,1 >>> (... 

In this case the GPU has little opportunity to hide latency while memory is being gathered from system memory (or global memory). I would recommend you use more threads (a multiple of 64, at least 128 in total) and run the profiler to obtain the cost of the memory accesses. Your algorithm appears to be separable, and changing the loop from

for(int i= 0; i < width; i++) 

to

for (int i = threadIdx.x ; i < width ; i += blockDim.x) 

is likely to increase the overall performance. An image width of 640 turns into 5 iterations for 128 threads:

cudaCalcXYZ<<< blocks,128 >>> (... 

I believe this would give some performance gain.
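
Putting those two changes together, a minimal sketch of what the revised kernel could look like (this is my reading of the suggestion, reusing the question's kernel body; it is not the answerer's exact code):

__global__ void cudaCalcXYZ(float *dst, float *src, float *M, int height, int width, float scaleFactor, int minDistance) 
{ 
    int heightCenter = height/2; 
    int widthCenter = width/2; 
    int row = blockIdx.x;                       //still one block per image row 
    float jFactor = (row - heightCenter)*scaleFactor; 
    //Each thread strides across the row, so the block has enough work in flight to hide memory latency 
    for(int i = threadIdx.x; i < width; i += blockDim.x) 
    { 
        int index = row*width + i; 
        float nz = src[index]; 
        float nzpminD = nz + minDistance; 
        float nx = (i - widthCenter)*nzpminD*scaleFactor; 
        float ny = jFactor*nzpminD; 
        dst[index] = nx*M[4] + ny*M[5] + nz*M[6]; 
    } 
} 

//launched as before, but with 128 threads per block: 
//cudaCalcXYZ<<<blocks, 128>>>(d_pdstMat, d_psrcMat, d_pmMat, src_rows, src_cols, scaleFactor, minDistance); 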


I think you are on the right track: adding more threads so that there is 1 thread per pixel (480 blocks, 640 threads) improved zero-copy slightly. However, in my zero-copy routine I set only the 4x4 matrix to be copied to the device, while the larger input and output matrices stay zero-copy. Now the performance gain is huge (from an 8 ms execution down to a 1 ms execution). Should I upload the new code that shows the performance gain? I am still confused as to why zero-copy on the small matrix makes such a difference in access latency. – dwyer2bp
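
For reference, a rough sketch of the one-thread-per-pixel configuration described in the comment above (my own reading of "480 blocks, 640 threads"; the kernel name and the removal of the inner loop are assumptions, not the commenter's actual code):

__global__ void cudaCalcXYZPerPixel(float *dst, float *src, float *M, int height, int width, float scaleFactor, int minDistance) 
{ 
    int j = blockIdx.x;   //row: one block per image row (480 blocks) 
    int i = threadIdx.x;  //column: one thread per pixel in the row (640 threads) 
    if(j >= height || i >= width) return; 
    int index = j*width + i; 
    float nz = src[index]; 
    float nzpminD = nz + minDistance; 
    float nx = (i - width/2)*nzpminD*scaleFactor; 
    float ny = (j - height/2)*scaleFactor*nzpminD; 
    dst[index] = nx*M[4] + ny*M[5] + nz*M[6]; 
} 

//cudaCalcXYZPerPixel<<<480, 640>>>(d_pdstMat, d_psrcMat, d_pmMat, 480, 640, scaleFactor, minDistance); 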


I have also noticed that memory allocated with cudaHostAlloc carries a significant latency penalty when a CPU thread tries to access it for its own operations. Is that common? – dwyer2bp


The ZeroCopy feature lets us run over the data on the device without manually copying it into device memory the way the cudaMemcpy function does. Zero-copy memory only passes the host address to the device, where it is read/written by the device kernel. So the more thread blocks you declare for the kernel, the more data is read/written on the device and the more host addresses are passed to the device. In the end, you get a better performance gain than if you declare only a few thread blocks for the kernel.