2013-07-01 62 views
6

我是GPU并行编程的新手,所以如果问题太宽泛或含糊,我先表示抱歉。我知道CULA库中有某些并行SVD函数,但如果我要对大量相对较小的矩阵做因式分解,应该采用什么策略?例如,我有n个尺寸为d的矩阵,n很大,d很小。如何并行化这个过程?任何人都可以给我一个提示吗?使用CUDA并行执行多个SVD

回答

4

您可以查看CULA博客的Batched Operations帖子,以便讨论您的问题。

编辑

根据我对你下面评论的理解,你希望每个线程计算一个单独的SVD。所以,基本上每个线程都应该执行一个标准的串行SVD方案。以下是一些可能有用的参考资料:

Numerical Recipes

Golub, Van Loan, Matrix Computations

不过,如果你使用这种方法,恐怕你将无法再使用cuBLAS,因为那些都是host函数,无法从device调用(除非你的设备计算能力达到3.5以上,参见simpleDevLibCUBLAS示例)。但基本上,通过这种方式,你相当于自己实现了批处理的概念。

不过,如果你决定采用更标准的GPU并行实现方式,下面的参考文献可能会让你感兴趣:

Singular Value Decomposition on GPU using CUDA

+0

类似于发布在CUDA注册开发者网站上的批处理求解器/矩阵求逆代码,你可以考虑每线程一个矩阵或每线程块一个矩阵的方法。如果批量很大而矩阵非常小,这种方法效果很好。在你的情况下,n和d的典型值是多少? – njuffa

+0

BLAS批处理模式只有矩阵乘法,对不对?我如何将它用于SVD?你能给我一个如何分割GPU中的线程或块的代码示例,并让每个单元并行执行一个SVD?例如,如果n = 500 d = 20。谢谢! –

+0

我编辑了我的帖子。我希望这会有所帮助。 – JackOLantern

7

我以前的答案现在已经过时了。自2015年2月起,CUDA 7(目前为候选发布版)在其cuSOLVER库中提供了完整的SVD功能。下面,我提供一个使用CUDA cuSOLVER计算奇异值分解的示例。

关于你提出的具体问题(计算多个小尺寸矩阵的奇异值分解),你应该使用流(stream)来调整我在下面提供的示例。要将每个任务与一个流关联起来,可以使用

cudaStreamCreate() 

cusolverDnSetStream() 

kernel.cu

#include "cuda_runtime.h" 
#include "device_launch_parameters.h" 

#include<iostream> 
#include<iomanip> 
#include<stdlib.h> 
#include<stdio.h> 
#include<assert.h> 
#include<math.h> 

#include <cusolverDn.h> 
#include <cuda_runtime_api.h> 

#include "Utilities.cuh" 

/********/
/* MAIN */
/********/
// Computes the full SVD A = U * S * V^T of a small dense matrix with cuSOLVER's
// Dgesvd. Preconditions imposed by gesvd: Nrows >= Ncols and column-major storage.
// Fixes over the original: every allocation is released, cusolverDnCreate is
// status-checked, and the "right singular vector" printout indexes V correctly
// (gesvd returns V^T, not V, in its seventh output array).
int main(){

    // --- gesvd only supports Nrows >= Ncols
    // --- column major memory ordering

    const int Nrows = 7;
    const int Ncols = 5;

    // --- cuSOLVE input/output parameters/arrays
    int work_size = 0;
    int *devInfo;   gpuErrchk(cudaMalloc(&devInfo,   sizeof(int)));

    // --- CUDA solver initialization (checked, like every other cuSOLVER call)
    cusolverDnHandle_t solver_handle;
    cusolveSafeCall(cusolverDnCreate(&solver_handle));

    // --- Setting the host, Nrows x Ncols matrix (column-major: element (j,i) at j + i*Nrows)
    double *h_A = (double *)malloc(Nrows * Ncols * sizeof(double));
    for(int j = 0; j < Nrows; j++)
     for(int i = 0; i < Ncols; i++)
      h_A[j + i*Nrows] = (i + j*j) * sqrt((double)(i + j));

    // --- Setting the device matrix and moving the host matrix to the device
    double *d_A;   gpuErrchk(cudaMalloc(&d_A,  Nrows * Ncols * sizeof(double)));
    gpuErrchk(cudaMemcpy(d_A, h_A, Nrows * Ncols * sizeof(double), cudaMemcpyHostToDevice));

    // --- host side SVD results space (h_V receives V^T — see the gesvd call below)
    double *h_U = (double *)malloc(Nrows * Nrows  * sizeof(double));
    double *h_V = (double *)malloc(Ncols * Ncols  * sizeof(double));
    double *h_S = (double *)malloc(min(Nrows, Ncols) * sizeof(double));

    // --- device side SVD workspace and matrices
    double *d_U;   gpuErrchk(cudaMalloc(&d_U, Nrows * Nrows  * sizeof(double)));
    double *d_V;   gpuErrchk(cudaMalloc(&d_V, Ncols * Ncols  * sizeof(double)));
    double *d_S;   gpuErrchk(cudaMalloc(&d_S, min(Nrows, Ncols) * sizeof(double)));

    // --- CUDA SVD workspace query and allocation
    cusolveSafeCall(cusolverDnDgesvd_bufferSize(solver_handle, Nrows, Ncols, &work_size));
    double *work; gpuErrchk(cudaMalloc(&work, work_size * sizeof(double)));

    // --- CUDA SVD execution.
    //     jobu = jobvt = 'A': all columns of U and all rows of V^T are computed.
    //     NOTE: gesvd writes V^T (not V) into the seventh output array, so d_V holds
    //     V^T in column-major order with leading dimension Ncols.
    cusolveSafeCall(cusolverDnDgesvd(solver_handle, 'A', 'A', Nrows, Ncols, d_A, Nrows, d_S, d_U, Nrows, d_V, Ncols, work, work_size, NULL, devInfo));
    int devInfo_h = 0; gpuErrchk(cudaMemcpy(&devInfo_h, devInfo, sizeof(int), cudaMemcpyDeviceToHost));
    if (devInfo_h != 0) std::cout << "Unsuccessful SVD execution\n\n";

    // --- Moving the results from device to host
    gpuErrchk(cudaMemcpy(h_S, d_S, min(Nrows, Ncols) * sizeof(double), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(h_U, d_U, Nrows * Nrows  * sizeof(double), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(h_V, d_V, Ncols * Ncols  * sizeof(double), cudaMemcpyDeviceToHost));

    std::cout << "Singular values\n";
    for(int i = 0; i < min(Nrows, Ncols); i++)
     std::cout << "d_S["<<i<<"] = " << std::setprecision(15) << h_S[i] << std::endl;

    std::cout << "\nLeft singular vectors - For y = A * x, the columns of U span the space of y\n";
    for(int j = 0; j < Nrows; j++) {
     printf("\n");
     for(int i = 0; i < Nrows; i++)
      printf("U[%i,%i]=%f\n",i,j,h_U[j*Nrows + i]);
    }

    std::cout << "\nRight singular vectors - For y = A * x, the columns of V span the space of x\n";
    for(int i = 0; i < Ncols; i++) {
     printf("\n");
     for(int j = 0; j < Ncols; j++)
      // h_V stores V^T column-major with ld = Ncols, so V[i,j] = V^T[j,i] = h_V[j + i*Ncols].
      printf("V[%i,%i]=%f\n",i,j,h_V[i*Ncols + j]);
    }

    // --- Release every resource acquired above (the original leaked all of these)
    gpuErrchk(cudaFree(work));
    gpuErrchk(cudaFree(d_S));
    gpuErrchk(cudaFree(d_V));
    gpuErrchk(cudaFree(d_U));
    gpuErrchk(cudaFree(d_A));
    gpuErrchk(cudaFree(devInfo));
    free(h_S); free(h_V); free(h_U); free(h_A);

    cusolverDnDestroy(solver_handle);

    return 0;

}

Utilities.cuh

#ifndef UTILITIES_CUH
#define UTILITIES_CUH

// The declarations below use cudaError_t and cusolverStatus_t, so the header
// must pull in the corresponding CUDA headers to be self-contained (the
// original relied on every includer having done so first).
#include <cuda_runtime.h>
#include <cusolverDn.h>

// Integer ceiling division: smallest k such that k * b >= a (used for grid sizing).
extern "C" int iDivUp(int, int);

// Print a diagnostic and exit if a CUDA runtime call did not return cudaSuccess.
extern "C" void gpuErrchk(cudaError_t);

// Print a diagnostic, reset the device, and assert if a cuSOLVER call failed.
extern "C" void cusolveSafeCall(cusolverStatus_t);

#endif

Utilities.cu

#include <stdio.h> 
#include <assert.h> 

#include "cuda_runtime.h" 
#include <cuda.h> 

#include <cusolverDn.h> 

/*******************/
/* iDivUp FUNCTION */
/*******************/
// Integer ceiling division: how many groups of size b are needed to cover a items.
extern "C" int iDivUp(int a, int b)
{
    int quotient = a / b;
    if (a % b != 0)
        quotient++;
    return quotient;
}

/********************/
/* CUDA ERROR CHECK */
/********************/
// --- Credit to http://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
// Prints a diagnostic and (optionally) exits if 'code' is not cudaSuccess.
// 'file' is const-qualified: callers pass the __FILE__ string literal, and
// binding a string literal to a non-const char* is deprecated in C++03 and
// ill-formed in C++11. Accepting const char* remains compatible with all callers.
void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
     fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
     if (abort) { exit(code); }
    }
}

// Thin C-linkage wrapper over gpuAssert so other translation units can check
// CUDA runtime status codes without seeing the C++ default argument.
extern "C" void gpuErrchk(cudaError_t ans)
{
    gpuAssert(ans, __FILE__, __LINE__);
}

/**************************/
/* CUSOLVE ERROR CHECKING */
/**************************/
// Translates a cuSOLVER status code into its symbolic name for diagnostics.
// Unrecognized codes map to "<unknown>".
static const char *_cudaGetErrorEnum(cusolverStatus_t error)
{
    switch (error)
    {
     case CUSOLVER_STATUS_SUCCESS:                   return "CUSOLVER_SUCCESS";
     case CUSOLVER_STATUS_NOT_INITIALIZED:           return "CUSOLVER_STATUS_NOT_INITIALIZED";
     case CUSOLVER_STATUS_ALLOC_FAILED:              return "CUSOLVER_STATUS_ALLOC_FAILED";
     case CUSOLVER_STATUS_INVALID_VALUE:             return "CUSOLVER_STATUS_INVALID_VALUE";
     case CUSOLVER_STATUS_ARCH_MISMATCH:             return "CUSOLVER_STATUS_ARCH_MISMATCH";
     case CUSOLVER_STATUS_EXECUTION_FAILED:          return "CUSOLVER_STATUS_EXECUTION_FAILED";
     case CUSOLVER_STATUS_INTERNAL_ERROR:            return "CUSOLVER_STATUS_INTERNAL_ERROR";
     case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED";
     default:                                        return "<unknown>";
    }
}

// Reports a failed cuSOLVER call and aborts. Fixes over the original:
//  - the fprintf format had five conversion specifiers but only four arguments
//    (undefined behavior) and passed the enum 'err' where a %s was expected;
//  - it printed __FILE__/__LINE__ of this utility file instead of the 'file'
//    and 'line' parameters the caller supplied, defeating the wrapper;
//  - stray macro-style line-continuation backslashes are removed.
inline void __cusolveSafeCall(cusolverStatus_t err, const char *file, const int line)
{
    if(CUSOLVER_STATUS_SUCCESS != err) {
     fprintf(stderr, "CUSOLVE error in file '%s', line %d\nerror %d: %s\nterminating!\n",
             file, line, (int)err, _cudaGetErrorEnum(err));
     cudaDeviceReset();
     assert(0);
    }
}

// C-linkage entry point for cuSOLVER status checking.
// NOTE(review): being a function rather than a macro, this stamps the wrapper's
// own __FILE__/__LINE__, not the caller's call site.
extern "C" void cusolveSafeCall(cusolverStatus_t err)
{
    __cusolveSafeCall(err, __FILE__, __LINE__);
}
+0

您如何看待这种方法与使用MAGMA? –

+1

@AndreasYankopolus我还没有比较这两个库,对不起。 – JackOLantern