
I am trying to run an MPI matrix multiplication example (Matrix multiplication using MPI), except that I modified it to read the matrices from files, and of course things blew up.

Specifically, I get this error:

Entering first MPI_Recv in p0 and recieving data from slave processor 1 
Fatal error in MPI_Recv: Invalid count, error stack: 
MPI_Recv(186): MPI_Recv(buf=0xbfd30930, count=-1807265191, MPI_FLOAT, src=0, tag=1, MPI_COMM_WORLD, status=0x804b080) failed 
MPI_Recv(104): Negative count, value is -1807265191 

Here is the modified code:

MPI_Init(&argc, &argv); 
MPI_Comm_rank(MPI_COMM_WORLD, &id); 
MPI_Comm_size(MPI_COMM_WORLD, &p); 
slaves = p-1; //slaves=numworkers 
/*---------------------------- master ----------------------------*/ 
if(id == 0) 
    { 
    /* check the number of arguments */ 

    if(argc!=4) 
    { 
     printf("Invalid number of aguements!\n./program matrix_file1 matrix_file2 result_matrix_file\n"); 
     return -1; 
    } 

     /* read matrix A */ 
    printf("read matrix A from %s\n", argv[1]); 
    read_matrix(argv[1],&a, &sa, &i, &j); 

    if(i != j) 
    { 
     printf("ERROR: matrix A not square\n"); 
     return -1; 
    } 



    n = i; 



    /* read matrix B */ 
    printf("read matrix B from %s\n", argv[2]); 
    read_matrix(argv[2],&b, &sb, &i, &j); 



    if(i != j) 
    {  
      printf("ERROR: matrix B not square\n"); 
      return -1; 
    } 

    if(n != i) 
    { printf("ERROR: matrix A and B incompatible\n"); 
     return -1; 
    } 



    if((n%p)!=0) 
    { 
     printf("ERROR: %d processor(s) cannot divide matrices %d x %d! \n", p,n,n); 
     return -1; 
    } 



     rows = n/slaves; 
     offset=0; 
     remainPart=n%slaves; 


    for(dest=1;dest<=slaves;dest++) 
    { 


     if(remainPart>0) 
     { 
      originalRows=rows; 
      ++rows; 
      remainPart--; 
      printf("Sending %d rows to task %d offset=%d\n",rows,dest,offset); 
      MPI_Send(&offset, 1, MPI_INT, dest, 1, MPI_COMM_WORLD); 
      MPI_Send(&rows, 1, MPI_INT, dest, 1, MPI_COMM_WORLD); 
      MPI_Send(&a[offset][0], rows*n, MPI_FLOAT,dest,1, MPI_COMM_WORLD); 
      MPI_Send(&b, n*n, MPI_FLOAT, dest, 1, MPI_COMM_WORLD); 
      offset = offset + rows; 
      rows = originalRows; 

     } 
     else 
     { 
      printf("Sending %d rows to task %d offset=%d\n",rows,dest,offset); 
      MPI_Send(&offset, 1, MPI_INT, dest, 1, MPI_COMM_WORLD); 
      MPI_Send(&rows, 1, MPI_INT, dest, 1, MPI_COMM_WORLD); 
      MPI_Send(&a[offset][0], rows*n, MPI_FLOAT,dest,1, MPI_COMM_WORLD); 
      MPI_Send(&b, n*n, MPI_FLOAT, dest, 1, MPI_COMM_WORLD); 
      offset = offset + rows; 
     } 
    } 
    /* initialize matrix C */ 

    sc = (float*)malloc(n*n*sizeof(float)); 
    memset(sc, 0, n*n*sizeof(float)); 
    c = (float**)malloc(n*sizeof(float*)); 
    for(i=0; i<n; i++) c[i] = &sc[i*n]; 

    /* wait for results from all worker tasks */ 
    for (k=1; k<=slaves; k++)  
    {    
    source = k; 
    printf("Entering first MPI_Recv in p0 and recieving data from slave processor %d\n", source); 
    MPI_Recv(&offset, 1, MPI_INT, source, 2, MPI_COMM_WORLD, &status); 
    printf("Entering second MPI_Recv in p0\n"); 
    MPI_Recv(&rows, 1, MPI_INT, source, 2, MPI_COMM_WORLD, &status); 
    printf("Entering third MPI_Recv in p0\n"); 
    MPI_Recv(&c[offset][0], rows*n, MPI_FLOAT, source, 2, MPI_COMM_WORLD, &status); 
    }  


    write_matrix(argv[3], sc, i, j); 

    free(sc); 
    free(c); 
    } 



if(id>0) 
{ 
     source = 0; 
     //printf("Entered first MPI_Recv for process %d\n", id); 
     MPI_Recv(&offset, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status); 
     //printf("Entered second MPI_Recv for process %d\n", id); 
     MPI_Recv(&rows, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status); 
     //printf("Entered third MPI_Recv for process %d\n", id); 
     MPI_Recv(&a, rows*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status); 
     //printf("Entered fourth MPI_Recv for process %d\n", id); 
     MPI_Recv(&b, n*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status); 
     /* Matrix multiplication */ 
     for (k=0; k<n; k++) 
     for (l=0; l<rows; l++) { 
     for (m=0; m<n; m++) 
      c[l][k] = c[l][k] + a[l][m] * b[m][k]; 
     } 


     //printf("Entered first MPI_send for process %d\n", id); 
     MPI_Send(&offset, 1, MPI_INT, 0, 2, MPI_COMM_WORLD); 
     //printf("Entered second MPI_send for process %d\n", id); 
     MPI_Send(&rows, 1, MPI_INT, 0, 2, MPI_COMM_WORLD); 
     //printf("Entered third MPI_send for process %d\n", id); 
     MPI_Send(&c, rows*n, MPI_FLOAT, 0, 2, MPI_COMM_WORLD); 


} 






MPI_Finalize();
}

Beforehand I was mistakenly going through all of the processes rather than just the workers; I have since fixed that, but I don't know where the random negative count is coming from. Especially since the count in the receives that follow this print statement

printf("Entering first MPI_Recv in p0 and recieving data from slave processor %d\n", source); 
    MPI_Recv(&offset, 1, MPI_INT, source, 2, MPI_COMM_WORLD, &status); 
    printf("Entering second MPI_Recv in p0\n"); 
    MPI_Recv(&rows, 1, MPI_INT, source, 2, MPI_COMM_WORLD, &status); 
    printf("Entering third MPI_Recv in p0\n"); 
    MPI_Recv(&c[offset][0], rows*n, MPI_FLOAT, source, 2, MPI_COMM_WORLD, &status); 

is nothing more than the original dimension n multiplied by the number of rows given to each slave. Thanks in advance.

UPDATE: OK, so part of the problem seems to be that while my arrays have space allocated for them on the master, that is not the case on the slave processes.

Once I realized this, I added buffers for the matrices before the check for whether the processor is a worker, so that their data could be transferred. Although, as the print statements show, it clearly isn't going as planned.

float buffA[n][n]; 
float buffB[n][n]; 
float buffC[n][n]; 

for(l=0;l<n;l++) 
    for(m=0;m<n;m++) 
    { 
     buffA[l][m]=a[l][m]; 
     buffB[l][m]=b[l][m]; 

         //buffA[l][m]=sa[(i*n) + j]; 
         //buffB[l][m]=sb[(i*n) + j]; 
     printf("buffA[%d][%d] =%f\n",l,m, buffA[l][m]); 
     printf("buffB[%d][%d] =%f\n",l,m,buffB[l][m]); 
    } 

if(id>0) 
{ 
     /*read_matrix(argv[1],&a, &sa, &i, &j); 
     read_matrix(argv[2],&b, &sb, &i, &j);*/ 



     source = 0; 
     printf("Entered first MPI_Recv for process %d\n", id); 
     MPI_Recv(&offset, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status); 
     printf ("offset =%d\n", offset); 
     MPI_Recv(&rows, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status); 
     printf ("row =%d\n", rows); 
     MPI_Recv(&buffA[offset][0], rows*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status); 
     printf("buffA[offset][0] =%f\n", buffA[offset][0]); //they're not getting the matrices 
     MPI_Recv(&buffB, n*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status); 
     //printf ("b=\n"); 

     /* Matrix multiplication */ 
     for (k=0; k<n; k++) 
     for (l=0; l<rows; l++) { 
      //c[l][k]=0.0; 
     for (m=0; m<n; m++) 
      buffC[l][k] = buffC[l][k] + buffA[l][m] * buffB[m][k]; 
      //printf("c[%d][%d]= %f\n", l,k, c[l][k]); 
     } 


     //printf("Entered first MPI_send for process %d\n", id); 
     MPI_Send(&offset, 1, MPI_INT, source, 2, MPI_COMM_WORLD); 
     //printf("Entered second MPI_send for process %d\n", id); 
     MPI_Send(&rows, 1, MPI_INT, source, 2, MPI_COMM_WORLD); 
     //printf("Entered third MPI_send for process %d\n", id); 
     MPI_Send(&buffC, rows*n, MPI_FLOAT, source, 2, MPI_COMM_WORLD); 

     printf("Exit via MPI_send for process %d\n", id); 
} 

The error count has also changed, though I'm not sure whether that indicates anything.

Fatal error in MPI_Recv: Invalid count, error stack: 
MPI_Recv(186): MPI_Recv(buf=0xbf8e642c, count=-8, MPI_FLOAT, src=0, tag=1,MPI_COMM_WORLD, status=0x804c088) failed 
MPI_Recv(104): Negative count, value is -8 

OK, so now I've found that the dimension n was never being transferred, which was causing the original random negative count, so I added a send and recv for n. Now it looks like the last problem is how to transfer the dynamically allocated arrays with MPI. Still working on it.
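For reference, the master already stores c this way: one contiguous block sc of n*n floats plus an array of row pointers into it. A minimal sketch of the same idea for a worker-side receive buffer (the names sa_local/a_local are made up here, and it assumes rows, n, source, and status are already set up as above):

     /* Allocate one contiguous block of rows*n floats plus row pointers into it,
        so the whole block can be received with a single MPI call. */
     float *sa_local = (float*)malloc(rows * n * sizeof(float));
     float **a_local = (float**)malloc(rows * sizeof(float*));
     for (l = 0; l < rows; l++)
      a_local[l] = &sa_local[l * n];

     /* The data lands in the contiguous backing store... */
     MPI_Recv(sa_local, rows * n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);

     /* ...but can still be indexed as a_local[row][col] afterwards. */

     free(a_local);
     free(sa_local);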

UPDATE

It works; the current working code is below, although the multiplication results are all over the place, I think, but baby steps. XP

if(id>0) 
{ 




     source = 0; 
     printf("Entered first MPI_Recv for process %d\n", id); 
     MPI_Recv(&offset, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status); 
     printf ("offset =%d\n", offset); 
     MPI_Recv(&rows, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status); 
     MPI_Recv(&n, 1, MPI_INT, source, 1, MPI_COMM_WORLD, &status); 
     printf ("row =%d\nn=%d\n", rows,n); 

     float buffA[rows][n]; 
     float buffB[n][n]; 
     float buffC[rows][n]; 


     MPI_Recv(&buffA[offset][0], rows*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status); 
     printf("buffA[offset][0] =%f\n", buffA[offset][0]); //they're not getting the matrices 
     MPI_Recv(&buffB, n*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status); 
     //printf ("b=\n"); 

     /* Matrix multiplication */ 
     for (k=0; k<n; k++) 
     for (l=0; l<rows; l++) { 
      //c[l][k]=0.0; 
     for (m=0; m<n; m++) 
      //buffC[l][k] = buffC[l][k] + buffA[l][m] * buffB[m][k]; 
      //printf("c[%d][%d]= %f\n", l,k, c[l][k]); 
      buffC[l][k] = buffC[l][k] + buffA[l][m] * buffB[m][k]; 

     } 


     //printf("Entered first MPI_send for process %d\n", id); 
     MPI_Send(&offset, 1, MPI_INT, source, 2, MPI_COMM_WORLD); 
     //printf("Entered second MPI_send for process %d\n", id); 
     MPI_Send(&rows, 1, MPI_INT, source, 2, MPI_COMM_WORLD); 
     //printf("Entered third MPI_send for process %d\n", id); 
     MPI_Send(&buffC, rows*n, MPI_FLOAT, source, 2, MPI_COMM_WORLD); 

     printf("Exit via MPI_send for process %d\n", id); 
} 

Result:

0.00 -0.00 -0.00 -0.00 -0.00 -0.00 0.00 0.00 
0.00 -0.00 -0.00 -0.00 -1.26 -1.26 -0.00 -1.26 
-0.00 -1.26 -0.00 0.00 -0.00 0.00 0.00 0.00 
-0.00 0.00 -0.00 -0.00 0.00 -0.00 -0.00 0.00 
0.00 0.00 0.00 0.00 -0.00 -1.26 -0.00 0.00 
-0.00 -0.00 0.00 35833769696167556769392596671120015360.00 0.00 0.00 -0.00 0.00 
-0.00 -0.00 0.00 -0.00 -0.00 0.00 0.00 0.00 
0.00 -nan -0.00 -0.00 -0.00 -0.00 -0.00 -0.00 
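Two things in the worker branch above look like likely reasons the products are garbage (just a guess at this stage, not a verified fix): buffC is never zeroed before accumulating into it, and the block is received at &buffA[offset][0] even though the worker's buffA only has rows rows, so any nonzero offset writes past the end of it. Roughly:

     /* Clear the local result before accumulating into it. */
     memset(buffC, 0, rows * n * sizeof(float));

     /* The worker's buffA holds only its own block, so receive at row 0;
        offset is only meaningful on the master when writing back into c. */
     MPI_Recv(&buffA[0][0], rows*n, MPI_FLOAT, source, 1, MPI_COMM_WORLD, &status);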

Have you tried running it through a debugger? The code isn't long. You could break at the MPI_Recv calls and figure out what's going on, which I think would be relatively easy. –


So far I've only used print statements. Any suggestions? Plain gdb looks a bit complicated. – user2243369


OK, worth a try. – user2243369

Answer


(Moved from the comments so that this question can have an answer.)

Print statements are notoriously unreliable in a distributed environment; there is no guarantee they arrive in order relative to one another. GDB really isn't that bad. You don't need to attach to all of the processes, just pick one. You can look at my answer here (stackoverflow.com/questions/17347778/...) to see how to do it.
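If it helps, one common trick (a generic sketch, not necessarily what the linked answer does, and the function name is made up) is to have the rank you care about print its PID and spin until you attach with gdb -p <pid>, then set the flag to 0 from inside gdb and continue:

#include <stdio.h>
#include <unistd.h>   /* getpid, gethostname, sleep */

/* Call this right after MPI_Init on the rank you want to inspect. */
static void wait_for_debugger(void)
{
    volatile int keep_waiting = 1;   /* set to 0 from gdb to continue */
    char host[256];
    gethostname(host, sizeof(host));
    printf("pid %d on %s waiting for debugger attach\n", (int)getpid(), host);
    fflush(stdout);
    while (keep_waiting)
        sleep(1);
}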


But this isn't an answer to the question. It would be better if the OP posted the answer to her own question instead of editing it into the question itself. There's no need for this question to have an answer, and a comment disguised as an answer is still a comment. –


Fair enough. Really, this question should probably be closed as what used to be called "too localized", since it's a debugging question. However, I looked at some of the advice on meta (http://meta.stackexchange.com/questions/125384/debug-this-code-for-me-questions), and the accepted answer there is to tell people how to debug, posted as an answer. If this gets closed, though, I won't object. I just don't know what the OP could say that would actually be an "answer" to this "question". –