sur*_*993 1 c c++ mpi openmpi dynamic-memory-allocation
我是MPI新手。我写了一个简单的程序,用多个进程来打印一个矩阵:比如有一个8x8的矩阵,用4个进程启动MPI程序时,第1个进程应打印前2行,第2个进程打印接下来的2行,依此类推,把矩阵按行分摊给各个进程。
#define S 8
MPI_Status status;
int main(int argc, char *argv[])
{
int numtasks, taskid;
int i, j, k = 0;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
int rows, offset, remainPart, orginalRows, height, width;
int **a;
// int a[S][S];
if(taskid == 0)
{
cout<<taskid<<endl;
height = width = S;
a = (int **)malloc(height*sizeof(int *));
for(i=0; i<height; i++)
a[i] = (int *)malloc(width*sizeof(int));
for(i=0; i<S; i++)
for(j=0; j<S; j++)
a[i][j] = ++k;
rows = S/numtasks;
offset = rows;
remainPart = S%numtasks;
cout<<"Num Rows : "<<rows<<endl;
for(i=1; i<numtasks; i++)
if(remainPart > 0)
{
orginalRows = rows;
rows++;
remainPart--;
MPI_Send(&offset, 1, MPI_INT, i, 1, MPI_COMM_WORLD);
MPI_Send(&rows, 1, MPI_INT, i, 1, MPI_COMM_WORLD);
MPI_Send(&width, 1, MPI_INT, i, 1, MPI_COMM_WORLD);
MPI_Send(&a[offset][0], rows*S, MPI_INT,i,1, MPI_COMM_WORLD);
offset += rows;
rows = orginalRows;
}
else
{
MPI_Send(&offset, 1, MPI_INT, i, 1, MPI_COMM_WORLD);
MPI_Send(&rows, 1, MPI_INT, i, 1, MPI_COMM_WORLD);
MPI_Send(&width, 1, MPI_INT, i, 1, MPI_COMM_WORLD);
MPI_Send(&a[offset][0], rows*S, MPI_INT,i,1, MPI_COMM_WORLD);
offset += rows;
}
//Processing
rows = S/numtasks;
for(i=0; i<rows; i++)
{
for(j=0; j<width; j++)
cout<<a[i][j]<<"\t";
cout<<endl;
}
}else
{
cout<<taskid<<endl;
MPI_Recv(&offset, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
MPI_Recv(&rows, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
MPI_Recv(&width, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
a = (int **)malloc(rows*sizeof(int *));
for(i=0; i<rows; i++)
a[i] = (int *)malloc(width*sizeof(int));
MPI_Recv(&a, rows*width, MPI_INT, 0, 1, MPI_COMM_WORLD, &status);
cout<<"Offset : "<<offset<<"\nRows : "<<rows<<"\nWidth : "<<width<<endl;
for(i=0; i<rows; i++)
{
for(j=0; j<width; j++)
cout<<a[i][j]<<"\t";
cout<<endl;
}
}
getch();
MPI_Finalize();
return 0;
}
Run Code Online (Sandbox Code Playgroud)
这是我的完整代码,这里我已经为'a'动态分配了内存,而在打印[i] [j]时,在else部分下,我收到了运行时错误.如果我将动态内存分配更改为静态,如将int**a更改为int a [N] [N]并删除
a = (int **)malloc(rows*sizeof(int));
for(i=0; i<rows; i++)
a[i] = (int *)malloc(width*sizeof(int));
Run Code Online (Sandbox Code Playgroud)
它完美地运作.
至少有两种方法可以动态分配2D阵列.
第一种是@HRoid的做法:逐行分配,每次分配一行。示意图可以看这里。
第二个是@Claris建议的,它将确保数据在内存中是连续的.这是许多MPI操作所要求的......如FFTW(2D快速傅立叶变换)或Lapack(线性代数的密集矩阵)等库也需要它.您的程序可能会失败
MPI_Send(&a[offset][0], rows*S, MPI_INT,i,1, MPI_COMM_WORLD);
Run Code Online (Sandbox Code Playgroud)
如果S>1,这条语句会尝试发送第offset行结束位置之后的内存内容……这可能触发段错误或未定义行为。
您可以分配您的阵列这样:
a = malloc(rows * sizeof(int *));
if(a==NULL){fprintf(stderr,"out of memory...i will fail\n");}
int *t = malloc(rows * width * sizeof(int));
if(t==NULL){fprintf(stderr,"out of memory...i will fail\n");}
for(i = 0; i < rows; ++i)
a[i] = &t[i * width];
Run Code Online (Sandbox Code Playgroud)
注意:malloc 不将内存初始化为0!
您似乎希望在许多过程中传播2D数组.看看MPI_Scatterv() 这里.看看这个问题.
如果您想了解有关2D阵列和MPI的更多信息,请查看此处.
您可以在此处找到MPI_Scatterv的基本示例.
我把 #define S 8 改成了 #define SQUARE_SIZE 42。使用描述性的名称总是更好的。
这是一个使用MPI_Scatterv()的工作代码!
#include <mpi.h>
#include <iostream>
#include <cstdlib>
using namespace std;

#define SQUARE_SIZE 42

/* Scatters a SQUARE_SIZE x SQUARE_SIZE matrix of ints row-wise over all
 * ranks with MPI_Scatterv(), then prints each rank's piece one rank at a
 * time (ordered by a zero-byte token passed rank to rank).
 *
 * Cleanups over the original: removed unused variables (offset,
 * remainPart, orginalRows and the file-scope MPI_Status), and replaced the
 * variable-length arrays (not standard C++) with heap allocations that are
 * freed before MPI_Finalize(). */
int main(int argc, char *argv[])
{
    int numtasks, taskid;
    int i, j, k = 0;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
    MPI_Comm_size(MPI_COMM_WORLD, &numtasks);

    int rows, height, width;
    int **a = NULL;
    height = width = SQUARE_SIZE;

    // On rank 0, build the full matrix: one contiguous buffer plus a
    // row-pointer table, so &a[0][0] addresses height*width adjacent ints
    // (required by MPI_Scatterv's send buffer).
    if(taskid == 0){
        a = new int*[height];
        int *t = new int[height * width];
        for(i = 0; i < height; ++i)
            a[i] = &t[i * width];
        for(i=0; i<height; i++)
            for(j=0; j<width; j++)
                a[i][j] = ++k;
    }

    // Every rank computes row counts, send counts (in ints) and
    // displacements. Only rank 0 passes these arrays to MPI_Scatterv, but
    // computing them everywhere is a cheap way to get this rank's `rows`.
    // (heap-allocated instead of VLAs: VLAs are not standard C++)
    int *nbrows = new int[numtasks];
    int *sendcounts = new int[numtasks];
    int *displs = new int[numtasks];
    displs[0] = 0;
    for(i = 0; i < numtasks; i++){
        nbrows[i] = height / numtasks;
        if(i < height % numtasks){
            nbrows[i] = nbrows[i] + 1;   // first ranks absorb the remainder
        }
        sendcounts[i] = nbrows[i] * width;
        if(i > 0){
            displs[i] = displs[i-1] + sendcounts[i-1];
        }
    }
    rows = nbrows[taskid];

    // Scattering operation. The root already owns its piece in place,
    // hence the MPI_IN_PLACE flag on its receive side.
    if(taskid == 0){
        MPI_Scatterv(&a[0][0], sendcounts, displs, MPI_INT,
                     MPI_IN_PLACE, 0, MPI_INT, 0, MPI_COMM_WORLD);
    }else{
        // Workers allocate just their slice (contiguous, as above).
        a = new int*[rows];
        int *t = new int[rows * width];
        for(i = 0; i < rows; ++i)
            a[i] = &t[i * width];
        MPI_Scatterv(NULL, sendcounts, displs, MPI_INT,
                     &a[0][0], rows * width, MPI_INT, 0, MPI_COMM_WORLD);
    }

    // Print one rank at a time: wait for a zero-byte token from the
    // previous rank, print, then pass the token to the next rank.
    if(taskid > 0){
        MPI_Status status;
        MPI_Recv(NULL, 0, MPI_INT, taskid-1, 0, MPI_COMM_WORLD, &status);
    }
    cout<<"rank"<< taskid<<" Rows : "<<rows<<" Width : "<<width<<endl;
    for(i=0; i<rows; i++)
    {
        for(j=0; j<width; j++)
            cout<<a[i][j]<<"\t";
        cout<<endl;
    }
    if(taskid < numtasks-1){
        MPI_Send(NULL, 0, MPI_INT, taskid+1, 0, MPI_COMM_WORLD);
    }

    // Freeing the memory: a[0] is the single contiguous data buffer.
    delete[] a[0];
    delete[] a;
    delete[] nbrows;
    delete[] sendcounts;
    delete[] displs;

    MPI_Finalize();
    return 0;
}
Run Code Online (Sandbox Code Playgroud)
编译 : mpiCC main.cpp -o main
跑步 : mpiexec -np 3 main