Passing an Armadillo C++ matrix via MPI

use*_*317 5 c++ matrix mpi armadillo

I need to pass a matrix or complex matrix type defined by the Armadillo C++ Matrix Library via MPI. What is a good way to do this? I thought about trying:

  1. Write the matrix into an array of some kind, send rows/columns of
     that array, and de/reconstruct the array on either side of the
     MPI_Send/MPI_Recv

  2. Use something like MPI_BYTE (a rough sketch of what I have in mind
     follows below)

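Something like this untested sketch is what I have in mind for option 2; Armadillo stores its elements contiguously (column-major), so memptr() exposes a single buffer that can be shipped as raw bytes:

    // untested sketch of option 2: ship the contiguous element buffer as bytes
    #include <mpi.h>
    #include <armadillo>

    int main(int argc, char **argv)
    {
        MPI_Init(&argc, &argv);
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        arma::cx_mat A(4, 4); // both sides must agree on the dimensions
        if(rank == 0)
        {
            A = arma::randu<arma::cx_mat>(4, 4);
            MPI_Send(A.memptr(), (int)(A.n_elem * sizeof(arma::cx_double)),
                     MPI_BYTE, 1, 0, MPI_COMM_WORLD);
        }
        else if(rank == 1)
        {
            MPI_Recv(A.memptr(), (int)(A.n_elem * sizeof(arma::cx_double)),
                     MPI_BYTE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            A.print("A on 1:");
        }
        MPI_Finalize();
    }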
Thanks

Update

So I attempted to implement the other scheme, sending and receiving in a simple example on a single node.

translate.cpp

    #include <mpi.h>
    #include <armadillo>
    #include <vector> 
    #include <cstdlib>

    using namespace std; 
    using namespace arma; 
    using std::vector; 

    class ArmadilloMPI
    {
        public:
            ArmadilloMPI(int nRows, int nCols)
            {
                this->nRows = nRows;
                this->nCols = nCols; 
                realArray = (double **)malloc(nCols * nRows * sizeof(double*));
                imArray = (double **)malloc(nCols * nRows * sizeof(double*));
            }

            ~ArmadilloMPI()
            {
                free(realArray[0]);
                free(realArray);
                free(imArray[0]);
                free(imArray);
            }

            double **realArray; 
            double **imArray; 
            int nCols; 
            int nRows; 

            cx_mat matConstructRecv(int src, int tag)
            {
                cx_mat A(nRows, nCols); 
                MPI_Recv(&(imArray[0][0]),  nRows * nCols, MPI_DOUBLE, src, tag, MPI_COMM_WORLD,0);
                MPI_Recv(&(realArray[0][0]),nRows * nCols, MPI_DOUBLE, src, tag, MPI_COMM_WORLD,0);

                for(int  i = 0; i < nRows; ++i )
                {
                    for(int j = 0; i < nCols; ++j)
                    {
                        real(A(i,j)) = *realArray[i * nRows + j]; 
                        imag(A(i,j)) = *imArray[i * nRows + j];
                    }
                }
                return A; 
            }

            void matDestroySend(cx_mat &A, int dest, int tag)
            {
                for(int  i = 0; i < nRows; ++i )
                {
                    for(int j = 0; i < nCols; ++j)
                    {
                        realArray[i * nRows + j]  = &real(A(i,j)); 
                        imArray[i * nRows + j] = &imag(A(i,j)); 
                    }
                }
                MPI_Send(&(realArray[0][0]), nRows * nCols, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD);
                MPI_Send(&(imArray[0][0]), nRows * nCols, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD);
            }
    };

    int main(int argc, char** argv)
    {
        MPI::Init(argc, argv);

        int size = MPI::COMM_WORLD.Get_size();
        int rank = MPI::COMM_WORLD.Get_rank();

        cout << "test"<<endl; 
        vector<cx_mat> world; 
        for(int i = 0; i < size; ++i )
        {
            world.push_back(randu<cx_mat>(4,4));
        }
        cx_mat A;
        A = randu<cx_mat>(4,4);

        ArmadilloMPI* armaMPI = new ArmadilloMPI(4,4); 

        if(rank==0)
        {

            for(int i = 1; i < size; i++)
            {   
                cout << "A is now " << A << endl; 
                A += armaMPI->matConstructRecv(i, 0);
            }
        }
        else
        {
            armaMPI->matDestroySend(world[rank], 1, 0);
        }

        cout << A << endl; 
        delete armaMPI;
        MPI::Finalize();
    }

But I get a segfault:

*** Process received signal ***
Signal: Segmentation fault: 11 (11)
Signal code: (0)
Failing at address: 0x0

translate(1032,0x7fff747ad310) malloc: *** error for object 0x41434d5f49504d4f: pointer being freed was not allocated

Thoughts?

fra*_*cis 2

There are several problems:

  • In C and C++, arrays and vectors start at 0, not 1, so the following code will fail:

     vector<cx_mat> world; 
     world.resize(1);
     world[1] = randu<cx_mat>(4,4); // problem to come!

    You can change this to:

    vector<cx_mat> world;
    world.push_back(randu<cx_mat>(4,4));
  • Dynamic allocation of a 2D array with contiguous memory. You need one new for the array of doubles and another new for the array of pointers to doubles; then set each pointer to the first item of its row. The element data must sit in a single contiguous block so that MPI_Send/MPI_Recv can treat the whole matrix as one buffer.

    double *data = new double[nCols * nRows];
    realArray = new double*[nRows];
    for(int i = 0; i < nRows; i++){
        realArray[i] = &data[i*nCols];
    }
  • You probably guessed this one... Why doesn't the compiler warn about this kind of thing? Because the pattern can be meaningful, just not here: the loop condition tests i instead of j, so j grows without bound and the accesses run off the end of the array.

    for(int j = 0; i < nCols; ++j)
  • You can give each message a different tag, to avoid the real and imaginary parts getting swapped:

    MPI_Send(&(realArray[0][0]), nRows * nCols, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD);
    MPI_Send(&(imArray[0][0]), nRows * nCols, MPI_DOUBLE, dest, tag+1, MPI_COMM_WORLD);

The code becomes:

#include <mpi.h>
#include <armadillo>
#include <vector>
#include <iostream>
#include <cstdlib>
#include <ctime>   // for time(), used below to seed the RNG

using namespace std;
using namespace arma;
using std::vector;

class ArmadilloMPI
{
public:
    ArmadilloMPI(int nRows, int nCols)
    {
        this->nRows = nRows;
        this->nCols = nCols;
        // one contiguous block per component, plus an array of row pointers
        double *data = new double[nCols * nRows];
        realArray = new double*[nRows];
        for(int i = 0; i < nRows; i++){
            realArray[i] = &data[i*nCols];
        }
        double *datai = new double[nCols * nRows];
        imArray = new double*[nRows];
        for(int i = 0; i < nRows; i++){
            imArray[i] = &datai[i*nCols];
        }

    }

    ~ArmadilloMPI()
    {
        delete[] realArray[0];
        delete[] realArray;
        delete[] imArray[0];
        delete[] imArray;
    }

    double **realArray;
    double **imArray;
    int nCols;
    int nRows;

    cx_mat matConstructRecv(int tag, int src)
    {
        cx_mat A(nRows, nCols);
        MPI_Recv(&(imArray[0][0]),   nRows * nCols, MPI_DOUBLE, src, tag+1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        MPI_Recv(&(realArray[0][0]), nRows * nCols, MPI_DOUBLE, src, tag,   MPI_COMM_WORLD, MPI_STATUS_IGNORE);

        for(int  i = 0; i < nRows; ++i )
        {
            for(int j = 0; j < nCols; ++j)
            {
                // std::real/std::imag return values, not lvalues,
                // so assign the whole complex element at once
                A(i,j) = cx_double(realArray[i][j], imArray[i][j]);
            }
        }
        return A;
    }

    void matDestroySend(cx_mat &A, int dest, int tag)
    {
        for(int  i = 0; i < nRows; ++i )
        {
            for(int j = 0; j < nCols; ++j)
            {
                realArray[i][j] = real(A(i,j));
                imArray[i][j]   = imag(A(i,j));
            }
        }
        MPI_Send(&(realArray[0][0]), nRows * nCols, MPI_DOUBLE, dest, tag, MPI_COMM_WORLD);
        MPI_Send(&(imArray[0][0]), nRows * nCols, MPI_DOUBLE, dest, tag+1, MPI_COMM_WORLD);
    }
};

int main(int argc, char **argv)
{
    int rank;
    int size;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    srand (time(NULL)+rank);

    vector<cx_mat> world;
    world.push_back(randu<cx_mat>(4,4));

    cx_mat A;
    ArmadilloMPI* armaMPI = new ArmadilloMPI(4,4);
    if(rank==0)
    {
        world[0].print("world[0] on 0:");

        armaMPI->matDestroySend(world[0], 1, 0);
    }
    if(rank==1){
        A = armaMPI->matConstructRecv(0, 0);
        A.print("A on 1:");
    }

    delete armaMPI;
    MPI_Finalize();
}
Run Code Online (Sandbox Code Playgroud)

Compile:

 mpiCC -O2 -o main main.cpp -larmadillo -llapack -lblas -Wall

Run:

mpiexec -np 2 main
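One last remark: since Armadillo keeps cx_mat elements in one contiguous column-major block, the staging arrays can be skipped altogether. A minimal sketch of this alternative, treating each std::complex<double> as two consecutive MPI_DOUBLE values:

    #include <mpi.h>
    #include <armadillo>

    using namespace arma;

    int main(int argc, char **argv)
    {
        int rank;
        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        cx_mat A(4, 4);
        if(rank == 0)
        {
            A = randu<cx_mat>(4, 4);
            A.print("A on 0:");
            // 2 doubles (real, imag) per complex element, one message total
            MPI_Send(A.memptr(), 2 * (int)A.n_elem, MPI_DOUBLE, 1, 0, MPI_COMM_WORLD);
        }
        else if(rank == 1)
        {
            MPI_Recv(A.memptr(), 2 * (int)A.n_elem, MPI_DOUBLE, 0, 0,
                     MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            A.print("A on 1:");
        }
        MPI_Finalize();
    }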