Correct way of creating MPI ghost zones / halos

Tags: c, parallel-processing, mpi

Good evening!

I am taking a parallel programming course. The teacher gave us an assignment that involves domain partitioning for stencil computations. For this type of computation (finite differences), the most common way to parallelise the code is to partition the domain and create ghost zones (halos).

To better understand the creation of ghost zones in MPI, I have coded this simple example that initialises an array with inner values = 123 and boundary (ghost) values = 8. At the end of all the communication, all ghost values should remain 8. On one node, I am getting 123 values instead.

Serial (no ghosts):

   123 - 123 - ... - 123 - 123

Two partitions:

   123 - 123 - ... - 8  |||  8 - ... - 123 - 123

Three partitions:

   123 - 123 - ... - 8  |||  8 - ... - 123 - 123 - 8  |||  8 - ... - 123 - 123

Aside from this bug, the main question here is about the correct way to create and keep updated ghost zones. Is there a cleaner solution than my messy chain of if (myid == ...) else if (myid == ...) else ... implementations? How do people usually implement this kind of parallelism?

#include<mpi.h>
#include<stdio.h>
#include<stdlib.h>

int WhichSize(int mpiId, int numProc, int tam);

int main(int argc, char *argv[]){

    int i;
    int localSize;
    int numProc;
    int myid;

    int leftProc;
    int rightProc;

    int * myArray;
    int fullDomainSize = 16;

    MPI_Request request;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numProc);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);


    // Lets get each partition size.
    localSize = WhichSize(myid, numProc, fullDomainSize);


    // Allocate arrays according to proc number.
    if(numProc == 1){

        //printf("Allocating Array for serial usage\n");
        myArray = (int*)malloc(localSize*sizeof(int));

    } else if(numProc == 2) {

        //printf("Allocating Array for 2 proc usage\n");
        myArray = (int*)malloc((localSize+ 1)*sizeof(int));

    } else if(numProc > 2) {

        if (myid == 0 || myid == numProc - 1){

            //printf("Allocating array for boundary nodes usage\n");
            myArray = (int*)malloc((localSize+ 1)*sizeof(int));

        } else {

            //printf("Allocating array for inner nodes usage\n");
            myArray = (int*)malloc((localSize+ 2)*sizeof(int));

        }

    }


    // Now we will fill the arrays with a dummy value 123. For the
    // boundaries (ghosts) we will fill them with 8, just to
    // differentiate.

    if(numProc == 1){

        //printf("----------------------------------------\n");
        //printf("Filling the serial array with values... \n");

        for (i = 0; i<localSize; i++){
            myArray[i] = 123;
        }

    } else if(numProc == 2) {

        ////printf("------------------------------------------------\n");
        //printf("Filling array for two proc usage with values... \n");

        for (i = 0; i<localSize; i++){
            myArray[i] = 123;
        }

        // ghost.
        myArray[localSize+1] = 8;

    } else if(numProc > 2) {

        if (myid == 0 || myid == numProc - 1){

            //printf("--------------------------------------------------\n");
            //printf("Filling boundary node arrays usage with values... \n");

            for (i = 0; i<localSize; i++){
                myArray[i] = 123;
            }

            // ghosts.
            myArray[localSize+1] = 8;

        } else {

            //printf("--------------------------------------------------\n");
            //printf("Filling inner node arrays usage with values... \n");

            for (i = 0; i<localSize; i++){
                myArray[i] = 123;
            }

            // ghosts.
            myArray[localSize+1] = 8;
            myArray[0] = 8;

        }

    }


    // Now let's communicate the ghosts with MPI_Isend()/MPI_Irecv().

    if(numProc == 1){

        //printf("Serial usage, no ghost to comunicate \n");

    } else if(numProc == 2) {

        if (myid == 0){

            //printf("Sending ghost value from proc %d to %d\n", myid, myid + 1);
            MPI_Isend(&myArray[localSize+1],
                      1,
                      MPI_INT,
                      1,
                      12345,
                      MPI_COMM_WORLD,
                      &request);

        } else if (myid == 1) {

            //printf("Receiving ghost value from proc %d to %d\n", myid-1, myid);
            MPI_Irecv(&myArray[localSize+1],
                      1,
                      MPI_INT,
                      0,
                      12345,
                      MPI_COMM_WORLD,
                      &request);
        }


    } else if(numProc > 2) {

        if (myid == 0){

            rightProc = myid + 1;

            if (myid == 0){

                //printf("-------------------------------\n");
                //printf("Communicating Boundary ghosts !\n");
                //printf("-------------------------------\n");

                //printf("Sending ghost value from proc %d to %d\n", myid, myid + 1);
                MPI_Isend(&myArray[localSize+1],
                          1,
                          MPI_INT,
                          rightProc,
                          12345,
                          MPI_COMM_WORLD,
                          &request);

            } else if (myid == rightProc) {

                //printf("Receiving ghost value from proc %d to %d\n", myid-1, myid);
                MPI_Irecv(&myArray[localSize+1],
                          1,
                          MPI_INT,
                          0,
                          12345,
                          MPI_COMM_WORLD,
                          &request);
            } 

        } else if (myid == numProc - 1) {

            leftProc  = myid - 1;

            if (myid == numProc - 1){

                //printf("-------------------------------\n");
                //printf("Communicating Boundary ghosts !\n");
                //printf("-------------------------------\n");

                ////printf("Sending ghost value from proc %d to %d\n", myid, myid + 1);
                MPI_Isend(&myArray[localSize+1],
                          1,
                          MPI_INT,
                          leftProc,
                          12345,
                          MPI_COMM_WORLD,
                          &request);

            } else if (myid == leftProc) {

                rightProc = myid + 1;

                //printf("Receiving ghost value from proc %d to %d\n", myid-1, myid);
                MPI_Irecv(&myArray[localSize+1],
                          1,
                          MPI_INT,
                          rightProc,
                          12345,
                          MPI_COMM_WORLD,
                          &request);
            } 

        } else {

                //printf("-------------------------------\n");
                //printf("Communicating Inner ghosts baby\n");
                //printf("-------------------------------\n");

                leftProc  = myid - 1;
                rightProc = myid + 1;

                // Communicate tail ghost.
                if (myid == leftProc) {
                    MPI_Isend(&myArray[localSize+1],
                              1,
                              MPI_INT,
                              rightProc,
                              12345,
                              MPI_COMM_WORLD,
                              &request);

                } else if (myid == rightProc){
                    MPI_Irecv(&myArray[localSize+1],
                              1,
                              MPI_INT,
                              leftProc,
                              12345,
                              MPI_COMM_WORLD,
                              &request);
                }

                // Communicate head ghost.
                if (myid == leftProc) {
                    MPI_Isend(&myArray[0],
                              1,
                              MPI_INT,
                              rightProc,
                              12345,
                              MPI_COMM_WORLD,
                              &request);

                } else if (myid == rightProc){
                    MPI_Irecv(&myArray[0],
                              1,
                              MPI_INT,
                              leftProc,
                              12345,
                              MPI_COMM_WORLD,
                              &request);
                }
        }
    }


    // Now I want to see if the ghosts are in place!

    if (myid == 0){
        printf("The ghost value is: %d\n", myArray[localSize + 1]);
    } else if (myid == numProc - 1){
        printf("The ghost value is: %d\n", myArray[0]);
    } else {
        printf("The head ghost is: %d\n", myArray[0]);
        printf("The tail ghost is: %d\n", myArray[localSize + 1]);
    }


    MPI_Finalize();

    exit(0);
}

int WhichSize(int mpiId, int numProc, int tam){

    int resto;     // remainder of the even division
    int tamLocal;  // size of this rank's partition

    tamLocal = tam / numProc;

    resto = tam - tamLocal*numProc;

    // The first 'resto' ranks take one extra element each.
    if (mpiId < resto) tamLocal = tamLocal + 1;

    return tamLocal;
}

Thank you all!

Hri*_*iev 7

Halos can be implemented elegantly in MPI using Cartesian virtual topologies and the send-receive operation.

First of all, having lots of rank-dependent logic in conditional operators makes the code hard to read and understand. The code is much better when it is symmetric, i.e. when all ranks execute the same code. Corner cases can be taken care of using the null rank MPI_PROC_NULL - sends to or receives from that rank result in no-ops. It is therefore enough to do:

// Compute the rank of the left neighbour
leftProc = myid - 1;
if (leftProc < 0) leftProc = MPI_PROC_NULL;
// Compute the rank of the right neighbour
rightProc = myid + 1;
if (rightProc >= numProc) rightProc = MPI_PROC_NULL;

// Halo exchange in forward direction
MPI_Sendrecv(&myArray[localSize], 1, MPI_INT, rightProc, 0,   // send last element to the right
             &myArray[0], 1, MPI_INT, leftProc, 0,            // receive into left halo
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);
// Halo exchange in reverse direction
MPI_Sendrecv(&myArray[1], 1, MPI_INT, leftProc, 0,            // send first element to the left
             &myArray[localSize+1], 1, MPI_INT, rightProc, 0, // receive into right halo
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);

This code works for any rank, even for those at both ends - there either the source or the destination is the null rank and no actual transfer occurs in the corresponding direction. It also works with any number of MPI processes, from one to many. It requires that all ranks have halos on both sides, including the ones that don't really need them (the two corner ranks). One can store useful things in those dummy halos, e.g. boundary values (when solving PDEs for instance), or simply live with the waste of memory, which is usually negligible.
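
For example, here is a minimal sketch of keeping a fixed boundary value in the otherwise unused corner halos (BOUNDARY_VAL is a hypothetical constant introduced only for illustration):

// Store a fixed (Dirichlet-style) boundary value in the corner halos.
const int BOUNDARY_VAL = 42;  // hypothetical value, not part of the original code

if (leftProc == MPI_PROC_NULL)  myArray[0] = BOUNDARY_VAL;            // left end of the domain
if (rightProc == MPI_PROC_NULL) myArray[localSize+1] = BOUNDARY_VAL;  // right end of the domain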

In your code you use the non-blocking operations incorrectly. They are tricky and require care. MPI_Sendrecv could and should be used instead. It performs the send and the receive operations at the same time and thus prevents deadlocks (as long as each send is matched by a receive).
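
If you nevertheless want to keep the non-blocking operations, the usual pattern is to post all four transfers first and then wait on them together; a minimal sketch, reusing the variable names from above:

// Non-blocking halo exchange: post everything first, then wait on all
// four requests. Transfers involving MPI_PROC_NULL complete immediately.
MPI_Request reqs[4];

MPI_Irecv(&myArray[0],           1, MPI_INT, leftProc,  0, MPI_COMM_WORLD, &reqs[0]); // left halo
MPI_Irecv(&myArray[localSize+1], 1, MPI_INT, rightProc, 0, MPI_COMM_WORLD, &reqs[1]); // right halo
MPI_Isend(&myArray[1],           1, MPI_INT, leftProc,  0, MPI_COMM_WORLD, &reqs[2]); // first inner element
MPI_Isend(&myArray[localSize],   1, MPI_INT, rightProc, 0, MPI_COMM_WORLD, &reqs[3]); // last inner element

MPI_Waitall(4, reqs, MPI_STATUSES_IGNORE);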

If the domain is periodic, the rank computation logic becomes even simpler:

// Compute the rank of the left neighbour
leftProc = (myid - 1 + numProc) % numProc;
// Compute the rank of the right neighbour
rightProc = (myid + 1) % numProc;

Instead of doing the arithmetic yourself, one can create a Cartesian virtual topology and then use MPI_Cart_shift to find the ranks of the two neighbours:

// Create a non-periodic 1-D Cartesian topology
int dims[1] = { numProc };
int periods[1] = { 0 };   // 0 - non-periodic, 1 - periodic
MPI_Comm cart_comm;
MPI_Cart_create(MPI_COMM_WORLD, 1, dims, periods, 1, &cart_comm);

// Find the two neighbours
MPI_Cart_shift(cart_comm, 0, 1, &leftProc, &rightProc);

The code for the halo exchange stays the same; the only difference is that cart_comm should replace MPI_COMM_WORLD. MPI_Cart_shift automatically takes care of the corner cases and returns MPI_PROC_NULL where appropriate. The advantage of that method is that you can easily switch between a non-periodic and a periodic domain simply by flipping the values inside the periods[] array.
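
For example, the periodic variant of the same topology only needs that flag flipped:

// Periodic 1-D topology: the two corner ranks become neighbours of each
// other, so MPI_Cart_shift never returns MPI_PROC_NULL in this case.
int dims[1] = { numProc };
int periods[1] = { 1 };   // 1 - periodic
MPI_Comm cart_comm;
MPI_Cart_create(MPI_COMM_WORLD, 1, dims, periods, 1, &cart_comm);
MPI_Cart_shift(cart_comm, 0, 1, &leftProc, &rightProc);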

The halos must be updated as often as the algorithm requires. For most iterative schemes, the update must happen at the beginning of each iteration. One can reduce the communication frequency by introducing multi-level halos and using the values in the outer levels to compute the values in the inner ones.
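
As a sketch of that idea, a halo of width 2 (two ghost cells on each side, i.e. an array of localSize + 4 elements with the interior at indices 2 to localSize+1 - these index conventions are assumptions of this example) is exchanged with the same pattern, only with a count of 2:

// Forward direction: the last two inner elements go to the right
// neighbour; the left halo (indices 0..1) is filled from the left.
MPI_Sendrecv(&myArray[localSize], 2, MPI_INT, rightProc, 0,
             &myArray[0],         2, MPI_INT, leftProc,  0,
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);
// Reverse direction: the first two inner elements go to the left
// neighbour; the right halo (indices localSize+2..localSize+3) is filled.
MPI_Sendrecv(&myArray[2],           2, MPI_INT, leftProc,  0,
             &myArray[localSize+2], 2, MPI_INT, rightProc, 0,
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);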

In conclusion, your main function could be simplified to (without using a Cartesian topology):

int main(int argc, char *argv[]){

    int i;
    int localSize;
    int numProc;
    int myid;

    int leftProc;
    int rightProc;

    int * myArray;
    int fullDomainSize = 16;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numProc);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);

    // Compute neighbouring ranks
    rightProc = myid + 1;
    if (rightProc >= numProc) rightProc = MPI_PROC_NULL;
    leftProc = myid - 1;
    if (leftProc < 0) leftProc = MPI_PROC_NULL;

    // Lets get each partition size.
    localSize = WhichSize(myid, numProc, fullDomainSize);

    // Allocate arrays.
    myArray = (int*)malloc((localSize+ 2)*sizeof(int));

    // Now we will fill the arrays with a dummy value 123. For the
    // boundaries (ghosts) we will fill them with 8, just to
    // differentiate.

    //printf("--------------------------------------------------\n");
    //printf("Filling node arrays usage with values... \n");

    for (i = 1; i <= localSize; i++){
        myArray[i] = 123;
    }

    // ghosts.
    myArray[localSize+1] = 8;
    myArray[0] = 8;

    //printf("-------------------------------\n");
    //printf("Communicating Boundary ghosts !\n");
    //printf("-------------------------------\n");

    //printf("Sending ghost value to the right\n");
    MPI_Sendrecv(&myArray[localSize], 1, MPI_INT, rightProc, 12345,
                 &myArray[0], 1, MPI_INT, leftProc, 12345,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    //printf("Sending ghost value to the left\n");
    MPI_Sendrecv(&myArray[1], 1, MPI_INT, leftProc, 12345,
                 &myArray[localSize+1], 1, MPI_INT, rightProc, 12345,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    // Now I want to see if the ghosts are in place!

    printf("[%d] The head ghost is: %d\n", myid, myArray[0]);
    printf("[%d] The tail ghost is: %d\n", myid, myArray[localSize + 1]);

    MPI_Finalize();

    return 0;
}
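
For reference, the example builds and runs with the usual MPI wrapper and launcher (the file name halo.c is arbitrary, chosen here for illustration):

mpicc -o halo halo.c
mpirun -np 4 ./halo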