How to change blocking MPI send and receive to non-blocking

I am trying to understand the difference between blocking and non-blocking message passing mechanisms in parallel processing using MPI. Suppose we have the following blocking code:

#include <stdio.h> 
#include <string.h> 
#include "mpi.h"

int main (int argc, char* argv[]) {
    const int maximum_message_length = 100;
    const int rank_0= 0;
    char message[maximum_message_length+1]; 
    MPI_Status status; /* Info about receive status */ 
    int my_rank; /* This process ID */
    int num_procs; /* Number of processes in run */ 
    int source; /* Process ID to receive from */
    int destination; /* Process ID to send to */
    int tag = 0; /* Message ID */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); 
    MPI_Comm_size(MPI_COMM_WORLD, &num_procs);

    /* clients processes */
    if (my_rank != server_rank) {
        sprintf(message, "Hello world from process# %d", my_rank);
        MPI_Send(message, strlen(message) + 1, MPI_CHAR, rank_0, tag, MPI_COMM_WORLD);
    } else {    
    /* rank 0 process */ 
        for (source = 0; source < num_procs; source++) { 
            if (source != rank_0) {
                MPI_Recv(message, maximum_message_length + 1, MPI_CHAR, source, tag, 
                MPI_COMM_WORLD,&status);
                fprintf(stderr, "%s\n", message); 
            } 
        } 
    } 
         MPI_Finalize();
}

Each process performs its task and sends the result back to rank_0 (the receiver). rank_0 loops over processes 1 to n-1 and prints their messages sequentially (an iteration of the loop cannot proceed if the current client has not yet sent its message). How do I change this code to a non-blocking mechanism using MPI_Isend and MPI_Irecv? Do I need to delete the loop in the receiver part (rank_0) and explicitly issue an MPI_Irecv(...) for each client, i.e.

MPI_Irecv(message, maximum_message_length + 1, MPI_CHAR, source, tag, 
                    MPI_COMM_WORLD,&status);

Thanks.

+4
1

Strictly speaking, non-blocking communication is not required for this pattern — the output is printed in rank order either way. What non-blocking calls buy you is that rank 0 can post all of its receives up front and then wait on them together, instead of waiting for each sender one at a time.

In fact, this whole communication pattern — every process sending one message to a single root — is exactly what MPI_Gather was made for!

Still, let us start with the non-blocking version you asked about:

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "mpi.h"

int main (int argc, char* argv[]) {
    const int maximum_message_length = 100;
    const int server_rank = 0;
    char message[maximum_message_length+1];
    char *allmessages;
    MPI_Status *status; /* Info about receive status */
    MPI_Request *req; /* Non-Blocking Requests */
    int my_rank; /* This process ID */
    int num_procs; /* Number of processes in run */
    int source; /* Process ID to receive from */
    int tag = 0; /* Message ID */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &num_procs);

    /* clients processes */
    if (my_rank != server_rank) {
        sprintf(message, "Hello world from process# %d", my_rank);
        MPI_Send(message, maximum_message_length + 1, MPI_CHAR, server_rank,
                 tag, MPI_COMM_WORLD);
    } else {

. server.rank. :

    /* rank 0 process */
        allmessages = malloc((maximum_message_length+1)*num_procs);
        status = malloc(sizeof(MPI_Status)*num_procs);
        req = malloc(sizeof(MPI_Request)*num_procs);

        for (source = 0; source < num_procs; source++) {
            req[source] = MPI_REQUEST_NULL;
            if (source != server_rank) {
                /* Post non-blocking receive for source */
                MPI_Irecv(allmessages+(source*(maximum_message_length+1)),
                          maximum_message_length + 1, MPI_CHAR, source, tag,
                          MPI_COMM_WORLD, req+source);
                /* Proceed without waiting on the receive */
                /* (posting further receives */
            }
        }
        /* Wait on all communications to complete */
        MPI_Waitall(num_procs, req, status);
        /* Print the messages in order to the screen */
        for (source = 0; source < num_procs; source++) {
            if (source != server_rank) {
                fprintf(stderr, "%s\n",
                        allmessages+(source*(maximum_message_length+1)));
            }
        }
    }
    MPI_Finalize();
}

Here every receive is posted without waiting, and only then does MPI_Waitall block until all of them have completed. Note that the request slot for server_rank itself is initialized to MPI_REQUEST_NULL, so MPI_Waitall simply ignores it. If you want to process messages as they arrive rather than waiting for all of them, there is also MPI_Waitany (shown at the end). Either way, the non-blocking approach mostly adds bookkeeping here.

For comparison, the MPI_Gather version looks like this:

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "mpi.h"

int main (int argc, char* argv[]) {
    const int maximum_message_length = 100;
    const int server_rank = 0;
    char message[maximum_message_length+1];
    char *allmessages;
    int my_rank; /* This process ID */
    int num_procs; /* Number of processes in run */
    int source; /* Process ID to receive from */
    int tag = 0; /* Message ID */

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &num_procs);

    if (my_rank == server_rank) {
        allmessages = malloc((maximum_message_length+1)*num_procs);
    }
    sprintf(message, "Hello world from process# %d", my_rank);
    MPI_Gather(message, (maximum_message_length+1), MPI_CHAR,
               allmessages, (maximum_message_length+1), MPI_CHAR,
               server_rank, MPI_COMM_WORLD);

    if (my_rank == server_rank) {
        /* Print the messages in order to the screen */
        for (source = 0; source < num_procs; source++) {
            if (source != server_rank) {
                fprintf(stderr, "%s\n",
                        allmessages+(source*(maximum_message_length+1)));
            }
        }
    }
    MPI_Finalize();
}

Since MPI-3 there is even a non-blocking gather, MPI_Igather.

Finally, if you want to process each message as soon as it arrives rather than waiting for all of them (as MPI_Waitall does), you can use MPI_Waitany like this:

    for (i = 0; i < num_procs-1; i++) {
        /* Wait on any next communication to complete */
        MPI_Waitany(num_procs, req, &source, status);
        fprintf(stderr, "%s\n",
                allmessages+(source*(maximum_message_length+1)));
    }
+4

Source: https://habr.com/ru/post/1606189/


All Articles