Can I attach a callback to the completion of a request?

In MPI you can start a nonblocking message transfer (for example, a nonblocking receive with MPI_Irecv). Is it possible to attach a callback function that is executed as soon as the request completes, for example to process the received data?

This is an example of what I'm looking for:

#include "mpi.h"
#include <stdio.h>

void mycallback(void* data){
   *(int*)data += 1; // add one to the received data
}

int main(int argc, char *argv[]){
    int myid, numprocs, left, right;
    int buffer[10], buffer2[10];
    MPI_Request request;
    MPI_Status status;

    MPI_Init(&argc,&argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);

    right = (myid + 1) % numprocs;
    left = myid - 1;
    if (left < 0)
        left = numprocs - 1;

    MPI_Irecv(buffer, 10, MPI_INT, left, 123, MPI_COMM_WORLD, &request);
 // Attach_Callback(request, &mycallback); // somewhere after this point the recv completes and mycallback is executed
    MPI_Send(buffer2, 10, MPI_INT, right, 123, MPI_COMM_WORLD);
    MPI_Wait(&request, &status); //the recv and the callback must have been called at this point
    MPI_Finalize();
    return 0;
}

I found the functions MPI_Grequest_start and MPI_Grequest_complete, but they seem to be meant for something else, since the generalized request they create is not tied to a specific message transfer.

Maybe I need to implement a generalized request (Grequest) whose callback performs an MPI_Recv (not MPI_Irecv)? Is that the idea?

1 answer

As @AhmedMasud pointed out in the comments, the closest thing the standard offers is generalized requests: http://mpi-forum.org/docs/mpi-3.1/mpi31-report/node297.htm#Node297

However, as you can read there, completing a generalized request is up to the user, not to the completion of a particular Irecv: MPI has no mechanism that calls your function automatically when a specific receive finishes (this is a limitation of MPI itself, not of any particular implementation).
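
If you really need code to run as soon as the data arrives, one do-it-yourself route is to combine a generalized request with a helper thread: the thread does a blocking MPI_Recv, runs your callback, and then marks the generalized request complete so that an MPI_Wait on it returns. The following is only a sketch of that idea, not a tested implementation; it assumes MPI was initialised with MPI_THREAD_MULTIPLE, and Irecv_with_callback, recv_task and the hook functions are made-up names for illustration:

#include "mpi.h"
#include <pthread.h>
#include <stdlib.h>

typedef void (*user_cb)(void* data);

/* hypothetical bookkeeping for one receive-with-callback */
typedef struct {
    void*        buf;
    int          count;
    MPI_Datatype type;
    int          source, tag;
    MPI_Comm     comm;
    user_cb      cb;
    MPI_Request  greq;   /* the generalized request handed back to the caller */
} recv_task;

/* hooks required by MPI_Grequest_start */
static int query_fn(void* extra, MPI_Status* status){
    (void)extra;
    MPI_Status_set_cancelled(status, 0);
    MPI_Status_set_elements(status, MPI_BYTE, 0);
    status->MPI_SOURCE = MPI_UNDEFINED;
    status->MPI_TAG    = MPI_UNDEFINED;
    return MPI_SUCCESS;
}
static int free_fn(void* extra){ free(extra); return MPI_SUCCESS; }
static int cancel_fn(void* extra, int complete){ (void)extra; (void)complete; return MPI_SUCCESS; }

/* helper thread: blocking receive, then the callback, then complete the request */
static void* recv_and_callback(void* arg){
    recv_task* t = (recv_task*)arg;
    MPI_Recv(t->buf, t->count, t->type, t->source, t->tag, t->comm, MPI_STATUS_IGNORE);
    t->cb(t->buf);                   /* the "callback on completion" */
    MPI_Grequest_complete(t->greq);  /* an MPI_Wait on the request now returns */
    return NULL;
}

int Irecv_with_callback(void* buf, int count, MPI_Datatype type, int source,
                        int tag, MPI_Comm comm, user_cb cb, MPI_Request* request){
    recv_task* t = malloc(sizeof *t);
    t->buf = buf; t->count = count; t->type = type;
    t->source = source; t->tag = tag; t->comm = comm; t->cb = cb;
    MPI_Grequest_start(query_fn, free_fn, cancel_fn, t, request);
    t->greq = *request;
    pthread_t thread;
    pthread_create(&thread, NULL, recv_and_callback, t);
    pthread_detach(thread);
    return MPI_SUCCESS;
}

In your main() you would call Irecv_with_callback(buffer, 10, MPI_INT, left, 123, MPI_COMM_WORLD, mycallback, &request) instead of MPI_Irecv, and MPI_Wait(&request, &status) would only return after the callback has run. Whether the extra thread is worth it is another question.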

Before building that machinery, though, I would ask myself whether it is really needed: is a completion callback in the spirit of MPI at all? The usual approach is to post the Irecv and Isend, do whatever work does not depend on the message, and only then wait and process the data, keeping communication and computation clearly separated (which, among other things, spares you the thread-safety and progress questions (and they are not trivial)).
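
In code, that separation is just your example with the processing moved after the wait; a small sketch reusing your buffers and neighbours, with no callback machinery at all:

#include "mpi.h"

int main(int argc, char *argv[]){
    int myid, numprocs, left, right;
    int buffer[10] = {0}, buffer2[10] = {0};
    MPI_Request request;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);

    right = (myid + 1) % numprocs;
    left  = (myid - 1 + numprocs) % numprocs;

    MPI_Irecv(buffer, 10, MPI_INT, left, 123, MPI_COMM_WORLD, &request);
    MPI_Send(buffer2, 10, MPI_INT, right, 123, MPI_COMM_WORLD);

    /* ... work that does not depend on buffer goes here ... */

    MPI_Wait(&request, MPI_STATUS_IGNORE);  /* the receive is complete here */
    for (int i = 0; i < 10; ++i)
        buffer[i] += 1;                     /* the "callback" is ordinary code after the wait */

    MPI_Finalize();
    return 0;
}

MPI_Wait is the natural completion hook: anything you would put in the callback simply goes after it.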

So, what is it you actually want to do? If it is a ring shift, 0 -> 1, 1 -> 2, ..., n-1 -> n, n -> 0, where every rank sends up and receives from below (with even/odd ordering to avoid deadlock), here is a sendUp function I use for exactly that (handy, for example, for periodic boundary conditions :-)):

template<class Type> 
void Parallel::sendUp(Type& bufferSend,
                      Type& bufferRec, 
                      long len)
{
    if(this->rank()%2==0)
    {
        // even ranks: send first, then receive (avoids deadlock with blocking calls)
        if(this->rank()!=this->size()-1)
        {
            this->send(bufferSend,len,this->rank()+1);
        }
        if(this->rank()!= 0)
        {
            this->receive(bufferRec,len,this->rank()-1);
        }
        else if(this->size()%2==0)
        {
            this->receive(bufferRec,len,this->size()-1);
        }
    }
    else
    {
        // odd ranks: receive first, then send
        this->receive(bufferRec,len,this->rank()-1);
        if(this->rank()!=this->size()-1)
        {
            this->send(bufferSend,len,this->rank()+1);
        }
        else
        {
            this->send(bufferSend,len,0);
        }
    }

    // odd communicator size: the last rank is even and still has to wrap around to 0
    if(this->size()%2!=0)
    {
        if(this->rank()==this->size()-1)
        {
            this->send(bufferSend,len,0);
        }
        if(this->rank()==0)
        {
            this->receive(bufferRec,len,this->size()-1);
        }
    }
}

It relies on a small "wrapper" class around MPI, where:

parallel.rank() = rank in the comm
parallel.size() = size of the comm
parallel.send() / parallel.receive() are defined as follows:

template<class Type> 
void Parallel::send(Type& array, int len, int to)
{
    // the buffer is addressed through a reference to its first element
    MPI_Send(&array, len*sizeof(Type), MPI_BYTE, to, 0, comm_);
}

template<class Type> 
void Parallel::receive(Type& array, int len, int from)
{
    MPI_Status status;
    MPI_Recv(&array, len*sizeof(Type), MPI_BYTE, from, 0, comm_, &status);
}

template<class Type>
MPI_Status Parallel2d::receive(Type& array, int len, int from)
{
    MPI_Status  status;
    MPI_Recv(&array, len*sizeof(Type), MPI_BYTE, from, 0, comm_, &status);
    return status;
}
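
As a side note (this is not part of the wrapper above): if the even/odd bookkeeping feels error prone, the same ring shift can also be written with plain MPI_Sendrecv, which pairs the send and the receive without deadlock. A sketch, assuming count ints per process:

/* ring shift with MPI_Sendrecv */
void send_up(const int* send_buf, int* recv_buf, int count, MPI_Comm comm){
    int rank, size;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    int dest   = (rank + 1) % size;          /* send "up" the ring          */
    int source = (rank - 1 + size) % size;   /* receive from the rank below */

    MPI_Sendrecv(send_buf, count, MPI_INT, dest,   0,
                 recv_buf, count, MPI_INT, source, 0,
                 comm, MPI_STATUS_IGNORE);
}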

Hope this helps.


Source: https://habr.com/ru/post/1696036/

