Tutorial 3:
 
 
   
 
 
   
 rc = MPI_Init(&argc, &argv);
initialize the MPI environment
 rc |= MPI_Comm_size(MPI_COMM_WORLD, &numtasks);
determine the number of tasks (processes) in the job
 rc |= MPI_Comm_rank(MPI_COMM_WORLD, &taskid);
determine the rank (ID) of the calling task.
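
These three calls open essentially every MPI program. A minimal, self-contained sketch (the names numtasks and taskid match the code above; error checking via rc is omitted):

    #include <stdio.h>
    #include "mpi.h"

    int main(int argc, char **argv)
    {
        int numtasks, taskid;

        MPI_Init(&argc, &argv);                    /* initialize the MPI environment */
        MPI_Comm_size(MPI_COMM_WORLD, &numtasks);  /* number of tasks in the job     */
        MPI_Comm_rank(MPI_COMM_WORLD, &taskid);    /* rank (ID) of this task         */

        printf("Task %d of %d is running\n", taskid, numtasks);

        MPI_Finalize();
        return 0;
    }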
 
         MPI_Send(&offset, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
send offset to dest
         MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
send rows to dest
         MPI_Send(&a[offset][0], rows*NCA, MPI_DOUBLE, dest, mtype,
                   MPI_COMM_WORLD);
send rows rows of matrix a, starting at a[offset][0], to dest
         MPI_Send(&b, NCA*NCB, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
send the whole matrix b to dest
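
These sends sit inside the master's distribution loop. A sketch of how offset and rows are typically computed around them (numworkers, averow, extra, mtype and FROM_MASTER are assumed names, following the standard master/worker matrix-multiply layout; NRA is the number of rows of a):

      averow = NRA/numworkers;                 /* base number of rows per worker */
      extra  = NRA%numworkers;                 /* leftover rows                  */
      offset = 0;
      mtype  = FROM_MASTER;
      for (dest = 1; dest <= numworkers; dest++) {
         rows = (dest <= extra) ? averow+1 : averow;   /* spread the remainder */
         MPI_Send(&offset, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
         MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
         MPI_Send(&a[offset][0], rows*NCA, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
         MPI_Send(&b, NCA*NCB, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
         offset = offset + rows;
      }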
 
         MPI_Recv(&offset, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
         MPI_Recv(&rows, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
         MPI_Recv(&c[offset][0], rows*NCB, MPI_DOUBLE, source, mtype, MPI_COMM_WORLD,&status);
receive offset, rows, and the computed rows of c, starting at c[offset][0], from source (a worker).
 
      MPI_Recv(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
      MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
      MPI_Recv(&a, rows*NCA, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);
      MPI_Recv(&b, NCA*NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);
receive offset, rows, the assigned rows of a, and the whole matrix b from MASTER.
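
Between these receives and the sends below, the worker multiplies its block of rows of a by b. A sketch of that computation (the loop variables i, j, k are assumptions; a, b, c, rows, NCA and NCB are the buffers and sizes used in the messages above and below):

      for (k = 0; k < NCB; k++)
         for (i = 0; i < rows; i++) {
            c[i][k] = 0.0;
            for (j = 0; j < NCA; j++)
               c[i][k] = c[i][k] + a[i][j] * b[j][k];   /* c = a * b for the assigned rows */
         }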
 
 
      MPI_Send(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
      MPI_Send(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
      MPI_Send(&c, rows*NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD);
send offset, rows, and the computed rows of c back to MASTER.
 
 
 
 
   
 
 
 MPI_Barrier(MPI_Comm comm)

It blocks the calling process until every process in the communicator comm has called it; only then do all of them continue.
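
A minimal, self-contained sketch of a barrier in use (the POSIX sleep call is only there to make the tasks arrive at different times; it is not part of MPI):

    #include <stdio.h>
    #include <unistd.h>
    #include "mpi.h"

    int main(int argc, char **argv)
    {
        int taskid;

        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &taskid);

        sleep(taskid);                      /* tasks reach the barrier at different times */
        printf("Task %d reached the barrier\n", taskid);

        MPI_Barrier(MPI_COMM_WORLD);        /* no task continues until every task in      */
                                            /* MPI_COMM_WORLD has made this call          */
        printf("Task %d passed the barrier\n", taskid);

        MPI_Finalize();
        return 0;
    }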

 
   
 
(a), (b), (e), (f)
 
 
   
MPI_Test
To check whether a nonblocking operation (such as a nonblocking send or receive) has completed. It returns immediately, setting a flag that indicates whether the operation is done.

MPI_Wait
To wait until a nonblocking operation has completed. It returns only when the specified operation is complete.
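
A self-contained sketch contrasting the two: task 0 posts a nonblocking receive and polls it with MPI_Test (it could do other work while waiting), whereas MPI_Wait would simply block until completion. Run with at least two tasks.

    #include <stdio.h>
    #include "mpi.h"

    int main(int argc, char **argv)
    {
        int taskid, msg = 0, flag = 0;
        MPI_Request request;
        MPI_Status  status;

        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &taskid);

        if (taskid == 0) {
            MPI_Irecv(&msg, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, &request);
            while (!flag) {
                MPI_Test(&request, &flag, &status);  /* has the receive completed yet?   */
                /* ... other useful work could be overlapped here ...                    */
            }
            /* MPI_Wait(&request, &status); would block here until completion instead   */
            printf("Task 0 received %d\n", msg);
        }
        else if (taskid == 1) {
            msg = 42;
            MPI_Send(&msg, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
        }

        MPI_Finalize();
        return 0;
    }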

 
 
   
 
 
Two nonblocking routines are used, each appearing twice in the program (once in the master's branch and once in the workers' branch):

MPI_Isend(&outmsg,1,MPI_INT,right,tag,MPI_COMM_WORLD,&request);
start sending outmsg to the right neighbour without waiting for completion.

MPI_Wait(&request, &status);
wait until the send of outmsg has completed, so that outmsg can safely be reused.
 
 

        MPI_Isend (&outmsg, 1, MPI_INT, right, tag, MPI_COMM_WORLD, &request);
         MPI_Wait (&request, &status);

These two calls together can be replaced by a single blocking MPI_Send.
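
That is, within this program the pair above behaves like the single blocking call below (no request object is needed):

         MPI_Send(&outmsg, 1, MPI_INT, right, tag, MPI_COMM_WORLD);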

#include <stdio.h>
#define MASTER 0
#include "mpi.h"

int main(int argc, char **argv)
     {
     int ntasks, taskid, right, left, inmsg, outmsg, tag;
     MPI_Request  request;
     MPI_Status   status;
     char tchar[8];
     outmsg = 0;

/* learn number of tasks in partition and task ID */
     MPI_Init(&argc, &argv);
     MPI_Comm_rank(MPI_COMM_WORLD, &taskid );
     MPI_Comm_size(MPI_COMM_WORLD, &ntasks );
/* compute source and destination for messages */
     if (taskid == ntasks-1) right = 0;
     else right = taskid + 1;
     if (taskid == 0) left = ntasks - 1;
     else left = taskid - 1;
     tag = 1;
/* master sets message, sends it to the right,
 * then waits for its return
 */
        if (taskid == MASTER)
          {
          outmsg = 7;
          printf("%d: message to be sent is %d\n", outmsg);
          inmsg = 0;
          MPI_Isend(&outmsg, 1, MPI_INT, right, tag, MPI_COMM_WORLD, &request);
          MPI_Wait(&request, &status);
          MPI_Recv(&inmsg, 1, MPI_INT, left, tag, MPI_COMM_WORLD, &status);
          printf ("MASTER received message %d, content is %d\n", tag, inmsg);
          }

/* worker reads the message and passes it on;
 * the use of MPI_Isend/MPI_Wait here is equivalent to a blocking MPI_Send
 */
        else
          {
          MPI_Recv(&inmsg, 1, MPI_INT, left, tag, MPI_COMM_WORLD, &status);
          outmsg = inmsg;
         MPI_Isend (&outmsg, 1, MPI_INT, right, tag, MPI_COMM_WORLD, &request);
         MPI_Wait (&request, &status);
          printf ("%d processed message %d, content is %d\n", taskid,
                   tag, outmsg);
          }
     MPI_Finalize();
     return 0;
   }

 
 
 
   
   
 
#include <stdio.h>
#include "mpi.h"
#define NROW 3
#define NCOL 4
void set_groups(MPI_Comm *, MPI_Comm *);

void set_groups(MPI_Comm *row_comm, MPI_Comm *col_comm)
{
  MPI_Group base_grp, grp;
  MPI_Comm temp_comm;
  int row_list[NCOL], col_list[NROW], irow, icol,
    rank_in_world, i, j;

  /*
    ------------------------------------------------
    Get base group from MPI_COMM_WORLD communicator
    ------------------------------------------------ */

  MPI_Comm_group(MPI_COMM_WORLD,&base_grp);
 
  /*  ------------------------------------------------------------
      Establish the row and column to which this processor belongs
      ------------------------------------------------------------ */

  MPI_Comm_rank(MPI_COMM_WORLD,&rank_in_world);
  irow = (rank_in_world % NROW);   /* row index of this task in the NROW x NCOL grid  */
  icol = (rank_in_world / NROW);   /* column index (ranks are assigned column-major)  */
 
  /*  -------------------------
      Build row groups
      -------------------------- */

  row_list[0] = 0;
  for (i=1; i<NCOL; i++)
    row_list[i] = row_list[i-1] + NROW;
  for (i=0; i<NROW; i++) {
    MPI_Group_incl(base_grp,NCOL,row_list,&grp);
    MPI_Comm_create(MPI_COMM_WORLD,grp,&temp_comm);
    if (irow == i) *row_comm=temp_comm;
    for (j=0; j<NCOL; j++) {
      row_list[j] = row_list[j]+1;
    }
  }

  /*  -------------------------
      Build column groups
      -------------------------- */

  for (i=0; i<NROW; i++)
    col_list[i] = i;
  for (i=0; i<NCOL; i++) {
    MPI_Group_incl(base_grp,NROW,col_list,&grp);
    MPI_Comm_create(MPI_COMM_WORLD,grp,&temp_comm);
    if (icol == i) *col_comm=temp_comm;
    for (j=0; j<NROW; j++) {
      col_list[j] = col_list[j]+NROW;
    }
  }
}

int main(int argc, char **argv)
{
  MPI_Comm row_comm, col_comm;
  int row_hgt[NCOL], col_hgt[NROW], max_row, max_col,
    rank_in_world, maxht,i;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank_in_world);

  /*  ---------------------------------------------------------------
      A real simulation would calculate a meaningful maxht here
      --------------------------------------------------------------- */
  maxht = rank_in_world;

  set_groups(&row_comm, &col_comm);

  MPI_Allgather(&maxht, 1, MPI_INT, row_hgt, 1, MPI_INT, row_comm);
  MPI_Allgather(&maxht, 1, MPI_INT, col_hgt, 1, MPI_INT, col_comm);
  max_row = row_hgt[0];
  for (i=1; i<NCOL; i++)
    if (row_hgt[i] > max_row) max_row=row_hgt[i];
  max_col = col_hgt[0];
  for (i=1; i<NROW; i++)
    if (col_hgt[i] > max_col) max_col=col_hgt[i];
  printf("%d, %d, %d\n", rank_in_world, max_row, max_col);

}
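
One way to check the decomposition is to query each task's rank inside the new communicators; ranks are numbered independently within each communicator. A sketch of a fragment that could be added to main after set_groups (row_rank and col_rank are new variables; the program assumes it is run with exactly NROW*NCOL = 12 tasks):

      int row_rank, col_rank;
      MPI_Comm_rank(row_comm, &row_rank);    /* position of this task within its row     */
      MPI_Comm_rank(col_comm, &col_rank);    /* position of this task within its column  */
      printf("world rank %d -> row rank %d, column rank %d\n",
             rank_in_world, row_rank, col_rank);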