/*
 * MPI example: approximate e by midpoint-rule integration of
 * f(x) = 1 + exp(x) over [0,1]  (integral of exp is e-1, so the
 * total is e).  Rank 0 reads the interval count and broadcasts it;
 * every rank sums its strided share of midpoints and MPI_Reduce
 * collects the partial sums on rank 0.
 */
#include "mpi.h"
#include <math.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int n, myid, numprocs, i;
    double myq, q, h, sum, x;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);

    while (1) {
        if (myid == 0) {
            printf("Enter the number of intervals: (0 quits) ");
            if (scanf("%d", &n) != 1)   /* treat unreadable input as quit */
                n = 0;
        }
        /* every rank needs n before deciding whether to continue */
        MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD);
        if (n == 0)
            break;

        h = 1.0 / (double) n;
        sum = 0.0;
        /* strided decomposition: rank r handles intervals r+1, r+1+P, ... */
        for (i = myid + 1; i <= n; i += numprocs) {
            x = h * ((double) i - 0.5);   /* midpoint of interval i */
            sum += 1.0 + exp(x);
        }
        myq = h * sum;   /* this rank's share of the quadrature */
        MPI_Reduce(&myq, &q, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
        if (myid == 0)
            printf("e is approximately %.16f\n", q);
    }

    MPI_Finalize();
    return 0;
}
/**********************************************************
 * MPI Example - Array Assignment - C Version
 *
 * DESCRIPTION:
 *   In this simple example, the master task initiates numtasks-1
 *   worker tasks. It then distributes an equal portion of an array
 *   to each worker task. Each worker task receives its portion of
 *   the array and performs a simple value assignment to each of its
 *   elements: the value assigned to each element is simply that
 *   element's index in the array plus 1. Each worker task then
 *   sends its portion of the array back to the master task. As the
 *   master receives back each portion of the array, selected
 *   elements are displayed.
 ****************************************************************/
#include "mpi.h"
#include <stdio.h>

#define ARRAYSIZE 60000   /* number of elements in the work array */
#define MASTER    0       /* taskid of first (master) process */
int main(argc,argv)
int argc;
char *argv[];
{
int numtasks,
/* total number of MPI tasks in partitiion */
numworkers,
/* number of worker tasks */
taskid,
/* task identifier */
rc,
/* return error code */
dest,
/* destination task id to send message */
index,
/* index into the array */
i,
/* loop variable */
arraymsg = 1,
/* setting a message type */
indexmsg = 2,
/* setting a message type */
source,
/* origin task id of message */
chunksize;
/* for partitioning the array */
float data[ARRAYSIZE],
/* the intial array */
result[ARRAYSIZE];
/* for holding results of array operations */
MPI_Status status;
/************************* initializations ***************************
* Find out how many tasks are in this partition and what my task id
is. Then
* define the number of worker tasks and the array partition size as
chunksize.
* Note: For this example, the MP_PROCS environment variable should
be set
* to an odd number...to insure even distribution of the array to numtasks-1
* worker tasks.
******************************************************************/
rc = MPI_Init(&argc,&argv);
rc|= MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
rc|= MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
if (rc != MPI_SUCCESS)
printf ("error initializing MPI and
obtaining task ID information\n");
else
printf ("MPI task ID = %d\n", taskid);
printf("%d tasks, I am task %d\n", numtasks, taskid);
numworkers = numtasks-1;
chunksize = (ARRAYSIZE / numworkers);
/**************************** master task ***************************/
if (taskid == MASTER)
{
printf("\n*********** Starting MPI Example
1 ************\n");
printf("MASTER: number of worker tasks
will be= %d\n",numworkers);
fflush(stdout);
/* Initialize the array */
for(i=0; i<ARRAYSIZE; i++)
data[i] = 0.0;
index = 0;
/* Send each worker task its portion
of the array */
for (dest=1; dest<= numworkers; dest++)
{
printf("Sending to
worker task %d\n",dest);
fflush(stdout);
MPI_Send(&index,
1, MPI_INT, dest, indexmsg, MPI_COMM_WORLD);
MPI_Send(&data[index],
chunksize, MPI_FLOAT, dest, arraymsg,
MPI_COMM_WORLD);
index = index + chunksize;
}
/* Now wait to receive back the results
from each worker task and print */
/* a few sample values */
for (i=1; i<= numworkers; i++)
{
source = i;
MPI_Recv(&index,
1, MPI_INT, source, indexmsg, MPI_COMM_WORLD, &status);
MPI_Recv(&result[index],
chunksize, MPI_FLOAT, source, arraymsg,
MPI_COMM_WORLD, &status);
printf("---------------------------------------------------\n");
printf("MASTER: Sample
results from worker task = %d\n",source);
printf("
result[%d]=%f\n", index, result[index]);
printf("
result[%d]=%f\n", index+100, result[index+100]);
printf("
result[%d]=%f\n\n", index+1000, result[index+1000]);
fflush(stdout);
}
printf("MASTER: All Done! \n");
}
/**************************** worker task ****************************/
if (taskid > MASTER)
{
/* Receive my portion of array from
the master task */
source = MASTER;
MPI_Recv(&index, 1, MPI_INT, source,
indexmsg, MPI_COMM_WORLD, &status);
MPI_Recv(&result[index], chunksize,
MPI_FLOAT, source, arraymsg,
MPI_COMM_WORLD, &status);
/* Do a simple value assignment to each
of my array elements */
for(i=index; i < index + chunksize;
i++)
result[i] = i + 1;
/* Send my results back to the master
task */
dest = MASTER;
MPI_Send(&index, 1, MPI_INT, dest,
indexmsg, MPI_COMM_WORLD);
MPI_Send(&result[index], chunksize,
MPI_FLOAT, MASTER, arraymsg,
MPI_COMM_WORLD);
}
MPI_Finalize();
}
/******************************************************************************
 * MPI Timing Program - C Version
 *
 * Task 0 bounces a small message off task 1 NUMBER_REPS times and
 * reports the round-trip latency of each exchange plus the average.
 ******************************************************************************/
#include "mpi.h"     /* MPI library */
#include <stdio.h>
#include <stdlib.h>  /* malloc/calloc, free, exit */
#include <sys/time.h>
#include <time.h>

#define NUMBER_REPS  20   /* number of round trips to time */
#define MESSAGE_SIZE 4    /* message payload size in bytes */

int main(int argc, char *argv[])
{
    int reps;                  /* number of samples per test */
    struct timeval tv1, tv2;   /* timestamps around one round trip */
    int dt1;                   /* time for one round trip (usec) */
    int at1;                   /* accumulated time (usec) */
    int n;
    char *inmsg, *outmsg;      /* buffers containing the message */
    int type;                  /* message tag */
    int numtasks, taskid;
    int rc, dest, source;
    MPI_Status status;

    rc  = MPI_Init(&argc, &argv);                   /* initiate MPI */
    rc |= MPI_Comm_size(MPI_COMM_WORLD, &numtasks); /* no. of tasks */
    rc |= MPI_Comm_rank(MPI_COMM_WORLD, &taskid);   /* my ID */
    if (rc != MPI_SUCCESS)
        printf("error initializing MPI and obtaining task ID information\n");
    else
        printf("task ID = %d\n", taskid);

    if (numtasks != 2)
        fprintf(stderr, "Only using 2 tasks...continuing\n");

    at1 = 0;
    /* calloc so we never transmit uninitialized bytes */
    inmsg  = calloc(MESSAGE_SIZE, 1);
    outmsg = calloc(MESSAGE_SIZE, 1);
    if (inmsg == NULL || outmsg == NULL) {
        fprintf(stderr, "message buffer allocation failed\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    type = 1;
    reps = NUMBER_REPS;

    if (taskid == 0) {
        /* round-trip timing test */
        printf("Doing round trip test, minimal message size, %d reps.\n",
               reps);
        dest = 1;
        source = 1;
        for (n = 1; n <= reps; n++) {
            gettimeofday(&tv1, NULL);   /* before time */

            /* Send message to worker - message type set to 1.
             * (fixed: pass the buffer, not the address of the pointer)
             * If return code indicates error, quit. */
            rc = MPI_Send(outmsg, MESSAGE_SIZE, MPI_BYTE, dest, type,
                          MPI_COMM_WORLD);
            if (rc != MPI_SUCCESS) {
                fprintf(stderr, "Send error in processor 0\n");
                exit(1);
            }

            /* Wait for the echo reply from the worker; quit on error. */
            rc = MPI_Recv(inmsg, MESSAGE_SIZE, MPI_BYTE, source, type,
                          MPI_COMM_WORLD, &status);
            if (rc != MPI_SUCCESS) {
                fprintf(stderr, "Receive error in processor 0\n");
                exit(1);
            }

            gettimeofday(&tv2, NULL);   /* after time */

            /* calculate round trip time in microseconds and print */
            dt1 = (tv2.tv_sec - tv1.tv_sec) * 1000000
                  + tv2.tv_usec - tv1.tv_usec;
            printf("round trip# %2d uSec = %8d\n", n, dt1);
            at1 += dt1;
        }
        printf("\n*** Round Trip Avg uSec = %d\n", at1 / reps);
    } else if (taskid == 1) {
        /* echo each message straight back to the master */
        dest = 0;
        source = 0;
        for (n = 1; n <= reps; n++) {
            rc = MPI_Recv(inmsg, MESSAGE_SIZE, MPI_BYTE, source, type,
                          MPI_COMM_WORLD, &status);
            if (rc != MPI_SUCCESS) {
                fprintf(stderr, "Receive error in processor 1\n");
                exit(1);
            }
            rc = MPI_Send(outmsg, MESSAGE_SIZE, MPI_BYTE, dest, type,
                          MPI_COMM_WORLD);
            if (rc != MPI_SUCCESS) {
                fprintf(stderr, "Send error in processor 1\n");
                exit(1);
            }
        }
    }

    free(inmsg);
    free(outmsg);
    MPI_Finalize();   /* terminating MPI */
    return 0;
}