Commit 32f8a973 authored by David Goz

mpi/miscellaneous examples added

parent ae96d2fa
Showing with 1039 additions and 0 deletions
#include <iostream>
#include <mpi.h>

#define NELEMENTS 7

using namespace std;

int main(int argc, char **argv)
{
  int rank, size;
  int buf[NELEMENTS];

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  /* -- Only the root process initializes the buffer -- */
  if (rank == 0) {
    for (int i = 0; i < NELEMENTS; i++) buf[i] = 1 + i*i;
  }

  /* -- MPI_Bcast() replicates the root's buffer on every process -- */
  MPI_Bcast(buf, NELEMENTS, MPI_INT, 0, MPI_COMM_WORLD);

  /* -- Each process now owns an identical copy -- */
  cout << "I am processor " << rank << endl;
  for (int i = 0; i < NELEMENTS; i++){
    cout << "buf[" << i << "] = " << buf[i] << endl;
  }
  cout << endl;

  MPI_Finalize();
  return 0;
}
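For reference, since buf[i] = 1 + i*i, the output is fully determined: each rank prints its own header followed by the same seven values (blocks from different ranks may interleave). Rank 0's block reads:

I am processor 0
buf[0] = 1
buf[1] = 2
buf[2] = 5
buf[3] = 10
buf[4] = 17
buf[5] = 26
buf[6] = 37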
#include <iostream>
#include <mpi.h>

#define STR_LENGTH 21

using namespace std;

int main(int argc, char **argv)
{
  int rank, size;
  const char mystring[] = "Hello, I am processor";  /* STR_LENGTH characters */
  char string[STR_LENGTH + 1];

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  /* -- Only the root process fills the message;
        the terminator is set locally on every rank -- */
  string[STR_LENGTH] = '\0';
  if (rank == 0) {
    for (int i = 0; i < STR_LENGTH; i++) string[i] = mystring[i];
  }

  /* -- Broadcast the STR_LENGTH characters to all processes -- */
  MPI_Bcast(string, STR_LENGTH, MPI_CHAR, 0, MPI_COMM_WORLD);

  cout << string << " " << rank << endl;

  MPI_Finalize();
  return 0;
}
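After the broadcast every rank holds the same characters, so a run with 4 processes prints one line per rank, in arbitrary order:

Hello, I am processor 0
Hello, I am processor 1
Hello, I am processor 2
Hello, I am processor 3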
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
  int n, rank, size;
  double data;
  double *send_buf, *recv_buf;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  recv_buf = (double *) malloc(size*sizeof(double));  /* one slot per process */
  send_buf = (double *) malloc(size*sizeof(double));

  data = rank*rank + 1.0;  /* generate one datum per process */

  /* -- Gather one double from every process on the root -- */
  MPI_Gather(&data, 1, MPI_DOUBLE, recv_buf, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
  if (rank == 0){
    printf ("[Gather()]:\n");
    for (n = 0; n < size; n++) printf ("recv_buf[%d] = %f\n", n, recv_buf[n]);
  }

  /* -- The root generates "size" values, one for each process -- */
  if (rank == 0){
    for (n = 0; n < size; n++) send_buf[n] = n*n - 1.0;
  }

  /* -- Scatter them back, one double per process -- */
  MPI_Scatter(send_buf, 1, MPI_DOUBLE, &data, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
  printf ("[Scatter, proc #%d] = %f\n", rank, data);

  free(send_buf);
  free(recv_buf);

  MPI_Finalize();
  return 0;
}
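Both collectives are deterministic here: with 4 processes the gathered values are rank*rank + 1 and the scattered ones are n*n - 1, so the run prints (Scatter lines in arbitrary order):

[Gather()]:
recv_buf[0] = 1.000000
recv_buf[1] = 2.000000
recv_buf[2] = 5.000000
recv_buf[3] = 10.000000
[Scatter, proc #0] = -1.000000
[Scatter, proc #1] = 0.000000
[Scatter, proc #2] = 3.000000
[Scatter, proc #3] = 8.000000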
/* ///////////////////////////////////////////////////////////////////// */
/*!
  \file
  \brief Solve the 1D heat equation.

  Solve the 1D heat equation using a 1st-order explicit method
  on a parallel domain.

  \author A. Mignone (mignone@to.infn.it)
  \date   March 12, 2020
*/
/* ///////////////////////////////////////////////////////////////////// */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define PARALLEL
#ifdef PARALLEL
  #include <mpi.h>
#endif

#define NX_GLOB  64   /* Global number of interior points */
#define NGHOST   1

void Write (double *, double *, int, int);

int main(int argc, char ** argv)
{
  int i, k, beg, end;
  int nx_loc;                 /* Local grid size                         */
  int dstL = -1, dstR = -1;   /* Ranks of left and right neighbour procs */
  int rank = 0, size = 1;
  double t, tstop, dt, cfl = 0.5;
  double *u0;
  double *u1;
  double xbeg = 0.0;
  double xend = +1.0;
  double xglob[NX_GLOB + 2*NGHOST];  /* Global grid array */
  double *xloc;
  double dx;                  /* Mesh spacing */
#ifdef PARALLEL
  double *send_buf;
  double *recv_buf;
#endif

/* --------------------------------------------------------
   0. Initialize the parallel environment & get the
      neighbour proc ranks
   -------------------------------------------------------- */

#ifdef PARALLEL
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  dstL = rank - 1;
  dstR = rank + 1;
  if (dstL < 0)     dstL = MPI_PROC_NULL;  /* Physical boundary on the left  */
  if (dstR >= size) dstR = MPI_PROC_NULL;  /* Physical boundary on the right */
#endif

/* --------------------------------------------------------
   1. Generate global & local grids
      (NX_GLOB is assumed to be divisible by size)
   -------------------------------------------------------- */

#ifdef PARALLEL
  nx_loc = NX_GLOB/size;
  beg    = NGHOST;
  end    = beg + nx_loc - 1;
  dx     = (xend - xbeg)/(NX_GLOB + 1);
  for (i = 0; i < NX_GLOB + 2*NGHOST; i++){
    xglob[i] = xbeg + (i - beg + 1)*dx;
  }
  xloc = xglob + nx_loc*rank;  /* Use pointer arithmetic */
#else
  nx_loc = NX_GLOB;
  beg    = NGHOST;
  end    = beg + nx_loc - 1;
  dx     = (xend - xbeg)/(NX_GLOB + 1);
  for (i = 0; i < NX_GLOB + 2*NGHOST; i++){
    xglob[i] = xbeg + (i - beg + 1)*dx;
  }
  xloc = xglob;
#endif

/* --------------------------------------------------------
   2. Allocate memory on local grids
   -------------------------------------------------------- */

  u0 = (double *) malloc((nx_loc + 2*NGHOST)*sizeof(double));
  u1 = (double *) malloc((nx_loc + 2*NGHOST)*sizeof(double));

#ifdef PARALLEL
  {
    int proc, go;
    for (proc = 0; proc < size; proc++){  /* Print grid info one rank at a time */
      go = proc;
      MPI_Bcast(&go, 1, MPI_INT, 0, MPI_COMM_WORLD);
      if (rank == go) {
        printf ("[Rank %d]\n", rank);
        printf ("  dstL = %d, dstR = %d\n", dstL, dstR);
        printf ("  beg, end = %d, %d; x = [%f, %f]\n",
                beg, end, xloc[beg], xloc[end]);
      }
      MPI_Barrier(MPI_COMM_WORLD);
    }
  }
#endif

/* --------------------------------------------------------
   3. Set initial condition
   -------------------------------------------------------- */

  for (i = beg; i <= end; i++){
    u0[i] = sin(M_PI*xloc[i]);
  }

/* --------------------------------------------------------
   4. Advance solution
   -------------------------------------------------------- */

  t     = 0.0;
  tstop = 0.1;
  dt    = cfl*dx*dx;
  k     = 0;
  Write (xloc, u0, beg, end);
  while (t < tstop){

    if (rank == 0){
      printf ("step #%d; t = %8.3e\n", k, t);
    }

  /* -- 4a. Set physical boundary conditions -- */

    if (dstL == MPI_PROC_NULL){
      u0[beg-1] = 0.0;
    }
    if (dstR == MPI_PROC_NULL){
      u0[end+1] = 0.0;
    }

  /* -- 4b. Set inter-process boundary conditions -- */

#ifdef PARALLEL
    send_buf = u0 + end - (NGHOST - 1);  /* Address of rightmost interior point */
    recv_buf = u0 + 0;                   /* Address of leftmost ghost zone      */
    MPI_Sendrecv (send_buf, NGHOST, MPI_DOUBLE, dstR, 0,
                  recv_buf, NGHOST, MPI_DOUBLE, dstL, 0,
                  MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    send_buf = u0 + beg;      /* Address of leftmost interior point        */
    recv_buf = u0 + end + 1;  /* Address of first ghost zone on the right  */
    MPI_Sendrecv (send_buf, NGHOST, MPI_DOUBLE, dstL, 0,
                  recv_buf, NGHOST, MPI_DOUBLE, dstR, 0,
                  MPI_COMM_WORLD, MPI_STATUS_IGNORE);
#endif

  /* -- 4c. Advance solution by one time step -- */

    for (i = beg; i <= end; i++){
      u1[i] = u0[i] + dt/(dx*dx)*(u0[i-1] - 2.0*u0[i] + u0[i+1]);
    }
    t += dt;
    k++;

  /* -- 4d. Copy arrays for the next time level -- */

    for (i = beg; i <= end; i++) u0[i] = u1[i];
  }
  Write (xloc, u0, beg, end);

  free(u1);
  free(u0);

#ifdef PARALLEL
  MPI_Finalize();
#endif
  return 0;
}

/* ********************************************************************* */
void Write (double *x, double *u, int beg, int end)
/*
 * Dump grid and solution to file (serial or parallel output,
 * depending on whether PARALLEL is defined).
 *********************************************************************** */
{
  int i;
  static int n = 0;  /* File number */
  FILE *fp;
  char fname[32];

/* --------------------------------------------------------
   1. Serial output
   -------------------------------------------------------- */

#ifndef PARALLEL
  sprintf (fname, "heat_eq%02d.dat", n);
  fp = fopen (fname, "w");
  for (i = beg; i <= end; i++) fprintf (fp, "%12.6e %12.6e\n", x[i], u[i]);
  fclose(fp);
#endif

/* --------------------------------------------------------
   2. Parallel output
   -------------------------------------------------------- */

#ifdef PARALLEL

/* -- 2a. Process #0 gathers data and does the writing -- */

  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  int nx_loc = end - beg + 1;
  static double *recv_buf;  /* Zero-initialized; allocated on first call only */
  if (recv_buf == NULL) {
    recv_buf = (double *) malloc((NX_GLOB + 2*NGHOST)*sizeof(double));
  }

  MPI_Gather (u + beg,        nx_loc, MPI_DOUBLE,
              recv_buf + beg, nx_loc, MPI_DOUBLE, 0, MPI_COMM_WORLD);

  if (rank == 0){
    sprintf (fname, "heat_eq%02d.dat", n);
    fp = fopen (fname, "w");
    for (i = beg; i < beg + NX_GLOB; i++) {  /* On rank 0, x spans the global grid */
      fprintf (fp, "%f %f\n", x[i], recv_buf[i]);
    }
    fclose(fp);
  }

/* -- 2b. Shared file pointer (not implemented here) -- */

/* -- 2c. Individual file pointer (not implemented here) -- */

#endif
  n++;
}

/*
MAPLE script used to verify the analytical solution:

restart;
u  := A*exp(-D*mu^2*t)*sin(mu*x + B) + C;
eq := diff(u,t) - D*diff(diff(u,x),x);
simplify(eq);
*/
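As a cross-check of the MAPLE script above: it verifies that u = A*exp(-D*mu^2*t)*sin(mu*x + B) + C solves u_t = D*u_xx for any A, B, C and mu. The example integrates the case D = 1 with initial condition u(x,0) = sin(Pi*x) and homogeneous Dirichlet boundaries, i.e. A = 1, B = C = 0, mu = Pi, so the numerical output can be compared against the exact solution

u(x,t) = exp(-Pi^2*t)*sin(Pi*x)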
File added
/* ///////////////////////////////////////////////////////////////////// */
/*!
  \file
  \brief Subarray example.

  Proc #0 creates a large array Abig with NROWS rows and NCOLS columns.
  Proc #1 receives a subarray with nrows_sub rows and ncols_sub
  columns, starting at starts[].

  \author A. Mignone (mignone@to.infn.it)
  \date   March 14, 2020
*/
/* ///////////////////////////////////////////////////////////////////// */
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include "tools.c"

#define NROWS 5
#define NCOLS 6

int main(int argc, char **argv)
{
  int i, j;
  int rank, size;
  int nrows_sub   = 3;
  int ncols_sub   = 2;
  int starts[2]   = {1, 3};
  int subsizes[2] = {nrows_sub, ncols_sub};
  int bigsizes[2] = {NROWS, NCOLS};
  int **Abig;
  int **Asub;
  MPI_Datatype subarr_type;

/* --------------------------------------------------------
   0. Initialize the MPI execution environment,
      create the subarray type
   -------------------------------------------------------- */

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  if (size < 2) {
    if (rank == 0){
      fprintf (stderr, "! Need at least 2 processes.\n");
    }
    MPI_Finalize();
    return 1;
  }

  MPI_Type_create_subarray(2, bigsizes, subsizes, starts,
                           MPI_ORDER_C, MPI_INT, &subarr_type);
  MPI_Type_commit(&subarr_type);

/* --------------------------------------------------------
   1. Proc #0 creates and sends the big array,
      Proc #1 receives the subarray.
   -------------------------------------------------------- */

  if (rank == 0) {
    Abig = Allocate_2DintArray(NROWS, NCOLS);

  /* -- 1a. Fill array -- */

    for (i = 0; i < NROWS; i++){
      for (j = 0; j < NCOLS; j++){
        Abig[i][j] = j + i*NCOLS;
      }}

  /* -- 1b. Show array -- */

    Show_2DintArray(Abig, NROWS, NCOLS, "Big array (proc #0):");

  /* -- 1c. Send the subarray; the datatype selects the elements -- */

    MPI_Send(&(Abig[0][0]), 1, subarr_type, 1, 123, MPI_COMM_WORLD);

    free(Abig[0]);
    free(Abig);
  } else if (rank == 1) {
    Asub = Allocate_2DintArray(nrows_sub, ncols_sub);

  /* -- The receiver sees plain contiguous integers -- */

    MPI_Recv(&(Asub[0][0]), nrows_sub*ncols_sub, MPI_INT, 0,
             123, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

    Show_2DintArray(Asub, nrows_sub, ncols_sub, "Received subarray (proc #1):");

    free(Asub[0]);
    free(Asub);
  }

  MPI_Type_free(&subarr_type);
  MPI_Finalize();
  return 0;
}
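Since Abig[i][j] = j + i*NCOLS, the subarray type selects rows 1-3 and columns 3-4 of the 5x6 array, so the values received by proc #1 are fully determined (shown here without Show_2DintArray()'s dashed rules):

009 010
015 016
021 022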
File added
/* ///////////////////////////////////////////////////////////////////// */
/*!
  \file
  \brief Collection of useful functions for the MPI course.

  This file provides some simple functions for memory allocation
  (double and int 2D arrays) and printing.
  Function prototypes are already included here and should not be
  repeated elsewhere.
  Simply include this file after the main header section when needed:
  \code
    #include <stdio.h>
    ...
    #include "tools.c"
    ...
    int main()
    {
      .
      .
      .
    }
  \endcode

  \author A. Mignone (mignone@to.infn.it)
  \date   March 14, 2020
*/
/* ///////////////////////////////////////////////////////////////////// */
double **Allocate_2DdblArray(int, int);
int    **Allocate_2DintArray(int, int);
void     Show_2DdblArray(double **, int, int, const char *);
void     Show_2DintArray(int **, int, int, const char *);

/* ********************************************************************* */
double **Allocate_2DdblArray(int nx, int ny)
/*
 * Allocate memory for a double-precision array with nx rows and
 * ny columns. The data live in a single contiguous block;
 * buf[j] points to the beginning of row j.
 *********************************************************************** */
{
  int j;
  double **buf;

  buf    = (double **)malloc (nx*sizeof(double *));
  buf[0] = (double *) malloc (nx*ny*sizeof(double));
  for (j = 1; j < nx; j++) buf[j] = buf[j-1] + ny;

  return buf;
}
/* ********************************************************************* */
int **Allocate_2DintArray(int nx, int ny)
/*
 * Allocate memory for an integer-type array with nx rows and
 * ny columns. Same contiguous layout as Allocate_2DdblArray().
 *********************************************************************** */
{
  int j;
  int **buf;

  buf    = (int **)malloc (nx*sizeof(int *));
  buf[0] = (int *) malloc (nx*ny*sizeof(int));
  for (j = 1; j < nx; j++) buf[j] = buf[j-1] + ny;

  return buf;
}
/* ********************************************************************* */
void Show_2DdblArray(double **A, int nx, int ny, const char *string)
/*
 * Print an nx-by-ny double array, preceded by a header string.
 *********************************************************************** */
{
  int i, j;

  printf ("%s\n", string);
  printf ("------------------------------\n");
  for (i = 0; i < nx; i++) {
    for (j = 0; j < ny; j++) {
      printf ("%8.2f ", A[i][j]);
    }
    printf ("\n");
  }
  printf ("------------------------------\n");
}
/* ********************************************************************* */
void Show_2DintArray(int **A, int nx, int ny, const char *string)
/*
 * Print an nx-by-ny integer array, preceded by a header string.
 *********************************************************************** */
{
  int i, j;

  printf ("%s\n", string);
  for (j = 0; j < ny; j++) printf ("-----");
  printf ("\n");

  for (i = 0; i < nx; i++) {
    for (j = 0; j < ny; j++) {
      printf ("%03d ", A[i][j]);
    }
    printf ("\n");
  }

  for (j = 0; j < ny; j++) printf ("-----");
  printf ("\n");
}
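Because each allocator performs two malloc() calls (one row-pointer block plus one contiguous data block), the matching cleanup is the two-free pattern already used by the subarray example above. A minimal usage sketch (hypothetical caller):

double **A = Allocate_2DdblArray(4, 6);  /* 4 rows, 6 columns                     */
A[2][3] = 1.0;                           /* rows index into one contiguous block  */
free(A[0]);                              /* free the data block first ...         */
free(A);                                 /* ... then the row-pointer array        */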
File added
/* ///////////////////////////////////////////////////////////////////// */
/*!
  \file
  \brief Writing of a 1D buffer in parallel.

  Write a 1D buffer in parallel using five different versions.
  For a contiguous data type:

  - VERSION == 1 employs the shared file pointer;
  - VERSION == 2 employs the individual file pointer, with the offset
    computed by MPI_File_seek();
  - VERSION == 3 defines a file view with an offset depending on the
    process rank;
  - VERSION == 4 is similar to VERSION == 3, but uses a contiguous MPI
    datatype.

  The non-contiguous case is handled by VERSION == 5, which defines an
  MPI vector datatype and a file view.

  \author A. Mignone (mignone@to.infn.it)
  \date   March 1, 2020
*/
/* ///////////////////////////////////////////////////////////////////// */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>

#define NELEM    3
#define VERSION  5

int main(int argc, char **argv)
{
  int i, rank, size;
  double buf[NELEM];
  char fname[] = "arr1D.bin";
  MPI_File fh;
  MPI_Offset disp;

/* --------------------------------------------------------
   0. Initialize the MPI execution environment
   -------------------------------------------------------- */

  MPI_Init (&argc, &argv);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Comm_size (MPI_COMM_WORLD, &size);

/* --------------------------------------------------------
   1. Initialize array
   -------------------------------------------------------- */

  for (i = 0; i < NELEM; i++) buf[i] = rank;

/* --------------------------------------------------------
   2. Delete, re-open the file and write
   -------------------------------------------------------- */

  MPI_File_delete(fname, MPI_INFO_NULL);
  MPI_File_open( MPI_COMM_WORLD, fname,
                 MPI_MODE_CREATE | MPI_MODE_RDWR,
                 MPI_INFO_NULL, &fh );

#if VERSION == 1   /* Contiguous data, shared file pointer */

  MPI_File_write_ordered(fh, buf, NELEM, MPI_DOUBLE, MPI_STATUS_IGNORE);

#elif VERSION == 2 /* Contiguous data, individual file pointer */

  disp = rank*NELEM*sizeof(double);  /* In bytes */
  MPI_File_seek(fh, disp, MPI_SEEK_SET);
  MPI_File_write(fh, buf, NELEM, MPI_DOUBLE, MPI_STATUS_IGNORE);

#elif VERSION == 3 /* Contiguous data, file view */

  disp = rank*NELEM*sizeof(double);
  MPI_File_set_view(fh, disp, MPI_DOUBLE, MPI_DOUBLE, "native", MPI_INFO_NULL);
  MPI_File_write(fh, buf, NELEM, MPI_DOUBLE, MPI_STATUS_IGNORE);

#elif VERSION == 4 /* Contiguous data, file view with MPI datatype */

  MPI_Datatype cntg_type;
  MPI_Type_contiguous(NELEM, MPI_DOUBLE, &cntg_type);
  MPI_Type_commit(&cntg_type);

  disp = rank*NELEM*sizeof(double);
  MPI_File_set_view(fh, disp, MPI_BYTE, cntg_type, "native", MPI_INFO_NULL);
  MPI_File_write(fh, buf, 1, cntg_type, MPI_STATUS_IGNORE);
  MPI_Type_free(&cntg_type);

#elif VERSION == 5 /* Non-contiguous data, file view, vector type */

  MPI_Datatype vec_type;
  for (i = 0; i < NELEM; i++) buf[i] = rank + 0.1*i;

  MPI_Type_vector(NELEM, 1, size, MPI_DOUBLE, &vec_type);  /* stride = # of procs */
  MPI_Type_commit(&vec_type);

  disp = rank*sizeof(double);  /* Each rank starts one double further in */
  MPI_File_set_view(fh, disp, MPI_DOUBLE, vec_type, "native", MPI_INFO_NULL);
  MPI_File_write(fh, buf, NELEM, MPI_DOUBLE, MPI_STATUS_IGNORE);
  MPI_Type_free(&vec_type);

#endif

  MPI_File_close(&fh);
  MPI_Finalize();
  return 0;
}
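For VERSION == 5 the vector type interleaves the ranks' elements with stride equal to the number of processes. With 2 processes (buf[i] = rank + 0.1*i on each rank), the six doubles in arr1D.bin are, in file order:

0.0  1.0  0.1  1.1  0.2  1.2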
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <mpi.h>
#include "tools.c"

#define NDIM     2
#define NX_GLOB  16
#define NY_GLOB  8

int main(int argc, char ** argv)
{
  int i, j, rank, size;
  int nx, ny;
  int nprocs[NDIM];
  int gsizes[NDIM];
  int lsizes[NDIM];
  int periods[NDIM] = {0, 0};
  int coords[NDIM];
  int start[NDIM];
  double **A;
  MPI_Datatype subarr_type;
  MPI_Comm cart_comm;
  char fname[] = "arr2D.bin";

/* --------------------------------------------------------
   0. Initialize the MPI execution environment
   -------------------------------------------------------- */

  MPI_Init (&argc, &argv);
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Comm_size (MPI_COMM_WORLD, &size);

/* --------------------------------------------------------
   1. Create a 2D domain decomposition
   -------------------------------------------------------- */

/* -- 1a. Attempt to find a maximally square decomposition -- */

  nprocs[0] = (int)sqrt(size);
  nprocs[1] = size/nprocs[0];
  if (nprocs[0]*nprocs[1] != size){
    if (rank == 0) printf ("! Cannot decompose\n");
    MPI_Finalize();
    return 1;
  }

/* -- 1b. Create the Cartesian communicator (the global sizes are
         assumed to be divisible by the number of processes along
         each dimension) -- */

  MPI_Cart_create(MPI_COMM_WORLD, NDIM, nprocs, periods, 0, &cart_comm);
  MPI_Cart_get(cart_comm, NDIM, nprocs, periods, coords);

  gsizes[0] = NX_GLOB;
  gsizes[1] = NY_GLOB;
  lsizes[0] = nx = NX_GLOB/nprocs[0];
  lsizes[1] = ny = NY_GLOB/nprocs[1];

  if (rank == 0){
    printf ("Domain decomposed in %d X %d procs\n", nprocs[0], nprocs[1]);
    printf ("Local grid size = %d X %d\n", lsizes[0], lsizes[1]);
  }

/* --------------------------------------------------------
   2. Allocate memory and fill the 2D array on the
      local domain
   -------------------------------------------------------- */

  A = Allocate_2DdblArray(ny, nx);
  for (j = 0; j < ny; j++) {
    for (i = 0; i < nx; i++) {
      A[j][i] = rank;
    }}

/* --------------------------------------------------------
   3. Create the subarray datatype (MPI_ORDER_FORTRAN:
      the first dimension, x, runs fastest in the file)
   -------------------------------------------------------- */

  start[0] = coords[0]*lsizes[0];
  start[1] = coords[1]*lsizes[1];
  MPI_Type_create_subarray (NDIM, gsizes, lsizes, start, MPI_ORDER_FORTRAN,
                            MPI_DOUBLE, &subarr_type);
  MPI_Type_commit (&subarr_type);

/* --------------------------------------------------------
   4. Open the file and write collectively
   -------------------------------------------------------- */

  MPI_File_delete(fname, MPI_INFO_NULL);

  MPI_File fh;
  MPI_File_open(cart_comm, fname,
                MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
  MPI_File_set_view(fh, 0, MPI_DOUBLE, subarr_type, "native", MPI_INFO_NULL);
  MPI_File_write_all(fh, A[0], nx*ny, MPI_DOUBLE, MPI_STATUS_IGNORE);
  MPI_File_close(&fh);

  MPI_Type_free(&subarr_type);
  free(A[0]);
  free(A);
  MPI_Finalize();
  return 0;
}
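Every value in the file equals the rank that owns the corresponding patch, so the decomposition can be inspected with a serial read-back. A minimal sketch (hypothetical check program, assuming the file is read on a machine with the same native byte order):

#include <stdio.h>

#define NX_GLOB 16
#define NY_GLOB 8

int main(void)
{
  double a[NX_GLOB*NY_GLOB];
  FILE *fp = fopen("arr2D.bin", "rb");

  if (fp == NULL) return 1;
  if (fread(a, sizeof(double), NX_GLOB*NY_GLOB, fp) != NX_GLOB*NY_GLOB) return 1;
  fclose(fp);

  /* x is the fastest-running index in the file view created above */
  for (int j = 0; j < NY_GLOB; j++){
    for (int i = 0; i < NX_GLOB; i++) printf ("%2.0f ", a[i + j*NX_GLOB]);
    printf ("\n");
  }
  return 0;
}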
File mode changed from 100755 to 100644
#include <mpi.h>
#include <iostream>
#include <cstdlib>   /* strtol(), EXIT_FAILURE */

#define SIZE 2
#define X    0
#define Y    1

int main(int argc, char **argv)
{
  using namespace std;

  int task, ntasks;

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
  MPI_Comm_rank(MPI_COMM_WORLD, &task);

  /* -- Every task must take the error path; otherwise the other
        ranks would go on to dereference a missing argv[1] -- */
  if (argc <= 1)
  {
    if (!task)
      cout << "\n\t Usage: <executable> <number of processes along X> \n" << endl;

    MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
  }

  const int cartesian_grid_x = (int)strtol(argv[1], NULL, 10);
  const int cartesian_grid_y =
    ((ntasks % cartesian_grid_x == 0) ? (ntasks / cartesian_grid_x) : -1);
  if (cartesian_grid_y == -1)
  {
    if (!task)
      cout << "\n\t ntasks % cartesian_grid_x != 0 ... aborting ...\n" << endl;

    MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
  }

  static int dims[SIZE]    = {cartesian_grid_x, cartesian_grid_y};
  static int periods[SIZE] = {0, 0};
  static int reorder       = 0;
  MPI_Comm comm2d;
  MPI_Cart_create(MPI_COMM_WORLD, SIZE, dims, periods, reorder, &comm2d);

  int coords[SIZE];
  MPI_Cart_coords(comm2d, task, SIZE, coords);

  /* -- Neighbours along Y (left/right) and X (bottom/top);
        MPI_PROC_NULL is returned at non-periodic boundaries -- */
  int nbrright, nbrleft;
  MPI_Cart_shift(comm2d, Y, 1, &nbrleft, &nbrright);

  int nbrtop, nbrbottom;
  MPI_Cart_shift(comm2d, X, 1, &nbrbottom, &nbrtop);

  /* -- Print one task at a time (the barrier makes ordered output
        likely, though stdout ordering is not strictly guaranteed) -- */
  for (int rank = 0; rank < ntasks; rank++)
  {
    MPI_Barrier(MPI_COMM_WORLD);
    if (rank == task)
    {
      cout << "\n\t Task: " << task << endl;
      cout << "\t\t coords[" << coords[X] << "," << coords[Y] << "]" << endl;
      cout << "\t\t nbrright: " << nbrright << " - nbrleft : " << nbrleft << endl;
      cout << "\t\t nbrtop  : " << nbrtop << " - nbrbottom: " << nbrbottom << endl;
      cout << endl;
    }
  }

  MPI_Comm_free(&comm2d);
  MPI_Finalize();
  return 0;
}
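A worked example: running with 6 tasks and 3 processes along X gives dims = {3, 2}. With reorder = 0, MPI assigns Cartesian ranks in row-major order, so the task at coords (i, j) is rank i*2 + j; task 0 at coords (0,0) therefore reports nbrright = 1 and nbrtop = 2, while nbrleft and nbrbottom are MPI_PROC_NULL (printed as an implementation-defined negative integer).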
/* ///////////////////////////////////////////////////////////////////// */
/*!
  \file
  \brief Data communication in a ring.

  For each processor, define a unique string holding the process
  color and perform size cyclic permutations by transferring the
  color name to the process on the right.
  Three versions are provided:

  i)   using MPI_Send() / MPI_Recv()   (select VERSION == 1);
  ii)  using MPI_Sendrecv()            (select VERSION == 2);
  iii) using MPI_Isend() / MPI_Irecv() (select VERSION == 3).

  \author A. Mignone (mignone@to.infn.it)
  \date   March 10, 2020
*/
/* ///////////////////////////////////////////////////////////////////// */
#include <mpi.h>
#include <stdio.h>

#define STR_LENGTH 32
#define VERSION    1

int main(int argc, char ** argv)
{
  int rank, size;
  int dstL, dstR;
  char recv_buf[STR_LENGTH];
  char send_buf[STR_LENGTH];
  MPI_Request req[2];

/* -- 1. Initialize the MPI execution environment -- */

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

/* -- 2. Define colors (one per rank, at most six) -- */

  if (rank == 0) sprintf (send_buf, "red");
  if (rank == 1) sprintf (send_buf, "blue");
  if (rank == 2) sprintf (send_buf, "green");
  if (rank == 3) sprintf (send_buf, "black");
  if (rank == 4) sprintf (send_buf, "orange");
  if (rank == 5) sprintf (send_buf, "yellow");

  if (size > 6){
    if (rank == 0) printf ("! Cannot execute with more than six processes\n");
    MPI_Finalize();
    return 0;
  }

/* -- 3. Determine neighbour processes (periodic ring) -- */

  dstL = rank - 1;
  dstR = rank + 1;
  if (dstL < 0)     dstL = size - 1;
  if (dstR >= size) dstR = 0;

/* -- 4. Perform "size" cyclic permutations -- */

  int n;
  for (n = 0; n < size; n++){

#if VERSION == 1
    /* Blocking send first: fine for short messages like these, but it
       may deadlock once messages exceed the eager threshold */
    MPI_Send(send_buf, STR_LENGTH, MPI_CHAR, dstR, 0, MPI_COMM_WORLD);
    MPI_Recv(recv_buf, STR_LENGTH, MPI_CHAR, dstL, 0, MPI_COMM_WORLD,
             MPI_STATUS_IGNORE);
#elif VERSION == 2
    MPI_Sendrecv(send_buf, STR_LENGTH, MPI_CHAR, dstR, 0,
                 recv_buf, STR_LENGTH, MPI_CHAR, dstL, 0,
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
#elif VERSION == 3
    /* One request per non-blocking call, then wait on both */
    MPI_Isend(send_buf, STR_LENGTH, MPI_CHAR, dstR, 0, MPI_COMM_WORLD, req);
    MPI_Irecv(recv_buf, STR_LENGTH, MPI_CHAR, dstL, 0, MPI_COMM_WORLD, req + 1);
    MPI_Waitall (2, req, MPI_STATUSES_IGNORE);
#endif

  /* -- Replace color -- */

    sprintf (send_buf, "%s", recv_buf);
    if (rank == 0){
      printf ("Proc #%d, my color is now %s\n", rank, send_buf);
    }
  }

  MPI_Finalize();
  return 0;
}
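With 4 processes, rank 0 receives its left neighbour's current color at each iteration, so after k permutations it holds the color that started on rank (4 - k) mod 4 and the output is deterministic:

Proc #0, my color is now black
Proc #0, my color is now green
Proc #0, my color is now blue
Proc #0, my color is now red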
File added
File mode changed from 100755 to 100644