Commit ab66905e authored by David Goz

/mpi/comp_comm/ comments global data structure

parent 5d29c556
@@ -19,25 +19,25 @@ MyData **global_phi;
 
 typedef struct MyGrid
 {
-  int local_start[NDIM];
-  int local_end[NDIM];
-  int global_start[NDIM];
-  int global_end[NDIM];
-  int dim[NDIM];
+  int local_start[NDIM];   /* Local start index in each dimension  */
+  int local_end[NDIM];     /* Local end index in each dimension    */
+  int global_start[NDIM];  /* Global start index in each dimension */
+  int global_end[NDIM];    /* Global end index in each dimension   */
+  int dim[NDIM];           /* Local domain size (no ghosts)        */
 } myDomain;
 
 typedef struct Task_2D_Cartesian
 {
-  int rank;
-  int nranks;
-  int coords[NDIM];
-  myDomain domain;
-  int nbrtop;
-  int nbrbottom;
-  int nbrleft;
-  int nbrright;
-  MPI_Comm comm2d;
+  int rank;                /* Local process rank                            */
+  int nranks;              /* Communicator size                             */
+  int coords[NDIM];        /* Cartesian topology coordinate                 */
+  myDomain domain;         /* MyGrid structure (defined above)              */
+  int nbrtop;              /* Top neighbor process in cartesian topology    */
+  int nbrbottom;           /* Bottom neighbor process in cartesian topology */
+  int nbrleft;             /* Left neighbor process in cartesian topology   */
+  int nbrright;            /* Right neighbor process in cartesian topology  */
+  MPI_Comm comm2d;         /* Cartesian communicator                        */
 } Task;
 
 /* function prototypes */
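For context, the sketch below is not part of this commit; it only illustrates how the fields of the Task structure above are typically initialized with MPI's Cartesian-topology routines. The function name, the dimension ordering, and the left/right vs. top/bottom orientation of MPI_Cart_shift are assumptions made for illustration.

/* Illustrative sketch (not from this commit): populate Task-like fields
   for a 2D, non-periodic Cartesian topology.                            */
#include <mpi.h>

#define NDIM 2

void setup_topology(int *rank, int *nranks, int coords[NDIM],
                    int *nbrtop, int *nbrbottom,
                    int *nbrleft, int *nbrright,
                    MPI_Comm *comm2d)
{
  int dims[NDIM]    = {0, 0};  /* let MPI factor the process grid */
  int periods[NDIM] = {0, 0};  /* non-periodic boundaries         */

  MPI_Comm_size(MPI_COMM_WORLD, nranks);
  MPI_Dims_create(*nranks, NDIM, dims);

  /* build the 2D Cartesian communicator and query this process' position */
  MPI_Cart_create(MPI_COMM_WORLD, NDIM, dims, periods, 1, comm2d);
  MPI_Comm_rank(*comm2d, rank);
  MPI_Cart_coords(*comm2d, *rank, NDIM, coords);

  /* nearest neighbors along each dimension (orientation is an assumption) */
  MPI_Cart_shift(*comm2d, 0, 1, nbrleft,   nbrright);
  MPI_Cart_shift(*comm2d, 1, 1, nbrbottom, nbrtop);
}

Because the topology is non-periodic, MPI_Cart_shift returns MPI_PROC_NULL for neighbors outside the process grid, and sends/receives to MPI_PROC_NULL are no-ops, so halo exchange at the physical boundary needs no special casing.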
@@ -136,9 +136,7 @@ int main(int argc, char **argv)
 
   /************************************************************************************************************/
-  /* 2D MPI-cartesian decomposition:
-     the grids are replicated across the MPI processes.
-     This approach is not the most efficient in terms of memory usage,
-     because the arrays are replicated across MPI process instead of to be distributed */
+  /* 2D MPI-cartesian decomposition:
+     the grids are distributed across the MPI processes. */
 
   /* get the reminder, i.e. take into account uneven
      decomposition of points among the processes */
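As a reference for the remainder handling mentioned in the comment above, a common scheme (assumed here, not taken from this repository) gives each process floor(N/p) points along a dimension and one extra point to the first N mod p processes. The helper below is a hypothetical illustration of that rule for the start/end index fields of myDomain.

/* Hypothetical helper (not from this repository): block decomposition of
   npoints grid points along one dimension among nprocs processes.
   The first (npoints % nprocs) processes receive one extra point.       */
void decompose_1d(const int npoints, const int nprocs, const int coord,
                  int *start, int *end)
{
  const int chunk = npoints / nprocs;  /* base points per process */
  const int rem   = npoints % nprocs;  /* points left to spread   */

  *start = (coord * chunk) + ((coord < rem) ? coord : rem);
  *end   = *start + chunk + ((coord < rem) ? 1 : 0) - 1;  /* inclusive end */
}

Called once per dimension with the process' Cartesian coordinate, such a helper would fill the index fields of myDomain, e.g. decompose_1d(NX, dims[0], coords[0], &domain.global_start[0], &domain.global_end[0]), where NX, dims and coords are assumed names.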