From ab66905e1c127fd0db4c46ff9ebaa2907fabff76 Mon Sep 17 00:00:00 2001
From: David Goz <david.goz@inaf.it>
Date: Sat, 22 Jun 2024 11:27:49 +0200
Subject: [PATCH] /mpi/comp_comm/ comments global data structure

---
 .../comp_comm/src/jacobi_2D_mpi_comp_comm.c   | 32 +++++++++----------
 1 file changed, 15 insertions(+), 17 deletions(-)

diff --git a/jacobi/mpi/comp_comm/src/jacobi_2D_mpi_comp_comm.c b/jacobi/mpi/comp_comm/src/jacobi_2D_mpi_comp_comm.c
index 1b620e9..d4de3f4 100644
--- a/jacobi/mpi/comp_comm/src/jacobi_2D_mpi_comp_comm.c
+++ b/jacobi/mpi/comp_comm/src/jacobi_2D_mpi_comp_comm.c
@@ -19,25 +19,25 @@ MyData **global_phi;
 
 typedef struct MyGrid
 {
-  int local_start[NDIM];
-  int local_end[NDIM];
-  int global_start[NDIM];
-  int global_end[NDIM];
-  int dim[NDIM];
+  int local_start[NDIM];  /* Local start index in each dimension  */
+  int local_end[NDIM];    /* Local end index in each dimension    */
+  int global_start[NDIM]; /* Global start index in each dimension */
+  int global_end[NDIM];   /* Global end index in each dimension   */
+  int dim[NDIM];          /* Local domain size (no ghosts)        */
 } myDomain;
 
 
 typedef struct Task_2D_Cartesian
 {
-  int rank;
-  int nranks;
-  int coords[NDIM];
-  myDomain domain;
-  int nbrtop;
-  int nbrbottom;
-  int nbrleft;
-  int nbrright;
-  MPI_Comm comm2d;
+  int rank;              /* Local process rank                            */
+  int nranks;            /* Communicator size                             */
+  int coords[NDIM];      /* Cartesian topology coordinates                */
+  myDomain domain;       /* MyGrid structure (defined above)              */
+  int nbrtop;            /* Top neighbor process in cartesian topology    */
+  int nbrbottom;         /* Bottom neighbor process in cartesian topology */
+  int nbrleft;           /* Left neighbor process in cartesian topology   */
+  int nbrright;          /* Right neighbor process in cartesian topology  */
+  MPI_Comm comm2d;       /* Cartesian communicator                        */
 } Task;
 
 /* function prototypes */
@@ -136,9 +136,7 @@ int main(int argc, char **argv)
   
   /************************************************************************************************************/
   /* 2D MPI-cartesian decomposition:
-     the grids are replicated across the MPI processes.
-     This approach is not the most efficient in terms of memory usage,
-     because the arrays are replicated across MPI process instead of to be distributed */
+     the grids are distributed across the MPI processes. */
 
   /* get the reminder, i.e. take into account uneven
      decomposition of points among the processes    */
-- 
GitLab