diff --git a/jacobi/mpi/comp_comm/src/jacobi_2D_mpi_comp_comm.c b/jacobi/mpi/comp_comm/src/jacobi_2D_mpi_comp_comm.c
index 1b620e9773cdf0a2e63b16db51c9c45572c3dc56..d4de3f485216ce03e65ce3e478001a79c16cfb26 100644
--- a/jacobi/mpi/comp_comm/src/jacobi_2D_mpi_comp_comm.c
+++ b/jacobi/mpi/comp_comm/src/jacobi_2D_mpi_comp_comm.c
@@ -19,25 +19,25 @@ MyData **global_phi;
 
 typedef struct MyGrid
 {
-  int local_start[NDIM];
-  int local_end[NDIM];
-  int global_start[NDIM];
-  int global_end[NDIM];
-  int dim[NDIM];
+  int local_start[NDIM];  /* Local start index in each dimension  */
+  int local_end[NDIM];    /* Local end index in each dimension    */
+  int global_start[NDIM]; /* Global start index in each dimension */
+  int global_end[NDIM];   /* Global end index in each dimension   */
+  int dim[NDIM];          /* Local domain size (no ghosts)        */
 } myDomain;
 
 
 typedef struct Task_2D_Cartesian
 {
-  int rank;
-  int nranks;
-  int coords[NDIM];
-  myDomain domain;
-  int nbrtop;
-  int nbrbottom;
-  int nbrleft;
-  int nbrright;
-  MPI_Comm comm2d;
+  int rank;              /* Local process rank                            */
+  int nranks;            /* Communicator size                             */
+  int coords[NDIM];      /* Cartesian topology coordinates                */
+  myDomain domain;       /* MyGrid structure (defined above)              */
+  int nbrtop;            /* Top neighbor process in Cartesian topology    */
+  int nbrbottom;         /* Bottom neighbor process in Cartesian topology */
+  int nbrleft;           /* Left neighbor process in Cartesian topology   */
+  int nbrright;          /* Right neighbor process in Cartesian topology  */
+  MPI_Comm comm2d;       /* Cartesian communicator                        */
 } Task;
 
 /* function prototypes */
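
For context on how the nbrtop/nbrbottom/nbrleft/nbrright and comm2d fields documented above are typically filled, a minimal sketch follows. It assumes a non-periodic 2D grid and illustrative names (dims, periods, reorder, cartesian_setup_sketch); it is not the repository's actual setup routine, and which MPI_Cart_shift output maps to "top" versus "bottom" depends on the code's own convention.

#include <mpi.h>

void cartesian_setup_sketch(Task *const ThisTask)
{
  int dims[NDIM]    = {0, 0};  /* let MPI_Dims_create pick the process grid */
  int periods[NDIM] = {0, 0};  /* non-periodic boundaries (assumption)      */
  const int reorder = 0;       /* do not let MPI reorder ranks (assumption) */

  MPI_Comm_size(MPI_COMM_WORLD, &ThisTask->nranks);
  MPI_Dims_create(ThisTask->nranks, NDIM, dims);

  /* build the Cartesian communicator and query this process' coordinates */
  MPI_Cart_create(MPI_COMM_WORLD, NDIM, dims, periods, reorder, &ThisTask->comm2d);
  MPI_Comm_rank(ThisTask->comm2d, &ThisTask->rank);
  MPI_Cart_coords(ThisTask->comm2d, ThisTask->rank, NDIM, ThisTask->coords);

  /* neighbor ranks along the two Cartesian directions */
  MPI_Cart_shift(ThisTask->comm2d, 0, 1, &ThisTask->nbrtop,  &ThisTask->nbrbottom);
  MPI_Cart_shift(ThisTask->comm2d, 1, 1, &ThisTask->nbrleft, &ThisTask->nbrright);
}
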
@@ -136,9 +136,7 @@ int main(int argc, char **argv)
   
   /************************************************************************************************************/
   /* 2D MPI-cartesian decomposition:
-     the grids are replicated across the MPI processes.
-     This approach is not the most efficient in terms of memory usage,
-     because the arrays are replicated across MPI process instead of to be distributed */
+     the grids are distributed across the MPI processes. */
 
   /* get the reminder, i.e. take into account uneven
      decomposition of points among the processes    */
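
The updated comment and the "remainder" note below it describe the usual way of splitting N grid points along one dimension among P processes when N is not divisible by P: every process gets N / P points and the first N % P processes take one extra. A minimal sketch of that rule, with illustrative names (N, procs, coord, start, end) not taken from the repository:

static void decompose_1d_sketch(const int N, const int procs, const int coord,
                                int *const start, int *const end)
{
  const int chunk = N / procs;  /* base number of points per process        */
  const int rem   = N % procs;  /* leftover points, spread over the         */
                                /* first 'rem' processes                    */

  /* processes with coord < rem own (chunk + 1) points, the rest own chunk */
  *start = coord * chunk + (coord < rem ? coord : rem);
  *end   = *start + chunk + (coord < rem ? 1 : 0) - 1;
}

For example, N = 10 points over procs = 3 processes gives the index ranges [0,3], [4,6], [7,9], i.e. 4 + 3 + 3 points.
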