From 39c018b40a12fdc2ca0f3d73432e892f3800fa8f Mon Sep 17 00:00:00 2001
From: David Goz <david.goz@inaf.it>
Date: Thu, 20 Jun 2024 10:21:33 +0200
Subject: [PATCH] mpi_comp_comm: fix rank-local grid allocation and boundary conditions
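
Store the per-task sub-domain extent in myDomain (dim[NDIM]) and allocate
only the rank-local portion of phi/phi0 instead of replicating the global
grid on every process. Boundary conditions are now applied per rank: each
side is set only when the corresponding neighbour is MPI_PROC_NULL, i.e.
when the rank owns that physical boundary. Allocate_2DdblArray switches to
calloc so the buffers start zero-initialised.

A minimal sketch of the intended rank-local allocation (assuming
NGHOST == 1, which is what the "+ 2" ghost frame below accounts for):

    /* interior points owned by this rank, plus one ghost layer per side */
    const int nrows = ThisTask.domain.dim[Y] + 2;
    const int ncols = ThisTask.domain.dim[X] + 2;
    MyData **phi  = Allocate_2DdblArray(nrows, ncols);
    MyData **phi0 = Allocate_2DdblArray(nrows, ncols);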

---
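Note: the neighbour ranks stored in Task (nbrleft, nbrright, nbrbottom,
nbrtop) are expected to be MPI_PROC_NULL on the physical edges of the
domain. A hypothetical sketch of how such neighbours are usually obtained;
the communicator name and the axis-to-direction mapping are assumptions,
not shown in this patch:

    /* with a non-periodic Cartesian communicator, MPI_Cart_shift returns
       MPI_PROC_NULL for the missing neighbour at a domain edge */
    int nbrleft, nbrright, nbrbottom, nbrtop;
    MPI_Cart_shift(cartcomm, 0, 1, &nbrleft, &nbrright);  /* shift along X */
    MPI_Cart_shift(cartcomm, 1, 1, &nbrbottom, &nbrtop);  /* shift along Y */

Communication posted to MPI_PROC_NULL is a no-op, so the halo exchange
needs no special-casing on boundary ranks.
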
 .../comp_comm/src/jacobi_2D_mpi_comp_comm.c   | 107 ++++++++++++------
 jacobi/mpi/comp_comm/src/tools.c              |   6 +-
 2 files changed, 74 insertions(+), 39 deletions(-)

diff --git a/jacobi/mpi/comp_comm/src/jacobi_2D_mpi_comp_comm.c b/jacobi/mpi/comp_comm/src/jacobi_2D_mpi_comp_comm.c
index b36017e..234d06a 100644
--- a/jacobi/mpi/comp_comm/src/jacobi_2D_mpi_comp_comm.c
+++ b/jacobi/mpi/comp_comm/src/jacobi_2D_mpi_comp_comm.c
@@ -20,6 +20,7 @@ typedef struct MyGrid
 {
   int start[NDIM];
   int end[NDIM];
+  int dim[NDIM];
 } myDomain;
 
 
@@ -173,6 +174,9 @@ int main(int argc, char **argv)
       /* boundaries */
       ThisTask.domain.start[dim] = ((ThisTask.domain.start[dim] == 0)         ? NGHOST  : ThisTask.domain.start[dim]);
       ThisTask.domain.end[dim]   = ((ThisTask.domain.end[dim] == NX_GLOB + 1) ? NX_GLOB : ThisTask.domain.end[dim]);
+
+      ThisTask.domain.dim[X] = (ThisTask.domain.end[X] - ThisTask.domain.start[X]);
+      ThisTask.domain.dim[Y] = (ThisTask.domain.end[Y] - ThisTask.domain.start[Y]);
     }
 
 #if defined(DEBUG)
@@ -207,26 +211,25 @@ int main(int argc, char **argv)
      -------------------------------------------------------- */
     
   const int ibeg   = NGHOST;
-  const int iend   = ibeg + NX_GLOB - 1;
-  const int nx     = iend - ibeg + 1;
-  const int nx_tot = nx + 2 * NGHOST;
+  /* const int iend   = ibeg + NX_GLOB - 1; */
+  /* const int nx     = iend - ibeg + 1; */
+  /* const int nx_tot = nx + 2 * NGHOST; */
     
   const int jbeg   = NGHOST;
-  const int jend   = jbeg + NY_GLOB - 1;
-  const int ny     = jend - jbeg + 1;
-  const int ny_tot = ny + 2 * NGHOST;
-
-  if (rank == MASTER)
-    {
-      printf("\n\t Grid indices:");
-      printf("\n\t\t ibeg, iend = %d, %d; nx_tot = %d"    ,ibeg, iend, nx_tot);
-      printf("\n\t\t jbeg, jend = %d, %d; ny_tot = %d\n\n",jbeg, jend, ny_tot);
-    }
+  /* const int jend   = jbeg + NY_GLOB - 1; */
+  /* const int ny     = jend - jbeg + 1; */
+  /* const int ny_tot = ny + 2 * NGHOST; */
+
+  /* if (rank == MASTER) */
+  /*   { */
+  /*     printf("\n\t Grid indices:"); */
+  /*     printf("\n\t\t ibeg, iend = %d, %d; nx_tot = %d"    ,ibeg, iend, nx_tot); */
+  /*     printf("\n\t\t jbeg, jend = %d, %d; ny_tot = %d\n\n",jbeg, jend, ny_tot); */
+  /*   } */
   
   /* --------------------------------------------------------
-     2. Generate grid, allocate memory
-        Not optimized because the grids are (unnecessarily)
-	replicated across MPI processes
+     2. Generate grids, allocate memory
+        distributed across MPI processes
      -------------------------------------------------------- */
 
   /* memory allocation */
@@ -237,23 +240,24 @@ int main(int argc, char **argv)
   /* initial conditions */
   for (int i=0 ; i<(NX_GLOB + 2*NGHOST) ; i++) xg[i] = xbeg + (i - ibeg + 1) * delta[X];
   for (int j=0 ; j<(NY_GLOB + 2*NGHOST) ; j++) yg[j] = ybeg + (j - jbeg + 1) * delta[Y];
-  MyData *x = xg; /* Global and local grids are the same  */
-  MyData *y = yg; /* for serial version of the code       */
 
-  /* grids memory allocation */
-  MyData **phi  = Allocate_2DdblArray(ny_tot, nx_tot);
-  MyData **phi0 = Allocate_2DdblArray(ny_tot, nx_tot);
-    
+  /* rank-local grid memory allocation, distributed across MPI processes */
+  MyData **phi  = Allocate_2DdblArray(ThisTask.domain.dim[Y] + 2, ThisTask.domain.dim[X] + 2);
+  MyData **phi0 = Allocate_2DdblArray(ThisTask.domain.dim[Y] + 2, ThisTask.domain.dim[X] + 2);
+
   /* --------------------------------------------------------
-     3. Initialize solution array to 0
+     3. Set boundary conditions
      -------------------------------------------------------- */
-    
-  for (int j=jbeg ; j<=jend ; j++)
-    for (int i=ibeg ; i<=iend ; i++)
-      {
-	phi0[j][i] = 0.0;
-	phi[j][i]  = 0.0;
-      }
+
+  BoundaryConditions(phi0, xg, yg, ThisTask.domain.dim[X], ThisTask.domain.dim[Y], &ThisTask);
+  BoundaryConditions(phi,  xg, yg, ThisTask.domain.dim[X], ThisTask.domain.dim[Y], &ThisTask);
+
+
+
+
+
+
+  
   
   /* --------------------------------------------------------
      4. Main iteration cycle
@@ -262,8 +266,6 @@ int main(int argc, char **argv)
   const double time_start = MPI_Wtime();
   
   /* -- 4a. Set boundary conditions first -- */  
-  BoundaryConditions(phi0, x, y, nx, ny);
-  BoundaryConditions(phi, x, y, nx, ny);
 
   MyData err = 1.0;
   /* iterations */
@@ -342,10 +344,42 @@ void BoundaryConditions(MyData **const restrict phi,
 			MyData  *const restrict x,
 			MyData  *const restrict y,
                         const int               nx,
-			const int               ny)
-/*
-*********************************************************************** */
+			const int               ny,
+			Task    *const restrict ThisTask)
+/**********************************************************************************/
 {
+  /* left */
+  if (ThisTask->nbrleft == MPI_PROC_NULL) /* no left neighbor */
+    {
+      for (int j=ThisTask->domain.start[Y] ; j<ThisTask->domain.end[Y] ; j++)
+	phi[j][0] = (1.0 - y[j]);
+    }
+
+  /* right */
+  if (ThisTask->nbrright == MPI_PROC_NULL) /* no right neighbor */
+    {
+      for (int j=ThisTask->domain.start[Y] ; j<ThisTask->domain.end[Y] ; j++)
+	phi[j][ThisTask->domain.end[X] + 1] = (y[j] * y[j]);
+    }
+
+  /* bottom */
+  if (ThisTask->nbrbottom == MPI_PROC_NULL) /* no bottom neighbor */
+    {
+      for (int i=ThisTask->domain.start[X] ; i<ThisTask->domain.end[X] ; i++)
+	phi[0][i] = (1.0 - x[i]);
+    }
+  
+  /* top */
+  if (ThisTask->nbrtop == MPI_PROC_NULL) /* no top neighbor */
+    {
+      for (int i=ThisTask->domain.start[X] ; i<ThisTask->domain.end[X] ; i++)
+	phi[ThisTask->domain.end[Y] + 1][i] = x[i];
+    }
+
+
+
+  return; /* the original serial-grid code below is now unreachable */
+  
   const int ibeg = NGHOST;
   const int iend = ibeg + nx - 1;
     
@@ -402,6 +436,7 @@ void JacobiAlgorithm(MyData **const restrict Phi,
   return;
 }
 
+
 void Jacobi_Communication(MyData      **const restrict Phi,
 			  MyData      **const restrict Phi0,
 			  MyData       *const restrict error,
@@ -483,7 +518,7 @@ void Jacobi_Communication(MyData      **const restrict Phi,
 	  JacobiAlgorithm(Phi, Phi0, delta, (jbeg - 1), (jend + 1), (iend + 1), (iend + 1), error);
 	  break;
 	}
-    }  
+    }
   
   MPI_Type_free(&column);
 
diff --git a/jacobi/mpi/comp_comm/src/tools.c b/jacobi/mpi/comp_comm/src/tools.c
index be2761d..547d9a9 100644
--- a/jacobi/mpi/comp_comm/src/tools.c
+++ b/jacobi/mpi/comp_comm/src/tools.c
@@ -12,14 +12,14 @@
 /* ********************************************************************* */
 MyData **Allocate_2DdblArray(const int nx, const int ny)
 /*
- * Allocate memory for a double precision array with
+ * Allocate contiguous memory for a MyData array with
  * nx rows and ny columns
  *********************************************************************** */
 {
-  MyData **buf = malloc(nx * sizeof(MyData *));
+  MyData **buf = calloc(nx, sizeof(MyData *));
   assert(buf != NULL);
 
-  buf[0] = (MyData *) malloc(nx * ny * sizeof(MyData));
+  buf[0] = (MyData *) calloc(nx * ny, sizeof(MyData));
   assert(buf[0] != NULL);
   
   for (int j=1 ; j<nx ; j++)
-- 
GitLab