diff --git a/jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Broadcast_1.cc b/jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Broadcast_1.cc
new file mode 100644
index 0000000000000000000000000000000000000000..88302cf76d4366a1831e639d3f554d28e2dd320f
--- /dev/null
+++ b/jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Broadcast_1.cc
@@ -0,0 +1,44 @@
+#include <iostream>
+
+#include <mpi.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#define NELEMENTS 7
+
+using namespace std;
+
+int main(int argc, char ** argv)
+{
+    int rank, size;
+    int buf[NELEMENTS];
+    
+    MPI_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+    
+    if (rank == 0) {
+        for(int i = 0; i < NELEMENTS; i++) buf[i] = 1 + i*i;
+    }
+    
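+    /* Broadcast buf from rank 0 to all ranks in MPI_COMM_WORLD */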
+    MPI_Bcast(buf, NELEMENTS, MPI_INT, 0, MPI_COMM_WORLD);
+    
+    /* -- Each rank prints the broadcast buffer in turn -- */
+    
+    for (int n = 0; n < size; n++){
+        if (rank == n) {
+            cout<<"I am processor "<<rank<<endl;
+            cout<<endl;
+            for(int i = 0; i < NELEMENTS; i++)
+            {
+                cout<<"buf["<<i<<"] = "<<buf[i]<<endl;
+            }
+            cout<<endl;
+        }
+        MPI_Barrier(MPI_COMM_WORLD);   /* crude way to keep the output roughly ordered */
+    }
+    
+    MPI_Finalize();
+    return 0;
+}
diff --git a/jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Broadcast_2.cc b/jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Broadcast_2.cc
new file mode 100644
index 0000000000000000000000000000000000000000..b23b1c8dcf03d484cf0f5d3fb72249a2815be937
--- /dev/null
+++ b/jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Broadcast_2.cc
@@ -0,0 +1,43 @@
+#include <iostream>
+
+#include <mpi.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+//#define STR_LENGTH 32
+#define STR_LENGTH 21
+
+using namespace std;
+
+int main(int argc, char ** argv)
+{
+    int rank, size;
+    char mystring[] = {'H','e','l','l','o',',',' ','I',' ','a','m',' ','p','r','o','c','e','s','s','o','r','\0'};
+    char string[STR_LENGTH];   /* Broadcast buffer; only rank 0 fills it before the Bcast */
+    
+    MPI_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+    if (rank == 0) {
+        for(int i = 0; i < STR_LENGTH; i++) string[i] = mystring[i];
+        for(int i = 0; i < STR_LENGTH; i++) cout<<string[i];
+        cout<<endl;
+    }
+    
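+    /* Broadcast the character string from rank 0 to all ranks */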
+    MPI_Bcast(string, STR_LENGTH, MPI_CHAR, 0, MPI_COMM_WORLD);
+    
+    /* -- Each rank prints the received string in turn -- */
+    for(int n = 0; n < size; n++)
+    {
+        if (rank == n) {
+            for(int i = 0; i < STR_LENGTH; i++) cout<<string[i];
+            cout<<" "<<rank<<endl;
+        }
+        MPI_Barrier(MPI_COMM_WORLD);   /* crude way to keep the output roughly ordered */
+    }
+
+    MPI_Finalize();
+    return 0;
+}
diff --git a/jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Gather_Scatter.c b/jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Gather_Scatter.c
new file mode 100644
index 0000000000000000000000000000000000000000..917dc94c3a19ab8a4d68c07b423174b9b83848ed
--- /dev/null
+++ b/jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Gather_Scatter.c
@@ -0,0 +1,38 @@
+#include <mpi.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+int main(int argc, char ** argv)
+{
+    int n, rank, size;
+    double data;
+    double *send_buf, *recv_buf;
+    
+    MPI_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+    
+    recv_buf = (double *) malloc(size*sizeof(double)); // allocate memory
+    send_buf = (double *) malloc(size*sizeof(double));
+    
+    data = rank*rank + 1.0; // generate data on different procs
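+    /* Gather one double from every rank into recv_buf on rank 0, stored in rank order */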
+    MPI_Gather(&data,    1, MPI_DOUBLE, recv_buf, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
+    
+    if (rank == 0){
+        printf ("[Gather()]:\n");
+        for (n = 0; n < size; n++) printf ("recv_buf[%d] = %f\n",n,recv_buf[n]);
+    }
+    
+    if (rank == 0){
+        for (n = 0; n < size; n++) send_buf[n] = n*n - 1.0; // Fill send_buf with "size" distinct values on the root
+    }
+    
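+    /* Scatter send_buf from rank 0: each rank receives one double into its own "data" */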
+    MPI_Scatter(send_buf, 1, MPI_DOUBLE, &data,    1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
+    
+    printf ("[Scatter, proc #%d] = %f\n",rank,data);
+    
+    MPI_Finalize();
+    return 0;
+}
diff --git a/jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Heat_Equation_Gather.c b/jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Heat_Equation_Gather.c
new file mode 100644
index 0000000000000000000000000000000000000000..3e80507fd25e7c20f3a6e9c79b6cd06b2d26f07d
--- /dev/null
+++ b/jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/Heat_Equation_Gather.c
@@ -0,0 +1,253 @@
+/* ///////////////////////////////////////////////////////////////////// */
+/*!
+ \file
+ \brief Solve 1D heat equation.
+ 
+ Solve the 1D heat equation using a 1st order explicit method
+ on a parallel domain.
+ 
+ \author A. Mignone (mignone@to.infn.it)
+ \date   March 12, 2020
+ */
+/* ///////////////////////////////////////////////////////////////////// */
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+
+#define PARALLEL
+
+#ifdef PARALLEL
+#include <mpi.h>
+#endif
+
+#define NX_GLOB   64  /* Global number of interior points */
+#define NGHOST     1
+
+void Write (double *, double *, int, int);
+
+int main(int argc, char ** argv)
+{
+    int    i, k, beg, end;
+    int    nx_loc;               /* Local grid size */
+    int    dstL = -1, dstR=-1;   /* Rank of left and right neighbour procs */
+    int    rank=0, size=1;
+    double t, tstop, dt, cfl = 0.5;
+    double *u0;
+    double *u1;
+    double xbeg =  0.0;
+    double xend = +1.0;
+    double xglob[NX_GLOB + 2*NGHOST];   // Global grid array
+    double *xloc;
+    double dx;    /* Mesh spacing */
+#ifdef PARALLEL
+    double *send_buf;
+    double *recv_buf;
+#endif
+    FILE *fp;
+    
+    /* --------------------------------------------------------
+     0. Initialize parallel environment & get neighbour
+     proc rank
+     -------------------------------------------------------- */
+    
+#ifdef PARALLEL
+    MPI_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+    
+    dstL   = rank - 1;
+    dstR   = rank + 1;
+    if (dstL < 0)     dstL = MPI_PROC_NULL;
+    if (dstR >= size) dstR = MPI_PROC_NULL;
+#endif
+    
+    /* --------------------------------------------------------
+     1. Generate global & local grids
+     -------------------------------------------------------- */
+    
+#ifdef PARALLEL
+    nx_loc = NX_GLOB/size;
+    beg    = NGHOST;
+    end    = beg + nx_loc - 1;
+    
+    dx = (xend - xbeg)/(NX_GLOB+1);
+    for (i = 0; i < NX_GLOB + 2*NGHOST; i++){
+        xglob[i] = xbeg + (i-beg+1)*dx;
+    }
+    xloc = xglob + nx_loc*rank;  /* Use pointer arithmetic */
+#else
+    nx_loc = NX_GLOB;
+    beg    = NGHOST;
+    end    = beg + nx_loc - 1;
+    
+    dx = (xend - xbeg)/(NX_GLOB+1);
+    for (i = 0; i < NX_GLOB + 2*NGHOST; i++){
+        xglob[i] = xbeg + (i-beg+1)*dx;
+    }
+    xloc = xglob;  /* Use pointer arithmetic */
+#endif
+    
+    /* --------------------------------------------------------
+     2. Allocate memory on local grids
+     -------------------------------------------------------- */
+    
+    u0 = (double *) malloc((nx_loc + 2*NGHOST)*sizeof(double));
+    u1 = (double *) malloc((nx_loc + 2*NGHOST)*sizeof(double));
+    
+#ifdef PARALLEL
+    {
+        int proc, go;
+        for (proc = 0; proc < size; proc++){
+            go = proc;
+            MPI_Bcast(&go, 1, MPI_INT, 0, MPI_COMM_WORLD);
+            if (rank == go) {
+                printf ("[Rank %d]\n",rank);
+                printf ("  dstL = %d, dstR = %d\n",dstL, dstR);
+                printf ("  beg, end = %d, %d; x = [%f, %f]\n",
+                        beg, end, xloc[beg],xloc[end]);
+            }
+            MPI_Barrier(MPI_COMM_WORLD);
+        }
+    }
+#endif
+    
+    /* --------------------------------------------------------
+     3. Set initial condition
+     -------------------------------------------------------- */
+    
+    for (i = beg; i <= end; i++){
+        u0[i] = sin(M_PI*xloc[i]);
+    }
+    
+    /* --------------------------------------------------------
+     4. Advance solution
+     -------------------------------------------------------- */
+    
+    t     = 0.0;
+    tstop = 0.1;
+    dt    = cfl*dx*dx;
+    k     = 0;
+    
+    Write (xloc, u0, beg, end);
+    while (t < tstop){
+        
+        if (rank == 0){
+            printf ("step #%d; t = %8.3e\n",k,t);
+        }
+        
+        /* -- 4a. Set physical boundary conditions -- */
+        
+        if (dstL == MPI_PROC_NULL){
+            u0[beg-1] = 0.0;
+        }
+        if (dstR == MPI_PROC_NULL){
+            u0[end+1] = 0.0;
+        }
+        
+        /* -- 4b. Set inter-process boundary conditions -- */
+        
+#ifdef PARALLEL
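+        /* Exchange NGHOST values with left/right neighbours; MPI_PROC_NULL endpoints make these no-ops */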
+        send_buf = u0 + end - (NGHOST - 1);  // Address of rightmost interior point
+        recv_buf = u0 + 0;                   // Address of leftmost ghost zone
+        MPI_Sendrecv (send_buf, NGHOST, MPI_DOUBLE, dstR, 0,
+                      recv_buf, NGHOST, MPI_DOUBLE, dstL, 0,
+                      MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+        
+        send_buf = u0 + beg;     // Address of leftmost interior point
+        recv_buf = u0 + end + 1; // Address of first ghost zone on the right
+        MPI_Sendrecv (send_buf, NGHOST, MPI_DOUBLE, dstL, 0,
+                      recv_buf, NGHOST, MPI_DOUBLE, dstR, 0,
+                      MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+#endif
+        
+        /* -- 4c. Advance solution by one time step -- */
+        
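+        /* FTCS update: u_i^{new} = u_i + dt/dx^2 * (u_{i-1} - 2*u_i + u_{i+1}) */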
+        for (i = beg; i <= end; i++){
+            u1[i] = u0[i] + dt/(dx*dx)*(u0[i-1] - 2.0*u0[i] + u0[i+1]);
+        }
+        t += dt;
+        k++;
+        
+        /* -- 4d. Copy arrays for next time level -- */
+        
+        for (i = beg; i <= end; i++) u0[i] = u1[i];
+    }
+    Write (xloc, u0, beg, end);
+    
+#ifdef PARALLEL
+    MPI_Finalize();
+#endif
+    return 0;
+}
+
+/* ********************************************************************* */
+void Write (double *x, double *u, int beg, int end)
+/*
+ *********************************************************************** */
+{
+    int    i;
+    int    rank;
+    static int n = 0;  /* File number */
+    FILE *fp;
+    char fname[32];
+    
+    /* --------------------------------------------------------
+     1. Serial output
+     -------------------------------------------------------- */
+    
+#ifndef PARALLEL
+    sprintf (fname,"heat_eq%02d.dat",n);
+    fp = fopen (fname,"w");
+    for (i = beg; i <= end; i++) fprintf (fp, "%12.6e  %12.6e\n", x[i], u[i]);
+    fclose(fp);
+#endif
+    
+    /* --------------------------------------------------------
+     2. Parallel output
+     -------------------------------------------------------- */
+    
+#ifdef PARALLEL
+    
+    /* -- 2a. Process #0 gathers data and does the writing -- */
+    
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    int nx_loc = end - beg + 1;
+    static double *recv_buf;
+    if (recv_buf == NULL) {
+        recv_buf = (double *) malloc((NX_GLOB + 2*NGHOST)*sizeof(double));
+    }
+    
+    MPI_Gather (u + beg, nx_loc, MPI_DOUBLE,
+                recv_buf + beg, nx_loc, MPI_DOUBLE, 0, MPI_COMM_WORLD);
+    if (rank == 0){
+        sprintf (fname,"heat_eq%02d.dat",n);
+        fp = fopen (fname,"w");
+        for (i = beg; i < beg+NX_GLOB; i++) {
+            fprintf (fp, "%f  %f\n", x[i], recv_buf[i]);
+        }   
+        fclose(fp);
+    }
+    
+    /* -- 2b. Shared file pointer -- */
+    
+    /* -- 2c. Individual file pointer -- */
+    
+#endif
+    
+    n++;
+}
+
+
+
+/*
+ MAPLE Script:
+ 
+ restart;
+ u := A*exp(-D*mu^2*t)*sin(mu*x + B) + C;
+ eq := diff(u,t) - D*diff(diff(u,x),x);
+ simplify(eq);
+ 
+ 
+ */
diff --git a/jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/lecture_Broadcast_Gather_Scatter.pdf b/jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/lecture_Broadcast_Gather_Scatter.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..b31c13174dc4baf16637694a733b0125dc2fc950
Binary files /dev/null and b/jacobi/mpi/miscellaneous/Broadcast_Gather_Scatter/lecture_Broadcast_Gather_Scatter.pdf differ
diff --git a/jacobi/mpi/miscellaneous/MPI_Datatypes/Subarray.c b/jacobi/mpi/miscellaneous/MPI_Datatypes/Subarray.c
new file mode 100644
index 0000000000000000000000000000000000000000..157ed4519c5784c2730ae188d0cc4b1c290615b4
--- /dev/null
+++ b/jacobi/mpi/miscellaneous/MPI_Datatypes/Subarray.c
@@ -0,0 +1,97 @@
+/* ///////////////////////////////////////////////////////////////////// */
+/*!
+ \file
+ \brief Subarray example.
+ 
+ Proc #0 creates a large array Abig with NROWS rows and NCOLS columns.
+ Proc #1 receives a subarray with nrows_sub rows and ncols_sub
+ columns starting at starts[].
+ 
+ \author A. Mignone (mignone@to.infn.it)
+ \date   March 14, 2020
+ */
+/* ///////////////////////////////////////////////////////////////////// */
+#include <stdio.h>
+#include <stdlib.h>
+#include <mpi.h>
+#include "tools.c"
+
+#define NROWS   5
+#define NCOLS   6
+
+int main(int argc, char **argv)
+{
+    int i,j;
+    int rank, size;
+    int nrows_sub = 3;
+    int ncols_sub = 2;
+    int starts[2]    = {1,3};
+    int subsizes[2]  = {nrows_sub,ncols_sub};
+    int bigsizes[2]  = {NROWS, NCOLS};
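+    /* The subarray covers rows 1..3 and columns 3..4 of the NROWS x NCOLS array */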
+    int **Abig;
+    int **Asub;
+    MPI_Datatype MPI_Subarr;
+    
+    /* --------------------------------------------------------
+     0. Initialize the MPI execution environment,
+     create subarray type
+     -------------------------------------------------------- */
+    
+    MPI_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+    
+    MPI_Type_create_subarray(2, bigsizes, subsizes, starts,
+                             MPI_ORDER_C, MPI_INT, &MPI_Subarr);
+    MPI_Type_commit(&MPI_Subarr);
+    
+    if (size < 2) {
+        if (rank == 0){
+            fprintf(stderr,"! Need at least 2 processors.\n");
+        }
+        MPI_Finalize();
+        return 1;
+    }
+    
+    /* --------------------------------------------------------
+     1. Proc #0 creates the big array,
+     Proc #1 receives the subarray.
+     -------------------------------------------------------- */
+    
+    if (rank == 0) {
+        Abig = Allocate_2DintArray(NROWS, NCOLS);
+        
+        /* -- 1a. Fill array -- */
+        
+        for (i = 0; i < NROWS; i++){
+            for (j = 0; j < NCOLS; j++){
+                Abig[i][j] = j + i*NCOLS;
+            }}
+        
+        /* -- 1b. Show array -- */
+        
+        Show_2DintArray(Abig, NROWS, NCOLS, "Big array (proc #0):");
+        
+        MPI_Send(&(Abig[0][0]), 1, MPI_Subarr, 1, 123, MPI_COMM_WORLD);
+        
+        free(Abig[0]);
+        free(Abig);
+        
+    } else if (rank == 1) {
+        
+        Asub = Allocate_2DintArray(nrows_sub, ncols_sub);
+        
+        MPI_Recv(&(Asub[0][0]), nrows_sub*ncols_sub, MPI_INT, 0,
+                 123, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+        
+        Show_2DintArray(Asub, nrows_sub, ncols_sub, "Received subarray (proc #1):");
+        
+        free(Asub[0]);
+        free(Asub);
+    }
+    
+    MPI_Type_free(&MPI_Subarr);
+    MPI_Finalize();
+    return 0;
+}
diff --git a/jacobi/mpi/miscellaneous/MPI_Datatypes/lecture_MPI_Datatypes.pdf b/jacobi/mpi/miscellaneous/MPI_Datatypes/lecture_MPI_Datatypes.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..088b82d3adff8d2871ec9d9294f6640900f6cbef
Binary files /dev/null and b/jacobi/mpi/miscellaneous/MPI_Datatypes/lecture_MPI_Datatypes.pdf differ
diff --git a/jacobi/mpi/miscellaneous/MPI_Datatypes/tools.c b/jacobi/mpi/miscellaneous/MPI_Datatypes/tools.c
new file mode 100644
index 0000000000000000000000000000000000000000..58d03366e6ba5027d07cd155f396375e8d991d0d
--- /dev/null
+++ b/jacobi/mpi/miscellaneous/MPI_Datatypes/tools.c
@@ -0,0 +1,105 @@
+/* ///////////////////////////////////////////////////////////////////// */
+/*!
+ \file
+ \brief Collection of useful functions for the MPI course.
+ 
+ This file provides some simple functions for memory allocation
+ (double and int 2D arrays) and for printing.
+ Function prototypes are already included here and should not be
+ repeated elsewhere.
+ Simply include this file after the main header section when needed:
+ \code
+ #include <stdio.h>
+ ...
+ #include "tools.c"
+ ...
+ int main()
+ {
+ .
+ .
+ .
+ }
+ \endcode
+ 
+ \author A. Mignone (mignone@to.infn.it)
+ \date   March 14, 2020
+ */
+/* ///////////////////////////////////////////////////////////////////// */
+double **Allocate_2DdblArray(int, int);
+int    **Allocate_2DintArray(int, int);
+void   Show_2DdblArray(double **, int, int, const char *);
+void   Show_2DintArray(int **, int, int, const char *);
+
+/* ********************************************************************* */
+double **Allocate_2DdblArray(int nx, int ny)
+/*
+ * Allocate memory for a double precision array with
+ * nx rows and ny columns
+ *********************************************************************** */
+{
+    int j;
+    double **buf;
+    
+    buf    = (double **)malloc (nx*sizeof(double *));
+    buf[0] = (double *) malloc (nx*ny*sizeof(double));
+    for (j = 1; j < nx; j++) buf[j] = buf[j-1] + ny;
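+    /* Rows point into one contiguous block, so buf[0] can be passed to MPI as a flat buffer */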
+    
+    return buf;
+}
+/* ********************************************************************* */
+int **Allocate_2DintArray(int nx, int ny)
+/*
+ * Allocate memory for an integer-type array with
+ * nx rows and ny columns
+ *********************************************************************** */
+{
+    int j;
+    int **buf;
+    
+    buf    = (int **)malloc (nx*sizeof(int *));
+    buf[0] = (int *) malloc (nx*ny*sizeof(int));
+    for (j = 1; j < nx; j++) buf[j] = buf[j-1] + ny;
+    
+    return buf;
+}
+
+
+/* ********************************************************************* */
+void Show_2DdblArray(double **A, int nx, int ny, const char *string)
+/*
+ *********************************************************************** */
+{
+    int i, j;
+    
+    printf ("%s\n",string);
+    printf ("------------------------------\n");
+    for (i = 0; i < nx; i++) {
+        for (j = 0; j < ny; j++) {
+            printf ("%8.2f  ", A[i][j]);
+        }
+        printf ("\n");
+    }
+    printf ("------------------------------\n");
+}
+/* ********************************************************************* */
+void Show_2DintArray(int **A, int nx, int ny, const char *string)
+/*
+ *********************************************************************** */
+{
+    int i, j;
+    
+    printf ("%s\n",string);
+    for (j = 0; j < ny; j++) printf ("-----");
+    printf ("\n");
+    
+    for (i = 0; i < nx; i++) {
+        for (j = 0; j < ny; j++) {
+            printf ("%03d  ", A[i][j]);
+        }
+        printf ("\n");
+    }
+    
+    for (j = 0; j < ny; j++) printf ("-----");
+    printf ("\n");
+}
diff --git a/jacobi/mpi/miscellaneous/Parallel_IO/lecture_Parallel_IO.pdf b/jacobi/mpi/miscellaneous/Parallel_IO/lecture_Parallel_IO.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..befbe43337c70f92e19b5af9857fda500df097fc
Binary files /dev/null and b/jacobi/mpi/miscellaneous/Parallel_IO/lecture_Parallel_IO.pdf differ
diff --git a/jacobi/mpi/miscellaneous/Parallel_IO/write_arr1D.c b/jacobi/mpi/miscellaneous/Parallel_IO/write_arr1D.c
new file mode 100644
index 0000000000000000000000000000000000000000..cf1cf14136b40806c3c3df95bac48f75ba21ffbc
--- /dev/null
+++ b/jacobi/mpi/miscellaneous/Parallel_IO/write_arr1D.c
@@ -0,0 +1,103 @@
+/* ///////////////////////////////////////////////////////////////////// */
+/*!
+ \file
+ \brief Writing of a 1D buffer in parallel.
+ 
+ Write a 1D buffer in parallel using five different versions.
+ For a contiguous data type, use:
+ 
+ VERSION == 1 employs shared file pointer
+ VERSION == 2 employs individual file pointer with offset computed
+ by the MPI_File_seek()
+ VERSION == 3 defines a file view with offset depending on the process
+ rank
+ VERSION == 4 similar to VERSION == 3, but using a contiguous MPI
+ datatype
+ 
+ A non-contiguous version is handled with VERSION == 5, which defines
+ a MPI vector datatype and a file view.
+ 
+ \author A. Mignone (mignone@to.infn.it)
+ \date   March 1, 2020
+ */
+/* ///////////////////////////////////////////////////////////////////// */
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <mpi.h>
+
+#define NELEM    3
+
+#define VERSION  5
+int main(int argc, char **argv)
+{
+    int i, rank, size;
+    double buf[NELEM];
+    char fname[] = "arr1D.bin";
+    MPI_File   fh;
+    MPI_Offset disp;
+    
+    /* --------------------------------------------------------
+     0. Initialize the MPI execution environment
+     -------------------------------------------------------- */
+    
+    MPI_Init (&argc, &argv);
+    MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+    MPI_Comm_size (MPI_COMM_WORLD, &size);
+    
+    /* --------------------------------------------------------
+     1. Initialize array
+     -------------------------------------------------------- */
+    
+    for (i = 0; i < NELEM; i++) buf[i] = rank;
+    
+    /* --------------------------------------------------------
+     2. Delete, re-open file and write
+     -------------------------------------------------------- */
+    
+    MPI_File_delete(fname, MPI_INFO_NULL);
+    MPI_File_open(  MPI_COMM_WORLD, fname,
+                  MPI_MODE_CREATE | MPI_MODE_RDWR,
+                  MPI_INFO_NULL, &fh  );
+    
+#if VERSION == 1     // Contiguous data, shared file pointer
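+    /* Collective write: each rank's block lands in the file in rank order via the shared file pointer */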
+    MPI_File_write_ordered(fh, buf, NELEM, MPI_DOUBLE, MPI_STATUS_IGNORE);
+#elif VERSION == 2   // Contiguous data, individual file pointer
+    disp = rank*NELEM*sizeof(double);   // In bytes
+    MPI_File_seek(fh, disp, MPI_SEEK_SET);
+    MPI_File_write(fh, buf, NELEM, MPI_DOUBLE, MPI_STATUS_IGNORE);
+#elif VERSION == 3   // Contiguous data, file view
+    disp = rank*NELEM*sizeof(double);
+    MPI_File_set_view(fh, disp, MPI_DOUBLE, MPI_DOUBLE, "native", MPI_INFO_NULL);
+    MPI_File_write(fh, buf, NELEM, MPI_DOUBLE, MPI_STATUS_IGNORE);
+#elif VERSION == 4   // Contiguous data, file view with MPI datatype
+    MPI_Datatype cntg_type;
+    
+    MPI_Type_contiguous(NELEM, MPI_DOUBLE, &cntg_type);
+    MPI_Type_commit(&cntg_type);
+    
+    disp = rank*NELEM*sizeof(double);
+    
+    MPI_File_set_view(fh, disp, MPI_BYTE, cntg_type, "native", MPI_INFO_NULL);
+    MPI_File_write(fh, buf, 1, cntg_type, MPI_STATUS_IGNORE);
+    MPI_Type_free(&cntg_type);
+#elif VERSION == 5   // Non-contiguous data, file view, vector type
+    MPI_Datatype vec_type;
+    
+    for (i = 0; i < NELEM; i++) buf[i] = rank + 0.1*i;
+    
+    MPI_Type_vector(NELEM, 1, size, MPI_DOUBLE, &vec_type);
+    MPI_Type_commit(&vec_type);
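+    /* The vector filetype interleaves ranks: element i of rank r lands at file offset (r + i*size) doubles */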
+    
+    disp = rank*sizeof(double);
+    MPI_File_set_view(fh, disp, MPI_DOUBLE, vec_type, "native", MPI_INFO_NULL);
+    MPI_File_write(fh, buf, NELEM, MPI_DOUBLE, MPI_STATUS_IGNORE);
+    MPI_Type_free(&vec_type);
+#endif
+    
+    MPI_File_close(&fh);
+    MPI_Finalize();
+    return 0;
+}
diff --git a/jacobi/mpi/miscellaneous/Parallel_IO/write_arr2D.c b/jacobi/mpi/miscellaneous/Parallel_IO/write_arr2D.c
new file mode 100644
index 0000000000000000000000000000000000000000..1aaf24504595d0ebf2e3d5e3212f03179c5955d1
--- /dev/null
+++ b/jacobi/mpi/miscellaneous/Parallel_IO/write_arr2D.c
@@ -0,0 +1,116 @@
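+/* ///////////////////////////////////////////////////////////////////// */
+/*!
+ \file
+ \brief Write a distributed 2D array to a single binary file.
+ 
+ Each process fills its local block of a global NX_GLOB x NY_GLOB array
+ and writes it collectively to "arr2D.bin" through a subarray file view.
+ */
+/* ///////////////////////////////////////////////////////////////////// */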
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <mpi.h>
+#include "tools.c"
+
+#define NDIM      2
+#define NX_GLOB   16
+#define NY_GLOB   8
+
+#define VERSION   2
+
+int main(int argc, char ** argv)
+{
+    int i, j, rank, size;
+    int nx, ny;
+    int nprocs[NDIM];
+    int gsizes[NDIM];
+    int lsizes[NDIM];
+    int periods[NDIM] = {0,0};
+    int coords[NDIM];
+    int start[NDIM];
+    double **A;
+    MPI_Datatype subarr_type;
+    MPI_Comm MPI_COMM_CART;
+    char fname[] = "arr2D.bin";
+    
+    /* --------------------------------------------------------
+     0. Initialize the MPI execution environment
+     -------------------------------------------------------- */
+    
+    MPI_Init (&argc, &argv);
+    MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+    MPI_Comm_size (MPI_COMM_WORLD, &size);
+    
+    /* --------------------------------------------------------
+     1. Create a 2D domain decomposition
+     -------------------------------------------------------- */
+    
+    /* -- 1a. Attempt to find a decomposition as close to square as possible -- */
+    
+    nprocs[0] = (int)sqrt(size);
+    nprocs[1] = size/nprocs[0];
+    if (nprocs[0]*nprocs[1] != size){
+        if (rank == 0) printf ("! Cannot decompose\n");
+        MPI_Finalize();
+        return 1;
+    }
+    
+    /* -- 1b. Create communicator -- */
+    
+    MPI_Cart_create(MPI_COMM_WORLD, NDIM, nprocs, periods, 0, &MPI_COMM_CART);
+    MPI_Cart_get(MPI_COMM_CART, NDIM, nprocs, periods, coords);
+    
+    gsizes[0] = NX_GLOB;
+    gsizes[1] = NY_GLOB;
+    
+    lsizes[0] = nx = NX_GLOB/nprocs[0];
+    lsizes[1] = ny = NY_GLOB/nprocs[1];
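+    /* Note: this example assumes NX_GLOB and NY_GLOB are divisible by nprocs[0] and nprocs[1] */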
+    
+    if (rank == 0){
+        printf ("Domain decomposed in %d X %d procs\n",nprocs[0],nprocs[1]);
+        printf ("Local grid size = %d X %d\n",lsizes[0], lsizes[1]);
+    }
+    
+    /* --------------------------------------------------------
+     2. Allocate memory and fill 2D array on local domain
+     -------------------------------------------------------- */
+    
+    A = Allocate_2DdblArray(ny,nx);
+    for (j = 0; j < ny; j++) {
+        for (i = 0; i < nx; i++) {
+            A[j][i] = rank;
+        }}
+    
+    /* --------------------------------------------------------
+     3. Create new datatypes
+     -------------------------------------------------------- */
+    
+    start[0] = coords[0]*lsizes[0];
+    start[1] = coords[1]*lsizes[1];
+    
+    MPI_Type_create_subarray (NDIM, gsizes, lsizes, start, MPI_ORDER_FORTRAN,
+                              MPI_DOUBLE, &subarr_type);
+    MPI_Type_commit (&subarr_type);
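+    /* With MPI_ORDER_FORTRAN, dimension 0 (x) varies fastest, matching A[j][i] storage where i runs fastest */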
+    
+    /* --------------------------------------------------------
+     4. Open file for writing
+     -------------------------------------------------------- */
+    
+    MPI_File_delete(fname, MPI_INFO_NULL);
+    
+    MPI_File fh;
+    MPI_Status status;
+    
+    MPI_File_open(MPI_COMM_CART, fname,
+                  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
+    MPI_File_set_view(fh, 0, MPI_DOUBLE, subarr_type, "native", MPI_INFO_NULL);
+    MPI_File_write_all(fh, A[0], nx*ny, MPI_DOUBLE, MPI_STATUS_IGNORE);
+    MPI_File_close(&fh);
+    
+    MPI_Type_free(&subarr_type);
+    MPI_Finalize();
+    return 0;
+}
diff --git a/jacobi/mpi/miscellaneous/cartesian b/jacobi/mpi/miscellaneous/cartesian
old mode 100755
new mode 100644
diff --git a/jacobi/mpi/miscellaneous/cartesian.cpp b/jacobi/mpi/miscellaneous/cartesian.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7b5a7e505a9d63ade4f4e8e35f48cb895e8e1660
--- /dev/null
+++ b/jacobi/mpi/miscellaneous/cartesian.cpp
@@ -0,0 +1,71 @@
+#include <mpi.h>
+#include <iostream>
+#include <cstdlib>   // strtol(), EXIT_FAILURE
+
+#define SIZE 2
+#define X    0
+#define Y    1
+
+int main(int argc, char **argv)
+{
+  using namespace std;
+  
+  int task, ntasks;
+  MPI_Init(&argc, &argv);
+  MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
+  MPI_Comm_rank(MPI_COMM_WORLD, &task);
+
+  if (argc <= 1)
+    {
+      if (!task)
+	{
+	  cout << "\n\t Usage: <executable> <number processes along X> \n" << endl;
+	}
+      // Abort from every rank: otherwise ranks != 0 would read argv[1] below
+      MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
+    }
+  
+  const int cartesian_grid_x = (int)strtol(argv[1], NULL, 10);
+  const int cartesian_grid_y = ((ntasks % cartesian_grid_x == 0) ? (ntasks / cartesian_grid_x) : -1);
+  if (cartesian_grid_y == -1)
+    {
+      if (!task)
+	{
+	  cout << "\n\t ntasks % cartesian_grid_x != 0 ... aborting ...\n" << endl;
+	}
+      // Abort from every rank: a -1 dimension would break MPI_Cart_create below
+      MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
+    }
+  
+  static int dims[SIZE] = {cartesian_grid_x, cartesian_grid_y};
+  static int periods[SIZE] = {0, 0};
+  static int reorder = 0;
+  MPI_Comm comm2d;
+  MPI_Cart_create(MPI_COMM_WORLD, SIZE, dims, periods, reorder, &comm2d);
+
+  int coords[SIZE];
+  MPI_Cart_coords(comm2d, task, SIZE, coords);
+
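+  // Neighbours along each dimension; with periods = {0,0}, out-of-grid neighbours are MPI_PROC_NULL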
+  int nbrright, nbrleft;
+  MPI_Cart_shift(comm2d, Y, 1, &nbrleft, &nbrright);
+
+  int nbrtop, nbrbottom;
+  MPI_Cart_shift(comm2d, X, 1, &nbrbottom, &nbrtop);
+
+  for (int rank=0 ; rank<ntasks; rank++)
+    {
+      MPI_Barrier(MPI_COMM_WORLD);
+      if (rank == task)
+	{
+	  cout << "\n\t Task: " << task << endl;
+	  cout << "\t\t coords[" << coords[X] << "," << coords[Y] << "]" << endl;
+	  cout << "\t\t nbrright: " << nbrright << " - nbrleft  : " << nbrleft << endl;
+	  cout << "\t\t nbrtop  : " << nbrtop   << " - nbrbottom: " << nbrbottom << endl;
+	  cout << endl;
+	}
+    }
+
+  MPI_Comm_free(&comm2d);
+  MPI_Finalize();
+  
+  return 0;
+}
diff --git a/jacobi/mpi/miscellaneous/color_ring.c b/jacobi/mpi/miscellaneous/color_ring.c
new file mode 100644
index 0000000000000000000000000000000000000000..70ff1b544ee674dd68d785f565de4958344e82fb
--- /dev/null
+++ b/jacobi/mpi/miscellaneous/color_ring.c
@@ -0,0 +1,89 @@
+/* ///////////////////////////////////////////////////////////////////// */
+/*!
+ \file
+ \brief Data communication in a ring.
+ 
+ Each process is assigned a unique string defining its color; the code
+ then performs "size" cyclic permutations by passing the color name to
+ the process on the right.
+ Three versions are provided:
+ 
+ i)   using MPI_Send / Recv()   (select VERSION == 1);
+ ii)  using MPI_Sendrecv()      (select VERSION == 2);
+ iii) using MPI_Isend / Irecv() (select VERSION == 3).
+ 
+ \author A. Mignone (mignone@to.infn.it)
+ \date   March 10, 2020
+ */
+/* ///////////////////////////////////////////////////////////////////// */
+
+
+#include <mpi.h>
+#include <stdio.h>
+
+#define STR_LENGTH 32
+#define VERSION 1
+
+int main(int argc, char ** argv)
+{
+    int rank, size;
+    int dstL, dstR;
+    char recv_buf[STR_LENGTH];
+    char send_buf[STR_LENGTH];
+    MPI_Request req[2];   /* Requests for the non-blocking version */
+    
+    /* -- 1. Initialize the MPI execution environment -- */
+    
+    MPI_Init(&argc, &argv);
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+    
+    /* -- 2. Define color -- */
+    
+    if (rank == 0) sprintf (send_buf, "red");
+    if (rank == 1) sprintf (send_buf, "blue");
+    if (rank == 2) sprintf (send_buf, "green");
+    if (rank == 3) sprintf (send_buf, "black");
+    if (rank == 4) sprintf (send_buf, "orange");
+    if (rank == 5) sprintf (send_buf, "yellow");
+    
+    if (size > 6){
+        if (rank == 0) printf ("! Cannot execute with more than six processes\n");
+        MPI_Finalize();
+        return 0;
+    }
+    
+    /* -- 3. Determine neighbour processors -- */
+    
+    dstL  = rank - 1;
+    dstR  = rank + 1;
+    if (dstL < 0)     dstL = size-1;
+    if (dstR >= size) dstR = 0;
+    
+    int n;
+    for (n = 0; n < size; n++){
+#if VERSION == 1
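+        /* Blocking send before receive relies on MPI buffering; may deadlock for large messages */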
+        MPI_Send(send_buf, STR_LENGTH, MPI_CHAR, dstR, 0, MPI_COMM_WORLD);
+        MPI_Recv(recv_buf, STR_LENGTH, MPI_CHAR, dstL, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
+#elif VERSION == 2
+        MPI_Sendrecv(send_buf, STR_LENGTH, MPI_CHAR, dstR, 0,
+                     recv_buf, STR_LENGTH, MPI_CHAR, dstL, 0, MPI_COMM_WORLD,
+                     MPI_STATUS_IGNORE);
+#elif VERSION == 3
+        MPI_Isend(send_buf, STR_LENGTH, MPI_CHAR, dstR, 0, MPI_COMM_WORLD, &req[0]);
+        MPI_Irecv(recv_buf, STR_LENGTH, MPI_CHAR, dstL, 0, MPI_COMM_WORLD, &req[1]);
+        MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
+#endif
+        
+        /* -- Replace color -- */
+        
+        sprintf (send_buf,"%s",recv_buf);
+        if (rank == 0){
+            printf ("Proc #%d, I've changed my color is %s\n", rank, send_buf);
+        }
+    }
+    
+    MPI_Finalize();
+    return 0;
+}
diff --git a/jacobi/mpi/miscellaneous/lecture_color_ring.pdf b/jacobi/mpi/miscellaneous/lecture_color_ring.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..dc1f501c160a2157484c67d32cf6a185a826eb40
Binary files /dev/null and b/jacobi/mpi/miscellaneous/lecture_color_ring.pdf differ
diff --git a/jacobi/mpi/miscellaneous/subarray b/jacobi/mpi/miscellaneous/subarray
old mode 100755
new mode 100644