HPC_SCHOOL / HPC_SCHOOL_2024 / Commits / 39c018b4

Commit 39c018b4, authored 1 year ago by David Goz:
mpi_comp_comm bug fixing
Parent: 26284f2f
Showing 2 changed files with 74 additions and 39 deletions:

  jacobi/mpi/comp_comm/src/jacobi_2D_mpi_comp_comm.c   +71 −36
  jacobi/mpi/comp_comm/src/tools.c                       +3 −3
jacobi/mpi/comp_comm/src/jacobi_2D_mpi_comp_comm.c   +71 −36
@@ -20,6 +20,7 @@ typedef struct MyGrid
 {
   int start[NDIM];
   int end[NDIM];
+  int dim[NDIM];
 } myDomain;
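
The new dim[NDIM] field records each rank's interior extent per direction, which later hunks use both for the boundary bookkeeping and for sizing the local arrays. A minimal sketch of how these fields are presumably meant to be read (only NDIM, X, Y and the struct layout come from the diff; the decomposition values below are invented for illustration):

    /* Sketch, not repository code: start/end hold the global index range
       owned by one rank, dim its interior extent, and dim + 2 ghost
       layers give the local allocation size.                             */
    #include <stdio.h>

    #define NDIM 2
    enum { X = 0, Y = 1 };

    typedef struct MyGrid
    {
      int start[NDIM];
      int end[NDIM];
      int dim[NDIM];
    } myDomain;

    int main(void)
    {
      /* hypothetical rank whose interior spans indices 1..64 in X and 1..32 in Y */
      myDomain d = { .start = {1, 1}, .end = {65, 33} };

      for (int n = 0; n < NDIM; n++)
        d.dim[n] = d.end[n] - d.start[n];      /* interior points per direction */

      printf("local interior: %d x %d\n", d.dim[X], d.dim[Y]);
      printf("local allocation with one ghost layer per side: %d x %d\n",
             d.dim[X] + 2, d.dim[Y] + 2);

      return 0;
    }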
@@ -173,6 +174,9 @@ int main(int argc, char **argv)
       /* boundaries */
       ThisTask.domain.start[dim] = ((ThisTask.domain.start[dim] == 0) ? NGHOST : ThisTask.domain.start[dim]);
       ThisTask.domain.end[dim]   = ((ThisTask.domain.end[dim] == NX_GLOB + 1) ? NX_GLOB : ThisTask.domain.end[dim]);
+
+      ThisTask.domain.dim[X] = (ThisTask.domain.end[X] - ThisTask.domain.start[X]);
+      ThisTask.domain.dim[Y] = (ThisTask.domain.end[Y] - ThisTask.domain.start[Y]);
     }

 #if defined(DEBUG)
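
Worked example of the clamping above (the numbers are assumptions for illustration, not taken from the collapsed parts of the diff): with NGHOST = 1 and NX_GLOB = 128, a rank on the physical left/bottom edge has start[dim] == 0 and is remapped to NGHOST = 1, its first interior index, while a rank on the right/top edge has end[dim] == NX_GLOB + 1 = 129 and is remapped to NX_GLOB = 128. Ranks away from the physical boundary keep start/end unchanged. The two added lines then record the per-direction interior extent as end - start, which is what the new local allocation below relies on.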
@@ -207,26 +211,25 @@ int main(int argc, char **argv)
      -------------------------------------------------------- */
   const int ibeg = NGHOST;
-  const int iend = ibeg + NX_GLOB - 1;
+  /* const int iend = ibeg + NX_GLOB - 1; */
-  const int nx = iend - ibeg + 1;
+  /* const int nx = iend - ibeg + 1; */
-  const int nx_tot = nx + 2 * NGHOST;
+  /* const int nx_tot = nx + 2 * NGHOST; */

   const int jbeg = NGHOST;
-  const int jend = jbeg + NY_GLOB - 1;
+  /* const int jend = jbeg + NY_GLOB - 1; */
-  const int ny = jend - jbeg + 1;
+  /* const int ny = jend - jbeg + 1; */
-  const int ny_tot = ny + 2 * NGHOST;
+  /* const int ny_tot = ny + 2 * NGHOST; */

-  if (rank == MASTER)
+  /* if (rank == MASTER) */
-    {
+  /* { */
-      printf("\n\t Grid indices:");
+  /* printf("\n\t Grid indices:"); */
-      printf("\n\t\t ibeg, iend = %d, %d; nx_tot = %d", ibeg, iend, nx_tot);
+  /* printf("\n\t\t ibeg, iend = %d, %d; nx_tot = %d" ,ibeg, iend, nx_tot); */
-      printf("\n\t\t jbeg, jend = %d, %d; ny_tot = %d\n\n", jbeg, jend, ny_tot);
+  /* printf("\n\t\t jbeg, jend = %d, %d; ny_tot = %d\n\n",jbeg, jend, ny_tot); */
-    }
+  /* } */

   /* --------------------------------------------------------
-     2. Generate grid, allocate memory
-        Not optimized because the grids are (unnecessarily)
-        replicated across MPI processes
+     2. Generate grids, allocate memory
+        distributed across MPI processes
      -------------------------------------------------------- */

   /* memory allocation */
@@ -237,23 +240,24 @@ int main(int argc, char **argv)
   /* initial conditions */
   for (int i = 0; i < (NX_GLOB + 2 * NGHOST); i++) xg[i] = xbeg + (i - ibeg + 1) * delta[X];
   for (int j = 0; j < (NY_GLOB + 2 * NGHOST); j++) yg[j] = ybeg + (j - jbeg + 1) * delta[Y];

-  MyData *x = xg; /* Global and local grids are the same */
-  MyData *y = yg; /* for serial version of the code       */

-  /* grids memory allocation */
-  MyData **phi  = Allocate_2DdblArray(ny_tot, nx_tot);
-  MyData **phi0 = Allocate_2DdblArray(ny_tot, nx_tot);
+  /* grids memory allocation
+     distributed across MPI processes */
+  MyData **phi  = Allocate_2DdblArray(ThisTask.domain.dim[Y] + 2, ThisTask.domain.dim[X] + 2);
+  MyData **phi0 = Allocate_2DdblArray(ThisTask.domain.dim[Y] + 2, ThisTask.domain.dim[X] + 2);

   /* --------------------------------------------------------
-     3. Initialize solution array to 0
+     3. Set boundary conditions
      -------------------------------------------------------- */
-  for (int j = jbeg; j <= jend; j++)
-    for (int i = ibeg; i <= iend; i++)
-      {
-        phi0[j][i] = 0.0;
-        phi[j][i]  = 0.0;
-      }
+  BoundaryConditions(phi0, xg, yg, nx, ny);
+  BoundaryConditions(phi,  xg, yg, nx, ny);

   /* --------------------------------------------------------
      4. Main iteration cycle
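
Two things change together here: the local arrays are now sized from the per-rank extent (dim plus 2 ghost layers per direction) instead of the global nx_tot x ny_tot, and the explicit zero-initialization loop is gone, presumably because the allocator now zero-fills the block (see the malloc to calloc change in tools.c below) while BoundaryConditions fills the physical edges. A minimal sketch of the resulting ghost-extended layout, under those assumptions (the sizes and the VLA-style allocation below are illustrative, not repository code):

    /* Sketch: the ghost-extended local array is (dim[Y]+2) x (dim[X]+2);
       indices 0 and dim+1 are ghost cells, 1..dim are interior cells.    */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
      const int dimX = 4, dimY = 3;               /* hypothetical local extent */
      const int nx = dimX + 2, ny = dimY + 2;     /* ghost-extended sizes      */

      /* contiguous allocation, zero-initialized like calloc in tools.c */
      double (*phi)[nx] = calloc(ny, sizeof *phi);
      if (phi == NULL) return EXIT_FAILURE;

      for (int j = 1; j <= dimY; j++)             /* touch interior cells only */
        for (int i = 1; i <= dimX; i++)
          phi[j][i] = 1.0;

      /* the ghost frame (row/column 0 and dim+1) is still 0.0 from calloc */
      printf("corner ghost = %g, first interior = %g\n", phi[0][0], phi[1][1]);

      free(phi);
      return 0;
    }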
@@ -262,8 +266,6 @@ int main(int argc, char **argv)
   const double time_start = MPI_Wtime();

   /* -- 4a. Set boundary conditions first -- */
-  BoundaryConditions(phi0, x, y, nx, ny);
-  BoundaryConditions(phi, x, y, nx, ny);

   MyData err = 1.0;
   /* iterations */
@@ -342,10 +344,42 @@ void BoundaryConditions(MyData **const restrict phi,
                         MyData *const restrict x,
                         MyData *const restrict y,
                         const int nx,
-                        const int ny)
+                        const int ny,
+                        Task *const restrict ThisTask)
 /*************************************************************************/
 {
+  /* left */
+  if (ThisTask->nbrleft == MPI_PROC_NULL) /* no left neighbor */
+    {
+      for (int j = ThisTask->domain.start[X]; j < ThisTask->domain.end[X]; j++)
+        phi[j][0] = (1.0 - y[j]);
+    }
+
+  /* right */
+  if (ThisTask->nbrright == MPI_PROC_NULL) /* no right neighbor */
+    {
+      for (int j = ThisTask->domain.start[X]; j < ThisTask->domain.end[X]; j++)
+        phi[j][ThisTask->domain.end[Y] + 1] = (y[j] * y[j]);
+    }
+
+  /* bottom */
+  if (ThisTask->nbrbottom == MPI_PROC_NULL) /* no bottom neighbor */
+    {
+      for (int i = ThisTask->domain.start[Y]; i < ThisTask->domain.end[Y]; i++)
+        phi[0][i] = (1.0 - x[i]);
+    }
+
+  /* top */
+  if (ThisTask->nbrtop == MPI_PROC_NULL) /* no top neighbor */
+    {
+      for (int i = ThisTask->domain.start[Y]; i < ThisTask->domain.end[Y]; i++)
+        phi[ThisTask->domain.end[X] + 1][i] = x[i];
+    }
+
+  return;
+
   const int ibeg = NGHOST;
   const int iend = ibeg + nx - 1;
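
The tests against MPI_PROC_NULL make each rank apply a physical boundary condition only on the sides where it has no neighbor. The neighbor ranks themselves (nbrleft, nbrright, nbrbottom, nbrtop) are not set up in this diff; a common way to obtain them is MPI_Cart_shift on a non-periodic Cartesian communicator, which returns MPI_PROC_NULL past the domain edge. A hedged sketch of that setup (the field names match the diff; the Task layout and the dimension-to-direction mapping are assumptions for illustration):

    #include <mpi.h>
    #include <stdio.h>

    typedef struct Task
    {
      int rank;
      int nbrleft, nbrright, nbrbottom, nbrtop;
    } Task;

    int main(int argc, char **argv)
    {
      MPI_Init(&argc, &argv);

      int size;
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      /* 2D non-periodic Cartesian decomposition */
      int dims[2] = {0, 0}, periods[2] = {0, 0};
      MPI_Dims_create(size, 2, dims);

      MPI_Comm cart;
      MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart);

      Task ThisTask;
      MPI_Comm_rank(cart, &ThisTask.rank);

      /* Ranks beyond a non-periodic edge come back as MPI_PROC_NULL,
         which is exactly what BoundaryConditions checks for.          */
      MPI_Cart_shift(cart, 0, 1, &ThisTask.nbrbottom, &ThisTask.nbrtop);
      MPI_Cart_shift(cart, 1, 1, &ThisTask.nbrleft,  &ThisTask.nbrright);

      if (ThisTask.nbrleft == MPI_PROC_NULL)
        printf("rank %d sits on the physical left boundary\n", ThisTask.rank);

      MPI_Comm_free(&cart);
      MPI_Finalize();
      return 0;
    }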
@@ -402,6 +436,7 @@ void JacobiAlgorithm(MyData **const restrict Phi,
   return;
 }

 void Jacobi_Communication(MyData **const restrict Phi,
                           MyData **const restrict Phi0,
                           MyData *const restrict error,
                           ...
This diff is collapsed.
jacobi/mpi/comp_comm/src/tools.c   +3 −3
@@ -12,14 +12,14 @@
 /* ********************************************************************* */
 MyData **Allocate_2DdblArray(const int nx, const int ny)
 /*
- * Allocate memory for a double precision array with
+ * Allocate contiguous memory for a MyData array with
  * nx rows and ny columns
  *********************************************************************** */
 {
-  MyData **buf = malloc(nx * sizeof(MyData *));
+  MyData **buf = calloc(nx, sizeof(MyData *));
   assert(buf != NULL);

-  buf[0] = (MyData *)malloc(nx * ny * sizeof(MyData));
+  buf[0] = (MyData *)calloc(nx * ny, sizeof(MyData));
   assert(buf[0] != NULL);

   for (int j = 1; j < nx; j++)
   ...
This diff is collapsed.
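
The switch from malloc to calloc keeps the same contiguous layout (an array of nx row pointers plus one nx*ny block) but returns zero-initialized memory, which is presumably why the explicit zero-initialization loop disappeared from jacobi_2D_mpi_comp_comm.c above. A sketch of the full allocator and a hypothetical matching deallocator (the row-pointer loop is an assumption about what the collapsed lines do; Free_2DdblArray and the MyData typedef are illustrative, not shown in this diff):

    #include <assert.h>
    #include <stdlib.h>

    typedef double MyData;   /* assumption: MyData is a floating-point type */

    MyData **Allocate_2DdblArray(const int nx, const int ny)
    {
      MyData **buf = calloc(nx, sizeof(MyData *));
      assert(buf != NULL);

      buf[0] = (MyData *)calloc(nx * ny, sizeof(MyData));
      assert(buf[0] != NULL);

      /* presumably what the collapsed loop does: each row starts
         ny elements past the previous one inside the single block */
      for (int j = 1; j < nx; j++)
        buf[j] = buf[j - 1] + ny;

      return buf;
    }

    /* hypothetical counterpart: two frees suffice for the contiguous layout */
    void Free_2DdblArray(MyData **buf)
    {
      free(buf[0]);
      free(buf);
    }

    int main(void)
    {
      MyData **phi = Allocate_2DdblArray(4, 5);  /* 4 rows, 5 columns, zeroed */
      phi[3][4] = 1.0;                           /* last element, in bounds   */
      Free_2DdblArray(phi);
      return 0;
    }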