Commit ea94cf82, authored Sep 8, 2023 by Giovanni Lacopo

Memory bug fixing
Parent: 011d30c6
Showing 3 changed files:
  gridding_cpu.c     +21 −30   (21 additions, 30 deletions)
  gridding_nccl.cpp  +24 −31   (24 additions, 31 deletions)
  gridding_rccl.cpp  +22 −31   (22 additions, 31 deletions)

  67 additions and 92 deletions in total
gridding_cpu.c  (+21 −30)
@@ -84,17 +84,15 @@ void gridding_data()
     // find the largest value in histo_send[]
     //
-    uint Nsec = histo_send[0];
-    for (uint isector = 1; isector < nsectors; isector++)
-        Nsec = (Nsec < histo_send[isector] ? histo_send[isector] : Nsec);
+    for (uint isector = 0; isector < nsectors; isector++)
+    {
+        double start = CPU_TIME_wt;
+        uint Nsec = histo_send[isector];
         uint Nweightss = Nsec * metaData.polarisations;
         uint Nvissec = Nweightss * metaData.freq_per_chan;
-    // allocate sector arrays
-    // note: we use the largest allocation among all sectors
-    //
-    unsigned long long int mem_size = (Nsec * 3) * sizeof(double_t) + (Nvissec * 2 + Nweightss) * sizeof(float_t);
         double_t *memory = (double *)malloc((Nsec * 3) * sizeof(double_t) +
                                             (Nvissec * 2 + Nweightss) * sizeof(float_t));

@@ -109,12 +107,6 @@ void gridding_data()
         float_t *visimgs = (float_t *)visreals + Nvissec;
-    for (uint isector = 0; isector < nsectors; isector++)
-    {
-        double start = CPU_TIME_wt;
-        memset(memory, 0, mem_size);
         // select data for this sector
         uint icount = 0;

@@ -183,14 +175,14 @@ void gridding_data()
         printf("Processing sector %ld\n", isector);
 #endif
-        start = CPU_TIME_wt;
         double *stacking_target_array;
         if (size > 1)
             stacking_target_array = gridss;
         else
             stacking_target_array = grid;
+        start = CPU_TIME_wt;
         //We have to call different GPUs per MPI task!!! [GL]
         wstack(param.num_w_planes,
                Nsec,

@@ -259,9 +251,8 @@ void gridding_data()
             memset(gridss, 0, 2 * param.num_w_planes * xaxis * yaxis * sizeof(double));
         }
-    }
-    free(memory);
+        free(memory);
+    }
     if (size > 1)
     {
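The fix is the same in all three files: the old code sized one buffer for the largest sector (the Nsec maximum plus mem_size) and cleared it with memset on every iteration, while the new code allocates and frees a buffer sized exactly for each sector inside the per-sector loop. A minimal sketch of the new pattern, not repository code: sector_points and n_sectors stand in for histo_send[] and nsectors, and only the three per-point coordinate arrays are modelled.

```c
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    unsigned int sector_points[] = { 5, 17, 9 };   /* like histo_send[] */
    unsigned int n_sectors = 3;

    for (unsigned int isector = 0; isector < n_sectors; isector++) {
        size_t Nsec = sector_points[isector];
        /* sized exactly for this sector: (Nsec*3)*sizeof(double) */
        double *memory = malloc(Nsec * 3 * sizeof(double));
        if (memory == NULL) {
            fprintf(stderr, "allocation failed for sector %u\n", isector);
            return 1;
        }
        /* ... fill and grid this sector's data ... */
        free(memory);   /* one free per malloc, inside the loop */
    }
    return 0;
}
```

This trades one worst-case buffer, plus a memset of mem_size bytes every iteration, for exact-size allocations that the loop body then fills; moving free(memory) inside the loop keeps one free matched to each malloc.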
gridding_nccl.cpp  (+24 −31)
@@ -68,30 +68,6 @@ void gridding_data(){
     // find the largest value in histo_send[]
     //
-    uint Nsec = histo_send[0];
-    for (uint isector = 1; isector < nsectors; isector++)
-        Nsec = (Nsec < histo_send[isector] ? histo_send[isector] : Nsec);
-    uint Nweightss = Nsec * metaData.polarisations;
-    uint Nvissec = Nweightss * metaData.freq_per_chan;
-    // allocate sector arrays
-    // note: we use the largest allocation among all sectors
-    //
-    unsigned long long int mem_size = (Nsec * 3) * sizeof(double_t) + (Nvissec * 2 + Nweightss) * sizeof(float_t);
-    double_t *memory = (double *)malloc((Nsec * 3) * sizeof(double_t) + (Nvissec * 2 + Nweightss) * sizeof(float_t));
-    if (memory == NULL)
-        shutdown_wstacking(NOT_ENOUGH_MEM_STACKING, "Not enough memory for stacking", __FILE__, __LINE__);
-    double_t *uus = (double *)memory;
-    double_t *vvs = (double *)uus + Nsec;
-    double_t *wws = (double *)vvs + Nsec;
-    float_t *weightss = (float_t *)(wws + Nsec);
-    float_t *visreals = weightss + Nweightss;
-    float_t *visimgs = visreals + Nvissec;
     //Initialize nccl

@@ -129,7 +105,23 @@ void gridding_data(){
         double start = CPU_TIME_wt;
-        memset(memory, 0, mem_size);
+        uint Nsec = histo_send[isector];
+        uint Nweightss = Nsec * metaData.polarisations;
+        uint Nvissec = Nweightss * metaData.freq_per_chan;
+        double_t *memory = (double *)malloc((Nsec * 3) * sizeof(double_t) + (Nvissec * 2 + Nweightss) * sizeof(float_t));
+        if (memory == NULL)
+            shutdown_wstacking(NOT_ENOUGH_MEM_STACKING, "Not enough memory for stacking", __FILE__, __LINE__);
+        double_t *uus = (double_t *)memory;
+        double_t *vvs = (double_t *)uus + Nsec;
+        double_t *wws = (double_t *)vvs + Nsec;
+        float_t *weightss = (float_t *)((double_t *)wws + Nsec);
+        float_t *visreals = (float_t *)weightss + Nweightss;
+        float_t *visimgs = (float_t *)visreals + Nvissec;
         // select data for this sector
         uint icount = 0;

@@ -246,10 +238,10 @@ void gridding_data(){
         // int target_rank = (int)isector; it implied that size >= nsectors
         int target_rank = (int)(isector % size);
-        start = CPU_TIME_wt;
         cudaStreamSynchronize(stream_reduce);
+        start = CPU_TIME_wt;
         ncclReduce(gridss_gpu, grid_gpu, size_of_grid, ncclDouble, ncclSum, target_rank, comm, stream_reduce);
         cudaStreamSynchronize(stream_reduce);

@@ -260,11 +252,12 @@ void gridding_data(){
             memset(gridss, 0, 2 * param.num_w_planes * xaxis * yaxis * sizeof(double));
         }
+        free(memory);
     }
     //Copy data back from device to host (to be deleted in next steps)
-    free(memory);
     cudaMemcpyAsync(grid, grid_gpu, 2 * param.num_w_planes * xaxis * yaxis * sizeof(double), cudaMemcpyDeviceToHost, stream_reduce);
     MPI_Barrier(MPI_COMM_WORLD);
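The block each iteration now allocates is carved into six sub-arrays by pointer arithmetic, as in the added lines above: three double arrays first, then three float arrays, so every sub-array stays naturally aligned. A minimal self-contained sketch of that layout; the sizes below are illustrative stand-ins for the metaData factors.

```c
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    size_t Nsec = 4;                    /* points in this sector          */
    size_t Nweightss = Nsec * 2;        /* e.g. Nsec * polarisations      */
    size_t Nvissec = Nweightss * 8;     /* e.g. Nweightss * freq_per_chan */

    double *memory = malloc((Nsec * 3) * sizeof(double) +
                            (Nvissec * 2 + Nweightss) * sizeof(float));
    if (memory == NULL)
        return 1;

    double *uus = memory;                       /* u coordinates */
    double *vvs = uus + Nsec;                   /* v coordinates */
    double *wws = vvs + Nsec;                   /* w coordinates */
    float *weightss = (float *)(wws + Nsec);    /* weights       */
    float *visreals = weightss + Nweightss;     /* visibility re */
    float *visimgs  = visreals + Nvissec;       /* visibility im */

    visimgs[Nvissec - 1] = 0.0f;    /* last element of the block is valid */
    printf("one malloc, six arrays: %p .. %p\n",
           (void *)memory, (void *)(visimgs + Nvissec));
    free(memory);
    return 0;
}
```

Placing the doubles before the floats means the float region starts on a double boundary, so no padding or alignment fix-up is needed between the sub-arrays.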
gridding_rccl.cpp  (+22 −31)
@@ -64,32 +64,6 @@ void gridding_data(){
     if (rank == 0)
         printf("RESOLUTION = %f rad, %f arcsec\n", resolution, resolution_asec);
-    // find the largest value in histo_send[]
-    //
-    uint Nsec = histo_send[0];
-    for (uint isector = 1; isector < nsectors; isector++)
-        Nsec = (Nsec < histo_send[isector] ? histo_send[isector] : Nsec);
-    uint Nweightss = Nsec * metaData.polarisations;
-    uint Nvissec = Nweightss * metaData.freq_per_chan;
-    // allocate sector arrays
-    // note: we use the largest allocation among all sectors
-    //
-    unsigned long long int mem_size = (Nsec * 3) * sizeof(double_t) + (Nvissec * 2 + Nweightss) * sizeof(float_t);
-    double_t *memory = (double *)malloc((Nsec * 3) * sizeof(double_t) + (Nvissec * 2 + Nweightss) * sizeof(float_t));
-    if (memory == NULL)
-        shutdown_wstacking(NOT_ENOUGH_MEM_STACKING, "Not enough memory for stacking", __FILE__, __LINE__);
-    double_ty *uus = (double *)memory;
-    double_ty *vvs = (double *)uus + Nsec;
-    double_ty *wws = (double *)vvs + Nsec;
-    float_ty *weightss = (float_ty *)(wws + Nsec);
-    float_ty *visreals = weightss + Nweightss;
-    float_ty *visimgs = visreals + Nvissec;
     //Initialize nccl

@@ -127,7 +101,23 @@ void gridding_data(){
         double start = CPU_TIME_wt;
-        memset(memory, 0, mem_size);
+        uint Nsec = histo_send[isector];
+        uint Nweightss = Nsec * metaData.polarisations;
+        uint Nvissec = Nweightss * metaData.freq_per_chan;
+        double_t *memory = (double *)malloc((Nsec * 3) * sizeof(double_t) + (Nvissec * 2 + Nweightss) * sizeof(float_t));
+        if (memory == NULL)
+            shutdown_wstacking(NOT_ENOUGH_MEM_STACKING, "Not enough memory for stacking", __FILE__, __LINE__);
+        double_t *uus = (double_t *)memory;
+        double_t *vvs = (double_t *)uus + Nsec;
+        double_t *wws = (double_t *)vvs + Nsec;
+        float_t *weightss = (float_t *)((double_t *)wws + Nsec);
+        float_t *visreals = (float_t *)weightss + Nweightss;
+        float_t *visimgs = (float_t *)visreals + Nvissec;
         // select data for this sector
         uint icount = 0;

@@ -244,10 +234,11 @@ void gridding_data(){
         // int target_rank = (int)isector; it implied that size >= nsectors
         int target_rank = (int)(isector % size);
-        start = CPU_TIME_wt;
         hipStreamSynchronize(stream_reduce);
+        start = CPU_TIME_wt;
         ncclReduce(gridss_gpu, grid_gpu, size_of_grid, ncclDouble, ncclSum, target_rank, comm, stream_reduce);
         hipStreamSynchronize(stream_reduce);

@@ -258,11 +249,11 @@ void gridding_data(){
             memset(gridss, 0, 2 * param.num_w_planes * xaxis * yaxis * sizeof(double));
         }
+        free(memory);
     }
     //Copy data back from device to host (to be deleted in next steps)
-    free(memory);
     hipMemcpyAsync(grid, grid_gpu, 2 * param.num_w_planes * xaxis * yaxis * sizeof(double), hipMemcpyDeviceToHost, stream_reduce);
     MPI_Barrier(MPI_COMM_WORLD);
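Both GPU variants also move start = CPU_TIME_wt to after the first stream synchronization, so the measured interval covers only the ncclReduce rather than the drain of work enqueued earlier on the stream. A plain-C sketch of the idea, with a hypothetical wait_for_device() standing in for cudaStreamSynchronize/hipStreamSynchronize and reduce_grids() for the timed collective.

```c
#include <stdio.h>
#include <time.h>

static double wtime(void)               /* wall-clock, like CPU_TIME_wt */
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec + 1e-9 * ts.tv_nsec;
}

static void wait_for_device(void) { /* hypothetical: drain pending work  */ }
static void reduce_grids(void)    { /* hypothetical: the timed operation */ }

int main(void)
{
    wait_for_device();        /* finish earlier asynchronous work first...  */
    double start = wtime();   /* ...then start the clock                    */
    reduce_grids();
    wait_for_device();        /* the reduce is asynchronous; wait for it    */
    printf("reduce took %g s\n", wtime() - start);
    return 0;
}
```

Starting the clock before the first synchronization would charge the reduce with whatever the stream was still processing, which is exactly what the relocated timer avoids.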