Claudio Gheller / HPC_Imaging · Commits

Commit d05e7f1e, authored 3 years ago by Luca Tornatore
fixed call to numa_allocate_shared_windows()
parent f26545d8
Showing 2 changed files with 55 additions and 55 deletions:

  init.c  +1  −1
  numa.c  +54 −54
init.c  +1 −1  (view file @ d05e7f1e)
...
@@ -366,7 +366,7 @@ void allocate_memory() {
   gridss_real = (double*) calloc( size_of_grid/2, sizeof(double) );
   gridss_img  = (double*) calloc( size_of_grid/2, sizeof(double) );
-  numa_allocate_shared_windows(  Me, size_of_grid*sizeof(double)*1.1, size_of_grid*sizeof(double)*1.1 );
+  numa_allocate_shared_windows( &Me, size_of_grid*sizeof(double)*1.1, size_of_grid*sizeof(double)*1.1 );
   // Create destination slab
   grid = (double*) calloc( size_of_grid, sizeof(double) );
...
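Note: the single line changed in init.c is the fix named in the commit message: the call now passes &Me so that the argument matches the map_t * parameter of numa_allocate_shared_windows(). Assuming Me is held by value at this call site (its declaration is not part of the diff), the address-of operator is what makes the call type-correct. A minimal sketch of the mismatch, with a reduced, hypothetical map_t:

#include <stddef.h>

typedef struct { int SHMEMl; } map_t;                  /* reduced, hypothetical map_t */

static int numa_allocate_shared_windows( map_t *me, size_t size, size_t host_size )
{
    (void)size; (void)host_size;
    return me->SHMEMl;                                  /* dereferencing requires a valid pointer */
}

int main( void )
{
    map_t Me = { 0 };
    /* numa_allocate_shared_windows( Me, 1024, 1024 );     would not compile: map_t is not map_t* */
    return numa_allocate_shared_windows( &Me, 1024, 1024 );   /* fixed call: pass &Me */
}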
numa.c  +54 −54  (view file @ d05e7f1e)
...
@@ -75,10 +75,10 @@ int numa_init( int Rank, int Size, MPI_Comm *MYWORLD, map_t *Me )
 }
-int numa_allocate_shared_windows( map_t *Me, MPI_Aint size, MPI_Aint host_size )
+int numa_allocate_shared_windows( map_t *me, MPI_Aint size, MPI_Aint host_size )
 {
-  int SHMEMl = Me->SHMEMl;
+  int SHMEMl = me->SHMEMl;
   MPI_Info winfo;
   MPI_Info_create( &winfo );
...
@@ -100,38 +100,38 @@ int numa_allocate_shared_windows( map_t *Me, MPI_Aint size, MPI_Aint host_size )
   win_host_size = size;
-  Me->win.size = win_host_size;
-  MPI_Win_allocate_shared( Me->win.size, 1, winfo, *Me->COMM[SHMEMl], &(Me->win.ptr), &(Me->win.win) );
+  me->win.size = win_host_size;
+  MPI_Win_allocate_shared( me->win.size, 1, winfo, *me->COMM[SHMEMl], &(me->win.ptr), &(me->win.win) );
-  MPI_Aint wsize = ( Me->Rank[SHMEMl] == 0 ? win_hostmaster_size : 0 );
-  MPI_Win_allocate_shared( wsize, 1, winfo, *Me->COMM[SHMEMl], &win_hostmaster_ptr, &win_hostmaster );
+  MPI_Aint wsize = ( me->Rank[SHMEMl] == 0 ? win_hostmaster_size : 0 );
+  MPI_Win_allocate_shared( wsize, 1, winfo, *me->COMM[SHMEMl], &win_hostmaster_ptr, &win_hostmaster );
-  Me->swins = (win_t*)malloc( Me->Ntasks[SHMEMl]*sizeof(win_t) );
-  Me->swins[ Me->Rank[SHMEMl]] = Me->win;
-  //Me->swins = (win_t*)malloc(Me->Ntasks[SHMEMl]*sizeof(win_t));
+  me->swins = (win_t*)malloc( me->Ntasks[SHMEMl]*sizeof(win_t) );
+  me->swins[ me->Rank[SHMEMl]] = me->win;
+  //me->swins = (win_t*)malloc(me->Ntasks[SHMEMl]*sizeof(win_t));
   // get the addresses of all the windows from my siblings
   // at my shared-memory level
   //
-  for( int t = 0; t < Me->Ntasks[SHMEMl]; t++ )
-    if( t != Me->Rank[SHMEMl] )
-      MPI_Win_shared_query( Me->win.win, t, &(Me->swins[t].size), &(Me->swins[t].disp), &(Me->swins[t].ptr) );
+  for( int t = 0; t < me->Ntasks[SHMEMl]; t++ )
+    if( t != me->Rank[SHMEMl] )
+      MPI_Win_shared_query( me->win.win, t, &(me->swins[t].size), &(me->swins[t].disp), &(me->swins[t].ptr) );
-  if( Me->Rank[SHMEMl] != 0 )
+  if( me->Rank[SHMEMl] != 0 )
     MPI_Win_shared_query( win_hostmaster, 0, &(win_hostmaster_size), &win_hostmaster_disp, &win_hostmaster_ptr );
   return 0;
 }
-int numa_shutdown( int Rank, int Size, MPI_Comm *MYWORLD, map_t *Me )
+int numa_shutdown( int Rank, int Size, MPI_Comm *MYWORLD, map_t *me )
 {
   // free every shared memory and window
   //
-  MPI_Win_free( &(Me->win.win) );
+  MPI_Win_free( &(me->win.win) );
   // free all the structures if needed
   //
-  free( Me->Ranks_to_host );
-  free( Me->swins );
+  free( me->Ranks_to_host );
+  free( me->swins );
   // anything else
   //
...
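Note: numa_allocate_shared_windows() follows the standard MPI-3 shared-memory pattern seen above: every task of the node-local communicator allocates its segment with MPI_Win_allocate_shared(), and the base addresses of the siblings' segments are then retrieved with MPI_Win_shared_query(), as the loop over t does. A minimal, self-contained sketch of that pattern (illustrative names, not the project's code):

#include <mpi.h>

int main( int argc, char **argv )
{
    MPI_Init( &argc, &argv );

    MPI_Comm shm;                                    /* node-local communicator */
    MPI_Comm_split_type( MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &shm );

    int rank, ntasks;
    MPI_Comm_rank( shm, &rank );
    MPI_Comm_size( shm, &ntasks );

    MPI_Aint size = 1024 * sizeof(double);           /* this rank's segment */
    double  *ptr  = NULL;
    MPI_Win  win;
    MPI_Win_allocate_shared( size, 1, MPI_INFO_NULL, shm, &ptr, &win );

    for ( int t = 0; t < ntasks; t++ )               /* query every sibling's segment */
        if ( t != rank )
        {
            MPI_Aint tsize;
            int      tdisp;
            double  *tptr;
            MPI_Win_shared_query( win, t, &tsize, &tdisp, &tptr );
            /* tptr now addresses rank t's segment directly through shared memory */
        }

    MPI_Win_free( &win );
    MPI_Comm_free( &shm );
    MPI_Finalize();
    return 0;
}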
@@ -141,92 +141,92 @@ int numa_shutdown( int Rank, int Size, MPI_Comm *MYWORLD, map_t *Me )
 }
-int numa_build_mapping( int Rank, int Size, MPI_Comm *MYWORLD, map_t *Me )
+int numa_build_mapping( int Rank, int Size, MPI_Comm *MYWORLD, map_t *me )
 {
   COMM[WORLD] = *MYWORLD;
-  Me->Ntasks[WORLD] = Size;
-  Me->Rank[WORLD] = Rank;
-  Me->COMM[WORLD] = &COMM[WORLD];
+  me->Ntasks[WORLD] = Size;
+  me->Rank[WORLD] = Rank;
+  me->COMM[WORLD] = &COMM[WORLD];
-  Me->mycpu = get_cpu_id();
+  me->mycpu = get_cpu_id();
   // --- find how many hosts we are running on;
   //     that is needed to build the communicator
   //     among the masters of each host
   //
-  numa_map_hostnames( &COMM[WORLD], Rank, Size, Me );
+  numa_map_hostnames( &COMM[WORLD], Rank, Size, me );
-  Me->MAXl = ( Me->Nhosts > 1 ? HOSTS : myHOST );
+  me->MAXl = ( me->Nhosts > 1 ? HOSTS : myHOST );
   // --- create the communicator for each host
   //
-  MPI_Comm_split( COMM[WORLD], Me->myhost, Me->Rank[WORLD], &COMM[myHOST] );
+  MPI_Comm_split( COMM[WORLD], me->myhost, me->Rank[WORLD], &COMM[myHOST] );
   MPI_Comm_size( COMM[myHOST], &Size );
   MPI_Comm_rank( COMM[myHOST], &Rank );
-  Me->COMM[myHOST] = &COMM[myHOST];
-  Me->Rank[myHOST] = Rank;
-  Me->Ntasks[myHOST] = Size;
+  me->COMM[myHOST] = &COMM[myHOST];
+  me->Rank[myHOST] = Rank;
+  me->Ntasks[myHOST] = Size;
   // with the following gathering we build-up the mapping Ranks_to_hosts, so that
   // we know which host each mpi rank (meaning the original rank) belongs to
   //
-  MPI_Allgather( &Me->myhost, sizeof(Me->myhost), MPI_BYTE, Me->Ranks_to_host, sizeof(Me->myhost), MPI_BYTE, COMM[WORLD] );
+  MPI_Allgather( &me->myhost, sizeof(me->myhost), MPI_BYTE, me->Ranks_to_host, sizeof(me->myhost), MPI_BYTE, COMM[WORLD] );
-  Me->Ranks_to_myhost = (int*)malloc( Me->Ntasks[myHOST]*sizeof(int) );
+  me->Ranks_to_myhost = (int*)malloc( me->Ntasks[myHOST]*sizeof(int) );
-  MPI_Allgather( &global_rank, sizeof(global_rank), MPI_BYTE, Me->Ranks_to_myhost, sizeof(global_rank), MPI_BYTE, *Me->COMM[myHOST] );
+  MPI_Allgather( &global_rank, sizeof(global_rank), MPI_BYTE, me->Ranks_to_myhost, sizeof(global_rank), MPI_BYTE, *me->COMM[myHOST] );
   // --- create the communicator for the
   //     masters of each host
   //
-  int Im_host_master = ( Me->Rank[myHOST] == 0 );
-  MPI_Comm_split( COMM[WORLD], Im_host_master, Me->Rank[WORLD], &COMM[HOSTS] );
+  int Im_host_master = ( me->Rank[myHOST] == 0 );
+  MPI_Comm_split( COMM[WORLD], Im_host_master, me->Rank[WORLD], &COMM[HOSTS] );
   //
   // NOTE: by default, the Rank 0 in WORLD is also Rank 0 in HOSTS
   //
   if( Im_host_master )
     {
-      Me->COMM[HOSTS] = &COMM[HOSTS];
-      Me->Ntasks[HOSTS] = Me->Nhosts;
-      MPI_Comm_rank( COMM[HOSTS], &(Me->Rank[HOSTS]) );
+      me->COMM[HOSTS] = &COMM[HOSTS];
+      me->Ntasks[HOSTS] = me->Nhosts;
+      MPI_Comm_rank( COMM[HOSTS], &(me->Rank[HOSTS]) );
     }
   else
     {
-      Me->COMM[HOSTS] = NULL;
-      Me->Ntasks[HOSTS] = 0;
-      Me->Rank[HOSTS] = -1;
+      me->COMM[HOSTS] = NULL;
+      me->Ntasks[HOSTS] = 0;
+      me->Rank[HOSTS] = -1;
     }
   // --- create the communicator for the
   //     numa node
   //
-  MPI_Comm_split_type( COMM[myHOST], MPI_COMM_TYPE_SHARED, Me->Rank[myHOST], MPI_INFO_NULL, &COMM[NUMA] );
-  Me->COMM[NUMA] = &COMM[NUMA];
-  MPI_Comm_size( COMM[NUMA], &(Me->Ntasks[NUMA]) );
-  MPI_Comm_rank( COMM[NUMA], &(Me->Rank[NUMA]) );
+  MPI_Comm_split_type( COMM[myHOST], MPI_COMM_TYPE_SHARED, me->Rank[myHOST], MPI_INFO_NULL, &COMM[NUMA] );
+  me->COMM[NUMA] = &COMM[NUMA];
+  MPI_Comm_size( COMM[NUMA], &(me->Ntasks[NUMA]) );
+  MPI_Comm_rank( COMM[NUMA], &(me->Rank[NUMA]) );
   // check whether NUMA == myHOST and determine
   // the maximum level of shared memory in the
   // topology
   //
-  if( Me->Ntasks[NUMA] == Me->Ntasks[myHOST] )
+  if( me->Ntasks[NUMA] == me->Ntasks[myHOST] )
    {
     // collapse levels from NUMA to myHOST
     //
-    Me->Ntasks[ISLAND] = Me->Ntasks[NUMA];   // equating to NUMA as we know the rank better via MPI_SHARED
-    Me->Rank[ISLAND] = Me->Rank[NUMA];
-    Me->COMM[ISLAND] = Me->COMM[NUMA];
+    me->Ntasks[ISLAND] = me->Ntasks[NUMA];   // equating to NUMA as we know the rank better via MPI_SHARED
+    me->Rank[ISLAND] = me->Rank[NUMA];
+    me->COMM[ISLAND] = me->COMM[NUMA];
-    Me->Rank[myHOST] = Me->Rank[NUMA];
-    Me->COMM[myHOST] = Me->COMM[NUMA];
-    Me->SHMEMl = myHOST;
+    me->Rank[myHOST] = me->Rank[NUMA];
+    me->COMM[myHOST] = me->COMM[NUMA];
+    me->SHMEMl = myHOST;
    }
   else
    {
...
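Note: the hunk above only renames Me to me, but it covers the core of numa_build_mapping(): the world communicator is split per host, a masters-only communicator is derived from it, and MPI_Comm_split_type( MPI_COMM_TYPE_SHARED ) yields the NUMA-level communicator. A minimal sketch of that two-level split (illustrative variable names; the real code stores the results in the COMM[] array and in the map_t):

#include <mpi.h>

int main( int argc, char **argv )
{
    MPI_Init( &argc, &argv );

    int world_rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );

    /* node-local communicator, analogous to COMM[NUMA] / COMM[myHOST] */
    MPI_Comm node;
    MPI_Comm_split_type( MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, world_rank, MPI_INFO_NULL, &node );

    int node_rank;
    MPI_Comm_rank( node, &node_rank );

    /* masters-of-each-host communicator, analogous to COMM[HOSTS]:
       node rank 0 gets color 1, everybody else color 0 */
    int im_host_master = ( node_rank == 0 );
    MPI_Comm hosts;
    MPI_Comm_split( MPI_COMM_WORLD, im_host_master, world_rank, &hosts );

    /* only the masters keep using 'hosts'; numa.c stores NULL and Rank = -1 for the others */

    MPI_Comm_free( &hosts );
    MPI_Comm_free( &node );
    MPI_Finalize();
    return 0;
}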
@@ -234,15 +234,15 @@ int numa_build_mapping( int Rank, int Size, MPI_Comm *MYWORLD, map_t *Me )
     // at this moment
     printf( ">>> It seems that rank %d belongs to a node for which "
             "    the node topology does not coincide\n", Rank );
-    Me->SHMEMl = NUMA;
+    me->SHMEMl = NUMA;
    }
   int check_SHMEM_level = 1;
   int globalcheck_SHMEM_level;
   int globalmax_SHMEM_level;
-  MPI_Allreduce( &(Me->SHMEMl), &globalmax_SHMEM_level, 1, MPI_INT, MPI_MAX, *MYWORLD );
+  MPI_Allreduce( &(me->SHMEMl), &globalmax_SHMEM_level, 1, MPI_INT, MPI_MAX, *MYWORLD );
-  check_SHMEM_level = ( (Me->SHMEMl == myHOST) && (globalmax_SHMEM_level == Me->SHMEMl) );
+  check_SHMEM_level = ( (me->SHMEMl == myHOST) && (globalmax_SHMEM_level == me->SHMEMl) );
   MPI_Allreduce( &check_SHMEM_level, &globalcheck_SHMEM_level, 1, MPI_INT, MPI_MAX, *MYWORLD );
...
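Note: the last hunk keeps the same consistency check, again only renaming the parameter: each rank reduces its shared-memory level with MPI_MAX and compares the result against its own value. A rough sketch of such an agreement check (this variant reduces the per-rank flag with MPI_MIN so the result means "every rank agrees"; the project's code reduces the flag with MPI_MAX):

#include <mpi.h>
#include <stdio.h>

int main( int argc, char **argv )
{
    MPI_Init( &argc, &argv );

    int my_level = 2;                                 /* e.g. the myHOST level on this rank */
    int global_max_level;
    MPI_Allreduce( &my_level, &global_max_level, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );

    int agree = ( my_level == global_max_level );     /* local agreement flag */
    int all_agree;
    MPI_Allreduce( &agree, &all_agree, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD );

    if ( !all_agree )
        printf( "shared-memory levels do not coincide across the job\n" );

    MPI_Finalize();
    return 0;
}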