Project: Luca Tornatore / dADP

Commit 80024d41
Authored Mar 5, 2024 by lykos98
Parent: 6204319a

    implemented point exchange still preliminarly
Changes: 1 changed file
src/tree/tree.c: 56 additions, 23 deletions
@@ -958,7 +958,7 @@ void build_top_kdtree(global_context_t *ctx, pointset_t *og_pointset, top_kdtree
     MPI_DB_PRINT("Root is %p\n", tree -> root);
     if(I_AM_MASTER)
     {
-        tree_print(ctx, tree -> root);
+        // tree_print(ctx, tree -> root);
         write_nodes_to_file(ctx, tree, "bb/nodes_50_blobs_more_var.csv");
     }
@@ -1031,8 +1031,9 @@ int partition_data_around_key(int* key, float_t *val, int vec_len, int ref_key ,

 void exchange_points(global_context_t* ctx, top_kdtree_t* tree)
 {
-    size_t* points_per_proc = (size_t*)malloc(ctx -> world_size * sizeof(size_t));
+    int* points_per_proc = (int*)malloc(ctx -> world_size * sizeof(int));
     int* points_owners = (int*)malloc(ctx -> dims * ctx -> local_n_points * sizeof(float_t));
+    int* partition_offset = (int*)malloc(ctx -> world_size * sizeof(int));

     /* compute owner */
     for(size_t i = 0; i < ctx -> local_n_points; ++i)
     {
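The body of the "compute owner" loop lies outside this hunk. Conceptually, each local point is routed down the top-level k-d tree until it reaches the leaf describing the spatial region assigned to some MPI rank, and that rank becomes the point's owner. The sketch below is a hypothetical illustration of such a lookup: the toy_node layout, the point_owner name, and the use of double in place of the project's float_t are assumptions, not the repository's top_kdtree_t.

    #include <stddef.h>

    /* Hypothetical node layout; the real top_kdtree_t is not shown in this diff. */
    typedef struct toy_node {
        int    split_dim;            /* dimension this internal node splits on      */
        double split_val;            /* splitting coordinate along split_dim        */
        int    owner;                /* owning MPI rank, meaningful only at leaves  */
        struct toy_node *lch, *rch;  /* children; both NULL at a leaf               */
    } toy_node;

    /* Descend from the root to the leaf containing `point` and return its rank. */
    static int point_owner(const toy_node *node, const double *point)
    {
        while (node->lch != NULL && node->rch != NULL)
            node = (point[node->split_dim] <= node->split_val) ? node->lch : node->rch;
        return node->owner;
    }

With such a helper, the loop above would reduce to assigning points_owners[i] = point_owner(root, local_data + i * dims) for every local point i.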
@@ -1044,9 +1045,12 @@ void exchange_points(global_context_t* ctx, top_kdtree_t* tree)
     int last_idx = 0;
     int len = ctx -> local_n_points;
     float_t* curr_data = ctx -> local_data;

+    partition_offset[0] = 0;
     for(int owner = 1; owner < ctx -> world_size; ++owner)
     {
         last_idx = partition_data_around_key(points_owners, ctx -> local_data, ctx -> dims, owner, last_idx, ctx -> local_n_points);
+        partition_offset[owner] = last_idx;
         points_per_proc[owner - 1] = last_idx;
     }
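The loop above makes the points destined to each rank contiguous by repeatedly calling the project's partition_data_around_key, and the new partition_offset array records where each block starts. A minimal sketch of that grouping step, written as a single counting sort rather than repeated partitions, is shown below; group_points_by_owner and its parameters are illustrative names only, and double stands in for the project's float_t.

    #include <stdlib.h>
    #include <string.h>

    /* Reorder `data` (dims doubles per point) so that points owned by rank r form */
    /* one contiguous block; count[r] and offset[r] describe that block afterwards. */
    static void group_points_by_owner(int *owners, double *data, int dims, int n,
                                      int world_size, int *count, int *offset)
    {
        memset(count, 0, world_size * sizeof(int));
        for (int i = 0; i < n; ++i) count[owners[i]]++;          /* points per rank */

        offset[0] = 0;                                           /* exclusive scan  */
        for (int r = 1; r < world_size; ++r) offset[r] = offset[r - 1] + count[r - 1];

        double *tmp  = malloc((size_t)n * dims * sizeof(double));
        int *tmp_own = malloc((size_t)n * sizeof(int));
        int *cursor  = malloc((size_t)world_size * sizeof(int));
        memcpy(cursor, offset, world_size * sizeof(int));

        for (int i = 0; i < n; ++i) {                            /* scatter by owner */
            int dst = cursor[owners[i]]++;
            tmp_own[dst] = owners[i];
            memcpy(tmp + (size_t)dst * dims, data + (size_t)i * dims, dims * sizeof(double));
        }

        memcpy(data, tmp, (size_t)n * dims * sizeof(double));
        memcpy(owners, tmp_own, (size_t)n * sizeof(int));
        free(tmp); free(tmp_own); free(cursor);
    }

After the call, count[r] plays the role of a per-destination send count and offset[r] the role of partition_offset[r], which is exactly what the collective exchange in the next hunk consumes.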
@@ -1060,39 +1064,68 @@ void exchange_points(global_context_t* ctx, top_kdtree_t* tree)
     /*
     MPI_DB_PRINT("Points per proc begin: ");
     for(int i = 0; i < ctx -> world_size; ++i)
     {
         MPI_DB_PRINT("%lu ", points_per_proc[i]);
     }
     MPI_DB_PRINT("\n");
     */

-    MPI_Allreduce(MPI_IN_PLACE, points_per_proc, ctx -> world_size, MPI_UINT64_T, MPI_SUM, ctx -> mpi_communicator);
+    int* points_per_proc_all = (int*)malloc(ctx -> world_size * sizeof(int));
+    MPI_Allreduce(MPI_IN_PLACE, points_per_proc_all, ctx -> world_size, MPI_INT, MPI_SUM, ctx -> mpi_communicator);

     size_t test_num = 0;
-    for(int i = 0; i < ctx -> world_size; ++i) test_num += points_per_proc[i];
+    for(int i = 0; i < ctx -> world_size; ++i) test_num += points_per_proc_all[i];
     MPI_DB_PRINT("Master has n_points %lu and in node population has %lu points\n", ctx -> n_points, test_num);

     /*
     MPI_DB_PRINT("Points per proc after: ");
     for(int i = 0; i < ctx -> world_size; ++i)
     {
-        MPI_DB_PRINT("%lu ", points_per_proc[i]);
+        MPI_DB_PRINT("%lu ", points_per_proc_all[i]);
     }
     MPI_DB_PRINT("\n");
+    free(points_per_proc_all);
     */

+    int* rcvcount = (int*)malloc(ctx -> world_size * sizeof(int));
+    int* displs = (int*)malloc(ctx -> world_size * sizeof(int));
+    float_t* rcvbuffer = NULL;
+    int tot_count = 0;
+
+    for(int rcv = 0; rcv < ctx -> world_size; ++rcv)
+    {
+        /* recieve the number of points to recieve from each proc */
+        MPI_Gather(&(points_per_proc[rcv]), 1, MPI_INT, rcvcount, 1, MPI_INT, rcv, ctx -> mpi_communicator);
+        float_t* send_buffer = ctx -> local_data + (ctx -> dims * partition_offset[rcv]);
-        /*
-        for(int i = 0; i < ctx -> local_n_points; ++i)
-        {
-            MPI_DB_PRINT("%d ", points_owners[i]);
-            if(i % 10 == 0) MPI_DB_PRINT("\n");
-        }
-        */
+        /* if I am the reciever recieve */
+        if(rcv == ctx -> mpi_rank)
+        {
+            displs[0] = 0;
+            for(int i = 1; i < ctx -> world_size; ++i) displs[i] = displs[i - 1] + rcvcount[i - 1];
+
+            /*multiply for number of elements */
+            for(int i = 0; i < ctx -> world_size; ++i)
+            {
+                displs[i] = displs[i] * ctx -> dims;
+                rcvcount[i] = rcvcount[i] * ctx -> dims;
+                tot_count += rcvcount[i];
+            }
+
+            //DB_PRINT("[RANK %d] is recieving %d elements %d points\n", rcv, tot_count, tot_count / ctx -> dims);
+            rcvbuffer = (float_t*)malloc(tot_count * sizeof(float_t));
+        }
+
+        MPI_Gatherv(send_buffer, points_per_proc[rcv], MPI_MY_FLOAT, rcvbuffer, rcvcount, displs, MPI_MY_FLOAT, rcv, ctx -> mpi_communicator);
+    }
+
+    ctx -> local_n_points = tot_count;
+    /* free prv pointer */
+    free(ctx -> local_data);
+    ctx -> local_data = rcvbuffer;
+
+    free(points_owners);
+    free(points_per_proc);
+    free(partition_offset);
+    free(rcvcount);
+    free(displs);
 }

 void simulate_master_read_and_scatter(int dims, size_t n, global_context_t *ctx)
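The new receive machinery gathers, for every destination rank rcv, first the per-sender point counts (MPI_Gather) and then the coordinates themselves (MPI_Gatherv), with displacements and counts scaled by the dimensionality. Below is a standalone, simplified sketch of that pattern under assumed conditions: every rank's points are already grouped by destination, the toy send_count/send_offset layout stands in for points_per_proc/partition_offset, and MPI_DOUBLE replaces the project's MPI_MY_FLOAT. Compile with mpicc.

    #include <mpi.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
        MPI_Init(&argc, &argv);
        int rank, size, dims = 2;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        /* toy layout: this rank sends (rank + 1) points to every destination */
        int *send_count  = malloc(size * sizeof(int));   /* points per destination */
        int *send_offset = malloc(size * sizeof(int));   /* first point per dest   */
        for (int r = 0; r < size; ++r) send_count[r] = rank + 1;
        send_offset[0] = 0;
        for (int r = 1; r < size; ++r) send_offset[r] = send_offset[r - 1] + send_count[r - 1];

        int local_n = send_offset[size - 1] + send_count[size - 1];
        double *local_data = malloc((size_t)local_n * dims * sizeof(double));
        for (int i = 0; i < local_n * dims; ++i) local_data[i] = rank + 0.001 * i;

        int *rcvcount = malloc(size * sizeof(int));
        int *displs   = malloc(size * sizeof(int));
        double *rcvbuffer = NULL;
        int tot_count = 0;

        for (int rcv = 0; rcv < size; ++rcv) {
            /* 1) root `rcv` learns how many points each rank will send it */
            MPI_Gather(&send_count[rcv], 1, MPI_INT, rcvcount, 1, MPI_INT,
                       rcv, MPI_COMM_WORLD);

            double *send_buffer = local_data + (size_t)dims * send_offset[rcv];

            if (rank == rcv) {
                /* 2) build element counts and displacements (points * dims) */
                displs[0] = 0;
                for (int i = 1; i < size; ++i) displs[i] = displs[i - 1] + rcvcount[i - 1];
                for (int i = 0; i < size; ++i) {
                    displs[i]   *= dims;
                    rcvcount[i] *= dims;
                    tot_count   += rcvcount[i];
                }
                rcvbuffer = malloc((size_t)tot_count * sizeof(double));
            }

            /* 3) everyone ships its block destined to this root */
            MPI_Gatherv(send_buffer, send_count[rcv] * dims, MPI_DOUBLE,
                        rcvbuffer, rcvcount, displs, MPI_DOUBLE,
                        rcv, MPI_COMM_WORLD);
        }

        printf("[rank %d] received %d coordinates (%d points)\n",
               rank, tot_count, tot_count / dims);

        free(send_count); free(send_offset); free(local_data);
        free(rcvcount); free(displs); free(rcvbuffer);
        MPI_Finalize();
        return 0;
    }

Run with, for example, mpirun -np 4 ./a.out; with this toy layout every rank ends up reporting size * (size + 1) / 2 points (10 points, 20 coordinates for 4 ranks), since sender i contributes i + 1 points to each root.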
@@ -1184,7 +1217,6 @@ void simulate_master_read_and_scatter(int dims, size_t n, global_context_t *ctx)
     original_ps.lb_box = (float_t*)malloc(ctx -> dims * sizeof(float_t));
     original_ps.ub_box = (float_t*)malloc(ctx -> dims * sizeof(float_t));

     float_t incr = 0.05;
     float_t tol = 0.002;

     top_kdtree_t tree;
@@ -1196,6 +1228,7 @@ void simulate_master_read_and_scatter(int dims, size_t n, global_context_t *ctx)
     free(send_counts);
     free(displacements);