Commit cc6ed30a authored by lykos98

added diagnostic print

parent ad26df1f
@@ -17,6 +17,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <omp.h>
+#include <sys/sysinfo.h>
 
 #define WRITE_NGBH
 #define WRITE_TOP_NODES
@@ -1289,11 +1290,42 @@ void convert_heap_idx_to_global(global_context_t* ctx, heap* H)
     }
 }
 
+void print_diagnostics(global_context_t* ctx, int k)
+{
+    MPI_Comm shmcomm;
+    MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0,
+                        MPI_INFO_NULL, &shmcomm);
+    int shm_world_size;
+    MPI_Comm_size(shmcomm, &shm_world_size);
+    MPI_DB_PRINT("[INFO] Got %d ranks per node\n", shm_world_size);
+
+    /* data */
+    float_t memory_use = (float_t)ctx -> local_n_points * ctx -> dims * sizeof(float_t);
+    memory_use += (float_t)sizeof(datapoint_info_t) * (float_t)(ctx -> local_n_points);
+    /* ngbh */
+    memory_use += (float_t)sizeof(heap_node) * (float_t)k * (float_t)(ctx -> local_n_points);
+    memory_use = memory_use / 1e9 * shm_world_size;
+
+    MPI_DB_PRINT(" Got ~%d points per node and %d ngbh per point\n", ctx -> local_n_points * shm_world_size, k);
+    MPI_DB_PRINT(" Expected to use ~%.2lfGB of memory on each node, plus the memory required to communicate ngbh\n", memory_use);
+
+    struct sysinfo info;
+    sysinfo(&info);
+    if(memory_use > 0.5 * (float_t)info.freeram / 1e9)
+        MPI_DB_PRINT("/!\\ Projected memory usage is more than half of the node memory, may run into trouble while communicating ngbh\n");
+
+    MPI_Barrier(ctx -> mpi_communicator);
+}
+
 void mpi_ngbh_search(global_context_t* ctx, datapoint_info_t* dp_info, top_kdtree_t* top_tree, kdtree_v2* local_tree, float_t* data, int k)
 {
     /* local search */
+
+    /* print diagnostics */
+    print_diagnostics(ctx, k);
+
     MPI_DB_PRINT("[MASTER] Local ngbh search ");
+    MPI_Barrier(ctx -> mpi_communicator);
 #pragma omp parallel for
     for(int p = 0; p < ctx -> local_n_points; ++p)
     {
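
The new print_diagnostics routine above obtains the number of ranks per node by splitting MPI_COMM_WORLD into one shared-memory communicator per node with MPI_Comm_split_type and MPI_COMM_TYPE_SHARED. The program below is a minimal standalone sketch of that pattern; it is illustrative only, not part of the commit, and assumes nothing beyond a working MPI installation.

#include <mpi.h>
#include <stdio.h>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int world_rank, world_size;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    /* One communicator per shared-memory domain (i.e. per node);
       its size is the number of ranks running on this node. */
    MPI_Comm shmcomm;
    MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0,
                        MPI_INFO_NULL, &shmcomm);

    int ranks_per_node;
    MPI_Comm_size(shmcomm, &ranks_per_node);

    if (world_rank == 0)
        printf("[INFO] %d total ranks, %d ranks on this node\n",
               world_size, ranks_per_node);

    MPI_Comm_free(&shmcomm);
    MPI_Finalize();
    return 0;
}

Compiled with mpicc and launched with, e.g., mpirun -np 8 across two 4-rank nodes, rank 0 would report 8 total ranks and 4 ranks on its node.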
@@ -1842,7 +1874,7 @@ void simulate_master_read_and_scatter(int dims, size_t n, global_context_t *ctx)
     // std_g0163178_Me14_091_0000
     /* 10^6 points ca.*/
-    data = read_data_file(ctx,"../norm_data/std_LR_091_0001",MY_TRUE);
+    //data = read_data_file(ctx,"../norm_data/std_LR_091_0001",MY_TRUE);
     /* 10^7 ~ 8M points */
@@ -1853,7 +1885,7 @@ void simulate_master_read_and_scatter(int dims, size_t n, global_context_t *ctx)
     //
     //34 M
-    //data = read_data_file(ctx,"../norm_data/std_g1212639_091_0001",MY_TRUE);
+    data = read_data_file(ctx,"../norm_data/std_g1212639_091_0001",MY_TRUE);
     ctx->dims = 5;
     // ctx -> n_points = 48*5*2000;
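
For reference, the memory estimate in print_diagnostics sums dims coordinates of sizeof(float_t) bytes, one datapoint_info_t, and k heap_node entries per local point, scales the total by the ranks sharing a node, and warns when the projection exceeds half of the free RAM reported by sysinfo. The sketch below reproduces that arithmetic with placeholder struct layouts and problem sizes; float_t_, heap_node_, datapoint_info_ and all numbers are illustrative stand-ins, not the repository's definitions.

#include <stdio.h>
#include <sys/sysinfo.h>

/* Placeholder stand-ins for the real types; sizes are illustrative only. */
typedef double float_t_;
typedef struct { float_t_ value; int array_idx; } heap_node_;
typedef struct { long g_idx; float_t_ g; float_t_ log_rho; } datapoint_info_;

int main(void)
{
    /* Illustrative problem sizes, not taken from the repository. */
    long points_per_rank = 1000000;  /* local_n_points */
    int  dims            = 5;
    int  k               = 300;      /* neighbours kept per point */
    int  ranks_per_node  = 4;

    double bytes = (double)points_per_rank * dims * sizeof(float_t_)   /* data */
                 + (double)points_per_rank * sizeof(datapoint_info_)   /* info */
                 + (double)points_per_rank * k * sizeof(heap_node_);   /* ngbh */
    double gb_per_node = bytes / 1e9 * ranks_per_node;

    struct sysinfo info;
    sysinfo(&info);
    /* freeram is reported in units of mem_unit bytes (man 2 sysinfo) */
    double free_gb = (double)info.freeram * info.mem_unit / 1e9;

    printf("estimated ~%.2f GB per node, %.2f GB currently free\n",
           gb_per_node, free_gb);
    if (gb_per_node > 0.5 * free_gb)
        printf("/!\\ estimate exceeds half of the free node memory\n");
    return 0;
}

With these placeholder numbers the neighbour heaps dominate: 10^6 points * 300 neighbours * 16 B is about 4.8 GB per rank, roughly 19.5 GB for a 4-rank node, which is the kind of figure the committed warning compares against half of the node's free memory.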