diff --git a/src/cluster/cluster.cpp b/src/cluster/cluster.cpp index 8c499689ef27676671b5d05f2a9ddd3d0d117b05..3b50640f5c4ab253c5a40e8b8b0d1217cdaf4f32 100644 --- a/src/cluster/cluster.cpp +++ b/src/cluster/cluster.cpp @@ -228,7 +228,6 @@ void cluster(const string& config_file, const string& data_file, const string& o int nsph = gconf->number_of_spheres; // Sanity check on number of sphere consistency, should always be verified if (s_nsph == nsph) { - // Shortcuts to variables stored in configuration objects ScatteringAngles *p_scattering_angles = new ScatteringAngles(gconf); double wp = sconf->wp; // ClusterOutputInfo : Thread 0 of MPI process 0 allocates the memory to @@ -297,8 +296,8 @@ void cluster(const string& config_file, const string& data_file, const string& o #pragma omp single { jer = cluster_jxi488_cycle(jxi488, sconf, gconf, p_scattering_angles, cid, p_output, output_path, vtppoanp); - } - } + } // OMP single + } // OMP parallel #ifdef USE_NVTX nvtxRangePop(); #endif @@ -544,7 +543,7 @@ void cluster(const string& config_file, const string& data_file, const string& o time_logger->log(message); fclose(timing_file); delete time_logger; - } // end instructions block of MPI process 0 + } // end of instruction block for MPI process 0 //=============================== // instruction block for MPI processes different from 0 @@ -658,7 +657,7 @@ void cluster(const string& config_file, const string& data_file, const string& o } } // ixi488: close strided loop running on MPI processes - // Clean memory + // Clean memory #pragma omp barrier if (myompthread == 0) { delete[] p_outarray; @@ -738,6 +737,8 @@ int cluster_jxi488_cycle(int jxi488, ScattererConfiguration *sconf, GeometryConf if (jer != 0) { output->vec_ier[jindex - 1] = 1; output->vec_jxi[jindex - 1] = -jxi488; + logger->log("Error in HJV for scale " + to_string(jxi488) + "!", LOG_ERRO); + delete logger; return jer; // break; // rewrite this to go to the end of the function, to free locally allocated 
variables and return jer } diff --git a/src/cluster/np_cluster.cpp b/src/cluster/np_cluster.cpp index b34aca368fc9f0ebbc26dc19fcfc6a0542a2a84b..7beb785cf6003945889b2d9263bbf2fa23934942 100644 --- a/src/cluster/np_cluster.cpp +++ b/src/cluster/np_cluster.cpp @@ -70,26 +70,27 @@ extern void cluster(const string& config_file, const string& data_file, const st * \return result: `int` An exit code passed to the OS (0 for succesful execution). */ int main(int argc, char **argv) { + int ierr = 0; #ifdef MPI_VERSION - int ierr = MPI_Init(&argc, &argv); - // create and initialise class with essential MPI data - mixMPI *mpidata = new mixMPI(MPI_COMM_WORLD); + ierr = MPI_Init(&argc, &argv); + // create and initialise class with essential MPI data + mixMPI *mpidata = new mixMPI(MPI_COMM_WORLD); #else - // create a the class with dummy data if we are not using MPI at all - mixMPI *mpidata = new mixMPI(); + // create the class with dummy data if we are not using MPI at all + mixMPI *mpidata = new mixMPI(); #endif - string config_file = "../../test_data/cluster/DEDFB"; - string data_file = "../../test_data/cluster/DCLU"; - string output_path = "."; - if (argc == 4) { - config_file = string(argv[1]); - data_file = string(argv[2]); - output_path = string(argv[3]); - } - cluster(config_file, data_file, output_path, mpidata); + string config_file = "../../test_data/cluster/DEDFB"; + string data_file = "../../test_data/cluster/DCLU"; + string output_path = "."; + if (argc == 4) { + config_file = string(argv[1]); + data_file = string(argv[2]); + output_path = string(argv[3]); + } + cluster(config_file, data_file, output_path, mpidata); #ifdef MPI_VERSION - MPI_Finalize(); + MPI_Finalize(); #endif - delete mpidata; - return ierr; }