From 2244fd0c4b973964e4c80276546b7320e4c5166d Mon Sep 17 00:00:00 2001 From: "Mulas, Giacomo" <gmulas@oa-cagliari.inaf.it> Date: Mon, 27 May 2024 21:05:10 +0200 Subject: [PATCH] move binary file output to virtual file passed via MPI to process 0, thread 0 for actual writing to disk --- src/cluster/cluster.cpp | 341 +++++++++++++++++++++++++--------------- src/include/file_io.h | 275 ++++++++++++++++---------------- src/libnptm/file_io.cpp | 228 +++++++++++++++++++++++---- 3 files changed, 549 insertions(+), 295 deletions(-) diff --git a/src/cluster/cluster.cpp b/src/cluster/cluster.cpp index 3a73321b..8bce3ec2 100644 --- a/src/cluster/cluster.cpp +++ b/src/cluster/cluster.cpp @@ -86,7 +86,7 @@ using namespace std; // I would like to put it all in a struct, but then I'd have to write a constructor for it, due to members defined as references, creating a worse nightmare than the one I'd like to simplify... -int cluster_jxi488_cycle(int jxi488, ScattererConfiguration *sconf, GeometryConfiguration *gconf, ScatteringAngles *sa, ClusterIterationData *cid, VirtualAsciiFile *output, const string& output_path, fstream& tppoan); +int cluster_jxi488_cycle(int jxi488, ScattererConfiguration *sconf, GeometryConfiguration *gconf, ScatteringAngles *sa, ClusterIterationData *cid, VirtualAsciiFile *output, const string& output_path, VirtualBinaryFile *vtppoanp); /*! \brief C++ implementation of CLU * @@ -228,11 +228,13 @@ void cluster(const string& config_file, const string& data_file, const string& o double exri = sqrt(exdc); sprintf(virtual_line, " REFR. INDEX OF EXTERNAL MEDIUM=%15.7lE\n\0", exri); p_output->append_line(virtual_line); - fstream *tppoanp = new fstream; - fstream &tppoan = *tppoanp; + VirtualBinaryFile *vtppoanp = new VirtualBinaryFile(); + VirtualBinaryLine *vbinline = NULL; + // fstream *tppoanp = new fstream; + // fstream &tppoan = *tppoanp; string tppoan_name = output_path + "/c_TPPOAN"; - tppoan.open(tppoan_name.c_str(), ios::out | ios::binary); - if (tppoan.is_open()) { + // tppoan.open(tppoan_name.c_str(), ios::out | ios::binary); + // if (tppoan.is_open()) { #ifdef USE_MAGMA logger->log("INFO: using MAGMA calls.\n", LOG_INFO); #elif defined USE_LAPACK @@ -248,14 +250,26 @@ void cluster(const string& config_file, const string& data_file, const string& o int nths = p_scattering_angles->nths; int nph = p_scattering_angles->nph; int nphs = p_scattering_angles->nphs; - tppoan.write(reinterpret_cast<char *>(&iavm), sizeof(int)); - tppoan.write(reinterpret_cast<char *>(&isam), sizeof(int)); - tppoan.write(reinterpret_cast<char *>(&inpol), sizeof(int)); - tppoan.write(reinterpret_cast<char *>(&nxi), sizeof(int)); - tppoan.write(reinterpret_cast<char *>(&nth), sizeof(int)); - tppoan.write(reinterpret_cast<char *>(&nph), sizeof(int)); - tppoan.write(reinterpret_cast<char *>(&nths), sizeof(int)); - tppoan.write(reinterpret_cast<char *>(&nphs), sizeof(int)); + // logger->log("INFO: size of vtppoanp->_file_lines before first append is "+to_string((vtppoanp->_file_lines)->size())+"\n"); + // tppoan.write(reinterpret_cast<char *>(&iavm), sizeof(int)); + vtppoanp->append_line(VirtualBinaryLine(iavm)); + // logger->log("INFO: size of vtppoanp->_file_lines after first append is "+to_string((vtppoanp->_file_lines)->size())+"\n"); + // tppoan.write(reinterpret_cast<char *>(&isam), sizeof(int)); + vtppoanp->append_line(VirtualBinaryLine(isam)); + // logger->log("INFO: size of vtppoanp->_file_lines after second append is "+to_string((vtppoanp->_file_lines)->size())+"\n"); + // logger->log("INFO: 
vtppoanp->_file_lines[0]._data_size is " + to_string(((vtppoanp->_file_lines)->at(0))._data_size)+"\n"); + // tppoan.write(reinterpret_cast<char *>(&inpol), sizeof(int)); + vtppoanp->append_line(VirtualBinaryLine(inpol)); + // tppoan.write(reinterpret_cast<char *>(&nxi), sizeof(int)); + vtppoanp->append_line(VirtualBinaryLine(nxi)); + // tppoan.write(reinterpret_cast<char *>(&nth), sizeof(int)); + vtppoanp->append_line(VirtualBinaryLine(nth)); + // tppoan.write(reinterpret_cast<char *>(&nph), sizeof(int)); + vtppoanp->append_line(VirtualBinaryLine(nph)); + // tppoan.write(reinterpret_cast<char *>(&nths), sizeof(int)); + vtppoanp->append_line(VirtualBinaryLine(nths)); + // tppoan.write(reinterpret_cast<char *>(&nphs), sizeof(int)); + vtppoanp->append_line(VirtualBinaryLine(nphs)); if (sconf->idfc < 0) { cid->vk = cid->xip * cid->wn; sprintf(virtual_line, " VK=%15.7lE, XI IS SCALE FACTOR FOR LENGTHS\n\0", cid->vk); @@ -269,7 +283,7 @@ void cluster(const string& config_file, const string& data_file, const string& o #ifdef USE_NVTX nvtxRangePush("First iteration"); #endif - int jer = cluster_jxi488_cycle(jxi488, sconf, gconf, p_scattering_angles, cid, p_output, output_path, tppoan); + int jer = cluster_jxi488_cycle(jxi488, sconf, gconf, p_scattering_angles, cid, p_output, output_path, vtppoanp); #ifdef USE_NVTX nvtxRangePop(); #endif @@ -284,7 +298,7 @@ void cluster(const string& config_file, const string& data_file, const string& o time_logger->log(message); if (jer != 0) { // First loop failed. Halt the calculation. - tppoan.close(); + // tppoan.close(); fclose(timing_file); //fclose(output); delete p_output; @@ -309,6 +323,7 @@ void cluster(const string& config_file, const string& data_file, const string& o // Create this variable and initialise it with a default here, so that it is defined anyway, with or without OpenMP support enabled int ompnumthreads = 1; VirtualAsciiFile **p_outarray = NULL; + VirtualBinaryFile **vtppoanarray = NULL; #ifdef USE_NVTX nvtxRangePush("Parallel loop"); @@ -323,29 +338,34 @@ void cluster(const string& config_file, const string& data_file, const string& o if (myompthread == 0) { ompnumthreads = omp_get_num_threads(); p_outarray = new VirtualAsciiFile*[ompnumthreads]; + vtppoanarray = new VirtualBinaryFile*[ompnumthreads]; } #endif // To test parallelism, I will now start feeding this function with "clean" copies of the parameters, so that they will not be changed by previous iterations, and each one will behave as the first one. 
Define all (empty) variables here, so they have the correct scope, then they get different definitions depending on thread number ClusterIterationData *cid_2 = NULL; //FILE *output_2 = NULL; VirtualAsciiFile *p_output_2 = NULL; - fstream *tppoanp_2 = NULL; + VirtualBinaryFile *vtppoanp_2 = NULL; + // fstream *tppoanp_2 = NULL; // for threads other than the 0, create distinct copies of all relevant data, while for thread 0 just define new references / pointers to the original ones if (myompthread == 0) { cid_2 = cid; //output_2 = output; p_output_2 = p_output; - tppoanp_2 = tppoanp; + // tppoanp_2 = tppoanp; + vtppoanp_2 = vtppoanp; } else { // this is not thread 0, so do create fresh copies of all local variables cid_2 = new ClusterIterationData(*cid); //output_2 = fopen((output_path + "/c_OCLU_" + to_string(mpidata->rank) + "_" + to_string(myompthread)).c_str(), "w"); p_output_2 = new VirtualAsciiFile(); - tppoanp_2 = new fstream; - tppoanp_2->open((output_path + "/c_TPPOAN_" + to_string(mpidata->rank) + "_" + to_string(myompthread)).c_str(), ios::out | ios::binary); + vtppoanp_2 = new VirtualBinaryFile(); + // tppoanp_2 = new fstream; + // tppoanp_2->open((output_path + "/c_TPPOAN_" + to_string(mpidata->rank) + "_" + to_string(myompthread)).c_str(), ios::out | ios::binary); } p_outarray[myompthread] = p_output_2; - fstream &tppoan_2 = *tppoanp_2; + vtppoanarray[myompthread] = vtppoanp_2; + // fstream &tppoan_2 = *tppoanp_2; // make sure all threads align here: I don't want the following loop to accidentally start for thread 0, possibly modifying some variables before they are copied by all other threads #pragma omp barrier if (myompthread==0) { @@ -354,7 +374,7 @@ void cluster(const string& config_file, const string& data_file, const string& o // ok, now I can actually start the parallel calculations #pragma omp for for (jxi488 = cid_2->firstxi; jxi488 <= cid_2->lastxi; jxi488++) { - int jer = cluster_jxi488_cycle(jxi488, sconf, gconf, p_scattering_angles, cid_2, p_output_2, output_path, *tppoanp_2); + int jer = cluster_jxi488_cycle(jxi488, sconf, gconf, p_scattering_angles, cid_2, p_output_2, output_path, vtppoanp_2); } #pragma omp barrier @@ -364,8 +384,8 @@ void cluster(const string& config_file, const string& data_file, const string& o //fclose(output_2); // p_output_2->write_to_disk(output_path + "/c_OCLU_" + to_string(mpidata->rank) + "_" + to_string(myompthread)); // delete p_output_2; - tppoanp_2->close(); - delete tppoanp_2; + // tppoanp_2->close(); + // delete tppoanp_2; } #pragma omp barrier { @@ -385,11 +405,16 @@ void cluster(const string& config_file, const string& data_file, const string& o for (int ti=1; ti<ompnumthreads; ti++) { p_outarray[0]->append(*(p_outarray[ti])); delete p_outarray[ti]; + vtppoanarray[0]->append(*(vtppoanarray[ti])); + delete vtppoanarray[ti]; } p_outarray[0]->write_to_disk(output_path + "/c_OCLU"); delete p_outarray[0]; delete[] p_outarray; - for (int ri = 1; ri < ompnumthreads; ri++) { + vtppoanarray[0]->write_to_disk(output_path + "/c_TPPOAN_bis"); + delete vtppoanarray[0]; + delete[] vtppoanarray; + // for (int ri = 1; ri < ompnumthreads; ri++) { // string partial_file_name = output_path + "/c_OCLU_" + to_string(mpidata->rank) + "_" + to_string(ri); // string message = "Copying ASCII output in MPI process " + to_string(mpidata->rank) + " of thread " + to_string(ri) + " of " + to_string(ompnumthreads - 1) + "... 
"; // logger->log(message, LOG_DEBG); @@ -410,22 +435,22 @@ void cluster(const string& config_file, const string& data_file, const string& o // fclose(partial_output); // remove(partial_file_name.c_str()); // logger->log("done.\n", LOG_DEBG); - string partial_file_name = output_path + "/c_TPPOAN_" + to_string(mpidata->rank) + "_" + to_string(ri); - string message = "Copying binary output in MPI process " + to_string(mpidata->rank) + " of thread " + to_string(ri) + " of " + to_string(ompnumthreads - 1) + "... "; - logger->log(message, LOG_DEBG); - fstream partial_tppoan; - partial_tppoan.open(partial_file_name.c_str(), ios::in | ios::binary); - partial_tppoan.seekg(0, ios::end); - long buffer_size = partial_tppoan.tellg(); - char *binary_buffer = new char[buffer_size]; - partial_tppoan.seekg(0, ios::beg); - partial_tppoan.read(binary_buffer, buffer_size); - tppoan.write(binary_buffer, buffer_size); - partial_tppoan.close(); - delete[] binary_buffer; - remove(partial_file_name.c_str()); - logger->log("done.\n", LOG_DEBG); - } + // string partial_file_name = output_path + "/c_TPPOAN_" + to_string(mpidata->rank) + "_" + to_string(ri); + // string message = "Copying binary output in MPI process " + to_string(mpidata->rank) + " of thread " + to_string(ri) + " of " + to_string(ompnumthreads - 1) + "... "; + // logger->log(message, LOG_DEBG); + // fstream partial_tppoan; + // partial_tppoan.open(partial_file_name.c_str(), ios::in | ios::binary); + // partial_tppoan.seekg(0, ios::end); + // long buffer_size = partial_tppoan.tellg(); + // char *binary_buffer = new char[buffer_size]; + // partial_tppoan.seekg(0, ios::beg); + // partial_tppoan.read(binary_buffer, buffer_size); + // // tppoan.write(binary_buffer, buffer_size); + // partial_tppoan.close(); + // delete[] binary_buffer; + // remove(partial_file_name.c_str()); + // logger->log("done.\n", LOG_DEBG); + // } } #endif // here go the code to append the files written in MPI processes > 0 to the ones on MPI process 0 @@ -437,10 +462,13 @@ void cluster(const string& config_file, const string& data_file, const string& o VirtualAsciiFile *p_output = new VirtualAsciiFile(mpidata, rr); p_output->append_to_disk(output_path + "/c_OCLU"); delete p_output; + VirtualBinaryFile *vtppoanp = new VirtualBinaryFile(mpidata, rr); + vtppoanp->append_to_disk(output_path + "/c_TPPOAN_bis"); + delete vtppoanp; // // how many openmp threads did process rr use? 
- int remotethreads; - MPI_Recv(&remotethreads, 1, MPI_INT, rr, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - for (int ri=0; ri<remotethreads; ri++) { + // int remotethreads; + // MPI_Recv(&remotethreads, 1, MPI_INT, rr, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE); + // for (int ri=0; ri<remotethreads; ri++) { // // first get the ASCII local file // char *chunk_buffer; // int chunk_buffer_size = -1; @@ -465,18 +493,18 @@ void cluster(const string& config_file, const string& data_file, const string& o // } // // if (ri<remotethreads-1) fprintf(output, "\n"); - // now get the binary local file - long buffer_size = 0; - // get the size of the buffer - MPI_Recv(&buffer_size, 1, MPI_LONG, rr, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - // allocate the bufer - char *binary_buffer = new char[buffer_size]; - // actually receive the buffer - MPI_Recv(binary_buffer, buffer_size, MPI_CHAR, rr, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); - // we can write it to disk - tppoan.write(binary_buffer, buffer_size); - delete[] binary_buffer; - } + // // now get the binary local file + // long buffer_size = 0; + // // get the size of the buffer + // MPI_Recv(&buffer_size, 1, MPI_LONG, rr, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE); + // // allocate the bufer + // char *binary_buffer = new char[buffer_size]; + // // actually receive the buffer + // MPI_Recv(binary_buffer, buffer_size, MPI_CHAR, rr, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); + // // we can write it to disk + // // tppoan.write(binary_buffer, buffer_size); + // delete[] binary_buffer; + // } } } @@ -484,11 +512,11 @@ void cluster(const string& config_file, const string& data_file, const string& o #ifdef USE_NVTX nvtxRangePop(); #endif - tppoanp->close(); - delete tppoanp; - } else { // In case TPPOAN could not be opened. Should never happen. - logger->err("\nERROR: failed to open TPPOAN file.\n"); - } + // tppoanp->close(); + // delete tppoanp; + // } else { // In case TPPOAN could not be opened. Should never happen. + // logger->err("\nERROR: failed to open TPPOAN file.\n"); + // } // fclose(output); // p_output->write_to_disk(output_path + "/c_OCLU"); // delete p_output; @@ -527,6 +555,7 @@ void cluster(const string& config_file, const string& data_file, const string& o // Create this variable and initialise it with a default here, so that it is defined anyway, with or without OpenMP support enabled int ompnumthreads = 1; VirtualAsciiFile **p_outarray = NULL; + VirtualBinaryFile **vtppoanarray = NULL; #pragma omp parallel { @@ -538,13 +567,15 @@ void cluster(const string& config_file, const string& data_file, const string& o if (myompthread == 0) { ompnumthreads = omp_get_num_threads(); p_outarray = new VirtualAsciiFile*[ompnumthreads]; + vtppoanarray = new VirtualBinaryFile*[ompnumthreads]; } #endif // To test parallelism, I will now start feeding this function with "clean" copies of the parameters, so that they will not be changed by previous iterations, and each one will behave as the first one. 
Define all (empty) variables here, so they have the correct scope, then they get different definitions depending on thread number ClusterIterationData *cid_2 = NULL; //FILE *output_2 = NULL; VirtualAsciiFile *p_output_2 = NULL; - fstream *tppoanp_2 = NULL; + VirtualBinaryFile *vtppoanp_2 = NULL; + // fstream *tppoanp_2 = NULL; // for threads other than the 0, create distinct copies of all relevant data, while for thread 0 just define new references / pointers to the original ones if (myompthread == 0) { cid_2 = cid; @@ -557,16 +588,18 @@ void cluster(const string& config_file, const string& data_file, const string& o // output_2 = fopen((output_path + "/c_OCLU_" + to_string(mpidata->rank) + "_" + to_string(myompthread)).c_str(), "w"); p_output_2 = new VirtualAsciiFile(); p_outarray[myompthread] = p_output_2; - tppoanp_2 = new fstream; - tppoanp_2->open((output_path + "/c_TPPOAN_" + to_string(mpidata->rank) + "_" + to_string(myompthread)).c_str(), ios::out | ios::binary); - fstream &tppoan_2 = *tppoanp_2; + vtppoanp_2 = new VirtualBinaryFile(); + vtppoanarray[myompthread] = vtppoanp_2; + // tppoanp_2 = new fstream; + // tppoanp_2->open((output_path + "/c_TPPOAN_" + to_string(mpidata->rank) + "_" + to_string(myompthread)).c_str(), ios::out | ios::binary); + // fstream &tppoan_2 = *tppoanp_2; // make sure all threads align here: I don't want the following loop to accidentally start for thread 0, possibly modifying some variables before they are copied by all other threads #pragma omp barrier if (myompthread==0) logger->log("Syncing OpenMP threads and starting the loop on wavelengths\n"); // ok, now I can actually start the parallel calculations #pragma omp for for (int jxi488 = cid_2->firstxi; jxi488 <= cid_2->lastxi; jxi488++) { - int jer = cluster_jxi488_cycle(jxi488, sconf, gconf, p_scattering_angles, cid_2, p_output_2, output_path, *tppoanp_2); + int jer = cluster_jxi488_cycle(jxi488, sconf, gconf, p_scattering_angles, cid_2, p_output_2, output_path, vtppoanp_2); } #pragma omp barrier @@ -577,8 +610,8 @@ void cluster(const string& config_file, const string& data_file, const string& o // fclose(output_2); // p_output_2->write_to_disk(output_path + "/c_OCLU_" + to_string(mpidata->rank) + "_" + to_string(myompthread)); // delete p_output_2; - tppoanp_2->close(); - delete tppoanp_2; + // tppoanp_2->close(); + // delete tppoanp_2; #pragma omp barrier { string message = "INFO: Closing thread-local output files of thread " + to_string(myompthread) + " and syncing threads.\n"; @@ -590,14 +623,19 @@ void cluster(const string& config_file, const string& data_file, const string& o for (int ti=1; ti<ompnumthreads; ti++) { p_outarray[0]->append(*(p_outarray[ti])); delete p_outarray[ti]; + vtppoanarray[0]->append(*(vtppoanarray[ti])); + delete vtppoanarray[ti]; } p_outarray[0]->mpisend(mpidata); delete p_outarray[0]; delete[] p_outarray; + vtppoanarray[0]->mpisend(mpidata); + delete vtppoanarray[0]; + delete[] vtppoanarray; // // tell MPI process 0 how many threads we have on this process (not necessarily the same across all processes) - MPI_Send(&ompnumthreads, 1, MPI_INT, 0, 1, MPI_COMM_WORLD); + // MPI_Send(&ompnumthreads, 1, MPI_INT, 0, 1, MPI_COMM_WORLD); // // reopen local files, send them all to MPI process 0 - for (int ri = 0; ri < ompnumthreads; ri++) { + // for (int ri = 0; ri < ompnumthreads; ri++) { // string partial_file_name = output_path + "/c_OCLU_" + to_string(mpidata->rank) + "_" + to_string(ri); // string message = "Copying ASCII output in MPI process " + to_string(mpidata->rank) + " 
of thread " + to_string(ri) + " of " + to_string(ompnumthreads - 1) + "... "; // logger->log(message, LOG_DEBG); @@ -643,26 +681,26 @@ void cluster(const string& config_file, const string& data_file, const string& o // remove(partial_file_name.c_str()); // logger->log("done.\n", LOG_DEBG); - string partial_file_name = output_path + "/c_TPPOAN_" + to_string(mpidata->rank) + "_" + to_string(ri); - string message = "Copying binary output in MPI process " + to_string(mpidata->rank) + " of thread " + to_string(ri) + " of " + to_string(ompnumthreads - 1) + "... "; - logger->log(message, LOG_DEBG); - fstream partial_tppoan; - partial_tppoan.open(partial_file_name.c_str(), ios::in | ios::binary); - partial_tppoan.seekg(0, ios::end); - long buffer_size = partial_tppoan.tellg(); - char *binary_buffer = new char[buffer_size]; - partial_tppoan.seekg(0, ios::beg); - partial_tppoan.read(binary_buffer, buffer_size); - // tell MPI process 0 how large is the buffer - MPI_Send(&buffer_size, 1, MPI_LONG, 0, 1, MPI_COMM_WORLD); - // actually send the buffer - MPI_Send(binary_buffer, buffer_size, MPI_CHAR, 0, 0, MPI_COMM_WORLD); - // tppoan.write(binary_buffer, buffer_size); - partial_tppoan.close(); - delete[] binary_buffer; - remove(partial_file_name.c_str()); - logger->log("done.\n", LOG_DEBG); - } + // string partial_file_name = output_path + "/c_TPPOAN_" + to_string(mpidata->rank) + "_" + to_string(ri); + // string message = "Copying binary output in MPI process " + to_string(mpidata->rank) + " of thread " + to_string(ri) + " of " + to_string(ompnumthreads - 1) + "... "; + // logger->log(message, LOG_DEBG); + // fstream partial_tppoan; + // partial_tppoan.open(partial_file_name.c_str(), ios::in | ios::binary); + // partial_tppoan.seekg(0, ios::end); + // long buffer_size = partial_tppoan.tellg(); + // char *binary_buffer = new char[buffer_size]; + // partial_tppoan.seekg(0, ios::beg); + // partial_tppoan.read(binary_buffer, buffer_size); + // // tell MPI process 0 how large is the buffer + // MPI_Send(&buffer_size, 1, MPI_LONG, 0, 1, MPI_COMM_WORLD); + // // actually send the buffer + // MPI_Send(binary_buffer, buffer_size, MPI_CHAR, 0, 0, MPI_COMM_WORLD); + // // // tppoan.write(binary_buffer, buffer_size); + // partial_tppoan.close(); + // delete[] binary_buffer; + // remove(partial_file_name.c_str()); + // logger->log("done.\n", LOG_DEBG); + // } } // Clean memory delete cid; @@ -681,7 +719,7 @@ void cluster(const string& config_file, const string& data_file, const string& o delete logger; } -int cluster_jxi488_cycle(int jxi488, ScattererConfiguration *sconf, GeometryConfiguration *gconf, ScatteringAngles *sa, ClusterIterationData *cid, VirtualAsciiFile *output, const string& output_path, fstream& tppoan) +int cluster_jxi488_cycle(int jxi488, ScattererConfiguration *sconf, GeometryConfiguration *gconf, ScatteringAngles *sa, ClusterIterationData *cid, VirtualAsciiFile *output, const string& output_path, VirtualBinaryFile *vtppoanp) { int nxi = sconf->number_of_scales; char virtual_line[256]; @@ -893,7 +931,8 @@ int cluster_jxi488_cycle(int jxi488, ScattererConfiguration *sconf, GeometryConf s0mag = cabs(s0) * cs0; sprintf(virtual_line, " QSCHU=%15.7lE, PSCHU=%15.7lE, S0MAG=%15.7lE\n\0", qschu, pschu, s0mag); output->append_line(virtual_line); - tppoan.write(reinterpret_cast<char *>(&(cid->vk)), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&(cid->vk)), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(cid->vk)); pcrsm0(cid->vk, exri, inpol, cid->c1, cid->c1ao, cid->c4); 
apcra(cid->zpv, cid->c4->le, cid->c1ao->am0m, inpol, sqk, cid->gapm, cid->gappm); #ifdef USE_NVTX @@ -994,11 +1033,16 @@ int cluster_jxi488_cycle(int jxi488, ScattererConfiguration *sconf, GeometryConf jw = 1; } // label 196 - tppoan.write(reinterpret_cast<char *>(&th), sizeof(double)); - tppoan.write(reinterpret_cast<char *>(&ph), sizeof(double)); - tppoan.write(reinterpret_cast<char *>(&ths), sizeof(double)); - tppoan.write(reinterpret_cast<char *>(&phs), sizeof(double)); - tppoan.write(reinterpret_cast<char *>(&(cid->scan)), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&th), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(th)); + // tppoan.write(reinterpret_cast<char *>(&ph), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(ph)); + // tppoan.write(reinterpret_cast<char *>(&ths), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(ths)); + // tppoan.write(reinterpret_cast<char *>(&phs), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(phs)); + // tppoan.write(reinterpret_cast<char *>(&(cid->scan)), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(cid->scan)); if (jaw != 0) { jaw = 0; mextc(cid->vk, exri, cid->c1ao->fsacm, cid->cextlr, cid->cext); @@ -1006,31 +1050,41 @@ int cluster_jxi488_cycle(int jxi488, ScattererConfiguration *sconf, GeometryConf for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { double value = cid->cext[i][j]; - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); } } for (int i = 0; i < 2; i++) { double value = cid->c1ao->scscm[i]; - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = real(cid->c1ao->scscpm[i]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = imag(cid->c1ao->scscpm[i]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = cid->c1ao->ecscm[i]; - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = real(cid->c1ao->ecscpm[i]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = imag(cid->c1ao->ecscpm[i]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); } for (int i = 0; i < 3; i++) { for (int j = 0; j < 2; j++) { double value = cid->gapm[i][j]; - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = real(cid->gappm[i][j]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = imag(cid->gappm[i][j]); - tppoan.write(reinterpret_cast<char *>(&value), 
sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); } } sprintf(virtual_line, " CLUSTER (ENSEMBLE AVERAGE, MODE%2d)\n\0", iavm); @@ -1203,73 +1257,95 @@ int cluster_jxi488_cycle(int jxi488, ScattererConfiguration *sconf, GeometryConf for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { double value = cid->cext[i][j]; - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); } } for (int i = 0; i < 2; i++) { double value = cid->c1ao->scsc[i]; - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = real(cid->c1ao->scscp[i]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = imag(cid->c1ao->scscp[i]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = cid->c1ao->ecsc[i]; - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = real(cid->c1ao->ecscp[i]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = imag(cid->c1ao->ecscp[i]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); } for (int i = 0; i < 3; i++) { for (int j = 0; j < 2; j++) { double value = cid->gap[i][j]; - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = real(cid->gapp[i][j]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = imag(cid->gapp[i][j]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); } } for (int i = 0; i < 2; i++) { for (int j = 0; j < 3; j++) { double value = cid->tqce[i][j]; - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = real(cid->tqcpe[i][j]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = imag(cid->tqcpe[i][j]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); } } for (int i = 0; i < 2; i++) { for (int j = 0; j < 3; j++) { double value = cid->tqcs[i][j]; - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // 
tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = real(cid->tqcps[i][j]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = imag(cid->tqcps[i][j]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); } } for (int i = 0; i < 3; i++) { double value = cid->u[i]; - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = cid->up[i]; - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = cid->un[i]; - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); } } // label 254 for (int i = 0; i < 16; i++) { double value = real(cid->c1->vint[i]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = imag(cid->c1->vint[i]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); } for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { double value = cid->cmul[i][j]; - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); } } int jlr = 2; @@ -1414,14 +1490,17 @@ int cluster_jxi488_cycle(int jxi488, ScattererConfiguration *sconf, GeometryConf for (int i = 0; i < 16; i++) { double value; value = real(cid->c1ao->vintm[i]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); value = imag(cid->c1ao->vintm[i]); - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); } for (int i = 0; i < 4; i++) { for (int j = 0; j < 4; j++) { double value = cid->cmul[i][j]; - tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + // tppoan.write(reinterpret_cast<char *>(&value), sizeof(double)); + vtppoanp->append_line(VirtualBinaryLine(value)); } } sprintf(virtual_line, " CLUSTER (ENSEMBLE AVERAGE, MODE%2d)\n\0", iavm); diff --git a/src/include/file_io.h b/src/include/file_io.h index 3057bbf1..df475bf7 100644 --- a/src/include/file_io.h +++ b/src/include/file_io.h @@ -252,146 +252,157 @@ public: */ void mpisend(const mixMPI *mpidata); }; -#endif -// /*! \class VirtualBinaryLine -// * -// * \brief Virtual representation of a binary file line. -// */ -// class VirtualBinaryLine { +/*! \class VirtualBinaryLine + * + * \brief Virtual representation of a binary file line. + */ +class VirtualBinaryLine { // protected: // //! 
\brief The pointer to the piece of data to be written, cast to char * // char *_data_pointer; // //! \brief the size of the data block. // size_t _data_size; -// /*! \brief VirtualBinaryLine instance constructor. -// * -// * \param mydata: `int, double, long, float, complex, or dcomplex`piece of data to put in the line. -// */ -// VirtualBinaryLine(int mydata); -// VirtualBinaryLine(long mydata); -// VirtualBinaryLine(float mydata); -// VirtualBinaryLine(double mydata); -// VirtualBinaryLine(complex mydata); -// VirtualBinaryLine(dcomplex mydata); - -// /*! \brief VirtualBinaryLine copy constructor. -// * -// * \param rhs: `const VirtualBinaryLine&` Reference to a VirtualBinaryLine instance. -// */ -// VirtualBinaryLine(const VirtualBinaryLine& rhs); - -// /*! \brief VirtualBinaryLine instance constructor copying all contents off MPISend() calls from MPI process rr. -// * -// * \param mpidata: `mixMPI *` pointer to MPI data structure. -// * \param rr: `int` rank of the MPI process sending the data. -// */ -// VirtualBinaryLine(const mixMPI *mpidata, int rr); - -// /*! \brief VirtualBinaryLine instance destroyer. -// */ -// ~VirtualBinaryLine(); - -// /*! \brief Send VirtualBinaryLine instance to MPI process 0 via MPISend() calls. -// * -// * \param mpidata: `mixMPI *` pointer to MPI data structure. -// */ -// void mpisend(const mixMPI *mpidata); -// }; - - -// /*! \class VirtualBinaryFile -// * -// * \brief Virtual representation of a binary file. -// */ -// class VirtualBinaryFile { -// protected: -// //! \brief The number of lines. -// // int32_t _num_lines; -// //! \brief A vector of strings representing the file lines. -// std::vector<VirtualBinaryLine> *_file_lines; - -// public: -// // const int32_t &num_lines = _num_lines; -// /*! \brief VirtualBinaryFile instance constructor. -// * -// * \param lines: `int32_t` Number of lines, if known in advance (optional, default is 0). -// */ -// VirtualBinaryFile(int32_t lines = 0); - -// /*! \brief VirtualBinaryFile copy constructor. -// * -// * \param rhs: `const VirtualBinaryFile&` Reference to a VirtualBinaryFile instance. -// */ -// VirtualBinaryFile(const VirtualBinaryFile& rhs); - -// /*! \brief VirtualBinaryFile instance constructor copying all contents off MPISend() calls from MPI process rr. -// * -// * \param mpidata: `mixMPI *` pointer to MPI data structure. -// * \param rr: `int` rank of the MPI process sending the data. -// */ -// VirtualBinaryFile(const mixMPI *mpidata, int rr); - -// /*! \brief VirtualBinaryFile instance destroyer. -// */ -// ~VirtualBinaryFile(); - -// /*! \brief Append another VirtualBinaryFile at the end of the current instance. -// * -// * \param rhs: `const VirtualBinaryFile&` Reference to the VirtualBinaryFile to be appended. -// */ -// void append(VirtualBinaryFile& rhs); - -// /*! \brief Append a line at the end of the file. -// * -// * \param line: `const string&` Reference to a string representing the line. -// */ -// void append_line(const std::string& line); - -// /*! \brief Append the contents of the VirtualBinaryFile to a physical file on disk. -// * -// * \param file_name: `const string&` Name of the file to append contents to. -// * \return result: `int` A result code (0 if successful). -// */ -// int append_to_disk(const std::string& file_name); - -// /*! \brief Insert another VirtualBinaryFile at a given position. -// * -// * This function inserts a target VirtualBinaryFile in the current one at the given -// * position. 
Optionally, a range of lines to be inserted can be specified, otherwise -// * the full content of the target file is inserted. This function DOES NOT increase -// * the size of the inner storage and it can only be used if the inner storage has -// * already been adjusted to contain the insertion target. -// * -// * \param position: `int32_t` The position at which the other file is inserted in this one. -// * \param rhs: `const VirtualBinaryFile&` The refence to the VirtualBinaryFile to be inserted. -// * \param start: `int32_t` The first line to be inserted (optional, default is 0). -// * \param end: `int32_t` The last line to be inserted (optional, default is 0 to read all). -// * \param line: `const string&` Reference to a string representing the line. -// * \return result: `int` A result code (0 if successful). -// */ -// int insert(int32_t position, VirtualBinaryFile& rhs, int32_t start = 0, int32_t end = 0); + +public: + //! \brief The pointer to the piece of data to be written, cast to char * + char *_data_pointer; + //! \brief the size of the data block. + size_t _data_size; + //! \brief Read only view of `_data_pointer`. + const char* data_pointer = _data_pointer; + //! \brief Read only view of `_data_size`. + const size_t & data_size = _data_size; + + /*! \brief VirtualBinaryLine instance constructor. + * + * \param mydata: `int, double, long, float, complex, or dcomplex`piece of data to put in the line. + */ + VirtualBinaryLine(int mydata); + VirtualBinaryLine(long mydata); + VirtualBinaryLine(float mydata); + VirtualBinaryLine(double mydata); + // VirtualBinaryLine(complex mydata); + VirtualBinaryLine(dcomplex mydata); + + /*! \brief VirtualBinaryLine copy constructor. + * + * \param rhs: `const VirtualBinaryLine&` Reference to a VirtualBinaryLine instance. + */ + VirtualBinaryLine(const VirtualBinaryLine& rhs); + + /*! \brief VirtualBinaryLine instance constructor copying all contents off MPISend() calls from MPI process rr. + * + * \param mpidata: `mixMPI *` pointer to MPI data structure. + * \param rr: `int` rank of the MPI process sending the data. + */ + VirtualBinaryLine(const mixMPI *mpidata, int rr); + + /*! \brief VirtualBinaryLine instance destroyer. + */ + ~VirtualBinaryLine(); + + /*! \brief Send VirtualBinaryLine instance to MPI process 0 via MPISend() calls. + * + * \param mpidata: `mixMPI *` pointer to MPI data structure. + */ + void mpisend(const mixMPI *mpidata); +}; + + +/*! \class VirtualBinaryFile + * + * \brief Virtual representation of a binary file. + */ +class VirtualBinaryFile { +protected: + //! \brief The number of lines. + // int32_t _num_lines; + // //! \brief A vector of strings representing the file lines. + // std::vector<VirtualBinaryLine> *_file_lines; + +public: + //! \brief A vector of strings representing the file lines. + std::vector<VirtualBinaryLine> *_file_lines; + // const int32_t &num_lines = _num_lines; + /*! \brief VirtualBinaryFile empty instance constructor. + * + */ + VirtualBinaryFile(); + + /*! \brief VirtualBinaryFile copy constructor. + * + * \param rhs: `const VirtualBinaryFile&` Reference to a VirtualBinaryFile instance. + */ + VirtualBinaryFile(const VirtualBinaryFile& rhs); + + /*! \brief VirtualBinaryFile instance constructor copying all contents off MPISend() calls from MPI process rr. + * + * \param mpidata: `mixMPI *` pointer to MPI data structure. + * \param rr: `int` rank of the MPI process sending the data. + */ + VirtualBinaryFile(const mixMPI *mpidata, int rr); + + /*! \brief VirtualBinaryFile instance destroyer. 
+ */ + ~VirtualBinaryFile(); + + /*! \brief Append another VirtualBinaryFile at the end of the current instance. + * + * \param rhs: `const VirtualBinaryFile&` Reference to the VirtualBinaryFile to be appended. + */ + void append(VirtualBinaryFile& rhs); + + /*! \brief Append a line at the end of the file. + * + * \param line: `const string&` Reference to a string representing the line. + */ + void append_line(const VirtualBinaryLine& line); + + /*! \brief Append the contents of the VirtualBinaryFile to a physical file on disk. + * + * \param file_name: `const string&` Name of the file to append contents to. + * \return result: `int` A result code (0 if successful). + */ + int append_to_disk(const std::string& file_name); + + // /*! \brief Insert another VirtualBinaryFile at a given position. + // * + // * This function inserts a target VirtualBinaryFile in the current one at the given + // * position. Optionally, a range of lines to be inserted can be specified, otherwise + // * the full content of the target file is inserted. This function DOES NOT increase + // * the size of the inner storage and it can only be used if the inner storage has + // * already been adjusted to contain the insertion target. + // * + // * \param position: `int32_t` The position at which the other file is inserted in this one. + // * \param rhs: `const VirtualBinaryFile&` The refence to the VirtualBinaryFile to be inserted. + // * \param start: `int32_t` The first line to be inserted (optional, default is 0). + // * \param end: `int32_t` The last line to be inserted (optional, default is 0 to read all). + // * \param line: `const string&` Reference to a string representing the line. + // * \return result: `int` A result code (0 if successful). + // */ + // int insert(int32_t position, VirtualBinaryFile& rhs, int32_t start = 0, int32_t end = 0); -// /*! \brief Get the number of lines in the current instance. -// * -// * \return size: `int32_t` The number of lines in the VirtualBinaryFile instance. -// */ -// int32_t number_of_lines() { return _file_lines->size(); } + /*! \brief Get the number of lines in the current instance. + * + * \return size: `int32_t` The number of lines in the VirtualBinaryFile instance. + */ + int32_t number_of_lines() { return _file_lines->size(); } -// /*! \brief Write virtual file contents to a real file on disk. -// * -// * \param file_name: `const string&` Name of the file to append contents to. -// * \return result: `int` A result code (0 if successful). -// */ -// int write_to_disk(const std::string& file_name); - -// /*! \brief Send VirtualBinaryFile instance to MPI process 0 via MPISend() calls. -// * -// * \param mpidata: `mixMPI *` pointer to MPI data structure. -// */ -// void mpisend(const mixMPI *mpidata); -// }; -//#endif + /*! \brief Write virtual file contents to a real file on disk. + * + * \param file_name: `const string&` Name of the file to append contents to. + * \return result: `int` A result code (0 if successful). + */ + int write_to_disk(const std::string& file_name); + + /*! \brief Send VirtualBinaryFile instance to MPI process 0 via MPISend() calls. + * + * \param mpidata: `mixMPI *` pointer to MPI data structure. 
+ */ + void mpisend(const mixMPI *mpidata); +}; +#endif diff --git a/src/libnptm/file_io.cpp b/src/libnptm/file_io.cpp index 65d4215b..46fe4659 100644 --- a/src/libnptm/file_io.cpp +++ b/src/libnptm/file_io.cpp @@ -16,6 +16,10 @@ #include "../include/List.h" #endif +#ifndef INCLUDE_TYPES_H_ +#include "../include/types.h" +#endif + #ifndef INCLUDE_FILE_IO_H_ #include "../include/file_io.h" #endif @@ -301,6 +305,7 @@ int VirtualAsciiFile::append_to_disk(const std::string& file_name) { for (vector<string>::iterator it = _file_lines->begin(); it != _file_lines->end(); ++it) { output_file << *it; } + output_file.close(); } else { result = 1; } @@ -334,6 +339,7 @@ int VirtualAsciiFile::write_to_disk(const std::string& file_name) { for (vector<string>::iterator it = _file_lines->begin(); it != _file_lines->end(); ++it) { output_file << *it; } + output_file.close(); } else { result = 1; } @@ -361,45 +367,203 @@ void VirtualAsciiFile::mpisend(const mixMPI *mpidata) { /* >>> End of VirtualAsciiFile class implementation <<< */ // /* >>> VirtualBinaryLine class implementation <<< */ -// VirtualBinaryLine::VirtualBinaryLine(int mydata) { -// _data_size = sizeof(mydata) -// int *buffer = malloc(_data_size); -// *buffer = mydata; -// _data_pointer = reinterpret_cast<char *>(buffer); -// } +VirtualBinaryLine::VirtualBinaryLine(int mydata) { + _data_size = sizeof(mydata); + int *buffer = (int *) malloc(_data_size); + *buffer = mydata; + _data_pointer = reinterpret_cast<char *>(buffer); +} -// VirtualBinaryLine::VirtualBinaryLine(double mydata) { -// _data_size = sizeof(mydata) -// double *buffer = malloc(_data_size); -// *buffer = mydata; -// _data_pointer = reinterpret_cast<char *>(buffer); -// } +VirtualBinaryLine::VirtualBinaryLine(double mydata) { + _data_size = sizeof(mydata); + double *buffer = (double *) malloc(_data_size); + *buffer = mydata; + _data_pointer = reinterpret_cast<char *>(buffer); +} -// VirtualBinaryLine::VirtualBinaryLine(float mydata) { -// _data_size = sizeof(mydata) -// float *buffer = malloc(_data_size); -// *buffer = mydata; -// _data_pointer = reinterpret_cast<char *>(buffer); -// } +VirtualBinaryLine::VirtualBinaryLine(float mydata) { + _data_size = sizeof(mydata); + float *buffer = (float *) malloc(_data_size); + *buffer = mydata; + _data_pointer = reinterpret_cast<char *>(buffer); +} -// VirtualBinaryLine::VirtualBinaryLine(long mydata) { -// _data_size = sizeof(mydata) -// long *buffer = malloc(_data_size); -// *buffer = mydata; -// _data_pointer = reinterpret_cast<char *>(buffer); -// } +VirtualBinaryLine::VirtualBinaryLine(long mydata) { + _data_size = sizeof(mydata); + long *buffer = (long *) malloc(_data_size); + *buffer = mydata; + _data_pointer = reinterpret_cast<char *>(buffer); +} -// VirtualBinaryLine::VirtualBinaryLine(dcomplex mydata) { -// _data_size = sizeof(mydata) -// dcomplex *buffer = malloc(_data_size); -// *buffer = mydata; -// _data_pointer = reinterpret_cast<char *>(buffer); -// } +VirtualBinaryLine::VirtualBinaryLine(dcomplex mydata) { + _data_size = sizeof(mydata); + dcomplex *buffer = (dcomplex *) malloc(_data_size); + *buffer = mydata; + _data_pointer = reinterpret_cast<char *>(buffer); +} // VirtualBinaryLine::VirtualBinaryLine(complex mydata) { -// _data_size = sizeof(mydata) -// complex *buffer = malloc(_data_size); +// _data_size = sizeof(mydata); +// complex *buffer = (complex *) malloc(_data_size); // *buffer = mydata; // _data_pointer = reinterpret_cast<char *>(buffer); // } +VirtualBinaryLine::VirtualBinaryLine(const VirtualBinaryLine& rhs) 
{ + _data_size = rhs._data_size; + _data_pointer = reinterpret_cast<char *>(malloc(rhs._data_size)); + memcpy(_data_pointer, rhs._data_pointer, _data_size); +} + +#ifdef MPI_VERSION +VirtualBinaryLine::VirtualBinaryLine(const mixMPI *mpidata, int rr) { + // receive mysize from MPI process rr + int32_t mysize; + MPI_Recv(&mysize, 1, MPI_INT32_T, rr, 10, MPI_COMM_WORLD, MPI_STATUS_IGNORE); + _data_size = mysize; + // allocate the buffer accordingly + _data_pointer = reinterpret_cast<char *>(malloc(mysize)); + // receive the char buffer + MPI_Recv(_data_pointer, mysize, MPI_CHAR, rr, 10, MPI_COMM_WORLD, MPI_STATUS_IGNORE); +} + +#endif + +VirtualBinaryLine::~VirtualBinaryLine() { + if (_data_pointer != NULL) { + free(_data_pointer); + _data_pointer = NULL; + } +} + +#ifdef MPI_VERSION +void VirtualBinaryLine::mpisend(const mixMPI *mpidata) { + // Send VirtualBinaryLine instance to MPI process 0 via MPISend() calls + // first send the size + int32_t mysize = _data_size; + MPI_Send(&mysize, 1, MPI_INT32_T, 0, 10, MPI_COMM_WORLD); + // now send the data + MPI_Send(_data_pointer, mysize, MPI_CHAR, 0, 10, MPI_COMM_WORLD); +} +#endif +/* >>> End of VirtualBinaryLine class implementation <<< */ + + +/* >>> VirtualBinaryFile class implementation <<< */ +VirtualBinaryFile::VirtualBinaryFile() { + _file_lines = new vector<VirtualBinaryLine>(); +} + +VirtualBinaryFile::VirtualBinaryFile(const VirtualBinaryFile& rhs) { + // _num_lines = rhs._num_lines; + _file_lines = new vector<VirtualBinaryLine>(); + for (vector<VirtualBinaryLine>::iterator it = rhs._file_lines->begin(); it != rhs._file_lines->end(); ++it) { + _file_lines->push_back(VirtualBinaryLine(*it)); + } +} + +#ifdef MPI_VERSION +VirtualBinaryFile::VirtualBinaryFile(const mixMPI *mpidata, int rr) { + // receive _num_lines from MPI process rr + int32_t num_lines; + MPI_Recv(&num_lines, 1, MPI_INT32_T, rr, 10, MPI_COMM_WORLD, MPI_STATUS_IGNORE); + _file_lines = new vector<VirtualBinaryLine>(); + // loop over data to receive + for (int32_t zi=0; zi<num_lines; zi++) { + // receive the line of data + _file_lines->push_back(VirtualBinaryLine(mpidata, rr)); + } +} +#endif + +VirtualBinaryFile::~VirtualBinaryFile() { + // is it necessary to pop them out one by one? isn't there the dedicated method of std::vector to clean the vector? + // besides, shouldn't this be done anyway by the destructor of std:vector? + // for (vector<VirtualBinaryLine>::iterator it = _file_lines->begin(); it != _file_lines->end(); ++it) { + // delete it; + // } + while (!_file_lines->size() > 0) { + _file_lines->pop_back(); + } + if (_file_lines != NULL) delete _file_lines; +} + +void VirtualBinaryFile::append(VirtualBinaryFile& rhs) { + // concatenate the virtualasciifile pointed by rhs to the current one + // can't we use the dedicated method insert of std::vector to do the appending, instead of an explicit loop? + for (vector<VirtualBinaryLine>::iterator it = rhs._file_lines->begin(); it != rhs._file_lines->end(); ++it) { + _file_lines->push_back(VirtualBinaryLine(*it)); + } +} + +void VirtualBinaryFile::append_line(const VirtualBinaryLine& line) { + // would it be worth reimplementing a sprintf-like method, so that we can give it all the arguments we would give to sprintf and get rid of the intermediate buffer completely? 
+ // append a line of output to the virtualasciifile + _file_lines->push_back(VirtualBinaryLine(line)); +} + +int VirtualBinaryFile::append_to_disk(const std::string& file_name) { + // dump to disk the contents of the virtualasciifile, appending at the end of the given file_name + int result = 0; + fstream output_file; + output_file.open(file_name, ios::app | ios::binary); + if (output_file.is_open()) { + for (vector<VirtualBinaryLine>::iterator it = _file_lines->begin(); it != _file_lines->end(); ++it) { + output_file.write(it->_data_pointer, it->_data_size); + } + output_file.close(); + } else { + result = 1; + } + return result; +} + +// int VirtualBinaryFile::insert(int32_t position, VirtualBinaryFile& rhs, int32_t start, int32_t end) { +// int result = 0; +// if (start == 0 && end == 0) { +// end = rhs.number_of_lines(); +// } +// int32_t final_index = position + end - start; +// if (final_index <= number_of_lines()) { +// for (int32_t li = start; li < end; li++) { +// // since here we are replacing the previous placeholder empty strings, make sure they are properly released when they are replaced (i.e. try it with a simple hello world example and pass it through valgrind) +// VirtualBinaryLine templine = VirtualBinaryLine(rhs._file_lines->at(li)); +// _file_lines->at(position++) = templine; +// } +// } else { +// // ERROR: target file is too long; +// result = 1; +// } +// return result; +// } + +int VirtualBinaryFile::write_to_disk(const std::string& file_name) { + // dump to disk the contents of the virtualasciifile, replacing the given file_name + int result = 0; + fstream output_file; + output_file.open(file_name, ios::out | ios::binary); + if (output_file.is_open()) { + for (vector<VirtualBinaryLine>::iterator it = _file_lines->begin(); it != _file_lines->end(); ++it) { + output_file.write(it->_data_pointer, it->_data_size); + } + output_file.close(); + } else { + result = 1; + } + return result; +} + +#ifdef MPI_VERSION +void VirtualBinaryFile::mpisend(const mixMPI *mpidata) { + // Send VirtualBinaryFile instance to MPI process 0 via MPISend() calls + // first send the size + int32_t num_lines = _file_lines->size(); + MPI_Send(&num_lines, 1, MPI_INT32_T, 0, 10, MPI_COMM_WORLD); + // now loop over data to send + for (vector<VirtualBinaryLine>::iterator it = _file_lines->begin(); it != _file_lines->end(); ++it) { + it->mpisend(mpidata); + } +} +#endif + +/* >>> End of VirtualBinaryFile class implementation <<< */ -- GitLab