Skip to content
Snippets Groups Projects
Commit 96e2224f authored by Mulas, Giacomo's avatar Mulas, Giacomo
Browse files

prepare moving to writing virtual files to disk at each MPI iteration

parent 81198925
No related branches found
No related tags found
No related merge requests found
......@@ -250,14 +250,10 @@ void cluster(const string& config_file, const string& data_file, const string& o
int nths = p_scattering_angles->nths;
int nph = p_scattering_angles->nph;
int nphs = p_scattering_angles->nphs;
// logger->log("INFO: size of vtppoanp->_file_lines before first append is "+to_string((vtppoanp->_file_lines)->size())+"\n");
// tppoan.write(reinterpret_cast<char *>(&iavm), sizeof(int));
vtppoanp->append_line(VirtualBinaryLine(iavm));
// logger->log("INFO: size of vtppoanp->_file_lines after first append is "+to_string((vtppoanp->_file_lines)->size())+"\n");
// tppoan.write(reinterpret_cast<char *>(&isam), sizeof(int));
vtppoanp->append_line(VirtualBinaryLine(isam));
// logger->log("INFO: size of vtppoanp->_file_lines after second append is "+to_string((vtppoanp->_file_lines)->size())+"\n");
// logger->log("INFO: vtppoanp->_file_lines[0]._data_size is " + to_string(((vtppoanp->_file_lines)->at(0))._data_size)+"\n");
// tppoan.write(reinterpret_cast<char *>(&inpol), sizeof(int));
vtppoanp->append_line(VirtualBinaryLine(inpol));
// tppoan.write(reinterpret_cast<char *>(&nxi), sizeof(int));
......@@ -310,6 +306,15 @@ void cluster(const string& config_file, const string& data_file, const string& o
delete gconf;
return;
}
// write the first outputs here, so that the new files are created at this point; afterwards we only append
p_output->write_to_disk(output_path + "/c_OCLU");
// reallocate a new one (even if it would be more efficient to empty the existing one)
delete p_output;
p_output = new VirtualAsciiFile();
// now tppoan
vtppoanp->write_to_disk(output_path + "/c_TPPOAN");
delete vtppoanp;
vtppoanp = new VirtualBinaryFile();
// here go the calls that send data to be duplicated on other MPI processes from process 0 to others, using MPI broadcasts, but only if MPI is actually used
#ifdef MPI_VERSION
......@@ -408,10 +413,10 @@ void cluster(const string& config_file, const string& data_file, const string& o
vtppoanarray[0]->append(*(vtppoanarray[ti]));
delete vtppoanarray[ti];
}
p_outarray[0]->write_to_disk(output_path + "/c_OCLU");
p_outarray[0]->append_to_disk(output_path + "/c_OCLU");
delete p_outarray[0];
delete[] p_outarray;
vtppoanarray[0]->write_to_disk(output_path + "/c_TPPOAN_bis");
vtppoanarray[0]->append_to_disk(output_path + "/c_TPPOAN");
delete vtppoanarray[0];
delete[] vtppoanarray;
// for (int ri = 1; ri < ompnumthreads; ri++) {
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment