diff --git a/docs/output_overview.rst b/docs/output_overview.rst
index 8ece198f36..55f88410dd 100644
--- a/docs/output_overview.rst
+++ b/docs/output_overview.rst
@@ -134,6 +134,32 @@ The .config.h5 file
 
 This file contains stored walker configurations.
 
+* GROUP "root"
+  * GROUP "state_0"
+    * DATASET "block"
+      * int
+      * SCALAR
+
+    * DATASET "number_of_walkers"
+      * size_t
+      * SCALAR
+
+    * DATASET "walker_partition"
+      * int
+      * ARRAY ( offsets )
+
+    * DATASET "walker_weights"
+      * double
+      * ARRAY ( weights )
+
+    * DATASET "walkers"
+      * double
+      * ARRAY ( configurations )
+
+  * DATASET "version"
+    * int
+    * ARRAY ( major version number, minor version number )
+
 The .random.h5 file
 ~~~~~~~~~~~~~~~~~~~
 
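The datasets documented above can be sanity-checked with any HDF5 reader. The following standalone sketch, which is illustrative only and not part of this patch, opens them with the plain HDF5 C API; the file name qmc.config.h5 is a placeholder, and the weights are read as double to match the layout listed above.

// Sketch: inspect a .config.h5 checkpoint with the HDF5 C API (not QMCPACK code).
#include <hdf5.h>
#include <cstdio>
#include <vector>

int main()
{
  hid_t file = H5Fopen("qmc.config.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
  if (file < 0)
    return 1;

  int block = -1;
  hsize_t nwalkers = 0;
  hid_t d = H5Dopen2(file, "/state_0/block", H5P_DEFAULT);
  H5Dread(d, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &block);
  H5Dclose(d);
  d = H5Dopen2(file, "/state_0/number_of_walkers", H5P_DEFAULT);
  H5Dread(d, H5T_NATIVE_HSIZE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &nwalkers);
  H5Dclose(d);

  // walker_weights is only present in files written after this change.
  std::vector<double> weights(nwalkers, 1.0);
  if (H5Lexists(file, "/state_0/walker_weights", H5P_DEFAULT) > 0)
  {
    d = H5Dopen2(file, "/state_0/walker_weights", H5P_DEFAULT);
    H5Dread(d, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, weights.data());
    H5Dclose(d);
  }

  std::printf("block %d, %llu walkers, first weight %g\n", block,
              static_cast<unsigned long long>(nwalkers), nwalkers ? weights[0] : 0.0);
  H5Fclose(file);
  return 0;
}

QMCPACK itself reads walker_weights back through hdf_archive::readEntry (see the HDFWalkerInput_0_4 changes below), so checkpoint files written before this change, which do not contain the dataset, remain readable.
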
diff --git a/src/Particle/HDFWalkerInput_0_4.cpp b/src/Particle/HDFWalkerInput_0_4.cpp
index 842c83fbe0..d1f01d18ca 100644
--- a/src/Particle/HDFWalkerInput_0_4.cpp
+++ b/src/Particle/HDFWalkerInput_0_4.cpp
@@ -147,10 +147,11 @@ bool HDFWalkerInput_0_4::read_hdf5(const std::filesystem::path& h5name)
   }
 
   using Buffer_t = std::vector<QMCTraits::RealType>;
-  Buffer_t posin;
   std::array<size_t, 3> dims{nw_in, num_ptcls_, OHMMS_DIM};
-  posin.resize(dims[0] * dims[1] * dims[2]);
+  Buffer_t posin(dims[0] * dims[1] * dims[2]);
   hin.readSlabReshaped(posin, dims, hdf::walkers);
+  std::vector<QMCTraits::FullPrecRealType> weights_in(nw_in);
+  const bool has_weights = hin.readEntry(weights_in, hdf::walker_weights);
 
   std::vector<int> woffsets;
   hin.read(woffsets, "walker_partition");
@@ -168,11 +169,15 @@ bool HDFWalkerInput_0_4::read_hdf5(const std::filesystem::path& h5name)
     const int nitems    = num_ptcls_ * OHMMS_DIM;
     const int curWalker = wc_list_.getActiveWalkers();
     wc_list_.createWalkers(nw_in, num_ptcls_);
-    Buffer_t::iterator it(posin.begin() + woffsets[myComm->rank()] * nitems);
-    for (int i = 0, iw = curWalker; i < nw_in; ++i, ++iw)
+
+    auto it = posin.begin() + woffsets[myComm->rank()] * nitems;
+    for (int i = 0; i < nw_in; ++i, it += nitems)
+      copy(it, it + nitems, get_first_address(wc_list_[i + curWalker]->R));
+    if (has_weights)
     {
-      copy(it, it + nitems, get_first_address(wc_list_[iw]->R));
-      it += nitems;
+      const auto woffset = woffsets[myComm->rank()];
+      for (int i = 0; i < nw_in; ++i)
+        wc_list_[i + curWalker]->Weight = weights_in[i + woffset];
     }
   }
 
@@ -215,38 +220,53 @@ bool HDFWalkerInput_0_4::read_hdf5_scatter(const std::filesystem::path& h5name)
   using Buffer_t = std::vector<QMCTraits::RealType>;
 
   const int np1 = myComm->size() + 1;
-  std::vector<int> counts(myComm->size()), woffsets(np1, 0);
-  FairDivideLow(nw_in, myComm->size(), woffsets);
+  std::vector<int> woffsets_weights(np1, 0);
+  FairDivideLow(nw_in, myComm->size(), woffsets_weights);
 
-  const size_t nw_loc = woffsets[myComm->rank() + 1] - woffsets[myComm->rank()];
+  std::vector<int> counts_weights(myComm->size());
+  for (int i = 0; i < counts_weights.size(); ++i)
+    counts_weights[i] = woffsets_weights[i + 1] - woffsets_weights[i];
 
+  // walker counts and offsets for electron coordinates
+  std::vector<int> woffsets(np1, 0);
   const int nitems = num_ptcls_ * OHMMS_DIM;
   for (int i = 0; i < woffsets.size(); ++i)
-    woffsets[i] *= nitems;
+    woffsets[i] = nitems * woffsets_weights[i];
 
+  std::vector<int> counts(myComm->size());
   for (int i = 0; i < counts.size(); ++i)
    counts[i] = woffsets[i + 1] - woffsets[i];
 
   std::array<size_t, 3> dims{nw_in, num_ptcls_, OHMMS_DIM};
-  Buffer_t posin(nw_in * nitems), posout(counts[myComm->rank()]);
-
+  Buffer_t posin(nw_in * nitems);
+  std::vector<QMCTraits::FullPrecRealType> weights_in(nw_in);
+  bool has_weights{false};
   if (myComm->rank() == 0)
   {
     hdf_archive hin(myComm);
     bool success = hin.open(h5name, H5F_ACC_RDONLY);
     hin.push(hdf::main_state);
     hin.readSlabReshaped(posin, dims, hdf::walkers);
+    has_weights = hin.readEntry(weights_in, hdf::walker_weights);
   }
 
+  Buffer_t posout(counts[myComm->rank()]);
+  std::vector<QMCTraits::FullPrecRealType> weights_out(counts_weights[myComm->rank()]);
   mpi::scatterv(*myComm, posin, posout, counts, woffsets);
+  mpi::bcast(*myComm, has_weights);
+  if (has_weights)
+    mpi::scatterv(*myComm, weights_in, weights_out, counts_weights, woffsets_weights);
+
+  const size_t nw_loc = woffsets_weights[myComm->rank() + 1] - woffsets_weights[myComm->rank()];
 
   const int curWalker = wc_list_.getActiveWalkers();
   wc_list_.createWalkers(nw_loc, num_ptcls_);
-  Buffer_t::iterator it(posout.begin());
-  for (int i = 0, iw = curWalker; i < nw_loc; ++i, ++iw)
-  {
-    copy(it, it + nitems, get_first_address(wc_list_[iw]->R));
-    it += nitems;
-  }
+
+  auto it = posout.begin();
+  for (int i = 0; i < nw_loc; ++i, it += nitems)
+    std::copy(it, it + nitems, get_first_address(wc_list_[i + curWalker]->R));
+  if (has_weights)
+    for (int i = 0; i < nw_loc; ++i)
+      wc_list_[i + curWalker]->Weight = weights_out[i];
   return true;
 }
@@ -259,7 +279,7 @@ bool HDFWalkerInput_0_4::read_phdf5(const std::filesystem::path& h5name)
   {
     // handle small dataset with master rank
     hdf_archive hin(myComm, false);
-    if (!myComm->rank())
+    if (myComm->rank() == 0)
     {
       success = hin.open(h5name, H5F_ACC_RDONLY);
       //check if hdf and xml versions can work together
@@ -289,7 +309,7 @@ bool HDFWalkerInput_0_4::read_phdf5(const std::filesystem::path& h5name)
 
   // load woffsets by master
   // can not read collectively since the size may differ from Nranks+1.
-  if (!myComm->rank())
+  if (myComm->rank() == 0)
   {
     hin.read(woffsets, "walker_partition");
     woffsets_size = woffsets.size();
@@ -308,8 +328,8 @@ bool HDFWalkerInput_0_4::read_phdf5(const std::filesystem::path& h5name)
   hin.push(hdf::main_state);
 
   using Buffer_t = std::vector<QMCTraits::RealType>;
-  Buffer_t posin;
   std::array<size_t, 3> dims{nw_in, num_ptcls_, OHMMS_DIM};
+  std::array<size_t, 1> dims_w{nw_in};
 
   if (woffsets.size() != myComm->size() + 1)
   {
@@ -320,24 +340,30 @@ bool HDFWalkerInput_0_4::read_phdf5(const std::filesystem::path& h5name)
 
   const size_t nw_loc = woffsets[myComm->rank() + 1] - woffsets[myComm->rank()];
   std::array<size_t, 3> counts{nw_loc, num_ptcls_, OHMMS_DIM};
+  std::array<size_t, 1> counts_w{nw_loc};
   std::array<size_t, 3> offsets{static_cast<size_t>(woffsets[myComm->rank()]), 0, 0};
-  posin.resize(nw_loc * dims[1] * dims[2]);
+  std::array<size_t, 1> offsets_w{static_cast<size_t>(woffsets[myComm->rank()])};
+  Buffer_t posin(nw_loc * dims[1] * dims[2]);
+  std::vector<QMCTraits::FullPrecRealType> weights_in(nw_loc);
 
   hyperslab_proxy<Buffer_t, 3> slab(posin, dims, counts, offsets);
   hin.read(slab, hdf::walkers);
+  hyperslab_proxy<std::vector<QMCTraits::FullPrecRealType>, 1> slab_w(weights_in, dims_w, counts_w, offsets_w);
+  const bool has_weights = hin.readEntry(slab_w, hdf::walker_weights);
+
   app_log() << " HDFWalkerInput_0_4::put getting " << dims[0] << " walkers " << posin.size() << std::endl;
   nw_in = woffsets[myComm->rank() + 1] - woffsets[myComm->rank()];
 
   {
     const int nitems    = num_ptcls_ * OHMMS_DIM;
     const int curWalker = wc_list_.getActiveWalkers();
     wc_list_.createWalkers(nw_in, num_ptcls_);
-    Buffer_t::iterator it(posin.begin());
-    for (int i = 0, iw = curWalker; i < nw_in; ++i, ++iw)
-    {
-      copy(it, it + nitems, get_first_address(wc_list_[iw]->R));
-      it += nitems;
-    }
+    auto it = posin.begin();
+    for (int i = 0; i < nw_in; ++i, it += nitems)
+      copy(it, it + nitems, get_first_address(wc_list_[i + curWalker]->R));
+    if (has_weights)
+      for (int i = 0; i < nw_in; ++i)
+        wc_list_[i + curWalker]->Weight = weights_in[i];
   }
   return true;
 }
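The scatter path above keeps two parallel sets of counts and offsets: counts_weights and woffsets_weights in units of walkers, used to scatter the per-walker weights, and counts and woffsets scaled by num_ptcls_ * OHMMS_DIM, used to scatter the flattened coordinate buffer. The toy program below is only an illustration of that bookkeeping; fair_divide is a simplified stand-in for QMCPACK's FairDivideLow and may assign the remainder walkers to different ranks.

// Illustration only: the two count/offset sets used by read_hdf5_scatter.
#include <cstdio>
#include <vector>

// Split nw walkers over nranks as evenly as possible; offsets gets nranks+1 entries.
static void fair_divide(int nw, int nranks, std::vector<int>& offsets)
{
  offsets.assign(nranks + 1, 0);
  for (int r = 0; r < nranks; ++r)
    offsets[r + 1] = offsets[r] + nw / nranks + (r < nw % nranks ? 1 : 0);
}

int main()
{
  const int nw_in = 10, nranks = 3, num_ptcls = 2, ndim = 3;
  const int nitems = num_ptcls * ndim; // reals per walker in the flattened R array

  std::vector<int> woffsets_weights;
  fair_divide(nw_in, nranks, woffsets_weights); // walker offsets: 0 4 7 10

  std::vector<int> counts_weights(nranks), counts(nranks), woffsets(nranks + 1);
  for (int r = 0; r <= nranks; ++r)
    woffsets[r] = nitems * woffsets_weights[r]; // element offsets into posin
  for (int r = 0; r < nranks; ++r)
  {
    counts_weights[r] = woffsets_weights[r + 1] - woffsets_weights[r]; // walkers per rank
    counts[r]         = woffsets[r + 1] - woffsets[r];                 // reals per rank
  }

  for (int r = 0; r < nranks; ++r)
    std::printf("rank %d: %d walkers (weight scatter), %d reals (position scatter)\n",
                r, counts_weights[r], counts[r]);
  return 0;
}

For 10 walkers on 3 ranks with 2 particles in 3 dimensions it prints 4/3/3 walkers and 24/18/18 reals per rank, the shapes handed to the weight and position scatterv calls respectively.
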
diff --git a/src/Particle/HDFWalkerOutput.cpp b/src/Particle/HDFWalkerOutput.cpp
index 3d4fb528be..a64e7075a1 100644
--- a/src/Particle/HDFWalkerOutput.cpp
+++ b/src/Particle/HDFWalkerOutput.cpp
@@ -40,6 +40,10 @@ namespace qmcplusplus
  * so that subsequent write can utilize existing dataspaces.
  * HDF5 contains
  * - state_0
+ *   -- block
+ *   -- number_of_walkers
+ *   -- walker_partition
+ *   -- walker_weights
 *   -- walkers
  * - config_collection
  *   -- NumOfConfigurations current count of the configurations
@@ -56,20 +60,9 @@ HDFWalkerOutput::HDFWalkerOutput(size_t num_ptcls, const std::string& aroot, Communicate* c)
       number_of_particles_(num_ptcls),
       myComm(c),
       currentConfigNumber(0),
-      RootName(aroot),
-      RemoteData(2)
-// , fw_out(myComm)
+      RootName(aroot)
 {
   block = -1;
-  //     //FileName=myComm->getName()+hdf::config_ext;
-  //     //ConfigFileName=myComm->getName()+".storeConfig.h5";
-  //     std::string ConfigFileName=myComm->getName()+".storeConfig.h5";
-  //     HDFVersion cur_version;
-  //     int dim=OHMMS_DIM;
-  //     fw_out.create(ConfigFileName);
-  //     fw_out.write(cur_version.version,hdf::version);
-  //     fw_out.write(number_of_particles_,"NumberElectrons");
-  //     fw_out.write(dim,"DIM");
 }
 
 /** Destructor writes the state of random numbers and close the file */
@@ -82,7 +75,7 @@ HDFWalkerOutput::~HDFWalkerOutput() = default;
  * - version
 * - state_0
 *   - block (int)
-*   - number_of_walkes (int)
+*   - number_of_walkers (int)
 *   - walker_partition (int array)
 *   - walkers (nw,np,3)
 */
@@ -120,15 +113,14 @@ void HDFWalkerOutput::write_configuration(const WalkerConfigurations& W, hdf_archive& hout, int nblock)
   if (nblock > block)
   {
     RemoteData[0].resize(wb * W.getActiveWalkers());
-    W.putConfigurations(RemoteData[0].data());
+    RemoteDataW[0].resize(W.getActiveWalkers());
+    W.putConfigurations(RemoteData[0].data(), RemoteDataW[0].data());
     block = nblock;
   }
 
   number_of_walkers_ = W.WalkerOffsets[myComm->size()];
   hout.write(number_of_walkers_, hdf::num_walkers);
 
-  std::array<size_t, 3> gcounts{number_of_walkers_, number_of_particles_, OHMMS_DIM};
-
   if (hout.is_parallel())
   {
     { // write walker offset.
@@ -152,11 +144,19 @@ void HDFWalkerOutput::write_configuration(const WalkerConfigurations& W, hdf_archive& hout, int nblock)
       hout.write(slab, "walker_partition");
     }
     { // write walker configuration
+      std::array<size_t, 3> gcounts{number_of_walkers_, number_of_particles_, OHMMS_DIM};
       std::array<size_t, 3> counts{W.getActiveWalkers(), number_of_particles_, OHMMS_DIM};
       std::array<size_t, 3> offsets{static_cast<size_t>(W.WalkerOffsets[myComm->rank()]), 0, 0};
       hyperslab_proxy<BufferType, 3> slab(RemoteData[0], gcounts, counts, offsets);
       hout.write(slab, hdf::walkers);
     }
+    {
+      std::array<size_t, 1> gcounts{number_of_walkers_};
+      std::array<size_t, 1> counts{W.getActiveWalkers()};
+      std::array<size_t, 1> offsets{static_cast<size_t>(W.WalkerOffsets[myComm->rank()])};
+      hyperslab_proxy<std::vector<QMCTraits::FullPrecRealType>, 1> slab(RemoteDataW[0], gcounts, counts, offsets);
+      hout.write(slab, hdf::walker_weights);
+    }
   }
   else
   { //gaterv to the master and master writes it, could use isend/irecv
@@ -172,106 +172,25 @@ void HDFWalkerOutput::write_configuration(const WalkerConfigurations& W, hdf_archive& hout, int nblock)
      if (!myComm->rank())
        RemoteData[1].resize(wb * W.WalkerOffsets[myComm->size()]);
      mpi::gatherv(*myComm, RemoteData[0], RemoteData[1], counts, displ);
+      // update counts and displ for gathering walker weights
+      for (int i = 0; i < myComm->size(); ++i)
+      {
+        counts[i] = (W.WalkerOffsets[i + 1] - W.WalkerOffsets[i]);
+        displ[i]  = W.WalkerOffsets[i];
+      }
+      if (!myComm->rank())
+        RemoteDataW[1].resize(W.WalkerOffsets[myComm->size()]);
+      mpi::gatherv(*myComm, RemoteDataW[0], RemoteDataW[1], counts, displ);
    }
    int buffer_id = (myComm->size() > 1) ? 1 : 0;
-    hout.writeSlabReshaped(RemoteData[buffer_id], gcounts, hdf::walkers);
+    {
+      std::array<size_t, 3> gcounts{number_of_walkers_, number_of_particles_, OHMMS_DIM};
+      hout.writeSlabReshaped(RemoteData[buffer_id], gcounts, hdf::walkers);
+    }
+    {
+      std::array<size_t, 1> gcounts{number_of_walkers_};
+      hout.writeSlabReshaped(RemoteDataW[buffer_id], gcounts, hdf::walker_weights);
+    }
   }
 }
-
-/*
-bool HDFWalkerOutput::dump(ForwardWalkingHistoryObject& FWO)
-{
-//   std::string ConfigFileName=myComm->getName()+".storeConfig.h5";
-//   fw_out.open(ConfigFileName);
-//
-//   if (myComm->size()==1)
-//   {
-//     for (int i=0; isize();
-//       std::stringstream sstr;
-//       sstr<<"Block_"< posVecs;
-//       //reserve enough space
-//       std::vector IDs(fwdata_size,0);
-//       std::vector ParentIDs(fwdata_size,0);
-//       std::vector::iterator tend(posVecs.begin());
-//       for (int j=0;jsize();
-//       std::stringstream sstr;
-//       sstr<<"Block_"< counts(myComm->size());
-//       mpi::all_gather(*myComm,fwdata_size,counts);
-//
-//       std::vector posVecs;
-//       //reserve space to minimize the allocation
-//       posVecs.reserve(FWO.number_of_walkers_*n3);
-//       std::vector myIDs(fwdata_size),pIDs(fwdata_size);
-//       std::vector::iterator tend(posVecs.begin());
-//       for (int j=0;j offsets(myComm->size()+1,0);
-//       for(int i=0; isize();++i) offsets[i+1]=offsets[i]+counts[i];
-//       fwdata_size=offsets.back();
-//       fw_out.write(fwdata_size,hdf::num_walkers);
-//
-//       std::vector globalIDs;
-//       if(myComm->rank()==0) globalIDs.resize(fwdata_size);
-//
-//       //collect WalkerID
-//       mpi::gatherv(*myComm, myIDs, globalIDs, counts, offsets);
-//       fw_out.write(globalIDs,"WalkerID");
-//       //collect ParentID
-//       mpi::gatherv(*myComm, pIDs, globalIDs, counts, offsets);
-//       fw_out.write(globalIDs,"ParentID");
-//
-//       for(int i=0; i gpos;
-//       if(myComm->rank()==0) gpos.resize(offsets.back());
-//       mpi::gatherv(*myComm, posVecs, gpos, counts, offsets);
-//
-//       fw_out.write(gpos,"Positions");
-//
-//       fw_out.pop();//close the group
-//       ++currentConfigNumber;
-//     }
-//   }
-// #endif
-//   fw_out.close();
-//   FWO.clearConfigsForForwardWalking();
-//
-  return true;
-}*/
-
 } // namespace qmcplusplus
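In the parallel branch above, every rank writes its slice of the new one-dimensional walker_weights dataset through a hyperslab_proxy whose count is the rank's active walkers and whose offset comes from W.WalkerOffsets. The sketch below shows the same selection pattern with the plain HDF5 C API, run serially with two simulated ranks; the file name and the rank loop are purely illustrative.

// Illustration only (not QMCPACK code): 1-D hyperslab writes of per-walker weights.
#include <hdf5.h>
#include <vector>

int main()
{
  const hsize_t gcount     = 5;        // global number of walkers
  const hsize_t counts[2]  = {3, 2};   // walkers per (simulated) rank
  const hsize_t offsets[2] = {0, 3};   // analogue of W.WalkerOffsets

  hid_t file   = H5Fcreate("weights_demo.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
  hid_t fspace = H5Screate_simple(1, &gcount, nullptr);
  hid_t dset   = H5Dcreate2(file, "walker_weights", H5T_NATIVE_DOUBLE, fspace,
                            H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

  for (int r = 0; r < 2; ++r)
  {
    std::vector<double> local(counts[r], 1.0 + r); // this rank's weights
    hid_t mspace = H5Screate_simple(1, &counts[r], nullptr);
    // select this rank's slice of the global 1-D dataset and write into it
    H5Sselect_hyperslab(fspace, H5S_SELECT_SET, &offsets[r], nullptr, &counts[r], nullptr);
    H5Dwrite(dset, H5T_NATIVE_DOUBLE, mspace, fspace, H5P_DEFAULT, local.data());
    H5Sclose(mspace);
  }

  H5Dclose(dset);
  H5Sclose(fspace);
  H5Fclose(file);
  return 0;
}

A genuinely parallel write would open the file with an MPI-IO file-access property list and use a collective transfer property in H5Dwrite; that is roughly what hdf_archive arranges when hout.is_parallel() is true.
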
diff --git a/src/Particle/HDFWalkerOutput.h b/src/Particle/HDFWalkerOutput.h
index c17f140ffe..6f5d7f2d72 100644
--- a/src/Particle/HDFWalkerOutput.h
+++ b/src/Particle/HDFWalkerOutput.h
@@ -61,14 +61,9 @@ class HDFWalkerOutput
 
   ///PooledData is used to define the shape of multi-dimensional array
   using BufferType = PooledData<OHMMS_PRECISION>;
   std::vector<Communicate::request> myRequest;
-  std::vector<BufferType> RemoteData;
+  std::array<BufferType, 2> RemoteData;
+  std::array<std::vector<QMCTraits::FullPrecRealType>, 2> RemoteDataW;
   int block;
-
-  //     //define some types for the FW collection
-  //     using FWBufferType = std::vector;
-  //     std::vector FWData;
-  //     std::vector > FWCountData;
-
   void write_configuration(const WalkerConfigurations& W, hdf_archive& hout, int block);
 };
diff --git a/src/Particle/WalkerConfigurations.cpp b/src/Particle/WalkerConfigurations.cpp
index e58f94c2d2..6dd9054c8e 100644
--- a/src/Particle/WalkerConfigurations.cpp
+++ b/src/Particle/WalkerConfigurations.cpp
@@ -135,12 +135,14 @@ void WalkerConfigurations::reset()
   }
 }
 
-void WalkerConfigurations::putConfigurations(Walker_t::RealType* target) const
+void WalkerConfigurations::putConfigurations(Walker_t::RealType* target, QMCTraits::FullPrecRealType* weights) const
 {
   for (const auto& walker : WalkerList)
   {
     std::copy(get_first_address(walker->R), get_last_address(walker->R), target);
     target += get_last_address(walker->R) - get_first_address(walker->R);
+    *weights = walker->Weight;
+    ++weights;
   }
 }
 
diff --git a/src/Particle/WalkerConfigurations.h b/src/Particle/WalkerConfigurations.h
index 09cc22cf7f..22f46ad6ac 100644
--- a/src/Particle/WalkerConfigurations.h
+++ b/src/Particle/WalkerConfigurations.h
@@ -187,7 +187,7 @@ class WalkerConfigurations
   void reset();
 
   ///save the particle positions of all the walkers into target
-  void putConfigurations(Walker_t::RealType* target) const;
+  void putConfigurations(Walker_t::RealType* target, QMCTraits::FullPrecRealType* weights) const;
 
 protected:
   ///number of walkers on a node
diff --git a/src/io/hdf/HDFVersion.h b/src/io/hdf/HDFVersion.h
index 8ded8fb9f2..a4fc1f1049 100644
--- a/src/io/hdf/HDFVersion.h
+++ b/src/io/hdf/HDFVersion.h
@@ -35,6 +35,7 @@ const char config_group[] = "config_collection";
 const char random[] = "random_state";
 const char walkers[] = "walkers";
 const char num_walkers[] = "number_of_walkers";
+const char walker_weights[] = "walker_weights";
 const char energy_history[] = "energy_history";
 const char norm_history[] = "norm_history";
 const char qmc_status[] = "qmc_status";
diff --git a/tests/io/restart/qmc_short.restart.xml b/tests/io/restart/qmc_short.restart.xml
index a796108afa..36911506bf 100644
--- a/tests/io/restart/qmc_short.restart.xml
+++ b/tests/io/restart/qmc_short.restart.xml
@@ -82,7 +82,7 @@
-
+
   16
diff --git a/tests/io/restart_batch/qmc_short_batch.restart.xml b/tests/io/restart_batch/qmc_short_batch.restart.xml
index b17b3dc685..c1a9cc145a 100644
--- a/tests/io/restart_batch/qmc_short_batch.restart.xml
+++ b/tests/io/restart_batch/qmc_short_batch.restart.xml
@@ -82,7 +82,7 @@
-
+
   16