Merge pull request #553 from ax3l/topic-parallelIOtestDedup
ParallelIOTest: Less Code Duplication
ax3l authored Aug 22, 2019
2 parents 36ade8b + 99d59f6 commit cca6882
Showing 1 changed file with 65 additions and 99 deletions.
164 changes: 65 additions & 99 deletions test/ParallelIOTest.cpp
@@ -23,6 +23,65 @@ TEST_CASE( "none", "[parallel]" )
{ }
#endif

+#if openPMD_HAVE_MPI
+void write_test_zero_extent( std::string file_ending, bool writeAllChunks )
+{
+    int mpi_s{-1};
+    int mpi_r{-1};
+    MPI_Comm_size(MPI_COMM_WORLD, &mpi_s);
+    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_r);
+    uint64_t size = static_cast<uint64_t>(mpi_s);
+    uint64_t rank = static_cast<uint64_t>(mpi_r);
+    Series o = Series("../samples/parallel_write_zero_extent." + file_ending, AccessType::CREATE, MPI_COMM_WORLD);
+
+    ParticleSpecies& e = o.iterations[1].particles["e"];
+
+    /* every rank n writes n consecutive cells, increasing values
+     * rank 0 does a zero-extent write
+     * two ranks will result in {1}
+     * three ranks will result in {1, 2, 3}
+     * four ranks will result in {1, 2, 3, 4, 5, 6} */
+    uint64_t num_cells = ((size-1)*(size-1) + (size-1))/2; /* (n^2 + n) / 2 */
+    if( num_cells == 0u )
+    {
+        std::cerr << "Test can only be run with at least two ranks" << std::endl;
+        return;
+    }
+
+    std::vector< double > position_global(num_cells);
+    double pos{1.};
+    std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; });
+    std::shared_ptr< double > position_local(new double[rank], [](double *p) { delete[] p; });
+    uint64_t offset;
+    if( rank != 0 )
+        offset = ((rank-1)*(rank-1) + (rank-1))/2;
+    else
+        offset = 0;
+
+    e["position"]["x"].resetDataset(Dataset(determineDatatype(position_local), {num_cells}));
+
+    std::vector< uint64_t > positionOffset_global(num_cells);
+    uint64_t posOff{1};
+    std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; });
+    std::shared_ptr< uint64_t > positionOffset_local(new uint64_t[rank], [](uint64_t *p) { delete[] p; });
+
+    e["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local), {num_cells}));
+
+    for( uint64_t i = 0; i < rank; ++i )
+    {
+        position_local.get()[i] = position_global[offset + i];
+        positionOffset_local.get()[i] = positionOffset_global[offset + i];
+    }
+    if( rank != 0 || writeAllChunks )
+    {
+        e["position"]["x"].storeChunk(position_local, {offset}, {rank});
+        e["positionOffset"]["x"].storeChunk(positionOffset_local, {offset}, {rank});
+    }
+
+    //TODO read back, verify
+}
+#endif
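The refactored helper still ends in a `//TODO read back, verify` note. A minimal sketch of what that verification could look like, reopening the series with the read-side API used elsewhere in the test suite (`AccessType::READ_ONLY`, `loadChunk`, `flush`); the helper name `read_test_zero_extent` and its wiring are illustrative, not part of this commit:

```cpp
// Hypothetical follow-up (not in this commit): reopen the series on all ranks
// and check that the collectively written cells form the sequence 1..num_cells.
void read_test_zero_extent( std::string file_ending, uint64_t num_cells )
{
    Series o = Series("../samples/parallel_write_zero_extent." + file_ending,
                      AccessType::READ_ONLY, MPI_COMM_WORLD);

    auto x = o.iterations[1].particles["e"]["position"]["x"];
    // every rank loads the complete dataset; flush() triggers the actual read
    auto data = x.loadChunk< double >({0}, {num_cells});
    o.flush();

    for( uint64_t j = 0; j < num_cells; ++j )
        REQUIRE( data.get()[j] == static_cast< double >(j + 1) );
}
```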

#if openPMD_HAVE_HDF5 && openPMD_HAVE_MPI
TEST_CASE( "git_hdf5_sample_content_test", "[parallel][hdf5]" )
{
@@ -113,63 +172,19 @@ TEST_CASE( "hdf5_write_test", "[parallel][hdf5]" )

TEST_CASE( "hdf5_write_test_zero_extent", "[parallel][hdf5]" )
{
-    int mpi_s{-1};
-    int mpi_r{-1};
-    MPI_Comm_size(MPI_COMM_WORLD, &mpi_s);
-    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_r);
-    uint64_t size = static_cast<uint64_t>(mpi_s);
-    uint64_t rank = static_cast<uint64_t>(mpi_r);
-    Series o = Series("../samples/parallel_write_zero_extent.h5", AccessType::CREATE, MPI_COMM_WORLD);
-
-    ParticleSpecies& e = o.iterations[1].particles["e"];
-
-    /* every rank n writes n consecutive cells, increasing values
-     * rank 0 does a zero-extent write
-     * two ranks will result in {1}
-     * three ranks will result in {1, 2, 3}
-     * four ranks will result in {1, 2, 3, 4, 5, 6} */
-    uint64_t num_cells = ((size-1)*(size-1) + (size-1))/2; /* (n^2 + n) / 2 */
-    if( num_cells == 0u )
-    {
-        std::cerr << "Test can only be run with at least two ranks" << std::endl;
-        return;
-    }
-
-    std::vector< double > position_global(num_cells);
-    double pos{1.};
-    std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; });
-    std::shared_ptr< double > position_local(new double[rank], [](double *p) { delete[] p; });
-    uint64_t offset;
-    if( rank != 0 )
-        offset = ((rank-1)*(rank-1) + (rank-1))/2;
-    else
-        offset = 0;
-
-    e["position"]["x"].resetDataset(Dataset(determineDatatype(position_local), {num_cells}));
-
-    std::vector< uint64_t > positionOffset_global(num_cells);
-    uint64_t posOff{1};
-    std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; });
-    std::shared_ptr< uint64_t > positionOffset_local(new uint64_t[rank], [](uint64_t *p) { delete[] p; });
-
-    e["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local), {num_cells}));
-
-    for( uint64_t i = 0; i < rank; ++i )
-    {
-        position_local.get()[i] = position_global[offset + i];
-        positionOffset_local.get()[i] = positionOffset_global[offset + i];
-    }
-    e["position"]["x"].storeChunk(position_local, {offset}, {rank});
-    e["positionOffset"]["x"].storeChunk(positionOffset_local, {offset}, {rank});
-
-    //TODO read back, verify
+    write_test_zero_extent( "h5", true );
}
#else

TEST_CASE( "no_parallel_hdf5", "[parallel][hdf5]" )
{
REQUIRE(true);
}

#endif

#if openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI
TEST_CASE( "adios_write_test", "[parallel][adios]" )
{
@@ -208,56 +223,7 @@ TEST_CASE( "adios_write_test", "[parallel][adios]" )

TEST_CASE( "adios_write_test_zero_extent", "[parallel][adios]" )
{
-    int mpi_s{-1};
-    int mpi_r{-1};
-    MPI_Comm_size(MPI_COMM_WORLD, &mpi_s);
-    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_r);
-    uint64_t size = static_cast<uint64_t>(mpi_s);
-    uint64_t rank = static_cast<uint64_t>(mpi_r);
-    Series o = Series("../samples/parallel_write_zero_extent.bp", AccessType::CREATE, MPI_COMM_WORLD);
-
-    ParticleSpecies& e = o.iterations[1].particles["e"];
-
-    /* every rank n writes n consecutive cells, increasing values
-     * rank 0 does a zero-extent write
-     * two ranks will result in {1}
-     * three ranks will result in {1, 2, 3}
-     * four ranks will result in {1, 2, 3, 4, 5, 6} */
-    uint64_t num_cells = ((size-1)*(size-1) + (size-1))/2; /* (n^2 + n) / 2 */
-    if( num_cells == 0u )
-    {
-        std::cerr << "Test can only be run with at least two ranks" << std::endl;
-        return;
-    }
-
-    std::vector< double > position_global(num_cells);
-    double pos{1.};
-    std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; });
-    std::shared_ptr< double > position_local(new double[rank], [](double *p) { delete[] p; });
-    uint64_t offset;
-    if( rank != 0 )
-        offset = ((rank-1)*(rank-1) + (rank-1))/2;
-    else
-        offset = 0;
-
-    e["position"]["x"].resetDataset(Dataset(determineDatatype(position_local), {num_cells}));
-
-    std::vector< uint64_t > positionOffset_global(num_cells);
-    uint64_t posOff{1};
-    std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; });
-    std::shared_ptr< uint64_t > positionOffset_local(new uint64_t[rank], [](uint64_t *p) { delete[] p; });
-
-    e["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local), {num_cells}));
-
-    for( uint64_t i = 0; i < rank; ++i )
-    {
-        position_local.get()[i] = position_global[offset + i];
-        positionOffset_local.get()[i] = positionOffset_global[offset + i];
-    }
-    e["position"]["x"].storeChunk(position_local, {offset}, {rank});
-    e["positionOffset"]["x"].storeChunk(positionOffset_local, {offset}, {rank});
-
-    //TODO read back, verify
+    write_test_zero_extent( "bp", true );
}
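Both call sites pass `writeAllChunks = true`, so rank 0 still issues its zero-extent `storeChunk()` calls and the new `if( rank != 0 || writeAllChunks )` guard changes nothing here. The flag exists so a backend test can skip rank 0's calls entirely; a hypothetical invocation of that other branch (not part of this commit):

```cpp
// Hypothetical: rank 0 still takes part in resetDataset(), but issues no
// storeChunk() at all instead of a zero-extent write.
TEST_CASE( "adios_write_test_skip_zero_chunk", "[parallel][adios]" )
{
    write_test_zero_extent( "bp", false );
}
```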

TEST_CASE( "hzdr_adios_sample_content_test", "[parallel][adios1]" )
