#include "openPMD/config.hpp"

#include "RandomDatasetFiller.hpp"

#include "openPMD/DatatypeHelpers.hpp"
#include "openPMD/benchmark/mpi/BlockSlicer.hpp"
#include "openPMD/benchmark/mpi/DatasetFiller.hpp"
#include "openPMD/benchmark/mpi/MPIBenchmarkReport.hpp"
#include "openPMD/openPMD.hpp"

namespace openPMD
{
// Class representing a benchmark.
template <typename DatasetFillerProvider>
class MPIBenchmark
{
public:
    using extentT = Extent::value_type;
    MPI_Comm communicator = MPI_COMM_WORLD;

    // Total extent of the hypercuboid used in the benchmark.
    Extent totalExtent;
    std::shared_ptr<BlockSlicer> m_blockSlicer;
    DatasetFillerProvider m_dfp;

    // Construct an MPI benchmark manually.
    MPIBenchmark(
        std::string basePath, Extent tExtent,
        std::shared_ptr<BlockSlicer> blockSlicer, DatasetFillerProvider dfp,
        MPI_Comm comm = MPI_COMM_WORLD);

    // Add a configuration to be benchmarked, stating explicitly over how many
    // MPI ranks (threadSize) it shall run.
    void addConfiguration(
        std::string compression, uint8_t compressionLevel, std::string backend,
        Datatype dt,
        typename decltype(Series::iterations)::key_type iterations,
        int threadSize);

    // As above; the rank count is deduced from the communicator's size.
    void addConfiguration(
        std::string compression, uint8_t compressionLevel, std::string backend,
        Datatype dt,
        typename decltype(Series::iterations)::key_type iterations);

    void resetConfigurations();

    // Main function for running a benchmark.
    template <typename Clock>
    MPIBenchmarkReport<typename Clock::duration>
    runBenchmark(int rootThread = 0);

private:
    std::string m_basePath;

    // One tuple per configuration added via addConfiguration(); the enum
    // below names the tuple fields.
    std::vector<std::tuple<
        std::string, uint8_t, std::string, int, Datatype,
        typename decltype(Series::iterations)::key_type>>
        m_configurations;
    enum Config
    {
        COMPRESSION = 0,
        COMPRESSION_LEVEL,
        BACKEND,
        NRANKS,
        DTYPE,
        ITERATIONS
    };

    // Compute this rank's local block of the total extent.
    std::pair<Offset, Extent> slice(int size);
    // Execution logic for one clock type; member templates defined below.
    template <typename Clock>
    struct BenchmarkExecution
    {
        MPIBenchmark<DatasetFillerProvider> *m_benchmark;

        explicit BenchmarkExecution(
            MPIBenchmark<DatasetFillerProvider> *benchmark)
            : m_benchmark{benchmark}
        {}

        // Run and time the write half of one configuration.
        template <typename T>
        typename Clock::duration writeBenchmark(
            std::string const &compression, uint8_t level, Offset &offset,
            Extent &extent, std::string const &extension,
            std::shared_ptr<DatasetFiller<T>> datasetFiller,
            typename decltype(Series::iterations)::key_type iterations);

        // Run and time the read half of one configuration.
        template <typename T>
        typename Clock::duration readBenchmark(
            Offset &offset, Extent &extent, std::string extension,
            typename decltype(Series::iterations)::key_type iterations);

        // Called via switchType() once per datatype in the configurations.
        template <typename T>
        void operator()(
            MPIBenchmarkReport<typename Clock::duration> &report,
            int rootThread);

        // Fallback overload for unsupported datatypes.
        template <int n, typename... Params>
        void operator()(Params &&...);
    };
};
template <typename DatasetFillerProvider>
template <typename Clock>
MPIBenchmarkReport<typename Clock::duration>
MPIBenchmark<DatasetFillerProvider>::runBenchmark(int rootThread)
{
    MPIBenchmarkReport<typename Clock::duration> res{this->communicator};
    BenchmarkExecution<Clock> exec{this};

    // Collect the set of datatypes occurring in the configurations, then
    // run the benchmark once per datatype via compile-time dispatch.
    std::set<Datatype> datatypes;
    for (auto const &conf : m_configurations)
    {
        datatypes.insert(std::get<DTYPE>(conf));
    }
    for (Datatype dt : datatypes)
    {
        switchType(dt, exec, res, rootThread);
    }

    return res;
}
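The switchType call above is what turns the runtime Datatype value into the compile-time type parameter that BenchmarkExecution's operator() expects. The following is a condensed sketch of that dispatch pattern, not the actual implementation in DatatypeHelpers.hpp, with only a few representative cases spelled out:

// Sketch of a switchType-style dispatcher: one switch case per runtime
// datatype, each forwarding to the functor's templated operator().
template <typename Action, typename... Args>
auto switchTypeSketch(Datatype dt, Action action, Args &&...args)
    -> decltype(action.template operator()<char>(std::forward<Args>(args)...))
{
    switch (dt)
    {
    case Datatype::INT:
        return action.template operator()<int>(std::forward<Args>(args)...);
    case Datatype::FLOAT:
        return action.template operator()<float>(std::forward<Args>(args)...);
    case Datatype::DOUBLE:
        return action.template operator()<double>(std::forward<Args>(args)...);
    // ... one case per supported openPMD datatype ...
    default:
        // Unsupported datatypes land in the non-type-template fallback,
        // which in this file throws a std::runtime_error.
        return action.template operator()<0>(std::forward<Args>(args)...);
    }
}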
template <typename DatasetFillerProvider>
MPIBenchmark<DatasetFillerProvider>::MPIBenchmark(
    std::string basePath, Extent tExtent,
    std::shared_ptr<BlockSlicer> blockSlicer, DatasetFillerProvider dfp,
    MPI_Comm comm)
    : communicator{comm}
    , totalExtent{std::move(tExtent)}
    , m_blockSlicer{std::move(blockSlicer)}
    , m_dfp{dfp}
    , m_basePath{std::move(basePath)}
{
    if (m_blockSlicer == nullptr)
        throw std::runtime_error("Argument blockSlicer cannot be a nullptr!");
}
template <typename DatasetFillerProvider>
std::pair<Offset, Extent> MPIBenchmark<DatasetFillerProvider>::slice(int size)
{
    // Never use more ranks than the communicator provides.
    int actualSize;
    MPI_Comm_size(this->communicator, &actualSize);
    int rank;
    MPI_Comm_rank(this->communicator, &rank);
    size = std::min(size, actualSize);
    return m_blockSlicer->sliceBlock(totalExtent, size, rank);
}
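The decomposition itself is delegated to the BlockSlicer instance handed to the constructor, via its sliceBlock virtual. A minimal hand-written slicer might look like the sketch below; the class name EvenSlicer is hypothetical, and the exact signature of the pure virtual should be checked against BlockSlicer.hpp:

// Hypothetical slicer: cut the slowest-varying dimension into `size`
// contiguous slabs and return the slab belonging to `rank`.
class EvenSlicer : public BlockSlicer
{
public:
    std::pair<Offset, Extent>
    sliceBlock(Extent &totalExtent, int size, int rank) override
    {
        Offset offset(totalExtent.size(), 0);
        Extent extent{totalExtent};
        auto perRank = totalExtent[0] / size;
        offset[0] = perRank * rank;
        // The last rank also takes any remainder.
        extent[0] =
            (rank == size - 1) ? totalExtent[0] - offset[0] : perRank;
        return {offset, extent};
    }
};

The library ships a ready-made OneDimensionalBlockSlicer for exactly this kind of decomposition, so a custom implementation is only needed for non-trivial layouts.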
template <typename DatasetFillerProvider>
void MPIBenchmark<DatasetFillerProvider>::addConfiguration(
    std::string compression, uint8_t compressionLevel, std::string backend,
    Datatype dt, typename decltype(Series::iterations)::key_type iterations,
    int threadSize)
{
    this->m_configurations.emplace_back(
        compression, compressionLevel, backend, threadSize, dt, iterations);
}

template <typename DatasetFillerProvider>
void MPIBenchmark<DatasetFillerProvider>::addConfiguration(
    std::string compression, uint8_t compressionLevel, std::string backend,
    Datatype dt, typename decltype(Series::iterations)::key_type iterations)
{
    int size;
    MPI_Comm_size(communicator, &size);
    addConfiguration(
        compression, compressionLevel, backend, dt, iterations, size);
}
template <typename DatasetFillerProvider>
void MPIBenchmark<DatasetFillerProvider>::resetConfigurations()
{
    this->m_configurations.clear();
}
template <typename DatasetFillerProvider>
template <typename Clock>
template <typename T>
typename Clock::duration
MPIBenchmark<DatasetFillerProvider>::BenchmarkExecution<Clock>::writeBenchmark(
    std::string const &compression, uint8_t level, Offset &offset,
    Extent &extent, std::string const &extension,
    std::shared_ptr<DatasetFiller<T>> datasetFiller,
    typename decltype(Series::iterations)::key_type iterations)
{
    MPI_Barrier(m_benchmark->communicator);
    auto start = Clock::now();

    // Open the series for writing.
    Series series = Series(
        m_benchmark->m_basePath + "." + extension,
        Access::CREATE,
        m_benchmark->communicator);

    for (typename decltype(Series::iterations)::key_type i = 0; i < iterations;
         i++)
    {
        auto writeData = datasetFiller->produceData();

        MeshRecordComponent id =
            series.iterations[i].meshes["id"][MeshRecordComponent::SCALAR];

        Datatype datatype = determineDatatype(writeData);
        Dataset dataset = Dataset(datatype, m_benchmark->totalExtent);
        if (!compression.empty())
        {
            dataset.setCompression(compression, level);
        }

        id.resetDataset(dataset);

        id.storeChunk<T>(writeData, offset, extent);
        series.flush();
    }

    MPI_Barrier(m_benchmark->communicator);
    auto end = Clock::now();

    // Measure the time needed for data production alone, so it can be
    // deducted from the measured write time below.
    for (typename decltype(Series::iterations)::key_type i = 0; i < iterations;
         i++)
    {
        datasetFiller->produceData();
    }
    auto deduct = Clock::now();

    return end - start - (deduct - end);
}
template <typename DatasetFillerProvider>
template <typename Clock>
template <typename T>
typename Clock::duration
MPIBenchmark<DatasetFillerProvider>::BenchmarkExecution<Clock>::readBenchmark(
    Offset &offset, Extent &extent, std::string extension,
    typename decltype(Series::iterations)::key_type iterations)
{
    MPI_Barrier(m_benchmark->communicator);
    auto start = Clock::now();

    // Open the previously written series read-only.
    Series series = Series(
        m_benchmark->m_basePath + "." + extension,
        Access::READ_ONLY,
        m_benchmark->communicator);

    for (typename decltype(Series::iterations)::key_type i = 0; i < iterations;
         i++)
    {
        MeshRecordComponent id =
            series.iterations[i].meshes["id"][MeshRecordComponent::SCALAR];

        auto chunk_data = id.loadChunk<T>(offset, extent);
        series.flush();
    }

    MPI_Barrier(m_benchmark->communicator);
    auto end = Clock::now();
    return end - start;
}
template <typename DatasetFillerProvider>
template <typename Clock>
template <typename T>
void MPIBenchmark<DatasetFillerProvider>::BenchmarkExecution<Clock>::operator()(
    MPIBenchmarkReport<typename Clock::duration> &report, int rootThread)
{
    Datatype dt = determineDatatype<T>();
    auto dsf = std::dynamic_pointer_cast<DatasetFiller<T>>(
        m_benchmark->m_dfp.template operator()<T>());
    for (auto const &config : m_benchmark->m_configurations)
    {
        std::string compression;
        uint8_t compressionLevel;
        std::string backend;
        int size;
        Datatype dt2;
        typename decltype(Series::iterations)::key_type iterations;
        std::tie(
            compression, compressionLevel, backend, size, dt2, iterations) =
            config;

        // Only run the configurations matching this instantiation's datatype.
        if (dt != dt2)
        {
            continue;
        }

        auto localCuboid = m_benchmark->slice(size);

        extentT blockSize = 1;
        for (auto ext : localCuboid.second)
        {
            blockSize *= ext;
        }
        dsf->setNumberOfItems(blockSize);

        auto writeTime = writeBenchmark<T>(
            compression, compressionLevel, localCuboid.first,
            localCuboid.second, backend, dsf, iterations);
        auto readTime = readBenchmark<T>(
            localCuboid.first, localCuboid.second, backend, iterations);
        report.addReport(
            rootThread, compression, compressionLevel, backend, size, dt,
            iterations, std::make_pair(writeTime, readTime));
    }
}
template <typename DatasetFillerProvider>
template <typename Clock>
template <int n, typename... Params>
void MPIBenchmark<DatasetFillerProvider>::BenchmarkExecution<Clock>::operator()(
    Params &&...)
{
    throw std::runtime_error(
        "Unknown/unsupported datatype requested to be benchmarked.");
}
} // namespace openPMD
Referenced symbols (brief descriptions from the linked documentation):

- MPIBenchmark: Class representing a benchmark. (MPIBenchmark.hpp:57)
- MPIBenchmark::MPIBenchmark(std::string basePath, Extent tExtent, std::shared_ptr<BlockSlicer> blockSlicer, DatasetFillerProvider dfp, MPI_Comm comm = MPI_COMM_WORLD): Construct an MPI benchmark manually. (MPIBenchmark.hpp:259)
- MPIBenchmark::totalExtent: Total extent of the hypercuboid used in the benchmark. (MPIBenchmark.hpp:67)
- MPIBenchmark::addConfiguration(std::string compression, uint8_t compressionLevel, std::string backend, Datatype dt, typename decltype(Series::iterations)::key_type iterations, int threadSize). (MPIBenchmark.hpp:287)
- MPIBenchmarkReport<typename Clock::duration> MPIBenchmark::runBenchmark(int rootThread = 0): Main function for running a benchmark. (MPIBenchmark.hpp:240)
- MPIBenchmarkReport: The report for a single benchmark produced by <openPMD/benchmark/mpi/MPIBenchmark>. (MPIBenchmarkReport.hpp:44)
- MPIBenchmarkReport::addReport(int rootThread, std::string compression, uint8_t level, std::string extension, int threadSize, Datatype dt, typename decltype(Series::iterations)::key_type iterations, std::pair<Duration, Duration> const &report): Add results for a certain compression strategy and level. (MPIBenchmarkReport.hpp:191)
- DatasetFiller: An abstract class to create one iteration of data per thread. (DatasetFiller.hpp:35)
- switchType(Datatype dt, Action action, Args &&...args) -> decltype(action.template operator()<char>(std::forward<Args>(args)...)): Generalizes switching over an openPMD datatype. (DatatypeHelpers.hpp:131)
- Datatype: Concrete datatype of an object available at runtime. (Datatype.hpp:45)
- Dataset. (Dataset.hpp:35)
- Series: Root level of the openPMD hierarchy. (Series.hpp:537)
- Series::flush(): Execute all required remaining IO operations to write or read data. (Series.cpp:350)
- MeshRecordComponent. (MeshRecordComponent.hpp:29)
- Access::CREATE: create new series and truncate existing (files).
- Access::READ_ONLY: open series as read-only, fails if series is not found.
- namespace openPMD: Public definitions of openPMD-api. (Date.cpp:28)
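Continuing the driver sketch above, the returned report can then be inspected on the root rank. How the collected timings are exposed has to be checked against MPIBenchmarkReport.hpp; the sketch below assumes a durations container whose values are the (write, read) pairs passed to addReport:

#include <iostream>

// Assumption: MPIBenchmarkReport exposes the gathered results as a map
// called `durations`, keyed by configuration and valued with the
// std::pair<Duration, Duration> handed to addReport().
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (rank == 0)
{
    for (auto const &entry : report.durations)
    {
        auto const &times = entry.second; // (write, read) durations
        std::cout << "write: " << times.first.count()
                  << "\tread: " << times.second.count() << '\n';
    }
}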