#include "openPMD/config.hpp"

#include "RandomDatasetFiller.hpp"

#include "openPMD/DatatypeHelpers.hpp"
#include "openPMD/benchmark/mpi/BlockSlicer.hpp"
#include "openPMD/benchmark/mpi/DatasetFiller.hpp"
#include "openPMD/benchmark/mpi/MPIBenchmarkReport.hpp"
#include "openPMD/openPMD.hpp"

#include <mpi.h>

#include <algorithm>
#include <memory>
#include <set>
#include <stdexcept>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
namespace openPMD
{
/** Class representing a benchmark. */
template <typename DatasetFillerProvider>
class MPIBenchmark
{
public:
    using extentT = Extent::value_type;

    MPI_Comm communicator = MPI_COMM_WORLD;
    //! Total extent of the hypercuboid used in the benchmark.
    Extent totalExtent;
    std::shared_ptr<BlockSlicer> m_blockSlicer;
    DatasetFillerProvider m_dfp;

    /** Construct an MPI benchmark manually. */
    MPIBenchmark(
        std::string basePath,
        Extent tExtent,
        std::shared_ptr<BlockSlicer> blockSlicer,
        DatasetFillerProvider dfp,
        MPI_Comm comm = MPI_COMM_WORLD);

    /** Add a benchmark run with an explicit thread count. */
    void addConfiguration(
        std::string jsonConfig, std::string backend, Datatype dt,
        Series::IterationIndex_t iterations, int threadSize);
    /** Add a benchmark run using the full communicator size. */
    void addConfiguration(
        std::string jsonConfig, std::string backend, Datatype dt,
        Series::IterationIndex_t iterations);

    void resetConfigurations();
    /** Main function for running a benchmark. */
    template <typename Clock>
    MPIBenchmarkReport<typename Clock::duration> runBenchmark(int rootThread = 0);

private:
    std::string m_basePath;
    // one entry per configured run:
    // (jsonConfig, backend, threadSize, datatype, iterations)
    std::vector<std::tuple<
        std::string, std::string, int, Datatype, Series::IterationIndex_t>>
        m_configurations;
    // an enum (elided in this listing) names the tuple indices,
    // e.g. DTYPE for the Datatype field used in runBenchmark() below

    std::pair<Offset, Extent> slice(int size);
    template <typename Clock>
    struct BenchmarkExecution
    {
        MPIBenchmark<DatasetFillerProvider> *m_benchmark;

        explicit BenchmarkExecution(MPIBenchmark<DatasetFillerProvider> *benchmark)
            : m_benchmark{benchmark}
        {}

        template <typename T>
        typename Clock::duration writeBenchmark(
            std::string const &jsonConfig,
            Offset offset,
            Extent extent,
            std::string const &extension,
            std::shared_ptr<DatasetFiller<T>> datasetFiller,
            Series::IterationIndex_t iterations);

        template <typename T>
        typename Clock::duration readBenchmark(
            Offset offset,
            Extent extent,
            std::string extension,
            Series::IterationIndex_t iterations);

        template <typename T>
        static void call(
            BenchmarkExecution<Clock> &,
            MPIBenchmarkReport<typename Clock::duration> &report,
            int rootThread);

        static constexpr char const *errorMsg = "BenchmarkExecution";
    };
}; // class MPIBenchmark

// Implementation.

template <typename DatasetFillerProvider>
template <typename Clock>
MPIBenchmarkReport<typename Clock::duration>
MPIBenchmark<DatasetFillerProvider>::runBenchmark(int rootThread)
{
    MPIBenchmarkReport<typename Clock::duration> res{this->communicator};
    BenchmarkExecution<Clock> exec{this};

    // run once per datatype that occurs in the configured runs
    std::set<Datatype> datatypes;
    for (auto const &conf : m_configurations)
    {
        datatypes.insert(std::get<DTYPE>(conf));
    }
    for (Datatype dt : datatypes)
    {
        switchType<BenchmarkExecution<Clock>>(dt, exec, res, rootThread);
    }

    return res;
}
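// Aside: switchType (from openPMD/DatatypeHelpers.hpp) maps the runtime
// Datatype onto a compile-time type T and forwards to Action::call<T>(...);
// this is why BenchmarkExecution exposes a static, templated call() plus an
// errorMsg string that such dispatchers can use in diagnostics. A schematic of
// the dispatch pattern (illustration only, not the library's implementation):
//
//     template <typename Action, typename... Args>
//     void switchTypeSketch(Datatype dt, Args &&...args)
//     {
//         switch (dt)
//         {
//         case Datatype::INT:
//             Action::template call<int>(std::forward<Args>(args)...);
//             break;
//         case Datatype::DOUBLE:
//             Action::template call<double>(std::forward<Args>(args)...);
//             break;
//         // ... one case per supported Datatype ...
//         default:
//             throw std::runtime_error(
//                 std::string(Action::errorMsg) + ": unsupported datatype");
//         }
//     }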
template <typename DatasetFillerProvider>
MPIBenchmark<DatasetFillerProvider>::MPIBenchmark(
    std::string basePath,
    Extent tExtent,
    std::shared_ptr<BlockSlicer> blockSlicer,
    DatasetFillerProvider dfp,
    MPI_Comm comm)
    : communicator{comm}
    , totalExtent{std::move(tExtent)}
    , m_blockSlicer{std::move(blockSlicer)}
    , m_dfp{dfp}
    , m_basePath{std::move(basePath)}
{
    if (m_blockSlicer == nullptr)
        throw std::runtime_error("Argument blockSlicer cannot be a nullptr!");
}
template <typename DatasetFillerProvider>
std::pair<Offset, Extent> MPIBenchmark<DatasetFillerProvider>::slice(int size)
{
    int actualSize;
    MPI_Comm_size(this->communicator, &actualSize);
    int rank;
    MPI_Comm_rank(this->communicator, &rank);
    size = std::min(size, actualSize);
    return m_blockSlicer->sliceBlock(totalExtent, size, rank);
}
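// Example: with the shipped OneDimensionalBlockSlicer(0), a totalExtent of
// {100, 100, 1000} and four participating ranks, slice(4) on rank 1 would
// typically return offset {25, 0, 0} and extent {25, 100, 1000}: the dataset
// is cut into near-equal hyperslabs along the chosen dimension, one per rank.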
template <typename DatasetFillerProvider>
void MPIBenchmark<DatasetFillerProvider>::addConfiguration(
    std::string jsonConfig,
    std::string backend,
    Datatype dt,
    Series::IterationIndex_t iterations,
    int threadSize)
{
    this->m_configurations.emplace_back(
        std::move(jsonConfig), backend, threadSize, dt, iterations);
}
template <typename DatasetFillerProvider>
void MPIBenchmark<DatasetFillerProvider>::addConfiguration(
    std::string jsonConfig,
    std::string backend,
    Datatype dt,
    Series::IterationIndex_t iterations)
{
    int size;
    MPI_Comm_size(communicator, &size);
    addConfiguration(std::move(jsonConfig), backend, dt, iterations, size);
}
template <typename DatasetFillerProvider>
void MPIBenchmark<DatasetFillerProvider>::resetConfigurations()
{
    this->m_configurations.clear();
}
template <typename DatasetFillerProvider>
template <typename Clock>
template <typename T>
typename Clock::duration
MPIBenchmark<DatasetFillerProvider>::BenchmarkExecution<Clock>::writeBenchmark(
    std::string const &jsonConfig,
    Offset offset,
    Extent extent,
    std::string const &extension,
    std::shared_ptr<DatasetFiller<T>> datasetFiller,
    Series::IterationIndex_t iterations)
{
    MPI_Barrier(m_benchmark->communicator);
    auto start = Clock::now();

    // open the series and write this rank's chunk in every iteration
    Series series = Series(
        m_benchmark->m_basePath + "." + extension,
        Access::CREATE,
        m_benchmark->communicator,
        jsonConfig);

    for (Series::IterationIndex_t i = 0; i < iterations; i++)
    {
        auto writeData = datasetFiller->produceData();

        MeshRecordComponent id =
            series.iterations[i].meshes["id"][MeshRecordComponent::SCALAR];

        Datatype datatype = determineDatatype(writeData);
        Dataset dataset = Dataset(datatype, m_benchmark->totalExtent);
        id.resetDataset(dataset);

        id.storeChunk<T>(writeData, offset, extent);
        series.flush();
    }

    MPI_Barrier(m_benchmark->communicator);
    auto end = Clock::now();

    // deduct the time needed for producing the data from the measurement
    datasetFiller->produceData();
    auto deduct = Clock::now();

    return end - start - (deduct - end);
}
template <typename DatasetFillerProvider>
template <typename Clock>
template <typename T>
typename Clock::duration
MPIBenchmark<DatasetFillerProvider>::BenchmarkExecution<Clock>::readBenchmark(
    Offset offset,
    Extent extent,
    std::string extension,
    Series::IterationIndex_t iterations)
{
    MPI_Barrier(m_benchmark->communicator);
    auto start = Clock::now();

    Series series = Series(
        m_benchmark->m_basePath + "." + extension,
        Access::READ_ONLY,
        m_benchmark->communicator);

    for (Series::IterationIndex_t i = 0; i < iterations; i++)
    {
        MeshRecordComponent id =
            series.iterations[i].meshes["id"][MeshRecordComponent::SCALAR];

        auto chunk_data = id.loadChunk<T>(offset, extent);
        series.flush();
    }

    MPI_Barrier(m_benchmark->communicator);
    auto end = Clock::now();
    return end - start;
}
template <typename DatasetFillerProvider>
template <typename Clock>
template <typename T>
void MPIBenchmark<DatasetFillerProvider>::BenchmarkExecution<Clock>::call(
    BenchmarkExecution<Clock> &exec,
    MPIBenchmarkReport<typename Clock::duration> &report,
    int rootThread)
{
    Datatype dt = determineDatatype<T>();
    auto dsf = std::dynamic_pointer_cast<DatasetFiller<T>>(
        exec.m_benchmark->m_dfp.template operator()<T>());
    for (auto const &config : exec.m_benchmark->m_configurations)
    {
        std::string jsonConfig;
        std::string backend;
        int size;
        Datatype dt2;
        Series::IterationIndex_t iterations;
        std::tie(jsonConfig, backend, size, dt2, iterations) = config;

        // this instantiation only handles configurations of its own datatype
        if (dt != dt2)
        {
            continue;
        }

        auto localCuboid = exec.m_benchmark->slice(size);

        extentT blockSize = 1;
        for (auto ext : localCuboid.second)
        {
            blockSize *= ext;
        }
        dsf->setNumberOfItems(blockSize);

        auto writeTime = exec.writeBenchmark<T>(
            jsonConfig,
            localCuboid.first,
            localCuboid.second,
            backend,
            dsf,
            iterations);
        auto readTime = exec.readBenchmark<T>(
            localCuboid.first, localCuboid.second, backend, iterations);

        // hand the measured times for this configuration to the report
        report.addReport(
            rootThread,
            jsonConfig,
            backend,
            size,
            dt2,
            iterations,
            std::make_pair(writeTime, readTime));
    }
}
} // namespace openPMD
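The DatasetFillerProvider template parameter is what call<T>() queries above: m_dfp.template operator()<T>() must return something that std::dynamic_pointer_cast can turn into a std::shared_ptr<DatasetFiller<T>>. Below is a minimal sketch of that contract, under the assumption that the provider only ever serves one element type; the struct and member names are hypothetical, while openPMD itself ships RandomDatasetFiller and SimpleDatasetFillerProvider for actual use (see the usage sketch at the end of this page).

#include <cstdint>
#include <memory>
#include <type_traits>

#include <openPMD/benchmark/mpi/DatasetFiller.hpp>

// Illustrative only: a provider that serves exactly one element type.
struct UintFillerProvider
{
    // a concrete filler, e.g. one of the library's RandomDatasetFiller objects
    std::shared_ptr<openPMD::DatasetFiller<uint64_t>> m_filler;

    template <typename T>
    std::shared_ptr<openPMD::DatasetFiller<T>> operator()()
    {
        if constexpr (std::is_same_v<T, uint64_t>)
        {
            return m_filler;
        }
        else
        {
            // benchmarks using this provider must only be configured with
            // uint64_t datatypes; the dynamic_pointer_cast in call<T>() would
            // otherwise receive an empty pointer
            return nullptr;
        }
    }
};

MPIBenchmark<UintFillerProvider> could then be instantiated with this type, as long as only matching datatypes are added via addConfiguration; SimpleDatasetFillerProvider plays a comparable adapter role around a single concrete DatasetFiller.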
Referenced entities (brief descriptions from the API documentation):
- openPMD::MPIBenchmark: class representing a benchmark (MPIBenchmark.hpp:58).
- MPIBenchmark::MPIBenchmark(std::string basePath, Extent tExtent, std::shared_ptr<BlockSlicer> blockSlicer, DatasetFillerProvider dfp, MPI_Comm comm = MPI_COMM_WORLD): construct an MPI benchmark manually (MPIBenchmark.hpp:249).
- MPIBenchmark::addConfiguration(std::string jsonConfig, std::string backend, Datatype dt, Series::IterationIndex_t iterations, int threadSize) (MPIBenchmark.hpp:277).
- MPIBenchmark::totalExtent: total extent of the hypercuboid used in the benchmark (MPIBenchmark.hpp:67).
- MPIBenchmark::runBenchmark(int rootThread = 0), returning MPIBenchmarkReport<typename Clock::duration>: main function for running a benchmark (MPIBenchmark.hpp:230).
- Series::IterationIndex_t: an unsigned integer type, used to identify Iterations in a Series (Series.hpp:313).
- namespace openPMD: public definitions of openPMD-api.
- Access::CREATE: create a new Series and truncate existing files.
- Access::READ_ONLY: open the Series as read-only; fails if the Series is not found.
- Datatype: concrete datatype of an object available at runtime (Datatype.hpp:51).
- MPIBenchmarkReport: the report for a single benchmark produced by <openPMD/benchmark/mpi/MPIBenchmark> (MPIBenchmarkReport.hpp:45).
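Putting these pieces together, the following is a condensed usage sketch modeled on openPMD's parallel benchmarking example. It assumes the HDF5 and ADIOS2 backends are enabled; the file prefix, extent, distribution bounds and iteration counts are placeholder values, and RandomDatasetFiller, SimpleDatasetFillerProvider and OneDimensionalBlockSlicer are helpers shipped with the library.

#include <openPMD/openPMD.hpp>
#include <openPMD/benchmark/mpi/MPIBenchmark.hpp>
#include <openPMD/benchmark/mpi/OneDimensionalBlockSlicer.hpp>
#include <openPMD/benchmark/mpi/RandomDatasetFiller.hpp>

#include <mpi.h>

#include <chrono>
#include <cstdint>
#include <memory>
#include <random>

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    {
        using type = uint64_t;

        // total (3D) extent of the dataset, shared by all ranks
        openPMD::Extent total{100, 100, 1000};

        // slice the dataset into one hyperslab per rank along dimension 0
        auto blockSlicer =
            std::make_shared<openPMD::OneDimensionalBlockSlicer>(0);

        // random integer data as the write payload
        std::uniform_int_distribution<type> distribution(0, 200000000);
        openPMD::RandomDatasetFiller<decltype(distribution)> filler{
            distribution};
        openPMD::SimpleDatasetFillerProvider<decltype(filler)> provider{filler};

        openPMD::MPIBenchmark<decltype(provider)> benchmark{
            "benchmark", // file prefix; the backend extension is appended
            total,
            blockSlicer,
            provider};

        // one run per backend: default JSON config, 10 iterations each
        benchmark.addConfiguration(
            "{}", "h5", openPMD::determineDatatype<type>(), 10);
        benchmark.addConfiguration(
            "{}", "bp", openPMD::determineDatatype<type>(), 10);

        // res holds the measured write/read durations per configuration;
        // inspect or print it as needed, e.g. on the root rank only
        auto res =
            benchmark.runBenchmark<std::chrono::high_resolution_clock>();
    }
    MPI_Finalize();
    return 0;
}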