"...Kernel/inc/git@code.ornl.gov:mantidproject/mantid.git" did not exist on "add7546dfd8870f2180c6a7743cf5982b025bff5"
Newer
Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
/*
* Distributed under the OSI-approved Apache License, Version 2.0. See
* accompanying file Copyright.txt for details.
*
* Write local arrays from multiple processors and make ADIOS join them
* at reading to show a global array
*
* If every process has an array that differs only in one dimension,
* the arrays can be presented as a single global array by joining them.
* E.g. if every process has a table with a different number of rows,
* and one does not want to do a global communication to calculate the offsets
* in the global table, one can just write the local arrays and let ADIOS
* calculate the offsets at read time (when all block sizes are known).
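*
* For example, if one process writes a 5 x 4 block and another writes a
* 7 x 4 block of the same table, a reader sees a single 12 x 4 array;
* ADIOS computes each block's row offset at read time, so the application
* never has to.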
*
* bpls can show the size of each block of the table:
* bpls -D <file> <variable>
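* For the file written by this example that would be, for instance,
* bpls -D joinedArray.bp table
* (the exact output format depends on the bpls version in use).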
*
* Note: only one dimension can be joinable, every other dimension must be the
* same on each process.
*
* Note: the local dimension size in the joinable dimension is allowed to change
* over time within each processor. However, if the sum of all local sizes
* changes over time, the result will look like a local array.
* (Because global arrays with changing global dimension over time can only be
* handled as local arrays in ADIOS)
*
*
* Created on: Jun 2, 2017
* Author: pnorbert
*/
#include <iostream>
#include <vector>
#include <adios2.h>
#ifdef ADIOS2_HAVE_MPI
#include <mpi.h>
#endif
int main(int argc, char *argv[])
{
    int rank = 0, nproc = 1;
#ifdef ADIOS2_HAVE_MPI
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
#endif

    const bool adiosDebug = true;
    const int NSTEPS = 5;

    // generate different random numbers on each process,
    // but always the same sequence at each run
    srand(rank * 32767);
#ifdef ADIOS2_HAVE_MPI
    adios2::ADIOS adios(MPI_COMM_WORLD, adiosDebug);
#else
    // Serial build: construct ADIOS without an MPI communicator,
    // otherwise 'adios' would be undeclared below
    adios2::ADIOS adios(adiosDebug);
#endif
    // Application variables for output
    // random size per process, 5..10 each
    const unsigned int Nrows = rand() % 6 + 5;
    const unsigned int Ncols = 4;

    // Local array, size is fixed over time on each process
    std::vector<double> mytable(Nrows * Ncols);
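    // mytable is stored row-major: element (row, col) lives at
    // index row * Ncols + col, which is how the fill loop below indexes it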

    try
    {
        // Get io settings from the config file or
        // create one with default settings here
        adios2::IO &io = adios.DeclareIO("Output");
        // io.SetEngine("ADIOS1Writer");
        // io.AddTransport("File", {{"library", "MPI"}});

        /*
         * Define joinable local array: type, name, global and local size.
         * The starting offset can be an empty vector.
         * Only one global dimension can be joined.
         */
        adios2::Variable<double> &varTable = io.DefineVariable<double>(
            "table", {adios2::JoinedDim, Ncols}, {}, {Nrows, Ncols});

        // Open file. OpenMode::Write overwrites any existing file on disk,
        // but Advance() will append steps to the same file.
        auto writer = io.Open("joinedArray.bp", adios2::OpenMode::Write);
        if (writer == nullptr)
        {
            throw std::ios_base::failure(
                "ERROR: failed to open file with ADIOS\n");
        }
        for (int step = 0; step < NSTEPS; step++)
        {
            for (int row = 0; row < Nrows; row++)
            {
                for (int col = 0; col < Ncols; col++)
                {
                    mytable[row * Ncols + col] =
                        rank * 1.0 + row * 0.1 + col * 0.01;
                }
            }

            writer->Write<double>(varTable, mytable.data());
            writer->Advance();
        }

        writer->Close();
    }
    catch (std::invalid_argument &e)
    {
        if (rank == 0)
        {
            std::cout << "Invalid argument exception, STOPPING PROGRAM\n";
            std::cout << e.what() << "\n";
        }
    }
    catch (std::ios_base::failure &e)
    {
        if (rank == 0)
        {
            std::cout << "System exception, STOPPING PROGRAM\n";
            std::cout << e.what() << "\n";
        }
    }
    catch (std::exception &e)
    {
        if (rank == 0)
        {
            std::cout << "Exception, STOPPING PROGRAM\n";
            std::cout << e.what() << "\n";
        }
    }

#ifdef ADIOS2_HAVE_MPI
    MPI_Finalize();
#endif

    return 0;
}