Commit d3251c74 authored by Podhorszki, Norbert

Added heat transfer C++ example with ASCII output.

parent 53eacc4b
1 merge request: !8 Integrate groupless
/*
* HeatTransfer.cpp
*
* Recreates heat_transfer.f90 (Fortran) in C++
*
* Created on: Feb 2017
* Author: Norbert Podhorszki
*
*/
#include <mpi.h>
#include <stdexcept>
#include <memory>
#include <iostream>
#include <iomanip>
#include <string>
#include <math.h>
#include "HeatTransfer.h"
HeatTransfer::HeatTransfer( std::shared_ptr<Settings> settings )
: m_s{settings}
{
m_T1 = new double*[ m_s->ndx+2 ];
m_T1[0] = new double[ (m_s->ndx+2) * (m_s->ndy+2) ];
m_T2 = new double*[ m_s->ndx+2 ];
m_T2[0] = new double[ (m_s->ndx+2) * (m_s->ndy+2) ];
for (int i = 1; i < m_s->ndx+2; i++)
{
m_T1[i] = m_T1[i-1] + m_s->ndy+2;
m_T2[i] = m_T2[i-1] + m_s->ndy+2;
}
m_TCurrent = m_T1;
m_TNext = m_T2;
}
HeatTransfer::~HeatTransfer()
{
delete[] m_T1[0];
delete[] m_T1;
delete[] m_T2[0];
delete[] m_T2;
}
void HeatTransfer::init(bool init_with_rank)
{
if (init_with_rank)
{
for (int i = 0; i < m_s->ndx+2; i++)
for (int j = 0; j < m_s->ndy+2; j++)
m_T1[i][j] = m_s->rank;
}
else
{
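// 4*atan(1) == pi, so hx and hy divide a [0, 2*pi) domain into ndx and ndy cells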
const double hx = 2.0 * 4.0*atan(1.0)/m_s->ndx;
const double hy = 2.0 * 4.0*atan(1.0)/m_s->ndy;
double x, y;
for (int i = 0; i < m_s->ndx+2; i++)
{
x = 0.0 + hx*(i-1);
for (int j = 0; j < m_s->ndy+2; j++)
{
y = 0.0 + hy*(j-1);
m_T1[i][j] = cos(8*x) + cos(6*x) - cos(4*x) + cos(2*x) - cos(x) +
sin(8*y) - sin(6*y) + sin(4*y) - sin(2*y) + sin(y);
}
}
}
m_TCurrent = m_T1;
m_TNext = m_T2;
}
void HeatTransfer::printT(std::string message, MPI_Comm comm)
{
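// Serialize output across ranks with a token relay: each rank waits for a
// token from rank-1 before printing, then passes the token on to rank+1.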
int rank, size;
int tag = 1;
int token = 0; // payload is unused; the message itself provides the ordering
MPI_Status status;
MPI_Comm_rank( comm, &rank );
MPI_Comm_size( comm, &size );
if( rank > 0)
{
MPI_Recv(&token, 1, MPI_INT, rank-1, tag, comm, &status);
}
std::cout << "Rank " << rank << " " << message << std::endl;
for (int i = 0; i < m_s->ndx+2; i++)
{
std::cout << " T[" << i << "][] = ";
for (int j = 0; j < m_s->ndy+2; j++)
{
std::cout << std::setw(6) << m_TCurrent[i][j];
}
std::cout << std::endl;
}
std::cout << std::endl;
if( rank < size-1)
{
MPI_Send(&token, 1, MPI_INT, rank+1, tag, comm);
}
}
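// Double buffering: iterate() writes into m_TNext, then the two planes are
// swapped so the new values become m_TCurrent for the next pass.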
void HeatTransfer::switchCurrentNext()
{
double **tmp = m_TCurrent;
m_TCurrent = m_TNext;
m_TNext = tmp;
}
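// Weighted Jacobi relaxation on interior points (ghost cells supply the
// neighbor data):
//   T_next[i][j] = (omega/4) * (T[i-1][j] + T[i+1][j] + T[i][j-1] + T[i][j+1])
//                + (1 - omega) * T[i][j]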
void HeatTransfer::iterate()
{
for( int i = 1; i <= m_s->ndx; ++i)
{
for( int j = 1; j <= m_s->ndy; ++j)
{
m_TNext[i][j] =
omega/4*(m_TCurrent[i-1][j] +
m_TCurrent[i+1][j] +
m_TCurrent[i][j-1] +
m_TCurrent[i][j+1]) +
(1.0-omega)*m_TCurrent[i][j];
}
}
switchCurrentNext();
}
void HeatTransfer::heatEdges()
{
// Heat the whole global edges
if( m_s->posx==0 )
for( int j = 0; j < m_s->ndy+2; ++j)
m_TCurrent[0][j]= edgetemp;
if( m_s->posx==m_s->npx-1 )
for( int j = 0; j < m_s->ndy+2; ++j)
m_TCurrent[m_s->ndx+1][j]= edgetemp;
if (m_s->posy==0)
for( int i = 0; i < m_s->ndx+2; ++i)
m_TCurrent[i][0]= edgetemp;
if (m_s->posy==m_s->npy-1)
for( int i = 0; i < m_s->ndx+2; ++i)
m_TCurrent[i][m_s->ndy+1]= edgetemp;
}
void HeatTransfer::exchange( MPI_Comm comm )
{
// Exchange ghost cells, in the order left-right-up-down
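// The i=1 and i=ndx rows are contiguous in memory and can be sent in place;
// the j=1 and j=ndy columns are strided, so they are packed into the
// send_x/recv_x buffers first.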
double * send_x = new double[m_s->ndx+2];
double * recv_x = new double[m_s->ndx+2];
// send to left + receive from right
int tag = 1;
MPI_Status status;
if( m_s->rank_left >= 0 )
{
std::cout << "Rank " << m_s->rank << " send left to rank " << m_s->rank_left;
for( int i = 0; i < m_s->ndx+2; ++i)
send_x[i] = m_TCurrent[i][1];
MPI_Send(send_x, m_s->ndx+2, MPI_REAL8, m_s->rank_left, tag, comm);
}
if( m_s->rank_right >= 0 )
{
std::cout << "Rank " << m_s->rank << " receive from right from rank " << m_s->rank_right;
MPI_Recv(recv_x, m_s->ndx+2, MPI_REAL8, m_s->rank_right, tag, comm, &status);
for( int i = 0; i < m_s->ndx+2; ++i)
m_TCurrent[i][m_s->ndy+1] = recv_x[i];
}
// send to right + receive from left
tag = 2;
if( m_s->rank_right >= 0 )
{
std::cout << "Rank " << m_s->rank << " send right to rank " << m_s->rank_right;
for( int i = 0; i < m_s->ndx+2; ++i)
send_x[i] = m_TCurrent[i][m_s->ndy];
MPI_Send(send_x, m_s->ndx+2, MPI_REAL8, m_s->rank_right, tag, comm);
}
if( m_s->rank_left >= 0 )
{
std::cout << "Rank " << m_s->rank << " receive from left from rank " << m_s->rank_left;
MPI_Recv(recv_x, m_s->ndx+2, MPI_REAL8, m_s->rank_left, tag, comm, &status);
for( int i = 0; i < m_s->ndx+2; ++i)
m_TCurrent[i][0] = recv_x[i];
}
// send down + receive from above
tag = 3;
if( m_s->rank_down >= 0 )
{
std::cout << "Rank " << m_s->rank << " send down to rank " << m_s->rank_down;
MPI_Send(m_TCurrent[m_s->ndx], m_s->ndy+2, MPI_REAL8, m_s->rank_down, tag, comm);
}
if ( m_s->rank_up >= 0 )
{
std::cout << "Rank " << m_s->rank << " receive from above from rank " << m_s->rank_up;
MPI_Recv(m_TCurrent[0], m_s->ndy+2, MPI_REAL8, m_s->rank_up, tag, comm, &status);
}
// send up + receive from below
tag = 4;
if( m_s->rank_up >= 0 )
{
std::cout << "Rank " << m_s->rank << " send up to rank " << m_s->rank_up;
MPI_Send(m_TCurrent[1], m_s->ndy+2, MPI_REAL8, m_s->rank_up, tag, comm);
}
if ( m_s->rank_down >= 0 )
{
std::cout << "Rank " << m_s->rank << " receive from below from rank " << m_s->rank_down;
MPI_Recv(m_TCurrent[m_s->ndx+1], m_s->ndy+2, MPI_REAL8, m_s->rank_down, tag, comm, &status);
}
delete[] send_x;
delete[] recv_x;
}
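A minimal, self-contained sketch of the allocation pattern used in the HeatTransfer constructor above: one contiguous block of (ndx+2)*(ndy+2) doubles plus an array of row pointers, so T[i][j] indexing and the flat pointer returned by data() address the same memory. (Illustrative only; the sizes here are made up.)

#include <iostream>

int main()
{
    const int nx = 4, ny = 3;           // stands in for ndx+2, ndy+2
    double **T = new double*[nx];       // array of row pointers
    T[0] = new double[nx * ny];         // single contiguous block
    for (int i = 1; i < nx; i++)
        T[i] = T[i-1] + ny;             // each row starts ny doubles later
    T[2][1] = 42.0;
    std::cout << T[0][2*ny + 1] << std::endl; // same element via the flat view: 42
    delete[] T[0];
    delete[] T;
    return 0;
}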
/*
* HeatTransfer.h
*
* Created on: Feb 2017
* Author: Norbert Podhorszki
*/
#ifndef HEATTRANSFER_H_
#define HEATTRANSFER_H_
#include <mpi.h>
#include "Settings.h"
class HeatTransfer
{
public:
HeatTransfer( std::shared_ptr<Settings> settings ); // create the two 2D arrays (with ghost cells) used in the computation
~HeatTransfer();
void init(bool init_with_rank); // set up array values with either rank or real demo values
void iterate(); // one local calculation step
void heatEdges(); // reset the heat values at the global edge
void exchange( MPI_Comm comm ); // send updates to neighbors
const double *data() const {return m_TCurrent[0];} // return flat (1D) pointer to current T data
double T(int i, int j) const {return m_TCurrent[i][j];} // return current T value at local coordinate (i,j)
void printT(std::string message, MPI_Comm comm); // debug: print local TCurrent on stdout
private:
const double edgetemp = 3.0; // temperature at the edges of the global plate
const double omega = 0.8; // weight for current temp is (1-omega) in iteration
double **m_T1; // 2D array (ndx+2) * (ndy+2) size, including ghost cells
double **m_T2; // another 2D array
double **m_TCurrent; // pointer to T1 or T2
double **m_TNext; // pointer to T2 or T1
std::shared_ptr<Settings> m_s;
void switchCurrentNext(); // switch the current array with the next array
};
#endif /* HEATTRANSFER_H_ */
/*
* IO.h
*
* Created on: Feb 2017
* Author: Norbert Podhorszki
*/
#ifndef IO_H_
#define IO_H_
#include <mpi.h>
#include "Settings.h"
#include "HeatTransfer.h"
class IO
{
public:
IO( std::string output_basename, MPI_Comm comm );
~IO();
void write( int step, std::shared_ptr<HeatTransfer> ht, std::shared_ptr<Settings> s, MPI_Comm comm );
private:
std::string m_outputfilename;
};
#endif /* IO_H_ */
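/* Note: this commit provides two bodies for this interface: an empty
 * IO_adios2.cpp skeleton (next) and IO_ascii.cpp, which writes the
 * plain-text output named in the commit message. The Makefile links
 * exactly one IO_*.cpp per build target. */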
/*
 * IO_adios2.cpp
 *
 * Empty skeleton for ADIOS2 output; signatures match the interface in IO.h.
 *
 * Created on: Feb 2017
 * Author: Norbert Podhorszki
 */
#include "IO.h"
IO::IO( std::string output_basename, MPI_Comm comm )
{
m_outputfilename = output_basename + ".bp";
}
IO::~IO()
{
}
void IO::write( int step, std::shared_ptr<HeatTransfer> ht, std::shared_ptr<Settings> s, MPI_Comm comm )
{
}
/*
 * IO_ascii.cpp
 *
 * Created on: Feb 2017
 * Author: Norbert Podhorszki
 */
#include <iostream>
#include <iomanip>
#include <fstream>
#include "IO.h"
static std::ofstream of;
static std::streambuf *buf;
IO::IO( std::string output_basename, MPI_Comm comm )
{
m_outputfilename = output_basename;
if (m_outputfilename == "cout")
{
buf = std::cout.rdbuf();
}
else
{
int rank;
MPI_Comm_rank( comm, &rank );
std::string rs = std::to_string(rank);
of.open(output_basename + rs + ".txt");
buf = of.rdbuf();
}
}
IO::~IO()
{
if (m_outputfilename != "cout")
{
of.close();
}
}
void IO::write( int step, std::shared_ptr<HeatTransfer> ht, std::shared_ptr<Settings> s, MPI_Comm comm)
{
std::ostream out(buf);
if( step == 1)
{
out << "rank=" << s->rank
<< " size=" << s->ndx << "x" << s->ndy
<< " offsets=" << s->offsx << ":" << s->offsy
<< " step=" << step << std::endl;
out << " time row columns " << s->offsy << "..." << s->offsy+s->ndy-1 << std::endl;
out << " ";
for (int j = 1; j <= s->ndy; ++j) {
out << std::setw(9) << s->offsy+j-1;
}
out << "\n--------------------------------------------------------------\n";
}
else
{
out << std::endl;
}
for (int i = 1; i <= s->ndx; ++i)
{
out << std::setw(5) << step << std::setw(5) << s->offsx+i-1;
for (int j = 1; j <= s->ndy; ++j)
{
out << std::setw(9) << ht->T(i,j);
}
out << std::endl;
}
}
# Makefile for testing purposes; builds the example with a chosen IO implementation (e.g. make ascii, make adios2)
# Created on: Feb 13, 2017
# Author: pnorbert
#COMPILERS
CC=g++
MPICC=mpic++
#ADIOS LOCATION
ADIOS_DIR=../..
ADIOS_INCLUDE=-I$(ADIOS_DIR)/include
ADIOS_LIB=$(ADIOS_DIR)/lib/libadios.a
ADIOS_NOMPI_LIB=$(ADIOS_DIR)/lib/libadios_nompi.a
#FLAGS
CFLAGS=-Wall -Wpedantic -Woverloaded-virtual -std=c++11 -O0 -g
LDFLAGS=
default:
	@echo "Make targets: ascii hdf5_a hdf5_b phdf5 adios1 adios2"
	@echo "  ascii:  build example with text output (IO_ascii.cpp)"
	@echo "  hdf5_a: build example with HDF5, separate timesteps (IO_hdf5_a.cpp)"
	@echo "  hdf5_b: build example with HDF5, combined timesteps (IO_hdf5_b.cpp)"
	@echo "  phdf5:  build example with Parallel HDF5 (IO_phdf5.cpp)"
	@echo "  adios1: build example with ADIOS 1.x (IO_adios1.cpp)"
	@echo "  adios2: build example with ADIOS2 (IO_adios2.cpp)"

all: default
help: default

ascii: main.cpp HeatTransfer.cpp Settings.cpp IO_ascii.cpp
	$(MPICC) $(CFLAGS) $(ADIOS_INCLUDE) -DHAVE_MPI -o heatTransfer_ascii $^ $(ADIOS_LIB) $(LDFLAGS) -lpthread

adios2: main.cpp HeatTransfer.cpp Settings.cpp IO_adios2.cpp
	$(MPICC) $(CFLAGS) $(ADIOS_INCLUDE) -DHAVE_MPI -o heatTransfer_adios2 $^ $(ADIOS_LIB) $(LDFLAGS) -lpthread

clean:
	rm -f heatTransfer_ascii heatTransfer_adios2
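# Illustrative build-and-run example (process counts and sizes are made up;
# N*M must equal the number of MPI processes):
#   make ascii
#   mpirun -np 12 ./heatTransfer_ascii heat 4 3 5 10 10 10
# -> 4x3 process grid, 5x10 local arrays, 10 output steps, 10 iterations each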
/*
* Settings.cpp
*
* Created on: Feb 2017
* Author: Norbert Podhorszki
*/
#include <errno.h>
#include <cstdlib>
#include <stdexcept>
#include "Settings.h"
static int convertToInt( std::string varName, char *arg )
{
char *end;
errno = 0;
long retval = std::strtol( arg, &end, 10);
if( end[0] || errno == ERANGE )
{
throw std::invalid_argument( "Invalid value given for " + varName + ": "
+ std::string(arg) );
}
return static_cast<int>(retval);
}
Settings::Settings( int argc, char* argv [], int rank, int nproc )
: rank{rank}, nproc{nproc}
{
if (argc < 8)
{
throw std::invalid_argument( "Not enough arguments" );
}
outputfile = argv[1];
npx = convertToInt("N", argv[2]);
npy = convertToInt("M", argv[3]);
ndx = convertToInt("nx", argv[4]);
ndy = convertToInt("ny", argv[5]);
steps = convertToInt("steps", argv[6]);
iterations = convertToInt("iterations", argv[7]);
if( npx * npy != nproc )
{
throw std::invalid_argument( "N*M must equal the number of processes" );
}
// calculate global array size and the local offsets in that global space
gndx = npx * ndx;
gndy = npy * ndy;
posx = rank % npx;
posy = rank / npx;
offsx = posx * ndx;
offsy = posy * ndy;
// determine neighbors
if( posx == 0 )
rank_left = -1;
else
rank_left = rank-1;
if( posx == npx-1 )
rank_right = -1;
else
rank_right = rank+1;
if( posy == 0 )
rank_up = -1;
else
rank_up = rank - npx;
if( posy == npy-1 )
rank_down = -1;
else
rank_down = rank + npx;
}
Settings::~Settings()
{
}
/*
* Settings.h
*
* Created on: Feb 2017
* Author: Norbert Podhorszki
*/
#ifndef SETTINGS_H_
#define SETTINGS_H_
#include <memory>
#include <string>
class Settings
{
public:
// user arguments
std::string outputfile;
int npx; // Number of processes in X (slow) dimension
int npy; // Number of processes in Y (fast) dimension
int ndx; // Local array size in X dimension per process
int ndy; // Local array size in y dimension per process
int steps; // Number of output steps
int iterations; // Number of computing iterations between steps
// calculated values from those arguments and number of processes
int gndx; // Global array size in slow dimension
int gndy; // Global array size in fast dimension
// X dim positions: rank 0, npx, 2npx... are in the same X position
// Y dim positions: npx number of consecutive processes belong to one row (npx columns)
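// Example: npx=3, npy=2 lays the 6 ranks out as
//     posy=0:  0 1 2
//     posy=1:  3 4 5
// so rank 4 has posx=1, posy=1, rank_left=3, rank_right=5, rank_up=1, rank_down=-1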
int posx; // Position of this process in X dimension
int posy; // Position of this process in Y dimension
int offsx; // Offset of local array in X dimension on this process
int offsy; // Offset of local array in Y dimension on this process
int rank; // MPI rank
int nproc; // number of processors
// neighbours by their MPI ranks
int rank_left;
int rank_right;
int rank_up;
int rank_down;
Settings( int argc, char* argv [], int rank, int nproc );
~Settings();
};
#endif /* SETTINGS_H_ */
#include <mpi.h>
#include <stdexcept>
#include <iostream>
#include "heatTransferStructs.h"
/**
* heatTransfer.cpp
*
* Recreates heat_transfer.f90 (Fortran) in C++
*
* Created on: Sep 29, 2016
* Author: William F. Godoy
*
* @param argc
* @param argv
* @return
*/
int main( int argc, char* argv [] )
{
try
{
MPI_Init( &argc, &argv );
int rank;
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
int size;
MPI_Comm_size( MPI_COMM_WORLD, &size );
MPI_Barrier( MPI_COMM_WORLD );
// World comm may span multiple applications, so split off a communicator for heatTransfer only
const unsigned int color = 1;
MPI_Comm mpiHeatTransferComm;
MPI_Comm_split( MPI_COMM_WORLD, color, rank, &mpiHeatTransferComm );
MPI_Comm_rank( mpiHeatTransferComm, &rank );
MPI_Comm_size( mpiHeatTransferComm, &size );
double timeStart = MPI_Wtime( );
// TODO: implement the call to the adios init function here
}
catch( std::ios_base::failure& e ) //I/O failure (e.g. file not found)
{
std::cout << "I/O base exception caught\n";
std::cout << e.what() << std::endl;
}
catch( std::exception& e ) //All other exceptions
{
std::cout << "Exception caught\n";
std::cout << e.what() << std::endl;
}
MPI_Finalize( );
return 0;
}
/*
* heatTransferStructs.h
*
* Created on: Sep 29, 2016
* Author: wfg
*/
#ifndef HEATTRANSFERSTRUCTS_H_
#define HEATTRANSFERSTRUCTS_H_
#endif /* HEATTRANSFERSTRUCTS_H_ */
/*
* main.cpp
*
* Recreates heat_transfer.f90 (Fortran) ADIOS tutorial example in C++
*
* Created on: Feb 2017
* Author: Norbert Podhorszki
*
*/
#include <mpi.h>
#include <stdexcept>
#include <memory>
#include <iostream>
#include <string>
#include "Settings.h"
#include "IO.h"
#include "HeatTransfer.h"
void printUsage()
{
std::cout << "Usage: heatTransfer output N M nx ny steps iterations\n"
<< " output: name of output file\n"
<< " N: number of processes in X dimension\n"
<< " M: number of processes in Y dimension\n"
<< " nx: local array size in X dimension per processor\n"
<< " ny: local array size in Y dimension per processor\n"
<< " steps: the total number of steps to output\n"
<< " iterations: one step consist of this many iterations\n\n";
}
int main( int argc, char* argv [] )
{
MPI_Init( &argc, &argv );
/* World comm spans all applications started with the same aprun command
on a Cray XK6. So we have to split and create the local
'world' communicator for heat_transfer only.
In normal start-up, the communicator will just equal the MPI_COMM_WORLD.
*/
int wrank, wnproc;
MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
MPI_Comm_size( MPI_COMM_WORLD, &wnproc );
MPI_Barrier( MPI_COMM_WORLD );
const unsigned int color = 1;
MPI_Comm mpiHeatTransferComm;
MPI_Comm_split( MPI_COMM_WORLD, color, wrank, &mpiHeatTransferComm );
int rank, nproc;
MPI_Comm_rank( mpiHeatTransferComm, &rank );
MPI_Comm_size( mpiHeatTransferComm, &nproc );
try
{
double timeStart = MPI_Wtime();
std::shared_ptr<Settings> settings( new Settings( argc, argv, rank, nproc ));
std::shared_ptr<HeatTransfer> ht( new HeatTransfer( settings ));
std::shared_ptr<IO> io( new IO( settings->outputfile, mpiHeatTransferComm ));
ht->init(false);
ht->printT("Initialized T:", mpiHeatTransferComm);
ht->heatEdges();
ht->printT("Heated T:", mpiHeatTransferComm);
for( int t = 1; t <= settings->steps; ++t )
{
if( rank == 0 )
std::cout << "Step " << t << ":\n";
for( int iter = 1; iter <= settings->iterations; ++iter )
{
ht->iterate();
ht->heatEdges();
ht->exchange( mpiHeatTransferComm );
}
io->write( t, ht, settings, mpiHeatTransferComm );
}
MPI_Barrier( mpiHeatTransferComm );
double timeEnd = MPI_Wtime();
if( rank == 0 )
std::cout << "Total runtime = " << timeEnd-timeStart << "s\n";
}
catch( std::invalid_argument& e ) // command-line argument errors
{
std::cout << e.what() << std::endl;
printUsage();
}
catch( std::ios_base::failure& e ) //I/O failure (e.g. file not found)
{
std::cout << "I/O base exception caught\n";
std::cout << e.what() << std::endl;
}
catch( std::exception& e ) //All other exceptions
{
std::cout << "Exception caught\n";
std::cout << e.what() << std::endl;
}
MPI_Finalize();
return 0;
}