Commit 48e4301b authored by Pillai, Himanshu's avatar Pillai, Himanshu
Browse files

HPX build fixed with CMake; still needs debugging to pass the test

parent 2cf2a813
cmake_minimum_required(VERSION 3.10)

# NOTE(review): all tool/library locations are hard-coded to one developer's
# machine; consider turning these into cache variables (-DHPX_DIR=... etc.).
set(HPX_DIR "/home/7hp/Downloads/hpx/build")
set(HPX_INCLUDE_DIR "/home/7hp/Downloads/hpx/build/include")
set(HPX_LIB_DIR "/home/7hp/Downloads/hpx/build/lib/")
set(ELM_ROOT "/home/7hp/Downloads/elm_kernels/src/cc/")
set(NETCDF "/usr/local")
set(HPX_IGNORE_COMPILER_COMPATIBILITY On)

# Compiler must be pinned before project() for CMake to honour it.
set(HPX_CXX_COMPILER "/usr/bin/c++")
set(CMAKE_CXX_COMPILER "/usr/bin/c++")
set(HPX_CXX_COMPILER_ID "GNU")
set(CMAKE_CXX_COMPILER_ID "GNU")

# BUG FIX: "$HPX_DIR" is a literal string in CMake -- variable expansion
# requires ${...}.  Quoted so the path stays a single list element.
set(CMAKE_MODULE_PATH "${HPX_DIR}")

# ANSI colour escape codes for the banner below (skipped on Windows consoles).
if(NOT WIN32)
  string(ASCII 27 Esc)
  set(ColourReset "${Esc}[m")
  set(Red "${Esc}[31m")
  set(Green "${Esc}[32m")
  set(Yellow "${Esc}[33m")
  set(Blue "${Esc}[34m")
  set(Magenta "${Esc}[35m")
  set(Cyan "${Esc}[36m")
endif()

message(" ")
message(" ${Blue}EeeeeeeeeeeeeeE LLlL Mm Mm Kk kk LllL${ColourReset}")
message(" ${Blue}EeeeeeeeeeeeeeE LLlL Mm Mm Mm Mm Kk kk LllL${ColourReset}")
message(" ${Blue}Eee LLlL Mm Mm Mm Mm Kk kk LllL${ColourReset}")
message(" ${Blue}Eee LllL Mm Mm Mm Mm Kk kk LllL${ColourReset}")
message(" ${Blue}Eee LllL Mm Mm Mm Mm Kk kk LllL${ColourReset}")
message(" ${Blue}Eee LllL Mm Mm Mm Mm Kk kk eeeeeeeee RrRrRrRrRr Nnn Nn eeeeeeeee LllL${ColourReset}")
message(" ${Blue}EeeeeeeeE LllL Mm Mm Mm Kk kk kk eeeeeeeee Rr Rr Nn nn Nn eeeeeeeee LllL${ColourReset}")
message(" ${Blue}EeeeeeeeE LllL Mm Mm Kkk kk eeee Rr Rr Nn nn Nn eeee LllL${ColourReset}")
message(" ${Blue}Eee LllL Mm Mm Kk kk eeeeeeee RrRrRrRrRr Nn nn Nn eeeeeeee LllL${ColourReset}")
message(" ${Blue}Eee LllL Mm Mm Kk kk eeeeeeee RrR Nn nn Nn eeeeeeee LllL${ColourReset}")
message(" ${Blue}Eee LllL Mm Mm Kk kk eeee Rr Rr Nn nn Nn eeee LllL${ColourReset}")
message(" ${Blue}Eee LllL Mm Mm Kk kk eeee Rr Rr Nn nn Nn eeee LllL${ColourReset}")
message(" ${Blue}EeeeeeeeeeeeeeE LllllllllllLllL Mm Mm Kk kk eeeeeeeee Rr Rr Nn nn Nn eeeeeeeee LlllllllLllL${ColourReset}")
message(" ${Blue}EeeeeeeeeeeeeeE LllllllllllLllL Mm Mm Kk kk eeeeeeeee Rr Rr Nn nnNn eeeeeeeee LlllllllLllL${ColourReset}")
message(" ")
message(" ")
message(" ${Green} Copyright 2019, UT Battelle / Oak Ridge National Laboratory ${ColourReset}")
message(" ")
message(" ${Magenta} Collaboration ${ColourReset}")
message(" ${Cyan}Oak Ridge Leadership Computing Facility ${ColourReset}")
message(" ${Cyan}United States Department of Energy ${ColourReset}")
message(" ${Cyan}The Energy Exascale Earth System Model (E3SM) Project ${ColourReset}")
message(" ${Cyan}Coupling Approaches for Next-Generation Architectures (CANGA) - Scientific Discovery through Advanced Computing (SciDAC) ${ColourReset}")
message(" ")
message("${Yellow} Contact ${ColourReset} ${Red}coonet@ornl.gov , pillaihk@ornl.gov ${ColourReset}")

#find_package(OpenMP)
#if (OPENMP_FOUND)
# set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${OpenMP_C_FLAGS}")
# set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${OpenMP_CXX_FLAGS}")
# set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${OpenMP_EXE_LINKER_FLAGS}")
#endif()

project(my_hpx_project CXX)

find_package(HPX REQUIRED)

# Same values as the hard-coded paths above, now spelled via the variables so
# a single edit relocates the dependencies.
include_directories(
  "${ELM_ROOT}"
  "${NETCDF}/include"
)

add_hpx_executable(test_CanopyHydrology_kern1_multiple
  ESSENTIAL
  SOURCES CanopyHydrology_kern1_multiple.cpp
  LINK_FLAGS "-I${NETCDF}/include -I${ELM_ROOT}"
  COMPONENT_DEPENDENCIES iostreams init
  DEPENDENCIES -L${NETCDF}/lib -lnetcdf)

add_hpx_executable(test_CanopyHydrology_kern1_single
  ESSENTIAL
  SOURCES CanopyHydrology_kern1_single.cpp
  LINK_FLAGS "-I${NETCDF}/include -I${ELM_ROOT}"
  COMPONENT_DEPENDENCIES iostreams init
  DEPENDENCIES -L${NETCDF}/lib -lnetcdf)

add_hpx_executable(test_CanopyHydrology_module
  ESSENTIAL
  SOURCES CanopyHydrology_module.cpp
  LINK_FLAGS "-I${NETCDF}/include -I${ELM_ROOT}"
  COMPONENT_DEPENDENCIES iostreams init
  DEPENDENCIES -L${NETCDF}/lib -lnetcdf)

#execute_process(
# COMMAND ./test_CanopyHydrology_kern1_single
# OUTPUT_FILE "test_CanopyHydrology_kern1_single.stdout"
#)
#execute_process(
# COMMAND ./test_CanopyHydrology_kern1_multiple
# OUTPUT_FILE "test_CanopyHydrology_kern1_multiple.stdout"
#)
#enable_testing()
#add_test(NAME HPXtest
# COMMAND python ../compare_to_gold.py test_CanopyHydrology_kern1_single test_CanopyHydrology_kern1_multiple
#)

# Convenience target to scrub the build tree (in-source build assumed).
add_custom_target(cleanall
  COMMAND rm -r CMakeFiles/ Testing/
  COMMAND rm test_* CMakeCache.txt cmake_* CTestTestfile.cmake Makefile
)
\ No newline at end of file
#include <hpx/hpx_main.hpp>
#include <hpx/include/iostreams.hpp>
#include <hpx/include/parallel_for_loop.hpp>
#include <array>
#include <sstream>
#include <iterator>
#include <exception>
#include <string>
#include <stdlib.h>
#include <vector>
#include <iostream>
#include <iomanip>
#include <numeric>
#include <fstream>
#include <algorithm>
#include <assert.h>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <sys/time.h>
#include <unistd.h>
//#include <mpi.h>
#include <chrono>
#include "utils.hh"
#include "readers.hh"
#include "CanopyHydrology.hh"
using namespace std::chrono;
namespace ELM {
namespace Utils {
// Compile-time problem dimensions for the multi-cell interception driver.
static const int n_months = 12;   // months of phenology data per surface file
static const int n_pfts = 17;     // plant functional types per grid cell
static const int n_max_times = 31 * 24 * 2; // max days per month times hours per
// day * half hour timestep
// 24 grid cells = two 12-month read_phenology() fills (WBW then BRW) in main.
static const int n_grid_cells = 24;
} // namespace Utils
} // namespace ELM
int main(int argc, char ** argv)
{
  // Multi-cell driver for the CanopyHydrology interception kernel: reads
  // phenology and forcing inputs, runs the kernel over every
  // (grid cell, PFT) pair per timestep, and logs total/min/max canopy
  // water to "test_CanopyHydrology_kern1_multiple.soln" and stdout.
  using ELM::Utils::n_months;
  using ELM::Utils::n_pfts;
  using ELM::Utils::n_grid_cells;
  using ELM::Utils::n_max_times;

  // fixed magic parameters for now
  const int ctype = 1;
  const int ltype = 1;
  const bool urbpoi = false;
  const bool do_capsnow = false;
  const int frac_veg_nosno = 1;
  int n_irrig_steps_left = 0;
  const double dewmx = 0.1;
  const double dtime = 1800.0;  // half-hour timestep, in seconds

  // BUG FIX: the original indexed flat arrays as a[i,j]; in C++ the comma
  // operator makes that a[j], so every access collapsed to row 0.  All
  // accesses below use explicit row-major flattening instead.
  // (Assumes the readers fill row-major -- TODO confirm in readers.hh.)

  // phenology state, flattened [grid_cell][pft]
  double* elai = new double[n_grid_cells * n_pfts];
  double* esai = new double[n_grid_cells * n_pfts];
  ELM::Utils::read_phenology("../links/surfacedataWBW.nc", n_months, n_pfts, 0, elai, esai);
  ELM::Utils::read_phenology("../links/surfacedataBRW.nc", n_months, n_pfts, n_months, elai, esai);

  // forcing state, flattened [time][grid_cell]
  double* forc_rain = new double[n_max_times * n_grid_cells];
  double* forc_snow = new double[n_max_times * n_grid_cells];
  double* forc_air_temp = new double[n_max_times * n_grid_cells];
  const int n_times = ELM::Utils::read_forcing("../links/forcing", n_max_times, 0, n_grid_cells, forc_rain, forc_snow, forc_air_temp);
  // BUG FIX: value-initialized -- the original read uninitialized memory.
  double* forc_irrig = new double[n_max_times * n_grid_cells]();

  // output fluxes by (grid cell, pft), flattened row-major
  const int n_flat = n_grid_cells * n_pfts;
  double* qflx_prec_intr       = new double[n_flat];
  double* qflx_irrig           = new double[n_flat];
  double* qflx_prec_grnd       = new double[n_flat];
  double* qflx_snwcp_liq       = new double[n_flat];
  double* qflx_snwcp_ice       = new double[n_flat];
  double* qflx_snow_grnd_patch = new double[n_flat];
  double* qflx_rain_grnd       = new double[n_flat];

  // canopy water state by (grid cell, pft); value-initialized to zero
  // (BUG FIX: the original summed uninitialized memory at t = 0).
  double* h2o_can = new double[n_flat]();
  // BUG FIX: "&h2o_can[n_grid_cells, n_pfts]" was &h2o_can[n_pfts]; use a
  // proper one-past-the-end pointer for the whole array.
  double* const end = h2o_can + n_flat;

  std::ofstream soln_file;
  soln_file.open("test_CanopyHydrology_kern1_multiple.soln");
  soln_file << "Time\t Total Canopy Water\t Min Water\t Max Water" << std::endl;
  std::cout << "Time\t Total Canopy Water\t Min Water\t Max Water" << std::endl;

  // initial (t = 0) statistics
  auto min_max = std::minmax_element(h2o_can, end);
  soln_file << std::setprecision(16)
            << 0 << "\t" << std::accumulate(h2o_can, end, 0.)
            << "\t" << *min_max.first
            << "\t" << *min_max.second << std::endl;
  std::cout << std::setprecision(16)
            << 0 << "\t" << std::accumulate(h2o_can, end, 0.)
            << "\t" << *min_max.first
            << "\t" << *min_max.second << std::endl;

  auto start = high_resolution_clock::now();

  // main loop
  // -- the timestep loop cannot/should not be parallelized
  for (int t = 0; t < n_times; ++t) {
    // grid cell and/or pft loops can be parallelized
    for (int g = 0; g < n_grid_cells; ++g) {
      const int tg = t * n_grid_cells + g;  // [time][cell] flat index
      for (int p = 0; p < n_pfts; ++p) {
        const int gp = g * n_pfts + p;      // [cell][pft] flat index
        // NOTE: this currently punts on what to do with the qflx variables!
        // Surely they should be either accumulated or stored on PFTs as well.
        // --etc
        ELM::CanopyHydrology_Interception(dtime,
                forc_rain[tg], forc_snow[tg], forc_irrig[tg],
                ltype, ctype, urbpoi, do_capsnow,
                elai[gp], esai[gp], dewmx, frac_veg_nosno,
                h2o_can[gp], n_irrig_steps_left,
                qflx_prec_intr[gp], qflx_irrig[gp], qflx_prec_grnd[gp],
                qflx_snwcp_liq[gp], qflx_snwcp_ice[gp],
                qflx_snow_grnd_patch[gp], qflx_rain_grnd[gp]);
      }
    }

    // per-timestep statistics over the whole canopy-water array
    auto mm = std::minmax_element(h2o_can, end);
    std::cout << std::setprecision(16)
              << t+1 << "\t" << std::accumulate(h2o_can, end, 0.)
              << "\t" << *mm.first
              << "\t" << *mm.second << std::endl;
    soln_file << std::setprecision(16)
              << t+1 << "\t" << std::accumulate(h2o_can, end, 0.)
              << "\t" << *mm.first
              << "\t" << *mm.second << std::endl;
  }
  soln_file.close();

  auto stop = high_resolution_clock::now();
  auto duration = duration_cast<microseconds>(stop - start);
  hpx::cout << "Time taken by function: "<< duration.count() << " microseconds" << std::endl;

  // release all heap buffers (the original leaked every allocation)
  delete[] elai; delete[] esai;
  delete[] forc_rain; delete[] forc_snow; delete[] forc_air_temp;
  delete[] forc_irrig;
  delete[] qflx_prec_intr; delete[] qflx_irrig; delete[] qflx_prec_grnd;
  delete[] qflx_snwcp_liq; delete[] qflx_snwcp_ice;
  delete[] qflx_snow_grnd_patch; delete[] qflx_rain_grnd;
  delete[] h2o_can;

  return hpx::finalize();
}
#include <hpx/hpx_main.hpp>
#include <hpx/include/iostreams.hpp>
#include <hpx/include/parallel_for_loop.hpp>
#include <netcdf.h>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <sys/time.h>
#include <unistd.h>
#include <array>
#include <sstream>
#include <iterator>
#include <exception>
#include <string>
#include <stdlib.h>
#include <cstring>
#include <vector>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <assert.h>
//#include <mpi.h>
#include <chrono>
#include "utils.hh"
#include "readers.hh"
#include "CanopyHydrology.hh"
using namespace std::chrono;
namespace ELM {
namespace Utils {
// Compile-time problem dimensions for the single-cell interception driver.
static const int n_months = 12;   // months of phenology data in the surface file
static const int n_pfts = 17;     // plant functional types
static const int n_max_times = 31 * 24 * 2; // max days per month times hours per
// day * half hour timestep
} // namespace Utils
} // namespace ELM
int main(int argc, char ** argv)
{
  // Single-cell driver for the CanopyHydrology interception kernel: reads
  // one site's phenology and forcing, runs the kernel once per timestep
  // with a fixed (month, PFT) LAI/SAI pair, and writes per-step values to
  // "test_CanopyHydrology_kern1_single.soln".

  // dimensions (local copies; ELM::Utils also declares these)
  const int n_months = 12;
  const int n_pfts = 17;
  const int n_max_times = 31 * 24 * 2; // max days per month times hours per
                                       // day * half hour timestep

  // fixed magic parameters for now
  const int ctype = 1;
  const int ltype = 1;
  const bool urbpoi = false;
  const bool do_capsnow = false;
  const int frac_veg_nosno = 1;
  const double irrig_rate = 0.;
  int n_irrig_steps_left = 0;
  const double dewmx = 0.1;
  const double dtime = 1800.0;  // half-hour timestep, in seconds

  // phenology state, flattened [month][pft]
  double* elai = new double[n_months * n_pfts];
  double* esai = new double[n_months * n_pfts];
  ELM::Utils::read_phenology("../links/surfacedataWBW.nc", n_months, n_pfts, 0, elai, esai);

  // forcing state for a single grid cell (column 6 of the forcing files)
  double* forc_rain = new double[n_max_times * 1];
  double* forc_snow = new double[n_max_times * 1];
  double* forc_air_temp = new double[n_max_times * 1];
  const int n_times = ELM::Utils::read_forcing("../links/forcing", n_max_times, 6, 1, forc_rain, forc_snow, forc_air_temp);

  // scalar state/flux outputs for the single (cell, pft)
  double h2ocan = 0.0;
  double qflx_prec_intr = 0.;
  double qflx_irrig = 0.;
  double qflx_prec_grnd = 0.;
  double qflx_snwcp_liq = 0.;
  double qflx_snwcp_ice = 0.;
  double qflx_snow_grnd_patch = 0.;
  double qflx_rain_grnd = 0.;

  std::ofstream soln_file;
  soln_file.open("test_CanopyHydrology_kern1_single.soln");
  soln_file << "Timestep, forc_rain, h2ocan, qflx_prec_grnd, qflx_prec_intr" << std::endl;

  auto start = high_resolution_clock::now();
  for (int itime = 0; itime < n_times; ++itime) {
    // BUG FIX: "forc_rain[itime,0]" is the comma operator -> forc_rain[0],
    // so every step reused the first forcing record.  With one grid cell
    // the flat index is simply itime.
    // note this call puts all precip as rain for testing
    double total_precip = forc_rain[itime] + forc_snow[itime];
    // BUG FIX: "elai[5,7]" collapsed to elai[7]; month 5, pft 7 in the
    // flattened [month][pft] array is 5*n_pfts + 7.  (Assumes row-major
    // fill by read_phenology -- TODO confirm in readers.hh.)
    ELM::CanopyHydrology_Interception(dtime, total_precip, 0., irrig_rate,
            ltype, ctype, urbpoi, do_capsnow,
            elai[5*n_pfts + 7], esai[5*n_pfts + 7], dewmx, frac_veg_nosno,
            h2ocan, n_irrig_steps_left,
            qflx_prec_intr, qflx_irrig, qflx_prec_grnd,
            qflx_snwcp_liq, qflx_snwcp_ice,
            qflx_snow_grnd_patch, qflx_rain_grnd);
    soln_file << std::setprecision(16) << itime+1 << "\t" << total_precip << "\t" << h2ocan<< "\t" << qflx_prec_grnd << "\t" << qflx_prec_intr << std::endl;
  }
  soln_file.close();

  auto stop = high_resolution_clock::now();
  auto duration = duration_cast<microseconds>(stop - start);
  std::cout << "Time taken by function: "<< duration.count() << " microseconds" << std::endl;

  // release heap buffers (the original leaked them)
  delete[] elai; delete[] esai;
  delete[] forc_rain; delete[] forc_snow; delete[] forc_air_temp;

  return hpx::finalize();
}
#include <hpx/hpx_main.hpp>
#include <hpx/include/iostreams.hpp>
#include <hpx/include/parallel_for_loop.hpp>
#include <array>
#include <sstream>
#include <iterator>
#include <exception>
#include <string>
#include <stdlib.h>
#include <cstring>
#include <vector>
#include <iostream>
#include <iomanip>
#include <numeric>
#include <algorithm>
#include <assert.h>
#include <fstream>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <sys/time.h>
#include <unistd.h>
//#include <mpi.h>
#include <chrono>
#include "utils.hh"
#include "readers.hh"
#include "CanopyHydrology.hh"
#include "CanopyHydrology_SnowWater_impl.hh"
using namespace std::chrono;
namespace ELM {
namespace Utils {
// Compile-time problem dimensions for the full CanopyHydrology module driver.
static const int n_months = 12;   // months of phenology data per surface file
static const int n_pfts = 17;     // plant functional types per grid cell
static const int n_max_times = 31 * 24 * 2; // max days per month times hours per
// day * half hour timestep
static const int n_grid_cells = 24;   // grid cells (two 12-month phenology reads in main)
static const int n_levels_snow = 5;   // snow layers per column
} // namespace Utils
} // namespace ELM
int main(int argc, char ** argv)
{
using ELM::Utils::n_months;
using ELM::Utils::n_pfts;
using ELM::Utils::n_grid_cells;
using ELM::Utils::n_max_times;
using ELM::Utils::n_levels_snow;
// int myrank, numprocs;
// double mytime, maxtime, mintime, avgtime;
// MPI_Init(&argc,&argv);
// MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
// MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
// MPI_Barrier(MPI_COMM_WORLD);
// fixed magic parameters for now
const int ctype = 1;
const int ltype = 1;
const bool urbpoi = false;
const bool do_capsnow = false;
const int frac_veg_nosno = 1;
int n_irrig_steps_left = 0;
const double dewmx = 0.1;
const double dtime = 1800.0;
// fixed magic parameters for SnowWater
const double qflx_snow_melt = 0.;
// fixed magic parameters for fracH2Osfc
const int oldfflag = 0;
const double micro_sigma = 0.1;
const double min_h2osfc = 1.0e-8;
const double n_melt = 0.7;
// phenology input
double* elai = new double[n_grid_cells * n_pfts];
double* esai = new double[n_grid_cells * n_pfts];
ELM::Utils::read_phenology("../links/surfacedataWBW.nc", n_months, n_pfts, 0, elai, esai);
ELM::Utils::read_phenology("../links/surfacedataBRW.nc", n_months, n_pfts, n_months, elai, esai);
// forcing input
double* forc_rain = new double[ n_max_times * n_grid_cells ];
double* forc_snow = new double[ n_max_times * n_grid_cells ];
double* forc_air_temp = new double[ n_max_times * n_grid_cells ];
const int n_times = ELM::Utils::read_forcing("../links/forcing", n_max_times, 0, n_grid_cells, forc_rain, forc_snow, forc_air_temp);
double* forc_irrig = new double[ n_max_times * n_grid_cells ];
double qflx_floodg = 0.0;
// mesh input (though can also change as snow layers evolve)
//
// NOTE: in a real case, these would be populated, but we don't actually
// need them to be for these kernels. --etc
auto z = new double[n_grid_cells*n_levels_snow];
auto zi = new double[n_grid_cells*n_levels_snow];
auto dz = new double[n_grid_cells*n_levels_snow];
// state variables that require ICs and evolve (in/out)
double* h2ocan = new double[n_grid_cells * n_pfts];
double* swe_old = new double[n_grid_cells*n_levels_snow];
double* h2osoi_liq = new double[n_grid_cells*n_levels_snow];
double* h2osoi_ice = new double[n_grid_cells*n_levels_snow];
double* t_soisno = new double[n_grid_cells*n_levels_snow];
double* frac_iceold = new double[n_grid_cells*n_levels_snow];
double* t_grnd = new double[n_grid_cells];
double* h2osno = new double[n_grid_cells];
double* snow_depth = new double[n_grid_cells];
int* snow_level = new int[n_grid_cells]; // note this tracks the snow_depth
double* h2osfc = new double[n_grid_cells];
double* frac_h2osfc = new double[n_grid_cells];
// output fluxes by pft
double* qflx_prec_intr = new double[n_grid_cells * n_pfts];
double* qflx_irrig = new double[n_grid_cells * n_pfts];
double* qflx_prec_grnd = new double[n_grid_cells * n_pfts];
double* qflx_snwcp_liq = new double[n_grid_cells * n_pfts];
double* qflx_snwcp_ice = new double[n_grid_cells * n_pfts];
double* qflx_snow_grnd_patch = new double[n_grid_cells * n_pfts];
double* qflx_rain_grnd = new double[n_grid_cells * n_pfts];
// FIXME: I have no clue what this is... it is inout on WaterSnow. For now I
// am guessing the data structure. Ask Scott. --etc
double* integrated_snow = new double[n_grid_cells];
// output fluxes, state by the column
double* qflx_snow_grnd_col = new double[n_grid_cells];
double* qflx_snow_h2osfc = new double[n_grid_cells];
double* qflx_h2osfc2topsoi = new double[n_grid_cells];
double* qflx_floodc = new double[n_grid_cells];
double* frac_sno_eff = new double[n_grid_cells];
double* frac_sno = new double[n_grid_cells];
double* end = &h2ocan[n_grid_cells, n_pfts] ;
double* end2 = &h2osno[n_grid_cells-1] ;
double* end3 = &frac_h2osfc[n_grid_cells-1] ;
std::ofstream soln_file;
soln_file.open("test_CanopyHydrology_module.soln");
soln_file << "Time\t Total Canopy Water\t Min Water\t Max Water\t Total Snow\t Min Snow\t Max Snow\t Avg Frac Sfc\t Min Frac Sfc\t Max Frac Sfc" << std::endl;
auto min_max_water = std::minmax_element(&h2ocan[0,0], end+1);
auto sum_water = std::accumulate(&h2ocan[0,0], end+1, 0.);
auto min_max_snow = std::minmax_element(&h2osno[0], end2+1);
auto sum_snow = std::accumulate(&h2osno[0], end2+1, 0.);
auto min_max_frac_sfc = std::minmax_element(&frac_h2osfc[0], end3+1);
auto avg_frac_sfc = std::accumulate(&frac_h2osfc[0], end3+1, 0.) / (end3+1 - &frac_h2osfc[0]);
soln_file << std::setprecision(16)
<< 0 << "\t" << sum_water << "\t" << *min_max_water.first << "\t" << *min_max_water.second
<< "\t" << sum_snow << "\t" << *min_max_snow.first << "\t" << *min_max_snow.second
<< "\t" << avg_frac_sfc << "\t" << *min_max_frac_sfc.first << "\t" << *min_max_frac_sfc.second << std::endl;
auto start = high_resolution_clock::now();
// mytime = MPI_Wtime();
// main loop
// -- the timestep loop cannot/should not be parallelized
for (size_t t = 0; t != n_times; ++t) {
// grid cell and/or pft loop can be parallelized
for (size_t g = 0; g != n_grid_cells