Commit 27b0a448 authored by Pillai, Himanshu

HPX compiled successfully and passes all the tests

parent 4b32a5d2
@@ -63,14 +63,16 @@ include_directories(
/home/7hp/Downloads/elm_kernels/src/cc/
/usr/local/include
)
link_directories(${HPX_LIBRARY_DIR})
add_compile_options(-std=c++11)
add_hpx_executable(test_CanopyHydrology_kern1_multiple
ESSENTIAL
SOURCES CanopyHydrology_kern1_multiple.cpp
LINK_FLAGS "-I/usr/local/include -I/home/7hp/Downloads/elm_kernels/src/cc/"
COMPONENT_DEPENDENCIES iostreams init
DEPENDENCIES -L/usr/local/lib -lnetcdf)
COMPONENT_DEPENDENCIES iostreams init
DEPENDENCIES -g -L/usr/local/lib -lnetcdf)
add_hpx_executable(test_CanopyHydrology_kern1_single
ESSENTIAL
@@ -93,17 +95,22 @@ add_hpx_executable(test_CanopyHydrology_module
# OUTPUT_FILE "test_CanopyHydrology_kern1_single.stdout"
#)
#execute_process(
# COMMAND ./test_CanopyHydrology_kern1_multiple
# OUTPUT_FILE "test_CanopyHydrology_kern1_multiple.stdout"
#)
execute_process(
COMMAND ./test_CanopyHydrology_kern1_multiple
OUTPUT_FILE "test_CanopyHydrology_kern1_multiple.stdout"
)
#enable_testing()
#add_test(NAME HPXtest
# COMMAND python ../compare_to_gold.py test_CanopyHydrology_kern1_single test_CanopyHydrology_kern1_multiple
#)
execute_process(
COMMAND ./test_CanopyHydrology_module
OUTPUT_FILE "test_CanopyHydrology_module.stdout"
)
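# NOTE: execute_process() runs at CMake configure time, before these executables
# exist on a clean build tree, so the two calls above only succeed once a prior
# build is present; the usual mechanism for running built targets is an
# add_custom_target() with DEPENDS, or driving the runs through ctest via the
# add_test() below.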
enable_testing()
add_test(NAME HPXtest
COMMAND python ../compare_to_gold.py test_CanopyHydrology_kern1_multiple test_CanopyHydrology_module
)
add_custom_target(cleanall
COMMAND rm -r CMakeFiles/ Testing/
COMMAND rm test_* CMakeCache.txt cmake_* CTestTestfile.cmake Makefile
COMMAND rm test_* CMakeCache.txt cmake_* CTestTestfile.cmake Makefile *.soln *.stdout
)
\ No newline at end of file
#include <hpx/hpx_main.hpp>
#include <hpx/include/iostreams.hpp>
#include <hpx/include/parallel_for_loop.hpp>
#include <array>
#include <sstream>
#include <iterator>
@@ -22,6 +19,13 @@
#include <sys/time.h>
#include <unistd.h>
//#include <mpi.h>
#include <hpx/hpx_init.hpp>
#include <hpx/hpx.hpp>
#include <hpx/hpx_main.hpp>
#include <hpx/include/iostreams.hpp>
#include <hpx/include/parallel_for_loop.hpp>
#include <hpx/components/containers/partitioned_vector/partitioned_vector_view.hpp>
#include <hpx/include/partitioned_vector.hpp>
#include <chrono>
#include "utils.hh"
#include "readers.hh"
@@ -37,9 +41,13 @@ static const int n_max_times = 31 * 24 * 2; // max days per month times hours pe
// day * half hour timestep
static const int n_grid_cells = 24;
using MatrixState = MatrixStatic<n_grid_cells, n_pfts>;
using MatrixForc = MatrixStatic<n_max_times,n_grid_cells>;
} // namespace
} // namespace
HPX_REGISTER_PARTITIONED_VECTOR(double);
int main(int argc, char ** argv)
{
@@ -47,14 +55,10 @@ int main(int argc, char ** argv)
using ELM::Utils::n_pfts;
using ELM::Utils::n_grid_cells;
using ELM::Utils::n_max_times;
// int myrank, numprocs;
// double mytime, maxtime, mintime, avgtime;
// MPI_Init(&argc,&argv);
// MPI_Comm_size(MPI_COMM_WORLD,&numprocs);
// MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
// MPI_Barrier(MPI_COMM_WORLD);
using Vec = hpx::partitioned_vector<float>;
using View_2D = hpx::partitioned_vector_view<float,2>;
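// NOTE: these aliases do not appear to be used in the code shown here; if an
// hpx::partitioned_vector<float> were actually instantiated, it would also need
// an HPX_REGISTER_PARTITIONED_VECTOR(float) to match the double registration above.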
// fixed magic parameters for now
const int ctype = 1;
const int ltype = 1;
@@ -67,110 +71,94 @@ int main(int argc, char ** argv)
double dtime = 1800.0;
// phenology state
double* elai = new double[n_grid_cells * n_pfts];
double* esai = new double[n_grid_cells * n_pfts];
ELM::Utils::MatrixState elai;
ELM::Utils::MatrixState esai;
ELM::Utils::read_phenology("../links/surfacedataWBW.nc", n_months, n_pfts, 0, elai, esai);
ELM::Utils::read_phenology("../links/surfacedataBRW.nc", n_months, n_pfts, n_months, elai, esai);
// forcing state
double* forc_rain = new double[ n_max_times * n_grid_cells ];
double* forc_snow = new double[ n_max_times * n_grid_cells ];
double* forc_air_temp = new double[ n_max_times * n_grid_cells ];
ELM::Utils::MatrixForc forc_rain;
ELM::Utils::MatrixForc forc_snow;
ELM::Utils::MatrixForc forc_air_temp;
const int n_times = ELM::Utils::read_forcing("../links/forcing", n_max_times, 0, n_grid_cells, forc_rain, forc_snow, forc_air_temp);
double* forc_irrig = new double[ n_max_times * n_grid_cells ];
ELM::Utils::MatrixForc forc_irrig; forc_irrig = 0.;
// output state by the grid cell
double* qflx_prec_intr= new double[n_grid_cells*n_pfts];
double* qflx_irrig= new double[n_grid_cells*n_pfts];
double* qflx_prec_grnd= new double[n_grid_cells*n_pfts];
double* qflx_snwcp_liq= new double[n_grid_cells*n_pfts];
double* qflx_snwcp_ice = new double[n_grid_cells*n_pfts];
double* qflx_snow_grnd_patch= new double[n_grid_cells*n_pfts];
double* qflx_rain_grnd= new double[n_grid_cells*n_pfts];
auto qflx_prec_intr = ELM::Utils::MatrixState();
auto qflx_irrig = ELM::Utils::MatrixState();
auto qflx_prec_grnd = ELM::Utils::MatrixState();
auto qflx_snwcp_liq = ELM::Utils::MatrixState();
auto qflx_snwcp_ice = ELM::Utils::MatrixState();
auto qflx_snow_grnd_patch = ELM::Utils::MatrixState();
auto qflx_rain_grnd = ELM::Utils::MatrixState();
// output state by the pft
double* h2o_can = new double[n_grid_cells*n_pfts];
double* end = &h2o_can[n_grid_cells, n_pfts] ;
auto h2o_can = ELM::Utils::MatrixState(); h2o_can = 0.;
std::ofstream soln_file;
soln_file.open("test_CanopyHydrology_kern1_multiple.soln");
soln_file << "Time\t Total Canopy Water\t Min Water\t Max Water" << std::endl;
std::cout << "Time\t Total Canopy Water\t Min Water\t Max Water" << std::endl;
auto min_max = std::minmax_element(&h2o_can[0,0], end+1);
auto min_max = std::minmax_element(h2o_can.begin(), h2o_can.end());
soln_file << std::setprecision(16)
<< 0 << "\t" << std::accumulate(&h2o_can[0,0], end+1, 0.)
<< 0 << "\t" << std::accumulate(h2o_can.begin(), h2o_can.end(), 0.)
<< "\t" << *min_max.first
<< "\t" << *min_max.second << std::endl;
std::cout << std::setprecision(16)
<< 0 << "\t" << std::accumulate(&h2o_can[0,0], end+1, 0.)
<< 0 << "\t" << std::accumulate(h2o_can.begin(), h2o_can.end(), 0.)
<< "\t" << *min_max.first
<< "\t" << *min_max.second << std::endl;
auto start = high_resolution_clock::now();
// mytime = MPI_Wtime();
// main loop
// -- the timestep loop cannot/should not be parallelized
for (size_t t = 0; t != n_times; ++t) {
// grid cell and/or pft loop can be parallelized
// hpx::parallel::for_loop(hpx::parallel::execution::par,0, n_grid_cells,
// [n_pfts](const size_t g) { // size_t g = 0; g != n_grid_cells; ++g) {
// for (size_t p = 0; p != n_pfts; ++p) {
for (size_t g = 0; g != n_grid_cells; ++g) {
for (size_t p = 0; p != n_pfts; ++p) {
hpx::parallel::for_loop(hpx::parallel::execution::par.with(
hpx::parallel::execution::static_chunk_size()),
0, n_grid_cells, [&](const size_t g) {
hpx::parallel::for_loop(hpx::parallel::execution::par.with(
hpx::parallel::execution::static_chunk_size()),
0, n_pfts, [&](const size_t p) {
//for (size_t g = 0; g != n_grid_cells; ++g) {
//for (size_t p = 0; p != n_pfts; ++p) {
// NOTE: this currently punts on what to do with the qflx variables!
// Surely they should be either accumulated or stored on PFTs as well.
// --etc
ELM::CanopyHydrology_Interception(dtime,
forc_rain[t,g], forc_snow[t,g], forc_irrig[t,g],
forc_rain(t,g), forc_snow(t,g), forc_irrig(t,g),
ltype, ctype, urbpoi, do_capsnow,
elai[g,p], esai[g,p], dewmx, frac_veg_nosno,
h2o_can[g,p], n_irrig_steps_left,
qflx_prec_intr[g,p], qflx_irrig[g,p], qflx_prec_grnd[g,p],
qflx_snwcp_liq[g,p], qflx_snwcp_ice[g,p],
qflx_snow_grnd_patch[g,p], qflx_rain_grnd[g,p]);
// qflx_prec_intr[g], qflx_irrig[g], qflx_prec_grnd[g],
// qflx_snwcp_liq[g], qflx_snwcp_ice[g],
// qflx_snow_grnd_patch[g], qflx_rain_grnd[g]);
//printf("%i %i %16.8g %16.8g %16.8g %16.8g %16.8g %16.8g\n", g, p, forc_rain[t,g], forc_snow[t,g], elai[g,p], esai[g,p], h2o_can[g,p], qflx_prec_intr[g]);
}
}//);
elai(g,p), esai(g,p), dewmx, frac_veg_nosno,
h2o_can(g,p), n_irrig_steps_left,
qflx_prec_intr(g,p), qflx_irrig(g,p), qflx_prec_grnd(g,p),
qflx_snwcp_liq(g,p), qflx_snwcp_ice(g,p),
qflx_snow_grnd_patch(g,p), qflx_rain_grnd(g,p));
});
});
auto min_max = std::minmax_element(&h2o_can[0,0], end+1);
auto min_max = std::minmax_element(h2o_can.begin(), h2o_can.end());
std::cout << std::setprecision(16)
<< t+1 << "\t" << std::accumulate(&h2o_can[0,0], end+1, 0.)
<< t+1 << "\t" << std::accumulate(h2o_can.begin(), h2o_can.end(), 0.)
<< "\t" << *min_max.first
<< "\t" << *min_max.second << std::endl;
soln_file << std::setprecision(16)
<< t+1 << "\t" << std::accumulate(&h2o_can[0,0], end+1, 0.)
<< t+1 << "\t" << std::accumulate(h2o_can.begin(), h2o_can.end(), 0.)
<< "\t" << *min_max.first
<< "\t" << *min_max.second << std::endl;
} soln_file.close();
// mytime = MPI_Wtime() - mytime;
auto stop = high_resolution_clock::now();
// hpx::cout <<"Timing from node "<< myrank << " is "<< mytime << "seconds." << std::endl;
// MPI_Reduce(&mytime, &maxtime, 1, MPI_DOUBLE,MPI_MAX, 0, MPI_COMM_WORLD);
// MPI_Reduce(&mytime, &mintime, 1, MPI_DOUBLE, MPI_MIN, 0,MPI_COMM_WORLD);
// MPI_Reduce(&mytime, &avgtime, 1, MPI_DOUBLE, MPI_SUM, 0,MPI_COMM_WORLD);
// if (myrank == 0) {
// avgtime /= numprocs;
// hpx::cout << "Min: "<< mintime << ", Max: " << maxtime << ", Avg: " <<avgtime << std::endl;
// }
auto duration = duration_cast<microseconds>(stop - start);
hpx::cout << "Time taken by function: "<< duration.count() << " microseconds" << std::endl;
// MPI_Finalize();
std::cout << "Time taken by function: "<< duration.count() << " microseconds" << std::endl;
return hpx::finalize();
}
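// A minimal sketch (not part of this commit) of an alternative to the nested
// hpx::parallel::for_loop structure above: flatten the grid-cell and PFT loops
// inside the timestep loop into a single parallel loop and recover (g, p) from
// the flat index, avoiding a nested parallel region per grid cell. All names
// reuse the variables defined above.
//
// hpx::parallel::for_loop(hpx::parallel::execution::par.with(
//       hpx::parallel::execution::static_chunk_size()),
//     0, n_grid_cells * n_pfts, [&](const size_t i) {
//       const size_t g = i / n_pfts;   // grid cell index
//       const size_t p = i % n_pfts;   // PFT index
//       ELM::CanopyHydrology_Interception(dtime,
//           forc_rain(t,g), forc_snow(t,g), forc_irrig(t,g),
//           ltype, ctype, urbpoi, do_capsnow,
//           elai(g,p), esai(g,p), dewmx, frac_veg_nosno,
//           h2o_can(g,p), n_irrig_steps_left,
//           qflx_prec_intr(g,p), qflx_irrig(g,p), qflx_prec_grnd(g,p),
//           qflx_snwcp_liq(g,p), qflx_snwcp_ice(g,p),
//           qflx_snow_grnd_patch(g,p), qflx_rain_grnd(g,p));
//     });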
#include <hpx/hpx_main.hpp>
#include <hpx/include/iostreams.hpp>
#include <hpx/include/parallel_for_loop.hpp>
#include <netcdf.h>
#include <cmath>
#include <cstdio>
@@ -22,6 +19,9 @@
#include <fstream>
#include <assert.h>
//#include <mpi.h>
#include <hpx/hpx_main.hpp>
#include <hpx/include/iostreams.hpp>
#include <hpx/include/parallel_for_loop.hpp>
#include <chrono>
#include "utils.hh"
#include "readers.hh"
@@ -33,10 +33,10 @@ namespace Utils {
static const int n_months = 12;
static const int n_pfts = 17;
using MatrixState = MatrixStatic<n_months, n_pfts>;
static const int n_max_times = 31 * 24 * 2; // max days per month times hours per
// day * half hour timestep
using MatrixForc = MatrixStatic<n_max_times,1>;
} // namespace
} // namespace
@@ -54,7 +54,6 @@ int main(int argc, char ** argv)
const int n_pfts = 17;
const int n_max_times = 31 * 24 * 2; // max days per month times hours per
// day * half hour timestep
// fixed magic parameters for now
const int ctype = 1;
const int ltype = 1;
@@ -68,14 +67,14 @@ int main(int argc, char ** argv)
const double dtime = 1800.0;
// phenology state
double* elai = new double[n_months * n_pfts];
double* esai = new double[n_months * n_pfts];
ELM::Utils::MatrixState elai;
ELM::Utils::MatrixState esai;
ELM::Utils::read_phenology("../links/surfacedataWBW.nc", n_months, n_pfts, 0, elai, esai);
// forcing state
double* forc_rain = new double[ n_max_times * 1 ];
double* forc_snow = new double[ n_max_times * 1 ];
double* forc_air_temp = new double[ n_max_times * 1 ];
ELM::Utils::MatrixForc forc_rain;
ELM::Utils::MatrixForc forc_snow;
ELM::Utils::MatrixForc forc_air_temp;
const int n_times = ELM::Utils::read_forcing("../links/forcing", n_max_times, 6, 1, forc_rain, forc_snow, forc_air_temp);
double h2ocan = 0.0;
@@ -96,10 +95,10 @@ int main(int argc, char ** argv)
for(size_t itime = 0; itime < n_times; itime += 1) {
// note this call puts all precip as rain for testing
double total_precip = forc_rain[itime,0] + forc_snow[itime,0];
double total_precip = forc_rain[itime][0] + forc_snow[itime][0];
ELM::CanopyHydrology_Interception(dtime, total_precip, 0., irrig_rate,
ltype, ctype, urbpoi, do_capsnow,
elai[5,7], esai[5,7], dewmx, frac_veg_nosno,
elai[5][7], esai[5][7], dewmx, frac_veg_nosno,
h2ocan, n_irrig_steps_left,
qflx_prec_intr, qflx_irrig, qflx_prec_grnd,
qflx_snwcp_liq, qflx_snwcp_ice,
@@ -109,7 +108,7 @@ int main(int argc, char ** argv)
}
// mytime = MPI_Wtime() - mytime;
auto stop = high_resolution_clock::now();
// std::cout <<"Timing from node "<< myrank << " is "<< mytime << "seconds." << std::endl;
// hpx::cout <<"Timing from node "<< myrank << " is "<< mytime << "seconds." << hpx::endl;
// /*compute max, min, and average timing statistics*/
// MPI_Reduce(&mytime, &maxtime, 1, MPI_DOUBLE,MPI_MAX, 0, MPI_COMM_WORLD);
@@ -117,7 +116,7 @@ int main(int argc, char ** argv)
// MPI_Reduce(&mytime, &avgtime, 1, MPI_DOUBLE, MPI_SUM, 0,MPI_COMM_WORLD);
// if (myrank == 0) {
// avgtime /= numprocs;
// std::cout << "Min: "<< mintime << ", Max: " << maxtime << ", Avg: " <<avgtime << std::endl;
// hpx::cout << "Min: "<< mintime << ", Max: " << maxtime << ", Avg: " <<avgtime << hpx::endl;
// }
@@ -42,6 +42,12 @@ static const int n_max_times = 31 * 24 * 2; // max days per month times hours pe
static const int n_grid_cells = 24;
static const int n_levels_snow = 5;
using MatrixStatePFT = MatrixStatic<n_grid_cells, n_pfts>;
using MatrixStateSoilColumn = MatrixStatic<n_grid_cells, n_levels_snow>;
using MatrixForc = MatrixStatic<n_max_times,n_grid_cells>;
using VectorColumn = VectorStatic<n_grid_cells>;
using VectorColumnInt = VectorStatic<n_grid_cells,int>;
} // namespace
} // namespace
@@ -82,17 +88,17 @@ int main(int argc, char ** argv)
const double n_melt = 0.7;
// phenology input
double* elai = new double[n_grid_cells * n_pfts];
double* esai = new double[n_grid_cells * n_pfts];
ELM::Utils::MatrixStatePFT elai;
ELM::Utils::MatrixStatePFT esai;
ELM::Utils::read_phenology("../links/surfacedataWBW.nc", n_months, n_pfts, 0, elai, esai);
ELM::Utils::read_phenology("../links/surfacedataBRW.nc", n_months, n_pfts, n_months, elai, esai);
// forcing input
double* forc_rain = new double[ n_max_times * n_grid_cells ];
double* forc_snow = new double[ n_max_times * n_grid_cells ];
double* forc_air_temp = new double[ n_max_times * n_grid_cells ];
ELM::Utils::MatrixForc forc_rain;
ELM::Utils::MatrixForc forc_snow;
ELM::Utils::MatrixForc forc_air_temp;
const int n_times = ELM::Utils::read_forcing("../links/forcing", n_max_times, 0, n_grid_cells, forc_rain, forc_snow, forc_air_temp);
double* forc_irrig = new double[ n_max_times * n_grid_cells ];
ELM::Utils::MatrixForc forc_irrig; forc_irrig = 0.;
double qflx_floodg = 0.0;
@@ -100,63 +106,59 @@ int main(int argc, char ** argv)
//
// NOTE: in a real case, these would be populated, but we don't actually
// need them to be for these kernels. --etc
auto z = new double[n_grid_cells*n_levels_snow];
auto zi = new double[n_grid_cells*n_levels_snow];
auto dz = new double[n_grid_cells*n_levels_snow];
auto z = ELM::Utils::MatrixStateSoilColumn(0.);
auto zi = ELM::Utils::MatrixStateSoilColumn(0.);
auto dz = ELM::Utils::MatrixStateSoilColumn(0.);
// state variables that require ICs and evolve (in/out)
double* h2ocan = new double[n_grid_cells * n_pfts];
double* swe_old = new double[n_grid_cells*n_levels_snow];
double* h2osoi_liq = new double[n_grid_cells*n_levels_snow];
double* h2osoi_ice = new double[n_grid_cells*n_levels_snow];
double* t_soisno = new double[n_grid_cells*n_levels_snow];
double* frac_iceold = new double[n_grid_cells*n_levels_snow];
double* t_grnd = new double[n_grid_cells];
double* h2osno = new double[n_grid_cells];
double* snow_depth = new double[n_grid_cells];
int* snow_level = new int[n_grid_cells]; // note this tracks the snow_depth
double* h2osfc = new double[n_grid_cells];
double* frac_h2osfc = new double[n_grid_cells];
auto h2ocan = ELM::Utils::MatrixStatePFT(); h2ocan = 0.;
auto swe_old = ELM::Utils::MatrixStateSoilColumn(0.);
auto h2osoi_liq = ELM::Utils::MatrixStateSoilColumn(0.);
auto h2osoi_ice = ELM::Utils::MatrixStateSoilColumn(0.);
auto t_soisno = ELM::Utils::MatrixStateSoilColumn(0.);
auto frac_iceold = ELM::Utils::MatrixStateSoilColumn(0.);
auto t_grnd = ELM::Utils::VectorColumn(0.);
auto h2osno = ELM::Utils::VectorColumn(0.); h2osno = 0.;
auto snow_depth = ELM::Utils::VectorColumn(0.);
auto snow_level = ELM::Utils::VectorColumnInt(0.); // note this tracks the snow_depth
auto h2osfc = ELM::Utils::VectorColumn(0.);
auto frac_h2osfc = ELM::Utils::VectorColumn(0.); frac_h2osfc = 0.;
// output fluxes by pft
double* qflx_prec_intr = new double[n_grid_cells * n_pfts];
double* qflx_irrig = new double[n_grid_cells * n_pfts];
double* qflx_prec_grnd = new double[n_grid_cells * n_pfts];
double* qflx_snwcp_liq = new double[n_grid_cells * n_pfts];
double* qflx_snwcp_ice = new double[n_grid_cells * n_pfts];
double* qflx_snow_grnd_patch = new double[n_grid_cells * n_pfts];
double* qflx_rain_grnd = new double[n_grid_cells * n_pfts];
auto qflx_prec_intr = ELM::Utils::MatrixStatePFT();
auto qflx_irrig = ELM::Utils::MatrixStatePFT();
auto qflx_prec_grnd = ELM::Utils::MatrixStatePFT();
auto qflx_snwcp_liq = ELM::Utils::MatrixStatePFT();
auto qflx_snwcp_ice = ELM::Utils::MatrixStatePFT();
auto qflx_snow_grnd_patch = ELM::Utils::MatrixStatePFT();
auto qflx_rain_grnd = ELM::Utils::MatrixStatePFT();
// FIXME: I have no clue what this is... it is inout on WaterSnow. For now I
// am guessing the data structure. Ask Scott. --etc
double* integrated_snow = new double[n_grid_cells];
auto integrated_snow = ELM::Utils::VectorColumn(0.);
// output fluxes, state by the column
double* qflx_snow_grnd_col = new double[n_grid_cells];
double* qflx_snow_h2osfc = new double[n_grid_cells];
double* qflx_h2osfc2topsoi = new double[n_grid_cells];
double* qflx_floodc = new double[n_grid_cells];
double* frac_sno_eff = new double[n_grid_cells];
double* frac_sno = new double[n_grid_cells];
auto qflx_snow_grnd_col = ELM::Utils::VectorColumn();
auto qflx_snow_h2osfc = ELM::Utils::VectorColumn();
auto qflx_h2osfc2topsoi = ELM::Utils::VectorColumn();
auto qflx_floodc = ELM::Utils::VectorColumn();
double* end = &h2ocan[n_grid_cells, n_pfts] ;
double* end2 = &h2osno[n_grid_cells-1] ;
double* end3 = &frac_h2osfc[n_grid_cells-1] ;
auto frac_sno_eff = ELM::Utils::VectorColumn();
auto frac_sno = ELM::Utils::VectorColumn();
std::ofstream soln_file;
soln_file.open("test_CanopyHydrology_module.soln");
soln_file << "Time\t Total Canopy Water\t Min Water\t Max Water\t Total Snow\t Min Snow\t Max Snow\t Avg Frac Sfc\t Min Frac Sfc\t Max Frac Sfc" << std::endl;
auto min_max_water = std::minmax_element(&h2ocan[0,0], end+1);
auto sum_water = std::accumulate(&h2ocan[0,0], end+1, 0.);
auto min_max_water = std::minmax_element(h2ocan.begin(), h2ocan.end());
auto sum_water = std::accumulate(h2ocan.begin(), h2ocan.end(), 0.);
auto min_max_snow = std::minmax_element(&h2osno[0], end2+1);
auto sum_snow = std::accumulate(&h2osno[0], end2+1, 0.);
auto min_max_snow = std::minmax_element(h2osno.begin(), h2osno.end());
auto sum_snow = std::accumulate(h2osno.begin(), h2osno.end(), 0.);
auto min_max_frac_sfc = std::minmax_element(&frac_h2osfc[0], end3+1);
auto avg_frac_sfc = std::accumulate(&frac_h2osfc[0], end3+1, 0.) / (end3+1 - &frac_h2osfc[0]);
auto min_max_frac_sfc = std::minmax_element(frac_h2osfc.begin(), frac_h2osfc.end());
auto avg_frac_sfc = std::accumulate(frac_h2osfc.begin(), frac_h2osfc.end(), 0.) / (frac_h2osfc.end() - frac_h2osfc.begin());
soln_file << std::setprecision(16)
<< 0 << "\t" << sum_water << "\t" << *min_max_water.first << "\t" << *min_max_water.second
@@ -168,12 +170,12 @@ int main(int argc, char ** argv)
// -- the timestep loop cannot/should not be parallelized
for (size_t t = 0; t != n_times; ++t) {
// grid cell and/or pft loop can be parallelized
for (size_t g = 0; g != n_grid_cells; ++g) {
double sum;
// PFT level operations
for (size_t p = 0; p != n_pfts; ++p) {
hpx::parallel::for_loop(hpx::parallel::execution::par.with(
hpx::parallel::execution::static_chunk_size()),
0, n_grid_cells, [&](const size_t g) {
hpx::parallel::for_loop(hpx::parallel::execution::par.with(
hpx::parallel::execution::static_chunk_size()),
0, n_pfts, [&](const size_t p) {
//
// Calculate interception
//
@@ -181,13 +183,13 @@ int main(int argc, char ** argv)
// Surely they should be either accumulated or stored on PFTs as well.
// --etc
ELM::CanopyHydrology_Interception(dtime,
forc_rain[t,g], forc_snow[t,g], forc_irrig[t,g],
forc_rain(t,g), forc_snow(t,g), forc_irrig(t,g),
ltype, ctype, urbpoi, do_capsnow,
elai[g,p], esai[g,p], dewmx, frac_veg_nosno,
h2ocan[g,p], n_irrig_steps_left,
qflx_prec_intr[g,p], qflx_irrig[g,p], qflx_prec_grnd[g,p],
qflx_snwcp_liq[g,p], qflx_snwcp_ice[g,p],
qflx_snow_grnd_patch[g,p], qflx_rain_grnd[g,p]);
elai(g,p), esai(g,p), dewmx, frac_veg_nosno,
h2ocan(g,p), n_irrig_steps_left,
qflx_prec_intr(g,p), qflx_irrig(g,p), qflx_prec_grnd(g,p),
qflx_snwcp_liq(g,p), qflx_snwcp_ice(g,p),
qflx_snow_grnd_patch(g,p), qflx_rain_grnd(g,p));
//printf("%i %i %16.8g %16.8g %16.8g %16.8g %16.8g %16.8g\n", g, p, forc_rain[t,g], forc_snow[t,g], elai[g,p], esai[g,p], h2ocan[g,p], qflx_prec_intr[g]);
//
@@ -198,13 +200,12 @@ int main(int argc, char ** argv)
// By the PFT?
// --etc
double fwet = 0., fdry = 0.;
ELM::CanopyHydrology_FracWet(frac_veg_nosno, h2ocan[g,p], elai[g,p], esai[g,p], dewmx, fwet, fdry);
sum += qflx_snow_grnd_patch[g,p];
} // end PFT loop
ELM::CanopyHydrology_FracWet(frac_veg_nosno, h2ocan(g,p), elai(g,p), esai(g,p), dewmx, fwet, fdry);
qflx_snow_grnd_col[g] = sum ;
}); // end PFT loop
qflx_snow_grnd_col[g] = std::accumulate(qflx_snow_grnd_patch[g].begin(),
qflx_snow_grnd_patch[g].end(), 0.);
// Column level operations
// NOTE: this is effectively an accumulation kernel/task! --etc
@@ -217,7 +218,7 @@ int main(int argc, char ** argv)
int newnode;
ELM::CanopyHydrology_SnowWater(dtime, qflx_floodg,
ltype, ctype, urbpoi, do_capsnow, oldfflag,
forc_air_temp[t,g], t_grnd[g],
forc_air_temp(t,g), t_grnd(g),
qflx_snow_grnd_col[g], qflx_snow_melt, n_melt, frac_h2osfc[g],
snow_depth[g], h2osno[g], integrated_snow[g], swe_old[g],
h2osoi_liq[g], h2osoi_ice[g], t_soisno[g], frac_iceold[g],
@@ -230,19 +231,19 @@ int main(int argc, char ** argv)
// interface specifies a single double. For now passing the 0th
// entry. --etc
ELM::CanopyHydrology_FracH2OSfc(dtime, min_h2osfc, ltype, micro_sigma,
h2osno[g], h2osfc[g], h2osoi_liq[g,0], frac_sno[g], frac_sno_eff[g],
h2osno[g], h2osfc[g], h2osoi_liq[g][0], frac_sno[g], frac_sno_eff[g],
qflx_h2osfc2topsoi[g], frac_h2osfc[g]);
} // end grid cell loop
}); // end grid cell loop
auto min_max_water = std::minmax_element(&h2ocan[0,0], end+1);
auto sum_water = std::accumulate(&h2ocan[0,0], end+1, 0.);
auto min_max_water = std::minmax_element(h2ocan.begin(), h2ocan.end());