CTestConfig.cmake  +1 −1

@@ -7,7 +7,7 @@
 set(CTEST_PROJECT_NAME "DCA")
 set(CTEST_NIGHTLY_START_TIME "00:00:00 GMT")
-set(CTEST_DROP_METHOD "http")
+set(CTEST_DROP_METHOD "https")
 set(CTEST_DROP_SITE "cdash.cscs.ch")
 set(CTEST_DROP_LOCATION "/submit.php?project=DCA")
 set(CTEST_DROP_SITE_CDASH TRUE)
applications/dca/main_dca.cpp  +15 −15

@@ -20,6 +20,7 @@
 #include "dca/io/json/json_reader.hpp"
 #include "dca/util/git_version.hpp"
 #include "dca/util/modules.hpp"
+#include "dca/application/dca_loop_dispatch.hpp"
 
 int main(int argc, char** argv) {
   if (argc < 2) {

@@ -71,24 +72,23 @@ int main(int argc, char** argv) {
     DcaDataType dca_data(parameters);
     dca_data.initialize();
 
-    DcaLoopType dca_loop(parameters, dca_data, concurrency);
-    {
-      Profiler profiler(__FUNCTION__, __FILE__, __LINE__);
-      dca_loop.initialize();
-      dca_loop.execute();
-      dca_loop.finalize();
-    }
-    Profiler::stop(concurrency, parameters.get_filename_profiling());
-
-    if (concurrency.id() == concurrency.first()) {
-      std::cout << "\nProcessor " << concurrency.id() << " is writing data." << std::endl;
-      dca_loop.write();
-      std::cout << "\nFinish time: " << dca::util::print_time() << "\n" << std::endl;
-    }
+    dca::DistType distribution = parameters.get_g4_distribution();
+    switch(distribution) {
+      case dca::DistType::MPI: {
+        DCALoopDispatch<dca::DistType::MPI> dca_loop_dispatch;
+        dca_loop_dispatch(parameters, dca_data, concurrency);
+      } break;
+      case dca::DistType::NONE: {
+        DCALoopDispatch<dca::DistType::NONE> dca_loop_dispatch;
+        dca_loop_dispatch(parameters, dca_data, concurrency);
+      } break;
+    }
   }
   catch (const std::exception& err) {
     std::cout << "Unhandled exception in main function:\n\t" << err.what();
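The new header dca/application/dca_loop_dispatch.hpp is included above, but its contents are not part of this diff. A minimal sketch of what the dispatch functor presumably looks like, assuming it simply absorbs the loop-driving code removed from main(); the operator() signature and the origin of DcaLoopType are assumptions:

// Hypothetical sketch only; the real header is added by this PR but not shown here.
template <dca::DistType DIST>
class DCALoopDispatch {
public:
  template <class Parameters, class DcaData, class Concurrency>
  void operator()(Parameters& parameters, DcaData& dca_data, Concurrency& concurrency) {
    // DcaLoopType is assumed to come from the generated application header,
    // now instantiated with DIST (see the dca_config.cmake changes below).
    DcaLoopType dca_loop(parameters, dca_data, concurrency);
    {
      // Profile the full DCA loop, exactly as main() did before this change.
      Profiler profiler(__FUNCTION__, __FILE__, __LINE__);
      dca_loop.initialize();
      dca_loop.execute();
      dca_loop.finalize();
    }
    Profiler::stop(concurrency, parameters.get_filename_profiling());

    // Only the first rank writes results, as in the removed main() code.
    if (concurrency.id() == concurrency.first()) {
      std::cout << "\nProcessor " << concurrency.id() << " is writing data." << std::endl;
      dca_loop.write();
      std::cout << "\nFinish time: " << dca::util::print_time() << "\n" << std::endl;
    }
  }
};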
build-aux/summit.cmake  +4 −0

@@ -25,6 +25,10 @@ set(SMPIARGS_FLAG_NOMPI "--smpiargs=none" CACHE STRING
 # Let's keep this option in case we need it again in the future.
 set(SMPIARGS_FLAG_MPI "" CACHE STRING "Spectrum MPI argument list flag for MPI tests.")
 
+# When we want to use a CUDA visible devices restriction we need this flag.
+set(SMPIARGS_FLAG_MPI_CVD "--smpiargs=-gpu" CACHE STRING "Spectrum MPI argument list for cuda-mpi tests")
+
 # Enable the GPU support.
 option(DCA_WITH_CUDA "Enable GPU support." ON)
cmake/dca_config.cmake  +3 −3

@@ -219,14 +219,14 @@ if (DCA_CLUSTER_SOLVER STREQUAL "CT-INT")
 elseif (DCA_CLUSTER_SOLVER STREQUAL "CT-AUX")
   set(DCA_CLUSTER_SOLVER_NAME dca::phys::solver::CT_AUX)
-  set(DCA_CLUSTER_SOLVER_TYPE "dca::phys::solver::CtauxClusterSolver<walker_device, ParametersType, DcaDataType>")
+  set(DCA_CLUSTER_SOLVER_TYPE "dca::phys::solver::CtauxClusterSolver<walker_device, ParametersType, DcaDataType, DIST>")
   set(DCA_CLUSTER_SOLVER_INCLUDE "dca/phys/dca_step/cluster_solver/ctaux/ctaux_cluster_solver.hpp")
 
 elseif (DCA_CLUSTER_SOLVER STREQUAL "SS-CT-HYB")
   set(DCA_CLUSTER_SOLVER_NAME dca::phys::solver::SS_CT_HYB)
-  set(DCA_CLUSTER_SOLVER_TYPE "dca::phys::solver::SsCtHybClusterSolver<walker_device, ParametersType, DcaDataType>")
+  set(DCA_CLUSTER_SOLVER_TYPE "dca::phys::solver::SsCtHybClusterSolver<walker_device, ParametersType, DcaDataType, DIST>")
   set(DCA_CLUSTER_SOLVER_INCLUDE "dca/phys/dca_step/cluster_solver/ss_ct_hyb/ss_ct_hyb_cluster_solver.hpp")

@@ -252,7 +252,7 @@ option(DCA_WITH_THREADED_SOLVER "Use multiple walker and accumulator threads in
 if (DCA_WITH_THREADED_SOLVER)
   dca_add_config_define(DCA_WITH_THREADED_SOLVER)
-  set(DCA_THREADED_SOLVER_TYPE dca::phys::solver::StdThreadQmciClusterSolver<ClusterSolverBaseType>)
+  set(DCA_THREADED_SOLVER_TYPE dca::phys::solver::StdThreadQmciClusterSolver<ClusterSolverBaseType<DIST>>)
   set(DCA_THREADED_SOLVER_INCLUDE "dca/phys/dca_step/cluster_solver/stdthread_qmci/stdthread_qmci_cluster_solver.hpp")
 endif()
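The literal DIST that now appears in DCA_CLUSTER_SOLVER_TYPE and DCA_THREADED_SOLVER_TYPE only makes sense if the generated application header (produced elsewhere by configure_file; not shown in this diff) declares a template parameter of that name. A plausible reading of how the configured strings could expand, with the second alias name and the surrounding template being assumptions:

// Hypothetical expansion in the generated application header after CMake substitution.
template <dca::DistType DIST>
using ClusterSolverBaseType =
    dca::phys::solver::CtauxClusterSolver<walker_device, ParametersType, DcaDataType, DIST>;

template <dca::DistType DIST>
using ClusterSolverType =
    dca::phys::solver::StdThreadQmciClusterSolver<ClusterSolverBaseType<DIST>>;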
cmake/dca_testing.cmake  +12 −3

@@ -23,7 +23,7 @@ include(CMakeParseArguments)
 # MPI or CUDA may be given to indicate that the test requires these libraries. MPI_NUMPROC is the
 # number of MPI processes to use for a test with MPI, the default value is 1.
 function(dca_add_gtest name)
-  set(options FAST EXTENSIVE STOCHASTIC PERFORMANCE GTEST_MAIN MPI CUDA)
+  set(options FAST EXTENSIVE STOCHASTIC PERFORMANCE GTEST_MAIN MPI CUDA CUDA_CVD)
   set(oneValueArgs MPI_NUMPROC)
   set(multiValueArgs INCLUDE_DIRS SOURCES LIBS)
   cmake_parse_arguments(DCA_ADD_GTEST "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})

@@ -81,6 +81,10 @@ function(dca_add_gtest name)
     return()
   endif()
 
+  if (DCA_ADD_GTEST_CUDA_CVD AND NOT DCA_HAVE_CUDA)
+    return()
+  endif()
+
   add_executable(${name} ${name}.cpp ${DCA_ADD_GTEST_SOURCES})
 
   # Create a macro with the project source dir. We use this as the root path for reading files in

@@ -95,7 +99,7 @@ function(dca_add_gtest name)
     target_link_libraries(${name} gtest ${DCA_ADD_GTEST_LIBS})
   endif()
 
-  if (DCA_ADD_GTEST_CUDA)
+  if (DCA_ADD_GTEST_CUDA OR DCA_ADD_GTEST_CUDA_CVD)
     target_include_directories(${name} PRIVATE ${CUDA_TOOLKIT_INCLUDE})
     target_link_libraries(${name} ${DCA_CUDA_LIBS})
     target_compile_definitions(${name} PRIVATE DCA_HAVE_CUDA)

@@ -104,6 +108,11 @@ function(dca_add_gtest name)
       target_compile_definitions(${name} PRIVATE DCA_HAVE_MAGMA)
     endif()
     cuda_add_cublas_to_target(${name})
+    # A less hacky way to do this would be good, but at the moment this is only used to
+    # test the development-only distributed G4 feature.
+    if (DCA_ADD_GTEST_CUDA_CVD)
+      set(CVD_LAUNCHER "${PROJECT_SOURCE_DIR}/test/cvdlauncher.sh")
+    endif()
   endif()
 
   target_include_directories(${name} PRIVATE

@@ -117,7 +126,7 @@ function(dca_add_gtest name)
     add_test(NAME ${name}
              COMMAND ${TEST_RUNNER} ${MPIEXEC_NUMPROC_FLAG} ${DCA_ADD_GTEST_MPI_NUMPROC}
-                     ${MPIEXEC_PREFLAGS} ${SMPIARGS_FLAG_MPI} "$<TARGET_FILE:${name}>")
+                     ${MPIEXEC_PREFLAGS} ${SMPIARGS_FLAG_MPI_CVD} ${CVD_LAUNCHER} "$<TARGET_FILE:${name}>")
     target_link_libraries(${name} ${MPI_C_LIBRARIES})
   else()
     if (TEST_RUNNER)
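Taken together, a test declared with the new CUDA_CVD option is skipped when DCA_HAVE_CUDA is off, gets the same CUDA include paths, libraries, and compile definitions as a CUDA test, and, when launched through the MPI test runner, is prefixed with ${SMPIARGS_FLAG_MPI_CVD} and the test/cvdlauncher.sh wrapper (the wrapper script itself is referenced but not shown in this diff). A hypothetical call site for illustration only; the test name, process count, and libraries are made up:

dca_add_gtest(distributed_g4_accumulation_test
              CUDA_CVD
              MPI MPI_NUMPROC 4
              LIBS ${DCA_LIBS})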