Commit ebc2f005 authored by Morales Hernandez, Mario's avatar Morales Hernandez, Mario
Browse files

Integrate SWMM coupling into TRITON v2 core solver

This commit integrates the SWMM coupling into the main TRITON solver,
adding SWMM-specific configuration parameters and the coupling step in
the time loop.

Configuration changes (src/config_utils.h):
- Added manhole_diameter, manhole_loss parameters
- Added inp_filename parameter for SWMM .inp file
- Parse SWMM parameters in get_args()

Core solver integration (src/triton.h):
- Include swmm_triton.h when TRITON_SWMM is defined
- Add swmm_model member variable to triton class
- Initialize SWMM after process_runoff()
- Allocate and populate SWMM data arrays (host_vec, device_vec)
- Add SWMM coupling step in compute_new_state() after wet_dry:
 * Copy SWMM new_depth to device
 * Compute SWMM-TRITON exchange flow on GPU via Kokkos kernel
 * Gather exchange_q from all ranks to rank 0
 * Step SWMM forward on rank 0
 * Scatter new_depth back to all ranks
- Finalize SWMM at simulation end

All changes are guarded by #ifdef TRITON_SWMM.
parent c70b5b2a
Loading
Loading
Loading
Loading
+16 −0
Original line number Diff line number Diff line
@@ -51,6 +51,12 @@ namespace ConfigUtils
		const_mann,	/**< Constant manning value to use in every cell in case of no external manning file is provided. */
		hextra;	/**< Represents the minimum water depth tolerance. */

#ifdef TRITON_SWMM
		T
		manhole_diameter,	/**< A constant characteristic length for manholes in SWMM (either diameter or width) */
		manhole_loss;	/**< Loss coefficient for manholes in SWMM links */
#endif

		std::string
		outfile_pattern,	/**< Output file directory and name pattern. */
		hydrograph_filename,	/**< Directory of the Hydrograph file to use. */
@@ -78,6 +84,10 @@ namespace ConfigUtils
		domain_decomposition,	/**< Domain decomposition. Options are static or dynamic. Static by default*/
		print_interval_string;	/**< Print interval as a string. Used to assign default value to print_observation. */

#ifdef TRITON_SWMM
		std::string inp_filename;	/**< inp filename for SWMM model*/
#endif


		std::vector<T>
		src_x_loc,	/**< Vector to hold all the Longitude value of all the flow locations serially. */
@@ -473,6 +483,12 @@ namespace ConfigUtils
		arglist.domain_decomposition = args("domain_decomposition", argmap);
		arglist.factor_interval_domain_decomposition = atoi((args("factor_interval_domain_decomposition", argmap)).c_str());

#ifdef TRITON_SWMM
		arglist.inp_filename = args("inp_filename", argmap);
		arglist.manhole_diameter = atof((args("manhole_diameter", argmap)).c_str());
		arglist.manhole_loss = atof((args("manhole_loss", argmap)).c_str());
#endif

		arglist.sim_start_time = atof((args("sim_start_time", argmap)).c_str());
		arglist.sim_duration = atof((args("sim_duration", argmap)).c_str());
		arglist.print_interval_string = args("print_interval", argmap);
+112 −4
Original line number Diff line number Diff line
@@ -23,6 +23,10 @@
#include "output.h"
#include "mpi_utils.h"

#ifdef TRITON_SWMM
#include "swmm_triton.h"
#endif

#include "Ensify.h"

namespace Triton
@@ -165,6 +169,11 @@ namespace Triton

    Output::output<T> out; /**Object to manage output files. */

#ifdef TRITON_SWMM
    SWMM_triton::swmm_triton swmm_model; /**< SWMM coupling model object */
    T swmm_local_elapsedTime; /**< Local elapsed time for SWMM */
#endif

    gpuStream_t streams;  /**< Cuda stream */
    std::vector<T*> device_vec; /**< Device vector that contains all floating point array to use in simulation. */
    std::vector<int*> device_vec_int; /**< Device vector that contains all integer array to use in simulation. */
@@ -392,10 +401,60 @@ namespace Triton

    process_runoff();

#ifdef TRITON_SWMM
    // Initialize SWMM coupling
    swmm_local_elapsedTime = 0.0;
    swmm_model.initialize(rank, size, arglist.inp_filename, project_dir, dem.get_xll_corner(),
                          dem.get_yll_corner(), cellsize, org_rows, org_cols, pd,
                          arglist.manhole_diameter, arglist.manhole_loss);
#endif

    create_host_aux_vectors();

    create_host_vectors();
    create_device_vectors();

#ifdef TRITON_SWMM
    // Add SWMM data to host vectors after SWMM initialization
    if (swmm_model.num_of_swmm_links > 0) {
      host_vec.push_back(swmm_model.loss.data());
      host_vec.push_back(swmm_model.diameter.data());
      host_vec.push_back(swmm_model.max_depth.data());
      host_vec.push_back(swmm_model.new_depth.data());
      host_vec.push_back(swmm_model.exchange_q.data());

      host_vec_int.push_back(swmm_model.swmm_pos_arr.data());

      // Allocate device memory for SWMM data
      int nbytes_swmm = (sizeof(T) * swmm_model.num_of_swmm_links);
      int nbytes_swmm_int = (sizeof(int) * swmm_model.num_of_swmm_links);

      T *device_loss, *device_diameter, *device_max_depth, *device_new_depth, *device_exchange_q;
      int *device_swmm_pos;

      gpuMalloc((void**)&device_loss, nbytes_swmm);
      gpuMalloc((void**)&device_diameter, nbytes_swmm);
      gpuMalloc((void**)&device_max_depth, nbytes_swmm);
      gpuMalloc((void**)&device_new_depth, nbytes_swmm);
      gpuMalloc((void**)&device_exchange_q, nbytes_swmm);
      gpuMalloc((void**)&device_swmm_pos, nbytes_swmm_int);

      device_vec.push_back(device_loss);
      device_vec.push_back(device_diameter);
      device_vec.push_back(device_max_depth);
      device_vec.push_back(device_new_depth);
      device_vec.push_back(device_exchange_q);
      device_vec_int.push_back(device_swmm_pos);

      // Initialize SWMM device data
      gpuMemcpyAsync(device_vec[SWMM_LOSS], host_vec[SWMM_LOSS], nbytes_swmm, gpuMemcpyHostToDevice, streams);
      gpuMemcpyAsync(device_vec[SWMM_DIAMETER], host_vec[SWMM_DIAMETER], nbytes_swmm, gpuMemcpyHostToDevice, streams);
      gpuMemcpyAsync(device_vec[SWMM_MAXD], host_vec[SWMM_MAXD], nbytes_swmm, gpuMemcpyHostToDevice, streams);
      gpuMemcpyAsync(device_vec[SWMM_NEWD], host_vec[SWMM_NEWD], nbytes_swmm, gpuMemcpyHostToDevice, streams);
      gpuMemcpyAsync(device_vec[SWMM_Q], host_vec[SWMM_Q], nbytes_swmm, gpuMemcpyHostToDevice, streams);
      gpuMemcpyAsync(device_vec_int[SWMMP], host_vec_int[SWMMP], nbytes_swmm_int, gpuMemcpyHostToDevice, streams);
    }
#endif
  }
  
  
@@ -2115,6 +2174,14 @@ namespace Triton
      std::cerr << OK "Simulation ends" << std::endl;
    }

#ifdef TRITON_SWMM
    // Finalize SWMM coupling
    if (swmm_model.num_of_swmm_links > 0) {
      std::string output_dir_swmm = project_dir + "/" + OUTPUT_DIR + "/swmm/";
      swmm_model.end_swmm(output_dir_swmm);
    }
#endif


  }

@@ -2260,6 +2327,47 @@ namespace Triton

    Kernels::wet_dry(rows*cols, rows, cols, global_dt, device_vec[H], device_vec[QX], device_vec[QY], device_vec[DEM], device_vec[MAXH], arglist.hextra,size);

#ifdef TRITON_SWMM
    // SWMM-TRITON coupling
    if (swmm_model.num_of_swmm_links > 0) {
      int nbytes_swmm = (sizeof(T) * swmm_model.num_of_swmm_links);
      int nbytes_swmm_int = (sizeof(int) * swmm_model.num_of_swmm_links);

      // Copy new_depth from host to device (updated from previous SWMM step)
      gpuMemcpyAsync(device_vec[SWMM_NEWD], host_vec[SWMM_NEWD], nbytes_swmm, gpuMemcpyHostToDevice, streams);
      gpuStreamSynchronize(streams);

      // Compute SWMM-TRITON exchange on device
      SWMM_triton::compute_swmm_triton_exchange(swmm_model.num_of_swmm_links, cell_size, global_dt,
                                                  device_vec[H], device_vec[QX], device_vec[QY], arglist.hextra,
                                                  device_vec_int[SWMMP], device_vec[SWMM_LOSS],
                                                  device_vec[SWMM_DIAMETER], device_vec[SWMM_MAXD],
                                                  device_vec[SWMM_NEWD], device_vec[SWMM_Q]);

      // Copy exchange_q from device to host
      gpuMemcpyAsync(host_vec[SWMM_Q], device_vec[SWMM_Q], nbytes_swmm, gpuMemcpyDeviceToHost, streams);
      gpuStreamSynchronize(streams);

      // Gather exchange_q from all ranks to rank 0
      MPI_Gatherv(host_vec[SWMM_Q], swmm_model.num_of_swmm_links, MPI_DATA_TYPE,
                  swmm_model.aux_global_exchange_q, swmm_model.counts, swmm_model.displs,
                  MPI_DATA_TYPE, 0, ENSIFY_COMM_WORLD);

      // Step SWMM forward on rank 0
      if (rank == 0) {
        swmm_model.local_to_global(swmm_model.aux_global_exchange_q, swmm_model.global_exchange_q,
                                    swmm_model.node_to_rank_dict);
        swmm_step(&swmm_local_elapsedTime, swmm_model.global_exchange_q, swmm_model.global_new_depth, global_dt);
        swmm_model.global_to_local(swmm_model.global_new_depth, swmm_model.aux_global_new_depth,
                                    swmm_model.node_to_rank_dict);
      }

      // Scatter new_depth from rank 0 to all ranks
      MPI_Scatterv(swmm_model.aux_global_new_depth, swmm_model.counts, swmm_model.displs,
                   MPI_DATA_TYPE, host_vec[SWMM_NEWD], swmm_model.num_of_swmm_links,
                   MPI_DATA_TYPE, 0, ENSIFY_COMM_WORLD);
    }
#endif


    if (size > 1)