Commit 1df519df authored by Turner, Sean's avatar Turner, Sean
Browse files

head-flow-power bilinear interpolation

parent 76519d20
Loading
Loading
Loading
Loading
+167 −21
Original line number Diff line number Diff line
%% Cell type:markdown id:514e7b0a tags:

## Example optimization

This script illustrates how to optimize the power-generating schedule at Wolf Creek dam so as to minimize downstream violations of the power target without dropping below a specified target storage at Wolf Creek. The decision variables set Wolf Creek's power schedule over a 168-hour week, and the objective function is the sum of squared deviations between actual and target power at Cordell Hull, plus a penalty if Wolf Creek's final pool elevation falls below its target.

%% Cell type:code id:1ed54f38 tags:

``` python
# import required libraries
import pandas as pd
import powersheds
import yaml
from dataclasses import dataclass
from pathlib import Path
```

%% Cell type:code id:7a67ff6f tags:

``` python
# set up the model input
@dataclass
class ReservoirData:
    """Static configuration, lookup tables, and input time series for one reservoir."""
    object_type: str                # always 'reservoir'; used for dispatch when loading the config
    capacity: float                 # storage capacity (presumably Mm3, matching the storage tables — confirm)
    initial_storage: float          # storage at the start of the simulation
    set_storage: list[float]        # storage-elevation table: storage column (Mm3)
    set_elevation: list[float]      # storage-elevation table: headwater elevation column (m)
    tailwater_elevation: float      # static tailwater elevation (m)
    max_release: float              # upper bound on release per timestep
    min_release: float              # lower bound on release per timestep
    catchment_inflow: list[float]   # local catchment inflow time series (one value per timestep)
    target_release: list[float]     # release target time series
    target_power: list[float]       # power target time series (MW)
    hrpt_head: list[float]          # head-release-power table: head column (m)
    hrpt_release: list[float]       # head-release-power table: release column (Mm3)
    hrpt_power: list[float]         # head-release-power table: power column (MW)
    simulation_order: int           # position of this object in the cascade simulation sequence
    downstream_object: str          # name of the object immediately downstream

@dataclass
class RiverData:
    """Configuration for a river reach connecting objects in the cascade."""
    object_type: str        # always 'river'
    simulation_order: int   # position in the cascade simulation sequence
    downstream_object: str  # name of the object immediately downstream
    lag: int                # routing lag (number of timesteps — presumably hours; confirm)
    legacy_flows: list[float]  # flows already in transit at the start of the simulation

@dataclass
class ConfluenceData:
    """Configuration for a confluence (junction) where flows combine."""
    object_type: str        # always 'confluence'
    simulation_order: int   # position in the cascade simulation sequence
    downstream_object: str  # name of the object immediately downstream

@dataclass
class CascadeData:
    """Complete cascade model input: all reservoirs, rivers, and confluences keyed by name."""
    reservoirs: dict[str, ReservoirData]
    rivers: dict[str, RiverData]
    confluences: dict[str, ConfluenceData]

# Read the cascade configuration (object specs keyed by object name)
with open('cascade_config.yaml', 'r') as file:
    config_dict = yaml.safe_load(file)

# Initialize dictionaries for reservoirs, rivers, and confluences
reservoir_dict = {}
river_dict = {}
confluence_dict = {}

for name, specs in config_dict.items():
    if specs['object_type'] == 'reservoir':
        # Read each input file once instead of re-parsing the same CSV
        # once per column (the original read the time-series file three
        # times and each lookup table two or three times per reservoir).
        ts = pd.read_csv(f"time_series/{name}.csv")
        set_table = pd.read_csv(f"storage_HWelevation_tables/{name}.csv")
        hrpt_table = pd.read_csv(f"head_release_power_tables/{name}.csv")

        reservoir_dict[name] = ReservoirData(
            # specifications from the yaml config
            **specs,
            # inflow and target time series
            catchment_inflow=ts['catchment_inflow'].tolist(),
            target_release=ts['target_release'].tolist(),
            target_power=ts['target_power'].tolist(),
            # storage-elevation table: parallel vectors for interpolation
            set_storage=set_table['storage_Mm3'].tolist(),
            set_elevation=set_table['elevation_m'].tolist(),
            # head-release-power table: parallel vectors for bilinear interpolation
            hrpt_head=hrpt_table['head_m'].tolist(),
            hrpt_release=hrpt_table['release_Mm3'].tolist(),
            hrpt_power=hrpt_table['power_MW'].tolist(),
        )
    elif specs['object_type'] == 'river':
        river_dict[name] = RiverData(
            **specs,
            # legacy flows: water already in transit between reservoirs
            legacy_flows=pd.read_csv(f"time_series/{name}.csv")['legacy_flow'].tolist()
        )
    elif specs['object_type'] == 'confluence':
        confluence_dict[name] = ConfluenceData(
            **specs,
        )

cascade_data = CascadeData(
    reservoirs=reservoir_dict,
    rivers=river_dict,
    confluences=confluence_dict
)
```

%% Cell type:code id:083d3c27 tags:

``` python
# Smoke test: check that the model runs end-to-end on the configured cascade
results = powersheds.simulate_cascade(cascade_data)
```

%% Cell type:code id:ace3af70 tags:

``` python
# import tools for optimization

import numpy as np
from scipy.optimize import basinhopping, differential_evolution, dual_annealing
```

%% Cell type:code id:1e4bde66 tags:

``` python
# Placeholder power level used by later scratch cells.
decision_vars = 0

# NOTE(review): removed a stray no-op expression `168 * [decision_vars]`
# whose result was built and immediately discarded.

import multiprocessing
import copy
```

%% Cell type:code id:ac8675b5 tags:

``` python
# Objective function for the optimizer.
def simulation_function(decision_vars):
    """Objective: squared power deviation at Cordell Hull plus a terminal
    pool-elevation penalty at Wolf Creek.

    decision_vars: four power levels (MW); each is held for a 6-hour block,
    and the resulting 24-hour pattern is repeated for 7 days (168 values).
    """
    # Build the weekly power schedule from the four 6-hour block levels.
    # NOTE(review): the original first built an unused 12-hour/2-variable
    # schedule that was immediately overwritten — removed.
    power_sequence = list(7 * (6 * [decision_vars[0]] + 6 * [decision_vars[1]] + 6 * [decision_vars[2]] + 6 * [decision_vars[3]]))

    # Work on a deep copy so repeated optimizer calls never mutate the shared
    # cascade_data. (The original also mutated and simulated the global model
    # before repeating the run on the copy — the first run was redundant.)
    local_cascade_data = copy.deepcopy(cascade_data)
    local_cascade_data.reservoirs['WolfCreek'].target_power = power_sequence
    results = powersheds.simulate_cascade(local_cascade_data)

    # Absolute deviation between actual and target power at Cordell Hull.
    deviation = np.abs(
        np.array(results["CordellHull"]["actual_power"]) - np.array(results["CordellHull"]["target_power"])
    )

    # Penalize ending the week below the 230 m target pool elevation at
    # Wolf Creek. NOTE(review): in the original this penalty block was
    # unreachable (dead code after an early return); it is now applied
    # as the written code clearly intended.
    pool_deviation = 230 - results['WolfCreek']['pool_elevation'][167]
    penalty = 0 if pool_deviation <= 0 else 10000000

    return np.sum(deviation * deviation) + penalty

# Bounds on the four 6-hour power levels (MW), one pair per decision variable.
# NOTE(review): removed a dead 2-tuple bounds assignment that was immediately
# overwritten and whose length did not match the 4 decision variables.
bounds = [(0, 300), (0, 300), (0, 300), (0, 300)]

# Number of available CPU cores (useful when choosing optimizer workers)
num_cores = multiprocessing.cpu_count()

print(num_cores)
```

# Run the basinhopping optimization
#result = basinhopping(simulation_function, initial_guess, niter=100, T=1.0, stepsize=0.5, minimizer_kwargs={"bounds": bounds})
%% Output

result = differential_evolution(simulation_function, bounds=bounds)
    16

%% Cell type:code id:781a09a1 tags:

``` python
# Serial differential-evolution run over the four power-level bounds.
result = differential_evolution(
    simulation_function,
    bounds=bounds # NOTE(review): previous comment claimed bounds is "required when using multiple workers", but no workers argument is passed — this is a plain serial run
)
print(result)
```

%% Cell type:code id:37a6e404 tags:
%% Output

                 message: Optimization terminated successfully.
                 success: True
                     fun: 57234.68577925186
                       x: [ 1.819e+02  1.196e+02  1.109e-01  3.294e+01]
                     nit: 127
                    nfev: 8150
              population: [[ 1.819e+02  1.196e+02  1.109e-01  3.294e+01]
                           [ 1.843e+02  1.173e+02  3.724e-01  3.296e+01]
                           ...
                           [ 1.822e+02  1.170e+02  3.962e+00  3.048e+01]
                           [ 1.818e+02  1.199e+02  6.714e-01  3.201e+01]]
     population_energies: [ 5.723e+04  5.756e+04 ...  5.861e+04  5.766e+04]

%% Cell type:code id:abce1fe3 tags:

``` python
from joblib import parallel_backend
```

%% Cell type:code id:e6819fa1 tags:

``` python
# NOTE(review): joblib's parallel_backend only affects joblib-based libraries;
# scipy.optimize.differential_evolution parallelizes through its own `workers`
# argument, which is not set here — so this run is most likely still serial.
# Confirm, and use workers=... on differential_evolution if parallelism is wanted.
with parallel_backend('loky', n_jobs=15):  # Try a smaller number like 4
    result = differential_evolution(
        simulation_function,
        bounds=bounds,
        updating='deferred'  # deferred updating is what parallel evaluation would require
    )

print(result)
```

%% Output

                 message: Optimization terminated successfully.
                 success: True
                     fun: 61459.087113666545
                       x: [ 2.127e+02  2.946e+01  8.771e+00  8.021e+01]
                     nit: 94
                    nfev: 5975
              population: [[ 2.127e+02  2.946e+01  8.771e+00  8.021e+01]
                           [ 2.059e+02  2.285e+01  3.522e+00  8.870e+01]
                           ...
                           [ 2.003e+02  3.427e+01  1.681e+01  8.632e+01]
                           [ 2.065e+02  3.436e+01  1.021e+01  8.449e+01]]
     population_energies: [ 6.146e+04  6.214e+04 ...  6.190e+04  6.258e+04]

%% Cell type:code id:324f4f8a tags:

``` python
# Parallel differential evolution using SciPy's own worker pool.
result = differential_evolution(
    simulation_function,
    bounds=bounds,
    workers=4,
    # With workers != 1, SciPy overrides updating to 'deferred' and emits a
    # warning if 'immediate' is requested — so request 'deferred' explicitly.
    updating='deferred'
)

print(result)
```

%% Cell type:code id:d6bd99c9 tags:

``` python
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
```

%% Cell type:code id:37a6e404 tags:

``` python
# Build a (2, 100, 100) stack of grid coordinates for surface evaluation.
x = np.arange(100)
y = np.arange(200, 300)
xgrid, ygrid = np.meshgrid(x, y)
xy = np.stack((xgrid, ygrid))
```

%% Cell type:code id:7b7310b9 tags:

``` python

# 3-D surface plot of the objective over the coordinate grid built above.
# NOTE(review): simulation_function indexes decision_vars[0..3], but xy has
# size 2 on its first axis — this call likely raises IndexError with the
# 4-variable objective; confirm before relying on this cell.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.view_init(45, -45)
ax.plot_surface(xgrid, ygrid, simulation_function(xy), cmap='terrain')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('eggholder(x, y)')  # NOTE(review): leftover axis label from a SciPy eggholder example
plt.show()
```

%% Cell type:code id:9e11a63a tags:

``` python
# Objective-surface helper for a two-level daily power schedule.
# NOTE(review): unlike simulation_function, this mutates the shared
# cascade_data in place rather than working on a deep copy — confirm
# that the persistent side effect on the global model is acceptable.

def compute_Z(X, Y):
    """Return squared Cordell Hull power deviation plus a pool penalty.

    X, Y: power levels (MW) for the first and second 12 hours of each day;
    the 24-hour pattern is repeated for 7 days (168 values).
    """
    power_sequence = 7 * (12 * [X] + 12 * [Y])

    # overwrite Wolf Creek's power target with the candidate schedule
    cascade_data.reservoirs['WolfCreek'].target_power = power_sequence
    results = powersheds.simulate_cascade(cascade_data)

    # absolute deviation from the power target at Cordell Hull
    # (earlier comment said "Old Hickory" — the code reads CordellHull)
    x = np.abs(
    np.array(results["CordellHull"]["actual_power"]) - np.array(results["CordellHull"]["target_power"])
    )

    # penalty if the final (index-167) pool elevation drops below 230 m
    pool_deviation = 230 - results['WolfCreek']['pool_elevation'][167]
    if pool_deviation <= 0:
        penalty = 0
    else:
        penalty = 1000000

    return np.sum(x*x) + penalty
```

%% Cell type:code id:42a7a246 tags:

``` python
# Sanity check: objective value with zero power in both half-day blocks
compute_Z(0,0)
```

%% Cell type:code id:4bbed8d3 tags:

``` python
# Evaluate compute_Z on a 50x50 grid of (X, Y) power levels and plot the
# objective surface.
# NOTE(review): removed two leftover debug lines that overwrote Wolf Creek's
# target power with 168 * [decision_vars]; compute_Z assigns the target itself
# on every call, so they had no effect on the computed surface.

# Create the meshgrid of candidate power levels (MW)
num_points = 50
x = np.linspace(0, 200, num_points)
y = np.linspace(0, 200, num_points)
X, Y = np.meshgrid(x, y)

# Initialize a Z array for the objective values
Z = np.zeros_like(X)

# Calculate the Z values for the surface (one full simulation per grid cell)
for i in range(X.shape[0]):
    for j in range(X.shape[1]):
        Z[i, j] = compute_Z(X[i, j], Y[i, j])

# Plotting
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, Z, cmap='viridis')  # choose a different colormap if preferred
```

%% Cell type:code id:e2f3990e tags:

``` python
```
+24 −18
Original line number Diff line number Diff line
@@ -12,7 +12,7 @@ library(readr)
library(ggplot2)
library(patchwork)
library(purrr)
library(httpgd)
#library(httpgd)
reticulate::use_virtualenv("./.venv", required = TRUE)
setwd("examples/Cumberland/")
```
@@ -38,10 +38,10 @@ class ReservoirData:
    set_storage: list[float]
    set_elevation: list[float]
    # head-power-flow table
    hpf_H: list[float]
    hpf_P: list[float]
    hpf_Q: list[float]
    # currently we use static tailwater elevation
    hpf_h: list[float]
    hpf_p: list[float]
    hpf_q: list[float]
    # static tailwater elevation
    tailwater_elevation: float
    max_release: float
    min_release: float
@@ -92,25 +92,35 @@ for name, specs in config_dict.items():
        #hpf_table = pd.read_parquet(hpf_path)

        reservoir_dict[name] = ReservoirData(
            # specifications from yaml config
            **specs,
            #catchment_inflow = [0] * 168,

            # catchment inflow and target power time series
            catchment_inflow=pd.read_csv(f"time_series/{name}.csv")['catchment_inflow'].tolist(),
            target_power=pd.read_csv(f"time_series/{name}.csv")['target_power'].tolist(),
            # storage elevation tables
            
            # Storage-Elevation tables for interpolation...
            # ... (each column of the storage elevation tables is entered into powersheds as a vector of values)
            set_storage = pd.read_csv(f"storage_HWelevation_tables/{name}.csv")['storage_Mm3'].tolist(),
            set_elevation = pd.read_csv(f"storage_HWelevation_tables/{name}.csv")['elevation_m'].tolist(),
            # head power flow tables
            hpf_H = pd.read_parquet(f"head_power_flow_tables/{name}")["H_m"].astype(float).tolist(),
            hpf_P = pd.read_parquet(f"head_power_flow_tables/{name}")["P_MW"].astype(float).tolist(),
            hpf_Q = pd.read_parquet(f"head_power_flow_tables/{name}")["Q_cumecs"].astype(float).tolist(),

            # Head-Power-Flow tables for interpolation...
            # ... (each column of the head-power-flow table is entered into powersheds as a vector of values)
            hpf_h = pd.read_parquet(f"head_power_flow_tables/{name}")["H_m"].astype(float).tolist(),
            hpf_p = pd.read_parquet(f"head_power_flow_tables/{name}")["P_MW"].astype(float).tolist(),
            hpf_q = pd.read_parquet(f"head_power_flow_tables/{name}")["Q_cumecs"].astype(float).tolist(),
        )
    elif specs['object_type'] == 'river':
        river_dict[name] = RiverData(
            # specifications for river objects from yaml config
            **specs,

            # legacy flows (i.e., initial conditions in rivers between reservoirs in the system)
            legacy_flows=pd.read_csv(f"time_series/{name}.csv")['legacy_flow'].tolist()
        )
    elif specs['object_type'] == 'confluence':
        confluence_dict[name] = ConfluenceData(
            # specifications for confluence objects from yaml config
            **specs,
        )

@@ -123,12 +133,6 @@ cascade_data = CascadeData(

```{python}

print(cascade_data)

```

```{python}

# run the powersheds model!

results = powersheds.simulate_cascade(
@@ -152,7 +156,9 @@ reservoirs_ordered |> map_dfr(function(reservoir){
        release_implemented = reservoir_results$release,
        spill = reservoir_results$spill,
        pool_elevation = reservoir_results$pool_elevation,
        head = reservoir_results$head
        head = reservoir_results$head,
        power_actual = reservoir_results$actual_power,
        power_target = reservoir_results$target_power
    ) |> mutate(hr_index = 1:n())

}) |> mutate(reservoir = factor(reservoir, levels = reservoirs_ordered)) ->
+1 −1
Original line number Diff line number Diff line
[build-system]
requires = ["maturin==1.8.1"]
requires = ["maturin==1.9.4"]
build-backend = "maturin"

[project]
+283 −37

File changed.

Preview size limit exceeded, changes collapsed.

+30 −13
Original line number Diff line number Diff line
@@ -8,8 +8,7 @@ mod helpers;
use helpers::interpolate_elevation;
use helpers::interpolate_storage;
use helpers::compute_release_to_meet_target_power;
use helpers::compute_power;

use helpers::compute_power_given_actual_release;


// Set object types: reservoir, river, confluence
@@ -46,10 +45,11 @@ struct ReservoirData {
    min_release: f64,
    catchment_inflow: Vec<f64>,
    target_power: Vec<f64>,
    power_params_a: f64,
    power_params_b: f64,
    simulation_order: i32,
    downstream_object: String,
    hpf_h: Vec<f64>,
    hpf_p: Vec<f64>,
    hpf_q: Vec<f64>
}

// Set River data struct
@@ -145,13 +145,13 @@ fn simulate_timestep(
    // Calculate head as the difference between pool elevation and tailwater elevation
    let head = previous_pool_elevation - reservoir.tailwater_elevation;

    // Compute the target release, given the power generation desired
    // THIS SECTION TO BE UPDATED USING HEAD-POWER-FLOW TABLES
    // Compute the target release given the power generation desired
    let target_release = compute_release_to_meet_target_power(
        target_power,
        head,
        reservoir.power_params_a,
        reservoir.power_params_b,
        &reservoir.hpf_h,
        &reservoir.hpf_p,
        &reservoir.hpf_q
    );
    
    // Compute available water given inflow and previous storage...
@@ -167,16 +167,33 @@ fn simulate_timestep(
            .min(available_water)
    };
    
    let release_update_required = release != target_release;

    let actual_power = if release_update_required {

        compute_power_given_actual_release(
            release,
            head,
            &reservoir.hpf_h,
            &reservoir.hpf_p,
            &reservoir.hpf_q
        )
    } else {

        target_power

    };

    let potential_storage = available_water - release;

    
    // Calculate actual power based on the actual release and head
    // THIS SECTION TO BE UPDATED USING HEAD-POWER-FLOW TABLES
    // ALSO ADD IF STATEMENT TO VOID THIS COMPUTATION IF release = target_release
    let actual_power = compute_power(
    let actual_power = compute_power_given_actual_release(
        release,
        head,
        reservoir.power_params_a,
        reservoir.power_params_b,
        &reservoir.hpf_h,
        &reservoir.hpf_p,
        &reservoir.hpf_q
    );

    // Determine spill, presently driven solely by storage capacity limit