Commit 5f133575 authored by Blais, Chris's avatar Blais, Chris
Browse files

Add CADES scripts

parent 6605a4e0
Loading
Loading
Loading
Loading
+9 −0
Original line number Diff line number Diff line
#!/bin/bash
# Interpreter declaration

# Source each of the six soar comparison driver scripts, in order,
# in the current shell.
for idx in 1 2 3 4 5 6; do
    . "run_soar_comparison_${idx}.sh"
done
 No newline at end of file
+55 −95
Original line number Diff line number Diff line
@@ -7,12 +7,7 @@ import traceback
import yaml
import numpy as np
import time

soar_path = os.path.join(r"C:\Users\tjf\Documents\01_gitlab_repos\soar\python")
if soar_path not in sys.path:
    sys.path.insert(0, soar_path)
else:
    print("soar toolbox already in path")
import argparse

module_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if module_path not in sys.path:
@@ -20,6 +15,7 @@ if module_path not in sys.path:
else:
    print("path already in sys.path")

from context import *
from tools.soar_utilities import define_system, get_pareto_front, get_one_result

# for plotting
@@ -32,74 +28,12 @@ from generate_test_cases import build_test_case_nl, make_layout
import soar.optimization.fEvaluate as feval
import matplotlib.pyplot as plt


def int_to_bool_list(num, nSensorCandidate):
    """
    From the Soar toolbox: create a unique id for each possible sensor layout.

    Converts ``num`` (sensor layout number, from 0 to 2**nSensorCandidate)
    to a binary string of width ``nSensorCandidate`` and uses each bit for
    the measured/unmeasured determination.

    :param num: integer id of the sensor layout
    :param nSensorCandidate: number of candidate sensor positions (bit width)
    :returns: tuple of (bool list, least-significant bit first; the binary
        string; the format spec used)
    """
    # Width-only spec, e.g. "5b": format() pads on the left with SPACES,
    # so positions beyond the number's bit-length compare False below.
    form = format(int(nSensorCandidate), "d") + "b"
    bin_string = format(num, form)
    # Reverse so index 0 corresponds to the least-significant bit.
    measured = [char == "1" for char in bin_string[::-1]]
    return measured, bin_string, form

def check_incidence(iIncidence):
    """
    Sanity check that in-nodes equal out-nodes (the graph is closed):
    every row of the incidence matrix must sum to zero.

    :param iIncidence: iterable of rows, each an iterable of numbers
    :raises AssertionError: if any row does not sum to zero
    """
    for row in iIncidence:
        # Compute once; used for both the check and the message.
        row_sum = sum(row)
        assert row_sum == 0, f"Incidence matrix row sums to {row_sum}"

def log_err(error_log, e, result, method=""):
    """
    Record an exception and its traceback for a result/method pair.

    :param error_log: dict mapping result -> {method -> error record}
    :param e: the exception instance that was caught
    :param result: key identifying the layout/result being processed
    :param method: label of the method that failed (eg obs_rref, red_lu)
    :returns: the updated error_log
    """
    # Capture the active traceback before it is lost.
    trace = traceback.format_exc()
    error_log.setdefault(result, {})[method] = {
        "error": e,
        "method": method,
        "trace": trace,
    }
    return error_log

def log_fail(result, failures, error_log, method, or_method, or_soar, layout):
    """
    Record a disagreement between a method and soar for one layout.

    :param result: key identifying the layout/result being processed
    :param failures: dict mapping result -> {method -> failure record}
    :param error_log: error log built by log_err; consulted to see whether
        this method already errored for this result
    :param method: the value we've gotten, and the method used (eg obs_rref, red_lu)
    :param or_method: observability or redundancy for method
    :param or_soar: obs or red from soar
    :param layout: bool array of measured vars
    :returns: the updated failures dict
    """
    if result not in failures:
        failures[result] = {}

    # Bug fix: the original indexed error_log[result] directly, which raised
    # KeyError whenever no error had been logged for this result (log_err only
    # creates that key when an error occurs). .get() makes a missing result
    # behave like an empty method dict; behavior is unchanged otherwise.
    if method not in error_log.get(result, {}):
        failures[result][method] = {
            "method": or_method,
            "soar": or_soar,
            "layout": layout,
        }
    else:
        # An error was already recorded for this method, so flag it rather
        # than storing comparison details.
        failures[result][method] = "error"

    return failures

def log_success(result, successes, method, soar, lu, layout):
    """
    Store the agreeing results for one layout.

    :param result: key identifying the layout/result being processed
    :param successes: dict mapping result -> record of agreeing values
    :param method: results for method (could be tuple of obs and red)
    :param soar: results for soar
    :param lu: results for lu
    :param layout: bool array of measured vars
    :returns: the updated successes dict
    """
    record = {
        "method": method,
        "soar": soar,
        "lu": lu,
        "layout": layout,
    }
    successes[result] = record
    return successes

def test_compare_methods(
        system = "ccro", 
        objective="linear", 
        ptype=None, 
        reddefecit=False,
        test_layouts = 1,
        reddeficit=False,
        test_layouts = "sampling",
        verbose=1,
    ):
    """
@@ -108,7 +42,7 @@ def test_compare_methods(
    :param system: Description
    :param objective: linear or bilinear pareto front considered
    :param ptype: Problem type to consider for comparison against soar
    :param reddefecit: True means excess redundancy is NOT considered for Pareto front
    :param reddeficit: True means excess redundancy is NOT considered for Pareto front
    :param test_layouts: 
        str:
            "all" tests all layouts, 
@@ -151,11 +85,13 @@ def test_compare_methods(
            if ptype is None:
                ptype = 1

    # if running redundancy defecit, we do not consider excess redundancy
    if reddefecit:
    # if running redundancy deficit, we do not consider excess redundancy
    if reddeficit:
        soarCriteria = 2
        reddeficit_str = "deficit_red"
    else:
        soarCriteria = 3
        reddeficit_str = "surplus_red"

    nStreams = pgraph.iNumberOfArc
    nSensors = nStreams*nTypeSensors
@@ -176,7 +112,8 @@ def test_compare_methods(
                    raise Exception("Pareto front doesn't exist, please run 'generate_pareto_front.py'")
            case "sampling":
                nTestLayouts = 50
                results = [random.randint(0, maxSensWord) for _ in range(nTestLayouts)]
                random.seed(42)
                results = random.choices(range(0, maxSensWord), k=nTestLayouts)
            

    elif isinstance(test_layouts, list):
@@ -215,6 +152,8 @@ def test_compare_methods(
    #     56134,
    #     ]
    
    # check the soar setup and make sure that we don't double count the layouts

    # for message at end
    errs_rref = 0
    errs_lu = 0
@@ -363,36 +302,57 @@ def test_compare_methods(


    print(f"RREF agree SOAR, LU or qr disagree in {nsuccess_rref} layouts out of {len(results)}")
    results_log_path = os.path.join(results_folder, f"{system}_{objective}_{str(ptype)}.pkl")
    results_log_path = os.path.join(results_folder, f"{system}_{objective}_{reddeficit_str}_{str(ptype)}.pkl")
    with open(results_log_path, "wb") as f:
        pickle.dump(results_log, f)


if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="runs a comparison of MRROR, LU, QR labelling against soar")

    parser.add_argument(
        '--sysname', type=str, 
        help='Name of the system', 
        required=True
    )
    parser.add_argument(
        '--objective', type=str, default="linear", 
        help='run linear, bilinear, or nolinear test layouts',
        required=True,
    )
    parser.add_argument(
        '--problemtype', type=int, default=0, 
        help='Nonlinear case: which problem type to run. otherwise, 0.'
    )
    parser.add_argument(
        '--layoutsampling', type=str, default="sampling", 
        help='which layouts to run. pareto generates pareto, all runs all, sampling is random sample',
        required=True,
    )
    parser.add_argument(
        '--reddeficit', action='store_true', 
        help='run redundancy deficit (no surplus redundancy)'
    )

    parser.add_argument(
        '--overwrite', action='store_true', help='overwrite existing pickle file'
        )
    # run_soar_comparison.py --sysname="pilot" 
    # 3. Parse the arguments from the command line
    args = parser.parse_args()
    t1 = time.time()
    # test_nonlinear_soar(random_sampling = False, ptype=1, verbose=0)
    # test_nonlinear_soar(random_sampling = False, ptype=2, verbose=0)
    # test_nonlinear_soar(random_sampling = False, ptype=3, verbose=0)
    # test_nonlinear_soar(random_sampling = False, ptype=4, verbose=0)
    # test_nonlinear_soar(random_sampling = False, ptype=5, verbose=0)
    # for ptype in range(1,3):

    # for ptype in range(1,6):
    test_compare_methods(
        system = "ccro", 
        objective="nonlinear", 
        ptype=2, 
        reddefecit=False,
        test_layouts = "sampling",
        system = args.sysname, 
        objective=args.objective, 
        ptype=args.problemtype, 
        reddeficit=args.reddeficit,
        test_layouts=args.layoutsampling,
        verbose=0,
    )
    # for ptype in range(1,6):
    #     test_compare_methods(
    #         system = "pilot", 
    #         objective="nonlinear", 
    #         ptype=ptype, 
    #         reddefecit=False,
    #         test_layouts = "sampling",
    #         verbose=1,
    #     )

    t2 = time.time()

    print(f"total test time: {t2 - t1} seconds")
+21 −0
Original line number Diff line number Diff line
#!/bin/bash
# Interpreter declaration

# Slurm batch job: runs the soar comparison for the "pilot" system with the
# nonlinear objective, problem type 1, randomly sampled layouts, and
# redundancy-deficit mode (no surplus redundancy considered).
#SBATCH -A birthright
#SBATCH -p batch
#SBATCH -N 1
#SBATCH -n 1
#SBATCH -c 2
#SBATCH -J pilot_comp_nl_1
#SBATCH --mem=50g
#SBATCH -t 02-00:00:00
#SBATCH -o ./pilot_comp_nl_1-out.log
#SBATCH -e ./pilot_comp_nl_1.log
#SBATCH --mail-type=FAIL,END
#SBATCH --mail-user=blaiscj@ornl.gov

# sbatch arguments
# Activate the conda environment and launch the comparison from the testing
# directory, overwriting any existing results pickle.
source ~/.bashrc
conda activate wwtp
cd /home/tjf/01_nawi/soar_integration/testing/
python run_soar_comparison.py --sysname="pilot" --objective="nonlinear" --problemtype=1 --layoutsampling="sampling" --reddeficit --overwrite
+21 −0
Original line number Diff line number Diff line
#!/bin/bash
# Interpreter declaration

# Slurm batch job: runs the soar comparison for the "pilot" system with the
# nonlinear objective, problem type 2, randomly sampled layouts, and
# redundancy-deficit mode (no surplus redundancy considered).
#SBATCH -A birthright
#SBATCH -p batch
#SBATCH -N 1
#SBATCH -n 1
#SBATCH -c 2
#SBATCH -J pilot_comp_nl_2
#SBATCH --mem=50g
#SBATCH -t 02-00:00:00
#SBATCH -o ./pilot_comp_nl_2-out.log
#SBATCH -e ./pilot_comp_nl_2.log
#SBATCH --mail-type=FAIL,END
#SBATCH --mail-user=blaiscj@ornl.gov

# sbatch arguments
# Activate the conda environment and launch the comparison from the testing
# directory, overwriting any existing results pickle.
source ~/.bashrc
conda activate wwtp
cd /home/tjf/01_nawi/soar_integration/testing/
python run_soar_comparison.py --sysname="pilot" --objective="nonlinear" --problemtype=2 --layoutsampling="sampling" --reddeficit --overwrite
+21 −0
Original line number Diff line number Diff line
#!/bin/bash
# Interpreter declaration

# Slurm batch job: runs the soar comparison for the "pilot" system with the
# nonlinear objective, problem type 3, randomly sampled layouts, and
# redundancy-deficit mode (no surplus redundancy considered).
#SBATCH -A birthright
#SBATCH -p batch
#SBATCH -N 1
#SBATCH -n 1
#SBATCH -c 2
#SBATCH -J pilot_comp_nl_3
#SBATCH --mem=50g
#SBATCH -t 02-00:00:00
#SBATCH -o ./pilot_comp_nl_3-out.log
#SBATCH -e ./pilot_comp_nl_3.log
#SBATCH --mail-type=FAIL,END
#SBATCH --mail-user=blaiscj@ornl.gov

# sbatch arguments
# Activate the conda environment and launch the comparison from the testing
# directory, overwriting any existing results pickle.
source ~/.bashrc
conda activate wwtp
cd /home/tjf/01_nawi/soar_integration/testing/
python run_soar_comparison.py --sysname="pilot" --objective="nonlinear" --problemtype=3 --layoutsampling="sampling" --reddeficit --overwrite
Loading