diff --git a/main.py b/main.py index cd7e1621d2a1e76621d28252b4e7c4fac2a00f4c..737cc8d8ee3d07c4eb387ea9c18ef33c30eb0120 100644 --- a/main.py +++ b/main.py @@ -31,7 +31,7 @@ CLI_CONFIG = SettingsConfigDict( ) -def main(): +def main(cli_args: list[str] | None = None): parser = argparse.ArgumentParser( description=""" ExaDigiT Resource Allocator & Power Simulator (RAPS) @@ -153,7 +153,7 @@ def main(): # TODO: move telemetry and other misc scripts into here - args = parser.parse_args() + args = parser.parse_args(cli_args) args.func(args) diff --git a/tests/conftest.py b/tests/conftest.py index 477588ab47724f27096e3f03023175d3f84427ae..855f969f04aefe6fd79c7ba8f63cc379f7070e60 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,5 +1,9 @@ import pytest import uuid +import shutil +from glob import glob +from pathlib import Path +import gc def pytest_addoption(parser): @@ -15,6 +19,19 @@ def pytest_runtest_setup(item): pytest.skip(reason) -@pytest.fixture -def random_id(): - return f"test-{str(uuid.uuid4())[:8]}" +@pytest.fixture() +def sim_output(): + """ + Handles cleaning up output from the sim. + Can also be used even if you aren't outputting anything to run garbage collection after the sim. 
+ """ + out = f"test-output/test-{str(uuid.uuid4())[:8]}" + yield out + for file in glob(f"{out}*"): + if Path(file).is_dir(): + shutil.rmtree(file) + else: + Path(file).unlink() + + # Also force a garbage collection to clean up memory after running a simulation + gc.collect() diff --git a/tests/systems/conftest.py b/tests/systems/conftest.py index 8e361e9b8ce79eefb53128d3260c84ec5bda10bb..269d101fc5c4b0c017a046565790014f468ceab1 100644 --- a/tests/systems/conftest.py +++ b/tests/systems/conftest.py @@ -1,4 +1,5 @@ import pytest +from tests.util import DATA_PATH @pytest.fixture(params=[ @@ -180,18 +181,24 @@ def system_config(system): @pytest.fixture -def system_file(system): +def system_files(system): files = { "40frontiers": [], - "adastraMI250": ["AdastaJobsMI250_15days.parquet"], - "frontier": ["slurm/joblive/date=2024-01-18/", "jobprofile/date=2024-01-18/"], - "fugaku": ["21_04.parquet"], - "gcloudv2": ["/v2/google_cluster_data_2011_sample"], - "lassen": ["Lassen-Supercomputer-Job-Dataset"], - "marconi100": ["job_table.parquet"], - "mit_supercloud": ["202201"], - "setonix": [""], + "adastraMI250": ["adastraMI250/AdastaJobsMI250_15days.parquet"], + "frontier": ["frontier/slurm/joblive/date=2024-01-18/", "frontier/jobprofile/date=2024-01-18/"], + "fugaku": ["fugaku/21_04.parquet"], + "gcloudv2": ["gcloud/v2/google_cluster_data_2011_sample"], + "lassen": ["lassen/Lassen-Supercomputer-Job-Dataset"], + "marconi100": ["marconi100/job_table.parquet"], + "mit_supercloud": ["mit_supercloud/202201"], + "setonix": [], "summit": [], "lumi": [] } - return files.get(system, files) + + file_list = [DATA_PATH / f for f in files.get(system, [])] + for file in file_list: + assert file.exists(), \ + f"File `{file}' does not exist. does ./data exist or is RAPS_DATA_DIR set?" 
+ + return [str(f) for f in file_list] diff --git a/tests/systems/test_engine.py b/tests/systems/test_engine.py index 974ed2e766ea80e8c0ccc4c4f1ff5a87e5541769..ce4087891e17b8d64d9fc6eea88cd983af17c672 100644 --- a/tests/systems/test_engine.py +++ b/tests/systems/test_engine.py @@ -1,4 +1,3 @@ -import gc import pytest from raps.engine import Engine from raps.sim_config import SimConfig @@ -15,7 +14,7 @@ pytestmark = [ ] -def test_engine(system, system_config): +def test_engine(system, system_config, sim_output): if not system_config.get("main", False): pytest.skip(f"{system} does not support basic main run.") @@ -35,5 +34,3 @@ def test_engine(system, system_config): assert engine_stats['time simulated'] == '0:02:00' # TODO: More specific tests of values - - gc.collect() diff --git a/tests/systems/test_main_basic_run.py b/tests/systems/test_main_basic_run.py index c420b5921829a2c664c6d1d005451c6cf9a9a519..0cc9b690ed20a6050c062fef63a8897cf15a9893 100644 --- a/tests/systems/test_main_basic_run.py +++ b/tests/systems/test_main_basic_run.py @@ -1,6 +1,5 @@ import os import subprocess -import gc import pytest from tests.util import PROJECT_ROOT @@ -11,7 +10,7 @@ pytestmark = [ ] -def test_main_basic_run(system, system_config, random_id): +def test_main_basic_run(system, system_config, sim_output): if not system_config.get("main", False): pytest.skip(f"{system} does not support basic main run.") @@ -20,15 +19,6 @@ def test_main_basic_run(system, system_config, random_id): "python", "main.py", "run", "--time", "1m", "--system", system, - "-o", random_id + "-o", sim_output ], capture_output=True, text=True, stdin=subprocess.DEVNULL) assert result.returncode == 0, f"Failed on {system}: {result.stderr}" - - subprocess.run( - f"rm {random_id}.npz && rm -fr simulation_results/{random_id}", - shell=True, - check=True - ) - - del result - gc.collect() diff --git a/tests/systems/test_main_cooling_run.py b/tests/systems/test_main_cooling_run.py index 
62d862121306843009cd92f451d17fc22670c8bd..da0c3a33015b483bbd29b109724dd2c41f920293 100644 --- a/tests/systems/test_main_cooling_run.py +++ b/tests/systems/test_main_cooling_run.py @@ -1,6 +1,5 @@ import os import subprocess -import gc import pytest from tests.util import PROJECT_ROOT @@ -12,7 +11,7 @@ pytestmark = [ ] -def test_main_cooling_run(system, system_config, random_id): +def test_main_cooling_run(system, system_config, sim_output): if not system_config.get("cooling", False): pytest.skip(f"{system} does not support cooling.") @@ -23,15 +22,6 @@ def test_main_cooling_run(system, system_config, random_id): "--system", system, "-c", "--noui", - "-o", random_id + "-o", sim_output ], capture_output=True, text=True, stdin=subprocess.DEVNULL) assert result.returncode == 0, f"Failed on {system}: {result.stderr}" - - subprocess.run( - f"rm {random_id}.npz && rm -fr simulation_results/{random_id}", - shell=True, - check=True - ) - - del result - gc.collect() diff --git a/tests/systems/test_main_cooling_uncertainty_run.py b/tests/systems/test_main_cooling_uncertainty_run.py index 742fe87a87944d01fcbd2fbed206caf74e88c9e9..472771d2e663b8075c78f78fadd77ac0bfa2eef3 100644 --- a/tests/systems/test_main_cooling_uncertainty_run.py +++ b/tests/systems/test_main_cooling_uncertainty_run.py @@ -1,6 +1,5 @@ import os import subprocess -import gc import pytest from tests.util import PROJECT_ROOT @@ -12,7 +11,7 @@ pytestmark = [ ] -def test_main_cooling_uncertainty_run(request, system, system_config, random_id): +def test_main_cooling_uncertainty_run(request, system, system_config, sim_output): print(f"Markexpr: {request.config.option.markexpr}") if not system_config.get("uncertainty", False) or not system_config.get("cooling", False): pytest.skip(f"{system} does not support cooling or uncertainty.") @@ -25,15 +24,6 @@ def test_main_cooling_uncertainty_run(request, system, system_config, random_id) "-c", "-u", "--noui", - "-o", random_id + "-o", sim_output ], capture_output=True, 
text=True, stdin=subprocess.DEVNULL) assert result.returncode == 0, f"Failed on {system}: {result.stderr}" - - subprocess.run( - f"rm {random_id}.npz && rm -fr simulation_results/{random_id}", - shell=True, - check=True - ) - - del result - gc.collect() diff --git a/tests/systems/test_main_fastforward_run.py b/tests/systems/test_main_fastforward_run.py index 3eb567c0276d77ff191e5161613e7d185687d08d..ab19b2403b179221e2c65798dd524d81e4d0d17f 100644 --- a/tests/systems/test_main_fastforward_run.py +++ b/tests/systems/test_main_fastforward_run.py @@ -1,6 +1,5 @@ import os import subprocess -import gc import pytest from tests.util import PROJECT_ROOT @@ -18,7 +17,7 @@ pytestmark = [ "0m", "1m", "60m", "0h", "1h", "6h", ]) -def test_main_fastforward_run(system, system_config, ff_arg, random_id): +def test_main_fastforward_run(system, system_config, ff_arg, sim_output): if not system_config.get("fastforward", False): pytest.skip(f"{system} does not support basic main run.") @@ -29,15 +28,6 @@ def test_main_fastforward_run(system, system_config, ff_arg, random_id): "--fastforward", ff_arg, "--system", system, "--noui", - "-o", random_id + "-o", sim_output ], capture_output=True, text=True, stdin=subprocess.DEVNULL) assert result.returncode == 0, f"Failed on {system}: {result.stderr}" - - subprocess.run( - f"rm {random_id}.npz && rm -fr simulation_results/{random_id}", - shell=True, - check=True - ) - - del result - gc.collect() diff --git a/tests/systems/test_main_help.py b/tests/systems/test_main_help.py index a651a382a426534c7adcfa05179e9a52095b7b24..97fabefc56ff11be1ff034c6c10fd63d8b990dfc 100644 --- a/tests/systems/test_main_help.py +++ b/tests/systems/test_main_help.py @@ -1,6 +1,5 @@ import os import subprocess -import gc import pytest from tests.util import PROJECT_ROOT @@ -11,7 +10,7 @@ pytestmark = [ ] -def test_main_help(system, system_config, random_id): +def test_main_help(system, system_config): if not system_config.get("main", False): pytest.skip(f"{system} 
does not support basic main run.") @@ -23,6 +22,3 @@ def test_main_help(system, system_config, random_id): assert result.returncode == 0, f"Failed on {system}: {result.stderr}" assert "usage:" in result.stdout - - del result - gc.collect() diff --git a/tests/systems/test_main_network_run.py b/tests/systems/test_main_network_run.py index 8c7db1e87bfbb7fbcb5bb335420aca099858739a..ea693b49e5d7c89beb20d6bcaf82f3e9b32db562 100644 --- a/tests/systems/test_main_network_run.py +++ b/tests/systems/test_main_network_run.py @@ -1,6 +1,5 @@ import os import subprocess -import gc import pytest from tests.util import PROJECT_ROOT @@ -12,7 +11,7 @@ pytestmark = [ ] -def test_main_network_run(system, system_config, random_id): +def test_main_network_run(system, system_config, sim_output): if not system_config.get("main", False): pytest.skip(f"{system} does not support basic main run.") @@ -25,15 +24,6 @@ def test_main_network_run(system, system_config, random_id): "--time", "1m", "--system", system, "--net", - "-o", random_id + "-o", sim_output ], capture_output=True, text=True, stdin=subprocess.DEVNULL) assert result.returncode == 0, f"Failed on {system}: {result.stderr}" - - subprocess.run( - f"rm {random_id}.npz && rm -fr simulation_results/{random_id}", - shell=True, - check=True - ) - - del result - gc.collect() diff --git a/tests/systems/test_main_network_withdata_run.py b/tests/systems/test_main_network_withdata_run.py index 1dcfee029ad6f51d11c2b70f32088808cca24d88..62b679abba9e0f4eeeb3b33ca2491cff86e817a7 100644 --- a/tests/systems/test_main_network_withdata_run.py +++ b/tests/systems/test_main_network_withdata_run.py @@ -1,6 +1,5 @@ import os import subprocess -import gc import pytest from tests.util import PROJECT_ROOT, DATA_PATH @@ -14,34 +13,17 @@ pytestmark = [ ] -def test_main_network_withdata_run(system, system_config, system_file, random_id): +def test_main_network_withdata_run(system, system_config, system_files, sim_output): if not system_config.get("net", 
False): pytest.skip(f"{system} does not support basic net run.") - if isinstance(system_file, list): - file_list = [DATA_PATH / system / x for x in system_file] - else: - file_list = [DATA_PATH / system / system_file] - for file in file_list: - assert os.path.isfile(file) or os.path.isdir(file), \ - "File does not exist. does ./data exist or is RAPS_DATA_DIR set?" - os.chdir(PROJECT_ROOT) result = subprocess.run([ "python", "main.py", "run", "--time", "1m", "--system", system, - "-f", *file_list, + "-f", *system_files, "--net", - "-o", random_id + "-o", sim_output ], capture_output=True, text=True, stdin=subprocess.DEVNULL) assert result.returncode == 0, f"Failed on {system}: {result.stderr}" - - subprocess.run( - f"rm {random_id}.npz && rm -fr simulation_results/{random_id}", - shell=True, - check=True - ) - - del result - gc.collect() diff --git a/tests/systems/test_main_noui_run.py b/tests/systems/test_main_noui_run.py index af8bea80d88195e44ea17b91caaa61d4e1c1f09f..08e71898c51fba4813d39e5487aece2e62e9b42c 100644 --- a/tests/systems/test_main_noui_run.py +++ b/tests/systems/test_main_noui_run.py @@ -1,6 +1,5 @@ import os import subprocess -import gc import pytest from tests.util import PROJECT_ROOT @@ -11,7 +10,7 @@ pytestmark = [ ] -def test_main_noui_run(system, system_config, random_id): +def test_main_noui_run(system, system_config, sim_output): if not system_config.get("main", False): pytest.skip(f"{system} does not support basic main run.") @@ -21,15 +20,6 @@ def test_main_noui_run(system, system_config, random_id): "--time", "1m", "--system", system, "--noui", - "-o", random_id + "-o", sim_output ], capture_output=True, text=True, stdin=subprocess.DEVNULL) assert result.returncode == 0, f"Failed on {system}: {result.stderr}" - - subprocess.run( - f"rm {random_id}.npz && rm -fr simulation_results/{random_id}", - shell=True, - check=True - ) - - del result - gc.collect() diff --git a/tests/systems/test_main_time_delta_run.py 
b/tests/systems/test_main_time_delta_run.py index 880805282785f7757ec19026668430429390fdcf..2ca8477f452fbfc0e0c8326301d28de5d7c8a1be 100644 --- a/tests/systems/test_main_time_delta_run.py +++ b/tests/systems/test_main_time_delta_run.py @@ -1,6 +1,5 @@ import os import subprocess -import gc import pytest from tests.util import PROJECT_ROOT from raps.utils import convert_to_time_unit, convert_seconds_to_hhmmss @@ -22,7 +21,7 @@ pytestmark = [ ("10h", "3h"), ("3d", "1d") ], ids=["1", "1s", "10s", "1m", "1h", "3h", "1d"]) -def test_main_time_delta_run(system, system_config, time_arg, tdelta_arg, random_id): +def test_main_time_delta_run(system, system_config, time_arg, tdelta_arg, sim_output): if not system_config.get("time_delta", False): pytest.skip(f"{system} does not support time_delta run.") @@ -33,17 +32,8 @@ def test_main_time_delta_run(system, system_config, time_arg, tdelta_arg, random "--time-delta", tdelta_arg, "--system", system, "--noui", - "-o", random_id + "-o", sim_output ], capture_output=True, text=True, stdin=subprocess.DEVNULL) assert result.returncode == 0, f"Failed on {system}: {result.stderr}" time = convert_to_time_unit(time_arg) assert f"Time Simulated: {convert_seconds_to_hhmmss(time)}" in result.stdout - - subprocess.run( - f"rm {random_id}.npz && rm -fr simulation_results/{random_id}", - shell=True, - check=True - ) - - del result - gc.collect() diff --git a/tests/systems/test_main_time_delta_sub_second_run.py b/tests/systems/test_main_time_delta_sub_second_run.py index 0bedee88d80b41af59edf94318af62dfa6f39399..55c0e3ca2ad99551770b76406b1291deec480710 100644 --- a/tests/systems/test_main_time_delta_sub_second_run.py +++ b/tests/systems/test_main_time_delta_sub_second_run.py @@ -23,7 +23,7 @@ pytestmark = [ ("100ms", "1ms"), ("100ms", "1s"), ], ids=["1ds", "3ds", "1cs", "1ms", "1cs-for-10ds", "1ms-for-10cs", "1ms-for-100ms", "1s-for-100ms"]) -def test_main_time_delta_sub_second_run(system, system_config, time_arg, tdelta_arg, random_id): +def 
test_main_time_delta_sub_second_run(system, system_config, time_arg, tdelta_arg, sim_output): if not system_config.get("time_delta", False): pytest.skip(f"{system} does not support time_delta run.") @@ -34,14 +34,14 @@ def test_main_time_delta_sub_second_run(system, system_config, time_arg, tdelta_ "--time-delta", tdelta_arg, "--system", system, "--noui", - "-o", random_id + "-o", sim_output ], capture_output=True, text=True, stdin=subprocess.DEVNULL) assert result.returncode == 0, f"Failed on {system}: {result.stderr}" time = parse_td(time_arg).seconds assert f"Time Simulated: {convert_seconds_to_hhmmss(time)}" in result.stdout subprocess.run( - f"rm {random_id}.npz && rm -fr simulation_results/{random_id}", + f"rm {sim_output}.npz && rm -fr simulation_results/{sim_output}", shell=True, check=True ) diff --git a/tests/systems/test_main_time_ff_delta_run.py b/tests/systems/test_main_time_ff_delta_run.py index a6c87639d0ec02bc95e82929248f75202db1943c..74247583e31067f328560aed438cdf91e0234243 100644 --- a/tests/systems/test_main_time_ff_delta_run.py +++ b/tests/systems/test_main_time_ff_delta_run.py @@ -1,6 +1,5 @@ import os import subprocess -import gc import pytest from tests.util import PROJECT_ROOT @@ -22,7 +21,7 @@ pytestmark = [ pytest.param("3d", "1d", "1d", marks=pytest.mark.long, id="1d (long)"), ], ids=["1", "1s", "10s", "1m", "1h", "3h", "1d"]) def test_main_time_ff_delta_run(system, system_config, time_arg, tdelta_arg, - ff_arg, random_id): + ff_arg, sim_output): if not system_config.get("time_delta", False): pytest.skip(f"{system} does not support time_delta run.") @@ -34,15 +33,6 @@ def test_main_time_ff_delta_run(system, system_config, time_arg, tdelta_arg, "--time-delta", tdelta_arg, "--system", system, "--noui", - "-o", random_id + "-o", sim_output ], capture_output=True, text=True, stdin=subprocess.DEVNULL) assert result.returncode == 0, f"Failed on {system}: {result.stderr}" - - subprocess.run( - f"rm {random_id}.npz && rm -fr 
simulation_results/{random_id}", - shell=True, - check=True - ) - - del result - gc.collect() diff --git a/tests/systems/test_main_time_run.py b/tests/systems/test_main_time_run.py index c8e00b14293ff6d2820680743585801e0c1f7841..0faa06cb121c1f444ea8e64d0e9e04dbf4e9d1c9 100644 --- a/tests/systems/test_main_time_run.py +++ b/tests/systems/test_main_time_run.py @@ -1,6 +1,5 @@ import os import subprocess -import gc import pytest from tests.util import PROJECT_ROOT @@ -21,7 +20,7 @@ pytestmark = [ "0h", "1h", pytest.param("6h", marks=pytest.mark.long), # mark this one as long ]) -def test_main_time_run(system, system_config, time_args, random_id): +def test_main_time_run(system, system_config, time_args, sim_output): if not system_config.get("main", False): pytest.skip(f"{system} does not support basic main run.") @@ -31,15 +30,6 @@ def test_main_time_run(system, system_config, time_args, random_id): "--time", time_args, "--system", system, "--noui", - "-o", random_id + "-o", sim_output ], capture_output=True, text=True, stdin=subprocess.DEVNULL) assert result.returncode == 0, f"Failed on {system}: {result.stderr}" - - subprocess.run( - f"rm {random_id}.npz && rm -fr simulation_results/{random_id}", - shell=True, - check=True - ) - - del result - gc.collect() diff --git a/tests/systems/test_main_uncertainty_run.py b/tests/systems/test_main_uncertainty_run.py index effdcc64d1bdfb0c789f30eda4e95a45ab9dd534..f3d5bd070eb34a86ce0e698a8835320d4971329e 100644 --- a/tests/systems/test_main_uncertainty_run.py +++ b/tests/systems/test_main_uncertainty_run.py @@ -1,6 +1,5 @@ import os import subprocess -import gc import pytest from tests.util import PROJECT_ROOT @@ -13,7 +12,7 @@ pytestmark = [ ] -def test_main_uncertainty_run(system, system_config, random_id): +def test_main_uncertainty_run(system, system_config, sim_output): if not system_config.get("uncertainty", False): pytest.skip(f"{system} does not support uncertainty.") @@ -24,15 +23,6 @@ def 
test_main_uncertainty_run(system, system_config, random_id): "--system", system, "-u", "--noui", - "-o", random_id + "-o", sim_output ], capture_output=True, text=True, stdin=subprocess.DEVNULL) assert result.returncode == 0, f"Failed on {system}: {result.stderr}" - - subprocess.run( - f"rm {random_id}.npz && rm -fr simulation_results/{random_id}", - shell=True, - check=True - ) - - del result - gc.collect() diff --git a/tests/systems/test_main_withdata_run.py b/tests/systems/test_main_withdata_run.py index a4cbd55cb37f3773d7d272e32d8113009fe09da5..eb996a346b6920eabc4db8934e3d69018a551b6c 100644 --- a/tests/systems/test_main_withdata_run.py +++ b/tests/systems/test_main_withdata_run.py @@ -1,6 +1,5 @@ import os import subprocess -import gc import pytest from tests.util import PROJECT_ROOT, DATA_PATH @@ -12,33 +11,17 @@ pytestmark = [ ] -def test_main_withdata_run(system, system_config, system_file, random_id): +def test_main_withdata_run(system, system_config, system_files, sim_output): if not system_config.get("main", False): pytest.skip(f"{system} does not support basic main even without data.") if not system_config.get("withdata", False): pytest.skip(f"{system} does not support basic main with data.") - if isinstance(system_file, list): - file_list = [DATA_PATH / system / x for x in system_file] - else: - file_list = [DATA_PATH / system / system_file] - for file in file_list: - assert os.path.isfile(file) or os.path.isdir(file), \ - f"File `{file}' does not exist. does ./data exist or is RAPS_DATA_DIR set?" 
os.chdir(PROJECT_ROOT) result = subprocess.run([ "python", "main.py", "run", "--time", "1m", "--system", system, - "-f", ','.join(str(p) for p in file_list), - "-o", random_id + "-f", ','.join(system_files), + "-o", sim_output ], capture_output=True, text=True, stdin=subprocess.DEVNULL) assert result.returncode == 0, f"Failed on {system}: {result.stderr}" - - subprocess.run( - f"rm {random_id}.npz && rm -fr simulation_results/{random_id}", - shell=True, - check=True - ) - - del result - gc.collect() diff --git a/tests/systems/test_multi_part_sim_network_run.py b/tests/systems/test_multi_part_sim_network_run.py index ccbadaaac11b063886d553a8f04b08b2053b8827..aa90cca0180db1cbfb4c5b903fbc3bd141670e21 100644 --- a/tests/systems/test_multi_part_sim_network_run.py +++ b/tests/systems/test_multi_part_sim_network_run.py @@ -1,6 +1,5 @@ import os import subprocess -import gc import pytest from tests.util import PROJECT_ROOT @@ -11,8 +10,7 @@ pytestmark = [ ] -def test_multi_part_sim_network_run(system, system_config, random_id): - +def test_multi_part_sim_network_run(system, system_config, sim_output): if not system_config.get("multi-part-sim", False): pytest.skip(f"{system} does not support basic multi-part-sim run.") @@ -25,11 +23,6 @@ def test_multi_part_sim_network_run(system, system_config, random_id): "--time", "1h", "-x", f"{system}/*", "--net", + "-o", sim_output, ], capture_output=True, text=True, stdin=subprocess.DEVNULL) assert result.returncode == 0, f"Failed on {system}: {result.stderr}" - - # TODO: - # Cleanup files after test! 
- - del result - gc.collect() diff --git a/tests/systems/test_multi_part_sim_withdata_run.py b/tests/systems/test_multi_part_sim_withdata_run.py index 2b183054ea418196642290da9e65a4866fbf4f3c..caaf9e8fb0de40848abb66c4bef2efa1f26255c0 100644 --- a/tests/systems/test_multi_part_sim_withdata_run.py +++ b/tests/systems/test_multi_part_sim_withdata_run.py @@ -12,26 +12,17 @@ pytestmark = [ ] -def test_multi_part_sim_withdata_run(system, system_config, system_file): +def test_multi_part_sim_withdata_run(system, system_config, system_files): if not system_config.get("multi-part-sim", False): pytest.skip(f"{system} does not support basic multi-part-sim run even without data.") if not system_config.get("withdata", False): pytest.skip(f"{system} does not support multi-part-sim run with data.") - if isinstance(system_file, list): - file_list = [DATA_PATH / system / x for x in system_file] - else: - file_list = [DATA_PATH / system / system_file] - for file in file_list: - assert os.path.isfile(file) or os.path.isdir(file), \ - f"File `{file}' does not exist. does ./data exist or is RAPS_DATA_DIR set?" 
os.chdir(PROJECT_ROOT) result = subprocess.run([ "python", "main.py", "run-multi-part", "--time", "1h", "-x", f"{system}/*", - "-f", *file_list, + "-f", *system_files, ], capture_output=True, text=True, stdin=subprocess.DEVNULL) assert result.returncode == 0, f"Failed on {system}: {result.stderr}" - del result - gc.collect() diff --git a/tests/systems/test_telemetry_withdata_run.py b/tests/systems/test_telemetry_withdata_run.py index e9685f7f3a01a21c8eadd989957511cdbf2294bd..ab6f93cd45c8857bb938f28e704a9442a20f8b5c 100644 --- a/tests/systems/test_telemetry_withdata_run.py +++ b/tests/systems/test_telemetry_withdata_run.py @@ -1,6 +1,5 @@ import os import subprocess -import gc import pytest from tests.util import PROJECT_ROOT, DATA_PATH @@ -11,33 +10,17 @@ pytestmark = [ ] -def test_telemetry_main_withdata_run(system, system_config, system_file, random_id): +def test_telemetry_main_withdata_run(system, system_config, system_files, sim_output): if not system_config.get("telemetry", False): pytest.skip(f"{system} does not support telemetry run.") if not system_config.get("withdata", False): pytest.skip(f"{system} does not support telemetry run with data.") - if isinstance(system_file, list): - file_list = [DATA_PATH / system / x for x in system_file] - else: - file_list = [DATA_PATH / system / system_file] - for file in file_list: - assert os.path.isfile(file) or os.path.isdir(file), \ - f"File `{file}' does not exist. does ./data exist or is RAPS_DATA_DIR set?" os.chdir(PROJECT_ROOT) result = subprocess.run([ "python", "main.py", "telemetry", "--system", system, - "-f", *file_list, - "-o", random_id + "-f", *system_files, + "-o", sim_output, ], capture_output=True, text=True, stdin=subprocess.DEVNULL) assert result.returncode == 0, f"Failed on {system}: {result.stderr}" - - subprocess.run( - f"rm {random_id}.npz ; rm {random_id}.png", - shell=True, - check=True - ) - - del result - gc.collect()