Skip to content
Snippets Groups Projects
Commit eb80002b authored by Simon Heybrock's avatar Simon Heybrock Committed by GitHub
Browse files

Merge pull request #19163 from mantidproject/19113_add_unit_tests_ISIS_Powder_common

Add unit tests for ISIS_Powder common routines
parents fc876921 fdf327ff
No related merge requests found
Showing
with 719 additions and 4431 deletions
Source diff could not be displayed because it is too large. To view it, open the blob directly.
8dfe71a63ad15a9880fb6c7ab3165151
a5c54ed2762cd13e6a75f50d09d6b5d7
......@@ -24,50 +24,54 @@ set_property ( TARGET CompileUIUI PROPERTY FOLDER "CompilePyUI" )
set_property ( TARGET CompileUIHFIR_4Circle_Reduction PROPERTY FOLDER "CompilePyUI" )
set ( TEST_PY_FILES
test/AbinsAtomsDataTest.py
test/AbinsCalculateDWSingleCrystalTest.py
test/AbinsCalculatePowderTest.py
test/AbinsCalculateQToscaTest.py
test/AbinsCalculateSingleCrystalTest.py
test/AbinsCalculateSPowderTest.py
test/AbinsDWSingleCrystalDataTest.py
test/AbinsFrequencyPowderGeneratorTest.py
test/AbinsIOmoduleTest.py
test/AbinsKpointsDataTest.py
test/AbinsLoadCASTEPTest.py
test/AbinsLoadCRYSTALTest.py
test/AbinsPowderDataTest.py
test/ConvertToWavelengthTest.py
test/CrystalFieldTest.py
test/DirectReductionHelpersTest.py
test/DirectPropertyManagerTest.py
test/DirectEnergyConversionTest.py
test/ISISDirecInelasticConfigTest.py
test/DirectPropertyManagerTest.py
test/DirectReductionHelpersTest.py
test/IndirectCommonTests.py
test/ISISDirecInelasticConfigTest.py
test/ISISPowderAbsorptionTest.py
test/ISISPowderCommonTest.py
test/ISISPowderInstrumentSettingsTest.py
test/ISISPowderYamlParserTest.py
test/PyChopTest.py
test/ReductionSettingsTest.py
test/ReductionWrapperTest.py
test/ReflectometryQuickAuxiliaryTest.py
test/RunDescriptorTest.py
test/SansIsisGuiSettings.py
test/SANSBatchModeTest.py
test/SANSCentreFinderTest.py
test/SANSCommandInterfaceTest.py
test/SANSDarkRunCorrectionTest.py
test/SANSUserFileParserTest.py
test/SANSUtilityTest.py
test/SansIsisGuiSettings.py
test/SANSIsisInstrumentTest.py
test/SANSReducerTest.py
test/SANSReductionStepsUserFileTest.py
test/SANSUserFileParserTest.py
test/SANSUtilityTest.py
test/SettingsTest.py
test/VesuvioBackgroundTest.py
test/VesuvioFittingTest.py
test/VesuvioProfileTest.py
test/ReductionSettingsTest.py
test/AbinsIOmoduleTest.py
test/AbinsLoadCASTEPTest.py
test/AbinsLoadCRYSTALTest.py
test/AbinsCalculateQToscaTest.py
test/AbinsFrequencyPowderGeneratorTest.py
test/AbinsKpointsDataTest.py
test/AbinsAtomsDataTest.py
test/AbinsDWSingleCrystalDataTest.py
test/AbinsCalculateDWSingleCrystalTest.py
test/AbinsPowderDataTest.py
test/AbinsCalculatePowderTest.py
test/AbinsCalculateSingleCrystalTest.py
test/AbinsCalculateSPowderTest.py
)
# Addition tests for SANS components
add_subdirectory(test/SANS)
# python unit tests
if (PYUNITTEST_FOUND)
pyunittest_add_test ( ${CMAKE_CURRENT_SOURCE_DIR}/test PythonScriptsTest ${TEST_PY_FILES} )
......
from .pearl import Pearl
from .polaris import Polaris
__all__ = ["Pearl", "Polaris"]
......@@ -53,14 +53,14 @@ class Gem(AbstractInst):
multiple_scattering=self._inst_settings.multiple_scattering)
def _crop_banks_to_user_tof(self, focused_banks):
return common.crop_banks_in_tof(focused_banks, self._inst_settings.focused_cropping_values)
return common.crop_banks_using_crop_list(focused_banks, self._inst_settings.focused_cropping_values)
def _crop_raw_to_expected_tof_range(self, ws_to_crop):
raw_cropping_values = self._inst_settings.raw_tof_cropping_values
return common.crop_in_tof(ws_to_crop, raw_cropping_values[0], raw_cropping_values[1])
def _crop_van_to_expected_tof_range(self, van_ws_to_crop):
return common.crop_banks_in_tof(van_ws_to_crop, self._inst_settings.vanadium_cropping_values)
return common.crop_banks_using_crop_list(van_ws_to_crop, self._inst_settings.vanadium_cropping_values)
def _get_sample_empty(self):
sample_empty = self._inst_settings.sample_empty
......
......@@ -109,7 +109,7 @@ class Pearl(AbstractInst):
return grouped_d_spacing, None
def _crop_banks_to_user_tof(self, focused_banks):
return common.crop_banks_in_tof(focused_banks, self._inst_settings.tof_cropping_values)
return common.crop_banks_using_crop_list(focused_banks, self._inst_settings.tof_cropping_values)
def _crop_raw_to_expected_tof_range(self, ws_to_crop):
out_ws = common.crop_in_tof(ws_to_crop=ws_to_crop, x_min=self._inst_settings.raw_data_crop_vals[0],
......
......@@ -47,7 +47,7 @@ class Polaris(AbstractInst):
return True
def _crop_banks_to_user_tof(self, focused_banks):
return common.crop_banks_in_tof(focused_banks, self._inst_settings.focused_cropping_values)
return common.crop_banks_using_crop_list(focused_banks, self._inst_settings.focused_cropping_values)
def _crop_raw_to_expected_tof_range(self, ws_to_crop):
cropped_ws = common.crop_in_tof(ws_to_crop=ws_to_crop, x_min=self._inst_settings.raw_data_crop_values[0],
......@@ -55,8 +55,8 @@ class Polaris(AbstractInst):
return cropped_ws
def _crop_van_to_expected_tof_range(self, van_ws_to_crop):
cropped_ws = common.crop_banks_in_tof(bank_list=van_ws_to_crop,
crop_values_list=self._inst_settings.van_crop_values)
cropped_ws = common.crop_banks_using_crop_list(bank_list=van_ws_to_crop,
crop_values_list=self._inst_settings.van_crop_values)
return cropped_ws
def _generate_auto_vanadium_calibration(self, run_details):
......
......@@ -65,9 +65,9 @@ class InstrumentSettings(object):
self._parse_attributes(self._adv_config_dict, suppress_warnings=suppress_warnings)
if advanced_config or basic_config:
self._parse_attributes(self._basic_conf_dict,
suppress_warnings=(not bool(basic_config or suppress_warnings)))
suppress_warnings=(not basic_config or suppress_warnings))
if advanced_config or basic_config or kwargs:
self._parse_attributes(self._kwargs, suppress_warnings=(not bool(kwargs or suppress_warnings)))
self._parse_attributes(self._kwargs, suppress_warnings=(not kwargs or suppress_warnings))
def _parse_attributes(self, dict_to_parse, suppress_warnings=False):
if not dict_to_parse:
......@@ -101,7 +101,8 @@ class InstrumentSettings(object):
# Does the attribute exist - has it changed and are we suppressing warnings
if not suppress_warnings:
if hasattr(self, attribute_name) and getattr(self, attribute_name) != param_val:
previous_value = getattr(self, attribute_name) if hasattr(self, attribute_name) else None
if previous_value and previous_value != param_val:
# Print warning of what value we are replacing for which parameter
warnings.warn("Replacing parameter: '" + str(param_map.ext_name) + "' which was previously set to: '" +
str(getattr(self, attribute_name)) + "' with new value: '" + str(param_val) + "'")
......
......@@ -22,9 +22,9 @@ def cal_map_dictionary_key_helper(dictionary, key, append_to_error_message=None)
return dictionary_key_helper(dictionary=dictionary, key=key, throws=True, exception_msg=err_message)
def crop_banks_in_tof(bank_list, crop_values_list):
def crop_banks_using_crop_list(bank_list, crop_values_list):
"""
Crops the each bank by the specified tuple values from a list of tuples in TOF. The number
Crops each bank by the specified tuple values from a list of tuples in TOF. The number
of tuples must match the number of banks to crop. A list of [(100,200), (150,250)] would crop
bank 1 to the values 100, 200 and bank 2 to 150 and 250 in TOF.
:param bank_list: The list of workspaces each containing one bank of data to crop
......@@ -32,11 +32,12 @@ def crop_banks_in_tof(bank_list, crop_values_list):
:return: A list of cropped workspaces
"""
if not isinstance(crop_values_list, list):
if isinstance(bank_list, list):
raise ValueError("The cropping values were not in a list type")
else:
raise RuntimeError("Attempting to use list based cropping on a single workspace not in a list")
raise ValueError("The cropping values were not in a list type")
elif not isinstance(bank_list, list):
# This error is probably internal as we control the bank lists
raise RuntimeError("Attempting to use list based cropping on a single workspace not in a list")
# Finally check the number of elements are equal
if len(bank_list) != len(crop_values_list):
raise RuntimeError("The number of TOF cropping values does not match the number of banks for this instrument")
......@@ -156,7 +157,10 @@ def get_first_run_number(run_number_string):
:return: The first run for the user input of runs
"""
run_numbers = generate_run_numbers(run_number_string=run_number_string)
return run_numbers[0]
if isinstance(run_numbers, list):
run_numbers = run_numbers[0]
return run_numbers
def get_monitor_ws(ws_to_process, run_number_string, instrument):
......@@ -170,8 +174,8 @@ def get_monitor_ws(ws_to_process, run_number_string, instrument):
:param instrument: The instrument to query for the monitor position
:return: The extracted monitor as a workspace
"""
number_list = generate_run_numbers(run_number_string)
monitor_spectra = instrument._get_monitor_spectra_index(number_list[0])
first_run_number = get_first_run_number(run_number_string)
monitor_spectra = instrument._get_monitor_spectra_index(first_run_number)
load_monitor_ws = mantid.ExtractSingleSpectrum(InputWorkspace=ws_to_process, WorkspaceIndex=monitor_spectra)
return load_monitor_ws
......@@ -221,7 +225,7 @@ def load_current_normalised_ws_list(run_number_string, instrument, input_batchin
run_information = instrument._get_run_details(run_number_string=run_number_string)
raw_ws_list = _load_raw_files(run_number_string=run_number_string, instrument=instrument)
if input_batching.lower() == INPUT_BATCHING.Summed.lower() and len(raw_ws_list) > 1:
if input_batching == INPUT_BATCHING.Summed and len(raw_ws_list) > 1:
summed_ws = _sum_ws_range(ws_list=raw_ws_list)
remove_intermediate_workspace(raw_ws_list)
raw_ws_list = [summed_ws]
......@@ -424,4 +428,4 @@ def _run_number_generator(processed_string):
number_generator = kernel.IntArrayProperty('array_generator', processed_string)
return number_generator.value.tolist()
except RuntimeError:
raise RuntimeError("Could not generate run numbers from this input: " + processed_string)
raise ValueError("Could not generate run numbers from this input: " + processed_string)
......@@ -8,8 +8,7 @@ from isis_powder.routines import yaml_sanity
def get_run_dictionary(run_number_string, file_path):
if isinstance(run_number_string, str):
run_number_list = common.generate_run_numbers(run_number_string=run_number_string)
run_number_string = run_number_list[0]
run_number_string = common.get_first_run_number(run_number_string=run_number_string)
config_file = open_yaml_file_as_dictionary(file_path)
yaml_sanity.calibration_file_sanity_check(config_file, file_path)
......@@ -56,8 +55,9 @@ def _find_dictionary_key(dict_to_search, run_number):
generated_runs = common.generate_run_numbers(run_number_string=key)
except RuntimeError:
raise ValueError("Could not parse '" + str(key) + "'\n"
"This should be a range of runs in this cycle in the mapping file."
" Please check your indentation if this should be within a cycle.")
"This should be a range of runs in the mapping file."
" Please check your indentation and YAML syntax is correct.")
if run_number in generated_runs:
return key
......
......@@ -4,7 +4,7 @@ from __future__ import (absolute_import, division, print_function)
def calibration_file_sanity_check(yaml_dict, file_path):
# Check that the dictionary has data
if not yaml_dict:
raise ValueError("YAML dictionary appear to be empty at:\n" + str(file_path))
raise ValueError("YAML files appears to be empty at:\n" + str(file_path))
# Check that we only have one unbounded range at maximum
unbound_key_exists = _does_single_unbound_key_exist(yaml_dict)
......
from __future__ import (absolute_import, division, print_function)
import mantid.simpleapi as mantid
import unittest
from six import iterkeys, assertRaisesRegex
from isis_powder.routines import absorb_corrections
class ISISPowderAbsorptionTest(unittest.TestCase):
    """Unit tests for the ISIS Powder cylinder absorption correction routines."""

    def test_sample_is_set_correctly(self):
        # A complete sample property dictionary should apply the absorption
        # correction and produce the expected corrected Y values.
        sample_properties = {
            "cylinder_sample_height": 4.0,
            "cylinder_sample_radius": 0.25,
            "cylinder_position": [0., 0., 0.],
            "chemical_formula": "V"
        }

        ws = mantid.CreateSampleWorkspace(Function='Flat background', NumBanks=1, BankPixelWidth=1, XMax=10, BinWidth=1)
        ws = absorb_corrections.run_cylinder_absorb_corrections(ws_to_correct=ws, multiple_scattering=False,
                                                                config_dict=sample_properties)

        # Spot-check corrected values at several bins
        self.assertAlmostEqual(ws.dataY(0)[2], 1.16864808, delta=1e-8)
        self.assertAlmostEqual(ws.dataY(0)[5], 1.16872761, delta=1e-8)
        self.assertAlmostEqual(ws.dataY(0)[9], 1.16883365, delta=1e-8)

    def test_missing_property_is_detected(self):
        # Removing any single required key should raise a KeyError naming that key
        sample_properties = {
            "cylinder_sample_height": 4.0,
            "cylinder_sample_radius": 0.25,
            "cylinder_position": [0., 0., 0.],
            "chemical_formula": "V"
        }

        ws = mantid.CreateSampleWorkspace(Function='Flat background', NumBanks=1, BankPixelWidth=1, XMax=2, BinWidth=1)

        # Test each key one at a time
        for blacklisted_key in iterkeys(sample_properties):
            # Force python to make a shallow copy
            modified_dict = sample_properties.copy()
            modified_dict.pop(blacklisted_key)

            # Check that is raises an error
            with assertRaisesRegex(self, KeyError, "The following key was not found in the advanced configuration"):
                ws = absorb_corrections.run_cylinder_absorb_corrections(ws_to_correct=ws, multiple_scattering=False,
                                                                        config_dict=modified_dict)

            # Then check the error actually has the key name in it
            with assertRaisesRegex(self, KeyError, blacklisted_key):
                ws = absorb_corrections.run_cylinder_absorb_corrections(ws_to_correct=ws, multiple_scattering=False,
                                                                        config_dict=modified_dict)

    def test_formula_requires_number_density(self):
        # A multi-element chemical formula cannot derive a number density, so one
        # must be supplied explicitly; check the error and then the success path.
        sample_properties = {
            "cylinder_sample_height": 4.0,
            "cylinder_sample_radius": 0.25,
            "cylinder_position": [0., 0., 0.],
            "chemical_formula": "V Nb"
        }

        expected_number_density = 1.234
        ws = mantid.CreateSampleWorkspace(Function='Flat background', NumBanks=1, BankPixelWidth=1, XMax=2, BinWidth=1)

        with assertRaisesRegex(self, KeyError, "The number density is required as the chemical formula"):
            ws = absorb_corrections.run_cylinder_absorb_corrections(ws_to_correct=ws, multiple_scattering=False,
                                                                    config_dict=sample_properties)

        # Supplying the number density should allow the correction to run and the
        # material on the sample should carry that density.
        sample_properties["number_density"] = expected_number_density
        ws = absorb_corrections.run_cylinder_absorb_corrections(ws_to_correct=ws, multiple_scattering=False,
                                                                config_dict=sample_properties)
        self.assertEqual(ws.sample().getMaterial().numberDensity, expected_number_density)


if __name__ == "__main__":
    unittest.main()
from __future__ import (absolute_import, division, print_function)
import mantid.simpleapi as mantid # Have to import Mantid to setup paths
import unittest
from six import assertRaisesRegex
from isis_powder.routines import common
class ISISPowderCommonTest(unittest.TestCase):
    """Unit tests for the shared helper routines in isis_powder.routines.common."""

    def test_cal_map_dict_helper(self):
        missing_key_name = "wrong_key"
        correct_key_name = "right_key"
        dict_with_key = {correct_key_name: 123}

        # Check it correctly raises
        with assertRaisesRegex(self, KeyError, "The field '" + missing_key_name + "' is required"):
            common.cal_map_dictionary_key_helper(dictionary=dict_with_key, key=missing_key_name)

        # Check it correctly appends the passed error message when raising
        appended_e_msg = "test append message"
        with assertRaisesRegex(self, KeyError, appended_e_msg):
            common.cal_map_dictionary_key_helper(dictionary=dict_with_key, key=missing_key_name,
                                                 append_to_error_message=appended_e_msg)

        # Check that it correctly returns the key value where it exists
        self.assertEqual(common.cal_map_dictionary_key_helper(dictionary=dict_with_key, key=correct_key_name), 123)

    def test_crop_banks_using_crop_list(self):
        bank_list = []
        cropping_value = (0, 1000)  # Crop to 0-1000 microseconds for unit tests
        cropping_value_list = []

        expected_number_of_bins = cropping_value[-1] - cropping_value[0]

        for i in range(0, 3):
            out_name = "crop_banks_in_tof-" + str(i)
            cropping_value_list.append(cropping_value)
            bank_list.append(mantid.CreateSampleWorkspace(OutputWorkspace=out_name, XMin=0, XMax=1100, BinWidth=1))

        # Check a list of WS and single cropping value is detected
        with assertRaisesRegex(self, ValueError, "The cropping values were not in a list type"):
            common.crop_banks_using_crop_list(bank_list=bank_list, crop_values_list=cropping_value)

        # Check a list of cropping values and a single workspace is detected
        with assertRaisesRegex(self, RuntimeError, "Attempting to use list based cropping"):
            common.crop_banks_using_crop_list(bank_list=bank_list[0], crop_values_list=cropping_value_list)

        # What about a mismatch between the number of cropping values and workspaces
        with assertRaisesRegex(self, RuntimeError, "The number of TOF cropping values does not match"):
            common.crop_banks_using_crop_list(bank_list=bank_list[1:], crop_values_list=cropping_value_list)

        # Check we can crop a single workspace from the list
        cropped_single_ws_list = common.crop_banks_using_crop_list(bank_list=[bank_list[0]], crop_values_list=[cropping_value])
        self.assertEqual(cropped_single_ws_list[0].blocksize(), expected_number_of_bins)
        mantid.DeleteWorkspace(Workspace=cropped_single_ws_list[0])

        # Check we can crop a whole list
        cropped_ws_list = common.crop_banks_using_crop_list(bank_list=bank_list[1:], crop_values_list=cropping_value_list[1:])
        for ws in cropped_ws_list[1:]:
            self.assertEqual(ws.blocksize(), expected_number_of_bins)
            mantid.DeleteWorkspace(Workspace=ws)

    def test_crop_in_tof(self):
        ws_list = []
        x_min = 100
        x_max = 500  # Crop to 0-500 microseconds for unit tests
        expected_number_of_bins = x_max - x_min

        for i in range(0, 3):
            out_name = "crop_banks_in_tof-" + str(i)
            ws_list.append(mantid.CreateSampleWorkspace(OutputWorkspace=out_name, XMin=0, XMax=600, BinWidth=1))

        # Crop a single workspace in TOF
        tof_single_ws = common.crop_in_tof(ws_to_crop=ws_list[0], x_min=x_min, x_max=x_max)
        self.assertEqual(tof_single_ws.blocksize(), expected_number_of_bins)
        mantid.DeleteWorkspace(tof_single_ws)

        # Crop a list of workspaces in TOF
        cropped_ws_list = common.crop_in_tof(ws_to_crop=ws_list[1:], x_min=x_min, x_max=x_max)
        for ws in cropped_ws_list:
            self.assertEqual(ws.blocksize(), expected_number_of_bins)
            mantid.DeleteWorkspace(ws)

    def test_crop_in_tof_coverts_units(self):
        # Checks that crop_in_tof converts to TOF before cropping
        ws_list = []
        x_min = 100
        x_max = 200
        expected_number_of_bins = 20000  # Hard code number of expected bins for dSpacing

        for i in range(0, 3):
            out_name = "crop_banks_in_dSpacing-" + str(i)
            ws_list.append(mantid.CreateSampleWorkspace(OutputWorkspace=out_name, XMin=0, XMax=20000, BinWidth=1,
                                                        XUnit="dSpacing"))

        # Crop a single workspace from d_spacing and check the number of bins
        tof_single_ws = common.crop_in_tof(ws_to_crop=ws_list[0], x_min=x_min, x_max=x_max)
        self.assertEqual(tof_single_ws.blocksize(), expected_number_of_bins)
        mantid.DeleteWorkspace(tof_single_ws)

        # Crop a list of workspaces in dSpacing
        cropped_ws_list = common.crop_in_tof(ws_to_crop=ws_list[1:], x_min=x_min, x_max=x_max)
        for ws in cropped_ws_list:
            self.assertEqual(ws.blocksize(), expected_number_of_bins)
            mantid.DeleteWorkspace(ws)

    def test_dictionary_key_helper(self):
        good_key_name = "key_exists"
        bad_key_name = "key_does_not_exist"
        test_dictionary = {good_key_name: 123}

        e_msg = "test message"

        # Missing key raises a plain KeyError by default
        with self.assertRaises(KeyError):
            common.dictionary_key_helper(dictionary=test_dictionary, key=bad_key_name)

        # A custom exception message is forwarded into the raised error
        with assertRaisesRegex(self, KeyError, e_msg):
            common.dictionary_key_helper(dictionary=test_dictionary, key=bad_key_name, exception_msg=e_msg)

        # Existing key returns its value
        self.assertEqual(common.dictionary_key_helper(dictionary=test_dictionary, key=good_key_name), 123)

    def test_extract_ws_spectra(self):
        number_of_expected_banks = 5
        ws_to_split = mantid.CreateSampleWorkspace(XMin=0, XMax=1, BankPixelWidth=1,
                                                   NumBanks=number_of_expected_banks)
        input_name = ws_to_split.getName()

        extracted_banks = common.extract_ws_spectra(ws_to_split=ws_to_split)
        self.assertEqual(len(extracted_banks), number_of_expected_banks)
        # Each extracted bank is named "<input>-<bank index>" starting from 1
        for i, ws in enumerate(extracted_banks):
            expected_name = input_name + '-' + str(i + 1)
            self.assertEqual(expected_name, ws.getName())

    def test_generate_run_numbers(self):
        # Mantid handles most of this for us

        # First check it can handle int types
        test_int_input = 123
        int_input_return = common.generate_run_numbers(run_number_string=test_int_input)
        # Expect the returned type is a list
        self.assertEqual(int_input_return, [test_int_input])

        # Check it can handle 10-12 and is inclusive
        input_string = "10-12"
        expected_values = [10, 11, 12]
        returned_values = common.generate_run_numbers(run_number_string=input_string)
        self.assertEqual(expected_values, returned_values)

        # Check that the underscore syntax used by older pearl_routines scripts is handled
        input_string = "10_12"
        returned_values = common.generate_run_numbers(run_number_string=input_string)
        self.assertEqual(expected_values, returned_values)

        # Check that the comma notation is working
        input_string = "20, 22, 24"
        expected_values = [20, 22, 24]
        returned_values = common.generate_run_numbers(run_number_string=input_string)
        self.assertEqual(expected_values, returned_values)

        # Check we can use a combination of both
        input_string = "30-33, 36, 38-39"
        expected_values = [30, 31, 32, 33, 36, 38, 39]
        returned_values = common.generate_run_numbers(run_number_string=input_string)
        self.assertEqual(expected_values, returned_values)

    def test_generate_run_numbers_fails(self):
        run_input_sting = "text-string"

        with assertRaisesRegex(self, ValueError, "Could not generate run numbers from this input"):
            common.generate_run_numbers(run_number_string=run_input_sting)

        # Check it says what the actual string was
        with assertRaisesRegex(self, ValueError, run_input_sting):
            common.generate_run_numbers(run_number_string=run_input_sting)

    def test_remove_intermediate_workspace(self):
        ws_list = []
        ws_names_list = []

        ws_single_name = "remove_intermediate_ws-single"
        ws_single = mantid.CreateSampleWorkspace(OutputWorkspace=ws_single_name, NumBanks=1, BankPixelWidth=1,
                                                 XMax=2, BinWidth=1)

        for i in range(0, 3):
            out_name = "remove_intermediate_ws_" + str(i)
            ws_names_list.append(out_name)
            ws_list.append(mantid.CreateSampleWorkspace(OutputWorkspace=out_name, NumBanks=1, BankPixelWidth=1,
                                                        XMax=2, BinWidth=1))

        # Check single workspaces are removed
        self.assertEqual(True, mantid.mtd.doesExist(ws_single_name))
        common.remove_intermediate_workspace(ws_single)
        self.assertEqual(False, mantid.mtd.doesExist(ws_single_name))

        # Next check lists are handled
        for ws_name in ws_names_list:
            self.assertEqual(True, mantid.mtd.doesExist(ws_name))

        common.remove_intermediate_workspace(ws_list)

        for ws_name in ws_names_list:
            self.assertEqual(False, mantid.mtd.doesExist(ws_name))

    def test_run_normalise_by_current(self):
        initial_value = 17
        prtn_charge = '10.0'
        expected_value = initial_value / float(prtn_charge)

        # Create two workspaces
        ws = mantid.CreateWorkspace(DataX=0, DataY=initial_value)

        # Add Good Proton Charge Log
        mantid.AddSampleLog(Workspace=ws, LogName='gd_prtn_chrg', LogText=prtn_charge, LogType='Number')

        self.assertEqual(initial_value, ws.dataY(0)[0])
        common.run_normalise_by_current(ws)
        # After normalisation the value should be divided by the proton charge
        self.assertAlmostEqual(expected_value, ws.dataY(0)[0], delta=1e-8)

    def test_spline_workspaces(self):
        ws_list = []
        for i in range(1, 4):
            out_name = "test_spline_vanadium-" + str(i)
            ws_list.append(mantid.CreateSampleWorkspace(OutputWorkspace=out_name, NumBanks=1, BankPixelWidth=1,
                                                        XMax=100, BinWidth=1))

        splined_list = common.spline_workspaces(focused_vanadium_spectra=ws_list, num_splines=10)
        # Spot-check splined values at a few bins on every output workspace
        for ws in splined_list:
            self.assertAlmostEqual(ws.dataY(0)[25], 0.28576649, delta=1e-8)
            self.assertAlmostEqual(ws.dataY(0)[50], 0.37745918, delta=1e-8)
            self.assertAlmostEqual(ws.dataY(0)[75], 0.28133096, delta=1e-8)

        for input_ws, splined_ws in zip(ws_list, splined_list):
            mantid.DeleteWorkspace(input_ws)
            mantid.DeleteWorkspace(splined_ws)


if __name__ == "__main__":
    unittest.main()
from __future__ import (absolute_import, division, print_function)
import mantid
import unittest
import warnings
from six import assertRaisesRegex, assertRegex
from isis_powder.routines import InstrumentSettings, ParamMapEntry
class ISISPowderInstrumentSettingsTest(unittest.TestCase):
    """Unit tests for the InstrumentSettings attribute-mapping container."""

    def test_user_missing_attribute_is_detected(self):
        # Accessing a mapped-but-unset attribute should tell the user it is required
        param_entry = ParamMapEntry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name")

        inst_settings_obj = InstrumentSettings.InstrumentSettings(param_map=[param_entry])

        with assertRaisesRegex(self, AttributeError, "is required but was not set or passed"):
            foo = inst_settings_obj.script_facing_name
            del foo

    def test_developer_missing_attribute_is_detected(self):
        # Accessing an attribute with no mapping at all should point at the dev team
        param_entry = ParamMapEntry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name")

        inst_settings_obj = InstrumentSettings.InstrumentSettings(param_map=[param_entry])

        with assertRaisesRegex(self, AttributeError, "Please contact the development team"):
            foo = inst_settings_obj.not_known
            del foo

    def test_set_attribute_is_found(self):
        expected_value = 100
        param_entry = ParamMapEntry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name")
        keyword_args = {"user_facing_name": expected_value}

        inst_settings_obj = InstrumentSettings.InstrumentSettings(param_map=[param_entry], kwargs=keyword_args)
        self.assertEqual(inst_settings_obj.script_facing_name, expected_value)

    def test_updating_attributes_produces_warning_on_init(self):
        original_value = 123
        new_value = 456
        param_entry = ParamMapEntry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name")

        # First check this works on init
        adv_config = {"user_facing_name": original_value}
        keyword_args = {"user_facing_name": new_value}

        with warnings.catch_warnings(record=True) as warning_capture:
            warnings.simplefilter("always")
            inst_settings_obj = InstrumentSettings.InstrumentSettings(param_map=[param_entry], kwargs=keyword_args,
                                                                      adv_conf_dict=adv_config)

            # The warning should mention both the old and the new value
            assertRegex(self, str(warning_capture[-1].message), "which was previously set to")
            assertRegex(self, str(warning_capture[-1].message), str(original_value))
            assertRegex(self, str(warning_capture[-1].message), str(new_value))

        self.assertEqual(inst_settings_obj.script_facing_name, new_value)

    def test_updating_attributes_produces_warning(self):
        original_value = 123
        new_value = 456
        second_value = 567

        param_entry = ParamMapEntry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name")

        # First check this works on init
        adv_config = {"user_facing_name": original_value}
        config_dict = {"user_facing_name": new_value}
        keyword_args = {"user_facing_name": second_value}

        inst_settings_obj = InstrumentSettings.InstrumentSettings(param_map=[param_entry], adv_conf_dict=adv_config)
        self.assertEqual(inst_settings_obj.script_facing_name, original_value)

        # Next try to update the attribute and check it gives a warning
        with warnings.catch_warnings(record=True) as warning_capture:
            warnings.simplefilter("always")

            inst_settings_obj.update_attributes(basic_config=config_dict)
            assertRegex(self, str(warning_capture[-1].message), "which was previously set to")
            assertRegex(self, str(warning_capture[-1].message), str(original_value))
            assertRegex(self, str(warning_capture[-1].message), str(new_value))
            warnings_current_length = len(warning_capture)

            # Then check that we only get one additional warning when replacing values again not two
            inst_settings_obj.update_attributes(kwargs=keyword_args)
            self.assertEqual(warnings_current_length + 1, len(warning_capture))
            warnings_current_length = len(warning_capture)

            # Check that the suppress field works by setting it back to second value
            inst_settings_obj.update_attributes(kwargs=config_dict, suppress_warnings=True)
            self.assertEqual(warnings_current_length, len(warning_capture))

            # Check we only get no additional warnings from setting the value to the same
            inst_settings_obj.update_attributes(kwargs=config_dict)
            self.assertEqual(warnings_current_length, len(warning_capture))

        # Finally check it has taken the new value (most recently set)
        self.assertEqual(inst_settings_obj.script_facing_name, new_value)

    def test_inst_settings_enters_into_dicts(self):
        param_entries = [
            ParamMapEntry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name"),
            ParamMapEntry.ParamMapEntry(ext_name="user_facing_name2", int_name="script_facing_name2")
        ]

        expected_value = 101
        # Check recursion of a dictionary containing a dictionary takes place
        example_dict = {"user_facing_name": expected_value}
        nested_dict = {"some_random_name": example_dict}

        inst_settings_obj = InstrumentSettings.InstrumentSettings(param_map=param_entries, adv_conf_dict=nested_dict)
        self.assertEqual(inst_settings_obj.script_facing_name, expected_value)

        # Next check that any attributes that a mixed dictionary contains are added
        mixed_dict = {"some_random_name2": example_dict,
                      "user_facing_name2": expected_value * 2}

        second_inst_settings_obj = InstrumentSettings.InstrumentSettings(param_map=param_entries,
                                                                         adv_conf_dict=mixed_dict)
        self.assertEqual(second_inst_settings_obj.script_facing_name, expected_value)
        self.assertEqual(second_inst_settings_obj.script_facing_name2, expected_value * 2)

    def test_check_enum_check_and_set_works(self):
        param_entry = ParamMapEntry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name",
                                                  enum_class=SampleEnum)

        # First test we cannot set it to a different value
        incorrect_value_dict = {"user_facing_name": "wrong"}
        with assertRaisesRegex(self, ValueError, "The user specified value: 'wrong' is unknown"):
            inst_obj = InstrumentSettings.InstrumentSettings(param_map=[param_entry],
                                                             adv_conf_dict=incorrect_value_dict)

        # Check that we can set a known good enum
        good_value_dict = {"user_facing_name": SampleEnum.a_bar}
        inst_obj = InstrumentSettings.InstrumentSettings(param_map=[param_entry], adv_conf_dict=good_value_dict)
        self.assertEqual(inst_obj.script_facing_name, SampleEnum.a_bar)

        # Next check it passes on mixed case and converts it back to the correct case
        different_case_dict = {"user_facing_name": SampleEnum.a_bar.upper()}
        inst_obj = InstrumentSettings.InstrumentSettings(param_map=[param_entry], adv_conf_dict=different_case_dict)
        self.assertEqual(inst_obj.script_facing_name, SampleEnum.a_bar)

    def test_optional_attribute_works(self):
        optional_param_entry = ParamMapEntry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name",
                                                           optional=True)

        param_entry = ParamMapEntry.ParamMapEntry(ext_name="user_facing_name", int_name="script_facing_name",
                                                  optional=False)

        # Check that not passing an optional and trying to access it works correctly
        opt_inst_obj = InstrumentSettings.InstrumentSettings(param_map=[optional_param_entry])
        self.assertIsNone(opt_inst_obj.script_facing_name)

        # Check that setting optional to false still throws
        inst_obj = InstrumentSettings.InstrumentSettings(param_map=[param_entry])
        with self.assertRaises(AttributeError):
            foo = inst_obj.script_facing_name

        # Check if we do set an optional from fresh it does not emit a warning and is set
        optional_value = 100
        random_value_dict = {"user_facing_name": 8}
        optional_value_dict = {"user_facing_name": optional_value}

        # Check that setting a value from fresh does not emit a warning
        with warnings.catch_warnings(record=True) as warnings_capture:
            warnings.simplefilter("always")
            num_warnings_before = len(warnings_capture)
            opt_inst_obj.update_attributes(kwargs=random_value_dict)
            self.assertEqual(len(warnings_capture), num_warnings_before)

            # Then check setting it a second time does
            opt_inst_obj.update_attributes(kwargs=optional_value_dict)
            self.assertEqual(len(warnings_capture), num_warnings_before + 1)

        self.assertEqual(opt_inst_obj.script_facing_name, optional_value)
class SampleEnum(object):
    """Minimal enum-style class used by the tests above to exercise enum handling."""
    enum_friendly_name = "test_enum_name"
    # The mixed casing is intentional
    a_foo = "a foo"
    a_bar = "A BAR"


if __name__ == "__main__":
    unittest.main()
from __future__ import (absolute_import, division, print_function)
import mantid
import tempfile
import os
import unittest
import warnings
from six import assertRaisesRegex
from isis_powder.routines import yaml_parser
class ISISPowderYamlParserTest(unittest.TestCase):
    def setUp(self):
        # Track every temp file created during a test so tearDown can remove it
        self.temp_file_paths = []
    def tearDown(self):
        # Best-effort cleanup of temp files; warn rather than fail the test run
        for path in self.temp_file_paths:
            try:
                os.remove(path)
            except OSError:
                warnings.warn("Failed to remove unit test temp file at the following path:\n" + str(path))

        self.temp_file_paths = []
def get_temp_file_handle(self):
# Set to mode manually so we don't need to convert to binary in Python 3
file_handle = tempfile.NamedTemporaryFile(delete=False, mode="w+")
self.temp_file_paths.append(file_handle.name)
return file_handle
def test_dictionary_parses_correctly(self):
expected_value = "bar"
second_value = "foo"
# Write in two ranges to check it determines the correct one
yaml_handle = self.get_temp_file_handle()
yaml_handle.write("100-200:\n")
yaml_handle.write(" test_item: '" + expected_value + "'\n")
yaml_handle.write("201-:\n")
yaml_handle.write(" test_item: '" + second_value + "'\n")
# Close handle so the test can access it
yaml_handle.close()
# Check a random value in the mid point
returned_dict = yaml_parser.get_run_dictionary(run_number_string="150", file_path=yaml_handle.name)
self.assertEqual(returned_dict["test_item"], expected_value)
# Check lower bound is respected
returned_dict = yaml_parser.get_run_dictionary(run_number_string="100", file_path=yaml_handle.name)
self.assertEqual(returned_dict["test_item"], expected_value, "Lower bound not respected")
# Check upper bound is respected
returned_dict = yaml_parser.get_run_dictionary(run_number_string="200", file_path=yaml_handle.name)
self.assertEqual(returned_dict["test_item"], expected_value, "Upper bound not respected")
# Check we can handle a range
returned_dict = yaml_parser.get_run_dictionary(run_number_string="120-130", file_path=yaml_handle.name)
self.assertEqual(returned_dict["test_item"], expected_value, "Range returned incorrect value")
# Check the the second dictionary works with unbounded ranges
returned_dict = yaml_parser.get_run_dictionary(run_number_string="205", file_path=yaml_handle.name)
self.assertEqual(returned_dict["test_item"], second_value)
def test_file_not_found_gives_sane_err(self):
# Create a file then delete it so we know it cannot exist at that path
file_handle = tempfile.NamedTemporaryFile(delete=False)
file_path = file_handle.name
file_handle.close()
os.remove(file_path)
if os.path.exists(file_path):
self.fail("File exists after deleting cannot continue this test")
# Check the error message is there
with assertRaisesRegex(self, ValueError, "Config file not found at path"):
yaml_parser.get_run_dictionary(run_number_string="1", file_path=file_path)
def test_is_run_range_unbounded(self):
# Check a valid unbounded range is detected
result = yaml_parser.is_run_range_key_unbounded("10-")
self.assertTrue(result, "Unbounded range not detected")
# Check a bounded range isn't incorrectly detected
result = yaml_parser.is_run_range_key_unbounded("22")
self.assertFalse(result, "Single run incorrectly detected")
# Check a range of runs isn't detected incorrectly
result = yaml_parser.is_run_range_key_unbounded("33-44")
self.assertFalse(result, "Bounded range incorrectly detected")
# What about if it ends in a comma syntax (this will throw elsewhere in the script anyway)
result = yaml_parser.is_run_range_key_unbounded("55-66,")
self.assertFalse(result, "Invalid ending character detected as an unbounded")
def test_blank_file_gives_sane_err(self):
file_handle = self.get_temp_file_handle()
# Write nothing and close
file_path = file_handle.name
file_handle.close()
with assertRaisesRegex(self, ValueError, "YAML files appears to be empty at"):
yaml_parser.get_run_dictionary(run_number_string=1, file_path=file_path)
def test_run_number_not_found_gives_sane_err(self):
expected_val = "yamlParserTest"
file_handle = self.get_temp_file_handle()
file_handle.write("10-20:\n")
file_handle.write(" test_key: '" + expected_val + "'\n")
file_handle.write("21-:\n")
file_handle.write(" test_key: '" + expected_val + "'\n")
file_path = file_handle.name
file_handle.close()
# Test a value in the middle of 1-10
with assertRaisesRegex(self, ValueError, "Run number 5 not recognised in calibration mapping"):
yaml_parser.get_run_dictionary(run_number_string="5", file_path=file_path)
# Check on edge of invalid numbers
with assertRaisesRegex(self, ValueError, "Run number 9 not recognised in calibration mapping"):
yaml_parser.get_run_dictionary(run_number_string=9, file_path=file_path)
# What about a range of numbers
with assertRaisesRegex(self, ValueError, "Run number 2 not recognised in calibration mapping"):
yaml_parser.get_run_dictionary(run_number_string="2-8", file_path=file_path)
# Check valid number still works
returned_dict = yaml_parser.get_run_dictionary(run_number_string="10", file_path=file_path)
self.assertEqual(returned_dict["test_key"], expected_val)
def test_yaml_sanity_check_picks_up_two_unbounded(self):
# Check we can detect two unbounded ranges
file_handle = self.get_temp_file_handle()
file_handle.write("10-:\n")
file_handle.write("20-:\n")
file_path = file_handle.name
file_handle.close()
with assertRaisesRegex(self, ValueError, "Seen multiple unbounded keys in mapping file"):
yaml_parser.get_run_dictionary(run_number_string="11", file_path=file_path)
def test_yaml_sanity_detects_val_larger_than_unbound(self):
# If we have a value that is larger the the unbounded range can we detect this
file_handle = self.get_temp_file_handle()
file_handle.write("30-:\n")
file_handle.write("35:\n")
file_path = file_handle.name
file_handle.close()
with assertRaisesRegex(self, ValueError, "Found a run range in calibration mapping overlaps an unbounded run "
"range"):
yaml_parser.get_run_dictionary(run_number_string="32", file_path=file_path)
if __name__ == "__main__":
    # Allow running this test file directly from the command line.
    unittest.main()
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment