# NOTE(review): removed web-viewer navigation residue ("Newer" / "Older")
# captured during source extraction; it was never part of this script.
#!/usr/bin/env python
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import argparse  # used by main() to build the command-line parser
import os
import sys
import time
from multiprocessing import Process, Array, Manager, Value, Lock

# If any tests happen to hit a PyQt4 import make sure item uses version 2 of the api
# Remove this when everything is switched to qtpy
try:
    import sip
    sip.setapi('QString', 2)
    sip.setapi('QVariant', 2)
    sip.setapi('QDate', 2)
    sip.setapi('QDateTime', 2)
    sip.setapi('QTextStream', 2)
    sip.setapi('QTime', 2)
    sip.setapi('QUrl', 2)
except (AttributeError, ImportError):
    # sip is absent, or too old to provide setapi(); nothing to configure.
    pass

# Prevents errors in systemtests that use matplotlib directly
os.environ['MPLBACKEND'] = 'Agg'

#########################################################################
# Set up the command line options
#########################################################################
# Paths are resolved relative to this script so the runner works from any CWD.
THIS_MODULE_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_FRAMEWORK_LOC = os.path.realpath(os.path.join(THIS_MODULE_DIR, "..", "lib", "systemtests"))
DATA_DIRS_LIST_PATH = os.path.join(THIS_MODULE_DIR, "datasearch-directories.txt")
SAVE_DIR_LIST_PATH = os.path.join(THIS_MODULE_DIR, "defaultsave-directory.txt")
def kill_children(processes):
    """Terminate every child worker process, then exit with status 1 (NOT OK).

    Used when the run is aborted (KeyboardInterrupt) or hits an unexpected
    error, so no orphaned test processes are left behind and the calling
    shell sees a failure.
    """
    # Stop every worker before abandoning the run.
    for child in processes:
        child.terminate()
    # Exit with status 1 - NOT OK
    sys.exit(1)
def main():
    """Drive a full system-test run.

    Parses the command line, configures a temporary Mantid properties file,
    builds the master test list, executes the tests across ``--parallel``
    worker processes (or prints them for ``--dry-run``), then restores the
    configuration and prints a pass/fail summary, exiting 1 on failure.

    NOTE(review): this body contains runs of bare integer literals (e.g.
    ``56``); they are line-number residue from a garbled source extraction
    and stand where original statements were lost.  Names used below but
    not defined in the visible code (``systemtesting``, ``start_time`` /
    ``end_time``, ``itervalues``) presumably come from those lost regions —
    confirm against the upstream file before relying on this copy.
    """
    # NOTE(review): extraction residue — original source lines 56-94 lost.
    56
    57
    58
    59
    60
    61
    62
    63
    64
    65
    66
    67
    68
    69
    70
    71
    72
    73
    74
    75
    76
    77
    78
    79
    80
    81
    82
    83
    84
    85
    86
    87
    88
    89
    90
    91
    92
    93
    94
    # Usage text; joined into the argparse description below.
    info = [
        "This program will configure mantid run all of the system tests located in",
        "the 'tests/analysis' directory.",
        "This program will create a temporary 'Mantid.user.properties' file which",
        "it will rename to 'Mantid.user.properties.systest' upon completion. The",
        "current version of the code does not print to stdout while the test is",
        "running, so the impatient user may ^C to kill the process. In this case",
        "all of the tests that haven't been run will be marked as skipped in the", "full logs."
    ]
    parser = argparse.ArgumentParser(description=' '.join(info))
    parser.add_argument("--email", action="store_true", help="send an email with test status.")
    parser.add_argument(
        "-x",
        "--executable",
        dest="executable",
        help="The executable path used to run each test. Default is the sys.executable")
    parser.add_argument("-a",
                        "--exec-args",
                        dest="execargs",
                        help="Arguments passed to executable for each test Default=[]")
    parser.add_argument("--frameworkLoc",
                        help="location of the system test framework (default=%s)" %
                        DEFAULT_FRAMEWORK_LOC)
    parser.add_argument("--disablepropmake",
                        action="store_false",
                        dest="makeprop",
                        help="By default this will move your properties file out of the " +
                        "way and create a new one. This option turns off this behavior.")
    parser.add_argument(
        "-R",
        "--tests-regex",
        dest="testsInclude",
        help="String specifying which tests to run. Simply uses 'string in testname'.")
    parser.add_argument(
        "-E",
        "--excluderegex",
        dest="testsExclude",
        help="String specifying which tests to not run. Simply uses 'string in testname'.")
    loglevelChoices = ["error", "warning", "notice", "information", "debug"]
    # NOTE(review): extraction residue — original source lines 96-124 lost.
    96
    97
    98
    99
    100
    101
    102
    103
    104
    105
    106
    107
    108
    109
    110
    111
    112
    113
    114
    115
    116
    117
    118
    119
    120
    121
    122
    123
    124
    parser.add_argument("-l",
                        "--loglevel",
                        dest="loglevel",
                        choices=loglevelChoices,
                        help="Set the log level for test running: [" + ', '.join(loglevelChoices) +
                        "]")
    parser.add_argument(
        "-j",
        "--parallel",
        dest="ncores",
        action="store",
        type=int,
        help="The number of instances to run in parallel, like the -j option in ctest. Default is 1."
    )
    parser.add_argument("-q",
                        "--quiet",
                        dest="quiet",
                        action="store_true",
                        help="Prints detailed log to terminal.")
    parser.add_argument(
        "-c",
        "--clean",
        dest="clean",
        action="store_true",
        help="Performs a cleanup of the data generated by the test suite (does not run the tests).")
    parser.add_argument("--output-on-failure",
                        dest="output_on_failure",
                        action="store_true",
                        help="Print full log for failed tests.")
    parser.add_argument('-N', '--dry-run',
                        dest='dry_run',
                        action='store_true',
                        help='Do not run tests just print what would be run.')
    # NOTE(review): extraction residue — original source lines 129-161 lost.
    129
    130
    131
    132
    133
    134
    135
    136
    137
    138
    139
    140
    141
    142
    143
    144
    145
    146
    147
    148
    149
    150
    151
    152
    153
    154
    155
    156
    157
    158
    159
    160
    161
    parser.add_argument("--showskipped",
                        dest="showskipped",
                        action="store_true",
                        help="List the skipped tests.")
    parser.add_argument("-d",
                        "--datapaths",
                        dest="datapaths",
                        help="A semicolon-separated list of directories to search for data")
    parser.add_argument("-s",
                        "--savedir",
                        dest="savedir",
                        help="A directory to use for the Mantid save path")
    parser.add_argument("--archivesearch",
                        dest="archivesearch",
                        action="store_true",
                        help="Turn on archive search for file finder.")
    parser.add_argument("--exclude-in-pull-requests",
                        dest="exclude_in_pr_builds",
                        action="store_true",
                        help="Skip tests that are not run in pull request builds")
    parser.add_argument("--ignore-failed-imports",
                        dest="ignore_failed_imports",
                        action="store_true",
                        help="Skip tests that do not import correctly rather raising an error.")
    parser.set_defaults(frameworkLoc=DEFAULT_FRAMEWORK_LOC,
                        executable=sys.executable,
                        makeprop=True,
                        loglevel="information",
                        ncores=1,
                        quiet=False,
                        output_on_failure=False,
                        clean=False)
    options = parser.parse_args()
    # The framework location is only known now, so extend the import path at
    # runtime rather than at module import.
    sys.path.append(options.frameworkLoc)
    # allow PythonInterface/test to be discoverable
    # NOTE(review): `systemtesting` is not imported in the visible code —
    # its import presumably sits in one of the lost regions above.
    sys.path.append(systemtesting.FRAMEWORK_PYTHONINTERFACE_TEST_DIR)
    #########################################################################
    # Configure mantid
    #########################################################################
    # Parse files containing the search and save directories, unless otherwise given
    data_paths = options.datapaths
    if data_paths is None or data_paths == "":
        with open(DATA_DIRS_LIST_PATH, 'r') as f_handle:
            data_paths = f_handle.read().strip()
    save_dir = options.savedir
    if save_dir is None or save_dir == "":
        with open(SAVE_DIR_LIST_PATH, 'r') as f_handle:
            save_dir = f_handle.read().strip()
    # Configure properties file
    mtdconf = systemtesting.MantidFrameworkConfig(loglevel=options.loglevel,
                                                  data_dirs=data_paths,
                                                  save_dir=save_dir,
                                                  archivesearch=options.archivesearch)
    if options.makeprop:
        mtdconf.config()
    #########################################################################
    # Generate list of tests
    #########################################################################
    runner = systemtesting.TestRunner(executable=options.executable,
                                      exec_args=options.execargs,
                                      escape_quotes=True)
    tmgr = systemtesting.TestManager(test_loc=mtdconf.testDir,
                                     runner=runner,
                                     quiet=options.quiet,
                                     testsInclude=options.testsInclude,
                                     testsExclude=options.testsExclude,
                                     exclude_in_pr_builds=options.exclude_in_pr_builds,
                                     ignore_failed_imports=options.ignore_failed_imports)
    test_counts, test_list, test_stats, files_required_by_test_module, data_file_lock_status = \
        tmgr.generateMasterTestList()
    number_of_test_modules = len(test_list.keys())
    # test_stats layout (as used below): [0] runnable tests, [1] longest
    # test-name length, [2] total tests — TODO confirm against TestManager.
    total_number_of_tests = test_stats[0]
    maximum_name_length = test_stats[1]
    #########################################################################
    # Run the tests with a task scheduler
    #########################################################################
    # Print message if this is a cleanup run instead of a normal test run
    if options.clean:
        print("Performing cleanup run")
    # Cleanup any pre-existing XML reporter files
    entries = os.listdir(mtdconf.saveDir)
    for file in entries:
        if file.startswith('TEST-systemtests-') and file.endswith('.xml'):
            os.remove(os.path.join(mtdconf.saveDir, file))
    # NOTE(review): extraction residue — original source lines 230-302 lost.
    230
    231
    232
    233
    234
    235
    236
    237
    238
    239
    240
    241
    242
    243
    244
    245
    246
    247
    248
    249
    250
    251
    252
    253
    254
    255
    256
    257
    258
    259
    260
    261
    262
    263
    264
    265
    266
    267
    268
    269
    270
    271
    272
    273
    274
    275
    276
    277
    278
    279
    280
    281
    282
    283
    284
    285
    286
    287
    288
    289
    290
    291
    292
    293
    294
    295
    296
    297
    298
    299
    300
    301
    302
    if not options.dry_run:
        # Multi-core processes --------------
        # An array to hold the processes
        processes = []
        # A shared array to hold skipped and failed tests + status
        results_array = Array('i', [0] * (3 * options.ncores))
        # A manager to create a shared dict to store names of skipped and failed tests
        manager = Manager()
        # A shared dict to store names of skipped and failed tests
        status_dict = manager.dict()
        # A shared dict to store the global list of tests
        tests_dict = manager.dict()
        # A shared array with 0s and 1s to keep track of completed tests
        tests_lock = Array('i', [0] * number_of_test_modules)
        # A shared value to count the number of remaining test modules
        tests_left = Value('i', number_of_test_modules)
        # A shared value to count the number of completed tests
        tests_done = Value('i', 0)
        # A shared dict to store which data files are required by each test module
        required_files_dict = manager.dict()
        for key in files_required_by_test_module.keys():
            required_files_dict[key] = files_required_by_test_module[key]
        # A shared dict to store the locked status of each data file
        locked_files_dict = manager.dict()
        for key in data_file_lock_status.keys():
            locked_files_dict[key] = data_file_lock_status[key]
        # Store in reverse number of number of tests in each module into the shared dictionary
        reverse_sorted_dict = [(k, test_counts[k])
                               for k in sorted(test_counts, key=test_counts.get, reverse=True)]
        counter = 0
        for key, value in reverse_sorted_dict:
            tests_dict[str(counter)] = test_list[key]
            counter += 1
            if not options.quiet:
                print("Test module {} has {} tests:".format(key, value))
                for t in test_list[key]:
                    print(" - {}".format(t._fqtestname))
                print()
        # Define a lock
        lock = Lock()
        # Prepare ncores processes
        for ip in range(options.ncores):
            processes.append(
                Process(target=systemtesting.testThreadsLoop,
                        args=(mtdconf.testDir, mtdconf.saveDir, mtdconf.dataDir, options, tests_dict,
                              tests_lock, tests_left, results_array, status_dict, total_number_of_tests,
                              maximum_name_length, tests_done, ip, lock, required_files_dict,
                              locked_files_dict)))
        # Start and join processes
        try:
            for p in processes:
                p.start()
            for p in processes:
                p.join()
        except KeyboardInterrupt:
            print("Killed via KeyboardInterrupt")
            kill_children(processes)
        except Exception as e:
            print("Unexpected exception occured: {}".format(e))
            kill_children(processes)
        # Gather results
        # results_array is laid out as ncores slots each of skipped counts,
        # failed counts, then per-worker success flags.
        skipped_tests = sum(results_array[:options.ncores]) + (test_stats[2] - test_stats[0])
        failed_tests = sum(results_array[options.ncores:2 * options.ncores])
        total_tests = test_stats[2]
        # Find minimum of status: if min == 0, then success is False
        success = bool(min(results_array[2 * options.ncores:3 * options.ncores]))
    else:
        print("Dry run requested. Skipping execution")
    #########################################################################
    # Cleanup
    #########################################################################
    # Put the configuration back to its original state
    if options.makeprop:
        mtdconf.restoreconfig()
    # NOTE(review): start_time/end_time are never assigned in the visible
    # code — presumably set in the lost regions above; verify upstream.
    total_runtime = time.strftime("%H:%M:%S", time.gmtime(end_time - start_time))
    #########################################################################
    # Output summary to terminal
    #########################################################################
    if options.dry_run:
        print()
        print("Tests that would be executed:")
        # NOTE(review): `itervalues` is undefined in the visible code (a
        # six/py2 relic); its import likely sits in a lost region.
        for suites in itervalues(test_list):
            for suite in suites:
                print(' ' + suite.name)
    elif not options.clean:
        nwidth = 80
        banner = "#" * nwidth
        print("Total runtime: " + total_runtime)
        if (skipped_tests > 0) and options.showskipped:
            print("\nSKIPPED:")
            for key in status_dict.keys():
                if status_dict[key] == 'skipped':
                    print(key)
        if failed_tests > 0:
            print("\nFAILED:")
            for key in status_dict.keys():
                if status_dict[key] == 'failed':
                    print(key)
        # Report global statistics on tests
        print()
        if skipped_tests == total_tests:
            print("All tests were skipped")
            success = False  # fail if everything was skipped
        # NOTE(review): when every test was skipped the division below is
        # by zero — an `else:` guard was probably lost in extraction.
        percent = 1. - float(failed_tests) / float(total_tests - skipped_tests)
        percent = int(100. * percent)
        print("%d%s tests passed, %d tests failed out of %d (%d skipped)" %
              (percent, '%', failed_tests, (total_tests - skipped_tests), skipped_tests))
        print('All tests passed? ' + str(success))
    # NOTE(review): `success` is unbound on the dry-run path — likely
    # handled by code lost in extraction.
    if not success:
        sys.exit(1)
if __name__ == "__main__":
    # Script entry point: run the system-test driver.
    main()