#!/usr/bin/env python3
"""
Program to manage and run ATS regression tests.
With inspiration, and in some places just stolen, from Ben Andre's
PFloTran regression test suite.
Author: Ethan Coon ([email protected])
"""
import argparse
import os
import sys
import textwrap
import time


def commandline_options():
"""
Process the command line arguments and return them as a dict.
"""
    parser = argparse.ArgumentParser(description='Run an ATS regression '
                                     'test or suite of tests.')
parser.add_argument('--ats', default=None,
help='Path to ATS source directory')
parser.add_argument('--backtrace', action='store_true',
help='show exception backtraces as extra debugging '
'output')
parser.add_argument('--check-only', action='store_true', default=False,
help="diff the existing regression files without "
"running ATS again.")
parser.add_argument('--check-performance', action='store_true', default=False,
help="include the performance metrics ('SOLUTION' blocks) "
"in regression checks.")
parser.add_argument('--debug', action='store_true',
help='extra debugging output')
    parser.add_argument('-d', '--dry-run',
                        default=False, action='store_true',
                        help='perform a dry run: set up the test commands '
                        'but don\'t run them')
parser.add_argument('-e', '--executable', default=None,
help='path to executable to use for testing')
parser.add_argument('--list-available-suites', default=False, action='store_true',
help='print the list of test suites from the config '
'file and exit')
parser.add_argument('--list-available-tests', default=False, action='store_true',
help='print the list of tests from the config file '
'and exit')
parser.add_argument('--list-tests', default=False, action='store_true',
help='print the list of selected tests from the config '
'file and exit')
    parser.add_argument('-m', '--mpiexec', default=None,
                        help="path to the executable for mpiexec (mpirun, "
                        "etc.) on the current machine.")
    parser.add_argument('--mpiexec-global-args', default=None,
                        help="arguments that must be provided to the mpiexec "
                        "executable")
parser.add_argument('--mpiexec-numprocs-flag', default='-n',
help="mpiexec flag to set number of MPI ranks")
parser.add_argument('--always-mpiexec', default=False, action='store_true',
help="use mpiexec to launch all tests")
parser.add_argument('-n', '--new-tests', default=False, action="store_true",
help="Indicate that there are new tests being run. "
"Skips the output check and creates a new gold file.")
    parser.add_argument('--save-dt-history', default=False, action="store_true",
                        help="When used with --new-tests, does an additional "
                        "run to get the timestep history for a more accurate "
                        "comparison.")
parser.add_argument('-s', '--suites', nargs="+", default=[],
help='space separated list of test suite names')
parser.add_argument('-t', '--tests', nargs="+", default=[],
help='space separated list of test names')
parser.add_argument('--exclude', nargs="+", default=[],
help='space separated list of test names to exclude')
    parser.add_argument('--timeout', nargs=1, default=None,
                        help="test timeout, after which a job is assumed to "
                        "have hung and is killed")
parser.add_argument('-u', '--update',
action="store_true", default=False,
help='update the tests listed by the "--tests" '
'option, with the current output becoming the new '
'gold standard')
parser.add_argument('configs', metavar='CONFIG_LOCATION', type=str,
nargs='+', help='list of directories and/or configuration '
'files to parse for suites and tests')
options = parser.parse_args()
    return options


def main(options):
txtwrap = textwrap.TextWrapper(width=78, subsequent_indent=4*" ")
root_dir = os.getcwd()
if options.ats is not None:
sys.path.append(os.path.join(options.ats, 'tools', 'testing'))
if 'ATS_SRC_DIR' in os.environ:
sys.path.append(os.path.join(os.environ['ATS_SRC_DIR'], 'tools', 'testing'))
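    # test_manager lives in <ats>/tools/testing, so it can only be imported
    # after one of the path options above has put that directory on sys.path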
import test_manager
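    # any of the --list-* options means we only print information, so skip
    # the executable/mpiexec checks and suppress console output below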
silent = options.list_tests or options.list_available_suites or options.list_available_tests
testlog = test_manager.setup_testlog(txtwrap, silent)
test_manager.check_options(options)
if silent:
executable = None
mpiexec = None
else:
executable = test_manager.check_for_executable(options, testlog)
mpiexec = test_manager.check_for_mpiexec(options, testlog)
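    # expand the positional CONFIG_LOCATION arguments (directories and/or
    # files) into a flat list of configuration files to process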
config_file_list = test_manager.generate_config_file_list(options)
if not silent:
print("Running ATS regression tests :")
# loop through config files, cd into the appropriate directory,
# read the appropriate config file and run the various tests.
start = time.time()
report = {}
for config_file in config_file_list:
print(80 * '=', file=testlog)
print(f'Running {config_file}', file=testlog)
header = os.path.split(config_file)[-1]
        header = header[:20].ljust(20)
if not silent:
print(f'{header} | ', end='', file=sys.stdout)
# get the absolute path of the directory
test_dir = os.path.dirname(config_file)
# cd into the test directory so that the relative paths in
# test files are correct
os.chdir(test_dir)
if options.debug:
print("Changed to working directory: {0}".format(test_dir))
tm = test_manager.RegressionTestManager(executable, mpiexec)
if options.debug:
tm.debug(True)
# get the relative file name
filename = os.path.basename(config_file)
tm.generate_tests(filename,
options.suites,
options.tests,
options.exclude,
options.timeout,
options.check_performance,
testlog)
if options.debug:
print(70 * '-')
print(tm)
if options.list_available_suites:
tm.display_available_suites()
if options.list_available_tests:
tm.display_available_tests()
if options.list_tests:
tm.display_selected_tests()
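        # under the --list-* options nothing is executed; otherwise run the
        # selected tests and record their status for the final summary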
if not silent:
tm.run_tests(options.dry_run,
options.update,
options.new_tests,
options.check_only,
False,
testlog,
options.save_dt_history)
report[filename] = tm.run_status()
os.chdir(root_dir)
stop = time.time()
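    # summarize results across all config files; the return value of
    # summary_report becomes the process exit status (see __main__ below)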
status = 0
if not options.dry_run and not options.update and not options.list_tests:
print("")
run_time = stop - start
test_manager.summary_report_by_file(report, testlog)
test_manager.summary_report(run_time, report, testlog)
status = test_manager.summary_report(run_time, report, sys.stdout)
if options.update:
message = txtwrap.fill(
"Test results were updated! Please document why you modified the "
"gold standard test results in your revision control commit message!\n")
print(''.join(['\n', message, '\n']))
testlog.close()
    return status


if __name__ == "__main__":
cmdl_options = commandline_options()
suite_status = main(cmdl_options)
sys.exit(suite_status)