From 14a669a53f7f50913ffa527b352f81f698aa8bcb Mon Sep 17 00:00:00 2001 From: Andrew Bergan Date: Fri, 15 Feb 2019 10:16:13 -0500 Subject: [PATCH] Added option for custom assertions in test_runner.py --- abaverify/_version.py | 2 +- abaverify/main.py | 105 ++++++++++++++++++++++++------------------ readme.md | 3 ++ 3 files changed, 63 insertions(+), 47 deletions(-) diff --git a/abaverify/_version.py b/abaverify/_version.py index 6a9beea..3d18726 100644 --- a/abaverify/_version.py +++ b/abaverify/_version.py @@ -1 +1 @@ -__version__ = "0.4.0" +__version__ = "0.5.0" diff --git a/abaverify/main.py b/abaverify/main.py index 45cc7b6..9378d30 100644 --- a/abaverify/main.py +++ b/abaverify/main.py @@ -272,10 +272,10 @@ def tearDown(self): Removes Abaqus temp files. This function is called by unittest. """ files = os.listdir(os.getcwd()) - patterns = [re.compile('.*abaqus.*\.rpy.*'), re.compile('.*abaqus.*\.rec.*'), re.compile('.*pyc')] + patterns = [re.compile(r'.*abaqus.*\.rpy.*'), re.compile(r'.*abaqus.*\.rec.*'), re.compile(r'.*pyc')] [os.remove(f) for f in files if any(regex.match(f) for regex in patterns)] - def runTest(self, jobName): + def runTest(self, jobName, func=None, arguments=None): """ Run a verification test. @@ -290,6 +290,11 @@ def runTest(self, jobName): The name of the abaqus input deck (without the .inp file extension). Abaverify assumes that there is a corresponding file named _expected.py that defines the expected results. + func : function + A function to evaluate external assertions. The function is passed + self and jobName as arguments. 
+ arguments : list + A list of arguments to pass to the func """ @@ -342,7 +347,7 @@ def runTest(self, jobName): ftp.close() # Run assertions - self._runAssertionsOnResults(jobName) + self._runAssertionsOnResults(jobName, func, arguments) def _runModel(self, jobName, logFileHandle, timer): """ @@ -421,7 +426,7 @@ def _runModel(self, jobName, logFileHandle, timer): else: _callAbaqusOnRemote(cmd=cmd, log=logFileHandle, timer=timer) - def _runAssertionsOnResults(self, jobName): + def _runAssertionsOnResults(self, jobName, func, arguments): """ Runs assertions on each result specified in the _results.py file. @@ -433,55 +438,63 @@ def _runAssertionsOnResults(self, jobName): ---------- jobName : :obj:`str` The name of the abaqus input deck (without the .inp file extension). + func : function + A function to evaluate external assertions. The function is passed + self and jobName as arguments. + arguments : list + A list of arguments to pass to the func """ outputFileName = jobName + '_results.py' outputFileDir = os.path.join(os.getcwd(), 'testOutput') outputFilePath = os.path.join(outputFileDir, outputFileName) - if os.path.isfile(outputFilePath): - sys.path.insert(0, outputFileDir) - results = __import__(outputFileName[:-3]).results - - for r in results: - - # Loop through values if there are more than one - if hasattr(r['computedValue'], '__iter__'): - for i in range(0, len(r['computedValue'])): - computed_val = r['computedValue'][i] - reference_val = r['referenceValue'][i] - - if isinstance(reference_val, tuple): - tolerance_for_result_obj = r['tolerance'] - # when there exists a tuple as a reference val then all other results and deltas - # should also be tuples - self.assertEqual(len(computed_val), len(reference_val), - "Specified reference value should be same length as Computed value") - # tolerance may be specified as a single tuple or a list of tuples. 
If its the latter - # then index and return the tuple - if isinstance(tolerance_for_result_obj, tuple): - tolerance = tolerance_for_result_obj + if func: + func(self, jobName, arguments) + else: + if os.path.isfile(outputFilePath): + sys.path.insert(0, outputFileDir) + results = __import__(outputFileName[:-3]).results + + for r in results: + + # Loop through values if there are more than one + if hasattr(r['computedValue'], '__iter__'): + for i in range(0, len(r['computedValue'])): + computed_val = r['computedValue'][i] + reference_val = r['referenceValue'][i] + + if isinstance(reference_val, tuple): + tolerance_for_result_obj = r['tolerance'] + # when there exists a tuple as a reference val then all other results and deltas + # should also be tuples + self.assertEqual(len(computed_val), len(reference_val), + "Specified reference value should be same length as Computed value") + # tolerance may be specified as a single tuple or a list of tuples. If its the latter + # then index and return the tuple + if isinstance(tolerance_for_result_obj, tuple): + tolerance = tolerance_for_result_obj + else: + tolerance = tolerance_for_result_obj[i] + self.assertEqual(len(reference_val), len(tolerance), + "Specified tolerance tople should be the same length as the ref") + # loop through entries in tuple (x and y) + for (cv, rv, tolerance) in zip(computed_val, reference_val, tolerance): + self.assertAlmostEqual(cv, rv, delta=tolerance) else: - tolerance = tolerance_for_result_obj[i] - self.assertEqual(len(reference_val), len(tolerance), - "Specified tolerance tople should be the same length as the ref") - # loop through entries in tuple (x and y) - for (cv, rv, tolerance) in zip(computed_val, reference_val, tolerance): - self.assertAlmostEqual(cv, rv, delta=tolerance) - else: - tolerance_for_result_obj = r['tolerance'][i] - self.assertAlmostEqual(computed_val, reference_val, delta=tolerance_for_result_obj) + tolerance_for_result_obj = r['tolerance'][i] + 
self.assertAlmostEqual(computed_val, reference_val, delta=tolerance_for_result_obj) - else: - if "tolerance" in r: - self.assertAlmostEqual(r['computedValue'], r['referenceValue'], delta=r['tolerance']) - elif "referenceValue" in r: - self.assertEqual(r['computedValue'], r['referenceValue']) else: - # No data to compare with, so pass the test - pass - else: - self.fail('No results file provided by process_results.py. Looking for "%s"' % outputFilePath) + if "tolerance" in r: + self.assertAlmostEqual(r['computedValue'], r['referenceValue'], delta=r['tolerance']) + elif "referenceValue" in r: + self.assertEqual(r['computedValue'], r['referenceValue']) + else: + # No data to compare with, so pass the test + pass + else: + self.fail('No results file provided by process_results.py. Looking for "%s"' % outputFilePath) class ParametricMetaClass(type): @@ -609,7 +622,7 @@ def test(self): ftp.close() # Run assertions - self._runAssertionsOnResults(jobName) + self._runAssertionsOnResults(jobName, None, None) finally: # Make sure temporary files are removed os.remove(jobName + '.inp') # Delete temporary parametric input file @@ -972,7 +985,7 @@ def runTests(relPathToUserSub, double=False, compileCodeFunc=None): if not options.useExistingResults: if not options.keepExistingOutputFile: testOutputPath = os.path.join(os.getcwd(), 'testOutput') - pattern = re.compile('.*\.env$') + pattern = re.compile(r'.*\.env$|__pycache__') for f in os.listdir(testOutputPath): if not pattern.match(f): os.remove(os.path.join(os.getcwd(), 'testOutput', f)) diff --git a/readme.md b/readme.md index e31a7fa..4cd7688 100644 --- a/readme.md +++ b/readme.md @@ -243,5 +243,8 @@ the x's. Similarly y points are defined by normalizing force by area for referen the definition of x and y points through eval statements the comparison for test is identical to the default tabular implementation (comparison to referenceValue within specified tolerance). 
+## Custom assertions in the test_runner.py file +User-defined assertions can be added without modifying the abaverify code as follows. Optional arguments `func` and `arguments` are provided in the `self.runTest()` function call. `func` is a Python function that receives three positional arguments: the abaverify object, the job name, and the object passed to `arguments`. The user-defined code in `func` may implement any logic necessary and then use the abaverify object to make the necessary assertions. + ## Automatic testing Abaverify has the capability to run a series of tests, generate a report, and plot run times against historical run times. See `automatic.py` and `automatic_testing_script.py` for details.