From 2cf8f91a1280df51966b972acfb6595c096e1fe9 Mon Sep 17 00:00:00 2001 From: Andrew Bergan Date: Mon, 3 Oct 2016 16:31:24 -0400 Subject: [PATCH] 0.1 --- .gitignore | 42 ++ LICENSE.txt | 107 ++++ README.md | 1 - __init__.py | 1 + abaverify/__init__.py | 3 + abaverify/main.py | 454 +++++++++++++++ abaverify/processresults.py | 638 +++++++++++++++++++++ readme.md | 131 +++++ sample_usage.py | 74 +++ tests/for/materialProperties.for | 160 ++++++ tests/for/umat.for | 63 ++ tests/for/usub-util.for | 86 +++ tests/for/vumat.for | 75 +++ tests/tests/abaqus_v6.env | 9 + tests/tests/test_CPS4R_tension.inp | 108 ++++ tests/tests/test_CPS4R_tension_expected.py | 26 + tests/tests/test_runner.py | 26 + 17 files changed, 2003 insertions(+), 1 deletion(-) create mode 100644 .gitignore create mode 100644 LICENSE.txt delete mode 100644 README.md create mode 100644 __init__.py create mode 100644 abaverify/__init__.py create mode 100644 abaverify/main.py create mode 100644 abaverify/processresults.py create mode 100644 readme.md create mode 100644 sample_usage.py create mode 100644 tests/for/materialProperties.for create mode 100644 tests/for/umat.for create mode 100644 tests/for/usub-util.for create mode 100644 tests/for/vumat.for create mode 100644 tests/tests/abaqus_v6.env create mode 100644 tests/tests/test_CPS4R_tension.inp create mode 100644 tests/tests/test_CPS4R_tension_expected.py create mode 100644 tests/tests/test_runner.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..1e40794 --- /dev/null +++ b/.gitignore @@ -0,0 +1,42 @@ +# Compiled python modules. +*.pyc + +# Setuptools distribution folder. +/dist/ +/build/ + +# Python egg metadata, regenerated from source files by setuptools. +/*.egg-info + +# Sublime text +*.sublime* +*.sublime* + +# Abaqus output files +*.abq +*.com +*.dat +*.log +*.mdl +*.msg +*.odb +*.pac +*.par +*.pes +*.pmg +*.prt +*.res +*.sel +*.sim +*.sta +*.stt + +# Abaqus cae files +*.cae +*.jnl +*.rec +*.deps +*.rpy* + +# testOutput +/tests/tests/testOutput/ \ No newline at end of file diff --git a/LICENSE.txt b/LICENSE.txt new file mode 100644 index 0000000..4b4f697 --- /dev/null +++ b/LICENSE.txt @@ -0,0 +1,107 @@ +NASA OPEN SOURCE AGREEMENT VERSION 1.3 + +THIS OPEN SOURCE AGREEMENT (“AGREEMENT”) DEFINES THE RIGHTS OF USE, REPRODUCTION, DISTRIBUTION, MODIFICATION AND REDISTRIBUTION OF CERTAIN COMPUTER SOFTWARE ORIGINALLY RELEASED BY THE UNITED STATES GOVERNMENT AS REPRESENTED BY THE GOVERNMENT AGENCY LISTED BELOW ("GOVERNMENT AGENCY"). THE UNITED STATES GOVERNMENT, AS REPRESENTED BY GOVERNMENT AGENCY, IS AN INTENDED THIRD-PARTY BENEFICIARY OF ALL SUBSEQUENT DISTRIBUTIONS OR REDISTRIBUTIONS OF THE SUBJECT SOFTWARE. ANYONE WHO USES, REPRODUCES, DISTRIBUTES, MODIFIES OR REDISTRIBUTES THE SUBJECT SOFTWARE, AS DEFINED HEREIN, OR ANY PART THEREOF, IS, BY THAT ACTION, ACCEPTING IN FULL THE RESPONSIBILITIES AND OBLIGATIONS CONTAINED IN THIS AGREEMENT. + +Government Agency: National Aeronautics and Space Administration +Government Agency Original Software Designation: LAR-18938-1 +Government Agency Original Software Title: Abaqus User Subroutine Verification (Abaverify) +Government Agency Point of Contact for Original Software: Andrew.c.bergan@nasa.gov + +1. DEFINITIONS + +A. “Contributor” means Government Agency, as the developer of the Original Software, and any entity that makes a Modification. +B. 
“Covered Patents” mean patent claims licensable by a Contributor that are necessarily infringed by the use or sale of its Modification alone or when combined with the Subject Software. +C. “Display” means the showing of a copy of the Subject Software, either directly or by means of an image, or any other device. +D. “Distribution” means conveyance or transfer of the Subject Software, regardless of means, to another. +E. “Larger Work” means computer software that combines Subject Software, or portions thereof, with software separate from the Subject Software that is not governed by the terms of this Agreement. +F. “Modification” means any alteration of, including addition to or deletion from, the substance or structure of either the Original Software or Subject Software, and includes derivative works, as that term is defined in the Copyright Statute, 17 USC 101. However, the act of including Subject Software as part of a Larger Work does not in and of itself constitute a Modification. +G. “Original Software” means the computer software first released under this Agreement by Government Agency with Government Agency designation LAR-18938 and entitled Abaqus User Subroutine Verification (Abaverify), including source code, object code and accompanying documentation, if any. +H. “Recipient” means anyone who acquires the Subject Software under this Agreement, including all Contributors. +I. “Redistribution” means Distribution of the Subject Software after a Modification has been made. +J. “Reproduction” means the making of a counterpart, image or copy of the Subject Software. +K. “Sale” means the exchange of the Subject Software for money or equivalent value. +L. “Subject Software” means the Original Software, Modifications, or any respective parts thereof. +M. “Use” means the application or employment of the Subject Software for any purpose. + +2. GRANT OF RIGHTS + +A. Under Non-Patent Rights: Subject to the terms and conditions of this Agreement, each Contributor, with respect to its own contribution to the Subject Software, hereby grants to each Recipient a non-exclusive, world-wide, royalty-free license to engage in the following activities pertaining to the Subject Software: + +1. Use +2. Distribution +3. Reproduction +4. Modification +5. Redistribution +6. Display + +B. Under Patent Rights: Subject to the terms and conditions of this Agreement, each Contributor, with respect to its own contribution to the Subject Software, hereby grants to each Recipient under Covered Patents a non-exclusive, world-wide, royalty-free license to engage in the following activities pertaining to the Subject Software: + +1. Use +2. Distribution +3. Reproduction +4. Sale +5. Offer for Sale + +C. The rights granted under Paragraph B. also apply to the combination of a Contributor’s Modification and the Subject Software if, at the time the Modification is added by the Contributor, the addition of such Modification causes the combination to be covered by the Covered Patents. It does not apply to any other combinations that include a Modification. + +D. The rights granted in Paragraphs A. and B. allow the Recipient to sublicense those same rights. Such sublicense must be under the same terms and conditions of this Agreement. + +3. OBLIGATIONS OF RECIPIENT + +A. Distribution or Redistribution of the Subject Software must be made under this Agreement except for additions covered under paragraph 3H. + +1. 
Whenever a Recipient distributes or redistributes the Subject Software, a copy of this Agreement must be included with each copy of the Subject Software; and +2. If Recipient distributes or redistributes the Subject Software in any form other than source code, Recipient must also make the source code freely available, and must provide with each copy of the Subject Software information on how to obtain the source code in a reasonable manner on or through a medium customarily used for software exchange. + +B. Each Recipient must ensure that the following copyright notice appears prominently in the Subject Software: + +This software may be used, reproduced, and provided to others only as permitted under the terms of the agreement under which it was acquired from the U.S. Government. Neither title to, nor ownership of, the software is hereby transferred. This notice shall remain on all copies of the software. + +AND + +Copyright 2016 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. No copyright is claimed in the United States under Title 17, U.S. Code. All Other Rights Reserved. + +C. Each Contributor must characterize its alteration of the Subject Software as a Modification and must identify itself as the originator of its Modification in a manner that reasonably allows subsequent Recipients to identify the originator of the Modification. In fulfillment of these requirements, Contributor must include a file (e.g., a change log file) that describes the alterations made and the date of the alterations, identifies Contributor as originator of the alterations, and consents to characterization of the alterations as a Modification, for example, by including a statement that the Modification is derived, directly or indirectly, from Original Software provided by Government Agency. Once consent is granted, it may not thereafter be revoked. + +D. A Contributor may add its own copyright notice to the Subject Software. Once a copyright notice has been added to the Subject Software, a Recipient may not remove it without the express permission of the Contributor who added the notice. + +E. A Recipient may not make any representation in the Subject Software or in any promotional, advertising or other material that may be construed as an endorsement by Government Agency or by any prior Recipient of any product or service provided by Recipient, or that may seek to obtain commercial advantage by the fact of Government Agency's or a prior Recipient’s participation in this Agreement. + +F. In an effort to track usage and maintain accurate records of the Subject Software, each Recipient, upon receipt of the Subject Software, is requested to provide Government Agency, by e-mail to the Government Agency Point of Contact listed in clause 5.F., the following information: First and Last Name; Email Address; and Affiliation. Recipient’s name and personal information shall be used for statistical purposes only. Once a Recipient makes a Modification available, it is requested that the Recipient inform Government Agency, by e-mail to the Government Agency Point of Contact listed in clause 5.F., how to access the Modification. + +G. Each Contributor represents that that its Modification is believed to be Contributor’s original creation and does not violate any existing agreements, regulations, statutes or rules, and further that Contributor has sufficient rights to grant the rights conveyed by this Agreement. + +H. 
A Recipient may choose to offer, and to charge a fee for, warranty, support, indemnity and/or liability obligations to one or more other Recipients of the Subject Software. A Recipient may do so, however, only on its own behalf and not on behalf of Government Agency or any other Recipient. Such a Recipient must make it absolutely clear that any such warranty, support, indemnity and/or liability obligation is offered by that Recipient alone. Further, such Recipient agrees to indemnify Government Agency and every other Recipient for any liability incurred by them as a result of warranty, support, indemnity and/or liability offered by such Recipient. + +I. A Recipient may create a Larger Work by combining Subject Software with separate software not governed by the terms of this agreement and distribute the Larger Work as a single product. In such case, the Recipient must make sure Subject Software, or portions thereof, included in the Larger Work is subject to this Agreement. + +J. Notwithstanding any provisions contained herein, Recipient is hereby put on notice that export of any goods or technical data from the United States may require some form of export license from the U.S. Government. Failure to obtain necessary export licenses may result in criminal liability under U.S. laws. Government Agency neither represents that a license shall not be required nor that, if required, it shall be issued. Nothing granted herein provides any such export license. + +4. DISCLAIMER OF WARRANTIES AND LIABILITIES; WAIVER AND INDEMNIFICATION + +A. No Warranty: THE SUBJECT SOFTWARE IS PROVIDED “AS IS” WITHOUT ANY WARRANTY OF ANY KIND, EITHER EXPRESSED, IMPLIED, OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTY THAT THE SUBJECT SOFTWARE WILL CONFORM TO SPECIFICATIONS, ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR FREEDOM FROM INFRINGEMENT, ANY WARRANTY THAT THE SUBJECT SOFTWARE WILL BE ERROR FREE, OR ANY WARRANTY THAT DOCUMENTATION, IF PROVIDED, WILL CONFORM TO THE SUBJECT SOFTWARE. THIS AGREEMENT DOES NOT, IN ANY MANNER, CONSTITUTE AN ENDORSEMENT BY GOVERNMENT AGENCY OR ANY PRIOR RECIPIENT OF ANY RESULTS, RESULTING DESIGNS, HARDWARE, SOFTWARE PRODUCTS OR ANY OTHER APPLICATIONS RESULTING FROM USE OF THE SUBJECT SOFTWARE. FURTHER, GOVERNMENT AGENCY DISCLAIMS ALL WARRANTIES AND LIABILITIES REGARDING THIRD-PARTY SOFTWARE, IF PRESENT IN THE ORIGINAL SOFTWARE, AND DISTRIBUTES IT “AS IS.” + +B. Waiver and Indemnity: RECIPIENT AGREES TO WAIVE ANY AND ALL CLAIMS AGAINST THE UNITED STATES GOVERNMENT, ITS CONTRACTORS AND SUBCONTRACTORS, AS WELL AS ANY PRIOR RECIPIENT. IF RECIPIENT'S USE OF THE SUBJECT SOFTWARE RESULTS IN ANY LIABILITIES, DEMANDS, DAMAGES, EXPENSES OR LOSSES ARISING FROM SUCH USE, INCLUDING ANY DAMAGES FROM PRODUCTS BASED ON, OR RESULTING FROM, RECIPIENT'S USE OF THE SUBJECT SOFTWARE, RECIPIENT SHALL INDEMNIFY AND HOLD HARMLESS THE UNITED STATES GOVERNMENT, ITS CONTRACTORS AND SUBCONTRACTORS, AS WELL AS ANY PRIOR RECIPIENT, TO THE EXTENT PERMITTED BY LAW. RECIPIENT'S SOLE REMEDY FOR ANY SUCH MATTER SHALL BE THE IMMEDIATE, UNILATERAL TERMINATION OF THIS AGREEMENT. + +5. GENERAL TERMS + +A. Termination: This Agreement and the rights granted hereunder will terminate automatically if a Recipient fails to comply with these terms and conditions, and fails to cure such noncompliance within thirty (30) days of becoming aware of such noncompliance. Upon termination, a Recipient agrees to immediately cease use and distribution of the Subject Software. 
All sublicenses to the Subject Software properly granted by the breaching Recipient shall survive any such termination of this Agreement. + +B. Severability: If any provision of this Agreement is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this Agreement. + +C. Applicable Law: This Agreement shall be subject to United States federal law only for all purposes, including, but not limited to, determining the validity of this Agreement, the meaning of its provisions and the rights, obligations and remedies of the parties. + +D. Entire Understanding: This Agreement constitutes the entire understanding and agreement of the parties relating to release of the Subject Software and may not be superseded, modified or amended except by further written agreement duly executed by the parties. + +E. Binding Authority: By accepting and using the Subject Software under this Agreement, a Recipient affirms its authority to bind the Recipient to all terms and conditions of this Agreement and that that Recipient hereby agrees to all terms and conditions herein. + +F. Point of Contact: Any Recipient contact with Government Agency is to be directed to the designated representative as follows: + +Bonnie Lumanog +Software Release Authority +MS 158, NASA Langley Research Center +Hampton, VA 23681 +Fax: 757-864-6500 +Phone: 757-864-2933 +Email: b.lumanog@nasa.gov + diff --git a/README.md b/README.md deleted file mode 100644 index 9104508..0000000 --- a/README.md +++ /dev/null @@ -1 +0,0 @@ -# Abaverify \ No newline at end of file diff --git a/__init__.py b/__init__.py new file mode 100644 index 0000000..02fdcf1 --- /dev/null +++ b/__init__.py @@ -0,0 +1 @@ +from abaverify import * \ No newline at end of file diff --git a/abaverify/__init__.py b/abaverify/__init__.py new file mode 100644 index 0000000..ed4e83e --- /dev/null +++ b/abaverify/__init__.py @@ -0,0 +1,3 @@ +from main import TestCase +from main import runTests +from main import ParametricMetaClass \ No newline at end of file diff --git a/abaverify/main.py b/abaverify/main.py new file mode 100644 index 0000000..8a54f4f --- /dev/null +++ b/abaverify/main.py @@ -0,0 +1,454 @@ +""" +This is the main API for the abaverify package. + +Notices: + +Copyright 2016 United States Government as represented by the Administrator of +the National Aeronautics and Space Administration. No copyright is claimed in +the United States under Title 17, U.S. Code. All Other Rights Reserved. + +Disclaimers + +No Warranty: THE SUBJECT SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY OF +ANY KIND, EITHER EXPRESSED, IMPLIED, OR STATUTORY, INCLUDING, BUT NOT LIMITED +TO, ANY WARRANTY THAT THE SUBJECT SOFTWARE WILL CONFORM TO SPECIFICATIONS, ANY +IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR +FREEDOM FROM INFRINGEMENT, ANY WARRANTY THAT THE SUBJECT SOFTWARE WILL BE ERROR +FREE, OR ANY WARRANTY THAT DOCUMENTATION, IF PROVIDED, WILL CONFORM TO THE +SUBJECT SOFTWARE. THIS AGREEMENT DOES NOT, IN ANY MANNER, CONSTITUTE AN +ENDORSEMENT BY GOVERNMENT AGENCY OR ANY PRIOR RECIPIENT OF ANY RESULTS, +RESULTING DESIGNS, HARDWARE, SOFTWARE PRODUCTS OR ANY OTHER APPLICATIONS +RESULTING FROM USE OF THE SUBJECT SOFTWARE. FURTHER, GOVERNMENT AGENCY +DISCLAIMS ALL WARRANTIES AND LIABILITIES REGARDING THIRD-PARTY SOFTWARE, IF +PRESENT IN THE ORIGINAL SOFTWARE, AND DISTRIBUTES IT "AS IS." 
+ +Waiver and Indemnity: RECIPIENT AGREES TO WAIVE ANY AND ALL CLAIMS AGAINST THE +UNITED STATES GOVERNMENT, ITS CONTRACTORS AND SUBCONTRACTORS, AS WELL AS ANY +PRIOR RECIPIENT. IF RECIPIENT'S USE OF THE SUBJECT SOFTWARE RESULTS IN ANY +LIABILITIES, DEMANDS, DAMAGES, EXPENSES OR LOSSES ARISING FROM SUCH USE, +INCLUDING ANY DAMAGES FROM PRODUCTS BASED ON, OR RESULTING FROM, RECIPIENT'S +USE OF THE SUBJECT SOFTWARE, RECIPIENT SHALL INDEMNIFY AND HOLD HARMLESS THE +UNITED STATES GOVERNMENT, ITS CONTRACTORS AND SUBCONTRACTORS, AS WELL AS ANY +PRIOR RECIPIENT, TO THE EXTENT PERMITTED BY LAW. RECIPIENT'S SOLE REMEDY FOR +ANY SUCH MATTER SHALL BE THE IMMEDIATE, UNILATERAL TERMINATION OF THIS AGREEMENT. + +""" +import unittest +from optparse import OptionParser +import sys +import platform +import os +import re +import shutil +import subprocess +import contextlib +import itertools as it +import time +import inspect + +# +# Local +# + +class measureRunTimes: + """ + Measures run times during unit tests + """ + + def __init__(self): + self.runtimes = dict() + + + def processLine(self, line): + """ Helper function to mark the start and end times """ + + if re.match(r'Begin Linking', line): + self.Compile_start = time.time() + elif re.match(r'End Linking', line): + self.Compile_end = time.time() + self.compile_time = self.Compile_end - self.Compile_start + sys.stderr.write("\nCompile run time: {:.2f} s\n".format(self.compile_time)) + + elif re.match(r'Begin Abaqus/Explicit Packager', line): + self.packager_start = time.time() + elif re.match(r'End Abaqus/Explicit Packager', line): + self.packager_end = time.time() + self.package_time = self.packager_end - self.packager_start + sys.stderr.write("Packager run time: {:.2f} s\n".format(self.package_time)) + + elif re.match(r'Begin Abaqus/Explicit Analysis', line): + self.solver_start = time.time() + elif re.match(r'End Abaqus/Explicit Analysis', line) or re.match(r'.*Abaqus/Explicit Analysis exited with an error.*', line): + self.solver_end = time.time() + self.solver_time = self.solver_end - self.solver_start + sys.stderr.write("Solver run time: {:.2f} s\n".format(self.solver_time)) + + +# +# Public facing API +# + +class TestCase(unittest.TestCase): + """ + Base test case. Includes generic functionality to run tests on abaqus models. + """ + + def tearDown(self): + """ + Removes Abaqus temp files. This function is called by unittest. 
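+        It runs after every test method and deletes any abaqus*.rpy and abaqus*.rec files left in the working directory.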
+ """ + files = os.listdir(os.getcwd()) + patterns = [re.compile('.*abaqus.*\.rpy.*'), re.compile('.*abaqus.*\.rec.*')] + [os.remove(f) for f in files if any(regex.match(f) for regex in patterns)] + + + def runTest(self, jobName): + """ + Generic test method + """ + + # Save output to a log file + with open(os.path.join(os.getcwd(), 'testOutput', jobName + '.log'), 'w') as f: + + # Time tests + if options.time: + timer = measureRunTimes() + else: + timer = None + + # Execute the solver + if not options.useExistingResults: + self.runModel(jobName=jobName, f=f, timer=timer) + + # Execute process_results script load ODB and get results + pathForThisFile = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + pathForProcessResultsPy = '"' + os.path.join(pathForThisFile, 'processresults.py') + '"' + self.callAbaqus(cmd='abaqus cae noGUI=' + pathForProcessResultsPy + ' -- -- ' + jobName, log=f, timer=timer) + + # Run assertions + self.runAssertionsOnResults(jobName) + + + def runModel(self, jobName, f, timer): + """ + Logic to submit the abaqus job + """ + + # Platform specific file extension for user subroutine + if not options.precompileCode: + if (platform.system() == 'Linux'): + subext = '.f' + else: + subext = '.for' + userSubPath = os.path.join(os.getcwd(), options.relPathToUserSub + subext) + + # Copy input deck + shutil.copyfile(os.path.join(os.getcwd(), jobName + '.inp'), os.path.join(os.getcwd(), 'testOutput', jobName + '.inp')) + + # build abaqus cmd + cmd = 'abaqus job=' + jobName + if not options.precompileCode: + cmd += ' user="' + userSubPath + '"' + cmd += ' double=both interactive' + + # Copy parameters file, if it exists + parameterName = 'CompDam.parameters' + parameterPath = os.path.join(os.getcwd(), parameterName) + if os.path.exists(parameterPath): + shutil.copyfile(parameterPath, os.path.join(os.getcwd(), 'testOutput', parameterName)) + + # Run the test from the testOutput directory + os.chdir(os.path.join(os.getcwd(), 'testOutput')) + self.callAbaqus(cmd=cmd, log=f, timer=timer) + + os.chdir(os.pardir) + + + def runAssertionsOnResults(self, jobName): + """ + Runs assertions on each result in the jobName_results.py file + """ + + outputFileName = jobName + '_results.py' + outputFileDir = os.path.join(os.getcwd(), 'testOutput') + if (os.path.isfile(os.path.join(outputFileDir, outputFileName))): + sys.path.insert(0, outputFileDir) + results = __import__(outputFileName[:-3]).results + + for r in results: + + # Loop through values if there are more than one + if hasattr(r['computedValue'], '__iter__'): + for i in range(0, len(r['computedValue'])): + self.assertAlmostEqual(r['computedValue'][i], r['referenceValue'][i], delta=r['tolerance'][i]) + + else: + if "tolerance" in r: + self.assertAlmostEqual(r['computedValue'], r['referenceValue'], delta=r['tolerance']) + elif "referenceValue" in r: + self.assertEqual(r['computedValue'], r['referenceValue']) + else: + # No data to compare with, so pass the test + pass + else: + self.fail('No results file provided by process_results.py') + + + def callAbaqus(self, cmd, log, timer=None, shell=True): + """ + Logic for calls to abaqus. Support streaming the output to the log file. 
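+        Each line of output is written to the log file as it arrives; with the --interactive option it is also echoed to the terminal, and with --time it is passed to the run-time timer.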
+ """ + + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, shell=shell) + + # Parse output lines & save to a log file + for line in self.outputStreamer(p): + + # Time tests + if options.time: + if timer != None: + timer.processLine(line) + + # Log data + log.write(line + "\n") + if options.interactive: + print line + + + def outputStreamer(self, proc, stream='stdout'): + """ + Hanldes streaming of subprocess. + Copied from: http://blog.thelinuxkid.com/2013/06/get-python-subprocess-output-without.html + """ + + newlines = ['\n', '\r\n', '\r'] + stream = getattr(proc, stream) + with contextlib.closing(stream): + while True: + out = [] + last = stream.read(1) + # Don't loop forever + if last == '' and proc.poll() is not None: + break + while last not in newlines: + # Don't loop forever + if last == '' and proc.poll() is not None: + break + out.append(last) + last = stream.read(1) + out = ''.join(out) + yield out + + +class ParametricMetaClass(type): + """ + Meta class for parametric tests + More info: http://stackoverflow.com/a/20870875 + + Expects that the inheriting class defines + baseName: The name of the input deck to use as a template (without the .inp) + parameters: a dictionary with each parameter to vary. For example: {'alpha': range(-40,10,10), 'beta': range(60,210,30)} + """ + + def __new__(mcs, name, bases, dct): + + def make_test_function(testCase): + """ + Creates test_ function for the particular test case passed in + """ + + jobName = testCase['name'] + baseName = testCase['baseName'] + parameters = {k: v for k, v in testCase.items() if not k in ('baseName', 'name')} + + def test(self): + + # Create the input deck + # Copy the template input file + if 'pythonScriptForModel' in testCase: + inpFilePath = os.path.join(os.getcwd(), jobName + '.py') + shutil.copyfile(os.path.join(os.getcwd(), baseName + '.py'), inpFilePath) + else: + inpFilePath = os.path.join(os.getcwd(), jobName + '.inp') + shutil.copyfile(os.path.join(os.getcwd(), baseName + '.inp'), inpFilePath) + + # Update all of the relevant *Parameter terms in the Abaqus input deck + with file(inpFilePath, 'r') as original: + data = original.readlines() + for p in parameters.keys(): + for line in range(len(data)): + if re.search('.{0,}' + str(p) + '.{0,}=.{0,}$', data[line]) is not None: + data[line] = data[line].split('=')[0] + '= ' + str(parameters[p]) + '\n' + break + with file(inpFilePath, 'w') as modified: + modified.writelines(data) + + # Generate an expected results Python file with jobName + shutil.copyfile(os.path.join(os.getcwd(), baseName + '_expected.py'), os.path.join(os.getcwd(), jobName + '_expected.py')) + + # Save output to a log file + with open(os.path.join(os.getcwd(), 'testOutput', jobName + '.log'), 'w') as f: + + # Generate input file from python script + if 'pythonScriptForModel' in testCase: + callAbaqus(cmd='abaqus cae noGUI=' + inpFilePath, log=f) + + # Time tests + if options.time: + timer = measureRunTimes() + else: + timer = None + + # Execute the solver + self.runModel(jobName=jobName, f=f, timer=timer) + + # Execute process_results script load ODB and get results + self.callAbaqus(cmd='abaqus cae noGUI=abaverify/abaverify/processresults.py -- ' + jobName, log=f, timer=timer) + + os.remove(jobName + '.inp') # Delete temporary parametric input file + os.remove(jobName + '_expected.py') # Delete temporary parametric expected results file + if 'pythonScriptForModel' in testCase: + os.remove(jobName + '.py') + + # Run assertions + 
self.runAssertionsOnResults(jobName) + + + # Rename the test method and return the test + test.__name__ = jobName + return test + + # Store input arguments + try: + baseName = dct['baseName'] + parameters = dct['parameters'] + except: + print "baseName and parameters must be defined by the sub class" + + # Get the cartesian product to yield a list of all the potential test cases + testCases = list(dict(it.izip(parameters, x)) for x in it.product(*parameters.itervalues())) + + # Loop through each test + for i in range(0,len(testCases)): + + # Add a name to each test case + # Generate portion of test name based on particular parameter values + pn = '_'.join(['%s_%s' % (key, value) for (key, value) in testCases[i].items()]) + # Replace periods with commas so windows doesn't complain about file names + pn = re.sub('[.]','',pn) + # Add the test case name; concatenate the base name and parameter name + testCases[i].update({'name': baseName + '_' + pn}) + testCases[i].update({'baseName': baseName}) + if 'pythonScriptForModel' in dct: + testCases[i].update({'pythonScriptForModel': dct['pythonScriptForModel']}) + + # Add test functions to the testCase class + dct[testCases[i]['name']] = make_test_function(testCases[i]) + + return type.__new__(mcs, name, bases, dct) + + +def runTests(relPathToUserSub, compileCodeFunc=None): + """ + This is the main entry point for abaverify. + There is option optional argument (compileCodeFunc) which is the function called to compile subroutines via + abaqus make. This functionality is used when compiling the subroutine once before running several tests is + desired. By default the subroutine is compiled at every test execution. + """ + + global options + + # Command line options + parser = OptionParser() + parser.add_option("-i", "--interactive", action="store_true", dest="interactive", default=False, help="Print output to the terminal; useful for debugging") + parser.add_option("-t", "--time", action="store_true", dest="time", default=False, help="Calculates and prints the time it takes to run each test") + parser.add_option("-c", "--precompileCode", action="store_true", dest="precompileCode", default=False, help="Compiles the subroutine before running each tests") + parser.add_option("-e", "--useExistingBinaries", action="store_true", dest="useExistingBinaries", default=False, help="Uses existing binaries in /build") + parser.add_option("-r", "--useExistingResults", action="store_true", dest="useExistingResults", default=False, help="Uses existing results in /testOutput; useful for debugging postprocessing") + parser.add_option("-s", "--specifyPathToSub", action="store", dest="relPathToUserSub", default=relPathToUserSub, help="Override path to user subroutine") + (options, args) = parser.parse_args() + + # Remove custom args so they do not get sent to unittest + # http://stackoverflow.com/questions/1842168/python-unit-test-pass-command-line-arguments-to-setup-of-unittest-testcase + for x in sum([h._long_opts+h._short_opts for h in parser.option_list],[]): + if x in sys.argv: + sys.argv.remove(x) + + # Remove rpy files + testPath = os.getcwd() + pattern = re.compile('^abaqus\.rpy(\.)*([0-9])*$') + for f in os.listdir(testPath): + if pattern.match(f): + os.remove(os.path.join(os.getcwd(), f)) + + # Remove old binaries + if not options.useExistingBinaries: + if os.path.isdir(os.path.join(os.pardir, 'build')): + for f in os.listdir(os.path.join(os.pardir, 'build')): + os.remove(os.path.join(os.pardir, 'build', f)) + + # If testOutput doesn't exist, create it + 
testOutputPath = os.path.join(os.getcwd(), 'testOutput') + if not os.path.isdir(testOutputPath) and options.useExistingResults: + raise Exception("There must be results in the testOutput directory to use the --useExistingResults (-r) option") + if not os.path.isdir(testOutputPath): + os.makedirs(testOutputPath) + + # Remove old job files + if not options.useExistingResults: + testOutputPath = os.path.join(os.getcwd(), 'testOutput') + pattern = re.compile('.*\.env$') + for f in os.listdir(testOutputPath): + if not pattern.match(f): + os.remove(os.path.join(os.getcwd(), 'testOutput', f)) + + # Try to pre-compile the code + if not options.useExistingBinaries: + wd = os.getcwd() + if options.precompileCode: + try: + compileCodeFunc() + except: + print "ERROR: abaqus make failed.", sys.exc_info()[0] + raise Exception("Error compiling with abaqus make. Look for 'compile.log' in the testOutput directory. Or try running 'abaqus make library=CompDam_DGD' from the /for directory to debug.") + os.chdir(wd) + + # Make sure + # 1) environment file exists + # 2) it has usub_lib_dir + # 3) usub_lib_dir is the location where the binaries reside + # 4) a copy is in testOutput + if os.path.isfile(os.path.join(os.getcwd(), 'abaqus_v6.env')): + # Make sure it has usub_lib_dir + if options.precompileCode: + with open(os.path.join(os.getcwd(), 'abaqus_v6.env'), 'r+') as envFile: + foundusub = False + pattern_usub = re.compile('^usub_lib_dir.*') + for line in envFile: + if pattern_usub.match(line): + print "Found usub_lib_dir" + pathInEnvFile = re.findall('= "(.*)"$', line).pop() + if pathInEnvFile: + if pathInEnvFile == '/'.join(os.path.abspath(os.path.join(os.pardir, 'build')).split('\\')): # Note that this nonsense is because abaqus wants '/' as os.sep even on windows + foundusub = True + break + else: + raise Exception("ERROR: a usub_lib_dir is specified in the environment file that is different from the build location.") + else: + raise Exception("ERROR: logic to parse the environment file looking for usub_lib_dir failed.") + + # Add usub_lib_dir if it was not found + if not foundusub: + print "Adding usub_lib_dir to environment file." + with open(os.path.join(os.getcwd(), 'abaqus_v6.env'), 'a') as envFile: + pathWithForwardSlashes = '/'.join(os.path.abspath(os.path.join(os.pardir, 'build')).split('\\')) + print pathWithForwardSlashes + envFile.write('\nimport os\nusub_lib_dir = "' + pathWithForwardSlashes + '"\ndel os\n') + + # Copy to /test/testOutput + shutil.copyfile(os.path.join(os.getcwd(), 'abaqus_v6.env'), os.path.join(os.getcwd(), 'testOutput', 'abaqus_v6.env')) + else: + raise Exception("Missing environment file. Please configure a local abaqus environement file. See getting started in readme.") + + + unittest.main(verbosity=2) diff --git a/abaverify/processresults.py b/abaverify/processresults.py new file mode 100644 index 0000000..f9314f6 --- /dev/null +++ b/abaverify/processresults.py @@ -0,0 +1,638 @@ +""" +Opens an abaqus odb and gets requested results + +Arguments: + 1. jobName: name of the abaqus job (code expects an odb and a results text file with this name + in the working dir) + +Sample usage: +$ abaqus cae noGUI=model_runner.py -- jobName + +Output: +This code writes a file jobName_results.py with the reference value and the results collected from the job + +Test types: +1. max, min: + Records the maximum or minimum value of the specified identifier. Assumes that only one identifier is specified. +2. 
continuous: + Records the maximum change in the specified identifier value between each increment. This test should fail + when a discontinuity occurs that is larger than the reference value + tolerance. Assumes that only one + identifier is specified. +3. xy_infl_pt: + Records the location of an inflection point in an x-y data set. Assumes two identifiers are specified where + the first is the x-data and the second is the y-data. To speed up the search, a window can be specified + as [min, max] where only values in x withing the bounds of the window are used in searching for the inflection + point. The reference value and tolerance are specified as pairs corresponding to the x and y values [x, y]. + Numerical derivatives are calculated to find the inflection point. A Butterworth filter is used to smooth the data. + It is possible that this test could produce errorneous results if the data is too noisy for the filter settings. +4. disp_at_zero_y: +5. log_stress_at_failure_init: + Writes specified stresses (stressComponents) when first failure index in failureIndices indicates failure (>= 1.0). + This test assumes that the input deck terminates the analysis shortly after failure is reached by using the *Filter card. + All of the failure indices to check for failure should be included in the failureIndices array. The option + additionalIdenitifiersToStore can be used to record additional outputs at the increment where failure occurs + (e.g., alpha). Stress in stressComponents and other data in additionalIdenitifiersToStore are written to a log + file that is named using the test base name. +""" + + +from abaqus import * +import abaqusConstants +from abaqusConstants import * +from caeModules import * +import job +import os +import inspect +import sys +import re +import shutil +import numpy as np +from operator import itemgetter + +# This is a crude hack, but its working +pathForThisFile = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +sys.path.insert(0, pathForThisFile) + +# Throw-away debugger +def debug(obj): + if True: # Use to turn off debugging + sys.__stderr__.write("DEBUG - " + __name__ + ": " + str(obj) + '\n') + + +def interpolate(x, xp, fp): + """ + Augmentation to np.interp to handle negative numbers. This is a hack, needs improvement. 
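+    When xp is monotonically increasing, the data are passed straight to np.interp; when every entry of xp is negative, the signs of x and xp are flipped so that np.interp still sees increasing sample points. Other cases raise an exception.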
+ """ + if np.all(np.diff(xp) > 0): + return np.interp(x, xp, fp) + else: + if all([i<0 for i in xp]): + xpos = np.absolute(xp) + return np.interp(-1*x, xpos, fp) + else: + raise Exception("Functionality to interpolate datasets that traverse 0 or are unsorted is not implemented") + + +def resample(data, numPts): + """ + Re-samples an xy data set to have the number of points specified and a linear space in x + """ + x, y = zip(*data) + if x[0] < x[-1]: + xMin = x[0] + xMax = x[-1] + else: + xMin = x[-1] + xMax = x[0] + xNew = np.linspace(xMin, xMax, numPts) + yNew = interpolate(xNew, x, y) + return zip(xNew, yNew) + + +def listOfHistoryOutputSymbols(): + """ + Generates a list of the history output symbols in the odb + """ + outputs = list() + for s in odb.steps.keys(): + for hr in odb.steps[s].historyRegions.keys(): + for ho in odb.steps[s].historyRegions[hr].historyOutputs.keys(): + if ho not in outputs: + outputs.append(ho) + return outputs + + +def parseJobName(name): + """ + Parses job name of paramtric tests + """ + + s = name.split("_") + + output = dict() + + # Find the index of the first integer (value of paramter) + idxFirstInt = 0 + while True: + try: + int(s[idxFirstInt]) + break + except ValueError: + idxFirstInt += 1 + + # Everything before the first integer is the basename + output["baseName"] = "_".join(s[0:idxFirstInt-1]) + + # Assume everything after the basename is paramters and values + for n in range(idxFirstInt-1, len(s), 2): + output[s[n]] = s[n+1] + + return output + + +def _historyOutputNameHelperNode(prefixString, identifier, steps): + """ Helper to prevent repeated code in historyOutputNameFromIdentifier """ + + i = str(identifier["symbol"]) + if "position" in identifier and "nset" in identifier: + return prefixString + i + " at " + str(identifier["position"]) + " in NSET " + str(identifier["nset"]) + elif "nset" in identifier: + if len(steps) > 1: + raise Exception("_historyOutputNameHelperNode: Must specify position if analysis has multiple steps") + else: + step = steps[0] + for historyRegions in odb.steps[step].historyRegions.keys(): + regionLabels = [x for x in odb.steps[step].historyRegions.keys() if odb.steps[step].historyRegions[x].historyOutputs.has_key(i)] + if len(regionLabels) == 1: + labelSplit = regionLabels[0].split(' ') + if labelSplit[0] == 'Node' and len(labelSplit) == 2: + nodeNumber = labelSplit[1].split('.')[1] + return prefixString + i + " at Node " + nodeNumber + " in NSET " + str(identifier["nset"]) + else: + raise Exception("Must specify a position for " + i) + else: + raise Exception("Found multiple candidate positions. Must specify a position for " + i) + else: + raise ValueError("Missing nset definition in RF identifier") + + raise Exception("Error getting history Output name in _historyOutputNameHelperNode") + + +def _historyOutputNameHelperElement(prefixString, identifier, steps): + """ Helper to prevent repeated code in historyOutputNameFromIdentifier """ + + i = str(identifier["symbol"]) + if "position" in identifier and "elset" in identifier: + if i not in listOfHistoryOutputSymbols(): + for sym in listOfHistoryOutputSymbols(): + if i.lower() == sym.lower(): + i = sym + return prefixString + i + " at " + str(identifier["position"]) + " in ELSET " + str(identifier["elset"]) + else: + raise ValueError("Must define position in element identifier") + + +def historyOutputNameFromIdentifier(identifier, steps=None): + """ + Returns a well-formated history output name from the identifier. 
The identifier can be in any of a variety of + formats. A list of identifiers can be provided in which case this function returns a tuple of history output names. + + The step is the name of the step of interest. + + + Accepted identifier formats: + # Specify the complete identifier + - "identifier": "Reaction force: RF1 at Node 9 in NSET LOADAPP" + # Specify the complete identifier piecemeal + - "identifier": { + "symbol": "RF1", + "position": "Node 9", + "nset": "LOADAPP" + } + # For element output data, "elset" can be used + - "identifier": { + "symbol": "S11", + "position": "Element 1 Int Point 1", + "elset": "DAMAGEABLEROW" + } + # Specify the identifier without a position. In this case, the code looks through the available history outputs for a + # valid match. If a single match is found, it is used. Otherwise an exception is raised. The step argument must be specified. + - "identifier": { + "symbol": "RF1", + "nset": "LOADAPP" + } + # Not implemented for an element (yet) + """ + + # Handle case of a list of identifiers + if type(identifier) == type(list()): + out = list() + for i in identifier: + out.append(historyOutputNameFromIdentifier(identifier=i, steps=steps)) + return tuple(out) + + # Case when identifier is a dictionary + elif type(identifier) == type(dict()): + if "symbol" in identifier: + i = str(identifier["symbol"]) + + # Parse the symbol. Known symbols (RF, U, S, LE, E, SDV) + # RF + if re.match(r'^RF\d', i): + return _historyOutputNameHelperNode(prefixString="Reaction force: ", identifier=identifier, steps=steps) + + # U + elif re.match(r'^U\d', i): + return _historyOutputNameHelperNode(prefixString="Spatial displacement: ", identifier=identifier, steps=steps) + + # S + elif re.match(r'^S\d+', i): + return _historyOutputNameHelperElement(prefixString="Stress components: ", identifier=identifier, steps=steps) + + # LE + elif re.match(r'^LE\d+', i): + return _historyOutputNameHelperElement(prefixString="Logarithmic strain components: ", identifier=identifier, steps=steps) + + # E + elif re.match(r'^E\d+', i): + raise NotImplementedError() + + # SDV + elif re.match(r'^SDV\w+', i): + return _historyOutputNameHelperElement(prefixString="Solution dependent state variables: ", identifier=identifier, steps=steps) + + else: + raise ValueError("Unrecognized symbol " + i + " found") + else: + raise ValueError("Identifier missing symbol definition") + + # Case when the identifer is specified directly + elif type(identifier) == type(str()) or type(identifier) == type(unicode()): + return str(identifier) + else: + raise ValueError("Expecting that the argument is a list, dict, or str. Found " + str(type(identifier))) + + +def write_results(results_to_write, fileName, depth=0): + """ + Writes the contents of resultsFile to a Python text file + + :type results_to_write: list or dictionary + :type fileName: str + :type depth: int + """ + + # Create the file if it does not exist. 
If it does exist, open it for appending + if depth == 0: + f = open(fileName, 'w') + f.write('results = ') + else: + f = open(fileName, 'a') + + if type(results_to_write) == type(dict()): + + f.write('\t'*depth + '{\n') + + for r in results_to_write: + + f.write('\t'*depth + '"' + r + '": ') + + if (type(results_to_write[r]) == type(dict()) or type(results_to_write[r]) == type(list())): + f.write('\n') + f.close() + write_results(results_to_write[r], fileName, depth + 1) + f = open(fileName, 'a') + + elif type(results_to_write[r]) == type(str()): + f.write('"' + str(results_to_write[r]) + '",\n') + else: + f.write(str(results_to_write[r]) + ',\n') + + if depth == 0: + f.write('\t'*depth + '}\n') + else: + f.write('\t'*depth + '},\n') + + elif type(results_to_write) == type(list()): + + f.write('\t'*depth + '[\n') + + for r in results_to_write: + + if (type(r) == type(dict()) or type(r) == type(list())): + f.close() + write_results(r, fileName, depth + 1) + f = open(fileName, 'a') + + elif type(r) == type(str()): + f.write('\t'*depth + '"' + str(r) + '",\n') + + else: + f.write('\t'*depth + str(r) + ',\n') + + if depth == 0: + f.write('\t'*depth + ']\n') + else: + f.write('\t'*depth + '],\n') + + +debug(os.getcwd()) + +# Arguments +jobName = sys.argv[-1] + +# Load parameters +para = __import__(jobName + '_expected').parameters + +# Change working directory to testOutput and put a copy of the input file in testOutput +os.chdir(os.path.join(os.getcwd(), 'testOutput')) + +# Load ODB +odb = session.openOdb(name=os.path.join(os.getcwd(), jobName + '.odb'), readOnly=False) + +# Report errors +if odb.diagnosticData.numberOfAnalysisErrors > 0: + # Ignore excessive element distortion errors when generating failure envelopes + resultTypes = [r["type"] for r in para["results"]] + if "log_stress_at_failure_init" in resultTypes: + for e in odb.diagnosticData.analysisErrors: + if e.description != 'Excessively distorted elements': + raise Exception("\nERROR: Errors occurred during analysis") + if "ignoreAnalysisErrors" in para: + if not para["ignoreAnalysisErrors"]: + raise Exception("\nERROR: Errors occurred during analysis") + else: + raise Exception("\nERROR: Errors occurred during analysis") + +# Report warnings +if "ignoreWarnings" in para and not para["ignoreWarnings"]: + if odb.diagnosticData.numberOfAnalysisWarnings > 0: + raise Exception("\nERROR: Warnings occurred during analysis") + + +# Initialize dict to hold the results -- this will be used for assertions in the test_runner +testResults = list() + +# Collect results +for r in para["results"]: + + # Get the steps to consider. 
Default to "Step-1" + if "step" in r: + steps = (str(r["step"]), ) + else: + steps = ("Step-1", ) + + # Debugging + debug(steps) + debug(r) + + # Collect data from odb for each type of test + + # Max, min + if r["type"] in ("max", "min"): + + # This trys to automatically determine the appropriate position specifier + varName = historyOutputNameFromIdentifier(identifier=r["identifier"], steps=steps) + + f = open('temp.txt', 'a') + f.write(varName) + f.close() + + # Get the history data + n = str(r["identifier"]["symbol"]) + '_' + steps[0] + '_' + str(r["type"]) + xyDataObj = session.XYDataFromHistory(name=n, odb=odb, outputVariableName=varName, steps=steps) + xy = session.xyDataObjects[n] + # odb.userData.XYData(n, xy) + + # Get the value calculated in the analysis (last frame must equal to 1, which is total step time) + if r["type"] == "max": + r["computedValue"] = max([pt[1] for pt in xyDataObj]) + else: + r["computedValue"] = min([pt[1] for pt in xyDataObj]) + testResults.append(r) + + + # Enforce continuity + elif r["type"] == "continuous": + + # This trys to automatically determine the appropriate position specifier + varName = historyOutputNameFromIdentifier(identifier=r["identifier"], steps=steps) + + # Get the history data + xyDataObj = session.XYDataFromHistory(name='XYData-1', odb=odb, outputVariableName=varName, steps=steps) + + # Get the maximum change in the specified value + r["computedValue"] = max([max(r["referenceValue"], abs(xyDataObj[x][1] - xyDataObj[x-1][1])) for x in range(2,len(xyDataObj))]) + testResults.append(r) + + + elif r["type"] == "xy_infl_pt": + varNames = historyOutputNameFromIdentifier(identifier=r["identifier"], steps=steps) + + # Get xy data + x = session.XYDataFromHistory(name=str(r["identifier"][0]["symbol"]), odb=odb, outputVariableName=varNames[0], steps=steps) + y = session.XYDataFromHistory(name=str(r["identifier"][1]["symbol"]), odb=odb, outputVariableName=varNames[1], steps=steps) + + # Combine the x and y data + xy = combine(x, y) + tmpName = xy.name + session.xyDataObjects.changeKey(tmpName, 'ld') + xy = session.xyDataObjects['ld'] + odb.userData.XYData('ld', xy) + + # Locals + xyData = xy + windowMin=r["window"][0] + windowMax=r["window"][1] + + # Select window + windowed = [x for x in xyData if x[0] > windowMin and x[0] < windowMax] + if len(windowed) == 0: raise Exception("No points found in specified window") + if min([abs(windowed[i][0]-windowed[i-1][0]) for i in range(1, len(windowed))]) == 0: + raise "ERROR" + session.XYData(data=windowed, name="windowed") + ldWindowed = session.xyDataObjects['windowed'] + odb.userData.XYData('windowed', ldWindowed) + + # Calcuate the derivative using a moving window + xy = differentiate(session.xyDataObjects['windowed']) + xy = resample(data=xy, numPts=10000) + + # Filter + if "filterCutOffFreq" in r["identifier"][1]: + session.XYData(data=xy, name="temp") + xy = butterworthFilter(xyData=session.xyDataObjects['temp'], cutoffFrequency=int(r["identifier"][1]["filterCutOffFreq"])) + tmpName = xy.name + session.xyDataObjects.changeKey(tmpName, 'slope') + else: + session.XYData(data=xy, name="slope") + slopeXYObj = session.xyDataObjects['slope'] + odb.userData.XYData('slope', slopeXYObj) + + # Differentiate again + xy = differentiate(session.xyDataObjects['slope']) + tmpName = xy.name + session.xyDataObjects.changeKey(tmpName, 'dslope') + + # Get peak from dslope + dslope = session.xyDataObjects['dslope'] + odb.userData.XYData('dslope', dslope) + x, y = zip(*dslope) + y = np.absolute(y) + dslope = zip(x,y) + 
xMax, yMax = max(dslope, key=itemgetter(1)) + + # Store the x, y pair at the inflection point + x, y = zip(*session.xyDataObjects['windowed']) + r["computedValue"] = (xMax, interpolate(xMax, x, y)) + testResults.append(r) + + + elif r["type"] == "disp_at_zero_y": + varNames = historyOutputNameFromIdentifier(identifier=r["identifier"], steps=steps) + + # Get xy data + x = session.XYDataFromHistory(name=str(r["identifier"][0]["symbol"]), odb=odb, outputVariableName=varNames[0], steps=steps) + y = session.XYDataFromHistory(name=str(r["identifier"][1]["symbol"]), odb=odb, outputVariableName=varNames[1], steps=steps) + xy = combine(x, y) + + # Window definition + if "window" in r: + windowMin = r["window"][0] + windowMax = r["window"][1] + else: + windowMin = r['referenceValue'] - 2*r['tolerance'] + windowMax = r['referenceValue'] + 2*r['tolerance'] + + # Use subset of full traction-separation response + windowed = [x for x in xy if x[0] > windowMin and x[0] < windowMax] + + # Tolerance to zero + if "zeroTol" not in r: + r["zeroTol"] = 1e-6 + + # Find pt where stress goes to target + disp_crit = 0 + for pt in windowed: + if abs(pt[1]) <= r["zeroTol"]: + disp_crit = pt[0] + break + + # Issue error if a value was not found + if disp_crit == 0: + raise ValueError("disp_at_zero_y: Could not find a point where y data goes to zero") + + r["computedValue"] = disp_crit + testResults.append(r) + + + elif r["type"] == "log_stress_at_failure_init": + + # Load history data for failure indices and store xy object to list 'failed' if the index is failed at last increment + failed = list() + varNames = historyOutputNameFromIdentifier(identifier=r["failureIndices"], steps=steps) + for i in range(0, len(varNames)): + n = str(r["failureIndices"][i]["symbol"]) + xy = session.XYDataFromHistory(name=n, odb=odb, outputVariableName=varNames[i], steps=steps) + xy = session.xyDataObjects[n] + odb.userData.XYData(n, xy) + if xy[-1][1] >= 1.0: + failed.append(xy) + + # Make sure at least one failure index has reached 1.0 + if len(failed) <= 0: + raise Exception("No failure occurred in the model") + + # Find the increment number where failure initiated + i = len(failed[0]) + while True: + i = i - 1 + if len(failed) > 1: + for fi in failed: + if fi[i][1] < 1.0: + failed.remove(fi) + if len(failed) < 1: + raise NotImplementedError("Multiple failure modes occurred at the same increment") + continue + else: + if failed[0][i][1] >= 1.0: + continue + elif failed[0][i][1] < 1.0: + inc = i + 1 + break + + # Get values of stress at final failure + stressAtFailure = dict() + varNames = historyOutputNameFromIdentifier(identifier=r["stressComponents"], steps=steps) + for i in range(0, len(varNames)): + n = str(r["stressComponents"][i]["symbol"]) + xy = session.XYDataFromHistory(name=n, odb=odb, outputVariableName=varNames[i], steps=steps) + xy = session.xyDataObjects[n] + odb.userData.XYData(n, xy) + stressAtFailure[n] = xy[inc][1] + + # Get additionalIdentifiersToStore data + additionalData = dict() + varNames = historyOutputNameFromIdentifier(identifier=r["additionalIdentifiersToStore"], steps=steps) + for i in range(0, len(varNames)): + n = str(r["additionalIdentifiersToStore"][i]["symbol"]) + xy = session.XYDataFromHistory(name=n, odb=odb, outputVariableName=varNames[i], steps=steps) + xy = session.xyDataObjects[n] + odb.userData.XYData(n, xy) + additionalData[n] = xy[inc][1] + + + # Get job name + jnparsed = parseJobName(jobName) + + # Format string of data to write + dataToWrite = jnparsed["loadRatio"] + ', ' + ', 
'.join([str(x[1]) for x in sorted(stressAtFailure.items(), key=itemgetter(0))]) + dataToWrite += ', ' + ', '.join([str(x[1]) for x in sorted(additionalData.items(), key=itemgetter(0))]) + '\n' + + # Write stresses to file for plotting the failure envelope + # Write heading if this is the first record in the file + logFileName = jnparsed["baseName"] + "_" + "failure_envelope.txt" + if not os.path.isfile(logFileName): + heading = 'Load Ratio, ' + ', '.join([str(x[0]) for x in sorted(stressAtFailure.items(), key=itemgetter(0))]) + heading += ', ' + ', '.join([str(x[0]) for x in sorted(additionalData.items(), key=itemgetter(0))]) + '\n' + dataToWrite = heading + dataToWrite + with open(logFileName, "a") as f: + f.write(dataToWrite) + + + elif r["type"] == "slope": + varNames = historyOutputNameFromIdentifier(identifier=r["identifier"], steps=steps) + + # Get xy data + x = session.XYDataFromHistory(name=str(r["identifier"][0]["symbol"]), odb=odb, outputVariableName=varNames[0], steps=steps) + y = session.XYDataFromHistory(name=str(r["identifier"][1]["symbol"]), odb=odb, outputVariableName=varNames[1], steps=steps) + + # Combine the x and y data + xy = combine(x, y) + tmpName = xy.name + session.xyDataObjects.changeKey(tmpName, 'slope_xy') + xy = session.xyDataObjects['slope_xy'] + odb.userData.XYData('slope_xy', xy) + + # Locals + xyData = xy + windowMin=r["window"][0] + windowMax=r["window"][1] + + # Select window + windowed = [x for x in xyData if x[0] > windowMin and x[0] < windowMax] + if len(windowed) == 0: raise Exception("No points found in specified window") + if min([abs(windowed[i][0]-windowed[i-1][0]) for i in range(1, len(windowed))]) == 0: + raise "ERROR" + session.XYData(data=windowed, name="windowed") + ldWindowed = session.xyDataObjects['windowed'] + odb.userData.XYData('windowed', ldWindowed) + + # Calcuate the derivative + xy = differentiate(session.xyDataObjects['windowed']) + tmpName = xy.name + session.xyDataObjects.changeKey(tmpName, 'slope_xy_diff') + odb.userData.XYData('slope_xy_diff', session.xyDataObjects['slope_xy_diff']) + + # Get the average value of the slope + x, y = zip(*session.xyDataObjects['slope_xy_diff']) + r["computedValue"] = np.mean(y) + testResults.append(r) + + + else: + raise NotImplementedError("test_case result data not recognized: " + str(r)) + +# Save the odb +odb.save() + +# Write the results to a text file for assertions by test_runner +fileName = os.path.join(os.getcwd(), jobName + '_results.py') + +# Remove the old results file if it exists +try: + os.remove(fileName) +except: + pass + +write_results(testResults, fileName) diff --git a/readme.md b/readme.md new file mode 100644 index 0000000..afa9b06 --- /dev/null +++ b/readme.md @@ -0,0 +1,131 @@ +# abaverify +A python package built on [`unittest`](https://docs.python.org/2.7/library/unittest.html) for running verification tests on Abaqus user subroutines. Basic familiarity with unittest and Abaqus user subroutine development is assumed in this readme. + +This software may be used, reproduced, and provided to others only as permitted under the terms of the agreement under which it was acquired from the U.S. Government. Neither title to, nor ownership of, the software is hereby transferred. This notice shall remain on all copies of the software. + +Copyright 2016 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. No copyright is claimed in the United States under Title 17, U.S. Code. All Other Rights Reserved. 
+ +For any questions, please contact the developers: +- Andrew Bergan | [andrew.c.bergan@nasa.gov](mailto:andrew.c.bergan@nasa.gov) | (W) 757-864-3744 +- Frank Leone | [frank.a.leone@nasa.gov](mailto:frank.a.leone@nasa.gov) | (W) 757-864-3050 + +## Getting-Started +This package assumes that you have `python 2.7` and `git` installed. This packaged is designed for Abaqus 2016 and it has been used successfully with v6.14; it may or may not work with older versions. It also assumes that you have an Abaqus user subroutine in a git repository with a minimum directory structure as shown here: +``` +. + .git/ + for/ + usub.for + tests/ + testOutput/ + abaqus_v6.env + test_model1.inp + test_model1_expected.py + ... + test_runner.py + .gitignore +``` + +The user subroutine is stored in the `for/` directory and the verification tests are stored in the `/tests/` directory. + +### Install `abaverify` +To install `abaverify`, simply clone it to your subroutine's `tests/` directory. + +The remainder of this section describes how to build your own tests using `abaverify` (e.g., what goes inside the `test_model1.inp`, `test_model1_expected.py`, and `test_runner.py`) files. For a working example, checkout the sample verification test in the `abaverify/tests/tests/` directory. You can run the sample test with the command `python test_runner.py` from the `abaverify/tests/tests/` directory. Note, the default environment file (`abaverify/tests/tests`) is formatted for windows; linux users will need to modify the default environment file to the linux format. + +### Create `.inp` and `.py` files for each test model +A model file (`*.inp` or `*.py`) and corresponding results file (`*_expected.py`) with the same name must be created for each test case. These files are placed in the `tests/` directory. The model file is a typical Abaqus input deck or python script, so no detailed discussion is provided here (any Abaqus model should work). When tests are executed (with the command `python test_runner.py`), the models in the `tests/` directory are run in Abaqus. + +The `*_expected.py` file defines the assertions that are run on the `odb` output from the analysis. After each analysis is completed, the script `abaverify/processresults.py` is called to collect the quantities defined in the `*_expected.py` file. The `*_expected.py` file must contain a list called `"results"` that contains an object for each result of interest. A typical result quantity would be the maximum stress for the stress component `S11`. For example: +``` +parameters = { + "results": + [ + # Simple example to find max value of state variable d2 + { + "type": "max", # Specifies criteria to apply to the output quantity + "identifier": # Identifies which output quantity to interrogate + { + "symbol": "SDV_CDM_d2", + "elset": "ALL", + "position": "Element 1 Int Point 1" + }, + "referenceValue": 0.0 # This the value that the result from the model is compared against + }, + { + ... + + ... + } + ] +} +``` +The value found in the `odb` must match the reference value for the test to pass. In the case above, the test is simply to say that `SDV_CDM_d2` is always zero, since the range of `SDV_CDM_d2` happens to be between 0 and 1. Any history output quantity can be interrogated using one of the following criteria defined in the `type` field: `max`, `min`, `continuous`, `xy_infl_pt`, `disp_at_zero_y`, `log_stress_at_failure_init`, or `slope`. 
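+The `identifier` may also be given directly as a complete history output name string rather than as a dictionary. As a minimal sketch of a `continuous` check written this way (the node number and node set name are illustrative only):
+```
+parameters = {
+    "results":
+        [
+            {
+                "type": "continuous",    # Fails if the output changes by more than referenceValue + tolerance between two increments
+                "identifier": "Reaction force: RF1 at Node 9 in NSET LOADAPP",
+                "referenceValue": 0.0,
+                "tolerance": 0.1
+            }
+        ]
+}
+```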
Here is a more complicated example:
+```
+parameters = {
+    "results":
+        [
+            # More complicated case to find the slope of the stress-strain curve within the interval 0.0001 < x < 0.005
+            {
+                "type": "slope",
+                "step": "Step-1",        # By default the step is assumed to be the first step. Any step can be specified by its name
+                "identifier": [          # The identifier here is an array since we are looking for the slope of a curve defined by x and y
+                    {   # x
+                        "symbol": "LE11",
+                        "elset": "ALL",
+                        "position": "Element 1 Int Point 1"
+                    },
+                    {   # y
+                        "symbol": "S11",
+                        "elset": "ALL",
+                        "position": "Element 1 Int Point 1"
+                    }
+                ],
+                "window": [0.0001, 0.005],    # [min, max] in x
+                "referenceValue": 171420,     # Reference value for E1
+                "tolerance": 1714             # Require that the calculated value is within 1% of the reference value
+            }
+        ]
+}
+```
+The results array can contain as many result objects as needed to verify that the model has performed as designed. Assertions are run on each result object, and if any one fails, the test is marked as failed.
+
+### Create a `test_runner.py` file
+The file `sample_usage.py` gives an example of how to call your newly created tests. By convention, this file is named `test_runner.py`. This file must include:
+1. `import abaverify as av`.
+2. Classes that inherit `av.TestCase` and define functions beginning with `test`, following the usage of `unittest`. See `sample_usage.py` for an example.
+3. A call to `av.runTests()`, which takes one argument: the relative path to your user subroutine (omit the `.f` or `.for` extension; the code appends it automatically).
+
+## Running your tests
+Before running tests, make sure you place an Abaqus environment file in your project's `tests/` directory. At a minimum, the environment file should include the options for compiling your subroutine. If you do not include your environment file, `abaverify` will give an error.
+
+You can run your tests with the syntax defined by `unittest`. To run all tests, execute the following from the `tests/` directory of your project:
+```
+tests $ python test_runner.py
+```
+All of the tests that have been implemented will be run. The last few lines of output indicate the number of tests run and `OK` if they all pass.
+
+To run a single test, add the class and test name. For example, for the input deck `test_CPS4R_tension.inp`, type:
+```
+tests $ python test_runner.py SingleElementTests.test_CPS4R_tension
+```
+
+Several command line options are available, as described below.
+
+The option `-i` or equivalently `--interactive` can be specified to print the Abaqus log data to the terminal. For example:
+```
+tests $ python test_runner.py SingleElementTests.test_C3D8R_simpleShear12 --interactive
+```
+
+The option `-t` or equivalently `--time` can be specified to print the run times for the compiler, packager, and solver to the terminal. For example:
+```
+tests $ python test_runner.py SingleElementTests.test_C3D8R_simpleShear12 --time
+```
+
+The option `-c` or equivalently `--preCompileCode` can be specified to use `abaqus make` to compile the code into a binary before running one or more tests. A function that compiles the code must be provided to `abaverify` as an argument to the `runTests` function call in `test_runner.py`. The `usub_lib` option must be defined in the environment file.
+
+The option `-e` or equivalently `--useExistingBinaries` can be specified to reuse the most recently compiled version of the code.
+
+The option `-r` or equivalently `--useExistingResults` can be specified to reuse the most recent test results. The net effect is that only the post-processing portion of the code is run, so you don't have to wait for the model to run just to debug a `_expected.py` file or `processresults.py`.
+
+The option `-s` or equivalently `--specifyPathToSub` can be used to override the relative path to the user subroutine specified in the call to `av.runTests()` in your `test_runner.py` file.
diff --git a/sample_usage.py b/sample_usage.py
new file mode 100644
index 0000000..24a38ae
--- /dev/null
+++ b/sample_usage.py
@@ -0,0 +1,74 @@
+"""
+This file contains a sample of how you might use the abaverify module.
+
+This file is a Python script that is meant as a sample of what you would include
+in your project. The assumed directory structure of your project is:
+
+project/
+    for/
+        usub.for                    <-- User subroutine
+    test/
+        abaverify/
+        test_model1.inp             <-- A verification model
+        test_model1_expected.py     <-- Instructs abaverify what the nominal results should be
+        test_model2.inp
+        test_model2_expected.py
+        ...
+        sample_usage.py             <-- This script. We suggest you rename it something like 'test_runner.py'
+
+
+This script should be called from the command line in the test/ directory. The conventions used with unittest
+apply. To run a single test, type:
+
+    test $ python sample_usage.py SingleElementTests.test_model1
+
+To run all tests in a class:
+
+    test $ python sample_usage.py SingleElementTests
+
+To run all tests:
+
+    test $ python sample_usage.py
+"""
+
+# Import the module
+import abaverify as av
+
+
+# Create a class to group your tests. Follow the same pattern as unittest.
+# You need to create at least one test class and add at least one test to it in order to do anything useful with this module.
+# The assumption is that there is one 'test_' function defined for each model. Assertions are run for each nominal expected
+# result defined in the corresponding _expected.py file.
+class SingleElementTests(av.TestCase):
+
+    def test_model1(self):
+        self.runTest('test_model1')
+
+    def test_model2(self):
+        self.runTest('test_model2')
+
+
+# Optionally, create parametric tests by defining a dictionary of parameters to vary
+class ParametricMixedModeMatrix(av.TestCase):
+
+    # Specify the metaclass (do not change this line)
+    __metaclass__ = av.ParametricMetaClass
+
+    # Refers to the template input file name
+    baseName = "test_C3D8R_mixedModeMatrix"
+
+    # Range of parameters to test; all combinations are tested
+    # alpha is the angle of the crack normal
+    # beta defines the direction of tensile loading in Step-1 and compressive loading in Step-2
+    parameters = {'alpha': range(0, 50, 10), 'beta': range(0, 210, 30), 'friction': [0.00, 0.15, 0.30, 0.45, 0.60]}
+
+
+
+# That's it for setup. Add as many tests as you want!
+
+# This last line is critical: it calls the abaverify code so that when you run this script
+# abaverify is executed. The function takes one optional argument: a function to call to compile
+# the subroutine code with abaqus make (not shown here).
+if __name__ == "__main__":
+    # av.runTests(relPathToUserSub='../for/vumat')
+    av.runTests(relPathToUserSub='../path/to/your/subroutine/without/file/extension')
diff --git a/tests/for/materialProperties.for b/tests/for/materialProperties.for
new file mode 100644
index 0000000..509140a
--- /dev/null
+++ b/tests/for/materialProperties.for
@@ -0,0 +1,160 @@
+
+ module materialProperties
+ ! Contains code relevant to importing material properties from props array
+
+ ! 
Data structure for material properties + ! FYI naming this type "properties" triggered an error, so its named mproperties + type :: mproperties + real(kind=kind(1.d0)) :: E1, E2, E3, nu12, nu13, nu23, G12, G13, G23 + real(kind=kind(1.d0)) :: nu21, nu31, nu32 + end type + + ! Machine precision + integer, parameter, private :: dp = kind(1.d0) + + + Contains + subroutine materialProperties_load(nprops, props, properties) + ! Loads the props array into a properties object + ! Material symmetry is handled within this subroutine + +#ifdef umat + include 'aba_param.inc' +#else + include 'vaba_param.inc' +#endif + + ! Arguments + integer, intent(in) :: nprops + dimension props(nprops) ! Intent in + type(mproperties), intent(out) :: properties + + ! Locals + real(dp) :: E, G, nu + + ! Branch depending on nprops + ! Isotropic + if (nprops .EQ. 2) then + properties%E1 = props(1) + properties%E2 = props(1) + properties%E3 = props(1) + + properties%nu12 = props(2) + properties%nu13 = props(2) + properties%nu23 = props(2) + + G = props(1)/(2*(1+props(2))) + properties%G12 = G + properties%G13 = G + properties%G23 = G + + ! Transversely isotropic + else if (nprops .EQ. 5) then + properties%E1 = props(1) + properties%E2 = props(2) + properties%E3 = props(2) + + properties%nu12 = props(3) + properties%nu13 = properties%E1/(2*properties%G13) - 1.d0 + properties%nu23 = properties%E2/(2*properties%G23) - 1.d0 + + properties%G12 = props(4) + properties%G13 = props(5) + properties%G23 = props(5) + + ! Orthotropic + else if (nprops .EQ. 9) then + properties%E1 = props(1) + properties%E2 = props(2) + properties%E3 = props(3) + + properties%nu12 = props(4) + properties%nu13 = props(5) + properties%nu23 = props(6) + + properties%G12 = props(7) + properties%G13 = props(8) + properties%G23 = props(9) + + else + Call ABQERR(-3,'BAD INPUT. Expecting that nprops is 2, 5, or 9. Received nprops=%I.',nprops,realv,charv) + + end if + + + ! Calculate inverse Poisson's ratios + properties%nu21 = properties%nu12 * (properties%E2/properties%E1) + properties%nu31 = properties%nu13 * (properties%E3/properties%E1) + properties%nu32 = properties%nu23 * (properties%E3/properties%E2) + + ! TODO Add checks on elastic constants + + + return + end subroutine materialProperties_load + + subroutine materialProperties_elasticStiffness(elementType, properties, stiff) + ! Computes the stiffness tensor + + ! Arguments + character(len=80), intent(in) :: elementType ! Stores type of element if it is recognized + type(mproperties), intent(in) :: properties ! Material properties + real(dp), dimension(:,:) :: stiff ! Stiffness matrix + + ! Locals + real :: preFactor + + ! Define the stiffness matrix + if ((elementType .EQ. 'CPS') .OR. (elementType .EQ. 'S') .OR. (elementType .EQ. 'SC')) then ! Plane stress + stiff = 0.d0 + stiff(1,1) = properties%E1/(1.d0-properties%nu12*properties%nu21) + stiff(1,2) = (properties%nu12*properties%E2)/(1.d0-properties%nu12*properties%nu21) + stiff(2,2) = properties%E2/(1.d0-properties%nu12*properties%nu21) + stiff(2,1) = stiff(1,2) + + ! Handle vumat weirdness where for some reason I don't understand, ndir=3 for plane stress in explicit + if (size(stiff) .EQ. 9) then + ! Implicit + stiff(3,3) = properties%G12 + else + ! Explicit, where ndir=3 + stiff(4,4) = properties%G12 + end if + + else if ((elementType .EQ. 'CPE') .OR. (elementType .EQ. 'C3D')) then ! 
Plane strain and 3D + preFactor = (1-properties%nu12*properties%nu21-properties%nu23*properties%nu32-properties%nu13*properties%nu31 & + -2*properties%nu21*properties%nu32*properties%nu13)/(properties%E1*properties%E2*properties%E3) + + stiff = 0.d0 + stiff(1,1) = (1-properties%nu23*properties%nu32)/(properties%E2*properties%E3*preFactor) + stiff(2,2) = (1-properties%nu13*properties%nu31)/(properties%E1*properties%E3*preFactor) + stiff(3,3) = (1-properties%nu12*properties%nu21)/(properties%E1*properties%E2*preFactor) + stiff(1,2) = (properties%nu21+properties%nu31*properties%nu23)/(properties%E2*properties%E3*preFactor) + stiff(1,3) = (properties%nu31+properties%nu21*properties%nu32)/(properties%E2*properties%E3*preFactor) + stiff(2,1) = stiff(1,2) + stiff(2,3) = (properties%nu32+properties%nu12*properties%nu31)/(properties%E1*properties%E3*preFactor) + stiff(3,1) = stiff(1,3) + stiff(3,2) = stiff(2,3) + stiff(4,4) = properties%G12 + + ! Additional components for 3D elements + if (elementType .EQ. 'C3D') then + stiff(4,4) = properties%G12 +#ifdef umat + stiff(5,5) = properties%G13 + stiff(6,6) = properties%G23 +#else + stiff(5,5) = properties%G23 + stiff(6,6) = properties%G13 +#endif + end if + + else + Call ABQERR(-3,'Did not find a recognized element type in material name.',intv,realv,charv) + + end if + + return + end subroutine materialProperties_elasticStiffness + + end module diff --git a/tests/for/umat.for b/tests/for/umat.for new file mode 100644 index 0000000..86ce3d6 --- /dev/null +++ b/tests/for/umat.for @@ -0,0 +1,63 @@ +#ifndef umat +#define umat 0 +#endif + +#include "usub-util.for" +#include "materialProperties.for" + + + SUBROUTINE UMAT(STRESS,STATEV,DDSDDE,SSE,SPD,SCD, + * RPL,DDSDDT,DRPLDE,DRPLDT, + * STRAN,DSTRAN,TIME,DTIME,TEMP,DTEMP,PREDEF,DPRED,CMNAME, + * NDI,NSHR,NTENS,NSTATV,PROPS,NPROPS,COORDS,DROT,PNEWDT, + * CELENT,DFGRD0,DFGRD1,NOEL,NPT,LAYER,KSPT,JSTEP,KINC) + + ! Load modulues + use materialProperties + + INCLUDE 'ABA_PARAM.INC' + + CHARACTER*80 CMNAME + DIMENSION STRESS(NTENS),STATEV(NSTATV), & + DDSDDE(NTENS,NTENS),DDSDDT(NTENS),DRPLDE(NTENS), & + STRAN(NTENS),DSTRAN(NTENS),TIME(2),PREDEF(1),DPRED(1), & + PROPS(NPROPS),COORDS(3),DROT(3,3),DFGRD0(3,3),DFGRD1(3,3), & + JSTEP(4) + + ! == END standard Abaqus umat interface == + + ! Machine precision + integer, parameter :: dp = kind(1.d0) + + ! Local + character(len=80) mat, elementType ! Stores type of element if it is recognized + real(dp) :: stiff(ntens, ntens) ! Stiffness matrix (often written as 'C') + real(dp) :: strain(ntens) ! Strain vector (strain at the start of the increment + dstran) + type(mproperties) :: p ! Material properties + + ! --------------- END declarations --------------- + + ! Identify element type in material name + call getElementType(cmname, mat, elementType) + + ! Load material properties, p + call materialProperties_load(nprops, props, p) + + ! Compute the total strain + do i=1, ntens + strain(i) = stran(i) + dstran(i) + end DO + + ! Load the elastic stiffness matrix + call materialProperties_elasticStiffness(elementType, p, stiff) + + ! Compute the stress vector + stress = matmul(stiff, strain) + + + ! Compute the jacobian (DDSDDE) + ddsdde = stiff + + + RETURN + END diff --git a/tests/for/usub-util.for b/tests/for/usub-util.for new file mode 100644 index 0000000..e07536f --- /dev/null +++ b/tests/for/usub-util.for @@ -0,0 +1,86 @@ + + subroutine ABQERR(lop, string, intv, realv, charv) + ! Generic call to report error in subroutine + ! 
Abstracts the differences betwen implicit and explicit subroutines + + ! Arguments (all are intent in) + integer :: lop + character*500 string + character*8 charv(*) + dimension intv(*),realv(*) + + + +#ifdef umat + Call STDB_ABQERR(lop, string, intv, realv, charv) +#else + Call XPLB_ABQERR(lop, string, intv, realv, charv) +#endif + + return + end + + + subroutine splitAtFirstMatch(string, char, firstHalf, secondHalf) + ! Attempts to split a string at the specified char + ! Returns an array of the two parts of the string in output + ! If no match is found, the second value of the array output is empty + ! Uses a brute force search + ! String length is limited to 500 + + ! Arguments + character(len=*), intent (in) :: string + character(len=1), intent (in) :: char + character(len=*), intent (out) :: firstHalf, secondHalf + + + ! Brute force search + i = scan(string, char) + if (i .GT. 0) then + firstHalf = string(:i-1) + secondHalf = string(i+1:) + else + firstHalf = string + secondHalf = '' + end if + + return + end + + subroutine getElementType(cmname, mat, elementType) + ! Assumes that the material is named with the convention MATERIALNAME-ELEMENTTYPE + ! splits on the '-' and returns the material name and elementType + + ! Arguments + character(len=*), intent(in) :: cmname + character(len=*), intent(out) :: mat, elementType + + ! Locals + character(len=80) :: empty + + call splitAtFirstMatch(cmname, '-', mat, elementType) + if (elementType .EQ. empty) then + Call ABQERR(-3,'Found element type null in material name.',intv,realv,charv) + + else if (index(elementType,'CPS') .GT. 0) then + elementType = 'CPS' + + else if (index(elementType,'CPE') .GT. 0) then + elementType = 'CPE' + + else if (index(elementType, 'C3D') .GT. 0) then + elementType = 'C3D' + + else if (index(elementType, 'SC') .GT. 0) then + elementType = 'SC' + + else if (index(elementType, 'S') .GT. 0) then + elementType = 'S' + + else + Call ABQERR(-3,'Did not find a recognized element type in material name.',intv,realv,charv) + + end if + + return + end \ No newline at end of file diff --git a/tests/for/vumat.for b/tests/for/vumat.for new file mode 100644 index 0000000..e95fa0d --- /dev/null +++ b/tests/for/vumat.for @@ -0,0 +1,75 @@ +#include "usub-util.for" +#include "materialProperties.for" + + subroutine vumat( & + ! Read only - + nblock, ndir, nshr, nstatev, nfieldv, nprops, lanneal, & + stepTime, totalTime, dt, cmname, coordMp, charLength, & + props, density, strainInc, relSpinInc, & + tempOld, stretchOld, defgradOld, fieldOld, & + stressOld, stateOld, enerInternOld, enerInelasOld, & + tempNew, stretchNew, defgradNew, fieldNew, & + + ! Write only - + stressNew, stateNew, enerInternNew, enerInelasNew ) + + ! Load modulues + use materialProperties + +#ifdef umat + include 'aba_param.inc' +#else + include 'vaba_param.inc' +#endif + + ! All arrays dimensioned by (*) are not used in this algorithm + dimension props(nprops), density(nblock), coordMp(nblock,*), & + charLength(*), strainInc(nblock,ndir+nshr), relSpinInc(*), tempOld(*), & + stretchOld(*), defgradOld(*), fieldOld(*), stressOld(nblock,ndir+nshr), & + stateOld(nblock,nstatev), enerInternOld(nblock), enerInelasOld(nblock), & + tempNew(*), stretchNew(*), defgradNew(*), fieldNew(*), stressNew(nblock,ndir+nshr), & + stateNew(nblock,nstatev), enerInternNew(nblock), enerInelasNew(nblock) + + character*80 cmname + + ! == END standard Abaqus umat interface == + + ! Machine precision + integer, parameter :: dp = kind(1.d0) + + ! 
Locals + character(len=80) mat, elementType ! Stores type of element if it is recognized + real(dp) :: stiff(ndir+nshr, ndir+nshr) ! Stiffness tensor (often written as 'C') + type(mproperties) :: p ! Material properties + + ! --------------- END declarations --------------- + + ! Identify element type in material name + call getElementType(cmname, mat, elementType) + + ! Load material properties, p + call materialProperties_load(nprops, props, p) + + ! Load the elastic stiffness matrix + call materialProperties_elasticStiffness(elementType, p, stiff) + + + + ! nblock loop + do 100 i = 1, nblock + + ! VUMAT provides tensor shear strain + ! Convert tensor shear strains to engineering shear strain + strainInc(i,4:) = 2.d0*strainInc(i,4:) + + ! Compute stress + stressNew(i,:) = stressOld(i,:) + matmul(stiff, strainInc(i,:)) + + ! Change strainInc back to tensor strain + strainInc(i,4:) = strainInc(i,4:)/2.d0 + + 100 continue + + + return + end diff --git a/tests/tests/abaqus_v6.env b/tests/tests/abaqus_v6.env new file mode 100644 index 0000000..b9f7462 --- /dev/null +++ b/tests/tests/abaqus_v6.env @@ -0,0 +1,9 @@ +compile_fortran=['ifort', '/free', + '/c','/DABQ_WIN86_64', '/extend-source', '/fpp', + '/iface:cref', '/recursive', '/Qauto-scalar', + '/QxSSE3', '/QaxAVX', + '/heap-arrays:1', + '/Od', '/Ob0', # <-- Optimization Debugging + # '/Zi', # <-- Debugging + #'/gen-interfaces', '/warn:interfaces', '/check', '/fpe:0', + '/include:%I'] \ No newline at end of file diff --git a/tests/tests/test_CPS4R_tension.inp b/tests/tests/test_CPS4R_tension.inp new file mode 100644 index 0000000..f438fe9 --- /dev/null +++ b/tests/tests/test_CPS4R_tension.inp @@ -0,0 +1,108 @@ +*Heading +** Job name: test_CPS4R_IM7-8552-Ply_explicit_VUMAT_uniaxialTension Model name: Model-1 +** Generated by: Abaqus/CAE 2016 +*Preprint, echo=NO, model=NO, history=NO, contact=NO +** +** PARTS +** +*Part, name=Part-1 +*Node + 1, 0., 0. + 2, 0.150000006, 0. + 3, 0., 0.150000006 + 4, 0.150000006, 0.150000006 +*Element, type=CPS4R +1, 1, 2, 4, 3 +*Nset, nset=loadApp + 3, +*Nset, nset=loadFollowers + 4, +*Nset, nset=pin + 1, +*Nset, nset=roller + 2, +*Nset, nset=all, generate + 1, 4, 1 +*Elset, elset=all + 1, +*Orientation, name=Ori-1 + 0., 1., 0., -1., 0., 0. +3, 0. +** Section: Section-1 +*Solid Section, elset=all, orientation=Ori-1, material=IM7-8552-Ply-CPS4R +0.15, +*End Part +** +** +** ASSEMBLY +** +*Assembly, name=Assembly +** +*Instance, name=Part-1-1, part=Part-1 +*End Instance +** +** Constraint: LoadApp +*Equation +2 +Part-1-1.loadFollowers, 2, -1. +Part-1-1.loadApp, 2, 1. +*End Assembly +*Amplitude, name=Amp-1, definition=SMOOTH STEP + 0., 0., 0.1, 1. +** +** MATERIALS +** +*Material, name=IM7-8552-Ply-CPS4R +*Density + 1.59e-09, +*Depvar + 1, +*User Material, constants=9 +171000.,9100.,9100., 0.32, 0.32, 0.52,5300.,5300. +3000., +** ---------------------------------------------------------------- +** +** STEP: Step-1 +** +*Step, name=Step-1, nlgeom=NO +*Dynamic, Explicit +, 0.1 +*Bulk Viscosity +0.06, 1.2 +** Mass Scaling: Semi-Automatic +** Whole Model +*Fixed Mass Scaling, factor=10000. 
+** +** BOUNDARY CONDITIONS +** +** Name: BC-loadApp Type: Displacement/Rotation +*Boundary, amplitude=Amp-1 +Part-1-1.loadApp, 2, 2, 0.015 +** Name: BC-pin Type: Displacement/Rotation +*Boundary, amplitude=Amp-1 +Part-1-1.pin, 1, 1 +Part-1-1.pin, 2, 2 +** Name: BC-roller Type: Displacement/Rotation +*Boundary, amplitude=Amp-1 +Part-1-1.roller, 2, 2 +** +** OUTPUT REQUESTS +** +*Restart, write, number interval=1, time marks=NO +** +** FIELD OUTPUT: F-Output-1 +** +*Output, field, variable=PRESELECT +** +** HISTORY OUTPUT: H-Output-2 +** +*Output, history +*Element Output, elset=Part-1-1.all +S11, S22 +** +** HISTORY OUTPUT: H-Output-1 +** +*Output, history, time interval=0.0001 +*Energy Output +ALLAE, ALLCD, ALLDMD, ALLFD, ALLIE, ALLKE, ALLMW, ALLPD, ALLSE, ALLVD, ALLWK, ETOTAL +*End Step diff --git a/tests/tests/test_CPS4R_tension_expected.py b/tests/tests/test_CPS4R_tension_expected.py new file mode 100644 index 0000000..d71338d --- /dev/null +++ b/tests/tests/test_CPS4R_tension_expected.py @@ -0,0 +1,26 @@ +parameters = { + "results": [ + { + "type": "max", + "identifier": + { + "symbol": "S11", + "elset": "ALL", + "position": "Element 1 Int Point 1" + }, + "referenceValue": 17100.0, + "tolerance": 1.0 + }, + { + "type": "max", + "identifier": + { + "symbol": "S22", + "elset": "ALL", + "position": "Element 1 Int Point 1" + }, + "referenceValue": 0, + "tolerance": 0.1 + } + ] +} diff --git a/tests/tests/test_runner.py b/tests/tests/test_runner.py new file mode 100644 index 0000000..2be27e6 --- /dev/null +++ b/tests/tests/test_runner.py @@ -0,0 +1,26 @@ +# This is a crude hack so that we can import the abaverify module for testing purposes +import inspect +import os +import sys +pathForThisFile = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) +pathToAbaverifyDir = os.path.join(pathForThisFile, os.pardir, os.pardir) +sys.path.insert(0, pathToAbaverifyDir) + +# Normal import line +import abaverify as av + + +class SingleElementTests(av.TestCase): + + def test_CPS4R_tension(self): + self.runTest('test_CPS4R_tension') + + + +# That's it for setup. Add as many tests as you want! + +# This last line is critical, it calls the abaverify code so that when you run this script +# abaverify is executed. The function takes one optional argument: a function to call to compile +# the subroutine code with abaqus make (not shown here). +if __name__ == "__main__": + av.runTests(relPathToUserSub='../for/vumat') \ No newline at end of file