Verified totals for DASimpleFoam (#715)
* Added first lib with test yml.

* Fixed a bug in the test script.

* Added the first v4 test.

* Added the missing DAOption Make.

* Added test codecov

* Update and rename codecov.yml to code_cov.yml

* Added unit_test verification mechanism

* Unified the incompressible and compressible libs.

* Added more libs.

* Renamed DAObjFunc to DAFunction.

* More renames.

* Re-structured the DAFunction.

* Added more libs and deleted unused DAPart and DAJac subclasses.

* DASolver compiled.

* Re-organized coloring util.

* Re-organized pyDASolvers.

* Fixed the unit test setup file.

* Added the DAInput and DAOutput classes.

* Added -w to all the options files to disable warnings.

* Changed the interfaces for many classes.

* Changed the Allmake files.

* Changed init file.

* Changed mphys and pyDAFoam python layers.

* Changed DASolver and added the calcJacTVecProduct function.

* Fixed an issue in init file.

* DAOutput uses its own dicts.

* Added the missing pip install for tests.

* Totals accurate for DASimpleFoam.
friedenhe authored Dec 5, 2024
1 parent 2cc7b55 commit 50471a5
Showing 185 changed files with 5,508 additions and 10,127 deletions.
45 changes: 45 additions & 0 deletions .github/workflows/code_cov.yml
@@ -0,0 +1,45 @@
name: codecov

on: [push, pull_request]

env:
  REPO_NAME: 'dafoam'
  DOCKER_WORKING_DIR: '/home/dafoamuser/dafoam/$REPO_NAME'
  DOCKER_MOUNT_DIR: '/home/dafoamuser/mount/$REPO_NAME'
  DOCKER_TAG: 'latest'
  DOCKER_ENV_FILE: '/home/dafoamuser/dafoam/loadDAFoam.sh'

jobs:
  code_coverage:
    runs-on: ubuntu-20.04
    name: Codecov
    strategy:
      fail-fast: false
      matrix:
        testConfig: [incompressible]
        include:
          - testConfig: incompressible
            args: 'incompressible'

    steps:
    - uses: actions/checkout@main
    - name: Create the docker container and run the tests
      run: |
        docker pull dafoam/opt-packages:${{env.DOCKER_TAG}}
        docker run -i -d -u dafoamuser --name regtest -v $GITHUB_WORKSPACE:${{env.DOCKER_MOUNT_DIR}} dafoam/opt-packages:${{env.DOCKER_TAG}} /bin/bash
        docker exec -i regtest /bin/bash -c "rm -rf ${{env.DOCKER_WORKING_DIR}} && cp -r ${{env.DOCKER_MOUNT_DIR}} ${{env.DOCKER_WORKING_DIR}}"
        docker exec regtest sed -i 's/-std=c++11/-std=c++11 -fprofile-arcs -ftest-coverage/g' ${{env.DOCKER_WORKING_DIR}}/src/adjoint/DAOption/Make/options
        docker exec regtest sed -i 's/-lfiniteVolume$(DF_LIB_SUFFIX)/-lfiniteVolume$(DF_LIB_SUFFIX) -lgcov/g' ${{env.DOCKER_WORKING_DIR}}/src/adjoint/DAOption/Make/options
        docker exec regtest sed -i 's/-std=c++11/-std=c++11 -fprofile-arcs -ftest-coverage/g' ${{env.DOCKER_WORKING_DIR}}/src/adjoint/DAUtility/Make/options
        docker exec regtest sed -i 's/-lfiniteVolume$(DF_LIB_SUFFIX)/-lfiniteVolume$(DF_LIB_SUFFIX) -lgcov/g' ${{env.DOCKER_WORKING_DIR}}/src/adjoint/DAUtility/Make/options
        docker exec -i regtest /bin/bash -c ". ${{env.DOCKER_ENV_FILE}} && cd ${{env.DOCKER_WORKING_DIR}} && ./Allmake"
        docker exec -i regtest /bin/bash -c ". ${{env.DOCKER_ENV_FILE}} && cd ${{env.DOCKER_WORKING_DIR}} && pip install ."
        docker exec -i regtest /bin/bash -c ". ${{env.DOCKER_ENV_FILE}} && cd ${{env.DOCKER_WORKING_DIR}}/tests && ./Allrun"
        docker exec -i regtest /bin/bash -c ". ${{env.DOCKER_ENV_FILE}} && cd ${{env.DOCKER_WORKING_DIR}} && lcov --capture --directory . --output-file coverage.info && echo dafoamuser | sudo -S cp -r coverage.info ${{env.DOCKER_MOUNT_DIR}}/"
    - name: Upload
      uses: codecov/codecov-action@v4
      with:
        fail_ci_if_error: true
        files: ./coverage.info
        token: ${{secrets.CODECOV_TOKEN}}
        verbose: true
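For reference, the four sed calls in the run step above splice gcov instrumentation into the wmake options of the DAOption and DAUtility libraries before the build. The snippet below is only an illustrative Python equivalent of that edit (the paths and option strings are taken from the workflow above; the script itself is not part of the repository):

```python
from pathlib import Path

# Substitutions mirroring the sed commands in the workflow step above:
# add coverage instrumentation to the compile flags and -lgcov to the link line.
SUBS = [
    ("-std=c++11", "-std=c++11 -fprofile-arcs -ftest-coverage"),
    ("-lfiniteVolume$(DF_LIB_SUFFIX)", "-lfiniteVolume$(DF_LIB_SUFFIX) -lgcov"),
]

for lib in ("DAOption", "DAUtility"):
    options = Path("src/adjoint") / lib / "Make" / "options"
    text = options.read_text()
    for old, new in SUBS:
        text = text.replace(old, new)
    options.write_text(text)
```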
34 changes: 34 additions & 0 deletions .github/workflows/tests.yml
@@ -0,0 +1,34 @@
name: tests

on: [push, pull_request]

env:
  REPO_NAME: 'dafoam'
  DOCKER_WORKING_DIR: '/home/dafoamuser/dafoam/$REPO_NAME'
  DOCKER_MOUNT_DIR: '/home/dafoamuser/mount/$REPO_NAME'
  DOCKER_TAG: 'latest'
  DOCKER_ENV_FILE: '/home/dafoamuser/dafoam/loadDAFoam.sh'
  DOCKER_OF_ADF_BASHRC: '/home/dafoamuser/dafoam/OpenFOAM/OpenFOAM-v1812-ADF/etc/bashrc'
  DOCKER_OF_ADR_BASHRC: '/home/dafoamuser/dafoam/OpenFOAM/OpenFOAM-v1812-ADR/etc/bashrc'

jobs:

  reg_tests:
    runs-on: ubuntu-20.04
    name: Tests
    strategy:
      fail-fast: false
      matrix:
        testConfig: [incompressible]
        include:
          - testConfig: incompressible
            args: 'incompressible'
    steps:
    - uses: actions/checkout@v3
    - name: Create the docker container and run the tests
      run: |
        docker pull dafoam/opt-packages:${{env.DOCKER_TAG}}
        docker run -i -d -u dafoamuser --name regtest -v $GITHUB_WORKSPACE:${{env.DOCKER_MOUNT_DIR}} dafoam/opt-packages:${{env.DOCKER_TAG}} /bin/bash
        docker exec -i regtest /bin/bash -c "rm -rf ${{env.DOCKER_WORKING_DIR}} && cp -r ${{env.DOCKER_MOUNT_DIR}} ${{env.DOCKER_WORKING_DIR}}"
        docker exec -i regtest /bin/bash -c ". ${{env.DOCKER_ENV_FILE}} && cd ${{env.DOCKER_WORKING_DIR}} && ./Allmake && pip install ."
        docker exec -i regtest /bin/bash -c ". ${{env.DOCKER_ENV_FILE}} && cd ${{env.DOCKER_WORKING_DIR}}/tests && ./Allrun"
5 changes: 2 additions & 3 deletions Allclean
@@ -5,9 +5,8 @@ if [ -z "$WM_PROJECT" ]; then
exit 1
fi

cd src/adjoint && rm -rf lnInclude && cd -
cd src/adjoint && ./Allclean && cd -
cd src/pyUnitTests && ./Allclean && cd -
cd src/pyDASolvers && ./Allclean && cd -
cd src/utilities/coloring && ./Allclean && cd -
cd src/utilities/postProcessing && ./Allclean && cd -
cd src/utilities/preProcessing && ./Allclean && cd -
cd tests && ./Allclean && cd -
17 changes: 5 additions & 12 deletions Allmake
@@ -8,15 +8,8 @@ if [ -z "$WM_PROJECT" ]; then
exit 1
fi

if [ -z "$1" ]; then
echo "Argument not found. Using the default value: opt"
argm="opt"
else
argm="$1"
fi

cd src/adjoint && ./Allmake $argm && cd -
cd src/pyDASolvers && ./Allmake $argm && cd -
cd src/utilities/coloring && ./Allmake $argm && cd -
cd src/utilities/postProcessing && ./Allmake $argm && cd -
cd src/utilities/preProcessing && ./Allmake $argm && cd -
wmakeLnInclude src/adjoint
cd src/adjoint && ./Allmake && cd -
cd src/pyUnitTests && ./Allmake && cd -
cd src/pyDASolvers && ./Allmake && cd -
cd src/utilities/coloring && ./Allmake && cd -
1 change: 1 addition & 0 deletions dafoam/__init__.py
@@ -2,3 +2,4 @@

from .pyDAFoam import PYDAFOAM
from . import optFuncs
from . import pyUnitTests
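The largest change is in the mphys layer below: the stored xvVec/wVec PETSc vectors and the per-derivative routines (calcdRdWTPsiAD, calcdRdXvTPsiAD, calcdFdWAD, calcdFdXvAD) are replaced by a single matrix-free, transposed Jacobian-vector product, calcJacTVecProduct, driven with plain numpy arrays. The sketch below only illustrates the calling pattern as it appears in the hunks that follow; the DASolver object (an initialized PYDAFOAM instance whose AD solver has been set up) is assumed, not constructed here.

```python
import numpy as np

def dRdWT_times_psi(DASolver, states, psi):
    """Sketch of [dRdW]^T * psi using the new matrix-free interface.

    Assumes DASolver is an initialized PYDAFOAM instance whose AD solver has
    been prepared with solverAD.initializedRdWTMatrixFree(); the call pattern
    mirrors apply_linear in the diff below.
    """
    n = DASolver.getNLocalAdjointStates()  # local number of adjoint states
    product = np.zeros(n)                  # receives the local part of [dRdW]^T * psi
    # argument pattern copied from the diff below; the integer flags (1) are
    # passed through unchanged
    DASolver.solverAD.calcJacTVecProduct(
        "stateVar", n, 1, states, "residual", n, 1, psi, product
    )
    return product
```

The same routine also handles the "volCoord" input and the scalar "function" output, as the apply_linear and compute_jacvec_product hunks below show.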
119 changes: 76 additions & 43 deletions dafoam/mphys/mphys_dafoam.py
@@ -582,7 +582,7 @@ def setup(self):
# by default, we will not have a separate optionDict attached to this
# solver. But if we do multipoint optimization, we need to use the
# optionDict for each point because each point may have different
# objFunc and primalBC options
# function and primalBC options
self.optionDict = None

# Initialize the design variable functions, e.g., aoa, actuator
@@ -595,10 +595,13 @@ def setup(self):
self.DVCon = None

# initialize the dRdWT matrix-free matrix in DASolver
DASolver.solverAD.initializedRdWTMatrixFree(DASolver.xvVec, DASolver.wVec)
DASolver.solverAD.initializedRdWTMatrixFree()

# create the adjoint vector
self.psi = self.DASolver.wVec.duplicate()
localAdjointSize = DASolver.getNLocalAdjointStates()
self.psi = PETSc.Vec().create(comm=PETSc.COMM_WORLD)
self.psi.setSizes((localAdjointSize, PETSc.DECIDE), bsize=1)
self.psi.setFromOptions()
self.psi.zeroEntries()

# if true, we need to compute the coloring
@@ -727,6 +730,11 @@ def solve_nonlinear(self, inputs, outputs):
# set the runStatus, this is useful when the actuator term is activated
DASolver.setOption("runStatus", "solvePrimal")

# first we assign the volume coordinates from the inputs dict
# to the OF layer (deform the mesh)
volCoords = inputs["%s_vol_coords" % self.discipline]
DASolver.setVolCoords(volCoords)

# assign the optionDict to the solver
self.apply_options(self.optionDict)

@@ -757,13 +765,17 @@
DASolver.evalFunctions(funcs, evalFuncs=self.evalFuncs)

# assign the computed flow states to outputs
outputs["%s_states" % self.discipline] = DASolver.getStates()
states = DASolver.getStates()
outputs["%s_states" % self.discipline] = states

# if the primal solution fails, we raise an AnalysisError and let the optimizer handle it
fail = funcs["fail"]
if fail:
raise AnalysisError("Primal solution failed!")

# set states
DASolver.setStates(states)

def linearize(self, inputs, outputs, residuals):
# NOTE: we do not do any computation in this function, just print some information

@@ -795,34 +807,43 @@ def apply_linear(self, inputs, outputs, d_inputs, d_outputs, d_residuals, mode):
func(dvVal, DASolver)

# assign the states in outputs to the OpenFOAM flow fields
# NOTE: this is not strictly necessary because setStates has already been called in solve_nonlinear;
# we call it again here just to be on the safe side
DASolver.setStates(outputs["%s_states" % self.discipline])

designVariables = DASolver.getOption("designVar")

localAdjSize = DASolver.getNLocalAdjointStates()
localXvSize = DASolver.getNLocalPoints() * 3

if "%s_states" % self.discipline in d_residuals:

# get the reverse mode AD seed from d_residuals
resBar = d_residuals["%s_states" % self.discipline]
# convert the seed array to Petsc vector
resBarVec = DASolver.array2Vec(resBar)

seed = d_residuals["%s_states" % self.discipline]

# this computes [dRdW]^T*Psi using reverse mode AD
if "%s_states" % self.discipline in d_outputs:
prodVec = DASolver.wVec.duplicate()
prodVec.zeroEntries()
DASolver.solverAD.calcdRdWTPsiAD(DASolver.xvVec, DASolver.wVec, resBarVec, prodVec)
wBar = DASolver.vec2Array(prodVec)
d_outputs["%s_states" % self.discipline] += wBar
product = np.zeros(localAdjSize)
jacInput = outputs["%s_states" % self.discipline]
DASolver.solverAD.calcJacTVecProduct(
"stateVar", localAdjSize, 1, jacInput, "residual", localAdjSize, 1, seed, product
)
d_outputs["%s_states" % self.discipline] += product

# loop over all d_inputs keys and compute the matrix-vector products accordingly
for inputName in list(d_inputs.keys()):
# this computes [dRdXv]^T*Psi using reverse mode AD
if inputName == "%s_vol_coords" % self.discipline:
prodVec = DASolver.xvVec.duplicate()
prodVec.zeroEntries()
DASolver.solverAD.calcdRdXvTPsiAD(DASolver.xvVec, DASolver.wVec, resBarVec, prodVec)
xVBar = DASolver.vec2Array(prodVec)
d_inputs["%s_vol_coords" % self.discipline] += xVBar
product = np.zeros(localXvSize)
jacInput = inputs["%s_vol_coords" % self.discipline]
DASolver.solverAD.calcJacTVecProduct(
"volCoord", localXvSize, 1, jacInput, "residual", localAdjSize, 1, seed, product
)
d_inputs["%s_vol_coords" % self.discipline] += product
elif inputName == "q_conduct":
# calculate [dRdQ]^T*Psi for thermal
volCoords = inputs["%s_vol_coords" % self.discipline]
@@ -999,7 +1020,7 @@ def solve_linear(self, d_outputs, d_residuals, mode):
if DASolver.getOption("writeMinorIterations"):
if DASolver.dRdWTPC is None or DASolver.ksp is None:
DASolver.dRdWTPC = PETSc.Mat().create(self.comm)
DASolver.solver.calcdRdWT(DASolver.xvVec, DASolver.wVec, 1, DASolver.dRdWTPC)
DASolver.solver.calcdRdWT(1, DASolver.dRdWTPC)
DASolver.ksp = PETSc.KSP().create(self.comm)
DASolver.solverAD.createMLRKSPMatrixFree(DASolver.dRdWTPC, DASolver.ksp)
# otherwise, we need to recompute the PC mat based on adjPCLag
@@ -1045,7 +1066,7 @@ def solve_linear(self, d_outputs, d_residuals, mode):
if DASolver.dRdWTPC is not None:
DASolver.dRdWTPC.destroy()
DASolver.dRdWTPC = PETSc.Mat().create(self.comm)
DASolver.solver.calcdRdWT(DASolver.xvVec, DASolver.wVec, 1, DASolver.dRdWTPC)
DASolver.solver.calcdRdWT(1, DASolver.dRdWTPC)
# reset the KSP
if DASolver.ksp is not None:
DASolver.ksp.destroy()
@@ -1072,10 +1093,10 @@ def solve_linear(self, d_outputs, d_residuals, mode):

# optionally write the adjoint vector as OpenFOAM field format for post-processing
# update the obj func name for solve_linear later
solveLinearObjFuncName = DASolver.getOption("solveLinearObjFuncName")
solveLinearFunctionName = DASolver.getOption("solveLinearFunctionName")
psi_array = DASolver.vec2Array(self.psi)
solTimeFloat = self.solution_counter / 1e4
self.DASolver.writeAdjointFields(solveLinearObjFuncName, solTimeFloat, psi_array)
self.DASolver.writeAdjointFields(solveLinearFunctionName, solTimeFloat, psi_array)

elif adjEqnSolMethod == "fixedPoint":
solutionTime, renamed = DASolver.renameSolution(self.solution_counter)
@@ -1298,13 +1319,13 @@ def setup(self):
def mphys_add_funcs(self):
# add the function names to this component, called from runScript.py

# it is called objFunc in DAOptions but it contains both objective and constraint functions
objFuncs = self.DASolver.getOption("objFunc")
# it is called function in DAOptions but it contains both objective and constraint functions
functions = self.DASolver.getOption("function")

self.funcs = []

for objFunc in objFuncs:
self.funcs.append(objFunc)
for function in functions:
self.funcs.append(function)

# loop over the functions here and create the output
for f_name in self.funcs:
@@ -1409,33 +1430,45 @@ def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):
print("Computing partials for ", list(funcsBar.keys()))

# update the obj func name for solve_linear later
DASolver.setOption("solveLinearObjFuncName", list(funcsBar.keys())[0])
# TODO: this potentially overlaps with _outputOptions
DASolver.setOption("solveLinearFunctionName", list(funcsBar.keys())[0])
DASolver.updateDAOption()

localAdjSize = DASolver.getNLocalAdjointStates()
localXvSize = DASolver.getNLocalPoints() * 3

# loop over all d_inputs keys and compute the partials accordingly
for objFuncName in list(funcsBar.keys()):
for functionName in list(funcsBar.keys()):

fBar = funcsBar[functionName]

fBar = funcsBar[objFuncName]
seed = d_outputs[functionName]

# set the functionName to the _outputOptions dict such that DAOutputFunction can use it in the OF layer
DASolver.setOption("_outputOptions", {"functionName": functionName})
DASolver.updateDAOption()

for inputName in list(d_inputs.keys()):

# compute dFdW * fBar
if inputName == "%s_states" % self.discipline:
dFdW = DASolver.wVec.duplicate()
dFdW.zeroEntries()
DASolver.solverAD.calcdFdWAD(DASolver.xvVec, DASolver.wVec, objFuncName.encode(), dFdW)
wBar = DASolver.vec2Array(dFdW)
d_inputs["%s_states" % self.discipline] += wBar * fBar

# compute dFdW * fBar
product = np.zeros(localAdjSize)
jacInput = inputs["%s_states" % self.discipline]
DASolver.solverAD.calcJacTVecProduct(
"stateVar", localAdjSize, 1, jacInput, "function", 1, 1, seed, product
)
d_inputs["%s_states" % self.discipline] += product

# compute dFdX * fBar
elif inputName == "%s_vol_coords" % self.discipline:
dFdXv = DASolver.xvVec.duplicate()
dFdXv.zeroEntries()
DASolver.solverAD.calcdFdXvAD(
DASolver.xvVec, DASolver.wVec, objFuncName.encode(), "dummy".encode(), dFdXv

product = np.zeros(localXvSize)
jacInput = inputs["%s_vol_coords" % self.discipline]
DASolver.solverAD.calcJacTVecProduct(
"volCoord", localXvSize, 1, jacInput, "function", 1, 1, seed, product
)
xVBar = DASolver.vec2Array(dFdXv)
d_inputs["%s_vol_coords" % self.discipline] += xVBar * fBar
d_inputs["%s_vol_coords" % self.discipline] += product

# now we deal with the general input names
else:
@@ -1444,7 +1477,7 @@ def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):
dFdAOA = PETSc.Vec().create(self.comm)
dFdAOA.setSizes((PETSc.DECIDE, 1), bsize=1)
dFdAOA.setFromOptions()
DASolver.calcdFdAOAAnalytical(objFuncName, dFdAOA)
DASolver.calcdFdAOAAnalytical(functionName, dFdAOA)

# The aoaBar variable will be length 1 on the root proc, but length 0 on all slave procs.
# The value on the root proc must be broadcast across all procs.
@@ -1461,7 +1494,7 @@ def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):
dFdBC.setSizes((PETSc.DECIDE, 1), bsize=1)
dFdBC.setFromOptions()
DASolver.solverAD.calcdFdBCAD(
DASolver.xvVec, DASolver.wVec, objFuncName.encode(), inputName.encode(), dFdBC
DASolver.xvVec, DASolver.wVec, functionName.encode(), inputName.encode(), dFdBC
)
# The BCBar variable will be length 1 on the root proc, but length 0 on all slave procs.
# The value on the root proc must be broadcast across all procs.
@@ -1478,7 +1511,7 @@ def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):
dFdACTD.setSizes((PETSc.DECIDE, 13), bsize=1)
dFdACTD.setFromOptions()
DASolver.solverAD.calcdFdACTAD(
DASolver.xvVec, DASolver.wVec, objFuncName.encode(), inputName.encode(), dFdACTD
DASolver.xvVec, DASolver.wVec, functionName.encode(), inputName.encode(), dFdACTD
)
# we will convert the MPI dFdACTD to seq array for all procs
ACTDBar = DASolver.convertMPIVec2SeqArray(dFdACTD)
@@ -1498,7 +1531,7 @@ def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):
dFdHSC.setSizes((PETSc.DECIDE, 9), bsize=1)
dFdHSC.setFromOptions()
DASolver.solverAD.calcdFdHSCAD(
DASolver.xvVec, DASolver.wVec, objFuncName.encode(), inputName.encode(), dFdHSC
DASolver.xvVec, DASolver.wVec, functionName.encode(), inputName.encode(), dFdHSC
)
# we will convert the MPI dFdHSC to seq array for all procs
HSCBar = DASolver.convertMPIVec2SeqArray(dFdHSC)
@@ -1524,7 +1557,7 @@ def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):
dFdField.setSizes((nLocalSize, PETSc.DECIDE), bsize=1)
dFdField.setFromOptions()
DASolver.solverAD.calcdFdFieldAD(
DASolver.xvVec, DASolver.wVec, objFuncName.encode(), inputName.encode(), dFdField
DASolver.xvVec, DASolver.wVec, functionName.encode(), inputName.encode(), dFdField
)

# user can prescribe whether the field var is distributed. Default is True
@@ -1551,7 +1584,7 @@ def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):
volCoords,
states,
parameters,
objFuncName.encode(),
functionName.encode(),
inputName.encode(),
modelName.encode(),
product,
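For the function partials in compute_jacvec_product, the same interface is used with "function" as the output of size 1, and the active function name is handed to the OpenFOAM layer through the new _outputOptions dict. The sketch below mirrors the hunks above; DASolver and the states array are assumed to come from the surrounding OpenMDAO component, and the argument order is copied from the diff rather than from separate documentation.

```python
import numpy as np

def dFdW_bar(DASolver, states, functionName, fBar):
    """Sketch of dF/dW^T * fBar for one scalar function.

    Mirrors DAFoamFunctions.compute_jacvec_product in the diff above; the
    _outputOptions usage and the calcJacTVecProduct argument order are taken
    from that diff.
    """
    # tell the OpenFOAM layer which function to differentiate
    DASolver.setOption("_outputOptions", {"functionName": functionName})
    DASolver.updateDAOption()

    n = DASolver.getNLocalAdjointStates()
    seed = np.atleast_1d(np.asarray(fBar, dtype=float))  # reverse seed on the scalar function
    product = np.zeros(n)                                 # receives the local part of dF/dW^T * fBar
    DASolver.solverAD.calcJacTVecProduct(
        "stateVar", n, 1, states, "function", 1, 1, seed, product
    )
    return product
```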