forked from modflowpy/flopy
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
* workaround intermittent macos CI matplotlib failures (modflowpy#1491) * don't use plt.show() in tests * add explanatory comments to conftest.py * skip ex-gwtgwt-mt3dms-p10 mf6 example test (MODFLOW-USGS/modflow6#1008) * give test_mt3d.py::test_mfnwt_CrnkNic more retries * rename release/ to scripts/ * move pull_request_prepare.py to scripts/ * separate CI workflows for benchmarks, examples and regression test * name benchmark artifacts benchmarks-<system>-python version>-<workflow run ID> * add postprocess_benchmarks.py to scripts/ * add benchmark postprocessing CI job (creates artifact benchmarks-<workflow run ID>) * cache modflow executables (+ invalidation on new release) * move sort to child classes' __init__() from _ModpathSeries.get_data() (address modflowpy#1479) * reenable PathlineFile.get_destination_pathline_data() benchmark * add test that PathlineFile sorts on initialization * update ci.yml usages to commit.yml * don't upload coverage after smoke tests, benchmarks, regression tests and example tests * upload coverage on PR as well as push (fix codecov bot comments) * update to codecov action v3 * decrease coverage precision to 2 places (avoid small deltas)
- Loading branch information
Showing 25 changed files with 981 additions and 726 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,273 @@ | ||
# Nightly + per-push benchmark workflow for FloPy.
name: FloPy benchmarks

on:
  push:
    branches:
      - develop
      - tests
  pull_request:
    branches:
      - develop
      - tests
  schedule:
    - cron: '0 8 * * *'  # run at 8 AM UTC (12 am PST)

jobs:
  # Linux/macOS benchmarks across the supported Python matrix.
  benchmark:
    name: Benchmarks
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ ubuntu-latest, macos-latest ]
        # quoted so YAML does not parse them as floats ("3.10" would become 3.1)
        python-version: [ "3.7", "3.8", "3.9", "3.10" ]
        exclude:
          # avoid shutil.copytree infinite recursion bug
          # https://github.com/python/cpython/pull/17098
          # NOTE(review): '3.8.0' matches no matrix value ("3.8" resolves to the
          # latest 3.8.x on setup-python), so this exclude is a no-op — confirm intent
          - python-version: '3.8.0'
        include:
          # per-OS pip cache locations, consumed by the Cache Python step
          - os: ubuntu-latest
            path: ~/.cache/pip
          - os: macos-latest
            path: ~/Library/Caches/pip
    defaults:
      run:
        shell: bash
    timeout-minutes: 90

    steps:
      - name: Checkout repo
        # TODO(review): exact patch pin was lost in transcription — confirm
        uses: actions/checkout@v3

      - name: Cache Python
        uses: actions/cache@v3
        with:
          path: ${{ matrix.path }}
          key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('setup.cfg') }}
          restore-keys: |
            ${{ runner.os }}-${{ matrix.python-version }}-pip-

      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

      - name: Get branch name
        # TODO(review): exact version pin was lost in transcription — confirm
        uses: nelonoel/branch-name@v1.0.1

      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install .
          pip install ".[test, optional]"

      # Fetch metadata for the latest MODFLOW executables release so the
      # cache below is invalidated whenever a new release is published.
      - name: Check Modflow release
        run: |
          release_json=$(gh api -X GET -H "Accept: application/vnd.github+json" /repos/MODFLOW-USGS/executables/releases/latest)
          asset_id=$(python ./scripts/get_executables_metadata_asset_id.py "$release_json")
          if [ ${#asset_id} -gt 0 ]; then
            gh api -H "Accept: application/octet-stream" "/repos/MODFLOW-USGS/executables/releases/assets/$asset_id" >> executables.json
          else
            touch executables.json
          fi
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Cache Modflow executables
        uses: actions/cache@v3
        with:
          path: $HOME/.local/bin
          # fix: hashFiles() takes a quoted path pattern; the unquoted form is
          # an invalid expression
          key: modflow-exes-${{ matrix.os }}-${{ hashFiles('executables.json') }}
          restore-keys: |
            modflow-exes-${{ matrix.os }}

      - name: Install Modflow executables
        working-directory: ./autotest
        run: |
          mkdir -p $HOME/.local/bin
          get-modflow $HOME/.local/bin
          echo "$HOME/.local/bin" >> $GITHUB_PATH
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Run benchmarks
        working-directory: ./autotest
        run: |
          mkdir -p .benchmarks
          pytest -v --durations=0 --benchmark-only --benchmark-json .benchmarks/${{ matrix.os }}_python${{ matrix.python-version }}.json --keep-failed=.failed
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Upload failed benchmark artifact
        # v3 for consistency with the other actions in this file (v2 is deprecated)
        uses: actions/upload-artifact@v3
        if: failure()
        with:
          name: failed-benchmark-${{ matrix.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: |
            ./autotest/.failed/**

      - name: Upload benchmark result artifact
        uses: actions/upload-artifact@v3
        with:
          name: benchmarks-${{ matrix.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: |
            ./autotest/.benchmarks/**/*.json

benchmark_windows: | ||
name: Benchmarks (Windows) | ||
runs-on: windows-latest | ||
strategy: | ||
fail-fast: false | ||
matrix: | ||
python-version: [ 3.7, 3.8, 3.9, "3.10" ] | ||
exclude: | ||
# avoid shutil.copytree infinite recursion bug | ||
# https://github.com/python/cpython/pull/17098 | ||
- python-version: '3.8.0' | ||
defaults: | ||
run: | ||
shell: pwsh | ||
timeout-minutes: 90 | ||
|
||
steps: | ||
- name: Checkout repo | ||
uses: actions/[email protected] | ||
|
||
- name: Get branch name | ||
uses: nelonoel/[email protected] | ||
|
||
- name: Cache Miniconda | ||
uses: actions/cache@v3 | ||
with: | ||
path: ~/conda_pkgs_dir | ||
key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml') }} | ||
|
||
# Standard python fails on windows without GDAL installation | ||
# Using custom bash shell ("shell: bash -l {0}") with Miniconda | ||
- name: Setup Miniconda | ||
uses: conda-incubator/[email protected] | ||
with: | ||
python-version: ${{ matrix.python-version }} | ||
channels: conda-forge | ||
auto-update-conda: true | ||
activate-environment: flopy | ||
use-only-tar-bz2: true | ||
|
||
- name: Install Python dependencies | ||
run: | | ||
conda env update --name flopy --file etc/environment.yml | ||
python -m pip install --upgrade pip | ||
pip install https://github.com/modflowpy/pymake/zipball/master | ||
pip install xmipy | ||
pip install . | ||
- name: Check Modflow release | ||
run: | | ||
$release_json=(gh api -X GET -H "Accept: application/vnd.github+json" /repos/MODFLOW-USGS/executables/releases/latest) | ||
$asset_id=(python ./scripts/get_executables_metadata_asset_id.py "$release_json") | ||
if ($asset_id.Length -gt 0) { | ||
gh api -H "Accept: application/octet-stream" "/repos/MODFLOW-USGS/executables/releases/assets/$asset_id" >> executables.json | ||
} else { | ||
New-Item -Name "executables.json" -ItemType File | ||
} | ||
env: | ||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} | ||
|
||
- name: Cache Modflow executables | ||
uses: actions/cache@v3 | ||
with: | ||
path: C:\Users\runneradmin\.local\bin | ||
key: modflow-exes-${{ runner.os }}-${{ hashFiles(executables.json) }} | ||
restore-keys: | | ||
modflow-exes-${{ runner.os }} | ||
- name: Install Modflow executables | ||
run: | | ||
md -Force C:\Users\runneradmin\.local\bin | ||
get-modflow "C:\Users\runneradmin\.local\bin" | ||
echo "C:\Users\runneradmin\.local\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append | ||
env: | ||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} | ||
|
||
- name: Run benchmarks | ||
working-directory: ./autotest | ||
run: | | ||
md -Force .benchmarks | ||
pytest -v --durations=0 --benchmark-only --benchmark-json .benchmarks/${{ runner.os }}_python${{ matrix.python-version }}.json --keep-failed=.failed | ||
env: | ||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} | ||
|
||
- name: Upload failed benchmark artifact | ||
uses: actions/upload-artifact@v2 | ||
if: failure() | ||
with: | ||
name: failed-benchmark-${{ runner.os }}-${{ matrix.python-version }}-${{ github.run_id }} | ||
path: | | ||
./autotest/.failed/** | ||
- name: Upload benchmark result artifact | ||
uses: actions/upload-artifact@v2 | ||
with: | ||
name: benchmarks-${{ runner.os }}-${{ matrix.python-version }}-${{ github.run_id }} | ||
path: | | ||
./autotest/.benchmarks/**/*.json | ||
post_benchmark: | ||
needs: | ||
- benchmark | ||
- benchmark_windows | ||
name: Postprocess benchmark results | ||
runs-on: ubuntu-latest | ||
defaults: | ||
run: | ||
shell: bash | ||
timeout-minutes: 10 | ||
|
||
steps: | ||
- name: Checkout repo | ||
uses: actions/[email protected] | ||
|
||
- name: Cache Python | ||
uses: actions/cache@v3 | ||
with: | ||
path: ~/.cache/pip | ||
key: ${{ runner.os }}-3.7-pip-${{ hashFiles('setup.cfg') }} | ||
restore-keys: | | ||
${{ runner.os }}-3.7-pip- | ||
- name: Setup Python | ||
uses: actions/setup-python@v4 | ||
with: | ||
python-version: 3.7 | ||
|
||
- name: Install Python dependencies | ||
run: | | ||
python -m pip install --upgrade pip | ||
pip install numpy pandas matplotlib seaborn | ||
- name: Download all artifacts | ||
uses: actions/download-artifact@v3 | ||
with: | ||
path: ./autotest/.benchmarks | ||
|
||
- name: Process benchmark results | ||
run: | | ||
artifact_json=$($gh api -X GET -H "Accept: application/vnd.github+json" /repos/modflowpy/flopy/actions/artifacts) | ||
python ./scripts/get_benchmark_artifact_ids.py "$artifacts_json" \ | ||
| xargs -I@ bash -c "gh api -H 'Accept: application/vnd.github+json' /repos/modflowpy/flopy/actions/artifacts/@/zip >> ./autotest/.benchmarks/@.zip" | ||
zipfiles=( ./autotest/.benchmarks/*.zip ) | ||
if (( ${#zipfiles[@]} )); then | ||
unzip -o './autotest/.benchmarks/*.zip' -d ./autotest/.benchmarks | ||
fi | ||
python ./scripts/postprocess_benchmarks.py ./autotest/.benchmarks ./autotest/.benchmarks | ||
env: | ||
ARTIFACTS: ${{steps.run_tests.outputs.artifact_ids}} | ||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} | ||
|
||
- name: Upload benchmark results | ||
uses: actions/upload-artifact@v2 | ||
with: | ||
name: benchmarks-${{ github.run_id }} | ||
path: | | ||
./autotest/.benchmarks/*.csv | ||
./autotest/.benchmarks/*.png |
Oops, something went wrong.