diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml new file mode 100644 index 0000000000..0fc8dd208e --- /dev/null +++ b/.github/workflows/benchmark.yml @@ -0,0 +1,273 @@ +name: FloPy benchmarks + +on: + push: + branches: + - develop + - tests + pull_request: + branches: + - develop + - tests + schedule: + - cron: '0 8 * * *' # run at 8 AM UTC (12 am PST) + +jobs: + benchmark: + name: Benchmarks + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ ubuntu-latest, macos-latest ] + python-version: [ 3.7, 3.8, 3.9, "3.10" ] + exclude: + # avoid shutil.copytree infinite recursion bug + # https://github.com/python/cpython/pull/17098 + - python-version: '3.8.0' + include: + - os: ubuntu-latest + path: ~/.cache/pip + - os: macos-latest + path: ~/Library/Caches/pip + defaults: + run: + shell: bash + timeout-minutes: 90 + + steps: + - name: Checkout repo + uses: actions/checkout@v2.3.4 + + - name: Cache Python + uses: actions/cache@v3 + with: + path: ${{ matrix.path }} + key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('setup.cfg') }} + restore-keys: | + ${{ runner.os }}-${{ matrix.python-version }}-pip- + + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Get branch name + uses: nelonoel/branch-name@v1.0.1 + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install . + pip install ".[test, optional]" + + - name: Check Modflow release + run: | + release_json=$(gh api -X GET -H "Accept: application/vnd.github+json" /repos/MODFLOW-USGS/executables/releases/latest) + asset_id=$(python ./scripts/get_executables_metadata_asset_id.py "$release_json") + if [ ${#asset_id} -gt 0 ]; then + gh api -H "Accept: application/octet-stream" "/repos/MODFLOW-USGS/executables/releases/assets/$asset_id" >> executables.json + else + touch executables.json + fi + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Cache Modflow executables + uses: actions/cache@v3 + with: + path: $HOME/.local/bin + key: modflow-exes-${{ matrix.os }}-${{ hashFiles('executables.json') }} + restore-keys: | + modflow-exes-${{ matrix.os }} + + - name: Install Modflow executables + working-directory: ./autotest + run: | + mkdir -p $HOME/.local/bin + get-modflow $HOME/.local/bin + echo "$HOME/.local/bin" >> $GITHUB_PATH + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Run benchmarks + working-directory: ./autotest + run: | + mkdir -p .benchmarks + pytest -v --durations=0 --benchmark-only --benchmark-json .benchmarks/${{ matrix.os }}_python${{ matrix.python-version }}.json --keep-failed=.failed + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Upload failed benchmark artifact + uses: actions/upload-artifact@v2 + if: failure() + with: + name: failed-benchmark-${{ matrix.os }}-${{ matrix.python-version }}-${{ github.run_id }} + path: | + ./autotest/.failed/** + + - name: Upload benchmark result artifact + uses: actions/upload-artifact@v2 + with: + name: benchmarks-${{ matrix.os }}-${{ matrix.python-version }}-${{ github.run_id }} + path: | + ./autotest/.benchmarks/**/*.json + + benchmark_windows: + name: Benchmarks (Windows) + runs-on: windows-latest + strategy: + fail-fast: false + matrix: + python-version: [ 3.7, 3.8, 3.9, "3.10" ] + exclude: + # avoid shutil.copytree infinite recursion bug + # https://github.com/python/cpython/pull/17098 + - python-version: '3.8.0' + defaults: + run: + shell: pwsh + timeout-minutes: 90 + + steps: + - 
name: Checkout repo + uses: actions/checkout@v2.3.4 + + - name: Get branch name + uses: nelonoel/branch-name@v1.0.1 + + - name: Cache Miniconda + uses: actions/cache@v3 + with: + path: ~/conda_pkgs_dir + key: ${{ runner.os }}-${{ matrix.python-version }}-${{ hashFiles('etc/environment.yml') }} + + # Standard python fails on windows without GDAL installation + # Using custom bash shell ("shell: bash -l {0}") with Miniconda + - name: Setup Miniconda + uses: conda-incubator/setup-miniconda@v2.1.1 + with: + python-version: ${{ matrix.python-version }} + channels: conda-forge + auto-update-conda: true + activate-environment: flopy + use-only-tar-bz2: true + + - name: Install Python dependencies + run: | + conda env update --name flopy --file etc/environment.yml + python -m pip install --upgrade pip + pip install https://github.com/modflowpy/pymake/zipball/master + pip install xmipy + pip install . + + - name: Check Modflow release + run: | + $release_json=(gh api -X GET -H "Accept: application/vnd.github+json" /repos/MODFLOW-USGS/executables/releases/latest) + $asset_id=(python ./scripts/get_executables_metadata_asset_id.py "$release_json") + if ($asset_id.Length -gt 0) { + gh api -H "Accept: application/octet-stream" "/repos/MODFLOW-USGS/executables/releases/assets/$asset_id" >> executables.json + } else { + New-Item -Name "executables.json" -ItemType File + } + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Cache Modflow executables + uses: actions/cache@v3 + with: + path: C:\Users\runneradmin\.local\bin + key: modflow-exes-${{ runner.os }}-${{ hashFiles('executables.json') }} + restore-keys: | + modflow-exes-${{ runner.os }} + + - name: Install Modflow executables + run: | + md -Force C:\Users\runneradmin\.local\bin + get-modflow "C:\Users\runneradmin\.local\bin" + echo "C:\Users\runneradmin\.local\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Run benchmarks + working-directory: ./autotest + run: | + md -Force .benchmarks + pytest -v --durations=0 --benchmark-only --benchmark-json .benchmarks/${{ runner.os }}_python${{ matrix.python-version }}.json --keep-failed=.failed + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Upload failed benchmark artifact + uses: actions/upload-artifact@v2 + if: failure() + with: + name: failed-benchmark-${{ runner.os }}-${{ matrix.python-version }}-${{ github.run_id }} + path: | + ./autotest/.failed/** + + - name: Upload benchmark result artifact + uses: actions/upload-artifact@v2 + with: + name: benchmarks-${{ runner.os }}-${{ matrix.python-version }}-${{ github.run_id }} + path: | + ./autotest/.benchmarks/**/*.json + + post_benchmark: + needs: + - benchmark + - benchmark_windows + name: Postprocess benchmark results + runs-on: ubuntu-latest + defaults: + run: + shell: bash + timeout-minutes: 10 + + steps: + - name: Checkout repo + uses: actions/checkout@v2.3.4 + + - name: Cache Python + uses: actions/cache@v3 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-3.7-pip-${{ hashFiles('setup.cfg') }} + restore-keys: | + ${{ runner.os }}-3.7-pip- + + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: 3.7 + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install numpy pandas matplotlib seaborn + + - name: Download all artifacts + uses: actions/download-artifact@v3 + with: + path: ./autotest/.benchmarks + + - name: Process benchmark results + run: | + 
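# list the repository's artifacts via the GitHub API and pick out the benchmark artifacts, + # then download and unzip each archive and aggregate the results into CSV and plot summaries + 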
artifacts_json=$(gh api -X GET -H "Accept: application/vnd.github+json" /repos/modflowpy/flopy/actions/artifacts) + python ./scripts/get_benchmark_artifact_ids.py "$artifacts_json" \ + | xargs -I@ bash -c "gh api -H 'Accept: application/vnd.github+json' /repos/modflowpy/flopy/actions/artifacts/@/zip >> ./autotest/.benchmarks/@.zip" + zipfiles=( ./autotest/.benchmarks/*.zip ) + if (( ${#zipfiles[@]} )); then + unzip -o './autotest/.benchmarks/*.zip' -d ./autotest/.benchmarks + fi + python ./scripts/postprocess_benchmarks.py ./autotest/.benchmarks ./autotest/.benchmarks + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Upload benchmark results + uses: actions/upload-artifact@v2 + with: + name: benchmarks-${{ github.run_id }} + path: | + ./autotest/.benchmarks/*.csv + ./autotest/.benchmarks/*.png \ No newline at end of file diff --git a/.github/workflows/commit.yml b/.github/workflows/commit.yml index 2e46ed561e..c9dcef572c 100644 --- a/.github/workflows/commit.yml +++ b/.github/workflows/commit.yml @@ -8,7 +8,9 @@ on: - 'release*' - 'ci-diagnose' pull_request: - branches: [master, develop] + branches: + - master + - develop jobs: @@ -25,13 +27,13 @@ jobs: uses: actions/checkout@v2.3.4 # Caching disabled until hashFiles issue resolved: https://github.com/actions/runner/issues/1840 - # - name: Cache python - # uses: actions/cache@v3 - # with: - # path: ~/.cache/pip - # key: ${{ runner.os }}-3.7-pip-${{ hashFiles('setup.cfg') }} - # restore-keys: | - # ${{ runner.os }}-3.7-pip- + - name: Cache python + uses: actions/cache@v3 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-3.7-pip-${{ hashFiles('setup.cfg') }} + restore-keys: | + ${{ runner.os }}-3.7-pip- - name: Setup Python uses: actions/setup-python@v4 @@ -65,13 +67,13 @@ jobs: uses: actions/checkout@v2.3.4 # Caching disabled until hashFiles issue resolved: https://github.com/actions/runner/issues/1840 - # - name: Cache python - # uses: actions/cache@v3 - # with: - # path: ~/.cache/pip - # key: ${{ runner.os }}-3.7-pip-${{ hashFiles('setup.cfg') }} - # restore-keys: | - # ${{ runner.os }}-3.7-pip- + - name: Cache python + uses: actions/cache@v3 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-3.7-pip-${{ hashFiles('setup.cfg') }} + restore-keys: | + ${{ runner.os }}-3.7-pip- - name: Setup Python uses: actions/setup-python@v4 @@ -110,7 +112,7 @@ jobs: pylint --jobs=2 --errors-only --exit-zero ./flopy smoke: - name: Smoke + name: Smoke test runs-on: ubuntu-latest defaults: run: @@ -122,13 +124,13 @@ jobs: uses: actions/checkout@v2.3.4 # Caching disabled until hashFiles issue resolved: https://github.com/actions/runner/issues/1840 - # - name: Cache python - # uses: actions/cache@v3 - # with: - # path: ~/.cache/pip - # key: ${{ runner.os }}-3.7-pip-${{ hashFiles('setup.cfg') }} - # restore-keys: | - # ${{ runner.os }}-3.7-pip- + - name: Cache python + uses: actions/cache@v3 + with: + path: ~/.cache/pip + key: ${{ runner.os }}-3.7-pip-${{ hashFiles('setup.cfg') }} + restore-keys: | + ${{ runner.os }}-3.7-pip- - name: Setup Python uses: actions/setup-python@v4 @@ -144,14 +146,13 @@ pip install . 
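# the quotes around .[test, optional] keep the shell from treating the brackets as a glob; "test" and "optional" are extras defined in setup.cfg 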
pip install ".[test, optional]" - # Caching disabled until hashFiles issue resolved: https://github.com/actions/runner/issues/1840 - # - name: Cache Modflow executables - # uses: actions/cache@v3 - # with: - # path: $HOME/.local/bin - # key: ${{ runner.os }}-${{ hashFiles('flopy/utils/get_modflow.py') }} - # restore-keys: | - # ${{ runner.os }}-${{ hashFiles('flopy/utils/get_modflow.py') }} + - name: Cache Modflow executables + uses: actions/cache@v3 + with: + path: $HOME/.local/bin + key: modflow-exes-${{ runner.os }} + restore-keys: | + modflow-exes-${{ runner.os }} - name: Install Modflow executables run: | @@ -161,10 +162,10 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Run tests + - name: Run smoke tests working-directory: ./autotest run: | - pytest -v -n=auto --smoke --cov=flopy --cov-report=xml --durations=0 --keep-failed=.failed + pytest -v -n=auto --smoke --durations=0 --keep-failed=.failed env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -172,23 +173,10 @@ jobs: uses: actions/upload-artifact@v2 if: failure() with: - name: failed-${{ matrix.os }}-${{ matrix.python-version }} + name: failed-smoke-${{ matrix.os }}-${{ matrix.python-version }} path: | ./autotest/.failed/** - - name: Print coverage - working-directory: ./autotest - run: | - coverage report - - - name: Upload coverage - if: - github.repository_owner == 'modflowpy' && github.event_name == 'push' - uses: codecov/codecov-action@v2.1.0 - with: - directory: ./autotest - file: coverage.xml - test: name: Test needs: smoke @@ -208,19 +196,17 @@ jobs: - os: macos-latest path: ~/Library/Caches/pip timeout-minutes: 45 - steps: - name: Checkout repo uses: actions/checkout@v2.3.4 - # Caching disabled until hashFiles issue resolved: https://github.com/actions/runner/issues/1840 - # - name: Cache python - # uses: actions/cache@v3 - # with: - # path: ${{ matrix.path }} - # key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('setup.cfg') }} - # restore-keys: | - # ${{ runner.os }}-${{ matrix.python-version }}-pip- + - name: Cache python + uses: actions/cache@v3 + with: + path: ${{ matrix.path }} + key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('setup.cfg') }} + restore-keys: | + ${{ runner.os }}-${{ matrix.python-version }}-pip- - name: Setup Python uses: actions/setup-python@v4 @@ -236,14 +222,13 @@ jobs: pip install . 
pip install ".[test, optional]" - # Caching disabled until hashFiles issue resolved: https://github.com/actions/runner/issues/1840 - # - name: Cache Modflow executables - # uses: actions/cache@v3 - # with: - # path: $HOME/.local/bin - # key: ${{ runner.os }}-${{ hashFiles('flopy/utils/get_modflow.py') }} - # restore-keys: | - # ${{ runner.os }}-${{ hashFiles('flopy/utils/get_modflow.py') }} + - name: Cache Modflow executables + uses: actions/cache@v3 + with: + path: $HOME/.local/bin + key: modflow-exes-${{ matrix.os }} + restore-keys: | + modflow-exes-${{ runner.os }} - name: Install Modflow executables run: | @@ -275,11 +260,11 @@ jobs: - name: Upload coverage if: - github.repository_owner == 'modflowpy' && github.event_name == 'push' - uses: codecov/codecov-action@v2.1.0 + github.repository_owner == 'modflowpy' && (github.event_name == 'push' || github.event_name == 'pull_request') + uses: codecov/codecov-action@v3 with: - directory: ./autotest - file: coverage.xml + token: ${{ secrets.CODECOV_TOKEN }} + files: ./autotest/coverage.xml test_windows: name: Test Windows @@ -305,12 +290,11 @@ jobs: - name: Get branch name uses: nelonoel/branch-name@v1.0.1 - # Caching disabled until hashFiles issue resolved: https://github.com/actions/runner/issues/1840 - # - name: Cache Miniconda - # uses: actions/cache@v3 - # with: - # path: ~/conda_pkgs_dir - # key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml') }} + - name: Cache Miniconda + uses: actions/cache@v3 + with: + path: ~/conda_pkgs_dir + key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml') }} # Standard python fails on windows without GDAL installation # Using custom bash shell ("shell: bash -l {0}") with Miniconda @@ -331,9 +315,17 @@ jobs: pip install xmipy pip install . 
- - name: Install modflow executables + - name: Cache Modflow executables + uses: actions/cache@v3 + with: + path: C:\Users\runneradmin\.local\bin + key: modflow-exes-${{ runner.os }} + restore-keys: | + modflow-exes-${{ runner.os }} + + - name: Install Modflow executables run: | - md C:\Users\runneradmin\.local\bin + md -Force C:\Users\runneradmin\.local\bin get-modflow "C:\Users\runneradmin\.local\bin" echo "C:\Users\runneradmin\.local\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append env: @@ -350,7 +342,7 @@ jobs: uses: actions/upload-artifact@v2 if: failure() with: - name: failed-${{ matrix.os }}-${{ matrix.python-version }} + name: failed-${{ runner.os }}-${{ matrix.python-version }} path: | ./autotest/.failed/** @@ -361,8 +353,8 @@ jobs: - name: Upload coverage if: - github.repository_owner == 'modflowpy' && github.event_name == 'push' - uses: codecov/codecov-action@v2.1.0 + github.repository_owner == 'modflowpy' && (github.event_name == 'push' || github.event_name == 'pull_request') + uses: codecov/codecov-action@v3 with: - directory: ./autotest - file: coverage.xml + token: ${{ secrets.CODECOV_TOKEN }} + files: ./autotest/coverage.xml diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml deleted file mode 100644 index 1cc974477e..0000000000 --- a/.github/workflows/daily.yml +++ /dev/null @@ -1,545 +0,0 @@ -name: FloPy daily tasks - -on: - schedule: - - cron: '0 8 * * *' # run at 8 AM UTC (12 am PST) - -jobs: - regression: - name: Regression tests - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: [ ubuntu-latest, macos-latest ] - python-version: [ 3.7, 3.8, 3.9, "3.10" ] - exclude: - # avoid shutil.copytree infinite recursion bug - # https://github.com/python/cpython/pull/17098 - - python-version: '3.8.0' - include: - - os: ubuntu-latest - path: ~/.cache/pip - - os: macos-latest - path: ~/Library/Caches/pip - defaults: - run: - shell: bash - timeout-minutes: 90 - - steps: - - name: Checkout repo - uses: actions/checkout@v2.3.4 - - # Caching disabled until hashFiles issue resolved: https://github.com/actions/runner/issues/1840 - # - name: Cache python - # uses: actions/cache@v3 - # with: - # path: ${{ matrix.path }} - # key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('setup.cfg') }} - # restore-keys: | - # ${{ runner.os }}-${{ matrix.python-version }}-pip- - - - name: Setup Python - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - - name: Get branch name - uses: nelonoel/branch-name@v1.0.1 - - - name: Install Python dependencies - run: | - python -m pip install --upgrade pip - pip install . 
- pip install ".[test, optional]" - - - name: Install modflow executables - working-directory: ./autotest - run: | - mkdir -p $HOME/.local/bin - get-modflow $HOME/.local/bin - echo "$HOME/.local/bin" >> $GITHUB_PATH - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Run tests - working-directory: ./autotest - run: | - pytest -v -m="regression" -n=auto --cov=flopy --cov-report=xml --durations=0 --keep-failed=.failed - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Upload failed test outputs - uses: actions/upload-artifact@v2 - if: failure() - with: - name: failed-${{ matrix.os }}-${{ matrix.python-version }} - path: | - ./autotest/.failed/** - - - name: Print coverage report - working-directory: ./autotest - run: | - coverage report - - - name: Upload coverage - if: - github.repository_owner == 'modflowpy' && github.event_name == 'push' - uses: codecov/codecov-action@v2.1.0 - with: - directory: ./autotest - file: coverage.xml - - examples: - name: Example scripts & notebooks - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: [ ubuntu-latest, macos-latest ] - python-version: [ 3.7, 3.8, 3.9, "3.10" ] - exclude: - # avoid shutil.copytree infinite recursion bug - # https://github.com/python/cpython/pull/17098 - - python-version: '3.8.0' - include: - - os: ubuntu-latest - path: ~/.cache/pip - - os: macos-latest - path: ~/Library/Caches/pip - defaults: - run: - shell: bash - timeout-minutes: 90 - - steps: - - name: Checkout repo - uses: actions/checkout@v2.3.4 - - # Caching disabled until hashFiles issue resolved: https://github.com/actions/runner/issues/1840 - # - name: Cache python - # uses: actions/cache@v3 - # with: - # path: ${{ matrix.path }} - # key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('setup.cfg') }} - # restore-keys: | - # ${{ runner.os }}-${{ matrix.python-version }}-pip- - - - name: Setup Python - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - - name: Get branch name - uses: nelonoel/branch-name@v1.0.1 - - - name: Install Python dependencies - run: | - python -m pip install --upgrade pip - pip install . 
- pip install ".[test, optional]" - - - name: Install modflow executables - working-directory: ./autotest - run: | - mkdir -p $HOME/.local/bin - get-modflow $HOME/.local/bin - echo "$HOME/.local/bin" >> $GITHUB_PATH - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Run tests - working-directory: ./autotest - run: | - pytest -v -m="example" -n=auto --cov=flopy --cov-report=xml --durations=0 --keep-failed=.failed - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Upload failed test outputs - uses: actions/upload-artifact@v2 - if: failure() - with: - name: failed-${{ matrix.os }}-${{ matrix.python-version }} - path: | - ./autotest/.failed/** - - - name: Print coverage report - working-directory: ./autotest - run: | - coverage report - - - name: Upload coverage - if: - github.repository_owner == 'modflowpy' && github.event_name == 'push' - uses: codecov/codecov-action@v2.1.0 - with: - directory: ./autotest - file: coverage.xml - - benchmark: - name: Benchmarks - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: [ ubuntu-latest, macos-latest ] - python-version: [ 3.7, 3.8, 3.9, "3.10" ] - exclude: - # avoid shutil.copytree infinite recursion bug - # https://github.com/python/cpython/pull/17098 - - python-version: '3.8.0' - include: - - os: ubuntu-latest - path: ~/.cache/pip - - os: macos-latest - path: ~/Library/Caches/pip - defaults: - run: - shell: bash - timeout-minutes: 90 - - steps: - - name: Checkout repo - uses: actions/checkout@v2.3.4 - - # Caching disabled until hashFiles issue resolved: https://github.com/actions/runner/issues/1840 - # - name: Cache python - # uses: actions/cache@v3 - # with: - # path: ${{ matrix.path }} - # key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('setup.cfg') }} - # restore-keys: | - # ${{ runner.os }}-${{ matrix.python-version }}-pip- - - - name: Setup Python - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - - - name: Get branch name - uses: nelonoel/branch-name@v1.0.1 - - - name: Install Python dependencies - run: | - python -m pip install --upgrade pip - pip install . 
- pip install ".[test, optional]" - - - name: Install modflow executables - working-directory: ./autotest - run: | - mkdir -p $HOME/.local/bin - get-modflow $HOME/.local/bin - echo "$HOME/.local/bin" >> $GITHUB_PATH - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Load cached benchmark results (for comparison) - uses: actions/cache@v3 - with: - path: ./autotest/.benchmarks - key: benchmark-${{ matrix.os }}-${{ matrix.python-version }} - - - name: Run benchmarks - working-directory: ./autotest - run: | - pytest -v --durations=0 --cov=flopy --cov-report=xml --benchmark-only --benchmark-autosave --benchmark-compare --benchmark-compare-fail=mean:25% --keep-failed=.failed - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Upload failed benchmark outputs - uses: actions/upload-artifact@v2 - if: failure() - with: - name: failed-${{ matrix.os }}-${{ matrix.python-version }} - path: | - ./autotest/.failed/** - - - name: Upload benchmark results - uses: actions/upload-artifact@v2 - with: - name: benchmark-${{ matrix.os }}-${{ matrix.python-version }} - path: | - ./autotest/.benchmarks/**/*.json - - - name: Print coverage report - working-directory: ./autotest - run: | - coverage report - - - name: Upload coverage - if: - github.repository_owner == 'modflowpy' && github.event_name == 'push' - uses: codecov/codecov-action@v2.1.0 - with: - directory: ./autotest - file: coverage.xml - - regression_windows: - name: Regression tests (Windows) - runs-on: windows-latest - strategy: - fail-fast: false - matrix: - python-version: [ 3.7, 3.8, 3.9, "3.10" ] - exclude: - # avoid shutil.copytree infinite recursion bug - # https://github.com/python/cpython/pull/17098 - - python-version: '3.8.0' - defaults: - run: - shell: pwsh - timeout-minutes: 90 - - steps: - - name: Checkout repo - uses: actions/checkout@v2.3.4 - - - name: Get branch name - uses: nelonoel/branch-name@v1.0.1 - - # Caching disabled until hashFiles issue resolved: https://github.com/actions/runner/issues/1840 - # - name: Cache Miniconda - # uses: actions/cache@v3 - # with: - # path: ~/conda_pkgs_dir - # key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml') }} - - # Standard python fails on windows without GDAL installation - # Using custom bash shell ("shell: bash -l {0}") with Miniconda - - name: Setup Miniconda - uses: conda-incubator/setup-miniconda@v2.1.1 - with: - python-version: ${{ matrix.python-version }} - channels: conda-forge - auto-update-conda: true - activate-environment: flopy - use-only-tar-bz2: true - - - name: Install Python dependencies - run: | - conda env update --name flopy --file etc/environment.yml - python -m pip install --upgrade pip - pip install https://github.com/modflowpy/pymake/zipball/master - pip install xmipy - pip install . 
- - - name: Install modflow executables - run: | - md C:\Users\runneradmin\.local\bin - get-modflow "C:\Users\runneradmin\.local\bin" - echo "C:\Users\runneradmin\.local\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Run tests - working-directory: ./autotest - run: | - pytest -v -n auto -m "regression" --cov=flopy --cov-report=xml --durations=0 --keep-failed=.failed - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Upload failed test outputs - uses: actions/upload-artifact@v2 - if: failure() - with: - name: failed-${{ matrix.os }}-${{ matrix.python-version }} - path: | - ./autotest/.failed/** - - - name: Print coverage report - working-directory: ./autotest - run: | - coverage report - - - name: Upload coverage - if: - github.repository_owner == 'modflowpy' && github.event_name == 'push' - uses: codecov/codecov-action@v2.1.0 - with: - directory: ./autotest - file: coverage.xml - - examples_windows: - name: Example scripts & notebooks (Windows) - runs-on: windows-latest - strategy: - fail-fast: false - matrix: - python-version: [ 3.7, 3.8, 3.9, "3.10" ] - exclude: - # avoid shutil.copytree infinite recursion bug - # https://github.com/python/cpython/pull/17098 - - python-version: '3.8.0' - defaults: - run: - shell: pwsh - timeout-minutes: 90 - - steps: - - name: Checkout repo - uses: actions/checkout@v2.3.4 - - - name: Get branch name - uses: nelonoel/branch-name@v1.0.1 - - # Caching disabled until hashFiles issue resolved: https://github.com/actions/runner/issues/1840 - # - name: Cache Miniconda - # uses: actions/cache@v3 - # with: - # path: ~/conda_pkgs_dir - # key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml') }} - - # Standard python fails on windows without GDAL installation - # Using custom bash shell ("shell: bash -l {0}") with Miniconda - - name: Setup Miniconda - uses: conda-incubator/setup-miniconda@v2.1.1 - with: - python-version: ${{ matrix.python-version }} - channels: conda-forge - auto-update-conda: true - activate-environment: flopy - use-only-tar-bz2: true - - - name: Install Python dependencies - run: | - conda env update --name flopy --file etc/environment.yml - python -m pip install --upgrade pip - pip install https://github.com/modflowpy/pymake/zipball/master - pip install xmipy - pip install . 
- - - name: Install modflow executables - run: | - md C:\Users\runneradmin\.local\bin - get-modflow "C:\Users\runneradmin\.local\bin" - echo "C:\Users\runneradmin\.local\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Run tests - working-directory: ./autotest - run: | - pytest -v -n auto -m "example" --cov=flopy --cov-report=xml --durations=0 --keep-failed=.failed - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Upload failed test outputs - uses: actions/upload-artifact@v2 - if: failure() - with: - name: failed-${{ matrix.os }}-${{ matrix.python-version }} - path: | - ./autotest/.failed/** - - - name: Print coverage report - working-directory: ./autotest - run: | - coverage report - - - name: Upload coverage - if: - github.repository_owner == 'modflowpy' && github.event_name == 'push' - uses: codecov/codecov-action@v2.1.0 - with: - directory: ./autotest - file: coverage.xml - - benchmark_windows: - name: Benchmarks (Windows) - runs-on: windows-latest - strategy: - fail-fast: false - matrix: - python-version: [ 3.7, 3.8, 3.9, "3.10" ] - exclude: - # avoid shutil.copytree infinite recursion bug - # https://github.com/python/cpython/pull/17098 - - python-version: '3.8.0' - defaults: - run: - shell: pwsh - timeout-minutes: 90 - - steps: - - name: Checkout repo - uses: actions/checkout@v2.3.4 - - - name: Get branch name - uses: nelonoel/branch-name@v1.0.1 - - # Caching disabled until hashFiles issue resolved: https://github.com/actions/runner/issues/1840 - # - name: Cache Miniconda - # uses: actions/cache@v3 - # with: - # path: ~/conda_pkgs_dir - # key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml') }} - - # Standard python fails on windows without GDAL installation - # Using custom bash shell ("shell: bash -l {0}") with Miniconda - - name: Setup Miniconda - uses: conda-incubator/setup-miniconda@v2.1.1 - with: - python-version: ${{ matrix.python-version }} - channels: conda-forge - auto-update-conda: true - activate-environment: flopy - use-only-tar-bz2: true - - - name: Install Python dependencies - run: | - conda env update --name flopy --file etc/environment.yml - python -m pip install --upgrade pip - pip install https://github.com/modflowpy/pymake/zipball/master - pip install xmipy - pip install . 
- - - name: Install modflow executables - run: | - md C:\Users\runneradmin\.local\bin - get-modflow "C:\Users\runneradmin\.local\bin" - echo "C:\Users\runneradmin\.local\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Load cached benchmark results (for comparison) - uses: actions/cache@v3 - with: - path: ./autotest/.benchmarks - key: benchmark-${{ runner.os }}-${{ matrix.python-version }} - - - name: Run benchmarks - working-directory: ./autotest - run: | - pytest -v --durations=0 --cov=flopy --cov-report=xml --benchmark-only --benchmark-autosave --benchmark-compare --benchmark-compare-fail=mean:25% --keep-failed=.failed - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Upload failed benchmark outputs - uses: actions/upload-artifact@v2 - if: failure() - with: - name: failed-${{ matrix.os }}-${{ matrix.python-version }} - path: | - ./autotest/.failed/** - - - name: Upload benchmark results - uses: actions/upload-artifact@v2 - with: - name: benchmark-${{ runner.os }}-${{ matrix.python-version }} - path: | - ./autotest/.benchmarks/**/*.json - - - name: Print coverage report - working-directory: ./autotest - run: | - coverage report - - - name: Upload coverage - if: - github.repository_owner == 'modflowpy' && github.event_name == 'push' - uses: codecov/codecov-action@v2.1.0 - with: - directory: ./autotest - file: coverage.xml \ No newline at end of file diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml new file mode 100644 index 0000000000..081f63fd89 --- /dev/null +++ b/.github/workflows/examples.yml @@ -0,0 +1,165 @@ +name: FloPy example script & notebook tests + +on: + schedule: + - cron: '0 8 * * *' # run at 8 AM UTC (12 am PST) + +jobs: + examples: + name: Example scripts & notebooks + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ ubuntu-latest, macos-latest ] + python-version: [ 3.7, 3.8, 3.9, "3.10" ] + exclude: + # avoid shutil.copytree infinite recursion bug + # https://github.com/python/cpython/pull/17098 + - python-version: '3.8.0' + include: + - os: ubuntu-latest + path: ~/.cache/pip + - os: macos-latest + path: ~/Library/Caches/pip + defaults: + run: + shell: bash + timeout-minutes: 90 + steps: + - name: Checkout repo + uses: actions/checkout@v2.3.4 + + # Caching disabled until hashFiles issue resolved: https://github.com/actions/runner/issues/1840 + # - name: Cache python + # uses: actions/cache@v3 + # with: + # path: ${{ matrix.path }} + # key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('setup.cfg') }} + # restore-keys: | + # ${{ runner.os }}-${{ matrix.python-version }}-pip- + + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Get branch name + uses: nelonoel/branch-name@v1.0.1 + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install . 
+ pip install ".[test, optional]" + + - name: Cache Modflow executables + uses: actions/cache@v3 + with: + path: $HOME/.local/bin + key: modflow-exes-${{ matrix.os }} + restore-keys: | + modflow-exes-${{ matrix.os }} + + - name: Install Modflow executables + working-directory: ./autotest + run: | + mkdir -p $HOME/.local/bin + get-modflow $HOME/.local/bin + echo "$HOME/.local/bin" >> $GITHUB_PATH + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Run example tests + working-directory: ./autotest + run: | + pytest -v -m="example" -n=auto --durations=0 --keep-failed=.failed + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Upload failed test outputs + uses: actions/upload-artifact@v2 + if: failure() + with: + name: failed-example-${{ matrix.os }}-${{ matrix.python-version }} + path: | + ./autotest/.failed/** + + examples_windows: + name: Example scripts & notebooks (Windows) + runs-on: windows-latest + strategy: + fail-fast: false + matrix: + python-version: [ 3.7, 3.8, 3.9, "3.10" ] + exclude: + # avoid shutil.copytree infinite recursion bug + # https://github.com/python/cpython/pull/17098 + - python-version: '3.8.0' + defaults: + run: + shell: pwsh + timeout-minutes: 90 + steps: + - name: Checkout repo + uses: actions/checkout@v2.3.4 + + - name: Get branch name + uses: nelonoel/branch-name@v1.0.1 + + # Caching disabled until hashFiles issue resolved: https://github.com/actions/runner/issues/1840 + # - name: Cache Miniconda + # uses: actions/cache@v3 + # with: + # path: ~/conda_pkgs_dir + # key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml') }} + + # Standard python fails on windows without GDAL installation + # Using custom bash shell ("shell: bash -l {0}") with Miniconda + - name: Setup Miniconda + uses: conda-incubator/setup-miniconda@v2.1.1 + with: + python-version: ${{ matrix.python-version }} + channels: conda-forge + auto-update-conda: true + activate-environment: flopy + use-only-tar-bz2: true + + - name: Install Python dependencies + run: | + conda env update --name flopy --file etc/environment.yml + python -m pip install --upgrade pip + pip install https://github.com/modflowpy/pymake/zipball/master + pip install xmipy + pip install . 
+ + - name: Cache Modflow executables + uses: actions/cache@v3 + with: + path: C:\Users\runneradmin\.local\bin + key: modflow-exes-${{ runner.os }} + restore-keys: | + modflow-exes-${{ runner.os }} + + - name: Install Modflow executables + run: | + md -Force C:\Users\runneradmin\.local\bin + get-modflow "C:\Users\runneradmin\.local\bin" + echo "C:\Users\runneradmin\.local\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Run example tests + working-directory: ./autotest + run: | + pytest -v -n auto -m "example" --durations=0 --keep-failed=.failed + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Upload failed test outputs + uses: actions/upload-artifact@v2 + if: failure() + with: + name: failed-example-${{ runner.os }}-${{ matrix.python-version }} + path: | + ./autotest/.failed/* diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml new file mode 100644 index 0000000000..3c2faa153a --- /dev/null +++ b/.github/workflows/regression.yml @@ -0,0 +1,165 @@ +name: FloPy regression tests + +on: + schedule: + - cron: '0 8 * * *' # run at 8 AM UTC (12 am PST) + +jobs: + regression: + name: Regression tests + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ ubuntu-latest, macos-latest ] + python-version: [ 3.7, 3.8, 3.9, "3.10" ] + exclude: + # avoid shutil.copytree infinite recursion bug + # https://github.com/python/cpython/pull/17098 + - python-version: '3.8.0' + include: + - os: ubuntu-latest + path: ~/.cache/pip + - os: macos-latest + path: ~/Library/Caches/pip + defaults: + run: + shell: bash + timeout-minutes: 90 + steps: + - name: Checkout repo + uses: actions/checkout@v2.3.4 + + # Caching disabled until hashFiles issue resolved: https://github.com/actions/runner/issues/1840 + # - name: Cache python + # uses: actions/cache@v3 + # with: + # path: ${{ matrix.path }} + # key: ${{ runner.os }}-${{ matrix.python-version }}-pip-${{ hashFiles('setup.cfg') }} + # restore-keys: | + # ${{ runner.os }}-${{ matrix.python-version }}-pip- + + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + + - name: Get branch name + uses: nelonoel/branch-name@v1.0.1 + + - name: Install Python dependencies + run: | + python -m pip install --upgrade pip + pip install . 
+ pip install ".[test, optional]" + + - name: Cache Modflow executables + uses: actions/cache@v3 + with: + path: $HOME/.local/bin + key: modflow-exes-${{ matrix.os }} + restore-keys: | + modflow-exes-${{ matrix.os }} + + - name: Install Modflow executables + working-directory: ./autotest + run: | + mkdir -p $HOME/.local/bin + get-modflow $HOME/.local/bin + echo "$HOME/.local/bin" >> $GITHUB_PATH + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Run regression tests + working-directory: ./autotest + run: | + pytest -v -m="regression" -n=auto --durations=0 --keep-failed=.failed + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Upload failed test outputs + uses: actions/upload-artifact@v2 + if: failure() + with: + name: failed-regression-${{ matrix.os }}-${{ matrix.python-version }} + path: | + ./autotest/.failed/** + + regression_windows: + name: Regression tests (Windows) + runs-on: windows-latest + strategy: + fail-fast: false + matrix: + python-version: [ 3.7, 3.8, 3.9, "3.10" ] + exclude: + # avoid shutil.copytree infinite recursion bug + # https://github.com/python/cpython/pull/17098 + - python-version: '3.8.0' + defaults: + run: + shell: pwsh + timeout-minutes: 90 + steps: + - name: Checkout repo + uses: actions/checkout@v2.3.4 + + - name: Get branch name + uses: nelonoel/branch-name@v1.0.1 + + # Caching disabled until hashFiles issue resolved: https://github.com/actions/runner/issues/1840 + # - name: Cache Miniconda + # uses: actions/cache@v3 + # with: + # path: ~/conda_pkgs_dir + # key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml') }} + + # Standard python fails on windows without GDAL installation + # Using custom bash shell ("shell: bash -l {0}") with Miniconda + - name: Setup Miniconda + uses: conda-incubator/setup-miniconda@v2.1.1 + with: + python-version: ${{ matrix.python-version }} + channels: conda-forge + auto-update-conda: true + activate-environment: flopy + use-only-tar-bz2: true + + - name: Install Python dependencies + run: | + conda env update --name flopy --file etc/environment.yml + python -m pip install --upgrade pip + pip install https://github.com/modflowpy/pymake/zipball/master + pip install xmipy + pip install . + + - name: Cache Modflow executables + uses: actions/cache@v3 + with: + path: C:\Users\runneradmin\.local\bin + key: modflow-exes-${{ runner.os }} + restore-keys: | + modflow-exes-${{ runner.os }} + + - name: Install Modflow executables + run: | + md -Force C:\Users\runneradmin\.local\bin + get-modflow "C:\Users\runneradmin\.local\bin" + echo "C:\Users\runneradmin\.local\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Run regression tests + working-directory: ./autotest + run: | + pytest -v -n auto -m "regression" --durations=0 --keep-failed=.failed + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Upload failed test outputs + uses: actions/upload-artifact@v2 + if: failure() + with: + name: failed-regression-${{ runner.os }}-${{ matrix.python-version }} + path: | + ./autotest/.failed/** diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index abacfbc884..6faebce7ee 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -55,12 +55,12 @@ Before you submit your Pull Request (PR) consider the following guidelines: ``` 4. Create your patch, **including appropriate test cases**. See [DEVELOPER,md](DEVELOPER.md#running-tests) for guidelines for constructing autotests. -5. 
Run the [isort import sorter](https://github.com/PyCQA/isort) and [black formatter](https://github.com/psf/black) on Flopy source files from the git repository `autotest` directory using: +5. Run the [isort import sorter](https://github.com/PyCQA/isort) and [black formatter](https://github.com/psf/black). There is a utility script to do this in the `scripts` directory: ```shell - python pull_request_prepare.py + python ./scripts/pull_request_prepare.py ``` - Note: Pull Requests must pass isort import and black format checks run on the [GitHub actions](https://github.com/modflowpy/flopy/actions) (*linting*) before they will be accepted. isort can be installed using [`pip`](https://pypi.org/project/isort/) and [`conda`](https://anaconda.org/conda-forge/isort). The black formatter can also be installed using [`pip`](https://pypi.org/project/black/) and [`conda`](https://anaconda.org/conda-forge/black). If the Pull Request fails the *linting* job in the [flopy continuous integration](https://github.com/modflowpy/flopy/actions/workflows/ci.yml) workflow, make sure the latest versions of isort and black are installed. + Note: Pull Requests must pass isort import and black format checks run on the [GitHub actions](https://github.com/modflowpy/flopy/actions) (*linting*) before they will be accepted. isort can be installed using [`pip`](https://pypi.org/project/isort/) and [`conda`](https://anaconda.org/conda-forge/isort). The black formatter can also be installed using [`pip`](https://pypi.org/project/black/) and [`conda`](https://anaconda.org/conda-forge/black). If the Pull Request fails the *linting* job in the [flopy continuous integration](https://github.com/modflowpy/flopy/actions/workflows/commit.yml) workflow, make sure the latest versions of isort and black are installed. 6. Run the full FloPy test suite and ensure that all tests pass: diff --git a/DEVELOPER.md b/DEVELOPER.md index ee3b2bd4db..09f336354e 100644 --- a/DEVELOPER.md +++ b/DEVELOPER.md @@ -435,13 +435,13 @@ The [`act`](https://github.com/nektos/act) tool uses Docker to run containerized With Docker installed and running, run `act -l` from the project root to see available CI workflows. To run all workflows and jobs, just run `act`. To run a particular workflow use `-W`: ```shell -act -W .github/workflows/ci.yml +act -W .github/workflows/commit.yml ``` To run a particular job within a workflow, add the `-j` option: ```shell -act -W .github/workflows/ci.yml -j build +act -W .github/workflows/commit.yml -j build ``` **Note:** GitHub API rate limits are easy to exceed, especially with job matrices. Authenticated GitHub users have a much higher rate limit: use `-s GITHUB_TOKEN=` when invoking `act` to provide a personal access token. Note that this will log your token in shell history — leave the value blank for a prompt to enter it more securely. 
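For reference, the `pull_request_prepare.py` helper mentioned in step 5 of CONTRIBUTING.md above is not shown in this diff. A minimal sketch of what such a script might look like, assuming it simply shells out to isort and black (the target directories here are assumptions):

```python
# Hypothetical sketch of scripts/pull_request_prepare.py; the real script is not
# included in this diff, and the formatted targets below are assumptions.
import subprocess
from pathlib import Path

# repository root, assuming this script lives in scripts/
root = Path(__file__).resolve().parent.parent

# run isort before black so black formats already-sorted imports
for target in ("flopy", "autotest"):
    subprocess.run(["isort", str(root / target)], check=True)
    subprocess.run(["black", str(root / target)], check=True)
```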
diff --git a/README.md b/README.md index 175b03f302..e2ec673e95 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ flopy3 ### Version 3.3.6 — release candidate -[![flopy continuous integration](https://github.com/modflowpy/flopy/actions/workflows/ci.yml/badge.svg?branch=develop)](https://github.com/modflowpy/flopy/actions/workflows/ci.yml) +[![flopy continuous integration](https://github.com/modflowpy/flopy/actions/workflows/commit.yml/badge.svg?branch=develop)](https://github.com/modflowpy/flopy/actions/workflows/commit.yml) [![Read the Docs](https://github.com/modflowpy/flopy/actions/workflows/rtd.yml/badge.svg?branch=develop)](https://github.com/modflowpy/flopy/actions/workflows/rtd.yml) [![codecov](https://codecov.io/gh/modflowpy/flopy/branch/develop/graph/badge.svg)](https://codecov.io/gh/modflowpy/flopy) diff --git a/autotest/conftest.py b/autotest/conftest.py index 8368a3c841..9f7f8df01f 100644 --- a/autotest/conftest.py +++ b/autotest/conftest.py @@ -1,7 +1,5 @@ import importlib -import io import os -import pkg_resources import socket import sys from os import environ @@ -14,6 +12,7 @@ from urllib import request from warnings import warn +import pkg_resources import pytest # constants @@ -139,7 +138,9 @@ def is_connected(hostname): def is_in_ci(): - return "CI" in os.environ + # if running in GitHub Actions CI, "CI" variable always set to true + # https://docs.github.com/en/actions/learn-github-actions/environment-variables#default-environment-variables + return bool(os.environ.get("CI", None)) def is_github_rate_limited() -> Optional[bool]: @@ -464,3 +465,12 @@ def run_py_script(script, *args, verbose=False): """Run a Python script, return tuple (stdout, stderr, returncode).""" return run_cmd( sys.executable, script, *args, verbose=verbose, cwd=Path(script).parent) + + +# use noninteractive matplotlib backend if in Mac OS CI to avoid pytest-xdist node failure +# e.g. 
https://github.com/modflowpy/flopy/runs/7748574375?check_suite_focus=true#step:9:57 +@pytest.fixture(scope="session", autouse=True) +def patch_macos_ci_matplotlib(): + if is_in_ci() and system().lower() == "darwin": + import matplotlib + matplotlib.use("agg") diff --git a/autotest/regression/conftest.py b/autotest/regression/conftest.py index 07def686ed..5971ede0c0 100644 --- a/autotest/regression/conftest.py +++ b/autotest/regression/conftest.py @@ -11,14 +11,6 @@ __mf6_examples_lock = FileLock(Path(gettempdir()) / f"{__mf6_examples}.lock") -def is_nested(namfile) -> bool: - p = Path(namfile) - if not p.is_file() or not p.name.endswith('.nam'): - raise ValueError(f"Expected a namfile path, got {p}") - - return p.parent.parent.name != __mf6_examples - - def get_mf6_examples_path() -> Path: pytest.importorskip("pymake") import pymake @@ -39,51 +31,48 @@ def get_mf6_examples_path() -> Path: __mf6_examples_lock.release() -@pytest.fixture(scope="session") -def temp_mf6_examples_path(tmpdir_factory): - pytest.importorskip("pymake") - import pymake +def is_nested(namfile) -> bool: + p = Path(namfile) + if not p.is_file() or not p.name.endswith('.nam'): + raise ValueError(f"Expected a namfile path, got {p}") - temp = Path(tmpdir_factory.mktemp(__mf6_examples)) - pymake.download_and_unzip( - url="https://github.com/MODFLOW-USGS/modflow6-examples/releases/download/current/modflow6-examples.zip", - pth=str(temp), - verify=True, - ) - return temp + return p.parent.parent.name != __mf6_examples def pytest_generate_tests(metafunc): - def get_namfiles(): - return get_mf6_examples_path().rglob("mfsim.nam") - + # examples to skip: + # - ex-gwt-gwtgwt-mt3dms-p10: https://github.com/MODFLOW-USGS/modflow6/pull/1008 + exclude = ["ex-gwt-gwtgwt-mt3dms-p10"] + namfiles = sorted(str(p) for p in get_mf6_examples_path().rglob("mfsim.nam") if not any(e in str(p) for e in exclude))  # sorted so groupby below sees contiguous groups + + # parametrization by model + # - single namfile per test case + # - no coupling (only first model in each simulation subdir is used) key = "mf6_example_namfile" if key in metafunc.fixturenames: - # model parametrization (single namfile, no coupling) - namfiles = [str(p) for p in get_namfiles()] metafunc.parametrize(key, sorted(namfiles)) + # parametrization by simulation + # - each test case gets an ordered list of 1+ namfiles + # - models can be coupled (run in order provided, sharing workspace) key = "mf6_example_namfiles" if key in metafunc.fixturenames: - # simulation parametrization (1+ models in series) - # ordered list of namfiles representing simulation - namfiles = sorted([str(p) for p in get_namfiles()]) + simulations = [] def simulation_name_from_model_path(p): p = Path(p) return p.parent.parent.name if is_nested(p) else p.parent.name - def simulation_name_from_model_namfiles(mnams): - namfile = next(iter(mnams), None) - if namfile is None: pytest.skip("No namfiles (expected ordered collection)") - namfile = Path(namfile) - return (namfile.parent.parent if is_nested(namfile) else namfile.parent).name - - simulations = [] for model_name, model_namfiles in groupby(namfiles, key=simulation_name_from_model_path): models = sorted(list(model_namfiles)) # sort in alphabetical order (gwf < gwt) simulations.append(models) print(f"Simulation {model_name} has {len(models)} model(s):\n" f"{linesep.join(models)}") + def simulation_name_from_model_namfiles(mnams): + namfile = next(iter(mnams), None) + if namfile is None: pytest.skip("No namfiles (expected ordered collection)") + namfile = Path(namfile) + return (namfile.parent.parent if 
is_nested(namfile) else namfile.parent).name + metafunc.parametrize(key, simulations, ids=simulation_name_from_model_namfiles) diff --git a/autotest/regression/test_mf6_examples.py b/autotest/regression/test_mf6_examples.py index ed0ad9a9c3..5ec55d4184 100644 --- a/autotest/regression/test_mf6_examples.py +++ b/autotest/regression/test_mf6_examples.py @@ -27,12 +27,11 @@ def test_mf6_example_simulations(tmpdir, mf6_example_namfiles): # make sure we have at least 1 name file if len(mf6_example_namfiles) == 0: pytest.skip("No namfiles (expected ordered collection)") - namfile = Path(mf6_example_namfiles[0]) + namfile = Path(mf6_example_namfiles[0]) # pull the first model's namfile # coupled models have nested dirs (e.g., 'mf6gwf' and 'mf6gwt') under model directory # TODO: are there multiple types of couplings? e.g. besides GWF-GWT, mt3dms? nested = is_nested(namfile) - tmpdir = Path(tmpdir / "workspace") # working directory (must not exist for copytree) cmpdir = tmpdir / "compare" # comparison directory @@ -53,10 +52,12 @@ def run_models(): wrkdir = Path(tmpdir / model_path.name) if nested else tmpdir # load simulation - sim = MFSimulation.load(namfile_name, - version="mf6", - exe_name="mf6", - sim_ws=str(wrkdir)) + sim = MFSimulation.load( + namfile_name, + version="mf6", + exe_name="mf6", + sim_ws=str(wrkdir) + ) assert isinstance(sim, MFSimulation) # run simulation diff --git a/autotest/test_binarygrid_util.py b/autotest/test_binarygrid_util.py index 25090fad35..506d6bb9fd 100644 --- a/autotest/test_binarygrid_util.py +++ b/autotest/test_binarygrid_util.py @@ -1,9 +1,8 @@ import matplotlib import numpy as np import pytest -from matplotlib import pyplot as plt - from flaky import flaky +from matplotlib import pyplot as plt from flopy.discretization import StructuredGrid, UnstructuredGrid, VertexGrid from flopy.mf6.utils import MfGrdFile diff --git a/autotest/test_modpathfile.py b/autotest/test_modpathfile.py index c1c3c3a5e2..1efa691de9 100644 --- a/autotest/test_modpathfile.py +++ b/autotest/test_modpathfile.py @@ -3,6 +3,7 @@ import pstats from shutil import copytree +import numpy as np import pytest from flopy.mf6 import MFSimulation, ModflowTdis, ModflowGwf, ModflowIms, ModflowGwfic, ModflowGwfdis, ModflowGwfnpf, ModflowGwfrcha, ModflowGwfwel, \ @@ -13,33 +14,13 @@ from autotest.conftest import requires_exe -@pytest.fixture(scope="session") -def mp7_simulation(session_tmpdir): - ws = str(session_tmpdir / "mp7_model") - - nper, nstp, perlen, tsmult = 1, 1, 1.0, 1.0 - nlay, nrow, ncol = 3, 21, 20 - delr = delc = 500.0 - top = 400.0 - botm = [220.0, 200.0, 0.0] - laytyp = [1, 0, 0] - kh = [50.0, 0.01, 200.0] - kv = [10.0, 0.01, 20.0] - wel_loc = (2, 10, 9) - wel_q = -150000.0 - rch = 0.005 - riv_h = 320.0 - riv_z = 317.0 - riv_c = 1.0e5 - +def __create_simulation(ws, name, nrow, ncol, perlen, nstp, tsmult, nper, nlay, delr, delc, top, botm, laytyp, kh, kv, rch, wel_loc, wel_q, riv_h, riv_c, riv_z): def get_nodes(locs): nodes = [] for k, i, j in locs: nodes.append(k * nrow * ncol + i * ncol + j) return nodes - name = "mp7_perf_test" - # Create the Flopy simulation object sim = MFSimulation( sim_name=name, exe_name="mf6", version="mf6", sim_ws=ws @@ -169,49 +150,105 @@ def get_nodes(locs): return sim, forward_model_name, backward_model_name, nodew, nodesr +@pytest.fixture(scope="module") +def mp7_small(module_tmpdir): + return __create_simulation( + ws=str(module_tmpdir / "mp7_small"), + name="mp7_small", + nper=1, + nstp=1, + perlen=1.0, + tsmult=1.0, + nlay=3, + nrow=11, + ncol=10, + 
delr=500.0, + delc=500.0, + top=400.0, + botm=[220.0, 200.0, 0.0], + laytyp=[1, 0, 0], + kh=[50.0, 0.01, 200.0], + kv=[10.0, 0.01, 20.0], + wel_loc=(2, 10, 9), + wel_q=-150000.0, + rch=0.005, + riv_h=320.0, + riv_z=317.0, + riv_c=1.0e5) + + +@pytest.fixture(scope="module") +def mp7_large(module_tmpdir): + return __create_simulation( + ws=str(module_tmpdir / "mp7_large"), + name="mp7_large", + nper=1, + nstp=1, + perlen=1.0, + tsmult=1.0, + nlay=3, + nrow=21, + ncol=20, + delr=500.0, + delc=500.0, + top=400.0, + botm=[220.0, 200.0, 0.0], + laytyp=[1, 0, 0], + kh=[50.0, 0.01, 200.0], + kv=[10.0, 0.01, 20.0], + wel_loc=(2, 10, 9), + wel_q=-150000.0, + rch=0.005, + riv_h=320.0, + riv_z=317.0, + riv_c=1.0e5) + + +@requires_exe("mf6", "mp7") +def test_pathline_file_sorts_in_ctor(tmpdir, mp7_small): + sim, forward_model_name, backward_model_name, nodew, nodesr = mp7_small + ws = tmpdir / "ws" + + copytree(sim.simulation_data.mfpath.get_sim_path(), ws) + + forward_path = ws / f"{forward_model_name}.mppth" + assert forward_path.is_file() + + pathline_file = PathlineFile(str(forward_path)) + assert np.all(pathline_file._data[:-1]['particleid'] <= pathline_file._data[1:]['particleid']) + + @requires_exe("mf6", "mp7") -@pytest.mark.skip(reason="pending https://github.com/modflowpy/flopy/issues/1479") -@pytest.mark.slow @pytest.mark.parametrize("direction", ["forward", "backward"]) @pytest.mark.parametrize("locations", ["well", "river"]) -def test_get_destination_pathline_data(tmpdir, mp7_simulation, direction, locations, benchmark): - sim, forward_model_name, backward_model_name, nodew, nodesr = mp7_simulation +def test_get_destination_pathline_data(tmpdir, mp7_large, direction, locations, benchmark): + sim, forward_model_name, backward_model_name, nodew, nodesr = mp7_large ws = tmpdir / "ws" - # copy simulation data from fixture setup to temp workspace copytree(sim.simulation_data.mfpath.get_sim_path(), ws) - # make sure we have pathline files forward_path = ws / f"{forward_model_name}.mppth" backward_path = ws / f"{backward_model_name}.mppth" assert forward_path.is_file() assert backward_path.is_file() - # get pathline file corresponding to parametrized direction pathline_file = PathlineFile(str(backward_path) if direction == "backward" else str(forward_path)) - - # run benchmark benchmark(lambda: pathline_file.get_destination_pathline_data(dest_cells=nodew if locations == "well" else nodesr)) @requires_exe("mf6", "mp7") @pytest.mark.parametrize("direction", ["forward", "backward"]) @pytest.mark.parametrize("locations", ["well", "river"]) -def test_get_destination_endpoint_data(tmpdir, mp7_simulation, direction, locations, benchmark): - sim, forward_model_name, backward_model_name, nodew, nodesr = mp7_simulation +def test_get_destination_endpoint_data(tmpdir, mp7_large, direction, locations, benchmark): + sim, forward_model_name, backward_model_name, nodew, nodesr = mp7_large ws = tmpdir / "ws" - # copy simulation data from fixture setup to temp workspace copytree(sim.simulation_data.mfpath.get_sim_path(), ws) - # make sure we have endpoint files forward_end = ws / f"{forward_model_name}.mpend" backward_end = ws / f"{backward_model_name}.mpend" assert forward_end.is_file() assert backward_end.is_file() - # get endpoint file corresponding to parametrized direction endpoint_file = EndpointFile(str(backward_end) if direction == "backward" else str(forward_end)) - - # run benchmark benchmark(lambda: endpoint_file.get_destination_endpoint_data(dest_cells=nodew if 
locations == "well" else nodesr)) diff --git a/autotest/test_mt3d.py b/autotest/test_mt3d.py index d678e744e6..f486f56b1e 100644 --- a/autotest/test_mt3d.py +++ b/autotest/test_mt3d.py @@ -298,7 +298,7 @@ def test_mf2000_zeroth(tmpdir, mf2kmt3d_model_path): os.remove(os.path.join(cpth, ftlfile)) -@flaky +@flaky(max_runs=3) @requires_exe("mfnwt", "mt3dms") @excludes_platform("Windows", ci_only=True) # TODO remove once fixed in MT3D-USGS def test_mfnwt_CrnkNic(tmpdir, mfnwtmt3d_model_path): diff --git a/autotest/test_sfr.py b/autotest/test_sfr.py index ed4ac99513..f3cad7fbbd 100644 --- a/autotest/test_sfr.py +++ b/autotest/test_sfr.py @@ -8,8 +8,8 @@ import matplotlib.pyplot as plt import numpy as np import pytest -from autotest.conftest import get_example_data_path, requires_exe, requires_pkg +from autotest.conftest import get_example_data_path, requires_exe, requires_pkg from flopy.discretization import StructuredGrid from flopy.modflow import Modflow, ModflowDis, ModflowSfr2, ModflowStr from flopy.modflow.mfsfr2 import check @@ -796,11 +796,9 @@ def test_sfr_plot(mf2005_model_path): tv = sfr.plot( key="strtop", ) - plt.show(block=False) assert issubclass( type(tv[0]), matplotlib.axes.SubplotBase ), "could not plot strtop" - plt.close("all") def get_test_matrix(): diff --git a/codecov.yml b/codecov.yml index b60d1d0582..81e8597aa6 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,5 +1,5 @@ coverage: - precision: 3 + precision: 2 round: down range: "50...100" status: diff --git a/docs/make_release.md b/docs/make_release.md index 8c915e5998..6e93fd0bda 100644 --- a/docs/make_release.md +++ b/docs/make_release.md @@ -45,13 +45,13 @@ Instructions for making a FloPy release ## Build USGS release notes -1. Manually run `make-release.py` in the `release/` directory to update version information using: +1. Manually run `make-release.py` in the `scripts/` directory to update version information using: ``` python make-release.py ``` -2. Manually run `update-version_changes.py` in the `release/` directory to update version changes information using: +2. Manually run `update-version_changes.py` in the `scripts/` directory to update version changes information using: ``` python update-version_changes.py @@ -151,7 +151,7 @@ Use `run_notebooks.py` in the `release` directory to rerun all of the notebooks 2. Increment `major`, `minor`, and/or `micro` numbers in `flopy/version.py`, as appropriate. -3. Manually run `make-release.py` in the `release/` directory to update version information using: +3. 
diff --git a/autotest/test_sfr.py b/autotest/test_sfr.py
index ed4ac99513..f3cad7fbbd 100644
--- a/autotest/test_sfr.py
+++ b/autotest/test_sfr.py
@@ -8,8 +8,8 @@ import matplotlib.pyplot as plt
 import numpy as np
 import pytest
 
-from autotest.conftest import get_example_data_path, requires_exe, requires_pkg
+from autotest.conftest import get_example_data_path, requires_exe, requires_pkg
 from flopy.discretization import StructuredGrid
 from flopy.modflow import Modflow, ModflowDis, ModflowSfr2, ModflowStr
 from flopy.modflow.mfsfr2 import check
@@ -796,11 +796,9 @@ def test_sfr_plot(mf2005_model_path):
     tv = sfr.plot(
         key="strtop",
     )
-    plt.show(block=False)
     assert issubclass(
         type(tv[0]), matplotlib.axes.SubplotBase
     ), "could not plot strtop"
-    plt.close("all")
 
 
 def get_test_matrix():
diff --git a/codecov.yml b/codecov.yml
index b60d1d0582..81e8597aa6 100644
--- a/codecov.yml
+++ b/codecov.yml
@@ -1,5 +1,5 @@
 coverage:
-  precision: 3
+  precision: 2
   round: down
   range: "50...100"
   status:
diff --git a/docs/make_release.md b/docs/make_release.md
index 8c915e5998..6e93fd0bda 100644
--- a/docs/make_release.md
+++ b/docs/make_release.md
@@ -45,13 +45,13 @@ Instructions for making a FloPy release
 
 ## Build USGS release notes
 
-1. Manually run `make-release.py` in the `release/` directory to update version information using:
+1. Manually run `make-release.py` in the `scripts/` directory to update version information using:
 
    ```
    python make-release.py
    ```
 
-2. Manually run `update-version_changes.py` in the `release/` directory to update version changes information using:
+2. Manually run `update-version_changes.py` in the `scripts/` directory to update version changes information using:
 
    ```
    python update-version_changes.py
@@ -151,7 +151,7 @@ Use `run_notebooks.py` in the `release` directory to rerun all of the notebooks
 
 2. Increment `major`, `minor`, and/or `micro` numbers in `flopy/version.py`, as appropriate.
 
-3. Manually run `make-release.py` in the `release/` directory to update version information using:
+3. Manually run `make-release.py` in the `scripts/` directory to update version information using:
 
    ```
    python make-release.py
diff --git a/flopy/utils/modpathfile.py b/flopy/utils/modpathfile.py
index 49002cb76c..426ca3cf3c 100644
--- a/flopy/utils/modpathfile.py
+++ b/flopy/utils/modpathfile.py
@@ -147,7 +147,6 @@ def get_data(self, partid=0, totim=None, ge=True):
         """
         ra = self._data
-        ra.sort(order=["particleid", "time"])
         if totim is not None:
             if ge:
                 idx = np.where(
@@ -181,7 +180,6 @@ def get_alldata(self, totim=None, ge=True):
         """
         ra = self._data
-        ra.sort(order=["particleid", "time"])
         if totim is not None:
             if ge:
                 idx = np.where(ra["time"] >= totim)[0]
@@ -470,6 +468,9 @@ def __init__(self, filename, verbose=False):
         # set number of particle ids
         self.nid = np.unique(self._data["particleid"])
 
+        # sort data
+        self._data.sort(order=["particleid", "time"])
+
         # close the input file
         self.file.close()
@@ -1315,6 +1316,9 @@ def __init__(self, filename, verbose=False):
         # set number of particle ids
         self.nid = np.unique(self._data["particleid"])
 
+        # sort data
+        self._data.sort(order=["particleid", "time"])
+
         # close the input file
         self.file.close()
         return
diff --git a/scripts/get_benchmark_artifact_ids.py b/scripts/get_benchmark_artifact_ids.py
new file mode 100644
index 0000000000..ad56dcd96b
--- /dev/null
+++ b/scripts/get_benchmark_artifact_ids.py
@@ -0,0 +1,11 @@
+import json
+import sys
+
+input_json = json.loads(sys.argv[1])
+# workflow_run = int(sys.argv[2])
+
+artifacts = input_json['artifacts']
+artifacts = [a for a in artifacts if a['name'].startswith('benchmarks-')]  # and a['workflow_run']['id'] == workflow_run]
+artifacts = [a for a in artifacts if a['name'].split('-')[-1].isdigit()]  # skip if last element isn't workflow run id
+for a in artifacts:
+    print(a['id'])
diff --git a/scripts/get_executables_metadata_asset_id.py b/scripts/get_executables_metadata_asset_id.py
new file mode 100644
index 0000000000..d80694a79b
--- /dev/null
+++ b/scripts/get_executables_metadata_asset_id.py
@@ -0,0 +1,10 @@
+import json
+import sys
+
+input_json = json.loads(sys.argv[1])
+
+tag = input_json['tag_name']
+assets = input_json['assets']
+metadata = next((a for a in assets if a['name'] == 'mac.zip'), None)
+if metadata:
+    print(metadata['id'])
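Both helper scripts above parse a GitHub REST payload passed as a single command-line argument. As orientation, a sketch of the release payload shape `get_executables_metadata_asset_id.py` expects, with fabricated IDs and trimmed to the fields it actually reads:

```python
# fabricated example of /repos/{owner}/{repo}/releases/latest output,
# reduced to the fields the helper uses (real payloads carry many more)
release = {
    "tag_name": "8.0",
    "assets": [
        {"id": 101, "name": "linux.zip"},
        {"id": 102, "name": "mac.zip"},
    ],
}

# the same selection the helper performs
asset = next((a for a in release["assets"] if a["name"] == "mac.zip"), None)
print(asset["id"] if asset else "")  # -> 102
```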
"branch={})](https://github.com/modflowpy/flopy/actions/" - "workflows/ci.yml)".format(branch) + "workflows/commit.yml)".format(branch) ) elif "[Read the Docs]" in line: line = ( diff --git a/scripts/postprocess_benchmarks.py b/scripts/postprocess_benchmarks.py new file mode 100644 index 0000000000..3225fa104c --- /dev/null +++ b/scripts/postprocess_benchmarks.py @@ -0,0 +1,146 @@ +import json +import sys +from pathlib import Path +from pprint import pprint + +import matplotlib.dates as dates +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +from matplotlib import cm +from matplotlib.lines import Line2D +import seaborn as sns + +indir = Path(sys.argv[1]) +outdir = Path(sys.argv[2]) +json_paths = list(Path(indir).rglob('*.json')) + +print(f"Found {len(json_paths)} JSON files") +# pprint([str(p) for p in json_paths]) + + +def get_benchmarks(paths): + benchmarks = list() + num_benchmarks = 0 + + for path in paths: + with open(path, 'r') as file: + jsn = json.load(file) + system = jsn['machine_info']['system'] + python = jsn['machine_info']['python_version'] + if len(python.split('.')) == 3: python = python.rpartition('.')[0] + tstamp = jsn['datetime'] + bmarks = jsn['benchmarks'] + for benchmark in bmarks: + num_benchmarks += 1 + fullname = benchmark['fullname'] + included = [ + 'min', + # 'max', + # 'median', + 'mean', + ] + for stat, value in benchmark['stats'].items(): + if stat not in included: continue + benchmarks.append({ + "system": system, + "python": python, + "time": tstamp, + "case": fullname, + "stat": stat, + "value": value, + }) + + print("Found", num_benchmarks, "benchmarks") + return benchmarks + + +# create data frame and save to CSV +benchmarks_df = pd.DataFrame(get_benchmarks(json_paths)) +benchmarks_df['time'] = pd.to_datetime(benchmarks_df['time']) +benchmarks_df.to_csv(str(outdir / f"benchmarks.csv"), index=False) + + +def matplotlib_plot(stats): + nstats = len(stats) + fig, axs = plt.subplots(nstats, 1, sharex=True) + + # color-code according to python version + pythons = np.unique(benchmarks_df['python']) + colors = dict(zip(pythons, cm.jet(np.linspace(0, 1, len(pythons))))) + + # markers according to system + systems = np.unique(benchmarks_df['system']) + markers = dict(zip(systems, ['x', 'o', 's'])) # osx, linux, windows + benchmarks_df['marker'] = benchmarks_df['system'].apply(lambda x: markers[x]) + + for i, (stat_name, stat_group) in enumerate(stats): + stat_df = pd.DataFrame(stat_group) + ax = axs[i] if nstats > 1 else axs + ax.set_title(stat_name) + ax.tick_params(axis='x', rotation=45) + ax.xaxis.set_major_locator(dates.DayLocator(interval=1)) + ax.xaxis.set_major_formatter(dates.DateFormatter('\n%m-%d-%Y')) + + for si, system in enumerate(systems): + ssub = stat_df[stat_df['system'] == system] + marker = markers[system] + for pi, python in enumerate(pythons): + psub = ssub[ssub['python'] == python] + color = colors[python] + ax.scatter(psub['time'], psub['value'], color=color, marker=marker) + ax.plot(psub['time'], psub['value'], linestyle="dotted", color=color) + + # configure legend + patches = [] + for system in systems: + for python in pythons: + patches.append(Line2D([0], [0], color=colors[python], marker=markers[system], label=f"{system} Python{python}")) + leg = plt.legend(handles=patches, loc='upper left', ncol=3, bbox_to_anchor=(0, 0), framealpha=0.5, bbox_transform=ax.transAxes) + for lh in leg.legendHandles: + lh.set_alpha(0.5) + + fig.suptitle(case_name) + plt.ylabel('ms') + + fig.tight_layout() + fig.set_size_inches(8, 8) 
diff --git a/scripts/postprocess_benchmarks.py b/scripts/postprocess_benchmarks.py
new file mode 100644
index 0000000000..3225fa104c
--- /dev/null
+++ b/scripts/postprocess_benchmarks.py
@@ -0,0 +1,146 @@
+import json
+import sys
+from pathlib import Path
+from pprint import pprint
+
+import matplotlib.dates as dates
+import matplotlib.pyplot as plt
+import numpy as np
+import pandas as pd
+from matplotlib import cm
+from matplotlib.lines import Line2D
+import seaborn as sns
+
+indir = Path(sys.argv[1])
+outdir = Path(sys.argv[2])
+json_paths = list(Path(indir).rglob('*.json'))
+
+print(f"Found {len(json_paths)} JSON files")
+# pprint([str(p) for p in json_paths])
+
+
+def get_benchmarks(paths):
+    benchmarks = list()
+    num_benchmarks = 0
+
+    for path in paths:
+        with open(path, 'r') as file:
+            jsn = json.load(file)
+            system = jsn['machine_info']['system']
+            python = jsn['machine_info']['python_version']
+            if len(python.split('.')) == 3:
+                # keep only major.minor (e.g., 3.10.4 -> 3.10)
+                python = python.rpartition('.')[0]
+            tstamp = jsn['datetime']
+            bmarks = jsn['benchmarks']
+            for benchmark in bmarks:
+                num_benchmarks += 1
+                fullname = benchmark['fullname']
+                included = [
+                    'min',
+                    # 'max',
+                    # 'median',
+                    'mean',
+                ]
+                for stat, value in benchmark['stats'].items():
+                    if stat not in included:
+                        continue
+                    benchmarks.append({
+                        "system": system,
+                        "python": python,
+                        "time": tstamp,
+                        "case": fullname,
+                        "stat": stat,
+                        "value": value,
+                    })
+
+    print("Found", num_benchmarks, "benchmarks")
+    return benchmarks
+
+
+# create data frame and save to CSV
+benchmarks_df = pd.DataFrame(get_benchmarks(json_paths))
+benchmarks_df['time'] = pd.to_datetime(benchmarks_df['time'])
+benchmarks_df.to_csv(str(outdir / "benchmarks.csv"), index=False)
+
+
+def matplotlib_plot(stats):
+    nstats = len(stats)
+    fig, axs = plt.subplots(nstats, 1, sharex=True)
+
+    # color-code according to python version
+    pythons = np.unique(benchmarks_df['python'])
+    colors = dict(zip(pythons, cm.jet(np.linspace(0, 1, len(pythons)))))
+
+    # markers according to system
+    systems = np.unique(benchmarks_df['system'])
+    markers = dict(zip(systems, ['x', 'o', 's']))  # osx, linux, windows
+    benchmarks_df['marker'] = benchmarks_df['system'].apply(lambda x: markers[x])
+
+    for i, (stat_name, stat_group) in enumerate(stats):
+        stat_df = pd.DataFrame(stat_group)
+        ax = axs[i] if nstats > 1 else axs
+        ax.set_title(stat_name)
+        ax.tick_params(axis='x', rotation=45)
+        ax.xaxis.set_major_locator(dates.DayLocator(interval=1))
+        ax.xaxis.set_major_formatter(dates.DateFormatter('\n%m-%d-%Y'))
+
+        for si, system in enumerate(systems):
+            ssub = stat_df[stat_df['system'] == system]
+            marker = markers[system]
+            for pi, python in enumerate(pythons):
+                psub = ssub[ssub['python'] == python]
+                color = colors[python]
+                ax.scatter(psub['time'], psub['value'], color=color, marker=marker)
+                ax.plot(psub['time'], psub['value'], linestyle="dotted", color=color)
+
+    # configure legend
+    patches = []
+    for system in systems:
+        for python in pythons:
+            patches.append(Line2D([0], [0], color=colors[python], marker=markers[system], label=f"{system} Python{python}"))
+    leg = plt.legend(handles=patches, loc='upper left', ncol=3, bbox_to_anchor=(0, 0), framealpha=0.5, bbox_transform=ax.transAxes)
+    for lh in leg.legendHandles:
+        lh.set_alpha(0.5)
+
+    # case_name is set at module level (in the loop below) before this
+    # function is called
+    fig.suptitle(case_name)
+    plt.ylabel('ms')
+
+    fig.tight_layout()
+    fig.set_size_inches(8, 8)
+
+    return fig
+
+
+def seaborn_plot(stats):
+    nstats = len(stats)
+    fig, axs = plt.subplots(nstats, 1, sharex=True)
+
+    for i, (stat_name, stat_group) in enumerate(stats):
+        stat_df = pd.DataFrame(stat_group)
+        ax = axs[i] if nstats > 1 else axs
+        ax.tick_params(axis='x', rotation=45)
+
+        sp = sns.scatterplot(x='time', y='value', style='system', hue='python', data=stat_df, ax=ax, palette="YlOrBr")
+        sp.set(xlabel=None)
+        ax.set_title(stat_name)
+        ax.get_legend().remove()
+        ax.set_ylabel('ms')
+
+    fig.suptitle(case_name)
+    fig.tight_layout()
+
+    plt.subplots_adjust(left=0.3)
+    plt.legend(loc="lower left", framealpha=0.3, bbox_to_anchor=(-0.45, -0.6))
+
+    return fig
+
+
+# create and save plots
+cases = benchmarks_df.groupby('case')
+for case_name, case in cases:
+    stats = pd.DataFrame(case).groupby('stat')
+    case_name = str(case_name) \
+        .replace('/', '_') \
+        .replace(':', '_')
+
+    # fig = matplotlib_plot(stats)
+    fig = seaborn_plot(stats)
+    plt.savefig(str(outdir / f"{case_name}.png"))
diff --git a/autotest/pull_request_prepare.py b/scripts/pull_request_prepare.py
similarity index 100%
rename from autotest/pull_request_prepare.py
rename to scripts/pull_request_prepare.py
diff --git a/release/run_notebooks.py b/scripts/run_notebooks.py
similarity index 100%
rename from release/run_notebooks.py
rename to scripts/run_notebooks.py
diff --git a/release/update-version_changes.py b/scripts/update-version_changes.py
similarity index 100%
rename from release/update-version_changes.py
rename to scripts/update-version_changes.py
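A hypothetical local dry run of the postprocessing step; the paths here are assumptions, since the script only needs a folder of pytest-benchmark JSON files and a writable output folder:

```python
import subprocess
from pathlib import Path

# hypothetical local invocation; indir holds downloaded benchmark
# artifacts, outdir receives benchmarks.csv and one PNG per case
indir = Path("autotest/.benchmarks")
outdir = Path("postprocessed")
outdir.mkdir(exist_ok=True)  # the script does not create it

subprocess.run(
    ["python", "scripts/postprocess_benchmarks.py", str(indir), str(outdir)],
    check=True,
)
```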