diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..5600260c9 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,5 @@ +visualisation +tmp +docs +fredt.tar +build_dep.tar diff --git a/.env.docker-override b/.env.docker-override index 9df9b1ee5..2caef9924 100644 --- a/.env.docker-override +++ b/.env.docker-override @@ -1,6 +1,16 @@ -# Ovverrides any values in .env that are being used for local development +# Overrides any values in .env that are being used for local development +# Values here are used inside the contianers using this file +DATA_DIR=/stored_data +DATA_DIR_SLR=/stored_data/slr_data +DATA_DIR_REC=/stored_data/rec_data +DATA_DIR_MODEL_OUTPUT=/stored_data/model_output +DATA_DIR_GEOSERVER=/stored_data/geoserver +FLOOD_MODEL_DIR=/bg_flood POSTGRES_PORT=5432 POSTGRES_HOST=db_postgres MESSAGE_BROKER_HOST=message_broker + +GEOSERVER_HOST=http://geoserver +GEOSERVER_PORT=8080 diff --git a/.env.template b/.env.template index d0e689bbd..95a438ee0 100644 --- a/.env.template +++ b/.env.template @@ -3,7 +3,9 @@ DATA_DIR=U:/Research/FloodRiskResearch/DigitalTwin/stored_data DATA_DIR_REC=U:/Research/FloodRiskResearch/DigitalTwin/stored_data/rec_data DATA_DIR_MODEL_OUTPUT=U:/Research/FloodRiskResearch/DigitalTwin/stored_data/model_output DATA_DIR_GEOSERVER=U:/Research/FloodRiskResearch/DigitalTwin/stored_data/geoserver -FLOOD_MODEL_DIR=U:/Research/FloodRiskResearch/DigitalTwin/BG-Flood/BG-Flood_v8.0 +FLOOD_MODEL_DIR=U:/Research/FloodRiskResearch/DigitalTwin/BG-Flood/BG_Flood_v0-9 + +DEBUG_TRACEBACK=False POSTGRES_HOST=localhost POSTGRES_PORT=5431 @@ -18,12 +20,9 @@ GEOSERVER_PORT=8088 GEOSERVER_ADMIN_NAME=admin GEOSERVER_ADMIN_PASSWORD=geoserver -STATSNZ_API_KEY= -LINZ_API_KEY= -LRIS_API_KEY= -MFE_API_KEY= -NIWA_API_KEY= - +WWW_HOST=http://localhost +WWW_PORT=8080 +CESIUM_ACCESS_TOKEN= # for NewZeaLiDAR # directory name for source LiDAR data from OpenTopography, parent dir is DATA_DIR diff --git a/.gitattributes b/.gitattributes new file mode 
100644 index 000000000..dfdb8b771 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +*.sh text eol=lf diff --git a/.github/workflows/build-documentation.yml b/.github/workflows/build-documentation.yml index 40afc08bc..ae2d263d4 100644 --- a/.github/workflows/build-documentation.yml +++ b/.github/workflows/build-documentation.yml @@ -12,9 +12,10 @@ jobs: working-directory: ./sphinx steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 + token: ${{ secrets.PUSH_PAGES_PAT }} - name: Install build dependencies run: | python -m pip install -r requirements.txt diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index a6a4ab5ee..78f079e6a 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -44,10 +44,6 @@ jobs: defaults: run: shell: bash -l {0} - strategy: - fail-fast: false - matrix: - python-version: [ 3.11 ] steps: - name: Checkout github repo including lfs files @@ -64,15 +60,12 @@ jobs: key: ${{ runner.os }}-conda-${{ env.cache-name }}-${{ hashFiles('environment.yml') }} - name: Install package dependencies - uses: conda-incubator/setup-miniconda@v2 + uses: mamba-org/setup-micromamba@v1 with: - activate-environment: digitaltwin environment-file: environment.yml - auto-activate-base: false - channels: conda-forge - channel-priority: strict - use-only-tar-bz2: true # IMPORTANT: This needs to be set for caching to work properly! 
- python-version: ${{ matrix.python-version }} + # only cache environment + cache-environment: true + cache-downloads: false - run: | conda info conda list diff --git a/.gitignore b/.gitignore index f9f922d54..625c04e4b 100644 --- a/.gitignore +++ b/.gitignore @@ -128,3 +128,4 @@ dmypy.json # Pyre type checker .pyre/ +/api_keys.env diff --git a/Dockerfile b/Dockerfile index 654a670ef..abdb36498 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,18 +1,99 @@ -FROM continuumio/miniconda3 as base - -WORKDIR app/ +FROM continuumio/miniconda3:23.10.0-1 AS build +# Miniconda layer for building conda environment +WORKDIR /app +# Install mamba for faster conda solves +RUN conda install -c conda-forge mamba +# Create Conda environment COPY environment.yml . -RUN conda env create -f environment.yml +RUN mamba env create -f environment.yml + # Make RUN commands use the new environment: SHELL ["conda", "run", "-n", "digitaltwin", "/bin/bash", "-c"] +# Test that conda environment worked successfully RUN echo "Check GeoFabrics is installed to test environment" RUN python -c "import geofabrics" -COPY selected_polygon.geojson . -COPY src/ src/ +# Pack conda environment to be shared to runtime image +RUN conda-pack --ignore-missing-files -n digitaltwin -o /tmp/env.tar \ + && mkdir /venv \ + && cd /venv \ + && tar xf /tmp/env.tar \ + && rm /tmp/env.tar +RUN /venv/bin/conda-unpack + + +FROM lparkinson/bg_flood:v0.9 AS runtime-base +# BG_Flood stage for running the digital twin. 
Reduces image size significantly if we use a multi-stage build +WORKDIR /app + +USER root + +# Install dependencies +RUN apt-get update \ + && apt-get install -y --no-install-recommends ca-certificates curl wget acl \ +# Install firefox from mozilla .deb repository, not snap package as is default for ubuntu (snap does not work for docker) + && wget -q https://packages.mozilla.org/apt/repo-signing-key.gpg -O- | tee /etc/apt/keyrings/packages.mozilla.org.asc > /dev/null \ + && echo "deb [signed-by=/etc/apt/keyrings/packages.mozilla.org.asc] https://packages.mozilla.org/apt mozilla main" | tee -a /etc/apt/sources.list.d/mozilla.list > /dev/null \ + && echo $' \n\ +Package: * \n\ +Pin: origin packages.mozilla.org \n\ +Pin-Priority: 1000 \n\ +' | tee /etc/apt/preferences.d/mozilla \ + && cat /etc/apt/preferences.d/mozilla \ + && apt-get update \ + && apt-get install -y --no-install-recommends firefox \ +# Install geckodriver, webdriver for firefox, needed for selenium + && curl --proto "=https" -L https://github.com/mozilla/geckodriver/releases/download/v0.30.0/geckodriver-v0.30.0-linux64.tar.gz | tar xz -C /usr/local/bin \ +# Install health-checker tool that allows us to run commands when checking root endpoint to check if service is available + && wget -q https://github.com/gruntwork-io/health-checker/releases/download/v0.0.8/health-checker_linux_amd64 -O /usr/local/bin/health-checker \ + && chmod +x /usr/local/bin/health-checker \ +# Cleanup image and remove junk + && rm -fr /var/lib/apt/lists/* \ +# Remove unused packages. 
Keep curl for health checking in docker-compose + && apt-get purge -y ca-certificates wget + +# Create stored data dir inside image, in case it does not get mounted (such as when deploying on AWS) +RUN mkdir /stored_data && setfacl -R -m u:nonroot:rwx /stored_data + +USER nonroot + +# Copy python virtual environment from build layer +COPY --chown=nonroot:nonroot --chmod=544 --from=build /venv /venv +# Using python virtual environment, preload selenium with firefox so that first runtime is faster. +SHELL ["/bin/bash", "-c"] +RUN source /venv/bin/activate && \ + selenium-manager --browser firefox --debug + +# Copy source files and essential runtime files +COPY --chown=nonroot:nonroot --chmod=444 selected_polygon.geojson . +COPY --chown=nonroot:nonroot --chmod=644 instructions.json . +COPY --chown=nonroot:nonroot --chmod=544 src/ src/ + + +FROM runtime-base AS backend +# Image build target for backend +# Using separate build targets for each image because the Orbica platform does not allow for modifying entrypoints +# and using multiple dockerfiles was creating increase complexity problems keeping things in sync EXPOSE 5000 -ENTRYPOINT ["conda", "run", "--no-capture-output", "-n", "digitaltwin", "gunicorn", "--bind", "0.0.0.0:5000", "src.app:app"] + +SHELL ["/bin/bash", "-c"] +ENTRYPOINT source /venv/bin/activate && \ + gunicorn --bind 0.0.0.0:5000 src.app:app + + +FROM runtime-base AS celery_worker +# Image build target for celery_worker + +EXPOSE 5001 + +SHELL ["/bin/bash", "-c"] +# Activate environment and run the health-checker in background and celery worker in foreground +ENTRYPOINT source /venv/bin/activate && \ + health-checker --listener 0.0.0.0:5001 --log-level error --script-timeout 10 \ + --script "celery -A src.tasks inspect ping" & \ + source /venv/bin/activate && \ + celery -A src.tasks worker -P threads --loglevel=INFO diff --git a/README.md b/README.md index 421de4d07..b40999958 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,7 @@ # Flood Resilience 
Digital Twin (FReDT) +![image](https://github.com/GeospatialResearch/Digital-Twins/assets/41398636/b7b9da6c-3895-46f5-99dc-4094003b2946) + + ## Introduction According to the National Emergency Management Agency, flooding is the greatest hazard in New Zealand, in terms of frequency, losses and civil defence emergencies. @@ -13,7 +16,7 @@ The Flood Resilience Digital Twin can provide a better understanding of the degr Digital Twin not only represents the current status of the visualised assets but also how they will perform/react to future situations. The build twin when used to run flood models combined with other sources of information can allow us to make predictions. -Data is collected from an open data portal provided by multiple organisations or data providers such as LINZ, StatsNZ, opentopography, NIWA, MFE, and more. +Data is collected from open data portals provided by multiple organisations or data providers such as LINZ, StatsNZ, opentopography, NIWA, MFE, and more. The collected data is stored in the application database using PostgreSQL The reason for implementing a database are: @@ -28,8 +31,6 @@ The following list defines the basic steps required to setup and run the digital ## Requirements * [Docker](https://www.docker.com/) -* [Anaconda](https://www.anaconda.com/download) -* [Node.js / NPM](https://nodejs.org/) ## Required Credentials: @@ -37,52 +38,37 @@ Create API keys for each of these services. You may need to create an account an * [Stats NZ API Key](https://datafinder.stats.govt.nz/my/api/) * [LINZ API Key](https://data.linz.govt.nz/my/api/) * [MFE API Key](https://data.mfe.govt.nz/my/api/) -* [NIWA Application API Key](https://developer.niwa.co.nz/) - Create an app that has the Tide API enabled -* [Cesium access token](https://cesium.com/ion/tokens) +* [NIWA Application API Key](https://developer.niwa.co.nz/) - Create an app that has the Tide API enabled ## Starting the Digital Twin application (localhost) -1. 
Set up Docker, Anaconda, and NPM to work on your system. - -1. Clone this repository to your local machine (may be best to avoid network drives for software development since they are much slower) - -1. In the project root, in an Anaconda prompt, run the following commands to initialise the environment: - ```bash - #!/usr/bin/env bash - conda env create -f environment.yml - conda activate digitaltwin - ``` - _While the environment is being created, you can continue with the other steps until using the environment._ +1. Clone this repository to your local machine. 1. Create a file called `.env` in the project root, copy the contents of `.env.template` and fill in all blank fields unless a comment says you can leave it blank. +Blank fields to fill in include things like the `POSTGRES_PASSWORD` variable and `CESIUM_ACCESS_TOKEN`. You may configure other variables as needed. -1. Set any file paths in `.env` if needed, for example `FLOOD_MODEL_DIR` references a Geospatial Research Institute - network drive, so you may need to provide your own implementation of `BG_flood` here. - Multiple instances of the digital twin can point to the same directories and share the cached data to improve speed. - -1. Create a file `visualisation/.env.local`. In this, fill in - `VUE_APP_CESIUM_ACCESS_TOKEN=[your_token_here]`, replace `[your_token_here]` with the Cesium Access Token +1. Configure `DATA_DIRx` variables in `.env` such that they point to real directories accessible to your file system. + We have these mounted on UC network drives, so we can share lidar data between FReDT instances. + +1. Create a file called `api_keys.env`, copy the contents of `api_keys.env.template` and fill in the blank values with API credentials. + +1. Set any file paths in `.env` if needed. Multiple instances of the digital twin can point to the same directories and share the cached data to improve speed. -1. 
From project root, run the command `docker-compose up --build -d` to run the database, backend web servers, and helper services. +1. From project root, run the command `docker-compose up -d` to run the database, backend web servers, and helper services. **If this fails on a WindowsToastNotification error on windows, just run it again and it should work.** -1. Currently, the `visualisation` and `celery_worker` services are not set up to work with Docker, so these will be set up manually. - 1. In one terminal, with the conda environment activated, go to the project root directory and run `celery -A src.tasks worker --loglevel=INFO --pool=solo` to run the backend celery service. - 1. In another terminal open the `visualisation` directory and run `npm ci && npm run serve` to start the development visualisation server. - -1. You may inspect the logs of the backend in the celery window. +1. You may inspect the logs of the backend using `docker-compose logs -f backend celery_worker` 1. You may inspect the PostgreSQL database by logging in using the credentials you stored in the `.env` file and a database client such as `psql` or pgAdmin or DBeaver or PyCharm Professional. ## Using the Digital Twin application -1. With the visualisation server running, visit the address shown in the visualisation server window, default [http://localhost:8080](http://localhost:8080) -1. To run a flood model, hold SHIFT and hold the left mouse button to drag a box around the area you wish to run the model for. -1. Once the model has completed running, you may need to click the button at the bottom of the screen requesting you to reload the flood model. -1. To see a graph for flood depths over time at a location, hold CTRL and click the left mouse button on the area you wish to query. +The current application is running only in headless mode. Meaning, the front-end website is not active. +To interact with the application you send calls to the REST API. 
Example calls are shown in api_calls.py, and they can be replicated in other http clients such as Postman. ## Setup for developers +Set up environment variables as above. ### Run single Docker service e.g. database To run only one isolated service (services defined in `docker-compose.yml`) use the following command: @@ -94,14 +80,14 @@ e.g. To run only the database in detached mode: docker-compose up --build -d db_postgres ``` -### Run Celery locally (reccomended, since BG Flood does not yet work on Docker) +### Run Celery locally (without docker) With the conda environment activated run: ```bash #!/usr/bin/env bash -celery -A src.tasks worker --loglevel=INFO --pool=solo +celery -A src.tasks worker -P threads --loglevel=INFO ``` -### Running the backend without web interface. +### Running the backend as a processing script instead of web interface It will likely be useful to run processing using the digital twin, without running the web interface. To do so: 1. Run `db_postgres` and `geoserver` services in docker. @@ -109,7 +95,8 @@ To do so: #!/usr/bin/env bash docker-compose up --build -d db_postgres geoserver ``` -2. For local testing, it may be useful to use the `src.run_all.py` script to run the processing. +2. For local testing, it may be useful to use the `src.run_all.py` script to run the processing. 
From the project root run +`python -m src.run_all` ## Tests diff --git a/api_calls.py b/api_calls.py index 681197e64..dcd3ea467 100644 --- a/api_calls.py +++ b/api_calls.py @@ -30,10 +30,10 @@ def generate_flood_model() -> str: # Create request data for getting flood model data from a region over Kaiapoi request_data = { "bbox": { - "lat1": -43.38205648955185, - "lng1": 172.6487081332888, - "lng2": 172.66, - "lat2": -43.40 + "lat1": -43.370613130921434, + "lng1": 172.65156000179044, + "lng2": 172.71678302522903, + "lat2": -43.400136655560765 }, "scenarioOptions": { "Projected Year": 2050, @@ -60,12 +60,12 @@ def poll_for_completion(task_id: str) -> int: # 5 Second delay before retrying time.sleep(5) print("Polling backend for task completion...") - # Get status of a task task_status_response = requests.get(f"{backend_url}/tasks/{task_id}") + response_body = task_status_response.json() + print(response_body) task_status_response.raise_for_status() # Load the body JSON into a python dict - response_body = json.loads(task_status_response.text) task_status = response_body["taskStatus"] task_value = response_body['taskValue'] print(f"Task completed with value {task_value}") @@ -83,7 +83,6 @@ def get_building_statuses(model_id: int) -> GeoDataFrame: return GeoDataFrame.from_features(building_json["features"]) - def get_depths_at_point(task_id: str): point = {"lat": -43.39, "lng": 172.65} # Send a request to get the depths at a point for a flood model associated with a task @@ -97,6 +96,18 @@ def get_depths_at_point(task_id: str): print(response_body) +def fetch_new_dataset_table(): + # Update LiDAR datasets, takes a long time. 
+ print("Refreshing LiDAR OpenTopography URLs to get newest LiDAR data") + update_datasets_response = requests.post(f"{backend_url}/datasets/update") + # Check for errors (400/500 codes) + update_datasets_response.raise_for_status() + # Load the body JSON into a python dict + response_body = json.loads(update_datasets_response.text) + # Read the task id + return response_body["taskId"] + + def stop_task(task_id: str): # Send a request to stop the task requests.delete(f"{backend_url}/tasks/{task_id}") diff --git a/api_keys.env.template b/api_keys.env.template new file mode 100644 index 000000000..ccdce0d50 --- /dev/null +++ b/api_keys.env.template @@ -0,0 +1,5 @@ +STATSNZ_API_KEY= +LINZ_API_KEY= +LRIS_API_KEY= +MFE_API_KEY= +NIWA_API_KEY= diff --git a/build_docker.sh b/build_docker.sh new file mode 100644 index 000000000..8b1236604 --- /dev/null +++ b/build_docker.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +## Sets any necessary pre-requisite folders and builds/pulls docker images. +## Uses the local repository as priority over published docker images. 
+ +# Read .env file +echo "Reading .env file" +set -o allexport +source .env +set +o allexport + +# Create geoserver folder that needs to be created by a user and not the Docker user +echo "Ensuring DATA_DIR_GEOSERVER ($DATA_DIR_GEOSERVER) exists" +mkdir -p "$DATA_DIR_GEOSERVER" + +# Pull docker images from online where available +docker compose pull + +# Build images that are different from the online source +docker compose build + + +# Save images to tar for backup in case docker goes down +echo "Saving images to fredt.tar" +docker save -o fredt.tar \ + postgis/postgis:16-3.4 \ + lparkinson/backend-flood-resilience-dt:1.0 \ + lparkinson/celery-flood-resilience-dt:1.0 \ + docker.osgeo.org/geoserver:2.21.2 \ + lparkinson/www-flood-resilience-dt:1.0 \ + redis:7 \ + + +echo "Saving docker build dependency images to build_dep.tar" +docker save -o build_dep.tar \ + lparkinson/bg_flood:v0.9 \ + continuumio/miniconda3:23.10.0-1 \ + node:lts \ + nginx:stable diff --git a/docker-compose.yml b/docker-compose.yml index 7e18d012a..fbaedd226 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -6,7 +6,7 @@ volumes: services: db_postgres: # Database to store all vector data, states, and links to raster data. - image: postgis/postgis + image: postgis/postgis:16-3.4 container_name: db_postgres_digital_twin restart: always ports: @@ -22,11 +22,22 @@ services: backend: # Performs analysis, computation, handles web requests, facilitates database interactions - build: . + build: + context: . 
+ target: backend + image: lparkinson/backend-flood-resilience-dt:1.1 container_name: backend_digital_twin env_file: - .env + - api_keys.env - .env.docker-override + volumes: + - ${DATA_DIR_MODEL_OUTPUT}:/stored_data/model_output + healthcheck: + test: curl --fail -s http://localhost:5000/ || exit 1 + interval: 10s + timeout: 5s + retries: 10 ports: - "5000:5000" depends_on: @@ -34,7 +45,35 @@ services: - message_broker - geoserver - # Celery worker removed temporarily because BG_flood has not yet been configured for docker. + celery_worker: + # Performs tasks such as complex computation asynchronously on behalf of backend + build: + context: . + target: celery_worker + image: lparkinson/celery-flood-resilience-dt:1.1 + container_name: celery_worker_digital_twin + restart: always + env_file: + - .env + - api_keys.env + - .env.docker-override + volumes: + # Bind host data directories to container, allowing different instances to share data sources. + - ${DATA_DIR}:/stored_data + - ${DATA_DIR_REC}:/stored_data/rec_data + - ${DATA_DIR_MODEL_OUTPUT}:/stored_data/model_output + - ${DATA_DIR_GEOSERVER}:/stored_data/geoserver + ports: + - "5001:5001" + healthcheck: + test: curl --fail -s http://localhost:5001/ || exit 1 + interval: 10s + timeout: 5s + retries: 10 + depends_on: + - db_postgres + - message_broker + - geoserver geoserver: # Serves geospatial web data through interactions with files and database @@ -47,21 +86,43 @@ services: environment: - SKIP_DEMO_DATA=true - CORS_ENABLED=true + - ROOT_WEBAPP_REDIRECT=true ports: - "${GEOSERVER_PORT}:8080" restart: always healthcheck: - test: curl --fail -s http://localhost:8080/ || exit 1 - interval: 1m30s - timeout: 10s - retries: 3 + test: curl --fail -s http://localhost:8080/geoserver || exit 1 + timeout: 5s + retries: 10 + + www: + # Webserver for the website interface + image: lparkinson/www-flood-resilience-dt:1.1 + build: + context: ./visualisation + container_name: www_digital_twin + environment: + - 
VUE_APP_CESIUM_ACCESS_TOKEN=$CESIUM_ACCESS_TOKEN + - VUE_APP_GEOSERVER_HOST=$GEOSERVER_HOST + - VUE_APP_GEOSERVER_PORT=$GEOSERVER_PORT + - VUE_APP_POSTGRES_DB=$POSTGRES_DB + ports: + - "${WWW_PORT}:80" + healthcheck: + test: curl --fail -s http://localhost:80/ || exit 1 + timeout: 5s + retries: 10 - # WWW removed temporarily because build is failing and more work is needed. message_broker: # Communicates between backend and workers to assign tasks and store state - image: redis + image: redis:7 container_name: message_broker_digital_twin ports: - "6379:6379" + healthcheck: + test: redis-cli ping | grep PONG + timeout: 1s + retries: 10 + restart: always diff --git a/docs/_sources/autoapi/src/app/index.rst.txt b/docs/_sources/autoapi/src/app/index.rst.txt index 51e59a6c7..ef2c73df0 100644 --- a/docs/_sources/autoapi/src/app/index.rst.txt +++ b/docs/_sources/autoapi/src/app/index.rst.txt @@ -3,6 +3,11 @@ .. py:module:: src.app +.. autoapi-nested-parse:: + + The main web application that serves the Digital Twin to the web through a Rest API. + + Module Contents --------------- @@ -40,28 +45,82 @@ Attributes .. py:function:: health_check() -> flask.Response Ping this endpoint to check that the server is up and running + Supported methods: GET + + :returns: The HTTP Response. Expect OK if health check is successful + :rtype: Response + +.. py:function:: get_status(task_id: str) -> flask.Response -.. py:function:: get_status(task_id) -> flask.Response + Retrieves status of a particular Celery backend task. + Supported methods: GET + + :param task_id: The id of the Celery task to retrieve status from + :type task_id: str + + :returns: JSON response containing taskStatus + :rtype: Response .. py:function:: remove_task(task_id) -> flask.Response + Deletes and stops a particular Celery backend task. + Supported methods: DELETE + + :param task_id: The id of the Celery task to remove + :type task_id: str + + :returns: ACCEPTED is the expected response + :rtype: Response + .. 
py:function:: generate_model() -> flask.Response + Generates a flood model for a given area. + Supported methods: POST + POST values: {"bbox": {"lat1": number, "lat2": number, "lng1": number, "lng2": number}} + + :returns: ACCEPTED is the expected response. Response body contains Celery taskId + :rtype: Response + .. py:function:: get_wfs_layer_latest_model(model_id) .. py:function:: create_wkt_from_coords(lat1: float, lng1: float, lat2: float, lng2: float) -> str + Takes two points and creates a wkt bbox string from them + + :param lat1: latitude of first point + :type lat1: float + :param lng1: longitude of first point + :type lng1: float + :param lat2: latitude of second point + :type lat2: float + :param lng2: longitude of second point + :type lng2: float + + :returns: bbox in wkt form generated from the two coordinates + :rtype: str + .. py:function:: get_depth_at_point() -> flask.Response .. py:function:: valid_coordinates(latitude: float, longitude: float) -> bool + Validates coordinates are in the valid range of WGS84 + (-90 < latitude <= 90) and (-180 < longitude <= 180) + + :param latitude: The latitude part of the coordinate + :type latitude: float + :param longitude: The longitude part of the coordinate + :type longitude: float + + :returns: True if both latitude and longitude are within their valid ranges. + :rtype: bool + .. py:data:: gunicorn_logger diff --git a/docs/_sources/autoapi/src/config/index.rst.txt b/docs/_sources/autoapi/src/config/index.rst.txt index 1c746c266..417848c35 100644 --- a/docs/_sources/autoapi/src/config/index.rst.txt +++ b/docs/_sources/autoapi/src/config/index.rst.txt @@ -32,8 +32,8 @@ Attributes .. 
py:function:: get_env_variable(var_name: str, default: T = None, allow_empty: bool = False, cast_to: type = str) -> T - Reads an environment variable, with settings to allow defaults, empty values, and type casting - To read a boolean EXAMPLE_ENV_VAR=False use get_env_variable("EXAMPLE_ENV_VAR", cast_to=bool) + Reads an environment variable, with settings to allow defaults, empty values, and type casting + To read a boolean EXAMPLE_ENV_VAR=False use get_env_variable("EXAMPLE_ENV_VAR", cast_to=bool) :param var_name: The name of the environment variable to retrieve. :type var_name: str @@ -41,7 +41,7 @@ Attributes :type default: T = None :param allow_empty: If False then a KeyError will be raised if the environment variable is empty. :type allow_empty: bool - :param cast_to: The type to cast to eg. str, int, or bool + :param cast_to: The type to cast to e.g. str, int, or bool :type cast_to: Callable[[str], T] :rtype: The environment variable, or default if it does not exist, as type T. diff --git a/docs/_sources/autoapi/src/digitaltwin/data_to_db/index.rst.txt b/docs/_sources/autoapi/src/digitaltwin/data_to_db/index.rst.txt index 1de227267..9ab43934b 100644 --- a/docs/_sources/autoapi/src/digitaltwin/data_to_db/index.rst.txt +++ b/docs/_sources/autoapi/src/digitaltwin/data_to_db/index.rst.txt @@ -93,13 +93,13 @@ Attributes :param engine: The engine used to connect to the database. :type engine: Engine - :param vector_data: The GeoDataFrame containing the fetched vector data. + :param vector_data: A GeoDataFrame containing the fetched vector data. :type vector_data: gpd.GeoDataFrame :param table_name: The name of the table in the database. :type table_name: str :param unique_column_name: The name of the unique column in the table. :type unique_column_name: str - :param area_of_interest: The GeoDataFrame representing the area of interest. + :param area_of_interest: A GeoDataFrame representing the area of interest. 
:type area_of_interest: gpd.GeoDataFrame :returns: The set of IDs from the fetched vector_data that are not present in the specified table in the database. @@ -113,9 +113,9 @@ Attributes :param engine: The engine used to connect to the database. :type engine: Engine :param crs: The coordinate reference system (CRS) code to use. Default is 2193. - :type crs: int, optional + :type crs: int = 2193 :param verbose: Whether to print messages. Default is False. - :type verbose: bool, optional + :type verbose: bool = False :returns: This function does not return any value. :rtype: None @@ -128,7 +128,7 @@ Attributes :param engine: The engine used to connect to the database. :type engine: Engine - :param catchment_area: The GeoDataFrame representing the catchment area. + :param catchment_area: A GeoDataFrame representing the catchment area. :type catchment_area: gpd.GeoDataFrame :param table_name: The name of the table in the database. :type table_name: str @@ -151,12 +151,12 @@ Attributes :type layer_id: int :param table_name: The database table name of the geospatial layer. :type table_name: str - :param area_of_interest: The GeoDataFrame representing the area of interest. + :param area_of_interest: A GeoDataFrame representing the area of interest. :type area_of_interest: gpd.GeoDataFrame :param crs: The coordinate reference system (CRS) code to use. Default is 2193. - :type crs: int, optional + :type crs: int = 2193 :param verbose: Whether to print messages. Default is False. - :type verbose: bool, optional + :type verbose: bool = False :returns: This function does not return any value. :rtype: None @@ -176,12 +176,12 @@ Attributes :type table_name: str :param unique_column_name: The unique column name used for record identification in the database table. :type unique_column_name: str - :param area_of_interest: The GeoDataFrame representing the area of interest. + :param area_of_interest: A GeoDataFrame representing the area of interest. 
:type area_of_interest: gpd.GeoDataFrame :param crs: The coordinate reference system (CRS) code to use. Default is 2193. - :type crs: int, optional + :type crs: int = 2193 :param verbose: Whether to print messages. Default is False. - :type verbose: bool, optional + :type verbose: bool = False :returns: This function does not return any value. :rtype: None @@ -193,12 +193,12 @@ Attributes :param engine: The engine used to connect to the database. :type engine: Engine - :param catchment_area: The GeoDataFrame representing the catchment area. + :param catchment_area: A GeoDataFrame representing the catchment area. :type catchment_area: gpd.GeoDataFrame :param crs: The coordinate reference system (CRS) code to use. Default is 2193. - :type crs: int, optional + :type crs: int = 2193 :param verbose: Whether to print messages. Default is False. - :type verbose: bool, optional + :type verbose: bool = False :returns: This function does not return any value. :rtype: None @@ -210,12 +210,12 @@ Attributes :param engine: The engine used to connect to the database. :type engine: Engine - :param catchment_area: The GeoDataFrame representing the catchment area. + :param catchment_area: A GeoDataFrame representing the catchment area. :type catchment_area: gpd.GeoDataFrame :param crs: The coordinate reference system (CRS) code to use. Default is 2193. - :type crs: int, optional + :type crs: int = 2193 :param verbose: Whether to print messages. Default is False. - :type verbose: bool, optional + :type verbose: bool = False :returns: This function does not return any value. :rtype: None @@ -227,7 +227,7 @@ Attributes :param engine: The engine used to connect to the database. :type engine: Engine - :param catchment_area: The GeoDataFrame representing the catchment area. + :param catchment_area: A GeoDataFrame representing the catchment area. :type catchment_area: gpd.GeoDataFrame :returns: This function does not return any value. 
diff --git a/docs/_sources/autoapi/src/digitaltwin/get_data_using_geoapis/index.rst.txt b/docs/_sources/autoapi/src/digitaltwin/get_data_using_geoapis/index.rst.txt index bb7277285..a5521d73b 100644 --- a/docs/_sources/autoapi/src/digitaltwin/get_data_using_geoapis/index.rst.txt +++ b/docs/_sources/autoapi/src/digitaltwin/get_data_using_geoapis/index.rst.txt @@ -61,10 +61,10 @@ Functions Clean the fetched vector data by performing necessary transformations. - :param fetched_data: The fetched vector data as a GeoDataFrame. + :param fetched_data: A GeoDataFrame containing the fetched vector data. :type fetched_data: gpd.GeoDataFrame - :returns: The cleaned vector data as a GeoDataFrame. + :returns: A GeoDataFrame containing the cleaned vector data. :rtype: gpd.GeoDataFrame @@ -77,13 +77,13 @@ Functions :param layer_id: The ID of the layer to fetch. :type layer_id: int :param crs: The coordinate reference system (CRS) code to use. Default is 2193. - :type crs: int, optional + :type crs: int = 2193 :param verbose: Whether to print messages. Default is False. - :type verbose: bool, optional + :type verbose: bool = False :param bounding_polygon: Bounding polygon for data fetching. Default is all of New Zealand. - :type bounding_polygon: gpd.GeoDataFrame, optional + :type bounding_polygon: Optional[gpd.GeoDataFrame] = None - :returns: The fetched vector data as a GeoDataFrame. + :returns: A GeoDataFrame containing the fetched vector data. :rtype: gpd.GeoDataFrame :raises ValueError: If an unsupported 'data_provider' value is provided. 
diff --git a/docs/_sources/autoapi/src/digitaltwin/index.rst.txt b/docs/_sources/autoapi/src/digitaltwin/index.rst.txt index 45b8fb23f..93ee643ac 100644 --- a/docs/_sources/autoapi/src/digitaltwin/index.rst.txt +++ b/docs/_sources/autoapi/src/digitaltwin/index.rst.txt @@ -13,7 +13,7 @@ Submodules data_to_db/index.rst get_data_using_geoapis/index.rst instructions_records_to_db/index.rst - run/index.rst + retrieve_static_boundaries/index.rst setup_environment/index.rst tables/index.rst utils/index.rst diff --git a/docs/_sources/autoapi/src/digitaltwin/instructions_records_to_db/index.rst.txt b/docs/_sources/autoapi/src/digitaltwin/instructions_records_to_db/index.rst.txt index d42312d17..fc3c26159 100644 --- a/docs/_sources/autoapi/src/digitaltwin/instructions_records_to_db/index.rst.txt +++ b/docs/_sources/autoapi/src/digitaltwin/instructions_records_to_db/index.rst.txt @@ -5,8 +5,8 @@ .. autoapi-nested-parse:: - This script processes 'instructions_run' records, validates URLs and instruction fields, and stores them in the - 'geospatial_layers' table of the database. + This script processes 'static_vector_instructions' records, validates URLs and instruction fields, and stores them in + the 'geospatial_layers' table of the database. @@ -75,7 +75,7 @@ Attributes .. py:function:: read_and_check_instructions_file() -> pandas.DataFrame - Read and check the instructions_run file, validating URLs and instruction fields. + Read and check the static_vector_instructions file, validating URLs and instruction fields. :returns: The processed instructions DataFrame. :rtype: pd.DataFrame @@ -94,20 +94,20 @@ Attributes .. py:function:: get_non_existing_records(instructions_df: pandas.DataFrame, existing_layers_df: pandas.DataFrame) -> pandas.DataFrame - Get 'instructions_run' records that are not available in the database. + Get 'static_vector_instructions' records that are not available in the database. 
- :param instructions_df: Data frame containing the 'instructions_run' records. + :param instructions_df: Data frame containing the 'static_vector_instructions' records. :type instructions_df: pd.DataFrame - :param existing_layers_df: Data frame containing the existing 'instructions_run' records from the database. + :param existing_layers_df: Data frame containing the existing 'static_vector_instructions' records from the database. :type existing_layers_df: pd.DataFrame - :returns: Data frame containing the 'instructions_run' records that are not available in the database. + :returns: Data frame containing the 'static_vector_instructions' records that are not available in the database. :rtype: pd.DataFrame .. py:function:: store_instructions_records_to_db(engine: sqlalchemy.engine.Engine) -> None - Store 'instructions_run' records in the 'geospatial_layers' table in the database. + Store 'static_vector_instructions' records in the 'geospatial_layers' table in the database. :param engine: The engine used to connect to the database. :type engine: Engine diff --git a/docs/_sources/autoapi/src/digitaltwin/retrieve_static_boundaries/index.rst.txt b/docs/_sources/autoapi/src/digitaltwin/retrieve_static_boundaries/index.rst.txt new file mode 100644 index 000000000..85c1017e3 --- /dev/null +++ b/docs/_sources/autoapi/src/digitaltwin/retrieve_static_boundaries/index.rst.txt @@ -0,0 +1,59 @@ +:py:mod:`src.digitaltwin.retrieve_static_boundaries` +==================================================== + +.. py:module:: src.digitaltwin.retrieve_static_boundaries + +.. autoapi-nested-parse:: + + This script automates the retrieval and storage of geospatial data from various providers using the 'geoapis' library. + It populates the 'geospatial_layers' table in the database and stores user log information for tracking and reference. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. 
autoapisummary:: + + src.digitaltwin.retrieve_static_boundaries.main + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + src.digitaltwin.retrieve_static_boundaries.sample_polygon + + +.. py:function:: main(selected_polygon_gdf: geopandas.GeoDataFrame, log_level: src.digitaltwin.utils.LogLevel = LogLevel.DEBUG) -> None + + Connects to various data providers to fetch geospatial data for the selected polygon, i.e., the catchment area. + Subsequently, it populates the 'geospatial_layers' table in the database and stores user log information for + tracking and reference. + + :param selected_polygon_gdf: A GeoDataFrame representing the selected polygon, i.e., the catchment area. + :type selected_polygon_gdf: gpd.GeoDataFrame + :param log_level: The log level to set for the root logger. Defaults to LogLevel.DEBUG. + The available logging levels and their corresponding numeric values are: + - LogLevel.CRITICAL (50) + - LogLevel.ERROR (40) + - LogLevel.WARNING (30) + - LogLevel.INFO (20) + - LogLevel.DEBUG (10) + - LogLevel.NOTSET (0) + :type log_level: LogLevel = LogLevel.DEBUG + + :returns: This function does not return any value. + :rtype: None + + +.. py:data:: sample_polygon + + + diff --git a/docs/_sources/autoapi/src/digitaltwin/setup_environment/index.rst.txt b/docs/_sources/autoapi/src/digitaltwin/setup_environment/index.rst.txt index f6e0349b9..37892994a 100644 --- a/docs/_sources/autoapi/src/digitaltwin/setup_environment/index.rst.txt +++ b/docs/_sources/autoapi/src/digitaltwin/setup_environment/index.rst.txt @@ -5,8 +5,8 @@ .. autoapi-nested-parse:: - This script provides functions to set up the database connection using SQLAlchemy and environment - variables, as well as to create an SQLAlchemy engine for database operations. + This script provides functions to set up the database connection using SQLAlchemy and environment variables, + as well as to create an SQLAlchemy engine for database operations. 
diff --git a/docs/_sources/autoapi/src/digitaltwin/tables/index.rst.txt b/docs/_sources/autoapi/src/digitaltwin/tables/index.rst.txt index e35e191c6..98f3c4ba6 100644 --- a/docs/_sources/autoapi/src/digitaltwin/tables/index.rst.txt +++ b/docs/_sources/autoapi/src/digitaltwin/tables/index.rst.txt @@ -19,7 +19,8 @@ Classes src.digitaltwin.tables.GeospatialLayers src.digitaltwin.tables.UserLogInfo - src.digitaltwin.tables.HydroDEM + src.digitaltwin.tables.RiverNetworkExclusions + src.digitaltwin.tables.RiverNetwork src.digitaltwin.tables.BGFloodModelOutput @@ -88,13 +89,13 @@ Attributes Name of the unique column in the table. - :type: str, optional + :type: Optional[str] .. attribute:: coverage_area - Coverage area of the geospatial data. It can be either the whole country or NULL. + Coverage area of the geospatial data, e.g. 'New Zealand'. - :type: str, optional + :type: Optional[str] .. attribute:: url @@ -135,6 +136,11 @@ Attributes + .. py:attribute:: __table_args__ + :value: () + + + .. py:class:: UserLogInfo @@ -159,7 +165,7 @@ Attributes A list of tables (geospatial layers) associated with the log entry. - :type: List[str] + :type: Dict[str] .. attribute:: created_at @@ -195,12 +201,12 @@ Attributes -.. py:class:: HydroDEM +.. py:class:: RiverNetworkExclusions Bases: :py:obj:`Base` - Class representing the 'hydrological_dem' table. + Class representing the 'rec_network_exclusions' table. .. attribute:: __tablename__ @@ -208,21 +214,85 @@ Attributes :type: str - .. attribute:: unique_id + .. attribute:: rec_network_id - Unique identifier for each entry (primary key). + An identifier for the river network associated with each new run. :type: int - .. attribute:: file_name + .. attribute:: objectid - Name of the hydrological DEM file. + An identifier for the REC river object matching from the 'rec_data' table. + + :type: int + + .. attribute:: exclusion_cause + + Cause of exclusion, i.e., the reason why the REC river geometry was excluded. :type: str - .. 
attribute:: file_path + .. attribute:: geometry + + Geometric representation of the excluded REC river features. + + :type: LineString + + .. py:attribute:: __tablename__ + :value: 'rec_network_exclusions' + + + + .. py:attribute:: rec_network_id + + + + .. py:attribute:: objectid + + + + .. py:attribute:: exclusion_cause + + + + .. py:attribute:: geometry + + + + .. py:attribute:: __table_args__ + :value: () + + + + +.. py:class:: RiverNetwork + + + Bases: :py:obj:`Base` + + Class representing the 'rec_network' table. + + .. attribute:: __tablename__ + + Name of the database table. + + :type: str + + .. attribute:: rec_network_id + + An identifier for the river network associated with each new run (primary key). + + :type: int + + .. attribute:: network_path - Path to the hydrological DEM file. + Path to the REC river network file. + + :type: str + + .. attribute:: network_data_path + + Path to the REC river network data file for the AOI. :type: str @@ -236,22 +306,22 @@ Attributes Geometric representation of the catchment area coverage. - :type: Geometry + :type: Polygon .. py:attribute:: __tablename__ - :value: 'hydrological_dem' + :value: 'rec_network' - .. py:attribute:: unique_id + .. py:attribute:: rec_network_id - .. py:attribute:: file_name + .. py:attribute:: network_path - .. py:attribute:: file_path + .. py:attribute:: network_data_path @@ -305,7 +375,7 @@ Attributes Geometric representation of the catchment area coverage. - :type: Geometry + :type: Polygon .. py:attribute:: __tablename__ :value: 'bg_flood_model_output' @@ -355,7 +425,7 @@ Attributes :param table_name: The name of the table to check for existence. :type table_name: str :param schema: The name of the schema where the table resides. Defaults to "public". - :type schema: str, optional + :type schema: str = "public" :returns: True if the table exists, False otherwise. 
:rtype: bool diff --git a/docs/_sources/autoapi/src/digitaltwin/utils/index.rst.txt b/docs/_sources/autoapi/src/digitaltwin/utils/index.rst.txt index 1fa802840..e82e1ebfb 100644 --- a/docs/_sources/autoapi/src/digitaltwin/utils/index.rst.txt +++ b/docs/_sources/autoapi/src/digitaltwin/utils/index.rst.txt @@ -136,7 +136,7 @@ Attributes - LogLevel.INFO (20) - LogLevel.DEBUG (10) - LogLevel.NOTSET (0) - :type log_level: int, optional + :type log_level: LogLevel = LogLevel.DEBUG :returns: This function does not return any value. :rtype: None @@ -146,12 +146,12 @@ Attributes Convert the coordinate reference system (CRS) of the catchment area GeoDataFrame to the specified CRS. - :param catchment_area: The GeoDataFrame representing the catchment area. + :param catchment_area: A GeoDataFrame representing the catchment area. :type catchment_area: gpd.GeoDataFrame :param to_crs: Coordinate Reference System (CRS) code to convert the catchment area to. Default is 2193. - :type to_crs: int, optional + :type to_crs: int = 2193 - :returns: The catchment area GeoDataFrame with the transformed CRS. + :returns: A GeoDataFrame representing the catchment area with the transformed CRS. :rtype: gpd.GeoDataFrame @@ -162,7 +162,7 @@ Attributes :param engine: The engine used to connect to the database. :type engine: Engine :param to_crs: Coordinate Reference System (CRS) code to which the boundary will be converted. Default is 2193. - :type to_crs: int, optional + :type to_crs: int = 2193 :returns: A GeoDataFrame representing the boundary of New Zealand in the specified CRS. :rtype: gpd.GeoDataFrame diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/index.rst.txt index 6347bc86a..e1d7c6e98 100644 --- a/docs/_sources/autoapi/src/dynamic_boundary_conditions/index.rst.txt +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/index.rst.txt @@ -4,35 +4,14 @@ .. 
py:module:: src.dynamic_boundary_conditions -Submodules ----------- +Subpackages +----------- .. toctree:: :titlesonly: - :maxdepth: 1 + :maxdepth: 3 - hirds_rainfall_data_from_db/index.rst - hirds_rainfall_data_to_db/index.rst - hydrograph/index.rst - hyetograph/index.rst - main_rainfall/index.rst - main_river/index.rst - main_tide_slr/index.rst - osm_waterways/index.rst - rainfall_data_from_hirds/index.rst - rainfall_enum/index.rst - rainfall_model_input/index.rst - rainfall_sites/index.rst - rec1_osm_match/index.rst - river_data_to_from_db/index.rst - river_enum/index.rst - river_model_input/index.rst - river_network_for_aoi/index.rst - sea_level_rise_data/index.rst - thiessen_polygons/index.rst - tide_data_from_niwa/index.rst - tide_enum/index.rst - tide_query_location/index.rst - tide_slr_combine/index.rst - tide_slr_model_input/index.rst + rainfall/index.rst + river/index.rst + tide/index.rst diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/hirds_rainfall_data_from_db/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/hirds_rainfall_data_from_db/index.rst.txt new file mode 100644 index 000000000..4782138b5 --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/hirds_rainfall_data_from_db/index.rst.txt @@ -0,0 +1,93 @@ +:py:mod:`src.dynamic_boundary_conditions.rainfall.hirds_rainfall_data_from_db` +============================================================================== + +.. py:module:: src.dynamic_boundary_conditions.rainfall.hirds_rainfall_data_from_db + +.. autoapi-nested-parse:: + + Retrieve all rainfall data for sites within the catchment area from the database. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. 
autoapisummary:: + + src.dynamic_boundary_conditions.rainfall.hirds_rainfall_data_from_db.filter_for_duration + src.dynamic_boundary_conditions.rainfall.hirds_rainfall_data_from_db.get_one_site_rainfall_data + src.dynamic_boundary_conditions.rainfall.hirds_rainfall_data_from_db.rainfall_data_from_db + + + +.. py:function:: filter_for_duration(rain_data: pandas.DataFrame, duration: str) -> pandas.DataFrame + + Filter the HIRDS rainfall data for a requested duration. + + :param rain_data: HIRDS rainfall data in Pandas DataFrame format. + :type rain_data: pd.DataFrame + :param duration: Storm duration. Valid options are: '10m', '20m', '30m', '1h', '2h', '6h', '12h', '24h', '48h', '72h', + '96h', '120h', or 'all'. + :type duration: str + + :returns: Filtered rainfall data for the requested duration. + :rtype: pd.DataFrame + + +.. py:function:: get_one_site_rainfall_data(engine: sqlalchemy.engine.Engine, site_id: str, rcp: Optional[float], time_period: Optional[str], ari: float, duration: str, idf: bool) -> pandas.DataFrame + + Get the HIRDS rainfall data for the requested site from the database and return the required data in + Pandas DataFrame format. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param site_id: HIRDS rainfall site ID. + :type site_id: str + :param rcp: Representative Concentration Pathway (RCP) value. Valid options are 2.6, 4.5, 6.0, 8.5, or None + for historical data. + :type rcp: Optional[float] + :param time_period: Future time period. Valid options are "2031-2050", "2081-2100", or None for historical data. + :type time_period: Optional[str] + :param ari: Average Recurrence Interval (ARI) value. Valid options are 1.58, 2, 5, 10, 20, 30, 40, 50, 60, 80, 100, or 250. + :type ari: float + :param duration: Storm duration. Valid options are: '10m', '20m', '30m', '1h', '2h', '6h', '12h', '24h', '48h', '72h', + '96h', '120h', or 'all'. 
+ :type duration: str + :param idf: Set to False for rainfall depth data, and True for rainfall intensity data. + :type idf: bool + + :returns: HIRDS rainfall data for the requested site and parameters. + :rtype: pd.DataFrame + + :raises ValueError: If rcp and time_period arguments are inconsistent. + + +.. py:function:: rainfall_data_from_db(engine: sqlalchemy.engine.Engine, sites_in_catchment: geopandas.GeoDataFrame, rcp: Optional[float], time_period: Optional[str], ari: float, idf: bool = False, duration: str = 'all') -> pandas.DataFrame + + Get rainfall data for the sites within the catchment area and return it as a Pandas DataFrame. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param sites_in_catchment: Rainfall sites coverage areas (Thiessen polygons) within the catchment area. + :type sites_in_catchment: gpd.GeoDataFrame + :param rcp: Representative Concentration Pathway (RCP) value. Valid options are 2.6, 4.5, 6.0, 8.5, or None + for historical data. + :type rcp: Optional[float] + :param time_period: Future time period. Valid options are "2031-2050", "2081-2100", or None for historical data. + :type time_period: Optional[str] + :param ari: Average Recurrence Interval (ARI) value. Valid options are 1.58, 2, 5, 10, 20, 30, 40, 50, 60, 80, 100, or 250. + :type ari: float + :param idf: Set to False for rainfall depth data, and True for rainfall intensity data. + :type idf: bool = False + :param duration: Storm duration. Valid options are: '10m', '20m', '30m', '1h', '2h', '6h', '12h', '24h', '48h', '72h', + '96h', '120h', or 'all'. Default is 'all'. + :type duration: str = "all" + + :returns: A DataFrame containing the rainfall data for the sites within the catchment area. 
+ :rtype: pd.DataFrame + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/hirds_rainfall_data_to_db/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/hirds_rainfall_data_to_db/index.rst.txt new file mode 100644 index 000000000..182ea5f8d --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/hirds_rainfall_data_to_db/index.rst.txt @@ -0,0 +1,123 @@ +:py:mod:`src.dynamic_boundary_conditions.rainfall.hirds_rainfall_data_to_db` +============================================================================ + +.. py:module:: src.dynamic_boundary_conditions.rainfall.hirds_rainfall_data_to_db + +.. autoapi-nested-parse:: + + Store the rainfall data for all the sites within the catchment area in the database. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.rainfall.hirds_rainfall_data_to_db.db_rain_table_name + src.dynamic_boundary_conditions.rainfall.hirds_rainfall_data_to_db.get_sites_id_in_catchment + src.dynamic_boundary_conditions.rainfall.hirds_rainfall_data_to_db.get_sites_id_not_in_db + src.dynamic_boundary_conditions.rainfall.hirds_rainfall_data_to_db.add_rainfall_data_to_db + src.dynamic_boundary_conditions.rainfall.hirds_rainfall_data_to_db.add_each_site_rainfall_data + src.dynamic_boundary_conditions.rainfall.hirds_rainfall_data_to_db.rainfall_data_to_db + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.rainfall.hirds_rainfall_data_to_db.log + + +.. py:data:: log + + + +.. py:function:: db_rain_table_name(idf: bool) -> str + + Return the relevant rainfall data table name used in the database. + + :param idf: Set to False for rainfall depth data, and True for rainfall intensity data. + :type idf: bool + + :returns: The relevant rainfall data table name. + :rtype: str + + +.. 
py:function:: get_sites_id_in_catchment(sites_in_catchment: geopandas.GeoDataFrame) -> List[str] + + Get the rainfall site IDs within the catchment area. + + :param sites_in_catchment: Rainfall site coverage areas (Thiessen polygons) that intersect or are within the catchment area. + :type sites_in_catchment: gpd.GeoDataFrame + + :returns: The rainfall site IDs within the catchment area. + :rtype: List[str] + + +.. py:function:: get_sites_id_not_in_db(engine: sqlalchemy.engine.Engine, sites_id_in_catchment: List[str], idf: bool) -> List[str] + + Get the list of rainfall site IDs that are within the catchment area but not in the database. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param sites_id_in_catchment: Rainfall site IDs within the catchment area. + :type sites_id_in_catchment: List[str] + :param idf: Set to False for rainfall depth data, and True for rainfall intensity data. + :type idf: bool + + :returns: The rainfall site IDs within the catchment area but not present in the database. + :rtype: List[str] + + +.. py:function:: add_rainfall_data_to_db(engine: sqlalchemy.engine.Engine, site_id: str, idf: bool) -> None + + Store the rainfall data for a specific site in the database. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param site_id: HIRDS rainfall site ID. + :type site_id: str + :param idf: Set to False for rainfall depth data, and True for rainfall intensity data. + :type idf: bool + + :returns: This function does not return any value. + :rtype: None + + +.. py:function:: add_each_site_rainfall_data(engine: sqlalchemy.engine.Engine, sites_id_list: List[str], idf: bool) -> None + + Add rainfall data for each site in the sites_id_list to the database. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param sites_id_list: List of rainfall sites' IDs. 
+ :type sites_id_list: List[str] + :param idf: Set to False for rainfall depth data, and True for rainfall intensity data. + :type idf: bool + + :returns: This function does not return any value. + :rtype: None + + +.. py:function:: rainfall_data_to_db(engine: sqlalchemy.engine.Engine, sites_in_catchment: geopandas.GeoDataFrame, idf: bool = False) -> None + + Store rainfall data of all the sites within the catchment area in the database. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param sites_in_catchment: Rainfall sites coverage areas (Thiessen polygons) that intersect or are within the catchment area. + :type sites_in_catchment: gpd.GeoDataFrame + :param idf: Set to False for rainfall depth data, and True for rainfall intensity data. + :type idf: bool = False + + :returns: This function does not return any value. + :rtype: None + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/hyetograph/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/hyetograph/index.rst.txt new file mode 100644 index 000000000..d1315d542 --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/hyetograph/index.rst.txt @@ -0,0 +1,196 @@ +:py:mod:`src.dynamic_boundary_conditions.rainfall.hyetograph` +============================================================= + +.. py:module:: src.dynamic_boundary_conditions.rainfall.hyetograph + +.. autoapi-nested-parse:: + + Get hyetograph data and generate interactive hyetograph plots for sites located within the catchment area. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. 
autoapisummary:: + + src.dynamic_boundary_conditions.rainfall.hyetograph.get_transposed_data + src.dynamic_boundary_conditions.rainfall.hyetograph.get_interpolated_data + src.dynamic_boundary_conditions.rainfall.hyetograph.get_interp_incremental_data + src.dynamic_boundary_conditions.rainfall.hyetograph.get_storm_length_increment_data + src.dynamic_boundary_conditions.rainfall.hyetograph.add_time_information + src.dynamic_boundary_conditions.rainfall.hyetograph.transform_data_for_selected_method + src.dynamic_boundary_conditions.rainfall.hyetograph.hyetograph_depth_to_intensity + src.dynamic_boundary_conditions.rainfall.hyetograph.get_hyetograph_data + src.dynamic_boundary_conditions.rainfall.hyetograph.hyetograph_data_wide_to_long + src.dynamic_boundary_conditions.rainfall.hyetograph.hyetograph + + + +.. py:function:: get_transposed_data(rain_depth_in_catchment: pandas.DataFrame) -> pandas.DataFrame + + Clean and transpose the retrieved scenario data from the database for sites within the catchment area and + return it in transposed Pandas DataFrame format. + + :param rain_depth_in_catchment: Rainfall depths for sites within the catchment area for a specified scenario retrieved from the database. + :type rain_depth_in_catchment: pd.DataFrame + + :returns: A DataFrame containing the cleaned and transposed scenario data. + :rtype: pd.DataFrame + + +.. py:function:: get_interpolated_data(transposed_catchment_data: pandas.DataFrame, increment_mins: int, interp_method: str) -> pandas.DataFrame + + Perform temporal interpolation on the transposed scenario data for sites within the catchment area and + return it in Pandas DataFrame format. + + :param transposed_catchment_data: Transposed scenario data retrieved from the database. + :type transposed_catchment_data: pd.DataFrame + :param increment_mins: Time interval in minutes. + :type increment_mins: int + :param interp_method: Temporal interpolation method to be used. 
Refer to 'scipy.interpolate.interp1d()' for available methods. + One of 'linear', 'nearest', 'nearest-up', 'zero', 'slinear', 'quadratic', 'cubic', 'previous', or 'next'. + :type interp_method: str + + :returns: A DataFrame containing the interpolated scenario data. + :rtype: pd.DataFrame + + :raises ValueError: - If the specified 'increment_mins' is out of range. + - If the specified 'interp_method' is not supported. + + +.. py:function:: get_interp_incremental_data(interp_catchment_data: pandas.DataFrame) -> pandas.DataFrame + + Get the incremental rainfall depths (difference between current and preceding cumulative rainfall) + for sites within the catchment area and return it in Pandas DataFrame format. + + :param interp_catchment_data: Interpolated scenario data for sites within the catchment area. + :type interp_catchment_data: pd.DataFrame + + :returns: A DataFrame containing the incremental rainfall depths. + :rtype: pd.DataFrame + + +.. py:function:: get_storm_length_increment_data(interp_increment_data: pandas.DataFrame, storm_length_mins: int) -> pandas.DataFrame + + Get the incremental rainfall depths for sites within the catchment area for a specific storm duration. + + :param interp_increment_data: Incremental rainfall depths for sites within the catchment area. + :type interp_increment_data: pd.DataFrame + :param storm_length_mins: Storm duration in minutes. + :type storm_length_mins: int + + :returns: Incremental rainfall depths for sites within the catchment area for the specified storm duration. + :rtype: pd.DataFrame + + :raises ValueError: If the specified 'storm_length_mins' is less than the minimum storm duration available in the data. + + +.. 
py:function:: add_time_information(site_data: pandas.DataFrame, storm_length_mins: int, time_to_peak_mins: Union[int, float], increment_mins: int, hyeto_method: src.dynamic_boundary_conditions.rainfall.rainfall_enum.HyetoMethod) -> pandas.DataFrame + + Add time information (seconds, minutes, and hours column) to the hyetograph data based on the + selected hyetograph method. + + :param site_data: Hyetograph data for a rainfall site or gauge. + :type site_data: pd.DataFrame + :param storm_length_mins: Storm duration in minutes. + :type storm_length_mins: int + :param time_to_peak_mins: The time in minutes when rainfall is at its greatest (reaches maximum). + :type time_to_peak_mins: Union[int, float] + :param increment_mins: Time interval in minutes. + :type increment_mins: int + :param hyeto_method: Hyetograph method to be used. + :type hyeto_method: HyetoMethod + + :returns: Hyetograph data with added time information. + :rtype: pd.DataFrame + + :raises ValueError: If the specified 'time_to_peak_mins' is less than half of the storm duration. + + +.. py:function:: transform_data_for_selected_method(interp_increment_data: pandas.DataFrame, storm_length_mins: int, time_to_peak_mins: Union[int, float], increment_mins: int, hyeto_method: src.dynamic_boundary_conditions.rainfall.rainfall_enum.HyetoMethod) -> pandas.DataFrame + + Transform the storm length incremental rainfall depths for sites within the catchment area based on + the selected hyetograph method and return hyetograph depths data for all sites within the catchment area + in Pandas DataFrame format. + + :param interp_increment_data: Incremental rainfall depths for sites within the catchment area. + :type interp_increment_data: pd.DataFrame + :param storm_length_mins: Storm duration in minutes. + :type storm_length_mins: int + :param time_to_peak_mins: The time in minutes when rainfall is at its greatest (reaches maximum). 
+ :type time_to_peak_mins: Union[int, float] + :param increment_mins: Time interval in minutes. + :type increment_mins: int + :param hyeto_method: Hyetograph method to be used. + :type hyeto_method: HyetoMethod + + :returns: Hyetograph depths data for all sites within the catchment area. + :rtype: pd.DataFrame + + +.. py:function:: hyetograph_depth_to_intensity(hyetograph_depth: pandas.DataFrame, increment_mins: int, hyeto_method: src.dynamic_boundary_conditions.rainfall.rainfall_enum.HyetoMethod) -> pandas.DataFrame + + Convert hyetograph depths data to hyetograph intensities data for all sites within the catchment area. + + :param hyetograph_depth: Hyetograph depths data for sites within the catchment area. + :type hyetograph_depth: pd.DataFrame + :param increment_mins: Time interval in minutes. + :type increment_mins: int + :param hyeto_method: Hyetograph method to be used. + :type hyeto_method: HyetoMethod + + :returns: Hyetograph intensities data for all sites within the catchment area. + :rtype: pd.DataFrame + + +.. py:function:: get_hyetograph_data(rain_depth_in_catchment: pandas.DataFrame, storm_length_mins: int, time_to_peak_mins: Union[int, float], increment_mins: int, interp_method: str, hyeto_method: src.dynamic_boundary_conditions.rainfall.rainfall_enum.HyetoMethod) -> pandas.DataFrame + + Get hyetograph intensities data for all sites within the catchment area and return it in Pandas DataFrame format. + + :param rain_depth_in_catchment: Rainfall depths for sites within the catchment area for a specified scenario retrieved from the database. + :type rain_depth_in_catchment: pd.DataFrame + :param storm_length_mins: Storm duration in minutes. + :type storm_length_mins: int + :param time_to_peak_mins: The time in minutes when rainfall is at its greatest (reaches maximum). + :type time_to_peak_mins: Union[int, float] + :param increment_mins: Time interval in minutes. 
+ :type increment_mins: int + :param interp_method: Temporal interpolation method to be used. Refer to 'scipy.interpolate.interp1d()' for available methods. + One of 'linear', 'nearest', 'nearest-up', 'zero', 'slinear', 'quadratic', 'cubic', 'previous', or 'next'. + :type interp_method: str + :param hyeto_method: Hyetograph method to be used. + :type hyeto_method: HyetoMethod + + :returns: Hyetograph intensities data for all sites within the catchment area. + :rtype: pd.DataFrame + + +.. py:function:: hyetograph_data_wide_to_long(hyetograph_data: pandas.DataFrame) -> pandas.DataFrame + + Transform hyetograph intensities data for all sites within the catchment area from wide format to long format. + + :param hyetograph_data: Hyetograph intensities data for sites within the catchment area. + :type hyetograph_data: pd.DataFrame + + :returns: Hyetograph intensities data in long format. + :rtype: pd.DataFrame + + +.. py:function:: hyetograph(hyetograph_data: pandas.DataFrame, ari: float) -> None + + Create interactive individual hyetograph plots for sites within the catchment area. + + :param hyetograph_data: Hyetograph intensities data for sites within the catchment area. + :type hyetograph_data: pd.DataFrame + :param ari: Average Recurrence Interval (ARI) value. Valid options are 1.58, 2, 5, 10, 20, 30, 40, 50, 60, 80, 100, or 250. + :type ari: float + + :returns: This function does not return any value. + :rtype: None + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/index.rst.txt new file mode 100644 index 000000000..b49ac381f --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/index.rst.txt @@ -0,0 +1,23 @@ +:py:mod:`src.dynamic_boundary_conditions.rainfall` +================================================== + +.. py:module:: src.dynamic_boundary_conditions.rainfall + + +Submodules +---------- +.. 
toctree:: + :titlesonly: + :maxdepth: 1 + + hirds_rainfall_data_from_db/index.rst + hirds_rainfall_data_to_db/index.rst + hyetograph/index.rst + main_rainfall/index.rst + rainfall_data_from_hirds/index.rst + rainfall_enum/index.rst + rainfall_model_input/index.rst + rainfall_sites/index.rst + thiessen_polygons/index.rst + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/main_rainfall/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/main_rainfall/index.rst.txt new file mode 100644 index 000000000..f83da0ec4 --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/main_rainfall/index.rst.txt @@ -0,0 +1,87 @@ +:py:mod:`src.dynamic_boundary_conditions.rainfall.main_rainfall` +================================================================ + +.. py:module:: src.dynamic_boundary_conditions.rainfall.main_rainfall + +.. autoapi-nested-parse:: + + Main rainfall script used to fetch and store rainfall data in the database, and to generate the requested + rainfall model input for BG-Flood, etc. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.rainfall.main_rainfall.remove_existing_rain_inputs + src.dynamic_boundary_conditions.rainfall.main_rainfall.main + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.rainfall.main_rainfall.sample_polygon + + +.. py:function:: remove_existing_rain_inputs(bg_flood_dir: pathlib.Path) -> None + + Remove existing rain input files from the specified directory. + + :param bg_flood_dir: BG-Flood model directory containing the rain input files. + :type bg_flood_dir: pathlib.Path + + :returns: This function does not return any value. + :rtype: None + + +.. 
py:function:: main(selected_polygon_gdf: geopandas.GeoDataFrame, rcp: Optional[float], time_period: Optional[str], ari: float, storm_length_mins: int, time_to_peak_mins: Union[int, float], increment_mins: int, hyeto_method: src.dynamic_boundary_conditions.rainfall.rainfall_enum.HyetoMethod, input_type: src.dynamic_boundary_conditions.rainfall.rainfall_enum.RainInputType, log_level: src.digitaltwin.utils.LogLevel = LogLevel.DEBUG) -> None + + Fetch and store rainfall data in the database, and generate the requested rainfall model input for BG-Flood. + + :param selected_polygon_gdf: A GeoDataFrame representing the selected polygon, i.e., the catchment area. + :type selected_polygon_gdf: gpd.GeoDataFrame + :param rcp: Representative Concentration Pathway (RCP) value. Valid options are 2.6, 4.5, 6.0, 8.5, or None + for historical data. + :type rcp: Optional[float] + :param time_period: Future time period. Valid options are "2031-2050", "2081-2100", or None for historical data. + :type time_period: Optional[str] + :param ari: Average Recurrence Interval (ARI) value. Valid options are 1.58, 2, 5, 10, 20, 30, 40, 50, 60, 80, 100, or 250. + :type ari: float + :param storm_length_mins: Storm duration in minutes. + :type storm_length_mins: int + :param time_to_peak_mins: The time in minutes when rainfall is at its greatest (reaches maximum). + :type time_to_peak_mins: Union[int, float] + :param increment_mins: Time interval in minutes. + :type increment_mins: int + :param hyeto_method: Hyetograph method to be used. Valid options are HyetoMethod.ALT_BLOCK or HyetoMethod.CHICAGO. + :type hyeto_method: HyetoMethod + :param input_type: The type of rainfall model input to be generated. Valid options are 'uniform' or 'varying', + representing spatially uniform rain input (text file) or spatially varying rain input (NetCDF file). + :type input_type: RainInputType + :param log_level: The log level to set for the root logger. Defaults to LogLevel.DEBUG. 
+ The available logging levels and their corresponding numeric values are: + - LogLevel.CRITICAL (50) + - LogLevel.ERROR (40) + - LogLevel.WARNING (30) + - LogLevel.INFO (20) + - LogLevel.DEBUG (10) + - LogLevel.NOTSET (0) + :type log_level: LogLevel = LogLevel.DEBUG + + :returns: This function does not return any value. + :rtype: None + + +.. py:data:: sample_polygon + + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/rainfall_data_from_hirds/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/rainfall_data_from_hirds/index.rst.txt new file mode 100644 index 000000000..076b28608 --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/rainfall_data_from_hirds/index.rst.txt @@ -0,0 +1,141 @@ +:py:mod:`src.dynamic_boundary_conditions.rainfall.rainfall_data_from_hirds` +=========================================================================== + +.. py:module:: src.dynamic_boundary_conditions.rainfall.rainfall_data_from_hirds + +.. autoapi-nested-parse:: + + Fetch rainfall data from the HIRDS website. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.rainfall.rainfall_data_from_hirds.BlockStructure + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.rainfall.rainfall_data_from_hirds.get_site_url_key + src.dynamic_boundary_conditions.rainfall.rainfall_data_from_hirds.get_data_from_hirds + src.dynamic_boundary_conditions.rainfall.rainfall_data_from_hirds.get_layout_structure_of_data + src.dynamic_boundary_conditions.rainfall.rainfall_data_from_hirds.convert_to_tabular_data + + + +.. py:function:: get_site_url_key(site_id: str, idf: bool) -> str + + Get the unique URL key of the requested rainfall site from the HIRDS website. + + :param site_id: HIRDS rainfall site ID. + :type site_id: str + :param idf: Set to False for rainfall depth data, and True for rainfall intensity data. 
+ :type idf: bool + + :returns: Unique URL key of the requested rainfall site. + :rtype: str + + +.. py:function:: get_data_from_hirds(site_id: str, idf: bool) -> str + + Fetch rainfall data for the requested rainfall site from the HIRDS website. + + :param site_id: HIRDS rainfall site ID. + :type site_id: str + :param idf: Set to False for rainfall depth data, and True for rainfall intensity data. + :type idf: bool + + :returns: Rainfall data for the requested site as a string. + :rtype: str + + +.. py:class:: BlockStructure + + + Bases: :py:obj:`NamedTuple` + + Represents the layout structure of fetched rainfall data. + + .. attribute:: skip_rows + + Number of lines to skip at the start of the fetched rainfall site_data. + + :type: int + + .. attribute:: rcp + + There are four different representative concentration pathways (RCPs), and abbreviated as RCP2.6, RCP4.5, + RCP6.0 and RCP8.5, in order of increasing radiative forcing by greenhouse gases, or nan for historical data. + + :type: Optional[float] + + .. attribute:: time_period + + Rainfall estimates for two future time periods (e.g. 2031-2050 or 2081-2100) for four RCPs, or None for + historical data. + + :type: Optional[str] + + .. attribute:: category + + Historical data, Historical Standard Error or Projections (i.e. hist, hist_stderr or proj). + + :type: str + + .. py:attribute:: skip_rows + :type: int + + + + .. py:attribute:: rcp + :type: Optional[float] + + + + .. py:attribute:: time_period + :type: Optional[str] + + + + .. py:attribute:: category + :type: str + + + + +.. py:function:: get_layout_structure_of_data(site_data: str) -> List[BlockStructure] + + Get the layout structure of the fetched rainfall data. + + :param site_data: Fetched rainfall data text string from the HIRDS website for the requested rainfall site. + :type site_data: str + + :returns: List of BlockStructure named tuples representing the layout structure of the fetched rainfall data. + :rtype: List[BlockStructure] + + +.. 
py:function:: convert_to_tabular_data(site_data: str, site_id: str, block_structure: BlockStructure) -> pandas.DataFrame + + Convert the fetched rainfall data for the requested site into a Pandas DataFrame. + + :param site_data: Fetched rainfall data text string from the HIRDS website for the requested rainfall site. + :type site_data: str + :param site_id: HIRDS rainfall site ID. + :type site_id: str + :param block_structure: The layout structure of the fetched rainfall data, containing skip rows, RCP, time period, and category. + :type block_structure: BlockStructure + + :returns: Rainfall data for the requested site in tabular format. + :rtype: pd.DataFrame + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/rainfall_enum/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/rainfall_enum/index.rst.txt new file mode 100644 index 000000000..a01bd936b --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/rainfall_enum/index.rst.txt @@ -0,0 +1,85 @@ +:py:mod:`src.dynamic_boundary_conditions.rainfall.rainfall_enum` +================================================================ + +.. py:module:: src.dynamic_boundary_conditions.rainfall.rainfall_enum + +.. autoapi-nested-parse:: + + Enum(s) used in the rainfall module. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.rainfall.rainfall_enum.HyetoMethod + src.dynamic_boundary_conditions.rainfall.rainfall_enum.RainInputType + + + + +.. py:class:: HyetoMethod + + + Bases: :py:obj:`enum.StrEnum` + + Enum class representing different hyetograph methods. + + .. attribute:: ALT_BLOCK + + Alternating Block Method. + + :type: str + + .. attribute:: CHICAGO + + Chicago Method. + + :type: str + + .. py:attribute:: ALT_BLOCK + :value: 'alt_block' + + + + .. py:attribute:: CHICAGO + :value: 'chicago' + + + + +.. 
py:class:: RainInputType + + + Bases: :py:obj:`enum.StrEnum` + + Enum class representing different types of rain input used in the BG-Flood Model. + + .. attribute:: UNIFORM + + Spatially uniform rain input. + + :type: str + + .. attribute:: VARYING + + Spatially varying rain input. + + :type: str + + .. py:attribute:: UNIFORM + :value: 'uniform' + + + + .. py:attribute:: VARYING + :value: 'varying' + + + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/rainfall_model_input/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/rainfall_model_input/index.rst.txt new file mode 100644 index 000000000..01aa16fcb --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/rainfall_model_input/index.rst.txt @@ -0,0 +1,159 @@ +:py:mod:`src.dynamic_boundary_conditions.rainfall.rainfall_model_input` +======================================================================= + +.. py:module:: src.dynamic_boundary_conditions.rainfall.rainfall_model_input + +.. autoapi-nested-parse:: + + Generate the requested rainfall model input for BG-Flood, which can be either + spatially uniform rain input ('rain_forcing.txt' text file) or + spatially varying rain input ('rain_forcing.nc' NetCDF file). + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. 
autoapisummary:: + + src.dynamic_boundary_conditions.rainfall.rainfall_model_input.sites_voronoi_intersect_catchment + src.dynamic_boundary_conditions.rainfall.rainfall_model_input.sites_coverage_in_catchment + src.dynamic_boundary_conditions.rainfall.rainfall_model_input.mean_catchment_rainfall + src.dynamic_boundary_conditions.rainfall.rainfall_model_input.spatial_uniform_rain_input + src.dynamic_boundary_conditions.rainfall.rainfall_model_input.create_rain_data_cube + src.dynamic_boundary_conditions.rainfall.rainfall_model_input.spatial_varying_rain_input + src.dynamic_boundary_conditions.rainfall.rainfall_model_input.generate_rain_model_input + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.rainfall.rainfall_model_input.log + + +.. py:data:: log + + + +.. py:function:: sites_voronoi_intersect_catchment(sites_in_catchment: geopandas.GeoDataFrame, catchment_area: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Get the intersecting areas between the rainfall site coverage areas (Thiessen polygons) and the catchment area, + i.e. return the overlapped areas. + + :param sites_in_catchment: Rainfall site coverage areas (Thiessen polygons) that intersect or are within the catchment area. + :type sites_in_catchment: gpd.GeoDataFrame + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the intersecting areas between the rainfall site coverage areas and the + catchment area. + :rtype: gpd.GeoDataFrame + + +.. py:function:: sites_coverage_in_catchment(sites_in_catchment: geopandas.GeoDataFrame, catchment_area: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Get the intersecting areas between the rainfall site coverage areas (Thiessen polygons) and the catchment area, + and calculate the size and percentage of the area covered by each rainfall site inside the catchment area. 
+ + :param sites_in_catchment: Rainfall sites coverage areas (Thiessen polygons) that intersect or are within the catchment area. + :type sites_in_catchment: gpd.GeoDataFrame + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the intersecting areas between the rainfall site coverage areas and the + catchment area, with calculated size and percentage of area covered by each rainfall site. + :rtype: gpd.GeoDataFrame + + +.. py:function:: mean_catchment_rainfall(hyetograph_data: pandas.DataFrame, sites_coverage: geopandas.GeoDataFrame) -> pandas.DataFrame + + Calculate the mean catchment rainfall intensities (weighted average of gauge measurements) + across all durations using the Thiessen polygon method. + + :param hyetograph_data: Hyetograph intensities data for sites within the catchment area. + :type hyetograph_data: pd.DataFrame + :param sites_coverage: A GeoDataFrame containing information about the coverage area of each rainfall site within the catchment area, + including the size and percentage of the catchment area covered by each site. + :type sites_coverage: gpd.GeoDataFrame + + :returns: A DataFrame containing the mean catchment rainfall intensities across all durations. + :rtype: pd.DataFrame + + +.. py:function:: spatial_uniform_rain_input(hyetograph_data: pandas.DataFrame, sites_coverage: geopandas.GeoDataFrame, bg_flood_dir: pathlib.Path) -> None + + Write the mean catchment rainfall intensities data (i.e., 'seconds' and 'rain_intensity_mmhr' columns) + into a text file named 'rain_forcing.txt'. This file can be used as spatially uniform rain input + for the BG-Flood model. + + :param hyetograph_data: Hyetograph intensities data for sites within the catchment area. 
+ :type hyetograph_data: pd.DataFrame + :param sites_coverage: A GeoDataFrame containing information about the coverage area of each rainfall site within the catchment area, + including the size and percentage of the catchment area covered by each site. + :type sites_coverage: gpd.GeoDataFrame + :param bg_flood_dir: BG-Flood model directory. + :type bg_flood_dir: pathlib.Path + + :returns: This function does not return any value. + :rtype: None + + +.. py:function:: create_rain_data_cube(hyetograph_data: pandas.DataFrame, sites_coverage: geopandas.GeoDataFrame) -> xarray.Dataset + + Create rainfall intensities data cube (xarray data) for the catchment area across all durations, + i.e. convert rainfall intensities vector data into rasterized xarray data. + + :param hyetograph_data: Hyetograph intensities data for sites within the catchment area. + :type hyetograph_data: pd.DataFrame + :param sites_coverage: A GeoDataFrame containing information about the coverage area of each rainfall site within the catchment area, + including the size and percentage of the catchment area covered by each site. + :type sites_coverage: gpd.GeoDataFrame + + :returns: Rainfall intensities data cube in the form of xarray dataset. + :rtype: xr.Dataset + + +.. py:function:: spatial_varying_rain_input(hyetograph_data: pandas.DataFrame, sites_coverage: geopandas.GeoDataFrame, bg_flood_dir: pathlib.Path) -> None + + Write the rainfall intensities data cube in NetCDF format (rain_forcing.nc). + This file can be used as spatially varying rain input for the BG-Flood model. + + :param hyetograph_data: Hyetograph intensities data for sites within the catchment area. + :type hyetograph_data: pd.DataFrame + :param sites_coverage: A GeoDataFrame containing information about the coverage area of each rainfall site within the catchment area, + including the size and percentage of the catchment area covered by each site. 
+ :type sites_coverage: gpd.GeoDataFrame + :param bg_flood_dir: BG-Flood model directory. + :type bg_flood_dir: pathlib.Path + + :returns: This function does not return any value. + :rtype: None + + +.. py:function:: generate_rain_model_input(hyetograph_data: pandas.DataFrame, sites_coverage: geopandas.GeoDataFrame, bg_flood_dir: pathlib.Path, input_type: src.dynamic_boundary_conditions.rainfall.rainfall_enum.RainInputType) -> None + + Generate the requested rainfall model input for BG-Flood, either spatially uniform rain input + ('rain_forcing.txt' text file) or spatially varying rain input ('rain_forcing.nc' NetCDF file). + + :param hyetograph_data: Hyetograph intensities data for sites within the catchment area. + :type hyetograph_data: pd.DataFrame + :param sites_coverage: A GeoDataFrame containing information about the coverage area of each rainfall site within the catchment area, + including the size and percentage of the catchment area covered by each site. + :type sites_coverage: gpd.GeoDataFrame + :param bg_flood_dir: BG-Flood model directory. + :type bg_flood_dir: pathlib.Path + :param input_type: The type of rainfall model input to be generated. Valid options are 'uniform' or 'varying', + representing spatially uniform rain input (text file) or spatially varying rain input (NetCDF file). + :type input_type: RainInputType + + :returns: This function does not return any value. + :rtype: None + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/rainfall_sites/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/rainfall_sites/index.rst.txt new file mode 100644 index 000000000..ee1f4da45 --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/rainfall_sites/index.rst.txt @@ -0,0 +1,65 @@ +:py:mod:`src.dynamic_boundary_conditions.rainfall.rainfall_sites` +================================================================= + +.. 
py:module:: src.dynamic_boundary_conditions.rainfall.rainfall_sites + +.. autoapi-nested-parse:: + + Fetch rainfall sites data from the HIRDS website and store it in the database. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.rainfall.rainfall_sites.get_rainfall_sites_data + src.dynamic_boundary_conditions.rainfall.rainfall_sites.get_rainfall_sites_in_df + src.dynamic_boundary_conditions.rainfall.rainfall_sites.rainfall_sites_to_db + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.rainfall.rainfall_sites.log + + +.. py:data:: log + + + +.. py:function:: get_rainfall_sites_data() -> str + + Get rainfall sites data from the HIRDS website. + + :returns: The rainfall sites data as a string. + :rtype: str + + +.. py:function:: get_rainfall_sites_in_df() -> geopandas.GeoDataFrame + + Get rainfall sites data from the HIRDS website and transform it into a GeoDataFrame. + + :returns: A GeoDataFrame containing the rainfall sites data. + :rtype: gpd.GeoDataFrame + + +.. py:function:: rainfall_sites_to_db(engine: sqlalchemy.engine.Engine) -> None + + Store rainfall sites data from the HIRDS website in the database. + + :param engine: The engine used to connect to the database. + :type engine: Engine + + :returns: This function does not return any value. + :rtype: None + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/thiessen_polygons/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/thiessen_polygons/index.rst.txt new file mode 100644 index 000000000..f578b773a --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/rainfall/thiessen_polygons/index.rst.txt @@ -0,0 +1,101 @@ +:py:mod:`src.dynamic_boundary_conditions.rainfall.thiessen_polygons` +==================================================================== + +.. 
py:module:: src.dynamic_boundary_conditions.rainfall.thiessen_polygons + +.. autoapi-nested-parse:: + + Calculate the area covered by each rainfall site throughout New Zealand and store it in the database. + Retrieve the coverage areas (Thiessen polygons) for all rainfall sites located within the catchment area. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.rainfall.thiessen_polygons.get_sites_within_aoi + src.dynamic_boundary_conditions.rainfall.thiessen_polygons.thiessen_polygons_calculator + src.dynamic_boundary_conditions.rainfall.thiessen_polygons.thiessen_polygons_to_db + src.dynamic_boundary_conditions.rainfall.thiessen_polygons.thiessen_polygons_from_db + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.rainfall.thiessen_polygons.log + + +.. py:data:: log + + + +.. py:function:: get_sites_within_aoi(engine: sqlalchemy.engine.Engine, area_of_interest: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Get all rainfall sites within the area of interest from the database and return the required data as a + GeoDataFrame. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param area_of_interest: A GeoDataFrame representing the area of interest. + :type area_of_interest: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the rainfall sites within the area of interest. + :rtype: gpd.GeoDataFrame + + +.. py:function:: thiessen_polygons_calculator(area_of_interest: geopandas.GeoDataFrame, sites_in_aoi: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Create Thiessen polygons for rainfall sites within the area of interest and calculate the area covered by each + rainfall site. + + :param area_of_interest: A GeoDataFrame representing the area of interest. + :type area_of_interest: gpd.GeoDataFrame + :param sites_in_aoi: Rainfall sites within the area of interest. 
+ :type sites_in_aoi: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the Thiessen polygons, site information, and area covered by each rainfall site. + :rtype: gpd.GeoDataFrame + + :raises ValueError: - If the provided 'area_of_interest' GeoDataFrame does not contain any data. + - If the provided 'sites_in_aoi' GeoDataFrame does not contain any data. + + +.. py:function:: thiessen_polygons_to_db(engine: sqlalchemy.engine.Engine, area_of_interest: geopandas.GeoDataFrame, sites_in_aoi: geopandas.GeoDataFrame) -> None + + Store the data representing the Thiessen polygons, site information, and the area covered by + each rainfall site in the database. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param area_of_interest: A GeoDataFrame representing the area of interest. + :type area_of_interest: gpd.GeoDataFrame + :param sites_in_aoi: Rainfall sites within the area of interest. + :type sites_in_aoi: gpd.GeoDataFrame + + :returns: This function does not return any value. + :rtype: None + + +.. py:function:: thiessen_polygons_from_db(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Get the coverage areas (Thiessen polygons) of all rainfall sites that intersect or are within the + specified catchment area. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the coverage areas (Thiessen polygons) of rainfall sites within the catchment area. 
+ :rtype: gpd.GeoDataFrame + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/align_rec_osm/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/align_rec_osm/index.rst.txt new file mode 100644 index 000000000..1ab72658a --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/align_rec_osm/index.rst.txt @@ -0,0 +1,209 @@ +:py:mod:`src.dynamic_boundary_conditions.river.align_rec_osm` +============================================================= + +.. py:module:: src.dynamic_boundary_conditions.river.align_rec_osm + +.. autoapi-nested-parse:: + + This script handles the task of obtaining data for REC river inflow segments whose boundary points align with the + boundary points of OpenStreetMap (OSM) waterways within a specified distance threshold. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.river.align_rec_osm.get_rec_network_data_on_bbox + src.dynamic_boundary_conditions.river.align_rec_osm.get_single_intersect_inflows + src.dynamic_boundary_conditions.river.align_rec_osm.get_exploded_multi_intersect + src.dynamic_boundary_conditions.river.align_rec_osm.determine_multi_intersect_inflow_index + src.dynamic_boundary_conditions.river.align_rec_osm.categorize_exploded_multi_intersect + src.dynamic_boundary_conditions.river.align_rec_osm.get_multi_intersect_inflows + src.dynamic_boundary_conditions.river.align_rec_osm.get_rec_inflows_on_bbox + src.dynamic_boundary_conditions.river.align_rec_osm.get_osm_waterways_on_bbox + src.dynamic_boundary_conditions.river.align_rec_osm.align_rec_with_osm + src.dynamic_boundary_conditions.river.align_rec_osm.get_rec_inflows_aligned_to_osm + + + +.. py:exception:: NoRiverDataException + + + Bases: :py:obj:`Exception` + + Exception raised when no river data is to be used for the BG-Flood model. + + +.. 
py:function:: get_rec_network_data_on_bbox(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame, rec_network_data: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Obtain REC river network data that intersects with the catchment area boundary, along with the corresponding + intersection points on the boundary. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + :param rec_network_data: A GeoDataFrame containing the REC river network data. + :type rec_network_data: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing REC river network data that intersects with the catchment area boundary, + along with the corresponding intersection points on the boundary. + :rtype: gpd.GeoDataFrame + + :raises NoRiverDataException: If no REC river segment is found crossing the catchment boundary. + + +.. py:function:: get_single_intersect_inflows(rec_on_bbox: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Identifies REC river segments that intersect the catchment boundary once, then retrieves the segments + that are inflows into the catchment area, along with their corresponding inflow boundary points. + + :param rec_on_bbox: A GeoDataFrame containing REC river network data that intersects with the catchment area boundary, + along with the corresponding intersection points on the boundary. + :type rec_on_bbox: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the REC river segments that intersect the catchment boundary once and + are inflows into the catchment area, along with their corresponding inflow boundary points. + :rtype: gpd.GeoDataFrame + + +.. 
py:function:: get_exploded_multi_intersect(rec_on_bbox: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Identifies REC river segments that intersect the catchment boundary multiple times, + transforms MultiPoint geometries into individual Point geometries (boundary points), + calculates the distance along the river segment for each boundary point, and + adds a new column containing boundary points sorted by their distance along the river. + + :param rec_on_bbox: A GeoDataFrame containing REC river network data that intersects with the catchment area boundary, + along with the corresponding intersection points on the boundary. + :type rec_on_bbox: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the REC river segments that intersect the catchment boundary multiple times, + along with the corresponding intersection points on the boundary, sorted by distance along the river. + :rtype: gpd.GeoDataFrame + + +.. py:function:: determine_multi_intersect_inflow_index(multi_intersect_row: pandas.Series) -> int + + Determines the index that represents the position of the first inflow boundary point along a REC river segment. + + :param multi_intersect_row: A REC river segment that intersects the catchment boundary multiple times, along with the + corresponding intersection points on the boundary, sorted by distance along the river. + :type multi_intersect_row: pd.Series + + :returns: An integer that represents the position of the first inflow boundary point along a REC river segment. + :rtype: int + + :raises ValueError: If the index that represents the position of the first inflow boundary point along a REC river segment + cannot be determined. + + +.. 
py:function:: categorize_exploded_multi_intersect(multi_intersect: geopandas.GeoDataFrame) -> Dict[int, Dict[str, List[shapely.geometry.Point]]] + + Categorizes boundary points of REC river segments that intersect the catchment boundary multiple times into + 'inflow' and 'outflow' based on their sequential positions along the river segment etc. + + :param multi_intersect: A GeoDataFrame containing the REC river segments that intersect the catchment boundary multiple times, + along with the corresponding intersection points on the boundary, sorted by distance along the river. + :type multi_intersect: gpd.GeoDataFrame + + :returns: A dictionary where the keys represent the 'objectid' values of REC river segments, and the values are + dictionaries. Each of these dictionaries contains two lists: 'inflow' and 'outflow,' which respectively + represent the boundary points where water flows into and out of the catchment area. + :rtype: Dict[int, Dict[str, List[Point]]] + + +.. py:function:: get_multi_intersect_inflows(rec_on_bbox: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Identifies REC river segments that intersect the catchment boundary multiple times, then retrieves the segments + that are inflows into the catchment area, along with their corresponding inflow boundary points. + + :param rec_on_bbox: A GeoDataFrame containing REC river network data that intersects with the catchment area boundary, + along with the corresponding intersection points on the boundary. + :type rec_on_bbox: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the REC river segments that intersect the catchment boundary multiple times and + are inflows into the catchment area, along with their corresponding inflow boundary points. + :rtype: gpd.GeoDataFrame + + +.. 
py:function:: get_rec_inflows_on_bbox(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame, rec_network_data: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Obtain REC river segments that are inflows into the specified catchment area, along with their corresponding + inflow boundary points. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + :param rec_network_data: A GeoDataFrame containing the REC river network data. + :type rec_network_data: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing REC river segments that are inflows into the catchment area, along with their + corresponding inflow boundary points. + :rtype: gpd.GeoDataFrame + + :raises NoRiverDataException: If no REC river segment is found crossing the catchment boundary. + + +.. py:function:: get_osm_waterways_on_bbox(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Retrieve OpenStreetMap (OSM) waterway data that intersects with the catchment area boundary, + along with the corresponding intersection points on the boundary. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing OpenStreetMap (OSM) waterway data that intersects with the catchment boundary, + along with the corresponding intersection points on the boundary. + :rtype: gpd.GeoDataFrame + + +.. py:function:: align_rec_with_osm(rec_inflows_on_bbox: geopandas.GeoDataFrame, osm_waterways_on_bbox: geopandas.GeoDataFrame, distance_m: int = 300) -> geopandas.GeoDataFrame + + Aligns the boundary points of REC river inflow segments with the boundary points of OpenStreetMap (OSM) waterways + within a specified distance threshold. 
+ + :param rec_inflows_on_bbox: A GeoDataFrame containing REC river network segments where water flows into the catchment area, + along with their corresponding inflow boundary points. + :type rec_inflows_on_bbox: gpd.GeoDataFrame + :param osm_waterways_on_bbox: A GeoDataFrame containing OpenStreetMap (OSM) waterway data that intersects with the catchment boundary, + along with the corresponding intersection points on the boundary. + :type osm_waterways_on_bbox: gpd.GeoDataFrame + :param distance_m: Distance threshold in meters for spatial proximity matching. The default value is 300 meters. + :type distance_m: int = 300 + + :returns: A GeoDataFrame containing the boundary points of REC river inflow segments aligned with the boundary points of + OpenStreetMap (OSM) waterways within a specified distance threshold. + :rtype: gpd.GeoDataFrame + + +.. py:function:: get_rec_inflows_aligned_to_osm(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame, rec_network_data: geopandas.GeoDataFrame, distance_m: int = 300) -> geopandas.GeoDataFrame + + Obtain data for REC river inflow segments whose boundary points align with the boundary points of + OpenStreetMap (OSM) waterways within a specified distance threshold. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + :param rec_network_data: A GeoDataFrame containing the REC river network data. + :type rec_network_data: gpd.GeoDataFrame + :param distance_m: Distance threshold in meters for spatial proximity matching. The default value is 300 meters. + :type distance_m: int = 300 + + :returns: A GeoDataFrame containing data for REC river inflow segments whose boundary points align with the + boundary points of OpenStreetMap (OSM) waterways within a specified distance threshold. 
+ :rtype: gpd.GeoDataFrame + + :raises NoRiverDataException: If no REC river segment is found crossing the catchment boundary. + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/hydrograph/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/hydrograph/index.rst.txt new file mode 100644 index 000000000..b9fccb3a1 --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/hydrograph/index.rst.txt @@ -0,0 +1,105 @@ +:py:mod:`src.dynamic_boundary_conditions.river.hydrograph` +========================================================== + +.. py:module:: src.dynamic_boundary_conditions.river.hydrograph + +.. autoapi-nested-parse:: + + This script handles the task of obtaining REC river inflow scenario data, whether it's Mean Annual Flood (MAF) or + Average Recurrence Interval (ARI)-based, and generates corresponding hydrograph data for the requested scenarios. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.river.hydrograph.clean_rec_inflow_data + src.dynamic_boundary_conditions.river.hydrograph.extract_valid_ari_values + src.dynamic_boundary_conditions.river.hydrograph.get_rec_inflow_scenario_data + src.dynamic_boundary_conditions.river.hydrograph.get_hydrograph_data + + + +.. py:function:: clean_rec_inflow_data(rec_inflows_w_input_points: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Selects and renames specific columns that represent REC river inflow data from the input GeoDataFrame. + + :param rec_inflows_w_input_points: A GeoDataFrame containing data for REC river inflow segments whose boundary points align with the + boundary points of OpenStreetMap (OSM) waterways within a specified distance threshold, + along with their corresponding river input points used in the BG-Flood model. 
+ :type rec_inflows_w_input_points: gpd.GeoDataFrame + + :returns: A GeoDataFrame with selected and renamed columns representing REC river inflow data. + :rtype: gpd.GeoDataFrame + + +.. py:function:: extract_valid_ari_values(rec_inflow_data: geopandas.GeoDataFrame) -> List[int] + + Extracts valid ARI (Average Recurrence Interval) values from the column names of the REC river inflow data. + + :param rec_inflow_data: A GeoDataFrame containing REC river inflow data with column names that include ARI values. + :type rec_inflow_data: gpd.GeoDataFrame + + :returns: A list of valid ARI values extracted from the column names of the REC river inflow data. + :rtype: List[int] + + +.. py:function:: get_rec_inflow_scenario_data(rec_inflows_w_input_points: geopandas.GeoDataFrame, maf: bool = True, ari: Optional[int] = None, bound: src.dynamic_boundary_conditions.river.river_enum.BoundType = BoundType.MIDDLE) -> geopandas.GeoDataFrame + + Obtain the requested REC river inflow scenario data, which can be either Mean Annual Flood (MAF)-based or + Average Recurrence Interval (ARI)-based scenario data. + + :param rec_inflows_w_input_points: A GeoDataFrame containing data for REC river inflow segments whose boundary points align with the + boundary points of OpenStreetMap (OSM) waterways within a specified distance threshold, + along with their corresponding river input points used in the BG-Flood model. + :type rec_inflows_w_input_points: gpd.GeoDataFrame + :param maf: Set to True to obtain MAF-based scenario data or False to obtain ARI-based scenario data. + :type maf: bool = True + :param ari: The Average Recurrence Interval (ARI) value. Valid options are 5, 10, 20, 50, 100, or 1000. + Mandatory when 'maf' is set to False, and should be set to None when 'maf' is set to True. + :type ari: Optional[int] = None + :param bound: Set the type of bound (estimate) for the REC river inflow scenario data. + Valid options include: 'BoundType.LOWER', 'BoundType.MIDDLE', or 'BoundType.UPPER'.
+ :type bound: BoundType = BoundType.MIDDLE + + :returns: A GeoDataFrame containing the requested REC river inflow scenario data. + :rtype: gpd.GeoDataFrame + + :raises ValueError: - If 'ari' is provided when 'maf' is set to True (i.e. 'maf' is True and 'ari' is not set to None). + - If 'ari' is not provided when 'maf' is set to False (i.e. 'maf' is False and 'ari' is set to None). + - If an invalid 'ari' value is provided. + + +.. py:function:: get_hydrograph_data(rec_inflows_w_input_points: geopandas.GeoDataFrame, flow_length_mins: int, time_to_peak_mins: Union[int, float], maf: bool = True, ari: Optional[int] = None, bound: src.dynamic_boundary_conditions.river.river_enum.BoundType = BoundType.MIDDLE) -> geopandas.GeoDataFrame + + Generate hydrograph data for the requested REC river inflow scenario. + + :param rec_inflows_w_input_points: A GeoDataFrame containing data for REC river inflow segments whose boundary points align with the + boundary points of OpenStreetMap (OSM) waterways within a specified distance threshold, + along with their corresponding river input points used in the BG-Flood model. + :type rec_inflows_w_input_points: gpd.GeoDataFrame + :param flow_length_mins: Duration of the river flow in minutes. + :type flow_length_mins: int + :param time_to_peak_mins: The time in minutes when flow is at its greatest (reaches maximum). + :type time_to_peak_mins: Union[int, float] + :param maf: Set to True to obtain MAF-based scenario data or False to obtain ARI-based scenario data. + :type maf: bool = True + :param ari: The Average Recurrence Interval (ARI) value. Valid options are 5, 10, 20, 50, 100, or 1000. + Mandatory when 'maf' is set to False, and should be set to None when 'maf' is set to True. + :type ari: Optional[int] = None + :param bound: Set the type of bound (estimate) for the REC river inflow scenario data. + Valid options include: 'BoundType.LOWER', 'BoundType.MIDDLE', or 'BoundType.UPPER'. 
+ :type bound: BoundType = BoundType.MIDDLE + + :returns: A GeoDataFrame containing hydrograph data for the requested REC river inflow scenario. + :rtype: gpd.GeoDataFrame + + :raises ValueError: If the specified 'time_to_peak_mins' is less than half of the river flow duration. + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/index.rst.txt new file mode 100644 index 000000000..53f81865e --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/index.rst.txt @@ -0,0 +1,25 @@ +:py:mod:`src.dynamic_boundary_conditions.river` +=============================================== + +.. py:module:: src.dynamic_boundary_conditions.river + + +Submodules +---------- +.. toctree:: + :titlesonly: + :maxdepth: 1 + + align_rec_osm/index.rst + hydrograph/index.rst + main_river/index.rst + osm_waterways/index.rst + river_data_from_niwa/index.rst + river_data_to_from_db/index.rst + river_enum/index.rst + river_inflows/index.rst + river_model_input/index.rst + river_network_for_aoi/index.rst + river_network_to_from_db/index.rst + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/main_river/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/main_river/index.rst.txt new file mode 100644 index 000000000..04f5888ba --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/main_river/index.rst.txt @@ -0,0 +1,117 @@ +:py:mod:`src.dynamic_boundary_conditions.river.main_river` +========================================================== + +.. py:module:: src.dynamic_boundary_conditions.river.main_river + +.. autoapi-nested-parse:: + + Main river script used to read and store REC data in the database, fetch OSM waterways data, create a river network + and its associated data, and generate the requested river model input for BG-Flood etc. 
+ + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.river.main_river.retrieve_hydro_dem_info + src.dynamic_boundary_conditions.river.main_river.get_hydro_dem_boundary_lines + src.dynamic_boundary_conditions.river.main_river.remove_existing_river_inputs + src.dynamic_boundary_conditions.river.main_river.main + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.river.main_river.log + src.dynamic_boundary_conditions.river.main_river.sample_polygon + + +.. py:data:: log + + + +.. py:function:: retrieve_hydro_dem_info(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame) -> Tuple[xarray.Dataset, shapely.geometry.LineString, Union[int, float]] + + Retrieves the Hydrologically Conditioned DEM (Hydro DEM) data, along with its spatial extent and resolution, + for the specified catchment area. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + + :returns: A tuple containing the Hydro DEM data as a xarray Dataset, the spatial extent of the Hydro DEM as a LineString, + and the resolution of the Hydro DEM as either an integer or a float. + :rtype: Tuple[xr.Dataset, LineString, Union[int, float]] + + +.. py:function:: get_hydro_dem_boundary_lines(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Get the boundary lines of the Hydrologically Conditioned DEM. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the boundary lines of the Hydrologically Conditioned DEM. + :rtype: gpd.GeoDataFrame + + +.. 
py:function:: remove_existing_river_inputs(bg_flood_dir: pathlib.Path) -> None + + Remove existing river input files from the specified directory. + + :param bg_flood_dir: The BG-Flood model directory containing the river input files. + :type bg_flood_dir: pathlib.Path + + :returns: This function does not return any value. + :rtype: None + + +.. py:function:: main(selected_polygon_gdf: geopandas.GeoDataFrame, flow_length_mins: int, time_to_peak_mins: Union[int, float], maf: bool = True, ari: Optional[int] = None, bound: src.dynamic_boundary_conditions.river.river_enum.BoundType = BoundType.MIDDLE, log_level: src.digitaltwin.utils.LogLevel = LogLevel.DEBUG) -> None + + Read and store REC data in the database, fetch OSM waterways data, create a river network and its associated data, + and generate the requested river model input for BG-Flood. + + :param selected_polygon_gdf: A GeoDataFrame representing the selected polygon, i.e., the catchment area. + :type selected_polygon_gdf: gpd.GeoDataFrame + :param flow_length_mins: Duration of the river flow in minutes. + :type flow_length_mins: int + :param time_to_peak_mins: The time in minutes when flow is at its greatest (reaches maximum). + :type time_to_peak_mins: Union[int, float] + :param maf: Set to True to obtain MAF-based scenario data or False to obtain ARI-based scenario data. + :type maf: bool = True + :param ari: The Average Recurrence Interval (ARI) value. Valid options are 5, 10, 20, 50, 100, or 1000. + Mandatory when 'maf' is set to False, and should be set to None when 'maf' is set to True. + :type ari: Optional[int] = None + :param bound: Set the type of bound (estimate) for the REC river inflow scenario data. + Valid options include: 'BoundType.LOWER', 'BoundType.MIDDLE', or 'BoundType.UPPER'. + :type bound: BoundType = BoundType.MIDDLE + :param log_level: The log level to set for the root logger. Defaults to LogLevel.DEBUG. 
+ The available logging levels and their corresponding numeric values are: + - LogLevel.CRITICAL (50) + - LogLevel.ERROR (40) + - LogLevel.WARNING (30) + - LogLevel.INFO (20) + - LogLevel.DEBUG (10) + - LogLevel.NOTSET (0) + :type log_level: LogLevel = LogLevel.DEBUG + + :returns: This function does not return any value. + :rtype: None + + +.. py:data:: sample_polygon + + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/osm_waterways/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/osm_waterways/index.rst.txt new file mode 100644 index 000000000..256cc97a7 --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/osm_waterways/index.rst.txt @@ -0,0 +1,57 @@ +:py:mod:`src.dynamic_boundary_conditions.river.osm_waterways` +============================================================= + +.. py:module:: src.dynamic_boundary_conditions.river.osm_waterways + +.. autoapi-nested-parse:: + + This script handles the fetching of OpenStreetMap (OSM) waterways data for the defined catchment area. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.river.osm_waterways.configure_osm_cache + src.dynamic_boundary_conditions.river.osm_waterways.fetch_osm_waterways + src.dynamic_boundary_conditions.river.osm_waterways.get_osm_waterways_data + + + +.. py:function:: configure_osm_cache() -> None + + Change the directory for storing the OSM cache files. + + :returns: This function does not return any value. + :rtype: None + + +.. py:function:: fetch_osm_waterways(catchment_area: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Fetches OpenStreetMap (OSM) waterways data for the specified catchment area. + + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the retrieved OSM waterways data for the specified catchment area. 
+ :rtype: gpd.GeoDataFrame + + +.. py:function:: get_osm_waterways_data(catchment_area: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Fetches OpenStreetMap (OSM) waterways data for the specified catchment area. + Only LineString geometries representing waterways of type "river" or "stream" are included. + + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing only LineString geometries representing waterways of type "river" or "stream". + :rtype: gpd.GeoDataFrame + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_data_from_niwa/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_data_from_niwa/index.rst.txt new file mode 100644 index 000000000..2a02314c3 --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_data_from_niwa/index.rst.txt @@ -0,0 +1,155 @@ +:py:mod:`src.dynamic_boundary_conditions.river.river_data_from_niwa` +==================================================================== + +.. py:module:: src.dynamic_boundary_conditions.river.river_data_from_niwa + +.. autoapi-nested-parse:: + + Fetch REC data in New Zealand from NIWA using the ArcGIS REST API. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.river.river_data_from_niwa.RecordCounts + + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.river.river_data_from_niwa.get_feature_layer_record_counts + src.dynamic_boundary_conditions.river.river_data_from_niwa.gen_rec_query_param_list + src.dynamic_boundary_conditions.river.river_data_from_niwa.fetch_rec_data + src.dynamic_boundary_conditions.river.river_data_from_niwa.fetch_rec_data_for_nz + src.dynamic_boundary_conditions.river.river_data_from_niwa.fetch_rec_data_from_niwa + + + +Attributes +~~~~~~~~~~ + +.. 
autoapisummary:: + + src.dynamic_boundary_conditions.river.river_data_from_niwa.log + src.dynamic_boundary_conditions.river.river_data_from_niwa.REC_API_URL + + +.. py:data:: log + + + +.. py:data:: REC_API_URL + :value: 'https://gis.niwa.co.nz/server/rest/services/HYDRO/Flood_Statistics_Henderson_Collins_V2/MapServer/2' + + + +.. py:class:: RecordCounts + + + Bases: :py:obj:`NamedTuple` + + Represents the record counts of the REC feature layer. + + .. attribute:: max_record_count + + The maximum number of records that will be returned per query. + + :type: int + + .. attribute:: total_record_count + + The total number of records available in the feature layer. + + :type: int + + .. py:attribute:: max_record_count + :type: int + + + + .. py:attribute:: total_record_count + :type: int + + + + +.. py:function:: get_feature_layer_record_counts(url: str = REC_API_URL) -> RecordCounts + + Retrieves the maximum and total record counts from the REC feature layer. + + :param url: The URL of the REC feature layer. Defaults to `REC_API_URL`. + :type url: str = REC_API_URL + + :returns: A named tuple containing the maximum and total record counts of the REC feature layer. + :rtype: RecordCounts + + +.. py:function:: gen_rec_query_param_list(engine: sqlalchemy.engine.Engine, max_record_count: int, total_record_count: int) -> List[Dict[str, Union[str, int]]] + + Generate a list of API query parameters used to retrieve REC data in New Zealand. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param max_record_count: The maximum number of records that will be returned per query. + :type max_record_count: int + :param total_record_count: The total number of records available in the feature layer. + :type total_record_count: int + + :returns: A list of API query parameters used to retrieve REC data in New Zealand. + :rtype: List[Dict[str, Union[str, int]]] + + +.. 
py:function:: fetch_rec_data(session: aiohttp.ClientSession, query_param: Dict[str, Union[str, int]], url: str = f'{REC_API_URL}/query') -> geopandas.GeoDataFrame + :async: + + Fetch REC data using the provided query parameters within a single API call. + + :param session: An instance of `aiohttp.ClientSession` used for making HTTP requests. + :type session: aiohttp.ClientSession + :param query_param: The query parameters used to retrieve REC data. + :type query_param: Dict[str, Union[str, int]] + :param url: The query URL of the REC feature layer. Defaults to the query endpoint of `REC_API_URL`. + :type url: str = f'{REC_API_URL}/query' + + :returns: A GeoDataFrame containing the fetched REC data. + :rtype: gpd.GeoDataFrame + + +.. py:function:: fetch_rec_data_for_nz(query_param_list: List[Dict[str, Union[str, int]]], url: str = REC_API_URL) -> geopandas.GeoDataFrame + :async: + + Iterate over the list of API query parameters to fetch REC data in New Zealand. + + :param query_param_list: A list of API query parameters used to retrieve REC data in New Zealand. + :type query_param_list: List[Dict[str, Union[str, int]]] + :param url: The URL of the REC feature layer. Defaults to `REC_API_URL`. + :type url: str = REC_API_URL + + :returns: A GeoDataFrame containing the fetched REC data in New Zealand. + :rtype: gpd.GeoDataFrame + + +.. py:function:: fetch_rec_data_from_niwa(engine: sqlalchemy.engine.Engine, url: str = REC_API_URL) -> geopandas.GeoDataFrame + + Retrieve REC data in New Zealand from NIWA using the ArcGIS REST API. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param url: The URL of the REC feature layer. Defaults to `REC_API_URL`. + :type url: str = REC_API_URL + + :returns: A GeoDataFrame containing the fetched REC data in New Zealand. + :rtype: gpd.GeoDataFrame + + :raises RuntimeError: If failed to fetch REC data.
+ + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_data_to_from_db/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_data_to_from_db/index.rst.txt new file mode 100644 index 000000000..342b16602 --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_data_to_from_db/index.rst.txt @@ -0,0 +1,93 @@ +:py:mod:`src.dynamic_boundary_conditions.river.river_data_to_from_db` +===================================================================== + +.. py:module:: src.dynamic_boundary_conditions.river.river_data_to_from_db + +.. autoapi-nested-parse:: + + This script handles storing REC data in the database, and retrieving REC data enriched with sea-draining catchment + information from the database. + + + +Module Contents +--------------- + + +Functions
~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.river.river_data_to_from_db.load_backup_rec_data_from_niwa + src.dynamic_boundary_conditions.river.river_data_to_from_db.store_rec_data_to_db + src.dynamic_boundary_conditions.river.river_data_to_from_db.get_sdc_data_from_db + src.dynamic_boundary_conditions.river.river_data_to_from_db.get_rec_data_with_sdc_from_db + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.river.river_data_to_from_db.log + + +.. py:data:: log + + + +.. py:function:: load_backup_rec_data_from_niwa() -> geopandas.GeoDataFrame + + Loads REC data from the NIWA REC dataset. + + :returns: A GeoDataFrame containing the REC data from the NIWA REC dataset. + :rtype: gpd.GeoDataFrame + + :raises FileNotFoundError: If the REC data directory does not exist or if there are no Shapefiles in the specified directory. + + +.. py:function:: store_rec_data_to_db(engine: sqlalchemy.engine.Engine) -> None + + Store REC data in the database. + + :param engine: The engine used to connect to the database.
+ :type engine: Engine + + :returns: This function does not return any value. + :rtype: None + + +.. py:function:: get_sdc_data_from_db(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Retrieve sea-draining catchment data from the database that intersects with the given catchment area. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing sea-draining catchment data that intersects with the given catchment area. + :rtype: gpd.GeoDataFrame + + +.. py:function:: get_rec_data_with_sdc_from_db(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame, river_network_id: int) -> geopandas.GeoDataFrame + + Retrieve REC data from the database for the specified catchment area with an additional column that identifies + the associated sea-draining catchment for each REC geometry. + Simultaneously, identify the REC geometries that do not fully reside within sea-draining catchments and + proceed to add these excluded REC geometries to the appropriate database table. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + :param river_network_id: An identifier for the river network associated with the current run. + :type river_network_id: int + + :returns: A GeoDataFrame containing the retrieved REC data for the specified catchment area with an additional column + that identifies the associated sea-draining catchment for each REC geometry. 
+ :rtype: gpd.GeoDataFrame + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_enum/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_enum/index.rst.txt new file mode 100644 index 000000000..235200123 --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_enum/index.rst.txt @@ -0,0 +1,65 @@ +:py:mod:`src.dynamic_boundary_conditions.river.river_enum` +========================================================== + +.. py:module:: src.dynamic_boundary_conditions.river.river_enum + +.. autoapi-nested-parse:: + + Enum(s) used in the river module. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.river.river_enum.BoundType + + + + +.. py:class:: BoundType + + + Bases: :py:obj:`enum.StrEnum` + + Enum class representing different types of estimates used in river flow scenarios. + + .. attribute:: LOWER + + Lower bound of a confidence interval. + + :type: str + + .. attribute:: MIDDLE + + Point estimate or sample mean. + + :type: str + + .. attribute:: UPPER + + Upper bound of a confidence interval. + + :type: str + + .. py:attribute:: LOWER + :value: 'lower' + + + + .. py:attribute:: MIDDLE + :value: 'middle' + + + + .. py:attribute:: UPPER + :value: 'upper' + + + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_inflows/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_inflows/index.rst.txt new file mode 100644 index 000000000..819dd0a0b --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_inflows/index.rst.txt @@ -0,0 +1,82 @@ +:py:mod:`src.dynamic_boundary_conditions.river.river_inflows` +============================================================= + +.. py:module:: src.dynamic_boundary_conditions.river.river_inflows + +.. 
autoapi-nested-parse:: + + This script handles the task of obtaining REC river inflow data along with the corresponding river input points used + for the BG-Flood model. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.river.river_inflows.get_elevations_near_rec_entry_point + src.dynamic_boundary_conditions.river.river_inflows.get_min_elevation_river_input_point + src.dynamic_boundary_conditions.river.river_inflows.get_rec_inflows_with_input_points + + + +.. py:function:: get_elevations_near_rec_entry_point(rec_inflows_row: pandas.Series, hydro_dem: xarray.Dataset) -> geopandas.GeoDataFrame + + Extracts elevation values and their corresponding coordinates from the Hydrologically Conditioned DEM in the + vicinity of the entry point of the REC river inflow segment. + + :param rec_inflows_row: Represents data pertaining to an individual REC river inflow segment, including its entry point into the + catchment area and the boundary line it aligns with. + :type rec_inflows_row: pd.Series + :param hydro_dem: Hydrologically Conditioned DEM for the catchment area. + :type hydro_dem: xr.Dataset + + :returns: A GeoDataFrame containing elevation values and their corresponding coordinates extracted from the + Hydrologically Conditioned DEM in the vicinity of the entry point of the REC river inflow segment. + :rtype: gpd.GeoDataFrame + + +.. py:function:: get_min_elevation_river_input_point(rec_inflows_row: pandas.Series, hydro_dem: xarray.Dataset) -> geopandas.GeoDataFrame + + Locate the river input point with the lowest elevation, used for BG-Flood model river input, from the + Hydrologically Conditioned DEM for the specific REC river inflow segment. + + :param rec_inflows_row: Represents data pertaining to an individual REC river inflow segment, including its entry point into the + catchment area and the boundary line it aligns with. 
+ :type rec_inflows_row: pd.Series + :param hydro_dem: Hydrologically Conditioned DEM for the catchment area. + :type hydro_dem: xr.Dataset + + :returns: A GeoDataFrame containing the river input point with the lowest elevation, used for BG-Flood model river input, + from the Hydrologically Conditioned DEM for the specific REC river inflow segment. + :rtype: gpd.GeoDataFrame + + +.. py:function:: get_rec_inflows_with_input_points(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame, rec_network_data: geopandas.GeoDataFrame, distance_m: int = 300) -> geopandas.GeoDataFrame + + Obtain data for REC river inflow segments whose boundary points align with the boundary points of + OpenStreetMap (OSM) waterways within a specified distance threshold, along with their corresponding + river input points used for the BG-Flood model. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + :param rec_network_data: A GeoDataFrame containing the REC river network data. + :type rec_network_data: gpd.GeoDataFrame + :param distance_m: Distance threshold in meters for spatial proximity matching. The default value is 300 meters. + :type distance_m: int = 300 + + :returns: A GeoDataFrame containing data for REC river inflow segments whose boundary points align with the + boundary points of OpenStreetMap (OSM) waterways within a specified distance threshold, + along with their corresponding river input points used for the BG-Flood model. + :rtype: gpd.GeoDataFrame + + :raises NoRiverDataException: If no REC river segment is found crossing the catchment boundary. 
+ + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_model_input/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_model_input/index.rst.txt new file mode 100644 index 000000000..5466c0d10 --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_model_input/index.rst.txt @@ -0,0 +1,49 @@ +:py:mod:`src.dynamic_boundary_conditions.river.river_model_input` +================================================================= + +.. py:module:: src.dynamic_boundary_conditions.river.river_model_input + +.. autoapi-nested-parse:: + + This script handles the task of generating the requested river model inputs for BG-Flood. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.river.river_model_input.generate_river_model_input + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.river.river_model_input.log + + +.. py:data:: log + + + +.. py:function:: generate_river_model_input(bg_flood_dir: pathlib.Path, hydrograph_data: geopandas.GeoDataFrame) -> None + + Generate the requested river model inputs for BG-Flood. + + :param bg_flood_dir: The BG-Flood model directory. + :type bg_flood_dir: pathlib.Path + :param hydrograph_data: A GeoDataFrame containing hydrograph data for the requested REC river inflow scenario. + :type hydrograph_data: gpd.GeoDataFrame + + :returns: This function does not return any value. 
+ :rtype: None + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_network_for_aoi/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_network_for_aoi/index.rst.txt new file mode 100644 index 000000000..e70c8b1ae --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_network_for_aoi/index.rst.txt @@ -0,0 +1,225 @@ +:py:mod:`src.dynamic_boundary_conditions.river.river_network_for_aoi` +===================================================================== + +.. py:module:: src.dynamic_boundary_conditions.river.river_network_for_aoi + +.. autoapi-nested-parse:: + + This script processes REC data to construct a river network for the defined catchment area. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.river.river_network_for_aoi.get_unique_nodes_dict + src.dynamic_boundary_conditions.river.river_network_for_aoi.add_nodes_to_rec + src.dynamic_boundary_conditions.river.river_network_for_aoi.add_nodes_intersection_type + src.dynamic_boundary_conditions.river.river_network_for_aoi.prepare_network_data_for_construction + src.dynamic_boundary_conditions.river.river_network_for_aoi.add_nodes_to_network + src.dynamic_boundary_conditions.river.river_network_for_aoi.add_initial_edges_to_network + src.dynamic_boundary_conditions.river.river_network_for_aoi.identify_absent_edges_to_add + src.dynamic_boundary_conditions.river.river_network_for_aoi.add_absent_edges_to_network + src.dynamic_boundary_conditions.river.river_network_for_aoi.add_edge_directions_to_network_data + src.dynamic_boundary_conditions.river.river_network_for_aoi.remove_unconnected_edges_from_network + src.dynamic_boundary_conditions.river.river_network_for_aoi.build_rec_river_network + src.dynamic_boundary_conditions.river.river_network_for_aoi.get_rec_river_network + + + +Attributes +~~~~~~~~~~ + +.. 
autoapisummary:: + + src.dynamic_boundary_conditions.river.river_network_for_aoi.log + + +.. py:data:: log + + + +.. py:function:: get_unique_nodes_dict(rec_data_w_node_coords: geopandas.GeoDataFrame) -> Dict[shapely.geometry.Point, int] + + Generates a dictionary that contains the unique node coordinates in the REC data for the catchment area. + + :param rec_data_w_node_coords: A GeoDataFrame containing the REC data for the catchment area with additional columns for the + first and last coordinates of each LineString. + :type rec_data_w_node_coords: gpd.GeoDataFrame + + :returns: A dictionary that contains the unique node coordinates (Point objects) in the REC data for the catchment area. + :rtype: Dict[Point, int] + + +.. py:function:: add_nodes_to_rec(rec_data_with_sdc: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Add columns for the first and last coordinates/nodes of each LineString in the REC data within the catchment area. + + :param rec_data_with_sdc: A GeoDataFrame containing the REC data for the catchment area with an additional column that identifies + the associated sea-draining catchment for each REC geometry. + :type rec_data_with_sdc: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the REC data for the catchment area with additional columns for the + first and last coordinates/nodes of each LineString. + :rtype: gpd.GeoDataFrame + + +.. py:function:: add_nodes_intersection_type(catchment_area: geopandas.GeoDataFrame, rec_data_with_nodes: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Calculate and add an 'intersection_type' column to the GeoDataFrame that contains REC data with node information. + + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + :param rec_data_with_nodes: A GeoDataFrame containing the REC data for the catchment area with additional columns for the + first and last coordinates/nodes of each LineString. 
+ :type rec_data_with_nodes: gpd.GeoDataFrame + + :returns: The input GeoDataFrame with the 'intersection_type' column added. + :rtype: gpd.GeoDataFrame + + +.. py:function:: prepare_network_data_for_construction(catchment_area: geopandas.GeoDataFrame, rec_data_with_sdc: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Prepares the necessary data for constructing the river network for the catchment area using the REC data. + + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + :param rec_data_with_sdc: A GeoDataFrame containing the REC data for the catchment area with an additional column that identifies + the associated sea-draining catchment for each REC geometry. + :type rec_data_with_sdc: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the necessary data for constructing the river network for the catchment area. + :rtype: gpd.GeoDataFrame + + +.. py:function:: add_nodes_to_network(rec_network: networkx.Graph, prepared_network_data: geopandas.GeoDataFrame) -> None + + Add nodes to the REC river network along with their attributes. + + :param rec_network: The REC river network, a directed graph, to which nodes will be added. + :type rec_network: nx.Graph + :param prepared_network_data: A GeoDataFrame containing the necessary data for constructing the river network for the catchment area. + :type prepared_network_data: gpd.GeoDataFrame + + :returns: This function does not return any value. + :rtype: None + + +.. py:function:: add_initial_edges_to_network(rec_network: networkx.Graph, prepared_network_data: geopandas.GeoDataFrame) -> None + + Add initial edges to the REC river network along with their attributes. + + :param rec_network: The REC river network, a directed graph, to which initial edges will be added. + :type rec_network: nx.Graph + :param prepared_network_data: A GeoDataFrame containing the necessary data for constructing the river network for the catchment area. 
+ :type prepared_network_data: gpd.GeoDataFrame + + :returns: This function does not return any value. + :rtype: None + + +.. py:function:: identify_absent_edges_to_add(rec_network: networkx.Graph, prepared_network_data: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Identify edges that are absent from the REC river network and require addition. + + :param rec_network: The REC river network, a directed graph. + :type rec_network: nx.Graph + :param prepared_network_data: A GeoDataFrame containing the necessary data for constructing the river network for the catchment area. + :type prepared_network_data: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing edges that are absent from the REC river network and require addition. + :rtype: gpd.GeoDataFrame + + +.. py:function:: add_absent_edges_to_network(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame, rec_network: networkx.Graph, prepared_network_data: geopandas.GeoDataFrame) -> None + + Add absent edges that are required for the current river network construction to the REC river network along with + their attributes. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame, + :param rec_network: The REC river network, a directed graph, to which absent edges will be added. + :type rec_network: nx.Graph + :param prepared_network_data: A GeoDataFrame containing the necessary data for constructing the river network for the catchment area. + :type prepared_network_data: gpd.GeoDataFrame + + :returns: This function does not return any value. + :rtype: None + + +.. 
py:function:: add_edge_directions_to_network_data(engine: sqlalchemy.engine.Engine, rec_network_id: int, rec_network: networkx.Graph, prepared_network_data: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Add edge directions to the river network data based on the provided REC river network. + Subsequently, eliminate REC geometries from the network data where the edge direction is absent (None), and + append these excluded REC geometries to the relevant database table. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param rec_network_id: An identifier for the river network associated with the current run. + :type rec_network_id: int + :param rec_network: The REC river network, a directed graph, used to determine the edge directions. + :type rec_network: nx.Graph + :param prepared_network_data: A GeoDataFrame containing the necessary data for constructing the river network for the catchment area. + :type prepared_network_data: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the updated river network data with added edge directions. + :rtype: gpd.GeoDataFrame + + +.. py:function:: remove_unconnected_edges_from_network(engine: sqlalchemy.engine.Engine, rec_network_id: int, rec_network: networkx.Graph, rec_network_data: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Remove REC river network edges that are not connected to their respective sea-draining catchment's end nodes. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param rec_network_id: An identifier for the river network associated with the current run. + :type rec_network_id: int + :param rec_network: The REC river network, a directed graph, used to identify edges that are connected to the end nodes of their + respective sea-draining catchments. + :type rec_network: nx.Graph + :param rec_network_data: A GeoDataFrame containing the REC river network data with added edge directions. 
+ :type rec_network_data: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the modified river network data with REC geometries removed if they are not + connected to their end nodes within their respective sea-draining catchments. + :rtype: gpd.GeoDataFrame + + +.. py:function:: build_rec_river_network(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame, rec_network_id: int) -> Tuple[networkx.DiGraph, geopandas.GeoDataFrame] + + Builds a river network for the catchment area using the REC data. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + :param rec_network_id: An identifier for the river network associated with the current run. + :type rec_network_id: int + + :returns: A tuple containing the constructed REC river network, represented as a directed graph (DiGraph), + along with its associated data in the form of a GeoDataFrame. + :rtype: Tuple[nx.DiGraph, gpd.GeoDataFrame] + + +.. py:function:: get_rec_river_network(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame) -> Tuple[networkx.Graph, geopandas.GeoDataFrame] + + Retrieve or create REC river network for the specified catchment area. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + + :returns: A tuple containing the REC river network as a directed graph (DiGraph) and its associated data + as a GeoDataFrame. 
+ :rtype: Tuple[nx.Graph, gpd.GeoDataFrame] + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_network_to_from_db/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_network_to_from_db/index.rst.txt new file mode 100644 index 000000000..9246a75d1 --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/river/river_network_to_from_db/index.rst.txt @@ -0,0 +1,144 @@ +:py:mod:`src.dynamic_boundary_conditions.river.river_network_to_from_db` +======================================================================== + +.. py:module:: src.dynamic_boundary_conditions.river.river_network_to_from_db + +.. autoapi-nested-parse:: + + This script handles the following tasks: storing both the REC river network and its associated data in files along with + their metadata in the database, retrieving the existing REC river network and its associated data from the database, + and managing the addition of REC geometries that have been excluded from the river network in the database, + as well as retrieving them for an existing REC river network. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.river.river_network_to_from_db.get_next_network_id + src.dynamic_boundary_conditions.river.river_network_to_from_db.add_network_exclusions_to_db + src.dynamic_boundary_conditions.river.river_network_to_from_db.get_new_network_output_paths + src.dynamic_boundary_conditions.river.river_network_to_from_db.get_network_output_metadata + src.dynamic_boundary_conditions.river.river_network_to_from_db.store_rec_network_to_db + src.dynamic_boundary_conditions.river.river_network_to_from_db.get_existing_network_metadata_from_db + src.dynamic_boundary_conditions.river.river_network_to_from_db.get_existing_network + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.river.river_network_to_from_db.log + + +.. 
py:data:: log + + + +.. py:function:: get_next_network_id(engine: sqlalchemy.engine.Engine) -> int + + Get the next available REC River Network ID from the River Network Exclusions table. + + :param engine: The engine used to connect to the database. + :type engine: Engine + + :returns: An identifier for the river network associated with each run, representing the next available River Network ID. + :rtype: int + + +.. py:function:: add_network_exclusions_to_db(engine: sqlalchemy.engine.Engine, rec_network_id: int, rec_network_exclusions: geopandas.GeoDataFrame, exclusion_cause: str) -> None + + Add REC geometries that are excluded from the river network for the current run in the database. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param rec_network_id: An identifier for the river network associated with the current run. + :type rec_network_id: int + :param rec_network_exclusions: A GeoDataFrame containing the REC geometries that are excluded from the river network for the current run. + :type rec_network_exclusions: gpd.GeoDataFrame + :param exclusion_cause: Cause of exclusion, i.e., the reason why the REC river geometry was excluded. + :type exclusion_cause: str + + :returns: This function does not return any value. + :rtype: None + + +.. py:function:: get_new_network_output_paths() -> Tuple[pathlib.Path, pathlib.Path] + + Get new file paths that incorporate the current timestamp into the filenames for storing both the REC Network and + its associated data. + + :returns: A tuple containing the file path to the REC Network and the file path to the REC Network data. + :rtype: Tuple[pathlib.Path, pathlib.Path] + + +.. py:function:: get_network_output_metadata(network_path: pathlib.Path, network_data_path: pathlib.Path, catchment_area: geopandas.GeoDataFrame) -> Tuple[str, str, str] + + Get metadata associated with the REC Network. + + :param network_path: The path to the REC Network file. 
+ :type network_path: pathlib.Path + :param network_data_path: The path to the REC Network data file. + :type network_data_path: pathlib.Path + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + + :returns: A tuple containing the absolute path to the REC Network file as a string, the absolute path to the REC Network + data file as a string, and the Well-Known Text (WKT) representation of the catchment area's geometry. + :rtype: Tuple[str, str, str] + + +.. py:function:: store_rec_network_to_db(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame, rec_network_id: int, rec_network: networkx.Graph, rec_network_data: geopandas.GeoDataFrame) -> None + + Store both the REC river network and its associated data in files, and their metadata in the database. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + :param rec_network_id: An identifier for the river network associated with the current run. + :type rec_network_id: int + :param rec_network: The constructed REC river network, represented as a directed graph (DiGraph). + :type rec_network: nx.Graph + :param rec_network_data: A GeoDataFrame containing the REC river network data. + :type rec_network_data: gpd.GeoDataFrame + + :returns: This function does not return any value. + :rtype: None + + +.. py:function:: get_existing_network_metadata_from_db(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Retrieve existing REC river network metadata for the specified catchment area from the database. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. 
+ :type catchment_area: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the existing REC river network metadata for the specified catchment area. + :rtype: gpd.GeoDataFrame + + +.. py:function:: get_existing_network(engine: sqlalchemy.engine.Engine, existing_network_meta: geopandas.GeoDataFrame) -> Tuple[networkx.Graph, geopandas.GeoDataFrame] + + Retrieve existing REC river network and its associated data. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param existing_network_meta: A GeoDataFrame containing the metadata for the existing REC river network. + :type existing_network_meta: gpd.GeoDataFrame + + :returns: A tuple containing the existing REC river network as a directed graph (DiGraph) and its associated data + as a GeoDataFrame. + :rtype: Tuple[nx.Graph, gpd.GeoDataFrame] + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/index.rst.txt new file mode 100644 index 000000000..7578d70e9 --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/index.rst.txt @@ -0,0 +1,21 @@ +:py:mod:`src.dynamic_boundary_conditions.tide` +============================================== + +.. py:module:: src.dynamic_boundary_conditions.tide + + +Submodules +---------- +.. 
toctree:: + :titlesonly: + :maxdepth: 1 + + main_tide_slr/index.rst + sea_level_rise_data/index.rst + tide_data_from_niwa/index.rst + tide_enum/index.rst + tide_query_location/index.rst + tide_slr_combine/index.rst + tide_slr_model_input/index.rst + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/main_tide_slr/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/main_tide_slr/index.rst.txt new file mode 100644 index 000000000..c2711b51a --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/main_tide_slr/index.rst.txt @@ -0,0 +1,94 @@ +:py:mod:`src.dynamic_boundary_conditions.tide.main_tide_slr` +============================================================ + +.. py:module:: src.dynamic_boundary_conditions.tide.main_tide_slr + +.. autoapi-nested-parse:: + + Main tide and sea level rise script used to fetch tide data, download and store sea level rise data in the database, + and generate the requested tide uniform boundary model input for BG-Flood etc. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.tide.main_tide_slr.remove_existing_boundary_inputs + src.dynamic_boundary_conditions.tide.main_tide_slr.main + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.tide.main_tide_slr.log + src.dynamic_boundary_conditions.tide.main_tide_slr.sample_polygon + + +.. py:data:: log + + + +.. py:function:: remove_existing_boundary_inputs(bg_flood_dir: pathlib.Path) -> None + + Remove existing uniform boundary input files from the specified directory. + + :param bg_flood_dir: BG-Flood model directory containing the uniform boundary input files. + :type bg_flood_dir: pathlib.Path + + :returns: This function does not return any value. + :rtype: None + + +.. 
py:function:: main(selected_polygon_gdf: geopandas.GeoDataFrame, tide_length_mins: int, time_to_peak_mins: Union[int, float], interval_mins: int, proj_year: int, confidence_level: str, ssp_scenario: str, add_vlm: bool, percentile: int, log_level: src.digitaltwin.utils.LogLevel = LogLevel.DEBUG) -> None + + Fetch tide data, read and store sea level rise data in the database, and generate the requested tide + uniform boundary model input for BG-Flood. + + :param selected_polygon_gdf: A GeoDataFrame representing the selected polygon, i.e., the catchment area. + :type selected_polygon_gdf: gpd.GeoDataFrame + :param tide_length_mins: The length of the tide event in minutes. + :type tide_length_mins: int + :param time_to_peak_mins: The time in minutes when the tide is at its greatest (reaches maximum). + :type time_to_peak_mins: Union[int, float] + :param interval_mins: The time interval, in minutes, between each recorded tide data point. + :type interval_mins: int + :param proj_year: The projection year for which the combined tide and sea level rise data should be generated. + :type proj_year: int + :param confidence_level: The desired confidence level for the sea level rise data. Valid values are 'low' or 'medium'. + :type confidence_level: str + :param ssp_scenario: The desired Shared Socioeconomic Pathways (SSP) scenario for the sea level rise data. + Valid options for both low and medium confidence are: 'SSP1-2.6', 'SSP2-4.5', or 'SSP5-8.5'. + Additional options for medium confidence are: 'SSP1-1.9' or 'SSP3-7.0'. + :type ssp_scenario: str + :param add_vlm: Indicates whether Vertical Land Motion (VLM) should be included in the sea level rise data. + Set to True if VLM should be included, False otherwise. + :type add_vlm: bool + :param percentile: The desired percentile for the sea level rise data. Valid values are 17, 50, or 83. + :type percentile: int + :param log_level: The log level to set for the root logger. Defaults to LogLevel.DEBUG. 
+ The available logging levels and their corresponding numeric values are: + - LogLevel.CRITICAL (50) + - LogLevel.ERROR (40) + - LogLevel.WARNING (30) + - LogLevel.INFO (20) + - LogLevel.DEBUG (10) + - LogLevel.NOTSET (0) + :type log_level: LogLevel = LogLevel.DEBUG + + :returns: This function does not return any value. + :rtype: None + + +.. py:data:: sample_polygon + + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/sea_level_rise_data/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/sea_level_rise_data/index.rst.txt new file mode 100644 index 000000000..96d81f0a1 --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/sea_level_rise_data/index.rst.txt @@ -0,0 +1,103 @@ +:py:mod:`src.dynamic_boundary_conditions.tide.sea_level_rise_data` +================================================================== + +.. py:module:: src.dynamic_boundary_conditions.tide.sea_level_rise_data + +.. autoapi-nested-parse:: + + This script handles the downloading and reading of sea level rise data from the NZ Sea level rise datasets, + storing the data in the database, and retrieving the closest sea level rise data from the database for all locations + in the provided tide data. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.tide.sea_level_rise_data.download_slr_data_files_from_takiwa + src.dynamic_boundary_conditions.tide.sea_level_rise_data.read_slr_data_from_files + src.dynamic_boundary_conditions.tide.sea_level_rise_data.store_slr_data_to_db + src.dynamic_boundary_conditions.tide.sea_level_rise_data.get_closest_slr_data + src.dynamic_boundary_conditions.tide.sea_level_rise_data.get_slr_data_from_db + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.tide.sea_level_rise_data.log + + +.. py:data:: log + + + +.. 
py:function:: download_slr_data_files_from_takiwa(slr_data_dir: pathlib.Path) -> None + + Download regional sea level rise (SLR) data files from the NZ SeaRise Takiwa website. + + :param slr_data_dir: The directory where the downloaded sea level rise data files will be saved. + :type slr_data_dir: pathlib.Path + + :returns: This function does not return any value. + :rtype: None + + +.. py:function:: read_slr_data_from_files(slr_data_dir: pathlib.Path) -> geopandas.GeoDataFrame + + Read sea level rise data from the NZ Sea level rise datasets and return a GeoDataFrame. + + :param slr_data_dir: The directory containing the downloaded sea level rise data files. + :type slr_data_dir: pathlib.Path + + :returns: A GeoDataFrame containing the sea level rise data from the NZ Sea level rise datasets. + :rtype: gpd.GeoDataFrame + + :raises FileNotFoundError: If the sea level rise data directory does not exist or if there are no CSV files in the specified directory. + + +.. py:function:: store_slr_data_to_db(engine: sqlalchemy.engine.Engine) -> None + + Store sea level rise data to the database. + + :param engine: The engine used to connect to the database. + :type engine: Engine + + :returns: This function does not return any value. + :rtype: None + + +.. py:function:: get_closest_slr_data(engine: sqlalchemy.engine.Engine, single_query_loc: pandas.Series) -> geopandas.GeoDataFrame + + Retrieve the closest sea level rise data for a single query location from the database. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param single_query_loc: Pandas Series containing the location coordinate and additional information used for retrieval. + :type single_query_loc: pd.Series + + :returns: A GeoDataFrame containing the closest sea level rise data for the query location from the database. + :rtype: gpd.GeoDataFrame + + +.. 
py:function:: get_slr_data_from_db(engine: sqlalchemy.engine.Engine, tide_data: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Retrieve the closest sea level rise data from the database for all locations in the provided tide data. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param tide_data: A GeoDataFrame containing tide data with added time information (seconds, minutes, hours) and location details. + :type tide_data: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the closest sea level rise data for all locations in the tide data. + :rtype: gpd.GeoDataFrame + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/tide_data_from_niwa/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/tide_data_from_niwa/index.rst.txt new file mode 100644 index 000000000..e7802aa9f --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/tide_data_from_niwa/index.rst.txt @@ -0,0 +1,315 @@ +:py:mod:`src.dynamic_boundary_conditions.tide.tide_data_from_niwa` +================================================================== + +.. py:module:: src.dynamic_boundary_conditions.tide.tide_data_from_niwa + +.. autoapi-nested-parse:: + + Fetch tide data from NIWA using the Tide API based on the specified approach, datum, etc. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. 
autoapisummary:: + + src.dynamic_boundary_conditions.tide.tide_data_from_niwa.get_query_loc_coords_position + src.dynamic_boundary_conditions.tide.tide_data_from_niwa.get_date_ranges + src.dynamic_boundary_conditions.tide.tide_data_from_niwa.gen_tide_query_param_list + src.dynamic_boundary_conditions.tide.tide_data_from_niwa.fetch_tide_data + src.dynamic_boundary_conditions.tide.tide_data_from_niwa.fetch_tide_data_for_requested_period + src.dynamic_boundary_conditions.tide.tide_data_from_niwa.convert_to_nz_timezone + src.dynamic_boundary_conditions.tide.tide_data_from_niwa.fetch_tide_data_from_niwa + src.dynamic_boundary_conditions.tide.tide_data_from_niwa.get_highest_tide_datetime + src.dynamic_boundary_conditions.tide.tide_data_from_niwa.get_highest_tide_datetime_span + src.dynamic_boundary_conditions.tide.tide_data_from_niwa.get_highest_tide_date_span + src.dynamic_boundary_conditions.tide.tide_data_from_niwa.fetch_tide_data_around_highest_tide + src.dynamic_boundary_conditions.tide.tide_data_from_niwa.get_time_mins_to_add + src.dynamic_boundary_conditions.tide.tide_data_from_niwa.add_time_information + src.dynamic_boundary_conditions.tide.tide_data_from_niwa.get_tide_data + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.tide.tide_data_from_niwa.TIDE_API_URL_DATA + src.dynamic_boundary_conditions.tide.tide_data_from_niwa.TIDE_API_URL_DATA_CSV + + +.. py:data:: TIDE_API_URL_DATA + :value: 'https://api.niwa.co.nz/tides/data' + + + +.. py:data:: TIDE_API_URL_DATA_CSV + :value: 'https://api.niwa.co.nz/tides/data.csv' + + + +.. py:function:: get_query_loc_coords_position(query_loc_row: geopandas.GeoDataFrame) -> Tuple[float, float, str] + + Get the latitude, longitude, and position of a query location. + + :param query_loc_row: A GeoDataFrame representing a query location used to fetch tide data from NIWA using the tide API. 
+ :type query_loc_row: gpd.GeoDataFrame + + :returns: A tuple containing the latitude, longitude, and position of the query location. + :rtype: Tuple[float, float, str] + + +.. py:function:: get_date_ranges(start_date: datetime.date = date.today(), total_days: int = 365, days_per_call: int = 31) -> Dict[datetime.date, int] + + Get the start date and duration, measured in days, for each API call used to fetch tide data for the + requested period. + + :param start_date: The start date for retrieving tide data. It can be in the past or present. Default is today's date. + :type start_date: date = date.today() + :param total_days: The total number of days of tide data to retrieve. Default is 365 days (one year). + :type total_days: int = 365 + :param days_per_call: The number of days to fetch in each API call. Must be between 1 and 31 inclusive. + Default is 31, which represents the maximum number of days that can be fetched per API call. + :type days_per_call: int = 31 + + :returns: A dictionary containing the start date as the key and the duration, in days, for each API call as the value. + :rtype: Dict[date, int] + + :raises ValueError: - If 'total_days' is less than 1. + - If 'days_per_call' is not between 1 and 31 inclusive. + + +.. py:function:: gen_tide_query_param_list(lat: Union[int, float], long: Union[int, float], date_ranges: Dict[datetime.date, int], interval_mins: Optional[int] = None, datum: src.dynamic_boundary_conditions.tide.tide_enum.DatumType = DatumType.LAT) -> List[Dict[str, Union[str, int]]] + + Generate a list of API query parameters used to retrieve tide data for the requested period. + + :param lat: Latitude in the range of -29 to -53 (e.g., -30.876). + :type lat: Union[int, float] + :param long: Longitude in the range of 160 to 180 and -175 to -180 (e.g., -175.543). + :type long: Union[int, float] + :param date_ranges: Dictionary of start date and number of days for each API call needed to retrieve tide data + for the requested period. 
+ :type date_ranges: Dict[date, int] + :param interval_mins: Output time interval in minutes, range from 10 to 1440 minutes (1 day). + Omit to retrieve only the highest and lowest tide data. + :type interval_mins: Optional[int] = None + :param datum: Datum used for fetching tide data from NIWA. Default value is LAT. + Valid options are LAT for the Lowest Astronomical Tide and MSL for the Mean Sea Level. + :type datum: DatumType = DatumType.LAT + + :returns: A list of API query parameters used to retrieve tide data for the requested period. + :rtype: List[Dict[str, Union[str, int]]] + + :raises ValueError: - If the latitude is outside the range of -29 to -53. + - If the longitude is outside the range of 160 to 180 or -175 to -180. + - If the time interval is provided and outside the range of 10 to 1440. + + +.. py:function:: fetch_tide_data(session: aiohttp.ClientSession, query_param: Dict[str, Union[str, int]], url: str = TIDE_API_URL_DATA) -> geopandas.GeoDataFrame + :async: + + Fetch tide data using the provided query parameters within a single API call. + + :param session: An instance of `aiohttp.ClientSession` used for making HTTP requests. + :type session: aiohttp.ClientSession + :param query_param: The query parameters used to retrieve tide data for a specific location and time period. + :type query_param: Dict[str, Union[str, int]] + :param url: Tide API HTTP request URL. Defaults to `TIDE_API_URL_DATA`. + Can be either `TIDE_API_URL_DATA` or `TIDE_API_URL_DATA_CSV`. + :type url: str = TIDE_API_URL_DATA + + :returns: A GeoDataFrame containing the fetched tide data. + :rtype: gpd.GeoDataFrame + + +.. py:function:: fetch_tide_data_for_requested_period(query_param_list: List[Dict[str, Union[str, int]]], url: str = TIDE_API_URL_DATA) -> geopandas.GeoDataFrame + :async: + + Iterate over the list of API query parameters to fetch tide data for the requested period. 
+ + :param query_param_list: A list of API query parameters used to retrieve tide data for the requested period. + :type query_param_list: List[Dict[str, Union[str, int]]] + :param url: Tide API HTTP request URL. Defaults to `TIDE_API_URL_DATA`. + Can be either `TIDE_API_URL_DATA` or `TIDE_API_URL_DATA_CSV`. + :type url: str = TIDE_API_URL_DATA + + :returns: A GeoDataFrame containing the fetched tide data for the requested period. + :rtype: gpd.GeoDataFrame + + :raises ValueError: If an invalid URL is specified for the Tide API HTTP request. + :raises RuntimeError: If failed to fetch tide data. + + +.. py:function:: convert_to_nz_timezone(tide_data_utc: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Convert the time column in the initially retrieved tide data for the requested period from UTC to NZ timezone. + + :param tide_data_utc: The original tide data obtained for the requested period with the time column expressed in UTC. + :type tide_data_utc: gpd.GeoDataFrame + + :returns: The tide data with the time column converted to NZ timezone. + :rtype: gpd.GeoDataFrame + + +.. py:function:: fetch_tide_data_from_niwa(tide_query_loc: geopandas.GeoDataFrame, datum: src.dynamic_boundary_conditions.tide.tide_enum.DatumType = DatumType.LAT, start_date: datetime.date = date.today(), total_days: int = 365, interval_mins: Optional[int] = None) -> geopandas.GeoDataFrame + + Retrieve tide data from NIWA for the requested time period using the Tide API. + + :param tide_query_loc: A GeoDataFrame containing the query coordinates and their positions. + :type tide_query_loc: gpd.GeoDataFrame + :param datum: Datum used for fetching tide data from NIWA. Default value is LAT. + Valid options are LAT for the Lowest Astronomical Tide and MSL for the Mean Sea Level. + :type datum: DatumType = DatumType.LAT + :param start_date: The start date for retrieving tide data. It can be in the past or present. Default is today's date. 
+ :type start_date: date = date.today() + :param total_days: The total number of days of tide data to retrieve. Default is 365 days (one year). + :type total_days: int = 365 + :param interval_mins: Output time interval in minutes, range from 10 to 1440 minutes (1 day). + Omit to retrieve only the highest and lowest tide data. + :type interval_mins: Optional[int] = None + + :returns: A GeoDataFrame containing the fetched tide data from NIWA for the requested time period. + :rtype: gpd.GeoDataFrame + + +.. py:function:: get_highest_tide_datetime(tide_data: geopandas.GeoDataFrame) -> pandas.Timestamp + + Get the datetime of the most recent highest tide that occurred within the requested time period. + + :param tide_data: The tide data fetched from NIWA for the requested time period. + The time column is expressed in NZ timezone, which was converted from UTC. + :type tide_data: gpd.GeoDataFrame + + :returns: The datetime of the most recent highest tide that occurred within the requested time period. + :rtype: pd.Timestamp + + +.. py:function:: get_highest_tide_datetime_span(highest_tide_datetime: pandas.Timestamp, tide_length_mins: int) -> Tuple[pandas.Timestamp, pandas.Timestamp] + + Get the start and end datetimes of a tide event centered around the datetime of the highest tide. + + :param highest_tide_datetime: The datetime of the most recent highest tide that occurred within the requested time period. + :type highest_tide_datetime: pd.Timestamp + :param tide_length_mins: The length of the tide event in minutes. + :type tide_length_mins: int + + :returns: A tuple containing the start and end datetimes of the tide event centered around the + datetime of the highest tide. + :rtype: Tuple[pd.Timestamp, pd.Timestamp] + + +.. py:function:: get_highest_tide_date_span(start_datetime: pandas.Timestamp, end_datetime: pandas.Timestamp) -> Tuple[datetime.date, int] + + Get the start date and duration in days of a tide event centered around the datetime of the highest tide. 
+ + :param start_datetime: The start datetime of the tide event centered around the datetime of the highest tide. + :type start_datetime: pd.Timestamp + :param end_datetime: The end datetime of the tide event centered around the datetime of the highest tide. + :type end_datetime: pd.Timestamp + + :returns: A tuple containing the start date and the duration in days of a tide event centered around the + datetime of the highest tide. + :rtype: Tuple[date, int] + + +.. py:function:: fetch_tide_data_around_highest_tide(tide_data: geopandas.GeoDataFrame, tide_length_mins: int, interval_mins: int = 10, datum: src.dynamic_boundary_conditions.tide.tide_enum.DatumType = DatumType.LAT) -> geopandas.GeoDataFrame + + Fetch tide data around the highest tide from NIWA for the specified tide length and interval. + + :param tide_data: The tide data fetched from NIWA for the requested time period. + The time column is expressed in NZ timezone, which was converted from UTC. + :type tide_data: gpd.GeoDataFrame + :param tide_length_mins: The length of the tide event in minutes. + :type tide_length_mins: int + :param interval_mins: The time interval, in minutes, between each recorded tide data point. The default value is 10 minutes. + :type interval_mins: int = 10 + :param datum: Datum used for fetching tide data from NIWA. Default value is LAT. + Valid options are LAT for the Lowest Astronomical Tide and MSL for the Mean Sea Level. + :type datum: DatumType = DatumType.LAT + + :returns: The tide data around the highest tide, fetched from NIWA, for the specified tide length and interval. + :rtype: gpd.GeoDataFrame + + +.. py:function:: get_time_mins_to_add(tide_data: geopandas.GeoDataFrame, tide_length_mins: int, time_to_peak_mins: Union[int, float], interval_mins: int = 10) -> List[Union[float, int]] + + Get the time values in minutes to add to the tide data. + + :param tide_data: The tide data for which time values in minutes will be calculated. 
+ :type tide_data: gpd.GeoDataFrame + :param tide_length_mins: The length of the tide event in minutes. + :type tide_length_mins: int + :param time_to_peak_mins: The time in minutes when the tide is at its greatest (reaches maximum). + :type time_to_peak_mins: Union[int, float] + :param interval_mins: The time interval, in minutes, between each recorded tide data point. The default value is 10 minutes. + :type interval_mins: int = 10 + + :returns: A list containing the time values in minutes to add to the tide data. + :rtype: List[Union[float, int]] + + +.. py:function:: add_time_information(tide_data: geopandas.GeoDataFrame, time_to_peak_mins: Union[int, float], interval_mins: int = 10, tide_length_mins: Optional[int] = None, total_days: Optional[int] = None, approach: src.dynamic_boundary_conditions.tide.tide_enum.ApproachType = ApproachType.KING_TIDE) -> geopandas.GeoDataFrame + + Add time information (seconds, minutes, hours) to the tide data. + + :param tide_data: The tide data for which time information will be added. + :type tide_data: gpd.GeoDataFrame + :param time_to_peak_mins: The time in minutes when the tide is at its greatest (reaches maximum). + :type time_to_peak_mins: Union[int, float] + :param interval_mins: The time interval, in minutes, between each recorded tide data point. The default value is 10 minutes. + :type interval_mins: int = 10 + :param tide_length_mins: The length of the tide event in minutes. Only required if the 'approach' is KING_TIDE. + :type tide_length_mins: Optional[int] = None + :param total_days: The total number of days for the tide event. Only required if the 'approach' is PERIOD_TIDE. + :type total_days: Optional[int] = None + :param approach: The approach used to get the tide data. Default is KING_TIDE. + :type approach: ApproachType = ApproachType.KING_TIDE + + :returns: The tide data with added time information in seconds, minutes, and hours. 
+ :rtype: gpd.GeoDataFrame + + :raises ValueError: If 'time_to_peak_mins' is less than the minimum time to peak. + + .. rubric:: Notes + + The minimum time to peak is calculated differently depending on the approach used: + - For the KING_TIDE approach, it is half of the 'tide_length_mins'. + - For the PERIOD_TIDE approach, it is half of the 'total_days' converted to minutes. + + +.. py:function:: get_tide_data(tide_query_loc: geopandas.GeoDataFrame, time_to_peak_mins: Union[int, float], approach: src.dynamic_boundary_conditions.tide.tide_enum.ApproachType = ApproachType.KING_TIDE, start_date: datetime.date = date.today(), total_days: Optional[int] = None, tide_length_mins: Optional[int] = None, interval_mins: int = 10, datum: src.dynamic_boundary_conditions.tide.tide_enum.DatumType = DatumType.LAT) -> geopandas.GeoDataFrame + + Fetch tide data from NIWA using the Tide API based on the specified approach, datum, and other parameters. + + :param tide_query_loc: A GeoDataFrame containing the query coordinates and their positions. + :type tide_query_loc: gpd.GeoDataFrame + :param time_to_peak_mins: The time in minutes when the tide is at its greatest (reaches maximum). + :type time_to_peak_mins: Union[int, float] + :param approach: The approach used to get the tide data. Default is KING_TIDE. + :type approach: ApproachType = ApproachType.KING_TIDE + :param start_date: The start date for retrieving tide data. It can be in the past or present. Default is today's date. + :type start_date: date = date.today() + :param total_days: The total number of days for the tide event. Only required if the 'approach' is PERIOD_TIDE. + :type total_days: Optional[int] = None + :param tide_length_mins: The length of the tide event in minutes. Only required if the 'approach' is KING_TIDE. + :type tide_length_mins: Optional[int] = None + :param interval_mins: The time interval, in minutes, between each recorded tide data point. The default value is 10 minutes. 
+ :type interval_mins: int = 10 + :param datum: Datum used for fetching tide data from NIWA. Default value is LAT. + Valid options are LAT for the Lowest Astronomical Tide and MSL for the Mean Sea Level. + :type datum: DatumType = DatumType.LAT + + :returns: The tide data with added time information in seconds, minutes, and hours. + :rtype: gpd.GeoDataFrame + + :raises ValueError: - If 'interval_mins' is None. + - If the 'approach' is KING_TIDE and 'tide_length_mins' is None or 'total_days' is not None. + - If the 'approach' is PERIOD_TIDE and 'total_days' is None or 'tide_length_mins' is not None. + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/tide_enum/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/tide_enum/index.rst.txt new file mode 100644 index 000000000..40b1b23cd --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/tide_enum/index.rst.txt @@ -0,0 +1,85 @@ +:py:mod:`src.dynamic_boundary_conditions.tide.tide_enum` +======================================================== + +.. py:module:: src.dynamic_boundary_conditions.tide.tide_enum + +.. autoapi-nested-parse:: + + Enum(s) used in the tide_slr module. + + + +Module Contents +--------------- + +Classes +~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.tide.tide_enum.DatumType + src.dynamic_boundary_conditions.tide.tide_enum.ApproachType + + + + +.. py:class:: DatumType + + + Bases: :py:obj:`enum.StrEnum` + + Enum class representing different datum types. + + .. attribute:: LAT + + Lowest astronomical tide. + + :type: str + + .. attribute:: MSL + + Mean sea level. + + :type: str + + .. py:attribute:: LAT + :value: 'lat' + + + + .. py:attribute:: MSL + :value: 'msl' + + + + +.. py:class:: ApproachType + + + Bases: :py:obj:`enum.StrEnum` + + Enum class representing different types of approaches. + + .. attribute:: KING_TIDE + + King Tide approach. + + :type: str + + .. 
attribute:: PERIOD_TIDE + + Period Tide approach. + + :type: str + + .. py:attribute:: KING_TIDE + :value: 'king_tide' + + + + .. py:attribute:: PERIOD_TIDE + :value: 'period_tide' + + + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/tide_query_location/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/tide_query_location/index.rst.txt new file mode 100644 index 000000000..17eb4bdfe --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/tide_query_location/index.rst.txt @@ -0,0 +1,134 @@ +:py:mod:`src.dynamic_boundary_conditions.tide.tide_query_location` +================================================================== + +.. py:module:: src.dynamic_boundary_conditions.tide.tide_query_location + +.. autoapi-nested-parse:: + + Get the locations used to fetch tide data from NIWA using the tide API. + sli229 + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.tide.tide_query_location.get_regional_council_clipped_from_db + src.dynamic_boundary_conditions.tide.tide_query_location.get_nz_coastline_from_db + src.dynamic_boundary_conditions.tide.tide_query_location.get_catchment_boundary_info + src.dynamic_boundary_conditions.tide.tide_query_location.get_catchment_boundary_lines + src.dynamic_boundary_conditions.tide.tide_query_location.get_catchment_boundary_centroids + src.dynamic_boundary_conditions.tide.tide_query_location.get_non_intersection_centroid_position + src.dynamic_boundary_conditions.tide.tide_query_location.get_tide_query_locations + + + +.. py:exception:: NoTideDataException + + + Bases: :py:obj:`Exception` + + Exception raised when no tide data is to be used for the BG-Flood model. + + +.. 
py:function:: get_regional_council_clipped_from_db(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Retrieve regional council clipped data from the database based on the catchment area. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the regional council clipped data for the catchment area. + :rtype: gpd.GeoDataFrame + + +.. py:function:: get_nz_coastline_from_db(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame, distance_km: int = 1) -> geopandas.GeoDataFrame + + Retrieve the New Zealand coastline data within a specified distance of the catchment area from the database. + + :param engine: The engine used to connect to the database. + :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + :param distance_km: Distance in kilometers used to buffer the catchment area for coastline retrieval. Default is 1 kilometer. + :type distance_km: int = 1 + + :returns: A GeoDataFrame containing the New Zealand coastline data within the specified distance of the catchment area. + :rtype: gpd.GeoDataFrame + + +.. py:function:: get_catchment_boundary_info(catchment_area: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Get information about the boundary segments of the catchment area. + + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing information about the boundary segments of the catchment area. + :rtype: gpd.GeoDataFrame + + :raises ValueError: If the position of a catchment boundary line cannot be identified. + + +.. 
py:function:: get_catchment_boundary_lines(catchment_area: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Get the boundary lines of the catchment area. + + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the boundary lines of the catchment area. + :rtype: gpd.GeoDataFrame + + +.. py:function:: get_catchment_boundary_centroids(catchment_area: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Get the centroids of the boundary lines of the catchment area. + + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the centroids of the boundary lines of the catchment area. + :rtype: gpd.GeoDataFrame + + +.. py:function:: get_non_intersection_centroid_position(catchment_area: geopandas.GeoDataFrame, non_intersection_area: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Determine the positions of non-intersection centroid points relative to the boundary lines of the catchment area. + + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + :param non_intersection_area: A GeoDataFrame representing the non-intersection area. + :type non_intersection_area: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the positions of non-intersection centroid points relative to the catchment boundary + lines. The GeoDataFrame includes the 'position' column denoting the relative position and the 'geometry' column + representing the centroid points of the non-intersection areas. + :rtype: gpd.GeoDataFrame + + +.. py:function:: get_tide_query_locations(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame, distance_km: int = 1) -> geopandas.GeoDataFrame + + Get the locations used to fetch tide data from NIWA using the tide API. + + :param engine: The engine used to connect to the database. 
+ :type engine: Engine + :param catchment_area: A GeoDataFrame representing the catchment area. + :type catchment_area: gpd.GeoDataFrame + :param distance_km: Distance in kilometers used to buffer the catchment area for coastline retrieval. Default is 1 kilometer. + :type distance_km: int = 1 + + :returns: A GeoDataFrame containing the locations used to fetch tide data from NIWA using the tide API. + :rtype: gpd.GeoDataFrame + + :raises NoTideDataException: If no coastline is found within the specified distance of the catchment area. + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/tide_slr_combine/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/tide_slr_combine/index.rst.txt new file mode 100644 index 000000000..968dbf5a1 --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/tide_slr_combine/index.rst.txt @@ -0,0 +1,136 @@ +:py:mod:`src.dynamic_boundary_conditions.tide.tide_slr_combine` +=============================================================== + +.. py:module:: src.dynamic_boundary_conditions.tide.tide_slr_combine + +.. autoapi-nested-parse:: + + Generates combined tide and sea level rise (SLR) data for a specific projection year, taking into account the provided + confidence level, SSP scenario, inclusion of Vertical Land Motion (VLM), percentile, and more. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.tide.tide_slr_combine.split_slr_measurementname_column + src.dynamic_boundary_conditions.tide.tide_slr_combine.get_slr_scenario_data + src.dynamic_boundary_conditions.tide.tide_slr_combine.get_interpolated_slr_scenario_data + src.dynamic_boundary_conditions.tide.tide_slr_combine.add_slr_to_tide + src.dynamic_boundary_conditions.tide.tide_slr_combine.get_combined_tide_slr_data + + + +.. 
py:function:: split_slr_measurementname_column(slr_data: geopandas.GeoDataFrame) -> geopandas.GeoDataFrame + + Split the 'measurementname' column in the sea level rise data to extract and add additional information. + + :param slr_data: A GeoDataFrame containing the sea level rise data. + :type slr_data: gpd.GeoDataFrame + + :returns: A GeoDataFrame containing the sea level rise data with additional columns for extracted information: + 'confidence_level', 'ssp_scenario', and 'add_vlm'. + :rtype: gpd.GeoDataFrame + + +.. py:function:: get_slr_scenario_data(slr_data: geopandas.GeoDataFrame, confidence_level: str, ssp_scenario: str, add_vlm: bool, percentile: int) -> geopandas.GeoDataFrame + + Get sea level rise scenario data based on the specified confidence_level, ssp_scenario, add_vlm, and percentile. + + :param slr_data: A GeoDataFrame containing the sea level rise data. + :type slr_data: gpd.GeoDataFrame + :param confidence_level: The desired confidence level for the scenario data. Valid values are 'low' or 'medium'. + :type confidence_level: str + :param ssp_scenario: The desired Shared Socioeconomic Pathways (SSP) scenario for the scenario data. + Valid options for both low and medium confidence are: 'SSP1-2.6', 'SSP2-4.5', or 'SSP5-8.5'. + Additional options for medium confidence are: 'SSP1-1.9' or 'SSP3-7.0'. + :type ssp_scenario: str + :param add_vlm: Indicates whether to include Vertical Land Motion (VLM) in the scenario data. + Set to True if VLM should be included, False otherwise. + :type add_vlm: bool + :param percentile: The desired percentile for the scenario data. Valid values are 17, 50, or 83. + :type percentile: int + + :returns: A GeoDataFrame containing the sea level rise scenario data based on the specified + confidence_level, ssp_scenario, add_vlm, and percentile. + :rtype: gpd.GeoDataFrame + + :raises ValueError: - If an invalid 'confidence_level' value is provided. + - If an invalid 'ssp_scenario' value is provided. 
+ - If an invalid 'add_vlm' value is provided. + - If an invalid 'percentile' value is provided. + + +.. py:function:: get_interpolated_slr_scenario_data(slr_scenario_data: geopandas.GeoDataFrame, increment_year: int = 1, interp_method: str = 'linear') -> geopandas.GeoDataFrame + + Interpolates sea level rise scenario data based on the specified year interval and interpolation method. + + :param slr_scenario_data: A GeoDataFrame containing the sea level rise scenario data. + :type slr_scenario_data: gpd.GeoDataFrame + :param increment_year: The year interval used for interpolation. Defaults to 1 year. + :type increment_year: int = 1 + :param interp_method: Temporal interpolation method to be used. Defaults to 'linear'. + Available methods: 'linear', 'nearest', 'nearest-up', 'zero', 'slinear', 'quadratic', 'cubic', 'previous', + 'next'. Refer to 'scipy.interpolate.interp1d()' for more details. + :type interp_method: str = "linear" + + :returns: A GeoDataFrame containing the interpolated sea level rise scenario data. + :rtype: gpd.GeoDataFrame + + :raises ValueError: - If the specified 'increment_year' is out of range. + - If the specified 'interp_method' is not supported. + + +.. py:function:: add_slr_to_tide(tide_data: geopandas.GeoDataFrame, slr_interp_scenario: geopandas.GeoDataFrame, proj_year: int) -> pandas.DataFrame + + Adds sea level rise (SLR) data to the tide data for a specific projection year and + returns the combined tide and sea level rise value. + + :param tide_data: A GeoDataFrame containing tide data with added time information (seconds, minutes, hours) and location details. + :type tide_data: gpd.GeoDataFrame + :param slr_interp_scenario: A GeoDataFrame containing the interpolated sea level rise scenario data. + :type slr_interp_scenario: gpd.GeoDataFrame + :param proj_year: The projection year for which sea level rise data should be added to the tide data. 
+ :type proj_year: int + + :returns: A DataFrame that contains the combined tide and sea level rise data for the specified projection year. + :rtype: pd.DataFrame + + +.. py:function:: get_combined_tide_slr_data(tide_data: geopandas.GeoDataFrame, slr_data: geopandas.GeoDataFrame, proj_year: int, confidence_level: str, ssp_scenario: str, add_vlm: bool, percentile: int, increment_year: int = 1, interp_method: str = 'linear') -> pandas.DataFrame + + Generates the combined tide and sea level rise (SLR) data for a specific projection year, considering the given + confidence_level, ssp_scenario, add_vlm, percentile, and more. + + :param tide_data: A GeoDataFrame containing tide data with added time information (seconds, minutes, hours) and location details. + :type tide_data: gpd.GeoDataFrame + :param slr_data: A GeoDataFrame containing the sea level rise data. + :type slr_data: gpd.GeoDataFrame + :param proj_year: The projection year for which the combined tide and sea level rise data should be generated. + :type proj_year: int + :param confidence_level: The desired confidence level for the sea level rise data. + :type confidence_level: str + :param ssp_scenario: The desired Shared Socioeconomic Pathways (SSP) scenario for the sea level rise data. + :type ssp_scenario: str + :param add_vlm: Indicates whether Vertical Land Motion (VLM) should be included in the sea level rise data. + :type add_vlm: bool + :param percentile: The desired percentile for the sea level rise data. + :type percentile: int + :param increment_year: The year interval used for interpolating the sea level rise data. Defaults to 1 year. + :type increment_year: int = 1 + :param interp_method: Temporal interpolation method used for interpolating the sea level rise data. Defaults to 'linear'. + Available methods: 'linear', 'nearest', 'nearest-up', 'zero', 'slinear', 'quadratic', 'cubic', 'previous', + 'next'. Refer to 'scipy.interpolate.interp1d()' for more details. 
+ :type interp_method: str = "linear" + + :returns: A DataFrame containing the combined tide and sea level rise data for the specified projection year, + taking into account the provided confidence_level, ssp_scenario, add_vlm, percentile, and more. + :rtype: pd.DataFrame + + diff --git a/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/tide_slr_model_input/index.rst.txt b/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/tide_slr_model_input/index.rst.txt new file mode 100644 index 000000000..0c466638d --- /dev/null +++ b/docs/_sources/autoapi/src/dynamic_boundary_conditions/tide/tide_slr_model_input/index.rst.txt @@ -0,0 +1,49 @@ +:py:mod:`src.dynamic_boundary_conditions.tide.tide_slr_model_input` +=================================================================== + +.. py:module:: src.dynamic_boundary_conditions.tide.tide_slr_model_input + +.. autoapi-nested-parse:: + + Generates the requested water level uniform boundary model input for BG-Flood. + + + +Module Contents +--------------- + + +Functions +~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.tide.tide_slr_model_input.generate_uniform_boundary_input + + + +Attributes +~~~~~~~~~~ + +.. autoapisummary:: + + src.dynamic_boundary_conditions.tide.tide_slr_model_input.log + + +.. py:data:: log + + + +.. py:function:: generate_uniform_boundary_input(bg_flood_dir: pathlib.Path, tide_slr_data: pandas.DataFrame) -> None + + Generates the requested water level uniform boundary model input for BG-Flood. + + :param bg_flood_dir: The BG-Flood model directory. + :type bg_flood_dir: pathlib.Path + :param tide_slr_data: A DataFrame containing the combined tide and sea level rise data. + :type tide_slr_data: pd.DataFrame + + :returns: This function does not return any value. 
+ :rtype: None + + diff --git a/docs/_sources/autoapi/src/flood_model/bg_flood_model/index.rst.txt b/docs/_sources/autoapi/src/flood_model/bg_flood_model/index.rst.txt index 5f6a6d865..8d03d54c1 100644 --- a/docs/_sources/autoapi/src/flood_model/bg_flood_model/index.rst.txt +++ b/docs/_sources/autoapi/src/flood_model/bg_flood_model/index.rst.txt @@ -5,9 +5,8 @@ .. autoapi-nested-parse:: - This script handles the processing of input files for the BG-Flood Model, executes the flood model, - stores the resulting model output metadata in the database, and incorporates the model output into GeoServer for - visualization. + This script handles the processing of input files for the BG-Flood Model, executes the flood model, stores the + resulting model output metadata in the database, and incorporates the model output into GeoServer for visualization. @@ -155,7 +154,7 @@ Attributes :rtype: None -.. py:function:: prepare_bg_flood_model_inputs(bg_flood_dir: pathlib.Path, model_output_path: pathlib.Path, hydro_dem_path: pathlib.Path, resolution: Union[int, float], output_timestep: Union[int, float] = 0, end_time: Union[int, float] = 0, mask: Union[int, float] = 9999, gpu_device: int = 0, small_nc: int = 0) -> None +.. py:function:: prepare_bg_flood_model_inputs(bg_flood_dir: pathlib.Path, model_output_path: pathlib.Path, hydro_dem_path: pathlib.Path, resolution: Union[int, float], output_timestep: Union[int, float], end_time: Union[int, float], mask: Union[int, float] = 9999, gpu_device: int = 0, small_nc: int = 0) -> None Prepare inputs for the BG-Flood Model. @@ -167,26 +166,26 @@ Attributes :type hydro_dem_path: pathlib.Path, :param resolution: The grid resolution in meters for metric grids, representing the size of each grid cell. :type resolution: Union[int, float] - :param output_timestep: Time step between model outputs in seconds. Default value is 0.0 (no output generated). 
- :type output_timestep: Union[int, float], optional - :param end_time: Time in seconds when the model stops. Default value is 0.0 (model initializes but does not run). - :type end_time: Union[int, float], optional + :param output_timestep: Time step between model outputs in seconds. If the value is set to 0 then no output is generated. + :type output_timestep: Union[int, float] + :param end_time: Time in seconds when the model stops. If the value is set to 0 then the model initializes but does not run. + :type end_time: Union[int, float] :param mask: The mask value is used to remove blocks from computation where the topography elevation (zb) is greater than the specified value. Default value is 9999.0 (no areas are masked). - :type mask: Union[int, float], optional + :type mask: Union[int, float] = 9999 :param gpu_device: Specify the GPU device to be used. Default value is 0 (the first available GPU). Set the value to -1 to use the CPU. For other GPUs, use values 2 and above. - :type gpu_device: int, optional + :type gpu_device: int = 0 :param small_nc: Specify whether the output should be saved as short integers to reduce the size of the output file. Set the value to 1 to enable short integer conversion, or set it to 0 to save all variables as floats. Default value is 0. - :type small_nc: int, optional + :type small_nc: int = 0 :returns: This function does not return any value. :rtype: None -.. py:function:: run_bg_flood_model(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame, model_output_path: pathlib.Path, output_timestep: Union[int, float] = 0, end_time: Union[int, float] = 0, resolution: Optional[Union[int, float]] = None, mask: Union[int, float] = 9999, gpu_device: int = 0, small_nc: int = 0) -> None +.. 
py:function:: run_bg_flood_model(engine: sqlalchemy.engine.Engine, catchment_area: geopandas.GeoDataFrame, model_output_path: pathlib.Path, output_timestep: Union[int, float], end_time: Union[int, float], resolution: Optional[Union[int, float]] = None, mask: Union[int, float] = 9999, gpu_device: int = 0, small_nc: int = 0) -> None Run the BG-Flood Model for the specified catchment area. @@ -196,30 +195,66 @@ Attributes :type catchment_area: gpd.GeoDataFrame :param model_output_path: The new file path for saving the BG Flood model output with the current timestamp included in the filename. :type model_output_path: pathlib.Path - :param output_timestep: Time step between model outputs in seconds. Default value is 0.0 (no output generated). - :type output_timestep: Union[int, float], optional - :param end_time: Time in seconds when the model stops. Default value is 0.0 (model initializes but does not run). - :type end_time: Union[int, float], optional + :param output_timestep: Time step between model outputs in seconds. If the value is set to 0 then no output is generated. + :type output_timestep: Union[int, float] + :param end_time: Time in seconds when the model stops. If the value is set to 0 then the model initializes but does not run. + :type end_time: Union[int, float] :param resolution: The grid resolution in meters for metric grids, representing the size of each grid cell. If not provided (default is None), the resolution of the Hydrologically conditioned DEM will be used as the grid resolution. - :type resolution: Optional[Union[int, float]], optional + :type resolution: Optional[Union[int, float]] = None :param mask: The mask value is used to remove blocks from computation where the topography elevation (zb) is greater than the specified value. Default value is 9999.0 (no areas are masked). - :type mask: Union[int, float], optional + :type mask: Union[int, float] = 9999 :param gpu_device: Specify the GPU device to be used. 
Default value is 0 (the first available GPU). Set the value to -1 to use the CPU. For other GPUs, use values 2 and above. - :type gpu_device: int, optional + :type gpu_device: int = 0 :param small_nc: Specify whether the output should be saved as short integers to reduce the size of the output file. Set the value to 1 to enable short integer conversion, or set it to 0 to save all variables as floats. Default value is 0. - :type small_nc: int, optional + :type small_nc: int = 0 :returns: This function does not return any value. :rtype: None -.. py:function:: main(selected_polygon_gdf: geopandas.GeoDataFrame, log_level: src.digitaltwin.utils.LogLevel = LogLevel.DEBUG) -> None +.. py:function:: main(selected_polygon_gdf: geopandas.GeoDataFrame, output_timestep: Union[int, float], end_time: Union[int, float], resolution: Optional[Union[int, float]] = None, mask: Union[int, float] = 9999, gpu_device: int = 0, small_nc: int = 0, log_level: src.digitaltwin.utils.LogLevel = LogLevel.DEBUG) -> None + + Generate BG-Flood model output for the requested catchment area, and incorporate the model output to GeoServer + for visualization. + + :param selected_polygon_gdf: A GeoDataFrame representing the selected polygon, i.e., the catchment area. + :type selected_polygon_gdf: gpd.GeoDataFrame + :param output_timestep: Time step between model outputs in seconds. If the value is set to 0 then no output is generated. + :type output_timestep: Union[int, float] + :param end_time: Time in seconds when the model stops. If the value is set to 0 then the model initializes but does not run. + :type end_time: Union[int, float] + :param resolution: The grid resolution in meters for metric grids, representing the size of each grid cell. + If not provided (default is None), the resolution of the Hydrologically conditioned DEM will be used as + the grid resolution. 
+ :type resolution: Optional[Union[int, float]] = None + :param mask: The mask value is used to remove blocks from computation where the topography elevation (zb) is greater than + the specified value. Default value is 9999.0 (no areas are masked). + :type mask: Union[int, float] = 9999 + :param gpu_device: Specify the GPU device to be used. Default value is 0 (the first available GPU). + Set the value to -1 to use the CPU. For other GPUs, use values 2 and above. + :type gpu_device: int = 0 + :param small_nc: Specify whether the output should be saved as short integers to reduce the size of the output file. + Set the value to 1 to enable short integer conversion, or set it to 0 to save all variables as floats. + Default value is 0. + :type small_nc: int = 0 + :param log_level: The log level to set for the root logger. Defaults to LogLevel.DEBUG. + The available logging levels and their corresponding numeric values are: + - LogLevel.CRITICAL (50) + - LogLevel.ERROR (40) + - LogLevel.WARNING (30) + - LogLevel.INFO (20) + - LogLevel.DEBUG (10) + - LogLevel.NOTSET (0) + :type log_level: LogLevel = LogLevel.DEBUG + + :returns: This function does not return any value. + :rtype: None .. py:data:: sample_polygon diff --git a/docs/_sources/autoapi/src/run_all/index.rst.txt b/docs/_sources/autoapi/src/run_all/index.rst.txt index 19549cead..2768fd25e 100644 --- a/docs/_sources/autoapi/src/run_all/index.rst.txt +++ b/docs/_sources/autoapi/src/run_all/index.rst.txt @@ -27,13 +27,37 @@ Attributes .. autoapisummary:: - src.run_all.module_to_log_level + src.run_all.DEFAULT_MODULES_TO_PARAMETERS + src.run_all.sample_polygon -.. py:function:: main(selected_polygon_gdf: geopandas.GeoDataFrame, modules_with_log_levels: Dict[types.ModuleType, src.digitaltwin.utils.LogLevel]) -> None +.. 
py:function:: main(selected_polygon_gdf: geopandas.GeoDataFrame, modules_to_parameters: Dict[types.ModuleType, Dict[str, Union[str, int, float, bool, None, enum.Enum]]]) -> None + Runs each module in the Digital Twin using the selected polygon and the defined parameters for each module's + main function. -.. py:data:: module_to_log_level + :param selected_polygon_gdf: A GeoDataFrame representing the selected polygon, i.e., the catchment area. + :type selected_polygon_gdf: gpd.GeoDataFrame + :param modules_to_parameters: A dictionary that associates each module with the parameters necessary for its main function, including the + option to set the log level for each module's root logger. + The available logging levels and their corresponding numeric values are: + - LogLevel.CRITICAL (50) + - LogLevel.ERROR (40) + - LogLevel.WARNING (30) + - LogLevel.INFO (20) + - LogLevel.DEBUG (10) + - LogLevel.NOTSET (0) + :type modules_to_parameters: Dict[ModuleType, Dict[str, Union[str, int, float, bool, None, Enum]]] + + :returns: This function does not return any value. + :rtype: None + + +.. py:data:: DEFAULT_MODULES_TO_PARAMETERS + + + +.. py:data:: sample_polygon diff --git a/docs/autoapi/index.html b/docs/autoapi/index.html index cc8bc72a6..7b62a3284 100644 --- a/docs/autoapi/index.html +++ b/docs/autoapi/index.html @@ -1,13 +1,15 @@ - +
- + -