From ef2a236162a913371d4f4d8fd714c7a93f48a5d1 Mon Sep 17 00:00:00 2001
From: Julie Prestopnik
Date: Tue, 17 Sep 2024 09:58:36 -0600
Subject: [PATCH 01/95] Feature 2679 user support (#2681)

* Per 2679, updating user support documentation

* Per #2679, fixing formatting error

* Update user_support.rst

* Update user_support.rst

---------

Co-authored-by: John Halley Gotway
---
 docs/Contributors_Guide/user_support.rst | 70 +++++++++++++++++-------
 1 file changed, 51 insertions(+), 19 deletions(-)

diff --git a/docs/Contributors_Guide/user_support.rst b/docs/Contributors_Guide/user_support.rst
index ecade65d1b..d504141c60 100644
--- a/docs/Contributors_Guide/user_support.rst
+++ b/docs/Contributors_Guide/user_support.rst
@@ -13,12 +13,12 @@ discussions and begin conversations in the right place.
 Support Responsibilities
 ========================
 
-Five staff members take turns monitoring Discussions each day of the week.
+Staff members take turns monitoring Discussions throughout the week.
 
-Each day, all support members should follow up on existing Discussions in
+All support members should follow up on existing Discussions in
 which they are assigned (i.e. tagged).
 
-The responsibilities for each daily assignee are described below.
+The responsibilities for each assignee are described below.
 
 
 Review the Tips/Tricks Page
@@ -62,11 +62,14 @@ Review the
 `unanswered discussions `_.
 
+Review `answered discussions that are unlocked `_.
+
 If there is any recent activity on an **answered** question that indicates
 the questioner is satisfied with the response, click on the button to **lock** the
@@ -95,6 +98,9 @@ Actions for Handling New Discussions
 
   * If you can answer it yourself, do so.
 
+  * If the question is due to unclear or insufficient documentation, please fix
+    the documentation and point the user to the updated version.
+
   * If you can answer it with help from someone, post to the User Support
     Slack channel of dtc-metplus.slack.com, asking for advice.
@@ -120,23 +126,49 @@ If an obvious answer exists and it is apparent the questioner is
 satisfied with the response, consider posting something like the following
 before locking the discussion:
 
-*At this point I'm going to LOCK this conversation to prevent future posts. Our team has decided that once discussions have been answered, and the answer has been confirmed, we'll lock them. We want to encourage users to ask new questions in new discussions rather than posting to old ones. Hopefully that'll make the questions/answers easier for other users to follow. So if/when more issues/questions arise, please feel free to start a new discussion.*
+*I'm going to lock and close this conversation to prevent future posts. Our
+team has decided that once discussions have been answered, we'll lock them to
+encourage users to ask new questions in new discussions rather than posting
+to old ones. Hopefully that'll make the questions/answers easier for other
+users to follow. So if/when more issues/questions arise, please feel free
+to start a new discussion.*
 
 If there is no answer selected and no indication that the questioner is
 satisfied with the response, follow up, requesting that the user mark
-the best answer so we can lock the discussion. The following text may need
-to be modified to match a specific situation, but this is a good starting
-point as an example:
-
-*I'd like to mark this discussion as being answered and "lock it" to prevent future posts. That's how we encourage users to create new discussions for new questions.
But I wanted to give you an opportunity to comment on it before doing so. Please feel free to select one of the responses as being the best answer to your original question. That'll help future users with similar questions find answers.*
-
-If following up on discussions where the user hasn't responded back in a while,
-consider posting something like the following before locking the discussion,
-filling in the MONTH and DAY below:
-
-*I wanted to check in with you about the initial discussion you submitted. We were able to provide some guidance for you on <MONTH DAY>, but haven't heard back if the suggested actions helped. If it did, or if you found a different way of accomplishing your goal, we'd appreciate hearing from you. If you have additional questions on this, we can try to find a solution; otherwise we'll choose an answer and lock this discussion due to inactivity.*
+the best answer so we can lock and close the discussion. The following
+text may need to be modified to match a specific situation, but this is
+a good starting point as an example:
+
+*I'd like to mark this discussion as being answered, and lock and close it,
+to prevent future posts. That's how we encourage users to create new
+discussions for new questions. But I wanted to give you an opportunity to
+comment on it before doing so. Please feel free to select one of the
+responses as being the best answer to your original question. That'll help
+future users with similar questions find answers.*
+
+or
+
+*I wanted to check in with you about the initial discussion you submitted.
+We were able to provide some guidance for you on <MONTH DAY>, but haven't
+heard back if the suggested actions helped. If it did, or if you found a
+different way of accomplishing your goal, we'd appreciate hearing from you.
+If you have additional questions on this, we can try to find a solution;
+otherwise we'll choose an answer, and lock and close this discussion due
+to inactivity.*
+
+If following up on discussions where the user hasn't responded back in two
+weeks, consider posting something like the following before locking and closing
+the discussion, filling in the MONTH and DAY below:
+
+*We were able to provide some guidance for you on <MONTH DAY>, regarding the
+initial discussion you submitted, but we haven't heard back if the suggested
+actions helped. It is our
+`policy `_ to lock and
+close discussions after two weeks of inactivity.*
 
 GitHub users with sufficient permissions in the METplus repository should see a
-"Lock conversation" link on the right side navigation of each Discussions page.
+"Lock conversation" link on the right side navigation of each Discussions page
+and a "Close discussion" button at the bottom of the page under the comment
+box.
From a8111897f9a7c4f1332f7d0094d458b53b227e68 Mon Sep 17 00:00:00 2001
From: Julie Prestopnik
Date: Thu, 19 Sep 2024 14:26:22 -0600
Subject: [PATCH 02/95] Feature 2452 update release guide (#2692)

* Per #2452, updating MET official release instructions

* Update to indicate the "DTC" web server machine

Co-authored-by: John Halley Gotway

* Updated language for tar file

Co-authored-by: John Halley Gotway

---------

Co-authored-by: John Halley Gotway
---
 docs/Release_Guide/met_official.rst                       | 1 +
 .../release_steps/met/update_tar_files_link_official.rst  | 8 ++++++++
 .../release_steps/met/update_version_official.rst         | 2 ++
 3 files changed, 11 insertions(+)
 create mode 100644 docs/Release_Guide/release_steps/met/update_tar_files_link_official.rst

diff --git a/docs/Release_Guide/met_official.rst b/docs/Release_Guide/met_official.rst
index 0a4549cdf5..34cefc7b23 100644
--- a/docs/Release_Guide/met_official.rst
+++ b/docs/Release_Guide/met_official.rst
@@ -21,6 +21,7 @@ Create a new vX.Y.Z official release from the develop branch.
 .. include:: release_steps/create_release_extra.rst
 .. include:: release_steps/met/update_dtc_website.rst
 .. include:: release_steps/finalize_release_on_github_official.rst
+.. include:: release_steps/met/update_tar_files_link_official.rst
 .. include:: release_steps/update_docs_official.rst
 .. include:: release_steps/met/update_zenodo.rst
 .. include:: release_steps/set_beta_deletion_reminder_official.rst
diff --git a/docs/Release_Guide/release_steps/met/update_tar_files_link_official.rst b/docs/Release_Guide/release_steps/met/update_tar_files_link_official.rst
new file mode 100644
index 0000000000..34a1083cfe
--- /dev/null
+++ b/docs/Release_Guide/release_steps/met/update_tar_files_link_official.rst
@@ -0,0 +1,8 @@
+Update tar_files Link
+^^^^^^^^^^^^^^^^^^^^^
+
+On the DTC web server machine (i.e. mohawk), in /d2/www/dtcenter/dfiles/code/METplus/MET/installation/, update
+the link for tar_files.latest.tgz to point to the latest version of the
+tar_files.met-base-vX.Y.tgz file (viewable at https://dtcenter.ucar.edu/dfiles/code/METplus/MET/installation/).
+
+
diff --git a/docs/Release_Guide/release_steps/met/update_version_official.rst b/docs/Release_Guide/release_steps/met/update_version_official.rst
index cd9f86fc1d..31f06d31cb 100644
--- a/docs/Release_Guide/release_steps/met/update_version_official.rst
+++ b/docs/Release_Guide/release_steps/met/update_version_official.rst
@@ -8,6 +8,8 @@ Update Version Number
 * If necessary, update the *met_version* variable in *src/basic/vx_util/util_constants.h* which defines the version number written to the MET output files.
 
 * In *docs/conf.py*, update the *version*, *release_year*, and *release_date* variables for the documentation.
+
+* In *docs/Users_Guide/installation.rst*, search for the X.Y version, replacing the current X.Y version with the official X.Y version, if necessary. Pay particular attention to the "Note" about the C++ standard and modify if necessary. The X.Y version number in the "Note" box should NOT change unless the default C++ standard changes.
* If necessary, update the version number listed in the MET configuration files:

From 81aed77128db11797c825448e8deaa8a53b6e8dc Mon Sep 17 00:00:00 2001
From: Julie Prestopnik
Date: Mon, 23 Sep 2024 10:53:41 -0600
Subject: [PATCH 03/95] Updating release schedule

---
 docs/Users_Guide/release-notes.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/Users_Guide/release-notes.rst b/docs/Users_Guide/release-notes.rst
index 752d1e87b6..2c90caf871 100644
--- a/docs/Users_Guide/release-notes.rst
+++ b/docs/Users_Guide/release-notes.rst
@@ -19,7 +19,7 @@ is broken down into the following development cycles for each component:
 3. **Beta3** releases for the METplus components occurred around 2024-02-08.
 4. **Beta4** releases for the METplus components occurred around 2024-04-17.
 5. **Beta5** releases for the METplus components occurred around 2024-07-10.
-6. **Beta6** releases are tentatively scheduled for 2024-10-02.
+6. **Beta6** releases are tentatively scheduled for 2024-10-16.
 7. **Release Candidate 1** releases are tentatively scheduled for 2024-11-13.
 8. **Official Release** releases are tentatively scheduled for 2024-12-11.

From 3d752d17c145d7addec5e5d6379eca3f89350480 Mon Sep 17 00:00:00 2001
From: George McCabe <23407799+georgemccabe@users.noreply.github.com>
Date: Mon, 23 Sep 2024 15:52:00 -0600
Subject: [PATCH 04/95] Feature #2612 forecast lead groups (#2698)

* Refactor to prevent calling sys.exit from functions that could cause the
calling script to exit unintentionally. Instead return None or False and
only sys.exit a non-zero value from a script that is called explicitly.
Refactored validate config logic to reduce cognitive complexity to satisfy
SonarQube

* per #2596, ensure the same RUN_ID is set for a given METplus run even if
instances are used in the PROCESS_LIST

* fixed linter complaints

* remove unused imports

* cleanup for SonarQube and linter

* remove unused imports

* import parent wrappers directly so that PyCharm can follow class hierarchy

* per #2612, add support for processing groups of forecast leads

* remove call to prune_empty function and ci-run-all-diff

* fixed check on DataFrame to properly check for an empty one

* remove prune_empty function that is no longer used

* update conda environment requirements for metdataio

* set label to Group<n> for LEAD_SEQ_<n> if LEAD_SEQ_<n>_LABEL is not set
(instead of erroring), use forecast lead label for lead if set, log lead
group label and formatted list of leads for each run

* increase timeout for request from 60 to 90

* handle exception and output an error message with a suggestion to rerun
the failed GHA jobs when a hiccup with DockerHub prevents the list of
available Docker data volumes from being obtained.
FYI @jprestop * cleanup to address SonarQube complaints, move call to exit out of main function so a script calling the function does not exit when the function errors * cleanup to appease SonarQube * per #2612, add support for creating groups of forecast leads by specifying a list of forecast leads and a time interval to create groups of leads * per #2612, add unit tests for function that gets groups of forecast leads and added tests for new groups capabilities * various refactoring to satisfy SonarQube complaints * renamed config LEAD_SEQ_DIVISIONS to LEAD_SEQ_GROUP_SIZE and LEAD_SEQ_DIVISIONS_LABEL to LEAD_SEQ_GROUP_LABEL * per #2612, add documentation for new config variables including examples * fix empty check for DataFrame * fix rst typo * minor cleanup to satisfy linter * Per #2612, if runtime freq is RUN_ONCE_PER_LEAD and groups of forecast leads are specified, then the wrapper will be run once per lead GROUP instead of once for each unique lead in the lead groups. Update unit test to reflect new behavior --- .github/jobs/docker_utils.py | 2 +- .github/jobs/get_data_volumes.py | 17 +- .github/jobs/setup_and_run_use_cases.py | 81 ++++---- docs/Users_Guide/glossary.rst | 21 +- docs/Users_Guide/systemconfiguration.rst | 63 +++++- .../docker_env/scripts/metdataio_env.sh | 3 +- .../config_validate/test_config_validate.py | 6 +- .../util/system_util/test_system_util.py | 23 --- .../util/time_looping/test_time_looping.py | 181 +++++++++++------ .../wrappers/user_script/test_user_script.py | 10 +- metplus/util/config_metplus.py | 27 ++- metplus/util/config_validate.py | 54 +++--- metplus/util/run_util.py | 2 + metplus/util/system_util.py | 27 --- metplus/util/time_looping.py | 108 ++++++++--- metplus/util/time_util.py | 91 ++++----- metplus/wrappers/__init__.py | 27 ++- metplus/wrappers/compare_gridded_wrapper.py | 2 +- metplus/wrappers/cyclone_plotter_wrapper.py | 7 +- metplus/wrappers/example_wrapper.py | 3 +- metplus/wrappers/extract_tiles_wrapper.py | 8 +- metplus/wrappers/gempak_to_cf_wrapper.py | 3 +- metplus/wrappers/gen_ens_prod_wrapper.py | 4 +- metplus/wrappers/gen_vx_mask_wrapper.py | 4 +- metplus/wrappers/grid_diag_wrapper.py | 2 +- metplus/wrappers/loop_times_wrapper.py | 1 - metplus/wrappers/mode_wrapper.py | 2 +- metplus/wrappers/plot_data_plane_wrapper.py | 3 +- metplus/wrappers/plot_point_obs_wrapper.py | 3 +- metplus/wrappers/py_embed_ingest_wrapper.py | 4 +- metplus/wrappers/runtime_freq_wrapper.py | 182 +++++++++++------- ush/validate_config.py | 128 ++++++------ 32 files changed, 624 insertions(+), 475 deletions(-) diff --git a/.github/jobs/docker_utils.py b/.github/jobs/docker_utils.py index edf0b00167..b5a102d99d 100644 --- a/.github/jobs/docker_utils.py +++ b/.github/jobs/docker_utils.py @@ -39,7 +39,7 @@ def get_dockerhub_url(branch_name): def docker_get_volumes_last_updated(current_branch): import requests dockerhub_url = get_dockerhub_url(current_branch) - dockerhub_request = requests.get(dockerhub_url, timeout=60) + dockerhub_request = requests.get(dockerhub_url, timeout=90) if dockerhub_request.status_code != 200: print(f"Could not find DockerHub URL: {dockerhub_url}") return None diff --git a/.github/jobs/get_data_volumes.py b/.github/jobs/get_data_volumes.py index e492711710..cbf3a4e01d 100755 --- a/.github/jobs/get_data_volumes.py +++ b/.github/jobs/get_data_volumes.py @@ -7,8 +7,6 @@ import sys import os -import subprocess -import shlex from docker_utils import docker_get_volumes_last_updated, get_branch_name from docker_utils import get_data_repo, 
DOCKERHUB_METPLUS_DATA_DEV @@ -54,7 +52,12 @@ def main(args): branch_name = branch_name[5:] # get all docker data volumes associated with current branch - available_volumes = docker_get_volumes_last_updated(branch_name).keys() + try: + available_volumes = docker_get_volumes_last_updated(branch_name).keys() + except AttributeError: + print("ERROR: Could not get available Docker data volumes " + f"for {branch_name}. Try rerunning failed jobs in GitHub Actions") + return None # loop through arguments and get data volume for each category for model_app_name in args: @@ -78,7 +81,7 @@ def main(args): volume_name = f'{prefix}-{model_app_name}' # add output- to model app name - model_app_name=f'output-{model_app_name}' + model_app_name = f'output-{model_app_name}' # set DockerHub repo to dev version because all output data # should be in dev repository @@ -108,11 +111,11 @@ def main(args): if __name__ == "__main__": # split up command line args that have commas before passing into main - args = [] + arg_list = [] for arg in sys.argv[1:]: - args.extend(arg.split(',')) - out = main(args) + arg_list.extend(arg.split(',')) + out = main(arg_list) if out is None: print("ERROR: Something went wrong") sys.exit(1) diff --git a/.github/jobs/setup_and_run_use_cases.py b/.github/jobs/setup_and_run_use_cases.py index a2e55f8fa0..37b47537f6 100755 --- a/.github/jobs/setup_and_run_use_cases.py +++ b/.github/jobs/setup_and_run_use_cases.py @@ -63,7 +63,7 @@ def main(): # use BuildKit to build image os.environ['DOCKER_BUILDKIT'] = '1' - isOK = True + is_ok = True failed_use_cases = [] for setup_commands, use_case_commands, requirements in all_commands: # get environment image tag @@ -79,45 +79,38 @@ def main(): f"-f {DOCKERFILE_DIR}/{dockerfile_name} ." ) - print(f'Building Docker environment/branch image...') + print('Building Docker environment/branch image...') if not run_commands(docker_build_cmd): - isOK = False + is_ok = False continue - commands = [] - - # print list of existing docker images - commands.append('docker images') - + # print list of existing docker images, # remove docker image after creating run env or prune untagged images - commands.append(f'docker image rm dtcenter/metplus-dev:{branch_name} -f') - commands.append('docker image prune -f') - run_commands(commands) - - commands = [] - - # list docker images again after removal - commands.append('docker images') - - # start interactive container in the background - commands.append( - f"docker run -d --rm -it -e GITHUB_WORKSPACE " - f"--name {RUN_TAG} " - f"{os.environ.get('NETWORK_ARG', '')} " - f"{' '.join(VOLUME_MOUNTS)} " - f"{volumes_from} --workdir {GITHUB_WORKSPACE} " - f'{RUN_TAG} bash' - ) - - # list running containers - commands.append('docker ps -a') - - # execute setup commands in running docker container - commands.append(_format_docker_exec_command(setup_commands)) + run_commands([ + 'docker images', + f'docker image rm dtcenter/metplus-dev:{branch_name} -f', + 'docker image prune -f', + ]) + + # list docker images again after removal, + # start interactive container in the background, + # list running containers, + # then execute setup commands in running docker container + commands = [ + 'docker images', + (f"docker run -d --rm -it -e GITHUB_WORKSPACE " + f"--name {RUN_TAG} " + f"{os.environ.get('NETWORK_ARG', '')} " + f"{' '.join(VOLUME_MOUNTS)} " + f"{volumes_from} --workdir {GITHUB_WORKSPACE} " + f'{RUN_TAG} bash'), + 'docker ps -a', + _format_docker_exec_command(setup_commands), + ] # run docker commands and skip running 
cases if something went wrong
         if not run_commands(commands):
-            isOK = False
+            is_ok = False
 
             # force remove container if setup step fails
             run_commands(f'docker rm -f {RUN_TAG}')
@@ -132,7 +125,7 @@ def main():
         for use_case_command in use_case_commands:
             if not run_commands(_format_docker_exec_command(use_case_command)):
                 failed_use_cases.append(use_case_command)
-                isOK = False
+                is_ok = False
 
         # print bashrc file to see what was added by setup commands
         # then force remove container to stop and remove it
@@ -140,15 +133,20 @@ def main():
             _format_docker_exec_command('cat /root/.bashrc'),
             f'docker rm -f {RUN_TAG}',
         ]):
-            isOK = False
+            is_ok = False
 
     # print summary of use cases that failed
-    for failed_use_case in failed_use_cases:
-        print(f'ERROR: Use case failed: {failed_use_case}')
+    _print_failed_use_cases(failed_use_cases)
 
-    if not isOK:
+    if not is_ok:
         print("ERROR: Some commands failed.")
-        sys.exit(1)
+
+    return is_ok
+
+
+def _print_failed_use_cases(failed_use_cases):
+    for failed_use_case in failed_use_cases:
+        print(f'ERROR: Use case failed: {failed_use_case}')
 
 
 def _format_docker_exec_command(command):
@@ -201,4 +199,7 @@ def _get_dockerfile_name(requirements):
 
 
 if __name__ == '__main__':
-    main()
+    ret = main()
+    # exit non-zero if anything went wrong
+    if not ret:
+        sys.exit(1)
diff --git a/docs/Users_Guide/glossary.rst b/docs/Users_Guide/glossary.rst
index f351df3f53..bb921251fc 100644
--- a/docs/Users_Guide/glossary.rst
+++ b/docs/Users_Guide/glossary.rst
@@ -2244,12 +2244,25 @@ METplus Configuration Glossary
    LEAD_SEQ_<n>
      Specify the sequence of forecast lead times to include in the analysis. Comma separated list format, e.g.:0, 6, 12. <n> corresponds to the bin in which the user wishes to aggregate series by lead results.
 
-     | *Used by:* SeriesAnalysis
+     | *Used by:* All
 
    LEAD_SEQ_<n>_LABEL
-     Required when SERIES_BY_LEAD_GROUP_FCSTS=True. Specify the label of the corresponding bin of series by lead results.
+     Specify the label for the :term:`LEAD_SEQ_\<n\>` group of forecast leads.
 
-     | *Used by:* SeriesAnalysis
+     | *Used by:* All
+
+   LEAD_SEQ_GROUP_SIZE
+     Defines the size of forecast lead groups to create from :term:`LEAD_SEQ`.
+     See :ref:`grouping_forecast_leads` for more information.
+
+     | *Used by:* All
+
+   LEAD_SEQ_GROUP_LABEL
+     Defines the label to apply for each forecast lead group that is created
+     using :term:`LEAD_SEQ` and :term:`LEAD_SEQ_GROUP_SIZE`.
+     See :ref:`grouping_forecast_leads` for more information.
+
+     | *Used by:* All
 
    LINE_TYPE
      .. warning:: **DEPRECATED:** Please use :term:`LINE_TYPE_LIST` instead.
@@ -3545,7 +3558,7 @@ METplus Configuration Glossary
      .. warning:: **DEPRECATED:** Please use :term:`LEAD_SEQ_\<n\>` and :term:`SERIES_ANALYSIS_RUNTIME_FREQ` instead.
 
    SERIES_BY_LEAD_GROUP_FCSTS
-     .. warning:: **DEPRECATED:** Please use :term:`SERIES_ANALYSIS_GROUP_FCSTS` instead.
+     .. warning:: **DEPRECATED:** Please use :term:`LEAD_SEQ_\<n\>` and :term:`SERIES_ANALYSIS_RUNTIME_FREQ` instead.
 
    SERIES_INIT_FILTERED_OUT_DIR
      .. warning:: **DEPRECATED:** Please use :term:`SERIES_ANALYSIS_FILTERED_OUTPUT_DIR` instead.
diff --git a/docs/Users_Guide/systemconfiguration.rst b/docs/Users_Guide/systemconfiguration.rst
index bca6adb1db..cdffb1ffc6 100644
--- a/docs/Users_Guide/systemconfiguration.rst
+++ b/docs/Users_Guide/systemconfiguration.rst
@@ -795,11 +795,16 @@ is equivalent to setting::
 
     [config]
     LEAD_SEQ = 0, 3, 6, 9, 12
 
+.. 
_grouping_forecast_leads:
+
+Grouping Forecast Leads
+"""""""""""""""""""""""
+
 Grouping forecast leads is possible as well using a special version of
-the :term:`LEAD_SEQ` variable for the
-**SeriesByLead Wrapper Only**.
-If :term:`SERIES_BY_LEAD_GROUP_FCSTS` = True, then groups of
-forecast leads can be defined to be evaluated together.
+the :term:`LEAD_SEQ` variable.
+If {APP_NAME}_RUNTIME_FREQ, e.g. SERIES_ANALYSIS_RUNTIME_FREQ, is set to
+**RUN_ONCE_PER_INIT_OR_VALID**,
+then groups of forecast leads can be defined to be evaluated together.
 Any number of these groups can be defined by setting configuration variables
 LEAD_SEQ_1, LEAD_SEQ_2, ..., :term:`LEAD_SEQ_\<n\>`. The value can be defined with a
@@ -807,13 +812,61 @@ comma-separated list of integers (currently only hours are supported here)
 or using :ref:`begin_end_incr`. Each :term:`LEAD_SEQ_\<n\>` must have a
 corresponding variable :term:`LEAD_SEQ_\<n\>_LABEL`. For example::
 
-    [config]
     LEAD_SEQ_1 = 0, 6, 12, 18
     LEAD_SEQ_1_LABEL = Day1
     LEAD_SEQ_2 = begin_end_incr(24,42,6)
     LEAD_SEQ_2_LABEL = Day2
 
+In this example, the label **Day1** will be used for 0, 6, 12, 18 and
+the label **Day2** will be used for 24, 30, 36, 42.
+
+Forecast leads can also be grouped by defining a single list of forecast leads
+with :term:`LEAD_SEQ`, then specifying the size of each group using
+:term:`LEAD_SEQ_GROUP_SIZE`. For example::
+
+    [config]
+    LEAD_SEQ = 0, 12, 24, 36
+    LEAD_SEQ_GROUP_SIZE = 1d
+
+This configuration will create groups of forecast leads that each span 1 day.
+This is the equivalent of setting::
+
+    [config]
+    LEAD_SEQ_1 = 0, 12
+    LEAD_SEQ_2 = 24, 36
+
+Each group will be labeled Group<n> where <n> is the group number.
+In this example, the label **Group1** will be used for 0, 12 and
+the label **Group2** will be used for 24, 36.
+The label can be referenced in filename templates using {label}.
+
+To change the text "Group" to something else, set :term:`LEAD_SEQ_GROUP_LABEL`.
+Setting::
+
+    LEAD_SEQ_GROUP_LABEL = Day
+
+will label the groups **Day1** and **Day2**.
+
+:term:`LEAD_SEQ_\<n\>_LABEL` can also be used to change the label for a specific
+group. From the previous example, setting::
+
+    LEAD_SEQ_2_LABEL = SecondDay
+
+will label the groups **Day1** and **SecondDay**.
+
+If the list of forecast leads contains a gap where there are no leads that fall
+within a given group, that group will be skipped. For example::
+
+    [config]
+    LEAD_SEQ = 0, 12, 48, 60
+    LEAD_SEQ_GROUP_SIZE = 1d
+    LEAD_SEQ_GROUP_LABEL = Day
+
+The label **Day1** will be used for 0, 12 and
+the label **Day3** will be used for 48, 60.
+Notice that a **Day2** label is not created.
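+
+As a minimal sketch only (the ``group_leads`` helper below is hypothetical
+and not part of METplus), this grouping behavior can be illustrated in
+Python, assuming integer forecast lead hours and a group size in hours::
+
+    from dateutil.relativedelta import relativedelta
+
+    def group_leads(lead_hours, size_hours, label="Group"):
+        """Partition forecast lead hours into labeled bins of size_hours."""
+        groups = {}
+        for lead in sorted(lead_hours):
+            index = lead // size_hours + 1  # 1-based bin number
+            groups.setdefault(f"{label}{index}", []).append(
+                relativedelta(hours=lead))
+        return groups
+
+    # LEAD_SEQ = 0, 12, 48, 60 with LEAD_SEQ_GROUP_SIZE = 1d (24 hours)
+    groups = group_leads([0, 12, 48, 60], 24, label="Day")
+    # keys are "Day1" and "Day3"; no leads fall in the "Day2" window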
+ :term:`INIT_SEQ` """""""""""""""" diff --git a/internal/scripts/docker_env/scripts/metdataio_env.sh b/internal/scripts/docker_env/scripts/metdataio_env.sh index a0eac3a869..012f64f352 100755 --- a/internal/scripts/docker_env/scripts/metdataio_env.sh +++ b/internal/scripts/docker_env/scripts/metdataio_env.sh @@ -23,4 +23,5 @@ BASE_ENV=metplus_base.${METPLUS_VERSION} mamba create -y --clone ${BASE_ENV} --name ${ENV_NAME} -mamba install -y --name ${ENV_NAME} -c conda-forge lxml==4.9.1 pymysql==1.0.2 pandas==1.5.1 +#mamba install -y --name ${ENV_NAME} -c conda-forge lxml==4.9.1 pymysql==1.0.2 pandas==1.5.1 +mamba install -y --name ${ENV_NAME} -c conda-forge pymysql==1.1.0 pyyaml==6.0 "xarray>=2023.1.0" lxml==4.9.1 netcdf4==1.6.2 diff --git a/internal/tests/pytests/util/config_validate/test_config_validate.py b/internal/tests/pytests/util/config_validate/test_config_validate.py index f93adbc14b..aa2ffe16fa 100644 --- a/internal/tests/pytests/util/config_validate/test_config_validate.py +++ b/internal/tests/pytests/util/config_validate/test_config_validate.py @@ -104,9 +104,9 @@ def test_is_var_item_valid_levels(metplus_config, item_list, configs_to_set, is_ @pytest.mark.parametrize( 'met_config_file, expected', [ - ('GridStatConfig_good', (True, [])), - ('GridStatConfig_bad', (False, [])), - ('GridStatConfig_fake', (False, [])), + ('GridStatConfig_good', True), + ('GridStatConfig_bad', False), + ('GridStatConfig_fake', False), ] ) @pytest.mark.util diff --git a/internal/tests/pytests/util/system_util/test_system_util.py b/internal/tests/pytests/util/system_util/test_system_util.py index ed370f2e1b..cf239cbbf5 100644 --- a/internal/tests/pytests/util/system_util/test_system_util.py +++ b/internal/tests/pytests/util/system_util/test_system_util.py @@ -228,29 +228,6 @@ def test_write_list_to_file(tmp_path_factory): assert actual == '\n'.join(output_list) + '\n' -@pytest.mark.util -def test_prune_empty(tmp_path_factory): - prune_dir = tmp_path_factory.mktemp('prune') - - dir1 = prune_dir / 'empty_file_dir' - dir2 = prune_dir / 'not_empty_file_dir' - dir3 = prune_dir / 'empty_dir' - for d in [dir1, dir2, dir3]: - os.makedirs(d) - - # make two files, one empty one not. 
- open(os.path.join(dir1, 'empty.txt'), 'a').close() - file_with_content = os.path.join(dir2, 'not_empty.txt') - with open(file_with_content, 'w') as f: - f.write('Fee fi fo fum.') - - su.prune_empty(prune_dir, mock.Mock()) - - assert not os.path.exists(dir1) - assert os.path.exists(file_with_content) - assert not os.path.exists(dir3) - - @pytest.mark.parametrize( 'regex, expected', [ (r'\d', ['bigfoot/123.txt', 'yeti/234.txt']), diff --git a/internal/tests/pytests/util/time_looping/test_time_looping.py b/internal/tests/pytests/util/time_looping/test_time_looping.py index 53689acce0..90159f142b 100644 --- a/internal/tests/pytests/util/time_looping/test_time_looping.py +++ b/internal/tests/pytests/util/time_looping/test_time_looping.py @@ -7,15 +7,93 @@ from metplus.util.time_util import ti_calculate, ti_get_hours_from_relativedelta +@pytest.mark.parametrize( + 'config_dict, expected_output', [ + # 1 group + ({'LEAD_SEQ_1': "0, 1, 2, 3", + 'LEAD_SEQ_1_LABEL': 'Day1', + }, {'Day1': [relativedelta(), relativedelta(hours=+1), relativedelta(hours=+2), relativedelta(hours=+3)]}), + # 2 groups, no overlap + ({'LEAD_SEQ_1': "0, 1, 2, 3", + 'LEAD_SEQ_1_LABEL': 'Day1', + 'LEAD_SEQ_2': "8, 9, 10, 11", + 'LEAD_SEQ_2_LABEL': 'Day2', + }, {'Day1': [relativedelta(), relativedelta(hours=+1), relativedelta(hours=+2), relativedelta(hours=+3)], + 'Day2': [relativedelta(hours=+8), relativedelta(hours=+9), relativedelta(hours=+10), + relativedelta(hours=+11)]}), + # 2 groups, overlap + ({'LEAD_SEQ_1': "0, 1, 2, 3", + 'LEAD_SEQ_1_LABEL': 'Day1', + 'LEAD_SEQ_2': "3, 4, 5, 6", + 'LEAD_SEQ_2_LABEL': 'Day2', + }, {'Day1': [relativedelta(), relativedelta(hours=+1), relativedelta(hours=+2), relativedelta(hours=+3)], + 'Day2': [relativedelta(hours=+3), relativedelta(hours=+4), relativedelta(hours=+5), + relativedelta(hours=+6)]}), + # 2 groups, no overlap, out of order + ({'LEAD_SEQ_1': "8, 9, 10, 11", + 'LEAD_SEQ_1_LABEL': 'Day2', + 'LEAD_SEQ_2': "0, 1, 2, 3", + 'LEAD_SEQ_2_LABEL': 'Day1', + }, {'Day2': [relativedelta(hours=+8), relativedelta(hours=+9), relativedelta(hours=+10), + relativedelta(hours=+11)], + 'Day1': [relativedelta(), relativedelta(hours=+1), relativedelta(hours=+2), relativedelta(hours=+3)]}), + # 2 groups, overlap, out of order + ({'LEAD_SEQ_1': "3, 4, 5, 6", + 'LEAD_SEQ_1_LABEL': 'Day2', + 'LEAD_SEQ_2': "0, 1, 2, 3", + 'LEAD_SEQ_2_LABEL': 'Day1', + }, {'Day2': [relativedelta(hours=+3), relativedelta(hours=+4), relativedelta(hours=+5), + relativedelta(hours=+6)], + 'Day1': [relativedelta(), relativedelta(hours=+1), relativedelta(hours=+2), relativedelta(hours=+3)]}), + # divisions without labels + ({'LEAD_SEQ': "begin_end_incr(0,36,12)", 'LEAD_SEQ_GROUP_SIZE': "1d"}, + {'Group1': [relativedelta(), relativedelta(hours=+12)], + 'Group2': [relativedelta(days=+1), relativedelta(days=+1, hours=+12)]}), + # divisions with divisions label + ({'LEAD_SEQ': "begin_end_incr(0,36,12)", 'LEAD_SEQ_GROUP_SIZE': "1d", 'LEAD_SEQ_GROUP_LABEL': 'Day'}, + {'Day1': [relativedelta(), relativedelta(hours=+12)], + 'Day2': [relativedelta(days=+1), relativedelta(days=+1, hours=+12)]}), + # divisions with explicit labels + ({'LEAD_SEQ': "begin_end_incr(0,36,12)", 'LEAD_SEQ_GROUP_SIZE': "1d", + 'LEAD_SEQ_1_LABEL': 'One', 'LEAD_SEQ_2_LABEL': 'Two'}, + {'One': [relativedelta(), relativedelta(hours=+12)], + 'Two': [relativedelta(days=+1), relativedelta(days=+1, hours=+12)]}), + # divisions with one explicit label, one no label + ({'LEAD_SEQ': "begin_end_incr(0,36,12)", 'LEAD_SEQ_GROUP_SIZE': "1d", 'LEAD_SEQ_1_LABEL': 'One'}, + 
{'One': [relativedelta(), relativedelta(hours=+12)], + 'Group2': [relativedelta(days=+1), relativedelta(days=+1, hours=+12)]}), + # divisions with one explicit label, one division label + ({'LEAD_SEQ': "begin_end_incr(0,36,12)", 'LEAD_SEQ_GROUP_SIZE': "1d", + 'LEAD_SEQ_1_LABEL': 'One', 'LEAD_SEQ_GROUP_LABEL': 'Day'}, + {'One': [relativedelta(), relativedelta(hours=+12)], + 'Day2': [relativedelta(days=+1), relativedelta(days=+1, hours=+12)]}), + # divisions with skipped index + ({'LEAD_SEQ': "0, 12, 48, 60", 'LEAD_SEQ_GROUP_SIZE': "1d", 'LEAD_SEQ_GROUP_LABEL': 'Day'}, + {'Day1': [relativedelta(), relativedelta(hours=+12)], + 'Day3': [relativedelta(days=+2), relativedelta(days=+2, hours=+12)]}), + ] +) +@pytest.mark.util +def test_get_lead_sequence_groups(metplus_config, config_dict, expected_output): + config = metplus_config + for key, value in config_dict.items(): + config.set('config', key, value) + + assert tl.get_lead_sequence_groups(config) == expected_output + + @pytest.mark.parametrize( 'run_time, skip_times, inc_times, expected_result', [ (datetime(2019, 12, 30), {'%d': ['30', '31']}, None, True), (datetime(2019, 12, 30), {'%d': ['29', '31']}, None, False), (datetime(2019, 2, 27), {'%m': ['3', '4', '5', '6', '7', '8', '9', '10', '11']}, None, False), (datetime(2019, 3, 30), {'%m': ['3', '4', '5', '6', '7', '8', '9', '10', '11']}, None, True), - (datetime(2019, 3, 30), {'%d': ['30', '31'], '%m': ['3', '4', '5', '6', '7', '8', '9', '10', '11']}, None, True), - (datetime(2019, 3, 29), {'%d': ['30', '31'], '%m': ['3', '4', '5', '6', '7', '8', '9', '10', '11']}, None, True), - (datetime(2019, 1, 29), {'%d': ['30', '31'], '%m': ['3', '4', '5', '6', '7', '8', '9', '10', '11']}, None, False), + (datetime(2019, 3, 30), + {'%d': ['30', '31'], '%m': ['3', '4', '5', '6', '7', '8', '9', '10', '11']}, None, True), + (datetime(2019, 3, 29), + {'%d': ['30', '31'], '%m': ['3', '4', '5', '6', '7', '8', '9', '10', '11']}, None, True), + (datetime(2019, 1, 29), + {'%d': ['30', '31'], '%m': ['3', '4', '5', '6', '7', '8', '9', '10', '11']}, None, False), (datetime(2020, 10, 31), {'%Y%m%d': ['20201031']}, None, True), (datetime(2020, 3, 31), {'%Y%m%d': ['20201031']}, None, False), (datetime(2020, 10, 30), {'%Y%m%d': ['20201031']}, None, False), @@ -46,6 +124,7 @@ def test_skip_time(run_time, skip_times, inc_times, expected_result): print(time_info) assert tl.skip_time(time_info, c_dict) == expected_result + @pytest.mark.parametrize( 'inc_init_times, skip_init_times, inc_valid_times, skip_valid_times, expected_result', [ # nothing set @@ -90,7 +169,7 @@ def test_skip_time(run_time, skip_times, inc_times, expected_result): (None, {'%m': ['12']}, None, {'%d': ['29', '30']}, True), # include/skip init/valid ({'%m': ['12']}, {'%d': ['29', '31']}, {'%m': ['11', '12']}, {'%d': ['29', '30']}, False), - ({'%m': ['10,' '11']}, {'%d': ['29', '31']}, {'%m': ['11', '12']}, {'%d': ['29', '30']}, True), + ({'%m': ['10', '11']}, {'%d': ['29', '31']}, {'%m': ['11', '12']}, {'%d': ['29', '30']}, True), ({'%m': ['12']}, {'%d': ['29', '30']}, {'%m': ['11', '12']}, {'%d': ['29', '30']}, True), ({'%m': ['12']}, {'%d': ['29', '31']}, {'%m': ['10', '11']}, {'%d': ['29', '30']}, True), ({'%m': ['12']}, {'%d': ['29', '31']}, {'%m': ['11', '12']}, {'%d': ['29', '31']}, True), @@ -107,15 +186,15 @@ def test_skip_time_init_and_valid(inc_init_times, skip_init_times, inc_valid_tim @pytest.mark.util def test_skip_time_no_valid(): - input_dict ={'init': datetime(2019, 1, 29)} - assert tl.skip_time(input_dict, {'SKIP_VALID_TIMES': {'%Y': 
['2019']}}) == False + input_dict = {'init': datetime(2019, 1, 29)} + assert not tl.skip_time(input_dict, {'SKIP_VALID_TIMES': {'%Y': ['2019']}}) @pytest.mark.parametrize( 'skip_times_conf, expected_dict', [ - ('"%d:30,31"', {'%d': ['30','31']}), + ('"%d:30,31"', {'%d': ['30', '31']}), ('"%m:begin_end_incr(3,11,1)"', {'%m': ['3', '4', '5', '6', '7', '8', '9', '10', '11']}), - ('"%d:30,31", "%m:begin_end_incr(3,11,1)"', {'%d': ['30','31'], + ('"%d:30,31", "%m:begin_end_incr(3,11,1)"', {'%d': ['30', '31'], '%m': ['3', '4', '5', '6', '7', '8', '9', '10', '11']}), ('"%Y%m%d:20201031"', {'%Y%m%d': ['20201031']}), ('"%Y%m%d:20201031", "%Y:2019"', {'%Y%m%d': ['20201031'], @@ -124,52 +203,26 @@ def test_skip_time_no_valid(): ] ) @pytest.mark.util -def test_get_skip_times(metplus_config, skip_times_conf, expected_dict): - conf = metplus_config - conf.set('config', 'SKIP_VALID_TIMES', skip_times_conf) - - assert tl.get_skip_times(conf, 'SKIP', 'VALID', 'T') == expected_dict - - -@pytest.mark.parametrize( - 'skip_times_conf, expected_dict', [ - ('"%d:30,31"', {'%d': ['30','31']}), - ('"%m:begin_end_incr(3,11,1)"', {'%m': ['3', '4', '5', '6', '7', '8', '9', '10', '11']}), - ('"%d:30,31", "%m:begin_end_incr(3,11,1)"', {'%d': ['30','31'], - '%m': ['3', '4', '5', '6', '7', '8', '9', '10', '11']}), - ('"%Y%m%d:20201031"', {'%Y%m%d': ['20201031']}), - ('"%Y%m%d:20201031", "%Y:2019"', {'%Y%m%d': ['20201031'], - '%Y': ['2019']}), - ] -) -@pytest.mark.util -def test_get_skip_times_wrapper(metplus_config, skip_times_conf, expected_dict): - conf = metplus_config +class TestSkipTimes: - # set wrapper specific skip times, then ensure it is found - conf.set('config', 'GRID_STAT_SKIP_VALID_TIMES', skip_times_conf) - assert tl.get_skip_times(conf, 'SKIP', 'VALID', 'grid_stat') == expected_dict + def test_get_skip_times(self, metplus_config, skip_times_conf, expected_dict): + conf = metplus_config + conf.set('config', 'SKIP_VALID_TIMES', skip_times_conf) + assert tl.get_skip_times(conf, 'SKIP', 'VALID', 'T') == expected_dict + def test_get_skip_times_wrapper(self, metplus_config, skip_times_conf, expected_dict): + conf = metplus_config + # set wrapper specific skip times, then ensure it is found + conf.set('config', 'GRID_STAT_SKIP_VALID_TIMES', skip_times_conf) + assert tl.get_skip_times(conf, 'SKIP', 'VALID', 'grid_stat') == expected_dict -@pytest.mark.parametrize( - 'skip_times_conf, expected_dict', [ - ('"%d:30,31"', {'%d': ['30','31']}), - ('"%m:begin_end_incr(3,11,1)"', {'%m': ['3', '4', '5', '6', '7', '8', '9', '10', '11']}), - ('"%d:30,31", "%m:begin_end_incr(3,11,1)"', {'%d': ['30','31'], - '%m': ['3', '4', '5', '6', '7', '8', '9', '10', '11']}), - ('"%Y%m%d:20201031"', {'%Y%m%d': ['20201031']}), - ('"%Y%m%d:20201031", "%Y:2019"', {'%Y%m%d': ['20201031'], - '%Y': ['2019']}), - ] -) -@pytest.mark.util -def test_get_skip_times_wrapper_not_used(metplus_config, skip_times_conf, expected_dict): - conf = metplus_config + def test_get_skip_times_wrapper_not_used(self, metplus_config, skip_times_conf, expected_dict): + conf = metplus_config - # set generic SKIP_TIMES, then request grid_stat to ensure it uses generic - conf.set('config', 'SKIP_VALID_TIMES', skip_times_conf) + # set generic SKIP_TIMES, then request grid_stat to ensure it uses generic + conf.set('config', 'SKIP_VALID_TIMES', skip_times_conf) - assert tl.get_skip_times(conf, 'SKIP', 'VALID', 'grid_stat') == expected_dict + assert tl.get_skip_times(conf, 'SKIP', 'VALID', 'grid_stat') == expected_dict @pytest.mark.util @@ -244,7 +297,7 @@ def 
test_time_generator_list(metplus_config): next(generator) assert False except StopIteration: - assert True + pass @pytest.mark.util @@ -271,7 +324,7 @@ def test_time_generator_increment(metplus_config): next(generator) assert False except StopIteration: - assert True + pass @pytest.mark.parametrize( @@ -392,21 +445,23 @@ def test_get_lead_sequence_lead(metplus_config): @pytest.mark.parametrize( 'key, value', [ - ('begin_end_incr(3,12,3)', [ 3, 6, 9, 12]), - ('begin_end_incr( 3,12 , 3)', [ 3, 6, 9, 12]), - ('begin_end_incr(0,10,2)', [ 0, 2, 4, 6, 8, 10]), - ('begin_end_incr(10,0,-2)', [ 10, 8, 6, 4, 2, 0]), - ('begin_end_incr(2,2,20)', [ 2 ]), - ('begin_end_incr(72,72,6)', [ 72 ]), - ('begin_end_incr(0,12,1), begin_end_incr(15,60,3)', [0,1,2,3,4,5,6,7,8,9,10,11,12,15,18,21,24,27,30,33,36,39,42,45,48,51,54,57,60]), - ('begin_end_incr(0,10,2), 12', [ 0, 2, 4, 6, 8, 10, 12]), - ('begin_end_incr(0,10,2)H, 12', [ 0, 2, 4, 6, 8, 10, 12]), - ('begin_end_incr(0,10800,3600)S, 4H', [ 0, 1, 2, 3, 4]), + ('begin_end_incr(3,12,3)', [3, 6, 9, 12]), + ('begin_end_incr( 3,12 , 3)', [3, 6, 9, 12]), + ('begin_end_incr(0,10,2)', [0, 2, 4, 6, 8, 10]), + ('begin_end_incr(10,0,-2)', [10, 8, 6, 4, 2, 0]), + ('begin_end_incr(2,2,20)', [2]), + ('begin_end_incr(72,72,6)', [72]), + ('begin_end_incr(0,12,1), begin_end_incr(15,60,3)', + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 18, 21, 24, 27, 30, 33, + 36, 39, 42, 45, 48, 51, 54, 57, 60]), + ('begin_end_incr(0,10,2), 12', [0, 2, 4, 6, 8, 10, 12]), + ('begin_end_incr(0,10,2)H, 12', [0, 2, 4, 6, 8, 10, 12]), + ('begin_end_incr(0,10800,3600)S, 4H', [0, 1, 2, 3, 4]), ] ) @pytest.mark.util def test_get_lead_sequence_lead_list(metplus_config, key, value): - input_dict = { 'valid' : datetime(2019, 2, 1, 13) } + input_dict = {'valid': datetime(2019, 2, 1, 13)} conf = metplus_config conf.set('config', 'LEAD_SEQ', key) test_seq = tl.get_lead_sequence(conf, input_dict) @@ -451,7 +506,7 @@ def test_get_lead_sequence_lead_list(metplus_config, key, value): ] ) @pytest.mark.util -def test_get_lead_sequence_groups(metplus_config, config_dict, expected_list): +def test_get_lead_sequence_from_groups(metplus_config, config_dict, expected_list): config = metplus_config for key, value in config_dict.items(): config.set('config', key, value) diff --git a/internal/tests/pytests/wrappers/user_script/test_user_script.py b/internal/tests/pytests/wrappers/user_script/test_user_script.py index 64e0b765d4..5102d3c94c 100644 --- a/internal/tests/pytests/wrappers/user_script/test_user_script.py +++ b/internal/tests/pytests/wrappers/user_script/test_user_script.py @@ -168,14 +168,8 @@ def set_run_type_info(config, run_type): 'echo init_{init?fmt=%Y%m%d%H%M%S}_' 'valid_{valid?fmt=%Y%m%d%H%M%S}_lead_{lead?fmt=%3H}.nc')}, ['LEAD_GROUPS'], - ['echo init_*_valid_*_lead_000.nc', - 'echo init_*_valid_*_lead_024.nc', - 'echo init_*_valid_*_lead_048.nc', - 'echo init_*_valid_*_lead_072.nc', - 'echo init_*_valid_*_lead_096.nc', - 'echo init_*_valid_*_lead_120.nc', - 'echo init_*_valid_*_lead_144.nc', - 'echo init_*_valid_*_lead_168.nc', + ['echo init_*_valid_*_lead_*.nc', + 'echo init_*_valid_*_lead_*.nc', ]), # run all init/lead sequence - simple ({'USER_SCRIPT_RUNTIME_FREQ': 'RUN_ONCE_FOR_EACH', diff --git a/metplus/util/config_metplus.py b/metplus/util/config_metplus.py index c842f96840..c0d25deea6 100644 --- a/metplus/util/config_metplus.py +++ b/metplus/util/config_metplus.py @@ -98,12 +98,16 @@ def setup(args, base_confs=None): if base_confs is None: base_confs = _get_default_config_list() + if 
base_confs is None: + return None + override_list = _parse_launch_args(args) + if override_list is None: + return None # add default config files to override list override_list = base_confs + override_list config = launch(override_list) - return config @@ -132,7 +136,7 @@ def _get_default_config_list(parm_base=None): if not default_config_list: print(f"FATAL: No default config files found in {conf_dir}") - sys.exit(1) + return None return default_config_list @@ -195,9 +199,9 @@ def _parse_launch_args(args): # add file path to override list override_list.append(filepath) - # exit if anything went wrong reading config arguments + # return None if anything went wrong reading config arguments if bad: - sys.exit(2) + return None return override_list @@ -249,7 +253,8 @@ def launch(config_list): mkdir_p(config.getdir('OUTPUT_BASE')) # set and log variables to the config object - get_logger(config) + if not get_logger(config): + return None final_conf = config.getstr('config', 'METPLUS_CONF') @@ -337,14 +342,14 @@ def get_logger(config): log_level_val = logging.getLevelName(log_level) except ValueError: print(f'ERROR: Invalid value set for LOG_LEVEL: {log_level}') - sys.exit(1) + return None try: log_level_terminal_val = logging.getLevelName(log_level_terminal) except ValueError: print('ERROR: Invalid value set for LOG_LEVEL_TERMINAL:' f' {log_level_terminal}') - sys.exit(1) + return None # create log formatter from config settings formatter = METplusLogFormatter(config) @@ -410,7 +415,7 @@ def replace_config_from_section(config, section, required=True): # if not required, return input config object return config - new_config = METplusConfig() + new_config = METplusConfig(run_id=config.run_id) # copy over all key/values from sections for section_to_copy in ['config', 'user_env_vars']: @@ -444,9 +449,11 @@ class METplusConfig(ProdConfig): 'regex_pattern', ) - def __init__(self, conf=None): + def __init__(self, conf=None, run_id=None): """!Creates a new METplusConfig @param conf The configuration file + @param run_id 8 character identifier for the run or None if ID should + be created. Defaults to None. 
""" # set interpolation to None so you can supply filename template # that contain % to config.set @@ -455,7 +462,7 @@ def __init__(self, conf=None): interpolation=None) if (conf is None) else conf super().__init__(conf) self._cycle = None - self.run_id = str(uuid.uuid4())[0:8] + self.run_id = run_id if run_id else str(uuid.uuid4())[0:8] self._logger = logging.getLogger(f'metplus.{self.run_id}') # config.logger is called in wrappers, so set this name # so the code doesn't break diff --git a/metplus/util/config_validate.py b/metplus/util/config_validate.py index 2941733cfc..438cd344c6 100644 --- a/metplus/util/config_validate.py +++ b/metplus/util/config_validate.py @@ -17,8 +17,7 @@ def validate_config_variables(config): # check for deprecated env vars in MET config files and # warn user to remove/replace them - deprecated_met_is_ok, sed_cmds = check_for_deprecated_met_config(config) - all_sed_cmds.extend(sed_cmds) + deprecated_met_is_ok = check_for_deprecated_met_config(config) # validate configuration variables field_is_ok, sed_cmds = validate_field_info_configs(config) @@ -133,7 +132,6 @@ def handle_deprecated(old, alt, depr_info, config, all_sed_cmds, e_list, def check_for_deprecated_met_config(config): - sed_cmds = [] all_good = True # check if *_CONFIG_FILE if set in the METplus config file and check for @@ -159,7 +157,7 @@ def check_for_deprecated_met_config(config): met_tool): all_good = False - return all_good, sed_cmds + return all_good def check_for_deprecated_met_config_file(config, met_config, met_tool): @@ -184,31 +182,31 @@ def check_for_deprecated_met_config_file(config, met_config, met_tool): continue error_logs.append(f"Deprecated environment variable ${{{deprecated_item}}} found") - if error_logs: - config.logger.error(f"Deprecated environment variables found in {met_tool}_CONFIG_FILE: {met_config}") - for error_log in error_logs: - config.logger.error(error_log) - - met_install_dir = config.getdir('MET_INSTALL_DIR') - config_dir = os.path.join(met_install_dir, 'share', 'met', 'config') - default_config = f"{get_wrapper_name(met_tool)}Config_default" - default_path = os.path.join(config_dir, default_config) - config.logger.error( - "Please set values that differ from the defaults in a METplus " - f"config file and unset {met_tool}_CONFIG_FILE to use the " - "wrapped MET config that is provided with the METplus wrappers." - ) - config.logger.error( - f"Compare values set in {met_config} to {default_path}" - ) - config.logger.error( - "See https://metplus.readthedocs.io/en/latest/Users_Guide/" - "release-notes.html#metplus-wrappers-upgrade-instructions" - " for more information." - ) - return False + if not error_logs: + return True - return True + config.logger.error(f"Deprecated environment variables found in {met_tool}_CONFIG_FILE: {met_config}") + for error_log in error_logs: + config.logger.error(error_log) + + met_install_dir = config.getdir('MET_INSTALL_DIR') + config_dir = os.path.join(met_install_dir, 'share', 'met', 'config') + default_config = f"{get_wrapper_name(met_tool)}Config_default" + default_path = os.path.join(config_dir, default_config) + config.logger.error( + "Please set values that differ from the defaults in a METplus " + f"config file and unset {met_tool}_CONFIG_FILE to use the " + "wrapped MET config that is provided with the METplus wrappers." 
+ ) + config.logger.error( + f"Compare values set in {met_config} to {default_path}" + ) + config.logger.error( + "See https://metplus.readthedocs.io/en/latest/Users_Guide/" + "release-notes.html#metplus-wrappers-upgrade-instructions" + " for more information." + ) + return False def _get_deprecated_met_list(config, met_tool): diff --git a/metplus/util/run_util.py b/metplus/util/run_util.py index cb61e05aaa..145468c5a6 100644 --- a/metplus/util/run_util.py +++ b/metplus/util/run_util.py @@ -121,6 +121,8 @@ def pre_run_setup(config_inputs): # Read config inputs and return a config instance config = setup(config_inputs) + if not config: + return None logger = config.logger diff --git a/metplus/util/system_util.py b/metplus/util/system_util.py index 55f2f964ae..897ceef8a8 100644 --- a/metplus/util/system_util.py +++ b/metplus/util/system_util.py @@ -109,33 +109,6 @@ def get_storms(filter_filename, id_only=False, sort_column='STORM_ID'): return storm_dict -def prune_empty(output_dir, logger): - """! Start from the output_dir, and recursively check - all directories and files. If there are any empty - files or directories, delete/remove them so they - don't cause performance degradation or errors - when performing subsequent tasks. - - @param output_dir The directory from which searching should begin. - @param logger The logger to which all logging is directed. - """ - - # Check for empty files. - for a_file in traverse_dir(output_dir): - if os.stat(a_file).st_size == 0: - logger.debug("Empty file: " + a_file + - "...removing") - os.remove(a_file) - - # Now check for any empty directories, some - # may have been created when removing - # empty files. - for full_dir in traverse_dir(output_dir, get_dirs=True): - if not os.listdir(full_dir): - logger.debug("Empty directory: " + full_dir + "...removing") - os.rmdir(full_dir) - - def get_files(filedir, filename_regex): """! Get all the files (with a particular naming format) by walking through the directories. Note this uses re.match and will only diff --git a/metplus/util/time_looping.py b/metplus/util/time_looping.py index 045c6e6ced..736b359538 100644 --- a/metplus/util/time_looping.py +++ b/metplus/util/time_looping.py @@ -12,7 +12,7 @@ def time_generator(config): """! Generator used to read METplusConfig variables for time looping - @param METplusConfig object to read + @param config METplusConfig object to read @returns None if not enough information is available on config. Yields the next run time dictionary or None if something went wrong """ @@ -414,16 +414,16 @@ def get_lead_sequence(config, input_dict=None, wildcard_if_empty=False): return lead_seq out_leads = _handle_lead_seq(config, - lead_seq, - lead_min, - lead_max) + lead_seq, + lead_min, + lead_max) # use INIT_SEQ to build lead list based on the valid time elif init_seq: out_leads = _handle_init_seq(init_seq, - input_dict, - lead_min, - lead_max) + input_dict, + lead_min, + lead_max) elif lead_groups: out_leads = _handle_lead_groups(lead_groups) @@ -435,6 +435,7 @@ def get_lead_sequence(config, input_dict=None, wildcard_if_empty=False): return out_leads + def _are_lead_configs_ok(lead_seq, init_seq, lead_groups, config, input_dict, no_max): if lead_groups is None: @@ -463,7 +464,7 @@ def _are_lead_configs_ok(lead_seq, init_seq, lead_groups, return False # if looping by init, fail and exit - if 'valid' not in input_dict.keys(): + if 'valid' not in input_dict.keys() or input_dict['valid'] == '*': log_msg = ('INIT_SEQ specified while looping by init time.' 
' Use LEAD_SEQ or change to loop by valid time') config.logger.error(log_msg) @@ -476,8 +477,9 @@ def _are_lead_configs_ok(lead_seq, init_seq, lead_groups, return True + def _get_lead_min_max(config): - # remove any items that are outside of the range specified + # remove any items that are outside the range specified # by LEAD_SEQ_MIN and LEAD_SEQ_MAX # convert min and max to relativedelta objects, then use current time # to compare them to each forecast lead @@ -491,6 +493,7 @@ def _get_lead_min_max(config): lead_max = get_relativedelta(lead_max_str, 'H') return lead_min, lead_max, no_max + def _handle_lead_seq(config, lead_strings, lead_min=None, lead_max=None): out_leads = [] leads = [] @@ -511,11 +514,12 @@ def _handle_lead_seq(config, lead_strings, lead_min=None, lead_max=None): lead_max_approx = now_time + lead_max for lead in leads: lead_approx = now_time + lead - if lead_approx >= lead_min_approx and lead_approx <= lead_max_approx: + if lead_min_approx <= lead_approx <= lead_max_approx: out_leads.append(lead) return out_leads + def _handle_init_seq(init_seq, input_dict, lead_min, lead_max): out_leads = [] lead_min_hours = ti_get_hours_from_relativedelta(lead_min) @@ -533,14 +537,14 @@ def _handle_init_seq(init_seq, input_dict, lead_min, lead_max): out_leads.append(get_relativedelta(current_lead, default_unit='H')) current_lead += 24 - out_leads = sorted(out_leads, key=lambda - rd: ti_get_seconds_from_relativedelta(rd, input_dict['valid'])) + out_leads = sorted(out_leads, key=lambda rd: ti_get_seconds_from_relativedelta(rd, input_dict['valid'])) return out_leads + def _handle_lead_groups(lead_groups): """! Read groups of forecast leads and create a list with all unique items - @param lead_group dictionary where the values are lists of forecast + @param lead_groups dictionary where the values are lists of forecast leads stored as relativedelta objects @returns list of forecast leads stored as relativedelta objects """ @@ -552,36 +556,84 @@ def _handle_lead_groups(lead_groups): return out_leads + def get_lead_sequence_groups(config): # output will be a dictionary where the key will be the # label specified and the value will be the list of forecast leads - lead_seq_dict = {} - # used in plotting + lead_seq_dict = _get_lead_groups_from_indices(config) + if lead_seq_dict is not None: + return lead_seq_dict + + # if no indices were found, check if divisions are requested + return _get_lead_groups_from_divisions(config) + + +def _get_lead_groups_from_indices(config): all_conf = config.keys('config') indices = [] - regex = re.compile(r"LEAD_SEQ_(\d+)") + regex = re.compile(r"LEAD_SEQ_(\d+)$") for conf in all_conf: result = regex.match(conf) if result is not None: indices.append(result.group(1)) + if not indices: + return None + + lead_seq_dict = {} # loop over all possible variables and add them to list for index in indices: - if config.has_option('config', f"LEAD_SEQ_{index}_LABEL"): - label = config.getstr('config', f"LEAD_SEQ_{index}_LABEL") - else: - log_msg = (f'Need to set LEAD_SEQ_{index}_LABEL to describe ' - f'LEAD_SEQ_{index}') - config.logger.error(log_msg) - return None - + label = _get_label_from_index(config, index) # get forecast list for n lead_string_list = getlist(config.getstr('config', f'LEAD_SEQ_{index}')) - lead_seq = _handle_lead_seq(config, - lead_string_list, - lead_min=None, - lead_max=None) + lead_seq = _handle_lead_seq(config, lead_string_list, + lead_min=None, lead_max=None) # add to output dictionary lead_seq_dict[label] = lead_seq return lead_seq_dict + + 
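+# _get_lead_groups_from_divisions (below) slides a window of size
+# LEAD_SEQ_GROUP_SIZE across LEAD_SEQ: window <n> covers
+# [(n-1)*size, n*size - 1 second]. Windows that match no leads produce no
+# group, so their <label><n> names are skipped.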
+def _get_lead_groups_from_divisions(config): + if not config.has_option('config', 'LEAD_SEQ_GROUP_SIZE'): + return {} + + lead_list = getlist(config.getstr('config', 'LEAD_SEQ', '')) + divisions = config.getstr('config', 'LEAD_SEQ_GROUP_SIZE') + divisions = get_relativedelta(divisions, default_unit='H') + lead_min = get_relativedelta('0') + lead_max = divisions - get_relativedelta('1S') + index = 1 + now = datetime.now() + # maximum 1000 divisions can be created to prevent infinite loop + num_leads = 0 + lead_groups = {} + while now + lead_max < now + (divisions * 1000): + lead_seq = _handle_lead_seq(config, lead_list, + lead_min=lead_min, lead_max=lead_max) + if lead_seq: + label = _get_label_from_index(config, index) + lead_groups[label] = lead_seq + num_leads += len(lead_seq) + + # if all forecast leads have been handled, break out of while loop + if num_leads >= len(lead_list): + break + + index += 1 + lead_min += divisions + lead_max += divisions + + if num_leads < len(lead_list): + config.logger.warning('Could not split LEAD_SEQ using LEAD_SEQ_GROUP_SIZE') + return None + + return lead_groups + + +def _get_label_from_index(config, index): + if config.has_option('config', f"LEAD_SEQ_{index}_LABEL"): + return config.getstr('config', f"LEAD_SEQ_{index}_LABEL") + if config.has_option('config', "LEAD_SEQ_GROUP_LABEL"): + return f"{config.getstr('config', 'LEAD_SEQ_GROUP_LABEL')}{index}" + return f"Group{index}" diff --git a/metplus/util/time_util.py b/metplus/util/time_util.py index 88c7646e41..cc19485911 100755 --- a/metplus/util/time_util.py +++ b/metplus/util/time_util.py @@ -15,13 +15,6 @@ from .string_manip import format_thresh -'''!@namespace TimeInfo -@brief Utility to handle timing in METplus wrappers -@code{.sh} -Cannot be called directly. These are helper functions -to be used in other METplus wrappers -@endcode -''' # dictionary where key is letter of time unit, i.e. Y and value is # the string representation of it, i.e. 
year @@ -60,48 +53,52 @@ def get_relativedelta(value, default_unit='S'): mult = 1 reg = r'(-*)(\d+)([a-zA-Z]*)' match = re.match(reg, value) - if match: - if match.group(1) == '-': - mult = -1 - time_value = int(match.group(2)) * mult - unit_value = match.group(3) - - # create relativedelta (dateutil) object for unit - # if no units specified, use seconds unless default_unit is specified - if unit_value == '': - if default_unit == 'S': - return relativedelta(seconds=time_value) - else: - unit_value = default_unit - - if unit_value == 'H': - return relativedelta(hours=time_value) - - if unit_value == 'M': - return relativedelta(minutes=time_value) - - if unit_value == 'S': + if not match: + return None + + if match.group(1) == '-': + mult = -1 + time_value = int(match.group(2)) * mult + unit_value = match.group(3) + + # create relativedelta (dateutil) object for unit + # if no units specified, use seconds unless default_unit is specified + if unit_value == '': + if default_unit == 'S': return relativedelta(seconds=time_value) + else: + unit_value = default_unit - if unit_value == 'd': - return relativedelta(days=time_value) + if unit_value == 'H': + return relativedelta(hours=time_value) - if unit_value == 'm': - return relativedelta(months=time_value) + if unit_value == 'M': + return relativedelta(minutes=time_value) - if unit_value == 'Y': - return relativedelta(years=time_value) + if unit_value == 'S': + return relativedelta(seconds=time_value) - # unsupported time unit specified, return None - return None + if unit_value == 'd': + return relativedelta(days=time_value) + + if unit_value == 'm': + return relativedelta(months=time_value) + + if unit_value == 'Y': + return relativedelta(years=time_value) + + # unsupported time unit specified, return None + return None def get_seconds_from_string(value, default_unit='S', valid_time=None): - """!Convert string of time (optionally ending with time letter, i.e. HMSyMD to seconds - Args: - @param value string to convert, i.e. 3M, 4H, 17 - @param default_unit units to apply if not specified at end of string - @returns time in seconds if successfully parsed, None if not""" + """!Convert string of time, optionally ending with time letter, e.g. HMSyMD to seconds + + @param value string to convert, i.e. 3M, 4H, 17 + @param default_unit units to apply if not specified at end of string + @param valid_time optional valid time to compute seconds if units to + convert are months or years + @returns time in seconds if successfully parsed, None if not""" rd_obj = get_relativedelta(value, default_unit) return ti_get_seconds_from_relativedelta(rd_obj, valid_time) @@ -135,7 +132,7 @@ def ti_get_hours_from_relativedelta(lead, valid_time=None): @param lead relativedelta object to convert @param valid_time (optional) valid time required to convert values that contain months or years - @returns integer value of hours or None if cannot compute + @returns integer value of hours or None if it cannot be computed """ lead_seconds = ti_get_seconds_from_relativedelta(lead, valid_time) if lead_seconds is None: @@ -276,6 +273,14 @@ def ti_get_lead_string(lead, plural=True, letter_only=False): return f"{negative}{output}" +def format_lead_seq(lead_seq, plural=True, letter_only=False): + leads = [] + for lead in lead_seq: + lead_fmt = ti_get_lead_string(lead, plural=plural, letter_only=letter_only) + leads.append(lead_fmt) + return ', '.join(leads) + + def get_met_time_list(string_value, sort_list=True): """! Convert a string into a list of strings in MET time format HHMMSS. 
@@ -506,7 +511,7 @@ def add_to_time_input(time_input, clock_time=None, instance=None, custom=None): time_input['instance'] = instance if instance else '' # if custom is specified, set it - # otherwise leave it unset so it can be set within the wrapper + # otherwise leave it unset, so it can be set within the wrapper if custom: time_input['custom'] = custom diff --git a/metplus/wrappers/__init__.py b/metplus/wrappers/__init__.py index c9d53693ba..ae8a1c3aae 100644 --- a/metplus/wrappers/__init__.py +++ b/metplus/wrappers/__init__.py @@ -5,27 +5,22 @@ from importlib import import_module from ..util.metplus_check import plot_wrappers_are_enabled +# import parent classes to other wrappers +from .command_builder import CommandBuilder +from .runtime_freq_wrapper import RuntimeFreqWrapper +from .loop_times_wrapper import LoopTimesWrapper +from .reformat_gridded_wrapper import ReformatGriddedWrapper +from .reformat_point_wrapper import ReformatPointWrapper +from .compare_gridded_wrapper import CompareGriddedWrapper + +# import RegridDataPlane wrapper because it is used by other wrappers +from .regrid_data_plane_wrapper import RegridDataPlaneWrapper + # these wrappers should not be imported if plotting is disabled plotting_wrappers = [ 'cyclone_plotter_wrapper', ] -# import classes that other wrappers import -parent_classes = { - 'command_builder': 'CommandBuilder', - 'runtime_freq_wrapper': 'RuntimeFreqWrapper', - 'loop_times_wrapper': 'LoopTimesWrapper', - 'reformat_gridded_wrapper': 'ReformatGriddedWrapper', - 'reformat_point_wrapper': 'ReformatPointWrapper', - 'compare_gridded_wrapper': 'CompareGriddedWrapper', - 'regrid_data_plane_wrapper': 'RegridDataPlaneWrapper', -} - -for module_name, attribute_name in parent_classes.items(): - module = import_module(f"{__name__}.{module_name}") - attribute = getattr(module, attribute_name) - globals()[attribute_name] = attribute - # iterate through the modules in the current package package_dir = str(Path(__file__).resolve().parent) for (_, module_name, _) in iter_modules([package_dir]): diff --git a/metplus/wrappers/compare_gridded_wrapper.py b/metplus/wrappers/compare_gridded_wrapper.py index 9e27757b55..86ef774cdc 100755 --- a/metplus/wrappers/compare_gridded_wrapper.py +++ b/metplus/wrappers/compare_gridded_wrapper.py @@ -76,7 +76,7 @@ def create_c_dict(self): return c_dict - def set_environment_variables(self, time_info): + def set_environment_variables(self, time_info=None): """! Set environment variables that will be set when running this tool. 
Wrappers can override this function to set wrapper-specific values, then call this (super) version to handle user configs and printing diff --git a/metplus/wrappers/cyclone_plotter_wrapper.py b/metplus/wrappers/cyclone_plotter_wrapper.py index 8b7ceb53a4..1bd26b8e2d 100644 --- a/metplus/wrappers/cyclone_plotter_wrapper.py +++ b/metplus/wrappers/cyclone_plotter_wrapper.py @@ -8,8 +8,6 @@ import os import time import datetime -import re -import sys from collections import namedtuple @@ -331,6 +329,8 @@ def retrieve_data(self): if not self.is_global_extent: self.logger.debug(f"Subset the data based on the region of interest.") subset_by_region_df = self.subset_by_region(sanitized_df) + if subset_by_region_df.empty: + return None final_df = subset_by_region_df.copy(deep=True) else: final_df = sanitized_df.copy(deep=True) @@ -608,7 +608,8 @@ def subset_by_region(self, sanitized_df): masked.reset_index(drop=True,inplace=True) if len(masked) == 0: - sys.exit("No data in region specified, please check your lon and lat values in the config file.") + self.log_error("No data in region specified, please check your lon and lat values in the config file.") + return None return masked diff --git a/metplus/wrappers/example_wrapper.py b/metplus/wrappers/example_wrapper.py index 1802ee01fc..4d3ab77f00 100755 --- a/metplus/wrappers/example_wrapper.py +++ b/metplus/wrappers/example_wrapper.py @@ -12,8 +12,7 @@ import os -from ..util import do_string_sub, ti_calculate, get_lead_sequence -from ..util import skip_time +from ..util import do_string_sub from . import LoopTimesWrapper diff --git a/metplus/wrappers/extract_tiles_wrapper.py b/metplus/wrappers/extract_tiles_wrapper.py index fc091a4e5b..c0d15276fe 100755 --- a/metplus/wrappers/extract_tiles_wrapper.py +++ b/metplus/wrappers/extract_tiles_wrapper.py @@ -13,9 +13,9 @@ from datetime import datetime import re -from ..util import do_string_sub, ti_calculate, skip_time -from ..util import get_lead_sequence, sub_var_list -from ..util import parse_var_list, round_0p5, get_storms, prune_empty +from ..util import do_string_sub, ti_calculate +from ..util import sub_var_list +from ..util import parse_var_list, round_0p5, get_storms from .regrid_data_plane_wrapper import RegridDataPlaneWrapper from . import LoopTimesWrapper @@ -235,8 +235,6 @@ def run_at_time_once(self, time_info): else: self.use_tc_stat_input(storm_dict, idx_dict) - prune_empty(self.c_dict['OUTPUT_DIR'], self.logger) - def use_tc_stat_input(self, storm_dict, idx_dict): """! Find storms in TCStat input file and create tiles using the storm. diff --git a/metplus/wrappers/gempak_to_cf_wrapper.py b/metplus/wrappers/gempak_to_cf_wrapper.py index 6c6108dac8..9519da51fc 100755 --- a/metplus/wrappers/gempak_to_cf_wrapper.py +++ b/metplus/wrappers/gempak_to_cf_wrapper.py @@ -12,8 +12,7 @@ import os -from ..util import do_string_sub, skip_time, get_lead_sequence -from ..util import time_util +from ..util import do_string_sub from . import LoopTimesWrapper '''!@namespace GempakToCFWrapper diff --git a/metplus/wrappers/gen_ens_prod_wrapper.py b/metplus/wrappers/gen_ens_prod_wrapper.py index 3ed3fcd643..3fbde580f7 100755 --- a/metplus/wrappers/gen_ens_prod_wrapper.py +++ b/metplus/wrappers/gen_ens_prod_wrapper.py @@ -5,9 +5,7 @@ import os -from ..util import do_string_sub, ti_calculate, get_lead_sequence -from ..util import skip_time, parse_var_list, sub_var_list - +from ..util import do_string_sub, parse_var_list, sub_var_list from . 
import LoopTimesWrapper diff --git a/metplus/wrappers/gen_vx_mask_wrapper.py b/metplus/wrappers/gen_vx_mask_wrapper.py index 9d971537c6..59ef9e2f4c 100755 --- a/metplus/wrappers/gen_vx_mask_wrapper.py +++ b/metplus/wrappers/gen_vx_mask_wrapper.py @@ -12,9 +12,9 @@ import os -from ..util import getlist, get_lead_sequence, skip_time, ti_calculate, mkdir_p -from . import LoopTimesWrapper +from ..util import getlist from ..util import do_string_sub, remove_quotes +from . import LoopTimesWrapper '''!@namespace GenVxMaskWrapper @brief Wraps the GenVxMask tool to reformat ascii format to NetCDF diff --git a/metplus/wrappers/grid_diag_wrapper.py b/metplus/wrappers/grid_diag_wrapper.py index c95ab75d9a..f2228cde14 100755 --- a/metplus/wrappers/grid_diag_wrapper.py +++ b/metplus/wrappers/grid_diag_wrapper.py @@ -107,7 +107,7 @@ def create_c_dict(self): return c_dict - def set_environment_variables(self, time_info): + def set_environment_variables(self, time_info=None): """!Set environment variables that will be read by the MET config file. Reformat as needed. Print list of variables that were set and their values. diff --git a/metplus/wrappers/loop_times_wrapper.py b/metplus/wrappers/loop_times_wrapper.py index 09570364cc..c58f1f093a 100755 --- a/metplus/wrappers/loop_times_wrapper.py +++ b/metplus/wrappers/loop_times_wrapper.py @@ -4,7 +4,6 @@ """ from . import RuntimeFreqWrapper -from ..util import get_lead_sequence, skip_time, ti_calculate '''!@namespace LoopTimesWrapper @brief parent class for any wrapper that will loop over init/valid times diff --git a/metplus/wrappers/mode_wrapper.py b/metplus/wrappers/mode_wrapper.py index 13190a3b33..f650b26a29 100755 --- a/metplus/wrappers/mode_wrapper.py +++ b/metplus/wrappers/mode_wrapper.py @@ -12,8 +12,8 @@ import os -from . import CompareGriddedWrapper from ..util import do_string_sub +from . import CompareGriddedWrapper class MODEWrapper(CompareGriddedWrapper): diff --git a/metplus/wrappers/plot_data_plane_wrapper.py b/metplus/wrappers/plot_data_plane_wrapper.py index 6145324128..da5dee031a 100755 --- a/metplus/wrappers/plot_data_plane_wrapper.py +++ b/metplus/wrappers/plot_data_plane_wrapper.py @@ -12,8 +12,7 @@ import os -from ..util import time_util -from ..util import do_string_sub, remove_quotes, skip_time, get_lead_sequence +from ..util import do_string_sub, remove_quotes from . import LoopTimesWrapper '''!@namespace PlotDataPlaneWrapper diff --git a/metplus/wrappers/plot_point_obs_wrapper.py b/metplus/wrappers/plot_point_obs_wrapper.py index 076811804b..97f62a2880 100755 --- a/metplus/wrappers/plot_point_obs_wrapper.py +++ b/metplus/wrappers/plot_point_obs_wrapper.py @@ -12,8 +12,7 @@ import os -from ..util import do_string_sub, ti_calculate, get_lead_sequence -from ..util import skip_time +from ..util import do_string_sub from . import LoopTimesWrapper diff --git a/metplus/wrappers/py_embed_ingest_wrapper.py b/metplus/wrappers/py_embed_ingest_wrapper.py index 2f86694e33..0dcef33132 100755 --- a/metplus/wrappers/py_embed_ingest_wrapper.py +++ b/metplus/wrappers/py_embed_ingest_wrapper.py @@ -10,11 +10,9 @@ Condition codes: 0 for success, 1 for failure """ -import os import re -from ..util import time_util -from ..util import do_string_sub, get_lead_sequence +from ..util import do_string_sub from . import LoopTimesWrapper from . 
import RegridDataPlaneWrapper diff --git a/metplus/wrappers/runtime_freq_wrapper.py b/metplus/wrappers/runtime_freq_wrapper.py index 308e4ec711..57a3f76d26 100755 --- a/metplus/wrappers/runtime_freq_wrapper.py +++ b/metplus/wrappers/runtime_freq_wrapper.py @@ -10,12 +10,10 @@ Condition codes: 0 for success, 1 for failure """ -import os - from ..util import time_util -from ..util import log_runtime_banner, get_lead_sequence +from ..util import log_runtime_banner, get_lead_sequence, get_lead_sequence_groups from ..util import skip_time, getlist, get_start_and_end_times, get_time_prefix -from ..util import time_generator, add_to_time_input +from ..util import time_generator, add_to_time_input, format_lead_seq from ..util import sub_var_list, add_field_info_to_time_info from . import CommandBuilder @@ -95,21 +93,20 @@ def validate_runtime_freq(self, c_dict): # if list of supported frequencies are set by wrapper, # warn and use default if frequency is not supported - if hasattr(self, 'RUNTIME_FREQ_SUPPORTED'): - if self.RUNTIME_FREQ_SUPPORTED == 'ALL': - return + if (not hasattr(self, 'RUNTIME_FREQ_SUPPORTED') or + self.RUNTIME_FREQ_SUPPORTED == 'ALL'): + return - if c_dict['RUNTIME_FREQ'] not in self.RUNTIME_FREQ_SUPPORTED: - err_msg = (f"{self.app_name.upper()}_RUNTIME_FREQ=" - f"{c_dict['RUNTIME_FREQ']} not supported.") - if hasattr(self, 'RUNTIME_FREQ_DEFAULT'): - self.logger.warning( - f"{err_msg} Using {self.RUNTIME_FREQ_DEFAULT}" - ) - c_dict['RUNTIME_FREQ'] = self.RUNTIME_FREQ_DEFAULT - else: - self.log_error(err_msg) - return + if c_dict['RUNTIME_FREQ'] not in self.RUNTIME_FREQ_SUPPORTED: + err_msg = (f"{self.app_name.upper()}_RUNTIME_FREQ=" + f"{c_dict['RUNTIME_FREQ']} not supported.") + if hasattr(self, 'RUNTIME_FREQ_DEFAULT'): + self.logger.warning( + f"{err_msg} Using {self.RUNTIME_FREQ_DEFAULT}" + ) + c_dict['RUNTIME_FREQ'] = self.RUNTIME_FREQ_DEFAULT + else: + self.log_error(err_msg) def get_input_templates(self, c_dict, input_info=None): """!Read input templates from config. 
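The lead-group handling added in the hunks below passes groups around as a plain label-to-lead-list mapping (`get_lead_sequence_groups` returns it; an empty label means no named groups were configured). A small illustration of that shape — the labels and lead values here are hypothetical:

```
from dateutil.relativedelta import relativedelta

# hypothetical groups; real values come from get_lead_sequence_groups(config)
lead_groups = {
    'Day1': [relativedelta(hours=6), relativedelta(hours=12)],
    'Day2': [relativedelta(hours=30), relativedelta(hours=36)],
}

for label, lead_seq in lead_groups.items():
    # relativedelta normalizes hours >= 24 into days, so recombine for display
    hours = [lead.days * 24 + lead.hours for lead in lead_seq]
    print(f"Processing lead group {label}: leads (hours) = {hours}")
```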
@@ -252,7 +249,7 @@ def run_once(self, custom): return self.run_at_time_once(time_info) def run_once_per_init_or_valid(self, custom): - self.logger.debug(f"Running once for each init/valid time") + self.logger.debug("Running once for each init/valid time") success = True for time_input in time_generator(self.config): @@ -273,37 +270,74 @@ def run_once_per_init_or_valid(self, custom): time_input['lead'] = '*' time_info = time_util.ti_calculate(time_input) - self.c_dict['ALL_FILES'] = self.get_all_files_from_leads(time_info) + lead_groups = self._get_leads_as_group(time_info) + if not self.run_once_per_lead_group(lead_groups, time_info): + success = False + + return success + + def _get_leads_as_group(self, time_input): + """!""" + lead_groups = get_lead_sequence_groups(self.config) + if lead_groups: + return lead_groups + + use_wildcard = self.c_dict.get('WILDCARD_LEAD_IF_EMPTY') + lead_seq = get_lead_sequence(self.config, time_input, + wildcard_if_empty=use_wildcard) + return {'': lead_seq} + + def _log_lead_group(self, label, lead_seq): + if not label: + return + self.logger.info(f"Processing lead group {label}:" + f" {format_lead_seq(lead_seq, plural=False)}") + + def run_once_per_lead_group(self, lead_groups, time_input): + success = True + for label, lead_seq in lead_groups.items(): + self._log_lead_group(label, lead_seq) + time_input['label'] = label + self.c_dict['ALL_FILES'] = ( + self.get_all_files_from_leads(time_input, lead_seq) + ) if not self._check_input_files(): continue self.clear() - if not self.run_at_time_once(time_info): + if not self.run_at_time_once(time_input): success = False return success def run_once_per_lead(self, custom): - self.logger.debug("Running once for forecast lead time") + # create input dict and only set 'now' item + time_input = { + 'init': '*', + 'valid': '*', + } + add_to_time_input(time_input, + clock_time=self.config.getstr('config', 'CLOCK_TIME'), + instance=self.instance, custom=custom) + + # check if forecast lead groups are specified and + # run once per group of leads if they are + lead_groups = get_lead_sequence_groups(self.config) + if lead_groups: + self.logger.debug("Running once per forecast lead group") + time_input['lead'] = '*' + return self.run_once_per_lead_group(lead_groups, time_input) + + self.logger.debug("Running once per forecast lead time") success = True lead_seq = get_lead_sequence(self.config, input_dict=None) for lead in lead_seq: - # create input dict and only set 'now' item - # create a new dictionary each iteration in case the function - # that it is passed into modifies it - time_input = {} - add_to_time_input(time_input, - clock_time=self.config.getstr('config', - 'CLOCK_TIME'), - instance=self.instance, - custom=custom) - - # add forecast lead + # add forecast lead to time input time_input['lead'] = lead - time_input['init'] = '*' - time_input['valid'] = '*' + # create a new dictionary each iteration in case the function + # that it is passed into modifies it time_info = time_util.ti_calculate(time_input) self.c_dict['ALL_FILES'] = self.get_all_files_for_lead(time_info) @@ -317,7 +351,7 @@ def run_once_per_lead(self, custom): return success def run_once_for_each(self, custom): - self.logger.debug(f"Running once for each init/valid and lead time") + self.logger.debug("Running once for each init/valid and lead time") success = True for time_input in time_generator(self.config): @@ -422,7 +456,11 @@ def get_all_files(self, custom=None): instance=self.instance, custom=custom) - lead_files = 
self.get_all_files_from_leads(time_input) + use_wildcard = self.c_dict.get('WILDCARD_LEAD_IF_EMPTY') + lead_seq = get_lead_sequence(self.config, + time_input, + wildcard_if_empty=use_wildcard) + lead_files = self.get_all_files_from_leads(time_input, lead_seq) all_files.extend(lead_files) return all_files @@ -441,16 +479,12 @@ def _check_input_files(self): return False return True - def get_all_files_from_leads(self, time_input): + def get_all_files_from_leads(self, time_input, lead_seq): if not self.c_dict.get('FIND_FILES', True): return True lead_files = [] # loop over all forecast leads - wildcard_if_empty = self.c_dict.get('WILDCARD_LEAD_IF_EMPTY', False) - lead_seq = get_lead_sequence(self.config, - time_input, - wildcard_if_empty=wildcard_if_empty) for lead in lead_seq: current_time_input = time_input.copy() current_time_input['lead'] = lead @@ -508,10 +542,9 @@ def get_files_from_time(self, time_info): @returns dictionary containing time_info dict and any relevant files with a key representing a description of that file """ + var_list = [None] if self.c_dict.get('ONCE_PER_FIELD', False): var_list = sub_var_list(self.c_dict.get('VAR_LIST_TEMP'), time_info) - else: - var_list = [None] # create a dictionary for each field (var) with time_info and files file_dict_list = [] @@ -661,34 +694,11 @@ def subset_input_files(self, time_info, output_dir=None, leads=None, if not self.c_dict.get('ALL_FILES') or self.c_dict.get('ALL_FILES') is True: return all_input_files - if leads is None: - lead_loop = [None] - else: - lead_loop = leads - + lead_loop = [None] if leads is None else leads for file_dict in self.c_dict['ALL_FILES']: for lead in lead_loop: - if lead is not None: - current_time_info = time_info.copy() - current_time_info['lead'] = lead - else: - current_time_info = time_info - - # compare time information for each input file - # add file to list of files to use if it matches - if not self.compare_time_info(current_time_info, - file_dict['time_info']): - continue - - for input_key in file_dict: - # skip time info key - if input_key == 'time_info': - continue - - if input_key not in all_input_files: - all_input_files[input_key] = [] - - all_input_files[input_key].extend(file_dict[input_key]) + self._add_files_that_match_time(all_input_files, time_info, + file_dict, lead) # return None if no matching input files were found if not all_input_files: @@ -709,6 +719,36 @@ def subset_input_files(self, time_info, output_dir=None, leads=None, return list_file_dict + def _add_files_that_match_time(self, all_input_files, time_info, file_dict, lead): + """!Check if time info of input files matches current time info. If it + is a match, add the file info to the all_input_files dictionary. 
+ + @param all_input_files dictionary to add file info if there is a time match + @param time_info dictionary of time info to compare to file times + @param file_dict dictionary containing file information including file times + @param lead forecast lead to check + """ + current_time_info = time_info + if lead is not None: + current_time_info = time_info.copy() + current_time_info['lead'] = lead + + # compare time information for each input file + # add file to list of files to use if it matches + if not self.compare_time_info(current_time_info, + file_dict['time_info']): + return + + for input_key in file_dict: + # skip time info key + if input_key == 'time_info': + continue + + if input_key not in all_input_files: + all_input_files[input_key] = [] + + all_input_files[input_key].extend(file_dict[input_key]) + def get_list_file_name(self, time_info, identifier): """! Build name of ascii file that contains a list of files to process. If wildcard is set for init, valid, or lead then use the text ALL @@ -730,7 +770,7 @@ def get_list_file_name(self, time_info, identifier): valid = time_info['valid'].strftime('%Y%m%d%H%M%S') if time_info.get('lead', '*') == '*': - lead = 'ALL' + lead = time_info.get('label') if time_info.get('label') else 'ALL' else: lead = time_util.ti_get_seconds_from_lead(time_info['lead'], time_info['valid']) diff --git a/ush/validate_config.py b/ush/validate_config.py index 015d36662d..efdf84a440 100755 --- a/ush/validate_config.py +++ b/ush/validate_config.py @@ -33,6 +33,9 @@ def main(): # Parse arguments, options and return a config instance. config = config_metplus.setup(config_inputs) + if config is None: + print("ERROR: Could not parse config") + return False # validate configuration variables is_ok, sed_commands = validate_config_variables(config) @@ -40,78 +43,65 @@ def main(): # if everything is valid, report success and exit if is_ok: print("SUCCESS: Configuration passed all of the validation tests.") - sys.exit(0) + return True + + _prompt_sed_commands(sed_commands) + return True + + +def _prompt_sed_commands(sed_commands): # if sed commands can be run, output lines that will be changed and ask # user if they want to run the sed command - if sed_commands: - for cmd in sed_commands: - if cmd.startswith('#Add'): - add_line = cmd.replace('#Add ', '') - met_tool = None - for suffix in ['_REGRID_TO_GRID', '_OUTPUT_PREFIX']: - if suffix in add_line: - met_tool = add_line.split(suffix)[0] - section = 'config' - - for suffix in ['_CLIMO_MEAN_INPUT_TEMPLATE']: - if suffix in add_line: - met_tool = add_line.split(suffix)[0] - section = 'filename_templates' - - if met_tool: - print(f"\nIMPORTANT: If it is not already set, add the following in the [{section}] section to " - f"your METplus configuration file that sets {met_tool}_CONFIG_FILE:\n") - print(add_line) - input("Make this change before continuing! [OK]") - else: - print("ERROR: Something went wrong in the validate_config.py script. 
Please send an email to met_help@ucar.edu.")
-
-                continue
-
-            # remove -i from sed command to avoid replacing in the file
-            cmd_no_inline = cmd.replace('sed -i', 'sed')
-            split_cmd = shlex.split(cmd_no_inline)
-            original_file = split_cmd[-1]
-
-            # call sed command to get result of find/replace
-            result = subprocess.check_output(split_cmd, encoding='utf-8').splitlines()
-
-            # compare the result to the original file and show the differences
-            with open(original_file, 'r') as f:
-                original = [i.replace('\n', '') for i in f.readlines()]
-
-            # if no differences, continue
-            if result == original:
-                continue
-
-            print(f"\nThe following replacement is suggested for {original_file}\n")
-
-            # loop over before and after files line by line and
-            for old, new in zip(original, result):
-
-                if old != new:
-                    print(f"Before:\n{old}\n")
-                    print(f"After:\n{new}\n")
-
-            # ask the user if they want to make the changes to their file (y/n default is no)
-            run_sed = False
-            user_answer = input(f"Would you like the make this change to {original_file}? (y/n)[n]")
-
-            if user_answer and user_answer[0] == 'y':
-                run_sed = True
-
-            # if yes, run original sed command
-            if run_sed:
-                print(f"Running command: {cmd}")
-                subprocess.run(shlex.split(cmd))
-            else:
-                print(f"Skipping sed command for {original_file}")
-
-        print("\nFinished running sed commands.")
-        print("\nRerun this script to ensure no other deprecated variables need to be replaced.")
-        print("See METplus User's Guide for more information on how to change deprecated variables")
+    if not sed_commands:
+        return
+
+    for cmd in sed_commands:
+        # remove -i from sed command to avoid replacing in the file
+        cmd_no_inline = cmd.replace('sed -i', 'sed')
+        split_cmd = shlex.split(cmd_no_inline)
+        original_file = split_cmd[-1]
+
+        # call sed command to get result of find/replace
+        result = subprocess.check_output(split_cmd, encoding='utf-8').splitlines()
+
+        # compare the result to the original file and show the differences
+        with open(original_file, 'r') as f:
+            original = f.read().splitlines()
+
+        # if no differences, continue
+        if result == original:
+            continue
+
+        print(f"\nThe following replacement is suggested for {original_file}\n")
+
+        # loop over before and after files line by line and print differences
+        for old, new in zip(original, result):
+
+            if old != new:
+                print(f"Before:\n{old}\n")
+                print(f"After:\n{new}\n")
+
+        # ask the user if they want to make the changes to their file (y/n default is no)
+        run_sed = False
+        user_answer = input(f"Would you like to make this change to {original_file}? 
(y/n)[n]") + + if user_answer and user_answer[0] == 'y': + run_sed = True + + # if yes, run original sed command + if run_sed: + print(f"Running command: {cmd}") + subprocess.run(shlex.split(cmd)) + else: + print(f"Skipping sed command for {original_file}") + + print("\nFinished running sed commands.") + print("\nRerun this script to ensure no other deprecated variables need to be replaced.") + print("See METplus User's Guide for more information on how to change deprecated variables") if __name__ == "__main__": - main() + status = main() + if not status: + sys.exit(1) From bf9a13463752a9fcea4a64356eeac4a781d675f6 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Tue, 24 Sep 2024 06:38:09 -0600 Subject: [PATCH 05/95] update versions packages to match METplotpy requirements and update call to cartopy feature download script -- see https://github.com/SciTools/cartopy/pull/2263 --- internal/scripts/docker_env/Dockerfile.cartopy | 4 +--- internal/scripts/docker_env/README.md | 14 ++++---------- .../scripts/docker_env/scripts/metplotpy_env.sh | 2 +- 3 files changed, 6 insertions(+), 14 deletions(-) diff --git a/internal/scripts/docker_env/Dockerfile.cartopy b/internal/scripts/docker_env/Dockerfile.cartopy index 079259d51b..19ea6e2995 100644 --- a/internal/scripts/docker_env/Dockerfile.cartopy +++ b/internal/scripts/docker_env/Dockerfile.cartopy @@ -24,9 +24,7 @@ ARG METPLUS_ENV_VERSION ARG ENV_NAME RUN apt update && apt -y upgrade \ && apt install -y curl \ - && rm -f cartopy_feature_download.py \ - && curl https://raw.githubusercontent.com/SciTools/cartopy/master/tools/cartopy_feature_download.py > cartopy_feature_download.py \ - && /usr/local/conda/envs/${ENV_NAME}.${METPLUS_ENV_VERSION}/bin/python3 cartopy_feature_download.py cultural physical + && /usr/local/conda/envs/${ENV_NAME}.${METPLUS_ENV_VERSION}/bin/cartopy_feature_download.py cultural physical # remove base environment to free up space ARG METPLUS_ENV_VERSION diff --git a/internal/scripts/docker_env/README.md b/internal/scripts/docker_env/README.md index 6a121cde50..f52cc44873 100644 --- a/internal/scripts/docker_env/README.md +++ b/internal/scripts/docker_env/README.md @@ -225,7 +225,7 @@ export METPLUS_ENV_VERSION=v6.0 docker build -t dtcenter/metplus-envs:metplotpy.${METPLUS_ENV_VERSION} \ --build-arg METPLUS_ENV_VERSION \ --build-arg ENV_NAME=metplotpy \ - -f Dockerfile.cartopy . + -f Dockerfile.cartopy. 
docker push dtcenter/metplus-envs:metplotpy.${METPLUS_ENV_VERSION} ``` @@ -234,9 +234,7 @@ docker push dtcenter/metplus-envs:metplotpy.${METPLUS_ENV_VERSION} ``` export METPLUS_ENV_VERSION=v6.0 ./scripts/metplotpy_env.sh ${METPLUS_ENV_VERSION} -wget https://raw.githubusercontent.com/SciTools/cartopy/master/tools/cartopy_feature_download.py -/home/met_test/.conda/envs/metplotpy.${METPLUS_ENV_VERSION}/bin/python3 cartopy_feature_download.py cultural physical -rm cartopy_feature_download.py +/home/met_test/.conda/envs/metplotpy.${METPLUS_ENV_VERSION}/bin/cartopy_feature_download.py cultural physical ``` #### To install METplotpy and METcalcpy packages in environment @@ -283,9 +281,7 @@ docker push dtcenter/metplus-envs:weatherregime.${METPLUS_ENV_VERSION} ``` export METPLUS_ENV_VERSION=v6.0 ./scripts/weatherregime_env.sh ${METPLUS_ENV_VERSION} -wget https://raw.githubusercontent.com/SciTools/cartopy/master/tools/cartopy_feature_download.py -/home/met_test/.conda/envs/weatherregime.${METPLUS_ENV_VERSION}/bin/python3 cartopy_feature_download.py cultural physical -rm cartopy_feature_download.py +/home/met_test/.conda/envs/weatherregime.${METPLUS_ENV_VERSION}/bin/cartopy_feature_download.py cultural physical ``` #### To install METplotpy and METcalcpy packages in environment @@ -331,9 +327,7 @@ docker push dtcenter/metplus-envs:cycloneplotter.${METPLUS_ENV_VERSION} ``` export METPLUS_ENV_VERSION=v6.0 ./scripts/cycloneplotter_env.sh ${METPLUS_ENV_VERSION} -wget https://raw.githubusercontent.com/SciTools/cartopy/master/tools/cartopy_feature_download.py -/home/met_test/.conda/envs/cycloneplotter.${METPLUS_ENV_VERSION}/bin/python3 cartopy_feature_download.py cultural physical -rm cartopy_feature_download.py +/home/met_test/.conda/envs/cycloneplotter.${METPLUS_ENV_VERSION}/bin/cartopy_feature_download.py cultural physical ``` diff --git a/internal/scripts/docker_env/scripts/metplotpy_env.sh b/internal/scripts/docker_env/scripts/metplotpy_env.sh index 6a55e6238e..1fd5e811e3 100755 --- a/internal/scripts/docker_env/scripts/metplotpy_env.sh +++ b/internal/scripts/docker_env/scripts/metplotpy_env.sh @@ -33,4 +33,4 @@ BASE_ENV=metplus_base.${METPLUS_VERSION} mamba create -y --clone ${BASE_ENV} --name ${ENV_NAME} -mamba install -y --name ${ENV_NAME} -c conda-forge matplotlib==3.6.3 scipy==1.9.3 plotly==5.13.0 xarray==2023.1.0 netcdf4==1.6.2 pyyaml==6.0 python-kaleido==0.2.1 imageio==2.25.0 imutils==0.5.4 scikit-image pint==0.20.1 metpy cartopy==0.21.1 +mamba install -y --name ${ENV_NAME} -c conda-forge matplotlib==3.6.3 scipy>=1.11.1 plotly==5.13.0 xarray==2023.1.0 netcdf4==1.6.2 pyyaml==6.0 python-kaleido>=0.2.1 imageio==2.25.0 imutils==0.5.4 scikit-image==0.19.3 pint==0.20.1 metpy==1.4.0 cartopy==0.21.1 From bfd932d8ee43778613e45828400394f112dda6f5 Mon Sep 17 00:00:00 2001 From: George McCabe <23407799+georgemccabe@users.noreply.github.com> Date: Tue, 24 Sep 2024 09:53:51 -0600 Subject: [PATCH 06/95] Feature #2622 forecast climatology (#2696) * remove support for old environment variables CLIMO_MEAN_FILE and CLIMO_STDEV_FILE that were used in METplus versions earlier than v4.0.0 * fixed SonarQube bugs * add better error checking for DockerHub queries that occasionally fail in GHA * update pycharm files * per #2622, support setting climo_mean and climo_stdev dictionary values inside fcst and obs dictionaries for grid_stat, ensemble_stat, point_stat, and series_analysis. 
Quietly add support for setting climo dictionaries inside the ens dictionary for gen_ens_prod -- this support is not needed because there is only 1 dictionary that may contain the climo dicts, but adding support for completeness so it can be assumed that {APP_NAME}_{DICT}_{SUBDICT}_{VAR} is supported, e.g. GEN_ENS_PROD_ENS_CLIMO_MEAN_REGRID_METHOD * turn on use cases that started failing from MET changes * turn off all use cases from push events * reorder assert so that expected vs. actual in PyCharm diff are accurate * remove commented code * added unit tests for climo VAR variables that were missing * add VAR variables used to specify climo_mean/stdev field values --- .github/jobs/docker_utils.py | 7 +- .github/parm/use_case_groups.json | 4 +- .idea/METplus.iml | 7 +- .idea/modules.xml | 8 + docs/Users_Guide/glossary.rst | 960 ++++++++++++++++++ docs/Users_Guide/wrappers.rst | 648 +++++++++++- internal/tests/pytests/conftest.py | 8 +- .../util/met_config/test_met_config.py | 3 +- .../test_ensemble_stat_wrapper.py | 192 ++++ .../gen_ens_prod/test_gen_ens_prod_wrapper.py | 97 ++ .../grid_stat/test_grid_stat_wrapper.py | 193 ++++ .../point_stat/test_point_stat_wrapper.py | 193 ++++ .../series_analysis/test_series_analysis.py | 193 ++++ internal/tests/use_cases/test_use_cases.py | 5 +- metplus/util/met_config.py | 91 +- metplus/wrappers/command_builder.py | 7 +- metplus/wrappers/ensemble_stat_wrapper.py | 6 +- metplus/wrappers/gen_ens_prod_wrapper.py | 4 +- metplus/wrappers/grid_stat_wrapper.py | 4 + metplus/wrappers/point_stat_wrapper.py | 4 + metplus/wrappers/series_analysis_wrapper.py | 4 + parm/met_config/EnsembleStatConfig_wrapped | 7 +- parm/met_config/GenEnsProdConfig_wrapped | 3 + parm/met_config/GridStatConfig_wrapped | 4 + parm/met_config/PointStatConfig_wrapped | 4 + parm/met_config/SeriesAnalysisConfig_wrapped | 4 + .../EnsembleStat/EnsembleStat.conf | 41 + .../met_tool_wrapper/GridStat/GridStat.conf | 39 + .../met_tool_wrapper/PointStat/PointStat.conf | 61 ++ .../SeriesAnalysis/SeriesAnalysis.conf | 40 + .../read_aomlcable_rtofs_transport.py | 1 - ...ast_read-in_CFSv2_categoricalthresholds.py | 1 - ...read-in_CFSv2_categoricalthresholds_obs.py | 1 - 33 files changed, 2787 insertions(+), 57 deletions(-) create mode 100644 .idea/modules.xml diff --git a/.github/jobs/docker_utils.py b/.github/jobs/docker_utils.py index b5a102d99d..65c412456f 100644 --- a/.github/jobs/docker_utils.py +++ b/.github/jobs/docker_utils.py @@ -41,8 +41,11 @@ def docker_get_volumes_last_updated(current_branch): dockerhub_url = get_dockerhub_url(current_branch) dockerhub_request = requests.get(dockerhub_url, timeout=90) if dockerhub_request.status_code != 200: - print(f"Could not find DockerHub URL: {dockerhub_url}") - return None + print(f"Retrying DockerHub request: {dockerhub_url}") + dockerhub_request = requests.get(dockerhub_url, timeout=60) + if dockerhub_request.status_code != 200: + print(f"Could not query DockerHub URL: {dockerhub_url}") + return None # get version number to search for if main_vX.Y branch if current_branch.startswith('main_v'): diff --git a/.github/parm/use_case_groups.json b/.github/parm/use_case_groups.json index 549f8cbc85..e53d706aad 100644 --- a/.github/parm/use_case_groups.json +++ b/.github/parm/use_case_groups.json @@ -198,12 +198,12 @@ { "category": "s2s_mid_lat", "index_list": "0-2", - "run": true + "run": false }, { "category": "s2s_mid_lat", "index_list": "3", - "run": true + "run": false }, { "category": "s2s_mjo", diff --git a/.idea/METplus.iml b/.idea/METplus.iml 
index aad402c4e5..5fdd65ba2a 100644
--- a/.idea/METplus.iml
+++ b/.idea/METplus.iml
@@ -1,5 +1,10 @@
[.iml XML hunk body lost in extraction]
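As a closing note on the `docker_utils.py` change above: it is a single manual retry around an occasionally flaky HTTP query. A generic sketch of that pattern (the function name, URL handling, and timeout values are placeholders, not the project's API):

```
import requests

def get_with_one_retry(url, first_timeout=90, retry_timeout=60):
    """Try once; retry once on a non-200 response; otherwise give up."""
    response = requests.get(url, timeout=first_timeout)
    if response.status_code == 200:
        return response
    print(f"Retrying request: {url}")
    response = requests.get(url, timeout=retry_timeout)
    if response.status_code == 200:
        return response
    print(f"Could not query URL: {url}")
    return None
```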