diff --git a/.github/scripts/ghcr-prune.py b/.github/scripts/ghcr-prune.py
index 87b5db7e112..37c538bd621 100644
--- a/.github/scripts/ghcr-prune.py
+++ b/.github/scripts/ghcr-prune.py
@@ -6,6 +6,11 @@
from datetime import datetime
from datetime import timedelta
+
+class GHCRPruneError(Exception):
+ pass
+
+
description = """
This script can be used to prune container images hosted on ghcr.io.\n
@@ -16,17 +21,28 @@
You can filter containers by any combination of name, age, and untagged.
"""
-parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter)
+parser = argparse.ArgumentParser(
+ description=description, formatter_class=argparse.RawTextHelpFormatter
+)
parser.add_argument("--token", required=True, help='GitHub token with "repo" scope')
parser.add_argument("--org", required=True, help="Organization name")
parser.add_argument("--name", required=True, help="Package name")
parser.add_argument(
- "--age", type=int, help="Filter versions by age, removing anything older than"
+ "--age",
+ type=int,
+ help="Filter versions by age, removing anything older than",
+ default=7,
)
parser.add_argument(
"--filter", help="Filter which versions are consider for pruning", default=".*"
)
+parser.add_argument(
+ "--filter-pr",
+ action="store_true",
+ help="Filter pull requests, will skip removal if pull request is still open.",
+)
+parser.add_argument("--pr-prefix", default="pr-", help="Prefix for a pull request tag")
parser.add_argument("--untagged", action="store_true", help="Prune untagged versions")
parser.add_argument(
"--dry-run", action="store_true", help="Does not actually delete anything"
@@ -43,6 +59,8 @@
logger = logging.getLogger("ghcr-prune")
+logger.debug(f"Running with arguments:\n{kwargs}")
+
class GitHubPaginate:
"""Iterator for GitHub API.
@@ -51,13 +69,19 @@ class GitHubPaginate:
https://docs.github.com/en/rest/using-the-rest-api/using-pagination-in-the-rest-api?apiVersion=2022-11-28
"""
- def __init__(self, token, org, name, age, filter, untagged, **_):
+
+ def __init__(
+ self, token, org, name, age, filter, untagged, filter_pr, pr_prefix, **_
+ ):
self.token = token
self.session = None
self.url = (
f"https://api.github.com/orgs/{org}/packages/container/{name}/versions"
)
+ self.pr_url = f"https://api.github.com/repos/{org}/{name}/pulls"
self.expired = datetime.now() - timedelta(days=age)
+ self.filter_pr = filter_pr
+ self.pr_prefix = pr_prefix
self.filter = re.compile(filter)
self.page = None
self.untagged = untagged
@@ -72,12 +96,27 @@ def create_session(self):
}
)
+ def is_pr_open(self, pr_number):
+ logger.info(f"Checking if PR {pr_number} is still open")
+
+ pr_url = f"{self.pr_url}/{pr_number}"
+
+ response = self.session.get(pr_url)
+
+ response.raise_for_status()
+
+ data = response.json()
+
+ state = data["state"]
+
+ return state == "open"
+
def grab_page(self):
if self.session is None:
- raise Exception("Must create session first")
+ raise GHCRPruneError("Must create session first")
if self.url is None:
- raise Exception("No more pages")
+ raise GHCRPruneError("No more pages")
response = self.session.get(self.url)
@@ -90,7 +129,7 @@ def grab_page(self):
if remaining <= 0:
reset = response.headers["X-RateLimit-Reset"]
- raise Exception(f"Hit ratelimit will reset at {reset}")
+ raise GHCRPruneError(f"Hit ratelimit will reset at {reset}")
try:
self.url = self.get_next_url(response.headers["Link"])
@@ -114,35 +153,60 @@ def filter_results(self, data):
logger.info(f"Processing {len(data)} containers")
+ logger.info(f"Expiration date set to {self.expired}")
+
for x in data:
url = x["url"]
updated_at = datetime.strptime(x["updated_at"], "%Y-%m-%dT%H:%M:%SZ")
logger.debug(f"Processing\n{json.dumps(x, indent=2)}")
- try:
- tag = x["metadata"]["container"]["tags"][0]
- except IndexError:
+ tags = x["metadata"]["container"]["tags"]
+
+ if len(tags) == 0:
logger.info(f'Found untagged version {x["id"]}')
if self.untagged:
+ logger.info(f'Pruning version {x["id"]}')
+
results.append(url)
continue
- if not self.filter.match(tag):
- logger.info(f"Skipping {tag}, did not match filter")
+ # Any tag that is still valid will cause a package version to not be removed
+ remove_package_version = True
- continue
+ for tag in tags:
+ if self.filter_pr and tag.startswith(self.pr_prefix):
+ pr_number = tag[len(self.pr_prefix) :]
+
+ if self.is_pr_open(pr_number):
+ logger.info(
+ f"Skipping package version {x['id']}, PR {pr_number} is still open"
+ )
- if updated_at < self.expired:
- logger.info(
- f"Pruning {tag}, updated at {updated_at}, expiration {self.expired}"
- )
+ remove_package_version = False
+
+ break
+ elif self.filter.match(tag) and updated_at > self.expired:
+ logger.info(
+ f"Skipping package version {x['id']}, tag {tag!r} matched but was updated at {updated_at}"
+ )
+
+ remove_package_version = False
+
+ break
+ else:
+ logger.info(f"Skipping package version {x['id']}, tag {tag!r}")
+
+ remove_package_version = False
+
+ break
+
+ if remove_package_version:
+ logger.info(f"Pruning package version {x['id']}")
results.append(url)
- else:
- logger.info(f"Skipping {tag}, more recent than {self.expired}")
return results
@@ -155,7 +219,7 @@ def __next__(self):
if self.page is None or len(self.page) == 0:
try:
self.page = self.grab_page()
- except Exception as e:
+ except GHCRPruneError as e:
logger.debug(f"StopIteration condition {e!r}")
raise StopIteration from None
@@ -181,7 +245,7 @@ def remove_container(self, url):
pager = GitHubPaginate(**kwargs)
for url in pager:
- if kwargs["dry_run"]:
- logger.info(f"Pruning {url}")
- else:
+ logger.info(f"Pruning {url}")
+
+ if not kwargs["dry_run"]:
pager.remove_container(url)
diff --git a/.github/workflows/ghcr-prune.yml b/.github/workflows/ghcr-prune.yml
index 5e26f83e3ce..ca4649170b0 100644
--- a/.github/workflows/ghcr-prune.yml
+++ b/.github/workflows/ghcr-prune.yml
@@ -1,11 +1,6 @@
name: Prune ghcr.io container images
on:
- schedule:
- # run once a day
- - cron: '0 2 * * *'
-
- # Temporary to test
- pull_request:
+ workflow_dispatch:
permissions: {}
@@ -13,6 +8,7 @@ jobs:
prune:
permissions:
packages: write
+ pull-requests: read
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
@@ -21,4 +17,4 @@ jobs:
pip install requests
# remove containers older than 14 days and only generated by testing workflow
- python .github/scripts/ghcr-prune.py --token ${{ secrets.GITHUB_TOKEN }} --org esmci --name cime --age 14 --filter sha- --untagged
+ python .github/scripts/ghcr-prune.py --token ${{ secrets.GITHUB_TOKEN }} --org esmci --name cime --age 14 --filter sha- --filter-pr --untagged
diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml
index 530c00db89e..81494fb57b6 100644
--- a/.github/workflows/testing.yml
+++ b/.github/workflows/testing.yml
@@ -34,6 +34,7 @@ permissions:
jobs:
build-containers:
runs-on: ubuntu-latest
+ if: ${{ github.event_name == 'push' || github.event.pull_request.head.repo.full_name == github.repository }}
permissions:
packages: write
steps:
@@ -56,6 +57,7 @@ jobs:
images: ghcr.io/ESMCI/cime
tags: |
type=raw,value=latest,enable=${{ github.event_name == 'push' }}
+ type=ref,event=pr,enable=${{ github.event_name == 'pull_request' }}
type=sha,format=long
- name: Build and push
uses: docker/build-push-action@v3
@@ -91,7 +93,7 @@ jobs:
if: ${{ github.event_name == 'pull_request' && always() && ! cancelled() }}
needs: build-containers
container:
- image: ghcr.io/esmci/cime:sha-${{ github.sha }}
+ image: ghcr.io/esmci/cime:${{ github.event.pull_request.head.repo.full_name == github.repository && format('sha-{0}', github.sha) || 'latest' }}
credentials:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
@@ -127,7 +129,7 @@ jobs:
if: ${{ github.event_name == 'pull_request' && always() && ! cancelled() }}
needs: build-containers
container:
- image: ghcr.io/esmci/cime:sha-${{ github.sha }}
+ image: ghcr.io/esmci/cime:${{ github.event.pull_request.head.repo.full_name == github.repository && format('sha-{0}', github.sha) || 'latest' }}
credentials:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
diff --git a/CIME/SystemTests/pgn.py b/CIME/SystemTests/pgn.py
index a597cc71f97..a8696c0afd2 100644
--- a/CIME/SystemTests/pgn.py
+++ b/CIME/SystemTests/pgn.py
@@ -45,7 +45,9 @@
]
)
FCLD_NC = "cam.h0.cloud.nc"
-INIT_COND_FILE_TEMPLATE = "20210915.v2.ne4_oQU240.F2010.{}.{}.0002-{:02d}-01-00000.nc"
+INIT_COND_FILE_TEMPLATE = (
+ "20240305.v3p0p0.F2010.ne4pg2_oQU480.chrysalis.{}.{}.0002-{:02d}-01-00000.nc"
+)
INSTANCE_FILE_TEMPLATE = "{}{}_{:04d}.h0.0001-01-01-00000{}.nc"
@@ -95,8 +97,8 @@ def build_phase(self, sharedlib_only=False, model_only=False):
logger.debug("PGN_INFO: Updating user_nl_* files")
csmdata_root = self._case.get_value("DIN_LOC_ROOT")
- csmdata_atm = os.path.join(csmdata_root, "atm/cam/inic/homme/ne4_v2_init")
- csmdata_lnd = os.path.join(csmdata_root, "lnd/clm2/initdata/ne4_oQU240_v2_init")
+ csmdata_atm = os.path.join(csmdata_root, "atm/cam/inic/homme/ne4pg2_v3_init")
+ csmdata_lnd = os.path.join(csmdata_root, "lnd/clm2/initdata/ne4pg2_v3_init")
iinst = 1
for icond in range(1, NUMBER_INITIAL_CONDITIONS + 1):
@@ -234,11 +236,10 @@ def _compare_baseline(self):
viewing = (
"{}\n"
" EVV viewing instructions can be found at: "
- " https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/"
+ " https://github.com/ESMCI/CIME/blob/master/scripts/"
"climate_reproducibility/README.md#test-passfail-and-extended-output"
"".format(evv_out_dir)
)
-
comments = (
"{} {} for test '{}'.\n"
" {}\n"
diff --git a/CIME/SystemTests/tsc.py b/CIME/SystemTests/tsc.py
index 3ecaefe75d0..f50fd4c334b 100644
--- a/CIME/SystemTests/tsc.py
+++ b/CIME/SystemTests/tsc.py
@@ -32,7 +32,9 @@
SIM_LENGTH = 600 # seconds
OUT_FREQ = 10 # seconds
INSPECT_AT = [300, 450, 600] # seconds
-INIT_COND_FILE_TEMPLATE = "20210915.v2.ne4_oQU240.F2010.{}.{}.0002-{:02d}-01-00000.nc"
+INIT_COND_FILE_TEMPLATE = (
+ "20240305.v3p0p0.F2010.ne4pg2_oQU480.chrysalis.{}.{}.0002-{:02d}-01-00000.nc"
+)
VAR_LIST = [
"T",
"Q",
@@ -100,8 +102,8 @@ def _run_with_specified_dtime(self, dtime=2):
self._case.set_value("STOP_OPTION", "nsteps")
csmdata_root = self._case.get_value("DIN_LOC_ROOT")
- csmdata_atm = os.path.join(csmdata_root, "atm/cam/inic/homme/ne4_v2_init")
- csmdata_lnd = os.path.join(csmdata_root, "lnd/clm2/initdata/ne4_oQU240_v2_init")
+ csmdata_atm = os.path.join(csmdata_root, "atm/cam/inic/homme/ne4pg2_v3_init")
+ csmdata_lnd = os.path.join(csmdata_root, "lnd/clm2/initdata/ne4pg2_v3_init")
nstep_output = OUT_FREQ // dtime
for iinst in range(1, NINST + 1):
@@ -223,7 +225,7 @@ def _compare_baseline(self):
viewing = (
"{}\n"
" EVV viewing instructions can be found at: "
- " https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/"
+ " https://github.com/ESMCI/CIME/blob/master/scripts/"
"climate_reproducibility/README.md#test-passfail-and-extended-output"
"".format(evv_out_dir)
)
diff --git a/CIME/Tools/jenkins_generic_job b/CIME/Tools/jenkins_generic_job
index ec93bfca238..b02a5b69199 100755
--- a/CIME/Tools/jenkins_generic_job
+++ b/CIME/Tools/jenkins_generic_job
@@ -186,6 +186,12 @@ OR
help="Do not fail if there are namelist diffs",
)
+ parser.add_argument(
+ "--ignore-diffs",
+ action="store_true",
+ help="Do not fail if there are history diffs",
+ )
+
parser.add_argument(
"--save-timing",
action="store_true",
@@ -272,6 +278,7 @@ OR
args.check_memory,
args.ignore_memleak,
args.ignore_namelists,
+ args.ignore_diffs,
args.save_timing,
args.pes_file,
args.jenkins_id,
@@ -304,6 +311,7 @@ def _main_func(description):
check_memory,
ignore_memleak,
ignore_namelists,
+ ignore_diffs,
save_timing,
pes_file,
jenkins_id,
@@ -334,6 +342,7 @@ def _main_func(description):
check_memory,
ignore_memleak,
ignore_namelists,
+ ignore_diffs,
save_timing,
pes_file,
jenkins_id,
diff --git a/CIME/Tools/wait_for_tests b/CIME/Tools/wait_for_tests
index ffe29f2727d..c166061c99b 100755
--- a/CIME/Tools/wait_for_tests
+++ b/CIME/Tools/wait_for_tests
@@ -74,6 +74,12 @@ OR
help="Do not fail a test if the only problem is diffing namelists",
)
+ parser.add_argument(
+ "--ignore-diffs",
+ action="store_true",
+ help="Do not fail a test if the only problem is diffing history files",
+ )
+
parser.add_argument(
"--ignore-memleak",
action="store_true",
@@ -122,6 +128,7 @@ OR
args.check_throughput,
args.check_memory,
args.ignore_namelist_diffs,
+ args.ignore_diffs,
args.ignore_memleak,
args.cdash_build_name,
args.cdash_project,
@@ -142,6 +149,7 @@ def _main_func(description):
check_throughput,
check_memory,
ignore_namelist_diffs,
+ ignore_diffs,
ignore_memleak,
cdash_build_name,
cdash_project,
@@ -160,6 +168,7 @@ def _main_func(description):
check_throughput=check_throughput,
check_memory=check_memory,
ignore_namelists=ignore_namelist_diffs,
+ ignore_diffs=ignore_diffs,
ignore_memleak=ignore_memleak,
cdash_build_name=cdash_build_name,
cdash_project=cdash_project,
diff --git a/CIME/XML/machines.py b/CIME/XML/machines.py
index e3a047d25de..b5c10332acb 100644
--- a/CIME/XML/machines.py
+++ b/CIME/XML/machines.py
@@ -263,6 +263,7 @@ def _probe_machine_name_one_guess_v3(self, nametomatch):
children = [y for x in nodes for y in self.get_children(root=x)]
+ machine = None
for child in children:
machtocheck = self.get(child, "MACH")
regex_str = self.text(child)
diff --git a/CIME/XML/test_reporter.py b/CIME/XML/test_reporter.py
index 3ef87c957a4..93f117a8a46 100644
--- a/CIME/XML/test_reporter.py
+++ b/CIME/XML/test_reporter.py
@@ -76,7 +76,7 @@ def push2testdb(self):
os.system("stty echo")
print()
params = {"username": username, "password": password, "testXML": xmlstr}
- url = "https://csegweb.cgd.ucar.edu/testdb/cgi-bin/processXMLtest.cgi"
+ url = "https://cseg.cgd.ucar.edu/testdb/cgi-bin/processXMLtest.cgi"
data = urllib.parse.urlencode(params)
data = data.encode("ascii")
req = urllib.request.Request(url, data)
diff --git a/CIME/bless_test_results.py b/CIME/bless_test_results.py
index 0502c541d3a..750b3a13870 100644
--- a/CIME/bless_test_results.py
+++ b/CIME/bless_test_results.py
@@ -216,7 +216,17 @@ def bless_test_results(
bless_perf=False,
**_, # Capture all for extra
):
- bless_all = not (namelists_only | hist_only | bless_tput | bless_mem | bless_perf)
+ if bless_perf:
+ bless_mem = True
+ bless_tput = True
+
+ bless_all_non_perf = not (namelists_only | hist_only | bless_tput | bless_mem)
+ is_perf_bless = bless_mem or bless_tput
+
+ expect(
+ not (is_perf_bless and hist_only) and not (is_perf_bless and namelists_only),
+ "Do not mix performance and non-performance blesses",
+ )
test_status_files = get_test_status_files(test_root, compiler, test_id=test_id)
@@ -284,12 +294,13 @@ def bless_test_results(
overall_result, phase = ts.get_overall_test_status(
ignore_namelists=True,
ignore_memleak=True,
- check_throughput=False,
- check_memory=False,
+ ignore_diffs=is_perf_bless,
+ check_throughput=bless_tput,
+ check_memory=bless_mem,
)
# See if we need to bless namelist
- if namelists_only or bless_all:
+ if namelists_only or bless_all_non_perf:
if no_skip_pass:
nl_bless = True
else:
@@ -301,25 +312,18 @@ def bless_test_results(
# Skip if test is build only i.e. testopts contains "B"
if not build_only:
- bless_needed = is_bless_needed(
+ hist_bless = is_hist_bless_needed(
test_name, ts, broken_blesses, overall_result, no_skip_pass, phase
+ ) and (hist_only or bless_all_non_perf)
+ tput_bless = (
+ bless_tput and ts.get_status(THROUGHPUT_PHASE) != TEST_PASS_STATUS
)
+ mem_bless = bless_mem and ts.get_status(MEMCOMP_PHASE) != TEST_PASS_STATUS
- # See if we need to bless baselines
- if hist_only or bless_all:
- hist_bless = bless_needed
-
- if bless_tput or bless_perf:
- tput_bless = bless_needed
-
- if not tput_bless:
- tput_bless = ts.get_status(THROUGHPUT_PHASE) != TEST_PASS_STATUS
-
- if bless_mem or bless_perf:
- mem_bless = bless_needed
-
- if not mem_bless:
- mem_bless = ts.get_status(MEMCOMP_PHASE) != TEST_PASS_STATUS
+ expect(
+ not ((nl_bless or hist_bless) and (tput_bless or mem_bless)),
+ "Do not mix performance and non-performance blessing",
+ )
# Now, do the bless
if not nl_bless and not hist_bless and not tput_bless and not mem_bless:
@@ -462,7 +466,9 @@ def bless_test_results(
return success
-def is_bless_needed(test_name, ts, broken_blesses, overall_result, no_skip_pass, phase):
+def is_hist_bless_needed(
+ test_name, ts, broken_blesses, overall_result, no_skip_pass, phase
+):
needed = False
run_result = ts.get_status(RUN_PHASE)
diff --git a/CIME/case/case_run.py b/CIME/case/case_run.py
index 2e86e594a5d..dd49786350b 100644
--- a/CIME/case/case_run.py
+++ b/CIME/case/case_run.py
@@ -10,6 +10,8 @@
import shutil, time, sys, os, glob
+TERMINATION_TEXT = ("HAS ENDED", "END OF MODEL RUN", "SUCCESSFUL TERMINATION")
+
logger = logging.getLogger(__name__)
###############################################################################
@@ -314,9 +316,14 @@ def _post_run_check(case, lid):
cpl_logs.append(
os.path.join(rundir, file_prefix + "_%04d.log." % (inst + 1) + lid)
)
+ if driver == "nuopc" and comp_standalone:
+ cpl_logs.append(
+ os.path.join(rundir, "med_%04d.log." % (inst + 1) + lid)
+ )
else:
cpl_logs = [os.path.join(rundir, file_prefix + ".log." + lid)]
-
+ if driver == "nuopc" and comp_standalone:
+ cpl_logs.append(os.path.join(rundir, "med.log." + lid))
cpl_logfile = cpl_logs[0]
# find the last model.log and cpl.log
model_logfile = os.path.join(rundir, model + ".log." + lid)
@@ -331,13 +338,7 @@ def _post_run_check(case, lid):
break
with open(cpl_logfile, "r") as fd:
logfile = fd.read()
- if (
- comp_standalone
- and "HAS ENDED" in logfile
- or "END OF MODEL RUN" in logfile
- ):
- count_ok += 1
- elif not comp_standalone and "SUCCESSFUL TERMINATION" in logfile:
+ if any([x in logfile for x in TERMINATION_TEXT]):
count_ok += 1
if count_ok < cpl_ninst:
expect(False, "Model did not complete - see {} \n ".format(cpl_logfile))
diff --git a/CIME/case/case_setup.py b/CIME/case/case_setup.py
index a170d1bfddd..730a9911452 100644
--- a/CIME/case/case_setup.py
+++ b/CIME/case/case_setup.py
@@ -424,8 +424,10 @@ def _case_setup_impl(
)
if comp == "cam":
camroot = case.get_value("COMP_ROOT_DIR_ATM")
- if os.path.exists(os.path.join(camroot, "cam.case_setup.py")):
- logger.debug("Running cam.case_setup.py")
+ if os.path.exists(
+ os.path.join(camroot, "cime_config/cam.case_setup.py")
+ ):
+ logger.info("Running cam.case_setup.py")
run_cmd_no_fail(
"python {cam}/cime_config/cam.case_setup.py {cam} {case}".format(
cam=camroot, case=caseroot
diff --git a/CIME/case/case_submit.py b/CIME/case/case_submit.py
index 7893d2d3aae..e3f30654814 100644
--- a/CIME/case/case_submit.py
+++ b/CIME/case/case_submit.py
@@ -290,9 +290,9 @@ def check_case(self, skip_pnl=False, chksum=False):
self.check_lockedfiles()
if not skip_pnl:
self.create_namelists() # Must be called before check_all_input_data
+
logger.info("Checking that inputdata is available as part of case submission")
- if not self.get_value("TEST"):
- self.check_all_input_data(chksum=chksum)
+ self.check_all_input_data(chksum=chksum)
if self.get_value("COMP_WAV") == "ww":
# the ww3 buildnml has dependencies on inputdata so we must run it again
diff --git a/CIME/data/config/cesm/config_files.xml b/CIME/data/config/cesm/config_files.xml
index 75a9520dc72..e128a870af9 100644
--- a/CIME/data/config/cesm/config_files.xml
+++ b/CIME/data/config/cesm/config_files.xml
@@ -29,7 +29,6 @@
$SRCROOT/ccs_config/config_grids.xml
$SRCROOT/ccs_config/config_grids_nuopc.xml
- $SRCROOT/ccs_config/config_grids_mct.xml
case_last
env_case.xml
@@ -161,6 +160,7 @@
$SRCROOT/components/pop/
$SRCROOT/components/mom/
$SRCROOT/components/nemo/
+ $SRCROOT/components/blom/
$SRCROOT/components/cpl7/components/data_comps_$COMP_INTERFACE/docn
$SRCROOT/components/cdeps/docn
$SRCROOT/components/cpl7/components/stub_comps_$COMP_INTERFACE/socn
@@ -198,7 +198,8 @@
unset
$SRCROOT/components/cism/
- $SRCROOT/components/cpl7/components/data_comps_$COMP_INTERFACE/dglc
+ $SRCROOT/components/cpl7/components/data_comps_$COMP_INTERFACE/dglc
+ $SRCROOT/components/cdeps/dglc
$SRCROOT/components/cpl7/components/stub_comps_$COMP_INTERFACE/sglc
$CIMEROOT/CIME/non_py/src/components/stub_comps_$COMP_INTERFACE/sglc
$SRCROOT/components/cpl7/components/xcpl_comps_$COMP_INTERFACE/xglc
@@ -314,6 +315,8 @@
$COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml
$COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml
$COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml
+ $COMP_ROOT_DIR_OCN/cime_config/config_compsets.xml
+ $COMP_ROOT_DIR_WAV/cime_config/config_compsets.xml
case_last
env_case.xml
@@ -337,6 +340,8 @@
$COMP_ROOT_DIR_OCN/cime_config/config_pes.xml
$COMP_ROOT_DIR_OCN/cime_config/config_pes.xml
$COMP_ROOT_DIR_OCN/cime_config/config_pes.xml
+ $COMP_ROOT_DIR_OCN/cime_config/config_pes.xml
+ $COMP_ROOT_DIR_WAV/cime_config/config_pes.xml
case_last
env_case.xml
@@ -355,6 +360,7 @@
$COMP_ROOT_DIR_ICE/cime_config/config_archive.xml
$COMP_ROOT_DIR_LND/cime_config/config_archive.xml
$COMP_ROOT_DIR_OCN/cime_config/config_archive.xml
+ $COMP_ROOT_DIR_GLC/cime_config/config_archive.xml
$COMP_ROOT_DIR_WAV/cime_config/config_archive.xml
$COMP_ROOT_DIR_ATM/cime_config/config_archive.xml
@@ -366,6 +372,7 @@
$COMP_ROOT_DIR_OCN/cime_config/config_archive.xml
$COMP_ROOT_DIR_OCN/cime_config/config_archive.xml
$COMP_ROOT_DIR_OCN/cime_config/config_archive.xml
+ $COMP_ROOT_DIR_OCN/cime_config/config_archive.xml
$COMP_ROOT_DIR_ROF/cime_config/config_archive.xml
$COMP_ROOT_DIR_ROF/cime_config/config_archive.xml
$COMP_ROOT_DIR_ROF/cime_config/config_archive.xml
@@ -387,6 +394,7 @@
$COMP_ROOT_DIR_OCN/cime_config/SystemTests
$COMP_ROOT_DIR_OCN/cime_config/SystemTests
$COMP_ROOT_DIR_OCN/cime_config/SystemTests
+ $COMP_ROOT_DIR_OCN/cime_config/SystemTests
$COMP_ROOT_DIR_ICE/cime_config/SystemTests
$COMP_ROOT_DIR_ICE/cime_config/SystemTests
$COMP_ROOT_DIR_GLC/cime_config/SystemTests
@@ -414,6 +422,7 @@
$COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_pop.xml
$COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_mom.xml
$COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_nemo.xml
+ $COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_blom.xml
$COMP_ROOT_DIR_ROF/cime_config/testdefs/testlist_rtm.xml
$COMP_ROOT_DIR_ROF/cime_config/testdefs/testlist_mosart.xml
$COMP_ROOT_DIR_ROF/cime_config/testdefs/testlist_mizuRoute.xml
@@ -423,6 +432,7 @@
$SRCROOT/components/cdeps/dlnd/cime_config/testdefs/testlist_dlnd.xml
$SRCROOT/components/cdeps/docn/cime_config/testdefs/testlist_docn.xml
$SRCROOT/components/cdeps/drof/cime_config/testdefs/testlist_drof.xml
+ $SRCROOT/components/cdeps/dglc/cime_config/testdefs/testlist_dglc.xml
$SRCROOT/components/cdeps/dwav/cime_config/testdefs/testlist_dwav.xml
case_last
@@ -449,11 +459,13 @@
$COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs
$COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs
$COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs
+ $COMP_ROOT_DIR_OCN/cime_config/testdefs/testmods_dirs
$SRCROOT/components/cdeps/datm/cime_config/testdefs/testmods_dirs
$SRCROOT/components/cdeps/dice/cime_config/testdefs/testmods_dirs
$SRCROOT/components/cdeps/dlnd/cime_config/testdefs/testmods_dirs
$SRCROOT/components/cdeps/docn/cime_config/testdefs/testmods_dirs
$SRCROOT/components/cdeps/drof/cime_config/testdefs/testmods_dirs
+ $SRCROOT/components/cdeps/dglc/cime_config/testdefs/testmods_dirs
$SRCROOT/components/cdeps/dwav/cime_config/testdefs/testmods_dirs
case_last
@@ -479,6 +491,7 @@
$COMP_ROOT_DIR_OCN/cime_config/usermods_dirs
$COMP_ROOT_DIR_OCN/cime_config/usermods_dirs
$COMP_ROOT_DIR_OCN/cime_config/usermods_dirs
+ $COMP_ROOT_DIR_OCN/cime_config/usermods_dirs
case_last
env_case.xml
@@ -496,6 +509,7 @@
$COMP_ROOT_DIR_ICE/cime_config/namelist_definition_dice.xml
$COMP_ROOT_DIR_LND/cime_config/namelist_definition_dlnd.xml
$COMP_ROOT_DIR_OCN/cime_config/namelist_definition_docn.xml
+ $COMP_ROOT_DIR_GLC/cime_config/namelist_definition_dglc.xml
$COMP_ROOT_DIR_WAV/cime_config/namelist_definition_dwav.xml
@@ -508,6 +522,7 @@
$COMP_ROOT_DIR_OCN/bld/namelist_files/namelist_definition_pop.xml
$COMP_ROOT_DIR_OCN/bld/namelist_files/namelist_definition_mom.xml
$COMP_ROOT_DIR_OCN/bld/namelist_files/namelist_definition_nemo.xml
+ $COMP_ROOT_DIR_OCN/bld/namelist_files/namelist_definition_blom.xml
-->
$COMP_ROOT_DIR_LND/bld/namelist_files/namelist_definition_slim.xml
diff --git a/CIME/jenkins_generic_job.py b/CIME/jenkins_generic_job.py
index d68bc2b007c..8d99e5c5874 100644
--- a/CIME/jenkins_generic_job.py
+++ b/CIME/jenkins_generic_job.py
@@ -280,6 +280,7 @@ def jenkins_generic_job(
check_memory,
ignore_memleak,
ignore_namelists,
+ ignore_diffs,
save_timing,
pes_file,
jenkins_id,
@@ -423,6 +424,7 @@ def jenkins_generic_job(
check_throughput=check_throughput,
check_memory=check_memory,
ignore_namelists=ignore_namelists,
+ ignore_diffs=ignore_diffs,
ignore_memleak=ignore_memleak,
cdash_build_name=cdash_build_name,
cdash_project=cdash_project,
diff --git a/CIME/scripts/create_test.py b/CIME/scripts/create_test.py
index 65fcc03b359..07a10689b3c 100755
--- a/CIME/scripts/create_test.py
+++ b/CIME/scripts/create_test.py
@@ -392,6 +392,12 @@ def parse_command_line(args, description):
help="Do not fail if there namelist diffs",
)
+ parser.add_argument(
+ "--ignore-diffs",
+ action="store_true",
+ help="Do not fail if there history file diffs",
+ )
+
parser.add_argument(
"--ignore-memleak", action="store_true", help="Do not fail if there's a memleak"
)
@@ -761,6 +767,7 @@ def parse_command_line(args, description):
args.check_throughput,
args.check_memory,
args.ignore_namelists,
+ args.ignore_diffs,
args.ignore_memleak,
args.allow_pnl,
args.non_local,
@@ -921,6 +928,7 @@ def create_test(
check_throughput,
check_memory,
ignore_namelists,
+ ignore_diffs,
ignore_memleak,
allow_pnl,
non_local,
@@ -976,6 +984,7 @@ def create_test(
check_throughput=check_throughput,
check_memory=check_memory,
ignore_namelists=ignore_namelists,
+ ignore_diffs=ignore_diffs,
ignore_memleak=ignore_memleak,
)
@@ -1064,6 +1073,7 @@ def _main_func(description=None):
check_throughput,
check_memory,
ignore_namelists,
+ ignore_diffs,
ignore_memleak,
allow_pnl,
non_local,
@@ -1116,6 +1126,7 @@ def _main_func(description=None):
check_throughput,
check_memory,
ignore_namelists,
+ ignore_diffs,
ignore_memleak,
allow_pnl,
non_local,
diff --git a/CIME/test_scheduler.py b/CIME/test_scheduler.py
index 47119a09320..b8b12ae08d2 100644
--- a/CIME/test_scheduler.py
+++ b/CIME/test_scheduler.py
@@ -1429,6 +1429,7 @@ def run_tests(
check_throughput=False,
check_memory=False,
ignore_namelists=False,
+ ignore_diffs=False,
ignore_memleak=False,
):
###########################################################################
@@ -1484,6 +1485,7 @@ def run_tests(
check_throughput=check_throughput,
check_memory=check_memory,
ignore_namelists=ignore_namelists,
+ ignore_diffs=ignore_diffs,
ignore_memleak=ignore_memleak,
no_run=self._no_run,
expect_test_complete=expect_test_complete,
diff --git a/CIME/test_status.py b/CIME/test_status.py
index 5f306b7db0e..5f32486ab51 100644
--- a/CIME/test_status.py
+++ b/CIME/test_status.py
@@ -113,6 +113,7 @@ def _test_helper2(
check_throughput=False,
check_memory=False,
ignore_namelists=False,
+ ignore_diffs=False,
no_run=False,
no_perm=False,
):
@@ -127,6 +128,7 @@ def _test_helper2(
check_throughput=check_throughput,
check_memory=check_memory,
ignore_namelists=ignore_namelists,
+ ignore_diffs=ignore_diffs,
no_run=no_run,
)
if rv is not None and the_status != rv:
@@ -410,6 +412,7 @@ def _get_overall_status_based_on_phases(
check_throughput=False,
check_memory=False,
ignore_namelists=False,
+ ignore_diffs=False,
ignore_memleak=False,
no_run=False,
):
@@ -452,6 +455,7 @@ def _get_overall_status_based_on_phases(
(not check_throughput and phase == THROUGHPUT_PHASE)
or (not check_memory and phase == MEMCOMP_PHASE)
or (ignore_namelists and phase == NAMELIST_PHASE)
+ or (ignore_diffs and phase == BASELINE_PHASE)
or (ignore_memleak and phase == MEMLEAK_PHASE)
):
continue
@@ -493,6 +497,7 @@ def get_overall_test_status(
check_throughput=False,
check_memory=False,
ignore_namelists=False,
+ ignore_diffs=False,
ignore_memleak=False,
no_run=False,
):
@@ -527,6 +532,10 @@ def get_overall_test_status(
('FAIL', 'COMPARE_2')
>>> _test_helper2('FAIL ERS.foo.A BASELINE\nFAIL ERS.foo.A NLCOMP\nPASS ERS.foo.A COMPARE_2\nPASS ERS.foo.A RUN')
('DIFF', 'BASELINE')
+ >>> _test_helper2('FAIL ERS.foo.A BASELINE\nPASS ERS.foo.A NLCOMP\nPASS ERS.foo.A COMPARE_2\nPASS ERS.foo.A RUN', ignore_diffs=True)
+ ('PASS', 'RUN')
+ >>> _test_helper2('FAIL ERS.foo.A BASELINE\nFAIL ERS.foo.A NLCOMP\nPASS ERS.foo.A COMPARE_2\nPASS ERS.foo.A RUN', ignore_diffs=True)
+ ('NLFAIL', 'RUN')
>>> _test_helper2('FAIL ERS.foo.A BASELINE\nFAIL ERS.foo.A NLCOMP\nFAIL ERS.foo.A COMPARE_2\nPASS ERS.foo.A RUN')
('FAIL', 'COMPARE_2')
>>> _test_helper2('PEND ERS.foo.A COMPARE_2\nFAIL ERS.foo.A RUN')
@@ -585,6 +594,7 @@ def get_overall_test_status(
check_throughput=check_throughput,
check_memory=check_memory,
ignore_namelists=ignore_namelists,
+ ignore_diffs=ignore_diffs,
ignore_memleak=ignore_memleak,
no_run=no_run,
)
@@ -602,6 +612,7 @@ def get_overall_test_status(
check_throughput=check_throughput,
check_memory=check_memory,
ignore_namelists=ignore_namelists,
+ ignore_diffs=ignore_diffs,
ignore_memleak=ignore_memleak,
no_run=no_run,
)
diff --git a/CIME/tests/test_unit_bless_test_results.py b/CIME/tests/test_unit_bless_test_results.py
index 91315b30f8d..f6ebce94857 100644
--- a/CIME/tests/test_unit_bless_test_results.py
+++ b/CIME/tests/test_unit_bless_test_results.py
@@ -10,7 +10,7 @@
_bless_memory,
bless_history,
bless_namelists,
- is_bless_needed,
+ is_hist_bless_needed,
)
@@ -469,7 +469,7 @@ def test_bless_memory_only(
ts = TestStatus.return_value
ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu"
- ts.get_overall_test_status.return_value = ("PASS", "RUN")
+ ts.get_overall_test_status.return_value = ("DIFF", "MEMCOMP")
ts.get_status.side_effect = ["PASS", "PASS", "FAIL", "FAIL"]
case = Case.return_value.__enter__.return_value
@@ -508,7 +508,7 @@ def test_bless_throughput_only(
ts = TestStatus.return_value
ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu"
- ts.get_overall_test_status.return_value = ("PASS", "RUN")
+ ts.get_overall_test_status.return_value = ("DIFF", "TPUTCOMP")
ts.get_status.side_effect = ["PASS", "PASS", "FAIL", "FAIL"]
case = Case.return_value.__enter__.return_value
@@ -950,7 +950,7 @@ def test_is_bless_needed_no_skip_fail(self):
broken_blesses = []
- needed = is_bless_needed(
+ needed = is_hist_bless_needed(
"SMS.f19_g16.A", ts, broken_blesses, "PASS", True, "RUN"
)
@@ -965,7 +965,7 @@ def test_is_bless_needed_overall_fail(self):
broken_blesses = []
- needed = is_bless_needed(
+ needed = is_hist_bless_needed(
"SMS.f19_g16.A", ts, broken_blesses, "FAIL", False, "RUN"
)
@@ -978,7 +978,7 @@ def test_is_bless_needed_baseline_fail(self):
broken_blesses = []
- needed = is_bless_needed(
+ needed = is_hist_bless_needed(
"SMS.f19_g16.A", ts, broken_blesses, "PASS", False, "RUN"
)
@@ -993,7 +993,7 @@ def test_is_bless_needed_run_phase_fail(self):
broken_blesses = []
- needed = is_bless_needed(
+ needed = is_hist_bless_needed(
"SMS.f19_g16.A", ts, broken_blesses, "PASS", False, "RUN"
)
@@ -1006,7 +1006,7 @@ def test_is_bless_needed_no_run_phase(self):
broken_blesses = []
- needed = is_bless_needed(
+ needed = is_hist_bless_needed(
"SMS.f19_g16.A", ts, broken_blesses, "PASS", False, "RUN"
)
@@ -1019,7 +1019,7 @@ def test_is_bless_needed(self):
broken_blesses = []
- needed = is_bless_needed(
+ needed = is_hist_bless_needed(
"SMS.f19_g16.A", ts, broken_blesses, "PASS", False, "RUN"
)
diff --git a/CIME/tests/test_unit_case.py b/CIME/tests/test_unit_case.py
index e7f8c9a2ead..b14458a8dea 100755
--- a/CIME/tests/test_unit_case.py
+++ b/CIME/tests/test_unit_case.py
@@ -23,19 +23,11 @@ class TestCaseSubmit(unittest.TestCase):
def test_check_case(self):
case = mock.MagicMock()
# get_value arguments TEST, COMP_WAV, COMP_INTERFACE, BUILD_COMPLETE
- case.get_value.side_effect = [False, "", "", True]
+ case.get_value.side_effect = ["", "", True]
case_submit.check_case(case, chksum=True)
case.check_all_input_data.assert_called_with(chksum=True)
- def test_check_case_test(self):
- case = mock.MagicMock()
- # get_value arguments TEST, COMP_WAV, COMP_INTERFACE, BUILD_COMPLETE
- case.get_value.side_effect = [True, "", "", True]
- case_submit.check_case(case, chksum=True)
-
- case.check_all_input_data.assert_not_called()
-
@mock.patch("CIME.case.case_submit.lock_file")
@mock.patch("CIME.case.case_submit.unlock_file")
@mock.patch("os.path.basename")
diff --git a/CIME/tests/test_unit_case_run.py b/CIME/tests/test_unit_case_run.py
new file mode 100644
index 00000000000..8f188925d57
--- /dev/null
+++ b/CIME/tests/test_unit_case_run.py
@@ -0,0 +1,50 @@
+import unittest
+from unittest import mock
+
+from CIME.utils import CIMEError
+from CIME.case.case_run import TERMINATION_TEXT
+from CIME.case.case_run import _post_run_check
+
+
+def _case_post_run_check():
+ case = mock.MagicMock()
+
+ # RUNDIR, COMP_INTERFACE, COMP_CPL, COMP_ATM, COMP_OCN, MULTI_DRIVER
+ case.get_value.side_effect = ("/tmp/run", "mct", "cpl", "satm", "socn", False)
+
+ # COMP_CLASSES
+ case.get_values.return_value = ("CPL", "ATM", "OCN")
+
+ return case
+
+
+class TestCaseSubmit(unittest.TestCase):
+ @mock.patch("os.stat")
+ @mock.patch("os.path.isfile")
+ def test_post_run_check(self, isfile, stat):
+ isfile.return_value = True
+
+ stat.return_value.st_size = 1024
+
+ # no exceptions means success
+ for x in TERMINATION_TEXT:
+ case = _case_post_run_check()
+
+ with mock.patch("builtins.open", mock.mock_open(read_data=x)) as mock_file:
+ _post_run_check(case, "1234")
+
+ @mock.patch("os.stat")
+ @mock.patch("os.path.isfile")
+ def test_post_run_check_no_termination(self, isfile, stat):
+ isfile.return_value = True
+
+ stat.return_value.st_size = 1024
+
+ case = _case_post_run_check()
+
+ with self.assertRaises(CIMEError):
+ with mock.patch(
+ "builtins.open",
+ mock.mock_open(read_data="I DONT HAVE A TERMINATION MESSAGE"),
+ ) as mock_file:
+ _post_run_check(case, "1234")
diff --git a/CIME/wait_for_tests.py b/CIME/wait_for_tests.py
index 10f27c10767..a6e87eaac8c 100644
--- a/CIME/wait_for_tests.py
+++ b/CIME/wait_for_tests.py
@@ -573,6 +573,7 @@ def wait_for_test(
check_throughput,
check_memory,
ignore_namelists,
+ ignore_diffs,
ignore_memleak,
no_run,
):
@@ -607,6 +608,7 @@ def wait_for_test(
check_throughput=check_throughput,
check_memory=check_memory,
ignore_namelists=ignore_namelists,
+ ignore_diffs=ignore_diffs,
ignore_memleak=ignore_memleak,
)
@@ -649,6 +651,7 @@ def wait_for_tests_impl(
check_throughput=False,
check_memory=False,
ignore_namelists=False,
+ ignore_diffs=False,
ignore_memleak=False,
no_run=False,
):
@@ -665,6 +668,7 @@ def wait_for_tests_impl(
check_throughput,
check_memory,
ignore_namelists,
+ ignore_diffs,
ignore_memleak,
no_run,
),
@@ -717,6 +721,7 @@ def wait_for_tests(
check_throughput=False,
check_memory=False,
ignore_namelists=False,
+ ignore_diffs=False,
ignore_memleak=False,
cdash_build_name=None,
cdash_project=E3SM_MAIN_CDASH,
@@ -739,6 +744,7 @@ def wait_for_tests(
check_throughput,
check_memory,
ignore_namelists,
+ ignore_diffs,
ignore_memleak,
no_run,
)
diff --git a/docker/Dockerfile b/docker/Dockerfile
index a148d921d4c..341ea4c575f 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -111,6 +111,9 @@ WORKDIR /src/cime
COPY .cime /root/.cime
COPY entrypoint.sh /entrypoint.sh
+# TODO: REMOVE trigger
+ENV TRIGGER=0
+
ENTRYPOINT [ "/entrypoint.sh" ]
FROM base as slurm
diff --git a/scripts/climate_reproducibility/README.md b/scripts/climate_reproducibility/README.md
index c45862e8b8f..38640369b38 100644
--- a/scripts/climate_reproducibility/README.md
+++ b/scripts/climate_reproducibility/README.md
@@ -51,11 +51,11 @@ Primarily, the statistical analysis of the climates is done through [EVV](https:
which will generate a portable test website to describe the results (pass or fail) in detail (see the extended output
section below).
-For E3SM supported machines, the `e3sm_simple` conda environment is provided for these tests and includes the `EVV`
-conda package. You can activate the `e3sm_simple` environment in the same way as `e3sm_unified` environment:
+For E3SM supported machines, the `cime_env` conda environment is provided for these tests and includes the `EVV`
+conda package. You can activate the `cime_env` environment in the same way as `e3sm_unified` environment:
```
-source <conda_env_path>/load_latest_e3sm_simple.sh
+source <conda_env_path>/load_latest_cime_env.sh
```
where `<conda_env_path>` is the machine-specific location of the activation script as described on this confluence page:
@@ -63,52 +63,52 @@ where `<conda_env_path>` is the machine-specific location of the activation scrip
https://acme-climate.atlassian.net/wiki/spaces/EIDMG/pages/780271950/Diagnostics+and+Analysis+Quickstart#DiagnosticsandAnalysisQuickstart-Accessingmetapackagesoftwarebyactivatingacondaenvironment
If you don't have access to confluence or are unable to activate this environment for whatever reason, you can install
-your own `e3sm_simple` conda environment with this command (once you have anaconda/miniconda installed):
+your own `cime_env` conda environment with this command (once you have anaconda/miniconda installed):
```
-conda create -n e3sm-simple -c conda-forge -c e3sm e3sm-simple
+conda create -n cime-env -c conda-forge -c e3sm cime-env
```
*NOTE: If you run into problems with getting this environment working on your machine, please open an issue on E3SM's
-Github and tag @jhkennedy, or send Joseph H. Kennedy an email.*
+Github and tag @mkstratos.*
-After you've activated the `e3sm_simple` environment, change to the `$E3SM/cime/scripts` directory (where `$E3SM` is the
+After you've activated the `cime_env` environment, change to the `$E3SM/cime/scripts` directory (where `$E3SM` is the
directory containing E3SM). Then to run one of the tests, you will use the `create_test` script like normal.
To run the `MVK` test and generate a baseline, you would run a command like:
```
-./create_test MVK_PL.ne4_oQU240.FC5AV1C-L -g --baseline-root "/PATH/TO/BASELINE"
+./create_test MVK_PS.ne4pg2_oQU480.F2010 -g --baseline-root "/PATH/TO/BASELINE"
```
And to compare to the baseline, you would run a command like:
```
-./create_test MVK_PL.ne4_oQU240.FC5AV1C-L -c --baseline-root "/PATH/TO/BASELINE"
+./create_test MVK_PS.ne4pg2_oQU480.F2010 -c --baseline-root "/PATH/TO/BASELINE"
```
-*NOTE: The MVK run a 20 member ensemble for at least 13 months (using the last 12 for the
+*NOTE: The MVK runs a 30 member ensemble for 13 months (using the last 12 for the
statistical tests) and, depending on the machine, may take some fiddling to execute within a particular
queue's wallclock time limit. You may want to over-ride the requested walltime using `--walltime HH:MM:SS`
option to `create_test`.*
-The full set of commands to run the MVK test used on Cori are:
+The full set of commands to run the MVK test used on Perlmutter are:
*Generate a baseline*
```
cd $E3SM/cime/scripts
-source /global/project/projectdirs/acme/software/anaconda_envs/load_latest_e3sm_simple.sh
+source /global/common/software/e3sm/anaconda_envs/load_latest_cime_env.sh
-./create_test MVK_PL.ne4_ne4.FC5AV1C-L --baseline-root "${CSCRATCH}/baselines" --project acme -g -o --walltime 01:00:00
+./create_test MVK_PS.ne4pg2_oQU480.F2010 --baseline-root "${PSCRATCH}/baselines" --project e3sm -g -o --walltime 01:00:00
```
*Compare to a baseline*
```
cd $E3SM/cime/scripts
-source /global/project/projectdirs/acme/software/anaconda_envs/load_latest_e3sm_simple.sh
+source /global/common/software/e3sm/anaconda_envs/load_latest_cime_env.sh
-./create_test MVK_PL.ne4_ne4.FC5AV1C-L --baseline-root "${CSCRATCH}/baselines" --project acme -c --walltime 01:00:00
+./create_test MVK_PS.ne4pg2_oQU480.F2010 --baseline-root "${PSCRATCH}/baselines" --project e3sm -c --walltime 01:00:00
```
## Test pass/fail and extended output
@@ -117,9 +117,9 @@ When you launch these tests and compare to a baseline, CIME will output the loca
something like this:
```
-# On cori-knl:
-./create_test MVK_PL.ne4_ne4.FC5AV1C-L --baseline-root "${CSCRATCH}/baselines" --project acme -c --walltime 01:00:00
- Creating test directory /global/cscratch1/sd/${USER}/acme_scratch/cori-knl/MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID
+# On pm-cpu:
+./create_test MVK_PS.ne4pg2_oQU480.F2010 --baseline-root "${PSCRATCH}/baselines" --project e3sm -c --walltime 01:00:00
+ Creating test directory ${PSCRATCH}/e3sm_scratch/pm-cpu/MVK_PS.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID
```
Let's call that directory `$CASE_DIR`. Once all the jobs are finished, navigate to that directory and
@@ -129,7 +129,7 @@ you can `cat TestStatus` to determine if the test passed or failed by looking at
cd $CASE_DIR
cat TestStatus
...
- PASS MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel BASELINE
+ PASS MVK_PS.ne4pg2_oQU480.F2010.pm-cpu_intel BASELINE
...
```
@@ -139,8 +139,8 @@ To get some basic summary statistics about the test that was run, look in the `T
```
2019-08-14 22:09:02: BASELINE PASS for test 'YYYYMMDD_HHMMSS_RANDOMID'.
Case: YYYYMMDD_HHMMSS_RANDOMID; Test status: pass; Variables analyzed: 118; Rejecting: 0; Critical value: 13; Ensembles: statistically identical
- EVV results can be viewed at: /global/cscratch1/sd/${USER}/acme_scratch/cori-knl/MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID/run/MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv/
- EVV viewing instructions can be found at: https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/climate_reproducibility/README.md#test-passfail-and-extended-output
+ EVV results can be viewed at: ${PSCRATCH}/e3sm_scratch/pm-cpu/MVK_PS.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID/run/MVK_PS.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv/
+ EVV viewing instructions can be found at: https://github.com/ESMCI/CIME/blob/master/scripts/climate_reproducibility/README.md#test-passfail-and-extended-output
```
EVV reports the location of the output website where you can see the details of the analysis. For
@@ -153,18 +153,18 @@ the website directory to your machine and view it using EVV.
### View via ssh
-For this example, we'll assume the tests were run on Cori at NERSC, but these instructions should be
-easily adaptable to any E3SM supported machine. First, log into Cori via ssh and connect your local
-8080 port to the 8080 port on Cori:
+For this example, we'll assume the tests were run on Perlmutter at NERSC, but these instructions should be
+easily adaptable to any E3SM supported machine. First, log into Perlmutter via ssh and connect your local
+8080 port to the 8080 port on Perlmutter:
```
-ssh -L 8080:localhost:8080 [USER]@cori.nersc.gov
+ssh -L 8080:localhost:8080 [USER]@saul-p1.nersc.gov
```
-Activate the `e3sm_simple` environment:
+Activate the `cime_env` environment:
```
-source /global/project/projectdirs/acme/software/anaconda_envs/load_latest_e3sm_simple.sh
+source /global/common/software/e3sm/anaconda_envs/load_latest_cime_env.sh
```
Navigate to the case's run directory:
@@ -176,7 +176,7 @@ pushd ${CASE_DIR}/run
Then, using EVV, serve the website over port 8080:
```
-evv -o PGN_P1x1.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv -s 8080
+evv -o PGN_P1x1.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv -s 8080
```
Evv will then report to you the URL where you can view the website:
@@ -194,17 +194,17 @@ Evv will then report to you the URL where you can view the website:
Extended Verification and Validation for Earth System Models
--------------------------------------------------------------------
- Current run: 2019-08-27 14:16:49
- User: kennedyj
- OS Type: Linux 4.12.14-150.27-default
- Machine: cori07
+ Current run: 2024-03-06 07:56:37
+ User: mek
+ OS Type: Linux 5.14.21-150400.24.81_12.0.87-cray_shasta_c
+ Machine: login31
Serving HTTP on 0.0.0.0 port 8080 (http://0.0.0.0:8080/)
View the generated website by navigating to:
- http://0.0.0.0:8080/PGN_P1x1.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv/index.html
+ http://0.0.0.0:8080/PGN_P1x1.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv/index.html
Exit by pressing `ctrl+c` to send a keyboard interrupt.
```
@@ -214,20 +214,20 @@ browser to view the output website.
### View a local copy
-For this example, we'll assume the tests were run on Cori at NERSC, but these instructions should be
-easily adaptable to any E3SM supported machine. Install `e3sm_simple` locally and activate it:
+For this example, we'll assume the tests were run on Perlmutter at NERSC, but these instructions should be
+easily adaptable to any E3SM supported machine. Install `cime_env` locally and activate it:
```
-conda create -n e3sm_simple -c conda-forge -c e3sm e3sm-simple
-conda activate e3sm_simple
+conda create -n cime_env -c conda-forge -c e3sm cime-env
+conda activate cime_env
```
Then, copy the website to your local machine, and view it:
```
# on your local machine
-scp -r /global/cscratch1/sd/${USER}/acme_scratch/cori-knl/MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID/run/MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv .
-evv -o MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv -s
+scp -r ${PSCRATCH}/e3sm_scratch/pm-cpu/MVK_PS.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID/run/MVK_PS.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv .
+evv -o MVK_PS.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv -s
--------------------------------------------------------------------
______ __ __ __ __
| ____| \ \ / / \ \ / /
@@ -249,7 +249,7 @@ evv -o MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv -s
View the generated website by navigating to:
- http://0.0.0.0:8000/MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv/index.html
+ http://0.0.0.0:8000/MVK_PS.ne4pg2_oQU480.F2010.pm-cpu_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv/index.html
Exit by pressing `ctrl+c` to send a keyboard interrupt.
@@ -262,6 +262,6 @@ browser to view the output website.
**Please note:** the output website uses some JavaScript to render elements of the page (especially figures),
and opening up the `index.html` file using the `file://` protocol in a web browser will likely not work
well (most browser have stopped allowing access to "local resources" like JavaScript through the `file://`
-protocol). You can view the website by either copying it to a hosted location (`~/WWW` which is hosted at
-`http://users.nccs.gov/~user` on Titan, for example) or copying it to your local machine and running a
+protocol). You can view the website by either copying it to a hosted location (`/global/cfs/projectdirs/e3sm/www/${USER}` which is hosted at
+`https://portal.nersc.gov/project/e3sm/${USER}` on NERSC, for example) or copying it to your local machine and running a
local http server (included in python!) and viewing it through an address like `http://0.0.0.0:8000/index.html`.