diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 27760ea62b..8c086f8b92 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -12,7 +12,7 @@ A clear and concise description of what the bug is. **To Reproduce** Steps to reproduce the behavior/error: -1. +1. Code example: ```python @@ -29,7 +29,7 @@ If applicable, add screenshots to help explain your problem. **System Information (please complete the following information):** - Operating system and version: [e.g. Ubuntu 22.04, macOS 14.3.1, Windows 10] - - Python version: [e.g. 3.10] + - Python version: [e.g. 3.10] (to obtain this information, run: python -c "import sys; print(sys.version)") **Additional context** diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index ee53282998..b1e66a5759 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,6 +1,6 @@ Changes proposed in this PR: -- -- +- +- This PR fixes # diff --git a/.github/scripts/make_release.py b/.github/scripts/make_release.py index 5c6260d4db..cdba6755ca 100644 --- a/.github/scripts/make_release.py +++ b/.github/scripts/make_release.py @@ -13,9 +13,9 @@ def get_version() -> str: """Return the current version number, based on the _version.py file.""" [version_file] = glob.glob("climada*/_version.py") - with open(version_file, 'r', encoding="UTF-8") as vfp: + with open(version_file, "r", encoding="UTF-8") as vfp: content = vfp.read() - regex = r'^__version__\s*=\s*[\'\"](.*)[\'\"]\s*$' + regex = r"^__version__\s*=\s*[\'\"](.*)[\'\"]\s*$" mtch = re.match(regex, content) return mtch.group(1) diff --git a/.github/scripts/prepare_release.py b/.github/scripts/prepare_release.py index bce483b6f8..eb0dd4c2b7 100644 --- a/.github/scripts/prepare_release.py +++ b/.github/scripts/prepare_release.py @@ -5,7 +5,7 @@ - update version numbers in _version.py and setup.py - purge the "Unreleased" section of CHANGELOG.md and rename it to the new version number -- copy the README.md file to doc/misc/README.md, +- copy the README.md file to doc/misc/README.md, but without the badges as they interfere with the sphinx doc builder All changes are immediately committed to the repository. 
@@ -38,28 +38,28 @@ def bump_version_number(version_number: str, level: str) -> str: """Return a copy of `version_number` with one level number incremented.""" major, minor, patch = version_number.split(".") if level == "major": - major = str(int(major)+1) + major = str(int(major) + 1) minor = "0" patch = "0" elif level == "minor": - minor = str(int(minor)+1) + minor = str(int(minor) + 1) patch = "0" elif level == "patch": - patch = str(int(patch)+1) + patch = str(int(patch) + 1) else: raise ValueError(f"level should be 'major', 'minor' or 'patch', not {level}") return ".".join([major, minor, patch]) def update_readme(_nvn): - """align doc/misc/README.md with ./README.md but remove the non-markdown header lines from """ - with open("README.md", 'r', encoding="UTF-8") as rmin: - lines = [line for line in rmin.readlines() if not line.startswith('[![')] + """align doc/misc/README.md with ./README.md but remove the non-markdown header lines""" + with open("README.md", "r", encoding="UTF-8") as rmin: + lines = [line for line in rmin.readlines() if not line.startswith("[![")] while not lines[0].strip(): lines = lines[1:] - with open("doc/misc/README.md", 'w', encoding="UTF-8") as rmout: + with open("doc/misc/README.md", "w", encoding="UTF-8") as rmout: rmout.writelines(lines) - return GitFile('doc/misc/README.md') + return GitFile("doc/misc/README.md") def update_changelog(nvn): @@ -70,16 +70,16 @@ def update_changelog(nvn): release = [] section_name = None section = [] - with open("CHANGELOG.md", 'r', encoding="UTF-8") as changelog: + with open("CHANGELOG.md", "r", encoding="UTF-8") as changelog: for line in changelog.readlines(): - if line.startswith('#'): - if line.startswith('### '): + if line.startswith("#"): + if line.startswith("### "): if section: release.append((section_name, section)) section_name = line[4:].strip() section = [] - #print("tag:", section_name) - elif line.startswith('## '): + # print("tag:", section_name) + elif line.startswith("## "): if section: release.append((section_name, section)) if release: @@ -88,7 +88,7 @@ release = [] section_name = None section = [] - #print("release:", release_name) + # print("release:", release_name) else: section.append(line) if section: @@ -96,7 +96,7 @@ if release: releases.append((release_name, release)) - with open("CHANGELOG.md", 'w', encoding="UTF-8") as changelog: + with open("CHANGELOG.md", "w", encoding="UTF-8") as changelog: changelog.write("# Changelog\n\n") for release_name, release in releases: if release_name: @@ -107,7 +107,11 @@ if any(ln.strip() for ln in section): if section_name: changelog.write(f"### {section_name}\n") - lines = [ln.strip() for ln in section if "code freeze date: " not in ln.lower()] + lines = [ + ln.strip() + for ln in section + if "code freeze date: " not in ln.lower() + ] if not section_name and release_name.lower() == nvn: print("setting date") for i, line in enumerate(lines): @@ -116,26 +120,26 @@ lines[i] = f"Release date: {today}" changelog.write(re.sub("\n+$", "\n", "\n".join(lines))) changelog.write("\n") - return GitFile('CHANGELOG.md') + return GitFile("CHANGELOG.md") def update_version(nvn): """Update the _version.py file""" [file_with_version] = glob.glob("climada*/_version.py") - regex = r'(^__version__\s*=\s*[\'\"]).*([\'\"]\s*$)' + regex = r"(^__version__\s*=\s*[\'\"]).*([\'\"]\s*$)" return update_file(file_with_version, regex, nvn) def update_setup(new_version_number): 
"""Update the setup.py file""" file_with_version = "setup.py" - regex = r'(^\s+version\s*=\s*[\'\"]).*([\'\"]\s*,\s*$)' + regex = r"(^\s+version\s*=\s*[\'\"]).*([\'\"]\s*,\s*$)" return update_file(file_with_version, regex, new_version_number) def update_file(file_with_version, regex, new_version_number): """Replace the version number(s) in a file, based on a rgular expression.""" - with open(file_with_version, 'r', encoding="UTF-8") as curf: + with open(file_with_version, "r", encoding="UTF-8") as curf: lines = curf.readlines() successfully_updated = False for i, line in enumerate(lines): @@ -145,14 +149,15 @@ def update_file(file_with_version, regex, new_version_number): successfully_updated = True if not successfully_updated: raise RuntimeError(f"cannot determine version of {file_with_version}") - with open(file_with_version, 'w', encoding="UTF-8") as newf: + with open(file_with_version, "w", encoding="UTF-8") as newf: for line in lines: newf.write(line) return GitFile(file_with_version) -class GitFile(): +class GitFile: """Helper class for `git add`.""" + def __init__(self, path): self.path = path @@ -166,8 +171,9 @@ def gitadd(self): ).stdout.decode("utf8") -class Git(): +class Git: """Helper class for `git commit`.""" + def __init__(self): _gitname = subprocess.run( ["git", "config", "--global", "user.name", "'climada'"], @@ -228,6 +234,7 @@ def prepare_new_release(level): if __name__ == "__main__": from sys import argv + try: LEVEL = argv[1] except IndexError: diff --git a/.github/scripts/setup_devbranch.py b/.github/scripts/setup_devbranch.py index 001390fa0c..36c9e6c78f 100644 --- a/.github/scripts/setup_devbranch.py +++ b/.github/scripts/setup_devbranch.py @@ -33,14 +33,15 @@ def get_last_version() -> str: def update_changelog(): """Insert a vanilla "Unreleased" section on top.""" - with open("CHANGELOG.md", 'r', encoding="UTF-8") as changelog: + with open("CHANGELOG.md", "r", encoding="UTF-8") as changelog: lines = changelog.readlines() if "## Unreleased" in lines: return - with open("CHANGELOG.md", 'w', encoding="UTF-8") as changelog: - changelog.write("""# Changelog + with open("CHANGELOG.md", "w", encoding="UTF-8") as changelog: + changelog.write( + """# Changelog ## Unreleased @@ -62,27 +63,28 @@ def update_changelog(): ### Removed -""") +""" + ) changelog.writelines(lines[2:]) def update_version(nvn): """Update the _version.py file""" [file_with_version] = glob.glob("climada*/_version.py") - regex = r'(^__version__\s*=\s*[\'\"]).*([\'\"]\s*$)' + regex = r"(^__version__\s*=\s*[\'\"]).*([\'\"]\s*$)" return update_file(file_with_version, regex, nvn) def update_setup(new_version_number): """Update the setup.py file""" file_with_version = "setup.py" - regex = r'(^\s+version\s*=\s*[\'\"]).*([\'\"]\s*,\s*$)' + regex = r"(^\s+version\s*=\s*[\'\"]).*([\'\"]\s*,\s*$)" return update_file(file_with_version, regex, new_version_number) def update_file(file_with_version, regex, new_version_number): """Replace the version number(s) in a file, based on a rgular expression.""" - with open(file_with_version, 'r', encoding="UTF-8") as curf: + with open(file_with_version, "r", encoding="UTF-8") as curf: lines = curf.readlines() successfully_updated = False for i, line in enumerate(lines): @@ -92,7 +94,7 @@ def update_file(file_with_version, regex, new_version_number): successfully_updated = True if not successfully_updated: raise RuntimeError(f"cannot determine version of {file_with_version}") - with open(file_with_version, 'w', encoding="UTF-8") as newf: + with open(file_with_version, "w", 
encoding="UTF-8") as newf: for line in lines: newf.write(line) @@ -100,10 +102,10 @@ def update_file(file_with_version, regex, new_version_number): def setup_devbranch(): """Adjust files after a release was published, i.e., apply the canonical deviations from main in develop. - + Just changes files, all `git` commands are in the setup_devbranch.sh file. """ - main_version = get_last_version().strip('v') + main_version = get_last_version().strip("v") semver = main_version.split(".") semver[-1] = f"{int(semver[-1]) + 1}-dev" dev_version = ".".join(semver) diff --git a/MANIFEST.in b/MANIFEST.in index 2c9965a945..fff806f537 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -4,4 +4,4 @@ graft climada/*/test/data graft climada/test/data graft data global-exclude .* -global-exclude *.py[co] \ No newline at end of file +global-exclude *.py[co] diff --git a/climada.conf b/climada.conf index 3d07e07ca4..367928405c 100644 --- a/climada.conf +++ b/climada.conf @@ -27,4 +27,4 @@ "supported_exposures_types": ["litpop", "crop_production", "base"] }, "log_level": "INFO" -} \ No newline at end of file +} diff --git a/climada/data/demo/demo_emdat_impact_data_2020.csv b/climada/data/demo/demo_emdat_impact_data_2020.csv index 55c72eaf4a..3cf4f5c85b 100644 --- a/climada/data/demo/demo_emdat_impact_data_2020.csv +++ b/climada/data/demo/demo_emdat_impact_data_2020.csv @@ -1073,4 +1073,4 @@ Dis No,Year,Seq,Disaster Group,Disaster Subgroup,Disaster Type,Disaster Subtype, 2020-0132-TON,2020,0132,Natural,Meteorological,Storm,Tropical cyclone,,Cyclone 'Harold',--,Tonga,TON,Polynesia,Oceania,"Tongatapu, 'Eua",,,,,,,,,Kph,,,,,2020,4,6,2020,4,9,,,1289,,1289,,,111000, 2020-0015-TUV,2020,0015,Natural,Meteorological,Storm,Tropical cyclone,,Cyclone 'Tino',Affected,Tuvalu,TUV,Polynesia,Oceania,,,,,,,Yes,,,Kph,,,,,2020,1,18,2020,1,18,,,,,,,,, 2020-0219-USA,2020,0219,Natural,Meteorological,Storm,Tropical cyclone,,Tropical storm 'Cristobal',Affected,United States of America (the),USA,Northern America,Americas,"errebonne, Plaquemines, Lafourche Parishes (Louisiana)",,,,,,Yes,,80,Kph,,,,,2020,6,7,2020,6,7,,,,,,,,, -2020-0132-VUT,2020,0132,Natural,Meteorological,Storm,Tropical cyclone,,Cyclone 'Harold',--,Vanuatu,VUT,Melanesia,Oceania,Pentecost and Espiritu Santo,,,,,,,,,Kph,,,,,2020,4,6,2020,4,9,4,,,,,,,, \ No newline at end of file +2020-0132-VUT,2020,0132,Natural,Meteorological,Storm,Tropical cyclone,,Cyclone 'Harold',--,Vanuatu,VUT,Melanesia,Oceania,Pentecost and Espiritu Santo,,,,,,,,,Kph,,,,,2020,4,6,2020,4,9,4,,,,,,,, diff --git a/climada/data/system/GDP_TWN_IMF_WEO_data.csv b/climada/data/system/GDP_TWN_IMF_WEO_data.csv index e0acd9898c..e39f4cb62c 100644 --- a/climada/data/system/GDP_TWN_IMF_WEO_data.csv +++ b/climada/data/system/GDP_TWN_IMF_WEO_data.csv @@ -3,4 +3,4 @@ TWN,Taiwan Province of China,"Gross domestic product, current prices",U.S. dolla TWN,Taiwan Province of China,"Gross domestic product, deflator",Index,,"See notes for: Gross domestic product, constant prices (National currency) Gross domestic product, current prices (National currency).",69.946,77.417,79.33,81.444,82.495,82.523,86.575,86.605,86.657,88.892,93.472,96.725,99.824,103.299,105.065,107.554,110.062,112.506,116.182,113.911,112.88,112.189,111.733,110.174,109.894,108.209,107.095,106.638,103.869,104.003,102.405,100,100.543,102.019,103.749,107.128,108.085,106.84,105.834,106.337,106.484,107.149,108.054,109.026,109.951,2018 TWN,Taiwan Province of China,"Gross domestic product per capita, current prices",U.S. 
dollars,Units,"See notes for: Gross domestic product, current prices (National currency) Population (Persons).","2,367.600","2,692.406","2,675.823","2,882.402","3,203.468","3,295.112","4,010.111","5,325.216","6,337.499","7,577.046","8,178.152","9,092.297","10,725.702","11,266.123","12,108.752","13,076.007","13,597.248","13,968.097","12,787.258","13,768.274","14,876.879","13,408.383","13,715.525","14,094.370","15,360.724","16,503.313","16,984.540","17,780.925","18,102.946","16,959.775","19,261.667","20,911.643","21,269.614","21,887.992","22,638.917","22,373.564","22,572.702","24,389.677","25,007.747","24,827.898","25,525.806","26,861.070","28,324.425","29,870.221","31,483.799",2018 ,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, -"International Monetary Fund, World Economic Outlook Database, October 2019",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, \ No newline at end of file +"International Monetary Fund, World Economic Outlook Database, October 2019",,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,, diff --git a/climada/data/system/WEALTH2GDP_factors_CRI_2016.csv b/climada/data/system/WEALTH2GDP_factors_CRI_2016.csv index f63f0453aa..8e8bb97c90 100644 --- a/climada/data/system/WEALTH2GDP_factors_CRI_2016.csv +++ b/climada/data/system/WEALTH2GDP_factors_CRI_2016.csv @@ -169,4 +169,4 @@ Venezuela,VEN,0.29407,0.35328 Vietnam,VNM,1.23241,1.66724 Yemen,YEM,1.18584,1.76063 Zambia,ZMB,0.10663,0.32193 -Zimbabwe,ZWE,0.20161,1.65566 \ No newline at end of file +Zimbabwe,ZWE,0.20161,1.65566 diff --git a/climada/engine/test/data/emdat_testdata_BGD_USA_1970-2017.csv b/climada/engine/test/data/emdat_testdata_BGD_USA_1970-2017.csv index 5ca0ec256d..00748e54a0 100644 --- a/climada/engine/test/data/emdat_testdata_BGD_USA_1970-2017.csv +++ b/climada/engine/test/data/emdat_testdata_BGD_USA_1970-2017.csv @@ -691,7 +691,7 @@ Start date,End date,Country,ISO,Location,Latitude,Longitude,Magnitude value,Magn 02.05.02,08.05.02,United States of America (the),USA,"Pike district (Kentucky province), Virginia province",,,,Km2,Flood,Riverine flood,--,--,9,1000,13000,0,,2002-0266 05.05.02,05.05.02,United States of America (the),USA,"Happy town (Randall, Swisher districts, Texas province)",,,,Kph,Storm,Convective storm,--,--,2,183,0,0,,2002-0283 21.04.02,21.04.02,United States of America (the),USA,"Wayne, Jefferson districts (Illinois province)",,,,Kph,Storm,Convective storm,--,--,1,12,4000,0,,2002-0287 -27.04.02,03.05.02,United States of America (the),USA,"Breckinridge, Meade, Crittenden, Webster, Hopkins, Ohio, Hardin, Edmonson districts (Kentucky province), Bollinger, Howell districts (Missouri province), Charles, Calvert, Dorchester, Wicomico, Cecil districts (Maryland province), +27.04.02,03.05.02,United States of America (the),USA,"Breckinridge, Meade, Crittenden, Webster, Hopkins, Ohio, Hardin, Edmonson districts (Kentucky province), Bollinger, Howell districts (Missouri province), Charles, Calvert, Dorchester, Wicomico, Cecil districts (Maryland province), Illinois (Clay,Union, Johnson,Pope, Moultrie, Saline, Bond), Gordon district (Georgia province), Atchison district (Kansas province), Erie, Allegany districts (New York province), Stark district (Ohio province), Indiana, Mercer, Venango, Butler, Armstrong, Columbia, Lebanon, Allegheny districts (Pennsylvania province), Rutherford, Lake, Henry, Carter districts (Tennessee province), Virginia (Shenandoah, Greensville, Bedford, (Campbell, Nottoway, Prince George), Marshall district (West Virginia province), Pontotoc, Chickasaw districts (Mississippi 
province), Perry district (Indiana province)",,,290,Kph,Storm,Convective storm,--,--,10,100,2200000,2000500,,2002-0310 /04/2002,/04/2002,United States of America (the),USA,Arizona province,,,145,Km2,Wildfire,"Land fire (Brush, Bush, Pasture)",--,--,0,0,0,0,,2002-0312 @@ -858,7 +858,7 @@ Virginia (Shenandoah, Greensville, Bedford, (Campbell, Nottoway, Prince George), 26.06.07,06.07.07,United States of America (the),USA,"Wichita Falls area (Wichita district, Texas province), Georgetown areas (Williamson district, Texas province), Burnet, Marble Falls, Granite Shoals areas (Burnet district, Texas province), Granbury area (Hood district, Texas province), Lampasas, Parker, Eastland districts (Texas province), Miami, Commerce areas (Ottawa district, Oklahoma province), Shawnee, Tecumseh, Maud areas (Pottawatomie district, Oklahoma province), Oklahoma city (Oklahoma district, Oklahoma province), Waurika area (Jefferson district, Oklahoma province), Bartlesville, Dewey areas (Washington district, Oklahoma province), Love, Lincoln districts (Oklahoma province), Coffeyville area (Montgomery district, Kansas province), Osawatomie area (Miami district, Kansas province), Allen, Labette, Neosho, Wilson, Woodson districts (Kansas province), Rockville, Papinville areas (Bates district, Missouri province), Vernon district (Missouri province)",32.84,-97.17,507800,Km2,Flood,Riverine flood,--,Rain,8,5000,0,0,,2007-0244 19.06.07,20.06.07,United States of America (the),USA,New York province,42.23,-74.95,6500,Km2,Flood,Flash flood,Rain,--,4,120,0,0,,2007-0251 17.06.07,22.06.07,United States of America (the),USA,"North Texas, Oklahoma provinces",33.45,-97.3,34750,Km2,Flood,Riverine flood,--,--,10,750,28000,0,,2007-0254 -21.07.07,03.08.07,Bangladesh,BGD,"Goalanda village (Goalandaghat area, Rajbari district, Dhaka province), Aricha port (Shibalaya area, Manikganj district, Dhaka province), Bhagyakul village (Sreenagar area, Munshiganj district, Dhaka province), +21.07.07,03.08.07,Bangladesh,BGD,"Goalanda village (Goalandaghat area, Rajbari district, Dhaka province), Aricha port (Shibalaya area, Manikganj district, Dhaka province), Bhagyakul village (Sreenagar area, Munshiganj district, Dhaka province), Bandarban, Feni, Comilla districts (Chittagong province), Sirajganj district (Rajshahi province), Rangpur province",23.92,91.23,7000,Km2,Flood,Riverine flood,"Slide (land, mud, snow, rock)",--,1110,13771380,100000,0,,2007-0311 24.06.07,02.07.07,United States of America (the),USA,"Alpine, Amador, Calaveras, El Dorado, Mono, Placer, Tuolumne districts (California province)",,,,Km2,Wildfire,Forest fire,--,--,0,768,0,150000,,2007-0351 @@ -980,7 +980,7 @@ Bandarban, Feni, Comilla districts (Chittagong province), Sirajganj district (Ra 22.01.12,23.01.12,United States of America (the),USA,"Jefferson, Chilton districts (Alabama province)",,,240,Kph,Storm,Convective storm,--,--,2,100,175000,200000,,2012-0010 28.02.12,29.02.12,United States of America (the),USA,"Nebraska, Kansas, Missouri, Illinois, Indiana, Kentucky provinces",,,270,Kph,Storm,Convective storm,--,--,14,200,500000,450000,,2012-0055 02.03.12,04.03.12,United States of America (the),USA,"Alabama, Tennessee, Illinois, Kentucky, Indiana, Ohio, Georgia, Florida, Mississippi, North Carolina, Virginia provinces",,,112,Kph,Storm,Convective storm,Flood,Hail,41,0,5000000,2500000,,2012-0060 -06.04.12,06.04.12,Bangladesh,BGD,"Panchagarh, Rangpur, Nilphamari districts (Rangpur province), Noakhali, Comilla districts (Chittagong province), Narsingdi, Jamalpur, Faridpur, 
Shariatpur districts (Dhaka province), Jessore, Satkhira, Khulna, Chuadanga districts (Khulna province), Rajshahi district (Rajshahi province), Sylhet district (Sylhet province), +06.04.12,06.04.12,Bangladesh,BGD,"Panchagarh, Rangpur, Nilphamari districts (Rangpur province), Noakhali, Comilla districts (Chittagong province), Narsingdi, Jamalpur, Faridpur, Shariatpur districts (Dhaka province), Jessore, Satkhira, Khulna, Chuadanga districts (Khulna province), Rajshahi district (Rajshahi province), Sylhet district (Sylhet province), Bhola district (Barisal province)",,,56,Kph,Storm,Convective storm,Hail,--,25,55121,0,0,,2012-0082 02.04.12,03.04.12,United States of America (the),USA,"Dallas, Tarrant districts (Texas province)",,,,Kph,Storm,Convective storm,--,--,0,3300,1550000,800000,,2012-0122 14.04.12,15.04.12,United States of America (the),USA,"Oklahoma, Kansas, Iowa, Nebraska, South Dakota, Minnesota provinces",,,,Kph,Storm,Convective storm,--,--,6,297,1800000,910000,,2012-0156 @@ -1165,4 +1165,4 @@ Wilkes, Ashe )",,,140,Kph,Storm,Tropical cyclone,--,--,0,60,250000,0,Tropical de 03.11.17,12.12.17,Bangladesh,BGD,Cox’s Bazar ,,,,Vaccinated,Epidemic,Bacterial disease,--,--,15,789,0,0,Diphteria,2017-0556 06.03.17,09.03.17,United States of America (the),USA,"Missouri (Oak Grove in Jackson County, Clay and Clinton (Trimble, Plattsburg, Lathrop) counties), Iowa (Centerville in Appanoose county, Muscatine), Minnesota (Sherburne, Freeborn counties, Lake Ann in Carver county), Kansas (Wabaunsee, Pottawatomie and Butler counties), Wisconsin, Arkansas, Oklahoma, Illinois, Mississipi, Michigan, New-York, Pennsylvania, Massachussets, Ohio, Nebraska, Indiana",,,130,Kph,Storm,Convective storm,Hail,--,2,615,2200000,2000000,,2017-0563 25.03.17,28.03.17,United States of America (the),USA,"Texas (Justin in Denton, Collin, Rockwall, Lubbock counties, Seymour in Baylor, Dallas – Fort Worth metro area, Houston metro area), Oklahoma (El Reno in Canadian, Oklahoma city metro region, Caddo in Bryan, Cleveland South and East), Kansas (south), Kentucky, Tennessee, Mississippi, Alabama, Georgia, Indianapolis (Marion-IN)",,,175,Kph,Storm,Convective storm,Hail,Flood,1,0,2700000,2000000,,2017-0564 -/03/2017,/09/2017,United States of America (the),USA,"Upper Midwest, Northern Rockies and parts of the West",,,,Km2,Drought,Drought,--,--,0,0,2500000,1900000,,2017-9550 \ No newline at end of file +/03/2017,/09/2017,United States of America (the),USA,"Upper Midwest, Northern Rockies and parts of the West",,,,Km2,Drought,Drought,--,--,0,0,2500000,1900000,,2017-9550 diff --git a/climada/engine/test/data/emdat_testdata_fake_2007-2011.csv b/climada/engine/test/data/emdat_testdata_fake_2007-2011.csv index 6826050a44..3d6242746c 100644 --- a/climada/engine/test/data/emdat_testdata_fake_2007-2011.csv +++ b/climada/engine/test/data/emdat_testdata_fake_2007-2011.csv @@ -4,4 +4,4 @@ Start date,End date,Country,ISO,Location,Latitude,Longitude,Magnitude value,Magn 15.01.09,26.01.09,Switzerland,CHE,Zurich,47.37,8.54,1,Km2,Flood,Riverine flood,--,--,0,0,2000,0,FakeFlood3,2009-0001 15.01.10,27.01.10,Switzerland,CHE,Zurich,47.37,8.54,1,Km2,Flood,Riverine flood,--,--,0,0,2000,0,FakeFlood4,2010-0001 15.01.11,28.01.11,Switzerland,CHE,Zurich,47.37,8.54,1,Km2,Flood,Riverine flood,--,--,0,0,2000,0,FakeFlood5,2011-0001 -15.01.11,28.01.11,Germany,DEU,Konstanz,22,22,2,Km2,Flood,Riverine flood,--,--,0,0,1000,0,FakeFlood5,2011-0001 \ No newline at end of file +15.01.11,28.01.11,Germany,DEU,Konstanz,22,22,2,Km2,Flood,Riverine 
flood,--,--,0,0,1000,0,FakeFlood5,2011-0001 diff --git a/climada/hazard/test/data/trac_short_test.csv b/climada/hazard/test/data/trac_short_test.csv index 79defb690d..bacbd8c993 100644 --- a/climada/hazard/test/data/trac_short_test.csv +++ b/climada/hazard/test/data/trac_short_test.csv @@ -7,4 +7,4 @@ cgps_lat,cgps_lon,data_provider,gen_basin,ibtracsID,isotime,model,msize,ngps_lat 12.3,-31,hurdat_atl,NA,1951239N12334,1951082812,H08,101,12.3,-32.3,1,-999,1010,-999,0.1,0,6,25 12.3,-32.3,hurdat_atl,NA,1951239N12334,1951082818,H08,101,12.3,-33.6,1,-999,1010,-999,0.1,0,6,25 12.3,-33.6,hurdat_atl,NA,1951239N12334,1951082900,H08,101,12.3,-34.9,1,-999,1010,-999,0.1,0,6,25 -12.3,-34.9,hurdat_atl,NA,1951239N12334,1951082906,H08,101,12.3,-36.3,1,-999,1010,-999,0.1,0,6,25 \ No newline at end of file +12.3,-34.9,hurdat_atl,NA,1951239N12334,1951082906,H08,101,12.3,-36.3,1,-999,1010,-999,0.1,0,6,25 diff --git a/doc/Makefile b/doc/Makefile index 0a8a51eba2..41c2d07bf0 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -9,7 +9,7 @@ PAPER = # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) ./ +ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) ./ .PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest diff --git a/doc/climada/climada.engine.rst b/doc/climada/climada.engine.rst index 91274418fa..f21024fdeb 100644 --- a/doc/climada/climada.engine.rst +++ b/doc/climada/climada.engine.rst @@ -52,4 +52,3 @@ climada\.engine\.impact\_data module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.disc_rates.rst b/doc/climada/climada.entity.disc_rates.rst index bc17051c65..4089561f02 100644 --- a/doc/climada/climada.entity.disc_rates.rst +++ b/doc/climada/climada.entity.disc_rates.rst @@ -8,4 +8,3 @@ climada\.entity\.disc\_rates\.base module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.exposures.litpop.rst b/doc/climada/climada.entity.exposures.litpop.rst index 9e65391b0b..62e233a063 100644 --- a/doc/climada/climada.entity.exposures.litpop.rst +++ b/doc/climada/climada.entity.exposures.litpop.rst @@ -24,4 +24,3 @@ climada\.entity\.exposures\.litpop\.nightlight module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.exposures.rst b/doc/climada/climada.entity.exposures.rst index 30f175d10c..952af75e85 100644 --- a/doc/climada/climada.entity.exposures.rst +++ b/doc/climada/climada.entity.exposures.rst @@ -12,4 +12,3 @@ climada\.entity\.exposures\.base module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.impact_funcs.rst b/doc/climada/climada.entity.impact_funcs.rst index 91f88ff77f..90ad9441b1 100644 --- a/doc/climada/climada.entity.impact_funcs.rst +++ b/doc/climada/climada.entity.impact_funcs.rst @@ -32,4 +32,3 @@ climada\.entity\.impact\_funcs\.trop\_cyclone module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.entity.measures.rst b/doc/climada/climada.entity.measures.rst index a7d16c650a..8e63a2082b 100644 --- a/doc/climada/climada.entity.measures.rst +++ b/doc/climada/climada.entity.measures.rst @@ -16,4 +16,3 @@ climada\.entity\.measures\.measure\_set module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.hazard.centroids.rst b/doc/climada/climada.hazard.centroids.rst index 8038d406ef..7a9c65a908 100644 --- 
a/doc/climada/climada.hazard.centroids.rst +++ b/doc/climada/climada.hazard.centroids.rst @@ -8,4 +8,3 @@ climada\.hazard\.centroids\.centr module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.hazard.rst b/doc/climada/climada.hazard.rst index 8e4767ae62..3b3bef00b4 100644 --- a/doc/climada/climada.hazard.rst +++ b/doc/climada/climada.hazard.rst @@ -69,4 +69,3 @@ climada\.hazard\.tc\_tracks\_synth module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.hazard.trop_cyclone.rst b/doc/climada/climada.hazard.trop_cyclone.rst index c703126ec1..caafdcd93a 100644 --- a/doc/climada/climada.hazard.trop_cyclone.rst +++ b/doc/climada/climada.hazard.trop_cyclone.rst @@ -16,4 +16,3 @@ climada\.hazard\.trop\_cyclone\.trop\_cyclone\_windfields module :members: :undoc-members: :show-inheritance: - diff --git a/doc/climada/climada.rst b/doc/climada/climada.rst index e248812bca..557532912f 100644 --- a/doc/climada/climada.rst +++ b/doc/climada/climada.rst @@ -8,4 +8,3 @@ Software documentation per package climada.entity climada.hazard climada.util - diff --git a/doc/climada/climada.util.rst b/doc/climada/climada.util.rst index 820fd43f7f..98df93aec1 100644 --- a/doc/climada/climada.util.rst +++ b/doc/climada/climada.util.rst @@ -152,4 +152,3 @@ climada\.util\.yearsets module :members: :undoc-members: :show-inheritance: - diff --git a/doc/conf.py b/doc/conf.py index 02e19ecc07..b4ef1dc69d 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -18,49 +18,52 @@ # is relative to the documentation root, use os.path.abspath to make it # absolute, like shown here. # sys.path.append(os.path.abspath('sphinxext')) -sys.path.insert(0, os.path.abspath('../')) +sys.path.insert(0, os.path.abspath("../")) # set version from climada import _version + __version__ = _version.__version__ # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['matplotlib.sphinxext.plot_directive', - 'IPython.sphinxext.ipython_directive', - 'IPython.sphinxext.ipython_console_highlighting', - 'sphinx.ext.mathjax', - 'sphinx.ext.autodoc', - 'sphinx.ext.doctest', - 'sphinx.ext.inheritance_diagram', - 'sphinx.ext.viewcode', - 'sphinx.ext.napoleon', - 'sphinx.ext.ifconfig', - 'myst_nb', - 'sphinx_markdown_tables', - 'readthedocs_ext.readthedocs',] +extensions = [ + "matplotlib.sphinxext.plot_directive", + "IPython.sphinxext.ipython_directive", + "IPython.sphinxext.ipython_console_highlighting", + "sphinx.ext.mathjax", + "sphinx.ext.autodoc", + "sphinx.ext.doctest", + "sphinx.ext.inheritance_diagram", + "sphinx.ext.viewcode", + "sphinx.ext.napoleon", + "sphinx.ext.ifconfig", + "myst_nb", + "sphinx_markdown_tables", + "readthedocs_ext.readthedocs", +] # read the docs version used for links -if 'dev' in __version__: - read_docs_url = 'en/latest/' +if "dev" in __version__: + read_docs_url = "en/latest/" else: - read_docs_url = 'en/v{}/'.format(__version__) + read_docs_url = "en/v{}/".format(__version__) # Add any paths that contain templates here, relative to this directory. templates_path = [] # The encoding of source files. -#source_encoding = 'utf-8' +# source_encoding = 'utf-8' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. 
-project = 'CLIMADA' -copyright = '2017, ETH Zurich' -author = 'CLIMADA contributors' +project = "CLIMADA" +copyright = "2017, ETH Zurich" +author = "CLIMADA contributors" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the @@ -73,45 +76,45 @@ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -language = 'en' +language = "en" # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. -#unused_docs = [] +# unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. # exclude_trees = [] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). -#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'test', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "test", "Thumbs.db", ".DS_Store"] # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True @@ -125,17 +128,17 @@ # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] +# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None +# html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. @@ -149,45 +152,45 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. 
-#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_use_modindex = True +# html_use_modindex = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = '' +# html_file_suffix = '' # Output file base name for HTML help builder. -htmlhelp_basename = 'climadadoc' +htmlhelp_basename = "climadadoc" # -- Options for LaTeX output -------------------------------------------------- @@ -195,47 +198,55 @@ latex_engine = "xelatex" # The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' +# latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' +# latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ - (master_doc, 'climada.tex', u'CLIMADA documentation', - u'CLIMADA contributors', 'manual'), + ( + master_doc, + "climada.tex", + "CLIMADA documentation", + "CLIMADA contributors", + "manual", + ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +# latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. -#latex_use_parts = False +# latex_use_parts = False # Additional stuff for the LaTeX preamble. -#latex_preamble = '' +# latex_preamble = '' # Documents to append as an appendix to all manuals. -#latex_appendices = [] +# latex_appendices = [] # If false, no module index is generated. 
-#latex_use_modindex = True +# latex_use_modindex = True + # ----------------------------------------------------------------------------- # show __init__ documentation def skip(app, what, name, obj, skip, options): - if (name == "__init__"): + if name == "__init__": return False return skip + # remove docstrings modules def remove_module_docstring(app, what, name, obj, options, lines): if what == "module": del lines[:] + autodoc_member_order = "bysource" # --- MYST Parser settings ---- @@ -260,13 +271,15 @@ def remove_module_docstring(app, what, name, obj, options, lines): # --- + def setup(app): app.connect("autodoc-skip-member", skip) app.connect("autodoc-process-docstring", remove_module_docstring) # Pass to the app if we are building this on ReadTheDocs - on_rtd = True if (os.environ.get('READTHEDOCS') == 'True') else False - app.add_config_value('readthedocs', on_rtd, 'env') + on_rtd = True if (os.environ.get("READTHEDOCS") == "True") else False + app.add_config_value("readthedocs", on_rtd, "env") + # improve parameters description napoleon_use_param = False diff --git a/doc/guide/Guide_Configuration.ipynb b/doc/guide/Guide_Configuration.ipynb index 50ffc35f2f..69056eba61 100644 --- a/doc/guide/Guide_Configuration.ipynb +++ b/doc/guide/Guide_Configuration.ipynb @@ -54,9 +54,9 @@ ], "source": [ "# suboptimal\n", - "my_dict = {'x': 4}\n", - "if my_dict['x'] > 3:\n", - " msg = 'well, arh, ...'\n", + "my_dict = {\"x\": 4}\n", + "if my_dict[\"x\"] > 3:\n", + " msg = \"well, arh, ...\"\n", "msg" ] }, @@ -78,10 +78,10 @@ ], "source": [ "# good\n", - "X = 'x'\n", + "X = \"x\"\n", "my_dict = {X: 4}\n", "if my_dict[X] > 3:\n", - " msg = 'yeah!'\n", + " msg = \"yeah!\"\n", "msg" ] }, @@ -103,7 +103,7 @@ ], "source": [ "# possibly overdoing it\n", - "X = 'x'\n", + "X = \"x\"\n", "Y = \"this doesn't mean that every string must be a constant\"\n", "my_dict = {X: 4}\n", "if my_dict[X] > 3:\n", @@ -139,13 +139,16 @@ ], "source": [ "import pandas as pd\n", - "X = 'x'\n", - "df = pd.DataFrame({'x':[1,2,3], 'y':[4,5,6]})\n", + "\n", + "X = \"x\"\n", + "df = pd.DataFrame({\"x\": [1, 2, 3], \"y\": [4, 5, 6]})\n", "try:\n", " df.X\n", "except:\n", - " from sys import stderr; stderr.write(\"this does not work\\n\")\n", - "df[X] # this does work but it's less pretty\n", + " from sys import stderr\n", + "\n", + " stderr.write(\"this does not work\\n\")\n", + "df[X] # this does work but it's less pretty\n", "df.x" ] }, @@ -357,7 +360,9 @@ "try:\n", " CONFIG.hazard.trop_cyclone.random_seed.str()\n", "except Exception as e:\n", - " from sys import stderr; stderr.write(f\"cannot convert random_seed to str: {e}\\n\")" + " from sys import stderr\n", + "\n", + " stderr.write(f\"cannot convert random_seed to str: {e}\\n\")" ] }, { diff --git a/doc/guide/Guide_Exception_Logging.ipynb b/doc/guide/Guide_Exception_Logging.ipynb index 55341f434b..b4f776aa98 100644 --- a/doc/guide/Guide_Exception_Logging.ipynb +++ b/doc/guide/Guide_Exception_Logging.ipynb @@ -44,7 +44,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Bad (1)\n", + "# Bad (1)\n", "x = 1\n", "try:\n", " l = len(events)\n", @@ -60,7 +60,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Still bad (2)\n", + "# Still bad (2)\n", "try:\n", " l = len(events)\n", " if l < 1:\n", @@ -75,7 +75,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Better, but still unsufficient (3)\n", + "# Better, but still unsufficient (3)\n", "try:\n", " l = len(events)\n", " if l < 1:\n", @@ -90,7 +90,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Even better (4)\n", + "# 
Even better (4)\n", "try:\n", " l = len(events)\n", "except TypeError:\n", @@ -105,13 +105,13 @@ "metadata": {}, "outputs": [], "source": [ - "#Even better (5)\n", + "# Even better (5)\n", "try:\n", " l = len(events)\n", "except TypeError as tper:\n", " raise TypeError(\"The provided variable events is not a list\") from tper\n", "if l < 1:\n", - " raise ValueError(\"To compute an impact there must be at least one event.\")\n" + " raise ValueError(\"To compute an impact there must be at least one event.\")" ] }, { @@ -172,6 +172,7 @@ "source": [ "import logging\n", "from climada.util.config import LOGGER\n", + "\n", "LOGGER.setLevel(logging.ERROR)" ] }, diff --git a/doc/guide/Guide_Py_Performance.ipynb b/doc/guide/Guide_Py_Performance.ipynb index bb3cf209f3..21f81313dd 100644 --- a/doc/guide/Guide_Py_Performance.ipynb +++ b/doc/guide/Guide_Py_Performance.ipynb @@ -188,6 +188,7 @@ ], "source": [ "import numpy as np\n", + "\n", "%timeit np.sum(list_of_numbers)" ] }, @@ -947,6 +948,7 @@ "source": [ "from numba import njit\n", "\n", + "\n", "@njit\n", "def sum_array(arr):\n", " result = 0.0\n", diff --git a/doc/guide/Guide_PythonDos-n-Donts.ipynb b/doc/guide/Guide_PythonDos-n-Donts.ipynb index 85295356aa..222ffd0ab3 100644 --- a/doc/guide/Guide_PythonDos-n-Donts.ipynb +++ b/doc/guide/Guide_PythonDos-n-Donts.ipynb @@ -147,14 +147,12 @@ "outputs": [], "source": [ "# Vertically aligned with opening delimiter.\n", - "foo = long_function_name(var_one, var_two,\n", - " var_three, var_four)\n", + "foo = long_function_name(var_one, var_two, var_three, var_four)\n", + "\n", "\n", "# Hanging indentation (4 additonal spaces)\n", - "def very_very_long_function_name(\n", - " var_one, var_two, var_three,\n", - " var_four):\n", - " print(var_one)\n" + "def very_very_long_function_name(var_one, var_two, var_three, var_four):\n", + " print(var_one)" ] }, { @@ -303,6 +301,8 @@ " return math.sqrt(x)\n", " else:\n", " return None\n", + "\n", + "\n", "# Wrong\n", "def foo(x):\n", " if x >= 0:\n", @@ -601,7 +601,7 @@ "source": [ "@uppercase_decorator\n", "def say_hi():\n", - " return 'hello there'" + " return \"hello there\"" ] }, { diff --git a/doc/guide/Guide_Testing.ipynb b/doc/guide/Guide_Testing.ipynb index f1876080ce..319d8ada55 100644 --- a/doc/guide/Guide_Testing.ipynb +++ b/doc/guide/Guide_Testing.ipynb @@ -209,7 +209,9 @@ "source": [ "from climada.test import get_test_file\n", "\n", - "my_test_file = get_test_file(ds_name='my-test-file', file_format='hdf5') # returns a pathlib.Path object" + "my_test_file = get_test_file(\n", + " ds_name=\"my-test-file\", file_format=\"hdf5\"\n", + ") # returns a pathlib.Path object" ] }, { @@ -240,11 +242,16 @@ "outputs": [], "source": [ "import climada\n", + "\n", + "\n", "def x(download_file=climada.util.files_handler.download_file):\n", - " filepath = download_file('http://real_data.ch')\n", + " filepath = download_file(\"http://real_data.ch\")\n", " return Path(filepath).stat().st_size\n", "\n", + "\n", "import unittest\n", + "\n", + "\n", "class TestX(unittest.TestCase):\n", " def download_file_dummy(url):\n", " return \"phony_data.ch\"\n", diff --git a/doc/guide/Guide_continuous_integration_GitHub_actions.ipynb b/doc/guide/Guide_continuous_integration_GitHub_actions.ipynb index d9b1d9053a..f800f8eda3 100644 --- a/doc/guide/Guide_continuous_integration_GitHub_actions.ipynb +++ b/doc/guide/Guide_continuous_integration_GitHub_actions.ipynb @@ -95,20 +95,23 @@ } ], "source": [ - "def x(b:bool):\n", + "def x(b: bool):\n", " if b:\n", - " print('been here')\n", + " 
print(\"been here\")\n", " return 4\n", " else:\n", - " print('been there')\n", + " print(\"been there\")\n", " return 0\n", "\n", - "def y(b:bool):\n", - " print('been everywhere')\n", - " return 1/x(b)\n", + "\n", + "def y(b: bool):\n", + " print(\"been everywhere\")\n", + " return 1 / x(b)\n", "\n", "\n", "import unittest\n", + "\n", + "\n", "class TestXY(unittest.TestCase):\n", " def test_x(self):\n", " self.assertEqual(x(True), 4)\n", @@ -117,6 +120,7 @@ " def test_y(self):\n", " self.assertEqual(y(True), 0.25)\n", "\n", + "\n", "unittest.TextTestRunner().run(unittest.TestLoader().loadTestsFromTestCase(TestXY));" ] }, diff --git a/doc/index.rst b/doc/index.rst index 732290eeee..4ad14dd788 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -69,7 +69,7 @@ Jump right in: .. toctree:: :caption: API Reference :hidden: - + Python Modules diff --git a/doc/misc/AUTHORS.md b/doc/misc/AUTHORS.md index 2d2e8405f4..561ed5cd36 120000 --- a/doc/misc/AUTHORS.md +++ b/doc/misc/AUTHORS.md @@ -1 +1 @@ -../../AUTHORS.md \ No newline at end of file +../../AUTHORS.md diff --git a/doc/misc/CHANGELOG.md b/doc/misc/CHANGELOG.md index 699cc9e7b7..03cb731062 120000 --- a/doc/misc/CHANGELOG.md +++ b/doc/misc/CHANGELOG.md @@ -1 +1 @@ -../../CHANGELOG.md \ No newline at end of file +../../CHANGELOG.md diff --git a/doc/misc/CONTRIBUTING.md b/doc/misc/CONTRIBUTING.md index f939e75f21..bcac999a8e 120000 --- a/doc/misc/CONTRIBUTING.md +++ b/doc/misc/CONTRIBUTING.md @@ -1 +1 @@ -../../CONTRIBUTING.md \ No newline at end of file +../../CONTRIBUTING.md diff --git a/doc/tutorial/0_intro_python.ipynb b/doc/tutorial/0_intro_python.ipynb index 43df82d5bc..8318986028 100644 --- a/doc/tutorial/0_intro_python.ipynb +++ b/doc/tutorial/0_intro_python.ipynb @@ -27,15 +27,15 @@ "metadata": {}, "outputs": [], "source": [ - "print('Addition: 2 + 2 =', 2 + 2)\n", - "print('Substraction: 50 - 5*6 =', 50 - 5*6)\n", - "print('Use of parenthesis: (50 - 5*6) / 4 =', (50 - 5*6) / 4)\n", - "print('Classic division returns a float: 17 / 3 =', 17 / 3)\n", - "print('Floor division discards the fractional part: 17 // 3 =', 17 // 3)\n", - "print('The % operator returns the remainder of the division: 17 % 3 =', 17 % 3)\n", - "print('Result * divisor + remainder: 5 * 3 + 2 =', 5 * 3 + 2)\n", - "print('5 squared: 5 ** 2 =', 5 ** 2)\n", - "print('2 to the power of 7: 2 ** 7 =', 2 ** 7)" + "print(\"Addition: 2 + 2 =\", 2 + 2)\n", + "print(\"Substraction: 50 - 5*6 =\", 50 - 5 * 6)\n", + "print(\"Use of parenthesis: (50 - 5*6) / 4 =\", (50 - 5 * 6) / 4)\n", + "print(\"Classic division returns a float: 17 / 3 =\", 17 / 3)\n", + "print(\"Floor division discards the fractional part: 17 // 3 =\", 17 // 3)\n", + "print(\"The % operator returns the remainder of the division: 17 % 3 =\", 17 % 3)\n", + "print(\"Result * divisor + remainder: 5 * 3 + 2 =\", 5 * 3 + 2)\n", + "print(\"5 squared: 5 ** 2 =\", 5**2)\n", + "print(\"2 to the power of 7: 2 ** 7 =\", 2**7)" ] }, { @@ -72,11 +72,11 @@ "metadata": {}, "outputs": [], "source": [ - "print('spam eggs') # single quotes\n", - "print('doesn\\'t') # use \\' to escape the single quote...\n", - "print(\"doesn't\") # ...or use double quotes instead\n", + "print(\"spam eggs\") # single quotes\n", + "print(\"doesn't\") # use \\' to escape the single quote...\n", + "print(\"doesn't\") # ...or use double quotes instead\n", + "print('\"Yes,\" he said.')\n", "print('\"Yes,\" he said.')\n", - "print(\"\\\"Yes,\\\" he said.\")\n", "print('\"Isn\\'t,\" she said.')" ] }, @@ -96,13 +96,13 @@ "metadata": {}, "outputs": [], 
"source": [ - "word = 'Python'\n", - "print('word = ', word)\n", - "print('Character in position 0: word[0] =', word[0])\n", - "print('Character in position 5: word[5] =', word[5])\n", - "print('Last character: word[-1] =', word[-1])\n", - "print('Second-last character: word[-2] =', word[-2])\n", - "print('word[-6] =', word[-6])" + "word = \"Python\"\n", + "print(\"word = \", word)\n", + "print(\"Character in position 0: word[0] =\", word[0])\n", + "print(\"Character in position 5: word[5] =\", word[5])\n", + "print(\"Last character: word[-1] =\", word[-1])\n", + "print(\"Second-last character: word[-2] =\", word[-2])\n", + "print(\"word[-6] =\", word[-6])" ] }, { @@ -118,8 +118,8 @@ "metadata": {}, "outputs": [], "source": [ - "print('Characters from position 0 (included) to 2 (excluded): word[0:2] =', word[0:2])\n", - "print('Characters from position 2 (included) to 5 (excluded): word[2:5] =', word[2:5])" + "print(\"Characters from position 0 (included) to 2 (excluded): word[0:2] =\", word[0:2])\n", + "print(\"Characters from position 2 (included) to 5 (excluded): word[2:5] =\", word[2:5])" ] }, { @@ -145,11 +145,11 @@ "outputs": [], "source": [ "squares = [1, 4, 9, 16, 25]\n", - "print('squares: ', squares)\n", - "print('Indexing returns the item: squares[0]:', squares[0])\n", - "print('squares[-1]:', squares[-1])\n", - "print('Slicing returns a new list: squares[-3:]:', squares[-3:])\n", - "print('squares[:]:', squares[:])" + "print(\"squares: \", squares)\n", + "print(\"Indexing returns the item: squares[0]:\", squares[0])\n", + "print(\"squares[-1]:\", squares[-1])\n", + "print(\"Slicing returns a new list: squares[-3:]:\", squares[-3:])\n", + "print(\"squares[:]:\", squares[:])" ] }, { @@ -184,7 +184,7 @@ "cubes = [1, 8, 27, 65, 125] # something's wrong here\n", "cubes[3] = 64 # replace the wrong value\n", "cubes.append(216) # add the cube of 6\n", - "cubes.append(7 ** 3) # and the cube of 7\n", + "cubes.append(7**3) # and the cube of 7\n", "cubes" ] }, @@ -197,8 +197,8 @@ "# Note: execution of this cell will fail\n", "\n", "# Try to modify a character of a string\n", - "word = 'Python'\n", - "word[0] = 'p'" + "word = \"Python\"\n", + "word[0] = \"p\"" ] }, { @@ -262,7 +262,7 @@ "metadata": {}, "outputs": [], "source": [ - "t = 12345, 54321, 'hello!'\n", + "t = 12345, 54321, \"hello!\"\n", "t[0]" ] }, @@ -322,8 +322,8 @@ "metadata": {}, "outputs": [], "source": [ - "t = 12345, 54321, 'hello!' 
# tuple packing\n", - "x, y, z = t # tuple unpacking\n", + "t = 12345, 54321, \"hello!\" # tuple packing\n", + "x, y, z = t # tuple unpacking\n", "x, y, z" ] }, @@ -344,8 +344,8 @@ "metadata": {}, "outputs": [], "source": [ - "basket = {'apple', 'orange', 'apple', 'pear', 'orange', 'banana'}\n", - "basket # show that duplicates have been removed" + "basket = {\"apple\", \"orange\", \"apple\", \"pear\", \"orange\", \"banana\"}\n", + "basket # show that duplicates have been removed" ] }, { @@ -354,7 +354,7 @@ "metadata": {}, "outputs": [], "source": [ - "'orange' in basket # fast membership testing" + "\"orange\" in basket # fast membership testing" ] }, { @@ -363,7 +363,7 @@ "metadata": {}, "outputs": [], "source": [ - "'crabgrass' in basket" + "\"crabgrass\" in basket" ] }, { @@ -373,9 +373,9 @@ "outputs": [], "source": [ "# Demonstrate set operations on unique letters from two words\n", - "a = set('abracadabra')\n", - "b = set('alacazam')\n", - "a # unique letters in a" + "a = set(\"abracadabra\")\n", + "b = set(\"alacazam\")\n", + "a # unique letters in a" ] }, { @@ -384,7 +384,7 @@ "metadata": {}, "outputs": [], "source": [ - "a - b # letters in a but not in b" + "a - b # letters in a but not in b" ] }, { @@ -393,7 +393,7 @@ "metadata": {}, "outputs": [], "source": [ - "a | b # letters in a or b or both" + "a | b # letters in a or b or both" ] }, { @@ -402,7 +402,7 @@ "metadata": {}, "outputs": [], "source": [ - "a & b # letters in both a and b" + "a & b # letters in both a and b" ] }, { @@ -411,7 +411,7 @@ "metadata": {}, "outputs": [], "source": [ - "a ^ b # letters in a or b but not both" + "a ^ b # letters in a or b but not both" ] }, { @@ -440,7 +440,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Define a new set and try some set methods (freestyle)\n" + "# Define a new set and try some set methods (freestyle)" ] }, { @@ -465,8 +465,8 @@ "metadata": {}, "outputs": [], "source": [ - "tel = {'jack': 4098, 'sape': 4139}\n", - "tel['guido'] = 4127\n", + "tel = {\"jack\": 4098, \"sape\": 4139}\n", + "tel[\"guido\"] = 4127\n", "tel" ] }, @@ -476,7 +476,7 @@ "metadata": {}, "outputs": [], "source": [ - "tel['jack']" + "tel[\"jack\"]" ] }, { @@ -485,7 +485,7 @@ "metadata": {}, "outputs": [], "source": [ - "del tel['sape']" + "del tel[\"sape\"]" ] }, { @@ -494,7 +494,7 @@ "metadata": {}, "outputs": [], "source": [ - "tel['irv'] = 4127\n", + "tel[\"irv\"] = 4127\n", "tel" ] }, @@ -522,7 +522,7 @@ "metadata": {}, "outputs": [], "source": [ - "'guido' in tel" + "\"guido\" in tel" ] }, { @@ -531,7 +531,7 @@ "metadata": {}, "outputs": [], "source": [ - "'jack' not in tel" + "\"jack\" not in tel" ] }, { @@ -554,13 +554,13 @@ "metadata": {}, "outputs": [], "source": [ - "def fib(n): # write Fibonacci series up to n\n", - " \"\"\"Print a Fibonacci series up to n.\"\"\"\n", - " a, b = 0, 1 # two assignments in one line\n", - " while a < n:\n", - " print(a, end=' ')\n", - " a, b = b, a+b # two assignments in one line\n", - " print()" + "def fib(n): # write Fibonacci series up to n\n", + " \"\"\"Print a Fibonacci series up to n.\"\"\"\n", + " a, b = 0, 1 # two assignments in one line\n", + " while a < n:\n", + " print(a, end=\" \")\n", + " a, b = b, a + b # two assignments in one line\n", + " print()" ] }, { @@ -587,7 +587,7 @@ "outputs": [], "source": [ "print(fib)\n", - "print(type(fib)) # function type\n", + "print(type(fib)) # function type\n", "f = fib\n", "f(100)" ] @@ -608,15 +608,16 @@ "def dummy(x):\n", " x += x\n", "\n", + "\n", "xx = 5\n", - "print('xx before function call: ', xx)\n", 
+ "print(\"xx before function call: \", xx)\n", "dummy(xx)\n", - "print('xx after function call: ', xx)\n", + "print(\"xx after function call: \", xx)\n", "\n", "yy = [5]\n", - "print('yy before function call: ', yy)\n", + "print(\"yy before function call: \", yy)\n", "dummy(yy)\n", - "print('yy after function call: ', yy)" + "print(\"yy after function call: \", yy)" ] }, { @@ -634,16 +635,16 @@ "metadata": {}, "outputs": [], "source": [ - "def ask_ok(prompt, retries=4, reminder='Please try again!'):\n", + "def ask_ok(prompt, retries=4, reminder=\"Please try again!\"):\n", " while True:\n", " ok = input(prompt)\n", - " if ok in ('y', 'ye', 'yes'):\n", + " if ok in (\"y\", \"ye\", \"yes\"):\n", " return True\n", - " if ok in ('n', 'no', 'nop', 'nope'):\n", + " if ok in (\"n\", \"no\", \"nop\", \"nope\"):\n", " return False\n", " retries = retries - 1\n", " if retries < 0:\n", - " raise ValueError('invalid user response')\n", + " raise ValueError(\"invalid user response\")\n", " print(reminder)" ] }, @@ -653,10 +654,10 @@ "metadata": {}, "outputs": [], "source": [ - "#This function can be called in several ways:\n", + "# This function can be called in several ways:\n", "\n", - "#giving only the mandatory argument:\n", - "ask_ok('Do you really want to quit?')\n" + "# giving only the mandatory argument:\n", + "ask_ok(\"Do you really want to quit?\")" ] }, { @@ -666,7 +667,7 @@ "outputs": [], "source": [ "# giving one of the optional arguments:\n", - "ask_ok('OK to overwrite the file?', 2)\n" + "ask_ok(\"OK to overwrite the file?\", 2)" ] }, { @@ -676,7 +677,7 @@ "outputs": [], "source": [ "# or even giving all arguments:\n", - "ask_ok('OK to overwrite the file?', 2, 'Come on, only yes or no!')" + "ask_ok(\"OK to overwrite the file?\", 2, \"Come on, only yes or no!\")" ] }, { @@ -692,7 +693,7 @@ "metadata": {}, "outputs": [], "source": [ - "ask_ok('OK to overwrite the file?', reminder='Come on, only yes or no!')" + "ask_ok(\"OK to overwrite the file?\", reminder=\"Come on, only yes or no!\")" ] }, { @@ -710,9 +711,11 @@ "source": [ "def test(x=None):\n", " if x is None:\n", - " print('no x here')\n", + " print(\"no x here\")\n", " else:\n", " print(x)\n", + "\n", + "\n", "test()" ] }, @@ -736,15 +739,15 @@ "metadata": {}, "outputs": [], "source": [ - "class Dog: # same as \"class Dog(object)\"\n", + "class Dog: # same as \"class Dog(object)\"\n", "\n", - " kind = 'canine' # class variable shared by all instances\n", + " kind = \"canine\" # class variable shared by all instances\n", "\n", - " def __init__(self, name): # initialization method\n", - " self.name = name # instance variable unique to each instance\n", - " self.tricks = [] # creates a new empty list for each dog\n", + " def __init__(self, name): # initialization method\n", + " self.name = name # instance variable unique to each instance\n", + " self.tricks = [] # creates a new empty list for each dog\n", "\n", - " def add_trick(self, trick): # class method\n", + " def add_trick(self, trick): # class method\n", " self.tricks.append(trick)" ] }, @@ -761,7 +764,9 @@ "metadata": {}, "outputs": [], "source": [ - "d = Dog('Fido') # creates a new instance of the class and assigns this object to the local variable d\n", + "d = Dog(\n", + " \"Fido\"\n", + ") # creates a new instance of the class and assigns this object to the local variable d\n", "d.name" ] }, @@ -771,9 +776,11 @@ "metadata": {}, "outputs": [], "source": [ - "e = Dog('Buddy') # creates a new instance of the class and assigns this object to the local variable e\n", - 
"d.add_trick('roll over')\n", - "e.add_trick('play dead')" + "e = Dog(\n", + " \"Buddy\"\n", + ") # creates a new instance of the class and assigns this object to the local variable e\n", + "d.add_trick(\"roll over\")\n", + "e.add_trick(\"play dead\")" ] }, { @@ -782,7 +789,7 @@ "metadata": {}, "outputs": [], "source": [ - "d.tricks # unique to d" + "d.tricks # unique to d" ] }, { @@ -791,7 +798,7 @@ "metadata": {}, "outputs": [], "source": [ - "e.tricks # unique to e" + "e.tricks # unique to e" ] }, { @@ -800,7 +807,7 @@ "metadata": {}, "outputs": [], "source": [ - "d.kind # shared by all dogs" + "d.kind # shared by all dogs" ] }, { @@ -809,7 +816,7 @@ "metadata": {}, "outputs": [], "source": [ - "e.kind # shared by all dogs" + "e.kind # shared by all dogs" ] }, { @@ -831,19 +838,22 @@ "metadata": {}, "outputs": [], "source": [ - "class Animal: # base class\n", + "class Animal: # base class\n", "\n", " def __init__(self, kind):\n", " self.kind = kind\n", " self.tricks = []\n", "\n", - " def add_trick(self, trick): # class method\n", + " def add_trick(self, trick): # class method\n", " self.tricks.append(trick)\n", "\n", - "class Dog(Animal): # derived class\n", "\n", - " def __init__(self): # override of __init__ base method\n", - " super(Dog, self).__init__('canine') # call Animal __init__ method with input string" + "class Dog(Animal): # derived class\n", + "\n", + " def __init__(self): # override of __init__ base method\n", + " super(Dog, self).__init__(\n", + " \"canine\"\n", + " ) # call Animal __init__ method with input string" ] }, { @@ -852,9 +862,9 @@ "metadata": {}, "outputs": [], "source": [ - "fido = Dog() # fido is automatically an animal of kind 'canine'\n", + "fido = Dog() # fido is automatically an animal of kind 'canine'\n", "print(fido.kind)\n", - "fido.add_trick('play dead') # Dog class can use Animal class\n", + "fido.add_trick(\"play dead\") # Dog class can use Animal class\n", "print(fido.tricks)" ] }, @@ -893,7 +903,8 @@ " for item in iterable:\n", " self.items_list.append(item)\n", "\n", - " __update = update # private copy of original update() method\n", + " __update = update # private copy of original update() method\n", + "\n", "\n", "class MappingSubclass(Mapping):\n", "\n", diff --git a/doc/tutorial/1_main_climada.ipynb b/doc/tutorial/1_main_climada.ipynb index 730d5e5ed1..36ce87bb2e 100644 --- a/doc/tutorial/1_main_climada.ipynb +++ b/doc/tutorial/1_main_climada.ipynb @@ -182,10 +182,13 @@ "source": [ "import numpy as np\n", "from climada.hazard import TCTracks\n", - "import warnings # To hide the warnings\n", - "warnings.filterwarnings('ignore')\n", + "import warnings # To hide the warnings\n", "\n", - "tracks = TCTracks.from_ibtracs_netcdf(provider='usa', basin='NA') # Here we download the full dataset for the analysis\n", + "warnings.filterwarnings(\"ignore\")\n", + "\n", + "tracks = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", basin=\"NA\"\n", + ") # Here we download the full dataset for the analysis\n", "# afterwards (e.g. return period), but you can also use \"year_range\" to adjust the range of the dataset to be downloaded.\n", "# While doing that, you need to make sure that the year 2017 is included if you want to run the blocks with the codes\n", "# subsetting a specific tropic cyclone, which happened in 2017. (Of course, you can also change the subsetting codes.)" @@ -220,8 +223,10 @@ ], "source": [ "# plotting tracks can be very time consuming, depending on the number of tracks. 
So we choose only a few here, by limiting the time range to one year\n", - "tracks_2017 = TCTracks.from_ibtracs_netcdf(provider='usa', basin='NA', year_range = (2017, 2017))\n", - "tracks_2017 .plot(); # This may take a very long time" + "tracks_2017 = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", basin=\"NA\", year_range=(2017, 2017)\n", + ")\n", + "tracks_2017.plot(); # This may take a very long time" ] }, { @@ -368,7 +373,9 @@ } ], "source": [ - "tracks.subset({\"sid\": \"2017260N12310\"}).plot(); # This is how we subset a TCTracks object" + "tracks.subset(\n", + " {\"sid\": \"2017260N12310\"}\n", + ").plot(); # This is how we subset a TCTracks object" ] }, { @@ -397,7 +404,7 @@ } ], "source": [ - "haz.plot_intensity(event='2017260N12310');" + "haz.plot_intensity(event=\"2017260N12310\");" ] }, { @@ -433,7 +440,7 @@ } ], "source": [ - "haz.plot_rp_intensity(return_periods=(5,10,20,40));" + "haz.plot_rp_intensity(return_periods=(5, 10, 20, 40));" ] }, { @@ -553,8 +560,10 @@ "source": [ "from climada.entity.exposures import LitPop\n", "\n", - "exp_litpop = LitPop.from_countries('Puerto Rico', res_arcsec = 120) # We'll go lower resolution than default to keep it simple\n", - "exp_litpop.set_geometry_points() # Set geodataframe geometries from lat lon data\n", + "exp_litpop = LitPop.from_countries(\n", + " \"Puerto Rico\", res_arcsec=120\n", + ") # We'll go lower resolution than default to keep it simple\n", + "exp_litpop.set_geometry_points() # Set geodataframe geometries from lat lon data\n", "\n", "exp_litpop.plot_hexbin(pop_name=True, linewidth=4, buffer=0.1);" ] @@ -647,7 +656,7 @@ } ], "source": [ - "exp_litpop.gdf['impf_TC'] = 1" + "exp_litpop.gdf[\"impf_TC\"] = 1" ] }, { @@ -688,8 +697,8 @@ "from climada.entity import Measure, MeasureSet\n", "\n", "meas_mangrove = Measure(\n", - " name='Mangrove',\n", - " haz_type='TC',\n", + " name=\"Mangrove\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([0.2, 0.2, 0.7]),\n", " cost=500000000,\n", " mdd_impact=(1, 0),\n", @@ -762,11 +771,13 @@ } ], "source": [ - "mangrove_exp, mangrove_imp_fun_set, mangrove_haz = meas_mangrove.apply(exp_litpop, imp_fun_set, haz)\n", + "mangrove_exp, mangrove_imp_fun_set, mangrove_haz = meas_mangrove.apply(\n", + " exp_litpop, imp_fun_set, haz\n", + ")\n", "axes1 = imp_fun_set.plot()\n", - "axes1.set_title('TC: Emanuel (2011) impact function')\n", + "axes1.set_title(\"TC: Emanuel (2011) impact function\")\n", "axes2 = mangrove_imp_fun_set.plot()\n", - "axes2.set_title('TC: Modified impact function')" + "axes2.set_title(\"TC: Modified impact function\")" ] }, { @@ -792,8 +803,8 @@ ], "source": [ "meas_buildings = Measure(\n", - " name='Building code',\n", - " haz_type='TC',\n", + " name=\"Building code\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([0.2, 0.7, 0.5]),\n", " cost=100000000,\n", " hazard_freq_cutoff=0.1,\n", @@ -802,7 +813,9 @@ "meas_set.append(meas_buildings)\n", "meas_set.check()\n", "\n", - "buildings_exp, buildings_imp_fun_set, buildings_haz = meas_buildings.apply(exp_litpop, imp_fun_set, haz)" + "buildings_exp, buildings_imp_fun_set, buildings_haz = meas_buildings.apply(\n", + " exp_litpop, imp_fun_set, haz\n", + ")" ] }, { @@ -861,7 +874,7 @@ } ], "source": [ - "haz.plot_rp_intensity(return_periods=(5, 20));\n", + "haz.plot_rp_intensity(return_periods=(5, 20))\n", "buildings_haz.plot_rp_intensity(return_periods=(5, 20));" ] }, @@ -906,8 +919,8 @@ "source": [ "from climada.entity import DiscRates\n", "\n", - "years=np.arange(1950, 2101)\n", - "rates=np.ones(years.size) * 
0.02\n", + "years = np.arange(1950, 2101)\n", + "rates = np.ones(years.size) * 0.02\n", "disc = DiscRates(years=years, rates=rates)\n", "disc.check()\n", "disc.plot()" @@ -941,7 +954,7 @@ " exposures=exp_litpop,\n", " disc_rates=disc,\n", " impact_func_set=imp_fun_set,\n", - " measure_set=meas_set\n", + " measure_set=meas_set,\n", ")" ] }, @@ -1030,10 +1043,10 @@ } ], "source": [ - "freq_curve = imp.calc_freq_curve() # impact exceedance frequency curve\n", - "freq_curve.plot();\n", + "freq_curve = imp.calc_freq_curve() # impact exceedance frequency curve\n", + "freq_curve.plot()\n", "\n", - "print('Expected average annual impact: {:.3e} USD'.format(imp.aai_agg))" + "print(\"Expected average annual impact: {:.3e} USD\".format(imp.aai_agg))" ] }, { @@ -1071,7 +1084,7 @@ } ], "source": [ - "imp.plot_basemap_eai_exposure(buffer=0.1); # average annual impact at each exposure" + "imp.plot_basemap_eai_exposure(buffer=0.1); # average annual impact at each exposure" ] }, { @@ -1186,9 +1199,12 @@ "from climada.engine import CostBenefit\n", "\n", "cost_ben = CostBenefit()\n", - "cost_ben.calc(haz, ent, future_year=2040) # prints costs and benefits\n", - "cost_ben.plot_cost_benefit(); # plot cost benefit ratio and averted damage of every exposure\n", - "cost_ben.plot_event_view(return_per=(10, 20, 40)); # plot averted damage of each measure for every return period" + "cost_ben.calc(haz, ent, future_year=2040) # prints costs and benefits\n", + "cost_ben.plot_cost_benefit()\n", + "# plot cost benefit ratio and averted damage of every exposure\n", + "cost_ben.plot_event_view(\n", + " return_per=(10, 20, 40)\n", + "); # plot averted damage of each measure for every return period" ] }, { diff --git a/doc/tutorial/climada_engine_CostBenefit.ipynb b/doc/tutorial/climada_engine_CostBenefit.ipynb index 514bceb9e0..de98c79260 100644 --- a/doc/tutorial/climada_engine_CostBenefit.ipynb +++ b/doc/tutorial/climada_engine_CostBenefit.ipynb @@ -257,15 +257,23 @@ "\n", "client = Client()\n", "future_year = 2080\n", - "haz_present = client.get_hazard('tropical_cyclone',\n", - " properties={'country_name': 'Haiti',\n", - " 'climate_scenario': 'historical',\n", - " 'nb_synth_tracks':'10'})\n", - "haz_future = client.get_hazard('tropical_cyclone',\n", - " properties={'country_name': 'Haiti',\n", - " 'climate_scenario': 'rcp60',\n", - " 'ref_year': str(future_year),\n", - " 'nb_synth_tracks':'10'})\n" + "haz_present = client.get_hazard(\n", + " \"tropical_cyclone\",\n", + " properties={\n", + " \"country_name\": \"Haiti\",\n", + " \"climate_scenario\": \"historical\",\n", + " \"nb_synth_tracks\": \"10\",\n", + " },\n", + ")\n", + "haz_future = client.get_hazard(\n", + " \"tropical_cyclone\",\n", + " properties={\n", + " \"country_name\": \"Haiti\",\n", + " \"climate_scenario\": \"rcp60\",\n", + " \"ref_year\": str(future_year),\n", + " \"nb_synth_tracks\": \"10\",\n", + " },\n", + ")" ] }, { @@ -366,7 +374,7 @@ } ], "source": [ - "exp_present = client.get_litpop(country='Haiti')" + "exp_present = client.get_litpop(country=\"Haiti\")" ] }, { @@ -388,8 +396,8 @@ "exp_future.ref_year = future_year\n", "n_years = exp_future.ref_year - exp_present.ref_year + 1\n", "growth_rate = 1.02\n", - "growth = growth_rate ** n_years\n", - "exp_future.gdf['value'] = exp_future.gdf['value'] * growth" + "growth = growth_rate**n_years\n", + "exp_future.gdf[\"value\"] = exp_future.gdf[\"value\"] * growth" ] }, { @@ -517,8 +525,8 @@ "source": [ "# This would be done automatically in Impact calculations\n", "# but it's better to do it explicitly 
before the calculation\n", - "exp_present.assign_centroids(haz_present, distance='approx')\n", - "exp_future.assign_centroids(haz_future, distance='approx')" + "exp_present.assign_centroids(haz_present, distance=\"approx\")\n", + "exp_future.assign_centroids(haz_future, distance=\"approx\")" ] }, { @@ -592,9 +600,9 @@ "# This is more out of politeness, since if there's only one impact function\n", "# and one `impf_` column, CLIMADA can figure it out\n", "exp_present.gdf.rename(columns={\"impf_\": \"impf_TC\"}, inplace=True)\n", - "exp_present.gdf['impf_TC'] = 1\n", + "exp_present.gdf[\"impf_TC\"] = 1\n", "exp_future.gdf.rename(columns={\"impf_\": \"impf_TC\"}, inplace=True)\n", - "exp_future.gdf['impf_TC'] = 1" + "exp_future.gdf[\"impf_TC\"] = 1" ] }, { @@ -619,20 +627,20 @@ "from climada.entity.measures import Measure, MeasureSet\n", "\n", "meas_1 = Measure(\n", - " haz_type='TC',\n", - " name='Measure A',\n", + " haz_type=\"TC\",\n", + " name=\"Measure A\",\n", " color_rgb=np.array([0.8, 0.1, 0.1]),\n", " cost=5000000000,\n", - " hazard_inten_imp=(1, -5), # Decrease wind speeds by 5 m/s\n", + " hazard_inten_imp=(1, -5), # Decrease wind speeds by 5 m/s\n", " risk_transf_cover=0,\n", ")\n", "\n", "meas_2 = Measure(\n", - " haz_type='TC',\n", - " name='Measure B',\n", + " haz_type=\"TC\",\n", + " name=\"Measure B\",\n", " color_rgb=np.array([0.1, 0.1, 0.8]),\n", " cost=220000000,\n", - " paa_impact=(1, -0.10), # 10% fewer assets affected\n", + " paa_impact=(1, -0.10), # 10% fewer assets affected\n", ")\n", "\n", "# gather all measures\n", @@ -684,10 +692,18 @@ "source": [ "from climada.entity import Entity\n", "\n", - "entity_present = Entity(exposures=exp_present, disc_rates=discount_zero,\n", - " impact_func_set=impf_set, measure_set=meas_set)\n", - "entity_future = Entity(exposures=exp_future, disc_rates=discount_zero,\n", - " impact_func_set=impf_set, measure_set=meas_set)" + "entity_present = Entity(\n", + " exposures=exp_present,\n", + " disc_rates=discount_zero,\n", + " impact_func_set=impf_set,\n", + " measure_set=meas_set,\n", + ")\n", + "entity_future = Entity(\n", + " exposures=exp_future,\n", + " disc_rates=discount_zero,\n", + " impact_func_set=impf_set,\n", + " measure_set=meas_set,\n", + ")" ] }, { @@ -737,8 +753,16 @@ "from climada.engine.cost_benefit import risk_aai_agg\n", "\n", "costben_measures_only = CostBenefit()\n", - "costben_measures_only.calc(haz_present, entity_present, haz_future=None, ent_future=None,\n", - " future_year=future_year, risk_func=risk_aai_agg, imp_time_depen=None, save_imp=True)" + "costben_measures_only.calc(\n", + " haz_present,\n", + " entity_present,\n", + " haz_future=None,\n", + " ent_future=None,\n", + " future_year=future_year,\n", + " risk_func=risk_aai_agg,\n", + " imp_time_depen=None,\n", + " save_imp=True,\n", + ")" ] }, { @@ -783,10 +807,12 @@ } ], "source": [ - "combined_costben = costben_measures_only.combine_measures(['Measure A', 'Measure B'],\n", - " 'Combined measures',\n", - " new_color=np.array([0.1, 0.8, 0.8]),\n", - " disc_rates=discount_zero)" + "combined_costben = costben_measures_only.combine_measures(\n", + " [\"Measure A\", \"Measure B\"],\n", + " \"Combined measures\",\n", + " new_color=np.array([0.1, 0.8, 0.8]),\n", + " disc_rates=discount_zero,\n", + ")" ] }, { @@ -877,8 +903,16 @@ ], "source": [ "costben = CostBenefit()\n", - "costben.calc(haz_present, entity_present, haz_future=haz_future, ent_future=entity_future,\n", - " future_year=future_year, risk_func=risk_aai_agg, imp_time_depen=1, save_imp=True)" + 
"costben.calc(\n", + " haz_present,\n", + " entity_present,\n", + " haz_future=haz_future,\n", + " ent_future=entity_future,\n", + " future_year=future_year,\n", + " risk_func=risk_aai_agg,\n", + " imp_time_depen=1,\n", + " save_imp=True,\n", + ")" ] }, { @@ -939,8 +973,10 @@ "source": [ "# define this as a function because we'll use it again later\n", "def waterfall():\n", - " return costben.plot_waterfall(haz_present, entity_present, haz_future, entity_future,\n", - " risk_func=risk_aai_agg)\n", + " return costben.plot_waterfall(\n", + " haz_present, entity_present, haz_future, entity_future, risk_func=risk_aai_agg\n", + " )\n", + "\n", "\n", "ax = waterfall()" ] @@ -992,8 +1028,15 @@ } ], "source": [ - "costben.plot_arrow_averted(axis = waterfall(), in_meas_names=['Measure A', 'Measure B'], accumulate=True, combine=False,\n", - " risk_func=risk_aai_agg, disc_rates=None, imp_time_depen=1)" + "costben.plot_arrow_averted(\n", + " axis=waterfall(),\n", + " in_meas_names=[\"Measure A\", \"Measure B\"],\n", + " accumulate=True,\n", + " combine=False,\n", + " risk_func=risk_aai_agg,\n", + " disc_rates=None,\n", + " imp_time_depen=1,\n", + ")" ] }, { @@ -1025,10 +1068,18 @@ }, "outputs": [], "source": [ - "entity_present_disc = Entity(exposures=exp_present, disc_rates=discount_stern,\n", - " impact_func_set=impf_set, measure_set=meas_set)\n", - "entity_future_disc = Entity(exposures=exp_future, disc_rates=discount_stern,\n", - " impact_func_set=impf_set, measure_set=meas_set)" + "entity_present_disc = Entity(\n", + " exposures=exp_present,\n", + " disc_rates=discount_stern,\n", + " impact_func_set=impf_set,\n", + " measure_set=meas_set,\n", + ")\n", + "entity_future_disc = Entity(\n", + " exposures=exp_future,\n", + " disc_rates=discount_stern,\n", + " impact_func_set=impf_set,\n", + " measure_set=meas_set,\n", + ")" ] }, { @@ -1083,9 +1134,17 @@ ], "source": [ "costben_disc = CostBenefit()\n", - "costben_disc.calc(haz_present, entity_present_disc, haz_future=haz_future, ent_future=entity_future_disc,\n", - " future_year=future_year, risk_func=risk_aai_agg, imp_time_depen=1, save_imp=True)\n", - "print(costben_disc.imp_meas_future['no measure']['impact'].imp_mat.shape)" + "costben_disc.calc(\n", + " haz_present,\n", + " entity_present_disc,\n", + " haz_future=haz_future,\n", + " ent_future=entity_future_disc,\n", + " future_year=future_year,\n", + " risk_func=risk_aai_agg,\n", + " imp_time_depen=1,\n", + " save_imp=True,\n", + ")\n", + "print(costben_disc.imp_meas_future[\"no measure\"][\"impact\"].imp_mat.shape)" ] }, { @@ -1194,18 +1253,22 @@ } ], "source": [ - "combined_costben_disc = costben_disc.combine_measures(['Measure A', 'Measure B'],\n", - " 'Combined measures',\n", - " new_color=np.array([0.1, 0.8, 0.8]),\n", - " disc_rates=discount_stern)\n", - "efc_present = costben_disc.imp_meas_present['no measure']['efc']\n", - "efc_future = costben_disc.imp_meas_future['no measure']['efc']\n", - "efc_combined_measures = combined_costben_disc.imp_meas_future['Combined measures']['efc']\n", + "combined_costben_disc = costben_disc.combine_measures(\n", + " [\"Measure A\", \"Measure B\"],\n", + " \"Combined measures\",\n", + " new_color=np.array([0.1, 0.8, 0.8]),\n", + " disc_rates=discount_stern,\n", + ")\n", + "efc_present = costben_disc.imp_meas_present[\"no measure\"][\"efc\"]\n", + "efc_future = costben_disc.imp_meas_future[\"no measure\"][\"efc\"]\n", + "efc_combined_measures = combined_costben_disc.imp_meas_future[\"Combined measures\"][\n", + " \"efc\"\n", + "]\n", "\n", "ax = 
plt.subplot(1, 1, 1)\n", - "efc_present.plot(axis=ax, color='blue', label='Present')\n", - "efc_future.plot(axis=ax, color='orange', label='Future, unadapted')\n", - "efc_combined_measures.plot(axis=ax, color='green', label='Future, adapted')\n", + "efc_present.plot(axis=ax, color=\"blue\", label=\"Present\")\n", + "efc_future.plot(axis=ax, color=\"orange\", label=\"Future, unadapted\")\n", + "efc_combined_measures.plot(axis=ax, color=\"green\", label=\"Future, adapted\")\n", "leg = ax.legend()" ] }, diff --git a/doc/tutorial/climada_engine_Forecast.ipynb b/doc/tutorial/climada_engine_Forecast.ipynb index 74cbd00f85..29c9a5930f 100644 --- a/doc/tutorial/climada_engine_Forecast.ipynb +++ b/doc/tutorial/climada_engine_Forecast.ipynb @@ -42,12 +42,12 @@ "metadata": {}, "outputs": [], "source": [ - "#generate hazard\n", + "# generate hazard\n", "hazard, haz_model, run_datetime, event_date = generate_WS_forecast_hazard()\n", "# generate hazard with forecasts from past dates (works only if the files have already been downloaded)\n", "# hazard, haz_model, run_datetime, event_date = generate_WS_forecast_hazard(\n", "# run_datetime=datetime(2022,5,17),\n", - "# event_date=datetime(2022,5,19)) " + "# event_date=datetime(2022,5,19))" ] }, { @@ -56,7 +56,7 @@ "metadata": {}, "outputs": [], "source": [ - "#generate vulnerability\n", + "# generate vulnerability\n", "impact_function = ImpfStormEurope.from_welker()\n", "impact_function_set = ImpactFuncSet([impact_function])" ] @@ -67,12 +67,12 @@ "metadata": {}, "outputs": [], "source": [ - "#generate exposure and save to file\n", - "filename_exp = CONFIG.local_data.save_dir.dir() / ('exp_litpop_Switzerland.hdf5')\n", + "# generate exposure and save to file\n", + "filename_exp = CONFIG.local_data.save_dir.dir() / (\"exp_litpop_Switzerland.hdf5\")\n", "if filename_exp.exists():\n", " exposure = LitPop.from_hdf5(filename_exp)\n", "else:\n", - " exposure = LitPop.from_countries('Switzerland', reference_year=2020)\n", + " exposure = LitPop.from_countries(\"Switzerland\", reference_year=2020)\n", " exposure.write_hdf5(filename_exp)" ] }, @@ -82,7 +82,7 @@ "metadata": {}, "outputs": [], "source": [ - "#create and calculate Forecast\n", + "# create and calculate Forecast\n", "CH_WS_forecast = Forecast({run_datetime: hazard}, exposure, impact_function_set)\n", "CH_WS_forecast.calc()" ] @@ -106,7 +106,7 @@ } ], "source": [ - "CH_WS_forecast.plot_imp_map(save_fig=False,close_fig=False,proj=ccrs.epsg(2056));" + "CH_WS_forecast.plot_imp_map(save_fig=False, close_fig=False, proj=ccrs.epsg(2056));" ] }, { @@ -135,7 +135,7 @@ } ], "source": [ - "CH_WS_forecast.plot_hist(save_fig=False,close_fig=False);" + "CH_WS_forecast.plot_hist(save_fig=False, close_fig=False);" ] }, { @@ -164,7 +164,9 @@ } ], "source": [ - "CH_WS_forecast.plot_exceedence_prob(threshold=5000, save_fig=False, close_fig=False,proj=ccrs.epsg(2056));" + "CH_WS_forecast.plot_exceedence_prob(\n", + " threshold=5000, save_fig=False, close_fig=False, proj=ccrs.epsg(2056)\n", + ");" ] }, { @@ -198,31 +200,30 @@ "from climada.util.config import CONFIG\n", "\n", "\n", - "#create a file containing the polygons of Swiss cantons using natural earth\n", - "cantons_file = CONFIG.local_data.save_dir.dir() / 'cantons.shp'\n", - "adm1_shape_file = shapereader.natural_earth(resolution='10m',\n", - " category='cultural',\n", - " name='admin_1_states_provinces')\n", + "# create a file containing the polygons of Swiss cantons using natural earth\n", + "cantons_file = CONFIG.local_data.save_dir.dir() / \"cantons.shp\"\n", + 
"adm1_shape_file = shapereader.natural_earth(\n", + " resolution=\"10m\", category=\"cultural\", name=\"admin_1_states_provinces\"\n", + ")\n", "if not cantons_file.exists():\n", - " with fiona.open(adm1_shape_file, 'r') as source:\n", - " with fiona.open(\n", - " cantons_file, 'w',\n", - " **source.meta) as sink:\n", + " with fiona.open(adm1_shape_file, \"r\") as source:\n", + " with fiona.open(cantons_file, \"w\", **source.meta) as sink:\n", "\n", " for f in source:\n", - " if f['properties']['adm0_a3'] == 'CHE':\n", + " if f[\"properties\"][\"adm0_a3\"] == \"CHE\":\n", " sink.write(f)\n", - "CH_WS_forecast.plot_warn_map(str(cantons_file),\n", - " decision_level = 'polygon',\n", - " thresholds=[100000,500000,\n", - " 1000000,5000000],\n", - " probability_aggregation='mean',\n", - " area_aggregation='sum',\n", - " title=\"Building damage warning\",\n", - " explain_text=\"warn level based on aggregated damages\",\n", - " save_fig=False,\n", - " close_fig=False,\n", - " proj=ccrs.epsg(2056));" + "CH_WS_forecast.plot_warn_map(\n", + " str(cantons_file),\n", + " decision_level=\"polygon\",\n", + " thresholds=[100000, 500000, 1000000, 5000000],\n", + " probability_aggregation=\"mean\",\n", + " area_aggregation=\"sum\",\n", + " title=\"Building damage warning\",\n", + " explain_text=\"warn level based on aggregated damages\",\n", + " save_fig=False,\n", + " close_fig=False,\n", + " proj=ccrs.epsg(2056),\n", + ");" ] }, { @@ -255,43 +256,43 @@ "\n", "### generate exposure\n", "# find out which hazard coord to consider\n", - "CHE_borders = u_plot._get_borders(np.stack([exposure.gdf['latitude'].values,\n", - " exposure.gdf['longitude'].values],\n", - " axis=1)\n", - " )\n", - "centroid_selection = np.logical_and(np.logical_and(hazard.centroids.lat >= CHE_borders[2],\n", - " hazard.centroids.lat <= CHE_borders[3]),\n", - " np.logical_and(hazard.centroids.lon >= CHE_borders[0],\n", - " hazard.centroids.lon <= CHE_borders[1])\n", - " )\n", + "CHE_borders = u_plot._get_borders(\n", + " np.stack(\n", + " [exposure.gdf[\"latitude\"].values, exposure.gdf[\"longitude\"].values], axis=1\n", + " )\n", + ")\n", + "centroid_selection = np.logical_and(\n", + " np.logical_and(\n", + " hazard.centroids.lat >= CHE_borders[2], hazard.centroids.lat <= CHE_borders[3]\n", + " ),\n", + " np.logical_and(\n", + " hazard.centroids.lon >= CHE_borders[0], hazard.centroids.lon <= CHE_borders[1]\n", + " ),\n", + ")\n", "# Fill DataFrame with values for a \"neutral\" exposure (value = 1)\n", "\n", "exp_df = DataFrame()\n", - "exp_df['value'] = np.ones_like(hazard.centroids.lat[centroid_selection]) # provide value\n", - "exp_df['latitude'] = hazard.centroids.lat[centroid_selection]\n", - "exp_df['longitude'] = hazard.centroids.lon[centroid_selection]\n", - "exp_df['impf_WS'] = np.ones_like(hazard.centroids.lat[centroid_selection], int)\n", + "exp_df[\"value\"] = np.ones_like(\n", + " hazard.centroids.lat[centroid_selection]\n", + ") # provide value\n", + "exp_df[\"latitude\"] = hazard.centroids.lat[centroid_selection]\n", + "exp_df[\"longitude\"] = hazard.centroids.lon[centroid_selection]\n", + "exp_df[\"impf_WS\"] = np.ones_like(hazard.centroids.lat[centroid_selection], int)\n", "# Generate Exposures\n", "exp = Exposures(exp_df)\n", "exp.check()\n", - "exp.value_unit = 'warn_level'\n", + "exp.value_unit = \"warn_level\"\n", "\n", "### generate impact functions\n", "## impact functions for hazard based warnings\n", - "haz_type = 'WS'\n", + "haz_type = \"WS\"\n", "idx = 1\n", - "name = 'warn_level_low_elevation'\n", - 
"intensity_unit = 'm/s'\n", - "intensity = np.array([0.0, 19.439, \n", - " 19.44, 24.999, \n", - " 25.0, 30.549, \n", - " 30.55, 38.879, \n", - " 38.88, 100.0])\n", - "mdd = np.array([1.0, 1.0, \n", - " 2.0, 2.0, \n", - " 3.0, 3.0, \n", - " 4.0, 4.0, \n", - " 5.0, 5.0])\n", + "name = \"warn_level_low_elevation\"\n", + "intensity_unit = \"m/s\"\n", + "intensity = np.array(\n", + " [0.0, 19.439, 19.44, 24.999, 25.0, 30.549, 30.55, 38.879, 38.88, 100.0]\n", + ")\n", + "mdd = np.array([1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 5.0, 5.0])\n", "paa = np.ones_like(mdd)\n", "imp_fun_low = ImpactFunc(haz_type, idx, intensity, mdd, paa, intensity_unit, name)\n", "imp_fun_low.check()\n", @@ -305,7 +306,7 @@ "metadata": {}, "outputs": [], "source": [ - "#create and calculate Forecast\n", + "# create and calculate Forecast\n", "warn_forecast = Forecast({run_datetime: hazard}, exp, impf_set)\n", "warn_forecast.calc()" ] @@ -336,16 +337,18 @@ } ], "source": [ - "warn_forecast.plot_warn_map(cantons_file,\n", - " thresholds=[2,3,4,5],\n", - " decision_level = 'exposure_point',\n", - " probability_aggregation=0.5,\n", - " area_aggregation=0.5,\n", - " title=\"DWD ICON METEOROLOGICAL WARNING\",\n", - " explain_text=\"warn level based on wind gust thresholds\",\n", - " save_fig=False,\n", - " close_fig=False,\n", - " proj=ccrs.epsg(2056));" + "warn_forecast.plot_warn_map(\n", + " cantons_file,\n", + " thresholds=[2, 3, 4, 5],\n", + " decision_level=\"exposure_point\",\n", + " probability_aggregation=0.5,\n", + " area_aggregation=0.5,\n", + " title=\"DWD ICON METEOROLOGICAL WARNING\",\n", + " explain_text=\"warn level based on wind gust thresholds\",\n", + " save_fig=False,\n", + " close_fig=False,\n", + " proj=ccrs.epsg(2056),\n", + ");" ] }, { @@ -390,4 +393,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/doc/tutorial/climada_engine_Impact.ipynb b/doc/tutorial/climada_engine_Impact.ipynb index bbe55afd68..b6ea21cd89 100644 --- a/doc/tutorial/climada_engine_Impact.ipynb +++ b/doc/tutorial/climada_engine_Impact.ipynb @@ -329,7 +329,9 @@ "from climada.entity import LitPop\n", "\n", "# Cuba with resolution 10km and financial_mode = income group.\n", - "exp_lp = LitPop.from_countries(countries=['CUB'], res_arcsec=300, fin_mode='income_group')\n", + "exp_lp = LitPop.from_countries(\n", + " countries=[\"CUB\"], res_arcsec=300, fin_mode=\"income_group\"\n", + ")\n", "exp_lp.check()" ] }, @@ -492,7 +494,7 @@ "# not needed for impact calculations\n", "# visualize the define exposure\n", "exp_lp.plot_raster()\n", - "print('\\n Raster properties exposures:', exp_lp.meta)" + "print(\"\\n Raster properties exposures:\", exp_lp.meta)" ] }, { @@ -540,13 +542,17 @@ "from climada.hazard import TCTracks, TropCyclone, Centroids\n", "\n", "# Load histrocial tropical cyclone tracks from ibtracs over the North Atlantic basin between 2010-2012\n", - "ibtracks_na = TCTracks.from_ibtracs_netcdf(provider='usa', basin='NA', year_range=(2010, 2012), correct_pres=True)\n", - "print('num tracks hist:', ibtracks_na.size)\n", + "ibtracks_na = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", basin=\"NA\", year_range=(2010, 2012), correct_pres=True\n", + ")\n", + "print(\"num tracks hist:\", ibtracks_na.size)\n", "\n", - "ibtracks_na.equal_timestep(0.5) # Interpolation to make the track smooth and to allow applying calc_perturbed_trajectories\n", + "ibtracks_na.equal_timestep(\n", + " 0.5\n", + ") # Interpolation to make the track smooth and to allow applying calc_perturbed_trajectories\n", "# 
Add randomly generated tracks using the calc_perturbed_trajectories method (1 per historical track)\n", "ibtracks_na.calc_perturbed_trajectories(nb_synth_tracks=1)\n", - "print('num tracks hist+syn:', ibtracks_na.size)" + "print(\"num tracks hist+syn:\", ibtracks_na.size)" ] }, { @@ -620,8 +626,8 @@ "outputs": [], "source": [ "# Define the centroids from the exposures position\n", - "lat = exp_lp.gdf['latitude'].values\n", - "lon = exp_lp.gdf['longitude'].values\n", + "lat = exp_lp.gdf[\"latitude\"].values\n", + "lon = exp_lp.gdf[\"longitude\"].values\n", "centrs = Centroids.from_lat_lon(lat, lon)\n", "centrs.check()" ] @@ -702,6 +708,7 @@ "outputs": [], "source": [ "from climada.entity import ImpactFuncSet, ImpfTropCyclone\n", + "\n", "# impact function TC\n", "impf_tc = ImpfTropCyclone.from_emanuel_usa()\n", "\n", @@ -865,7 +872,7 @@ "source": [ "# Exposures: rename column and assign id\n", "exp_lp.gdf.rename(columns={\"impf_\": \"impf_\" + haz_type}, inplace=True)\n", - "exp_lp.gdf['impf_' + haz_type] = haz_id\n", + "exp_lp.gdf[\"impf_\" + haz_type] = haz_id\n", "exp_lp.check()\n", "exp_lp.gdf.head()" ] @@ -910,7 +917,10 @@ "source": [ "# Compute impact\n", "from climada.engine import ImpactCalc\n", - "imp = ImpactCalc(exp_lp, impf_set, tc).impact(save_mat=False) # Do not save the results geographically resolved (only aggregate values)" + "\n", + "imp = ImpactCalc(exp_lp, impf_set, tc).impact(\n", + "    save_mat=False\n", + ")  # Do not save the results geographically resolved (only aggregate values)" ] }, { @@ -1215,25 +1225,27 @@ "from datetime import datetime, date\n", "import pandas as pd\n", "\n", - "#set a harvest date\n", - "harvest_DOY=290 #17 October\n", + "# set a harvest date\n", + "harvest_DOY = 290  # 17 October\n", "\n", - "#loop over all events an check if they happened before or after harvest\n", - "event_ids_post_harvest=[]\n", - "event_ids_pre_harvest=[]\n", + "# loop over all events and check if they happened before or after harvest\n", + "event_ids_post_harvest = []\n", + "event_ids_pre_harvest = []\n", "for event_id in tc.event_id:\n", - "    event_date = tc.date[np.where(tc.event_id==event_id)[0][0]]\n", - "    day_of_year = event_date - date(datetime.fromordinal(event_date).year, 1, 1).toordinal() + 1\n", + "    event_date = tc.date[np.where(tc.event_id == event_id)[0][0]]\n", + "    day_of_year = (\n", + "        event_date - date(datetime.fromordinal(event_date).year, 1, 1).toordinal() + 1\n", + "    )\n", "\n", - "    if day_of_year > harvest_DOY:\n", - "        event_ids_post_harvest.append(event_id)\n", - "    else:\n", - "        event_ids_pre_harvest.append(event_id)\n", + "    if day_of_year > harvest_DOY:\n", + "        event_ids_post_harvest.append(event_id)\n", + "    else:\n", + "        event_ids_pre_harvest.append(event_id)\n", "\n", - "tc_post_harvest=tc.select(event_id=event_ids_post_harvest)\n", - "tc_pre_harvest=tc.select(event_id=event_ids_pre_harvest)\n", - "#print('pre-harvest:', tc_pre_harvest.event_name)\n", - "#print('post-harvest:', tc_post_harvest.event_name)" + "tc_post_harvest = tc.select(event_id=event_ids_post_harvest)\n", + "tc_pre_harvest = tc.select(event_id=event_ids_pre_harvest)\n", + "# print('pre-harvest:', tc_pre_harvest.event_name)\n", + "# print('post-harvest:', tc_post_harvest.event_name)" ] }, { @@ -1285,18 +1297,19 @@ ], "source": [ "from climada.engine import Impact\n", + "\n", "# impact function TC\n", "impf_tc = ImpfTropCyclone.from_emanuel_usa()\n", "# impact function TC after harvest is by factor 0.1 smaller\n", "impf_tc_posth = ImpfTropCyclone.from_emanuel_usa()\n", - 
"impf_tc_posth.mdd = impf_tc.mdd*0.1\n", + "impf_tc_posth.mdd = impf_tc.mdd * 0.1\n", "# add the impact function to an Impact function set\n", "impf_set = ImpactFuncSet([impf_tc])\n", "impf_set_posth = ImpactFuncSet([impf_tc_posth])\n", "impf_set.check()\n", "impf_set_posth.check()\n", "\n", - "#plot\n", + "# plot\n", "impf_set.plot()\n", "impf_set_posth.plot()\n", "\n", @@ -1360,16 +1373,17 @@ ], "source": [ "# Concatenate impacts again\n", - "imp_tot = Impact.concat([imp_preh,imp_posth])\n", + "imp_tot = Impact.concat([imp_preh, imp_posth])\n", "\n", - "#plot result\n", + "# plot result\n", "import matplotlib.pyplot as plt\n", - "ax=imp_preh.plot_hexbin_eai_exposure(gridsize=100,adapt_fontsize=False)\n", - "ax.set_title('Expected annual impact: Pre-Harvest')\n", - "ax=imp_posth.plot_hexbin_eai_exposure(gridsize=100,adapt_fontsize=False)\n", - "ax.set_title('Expected annual impact: Post-Harvest')\n", - "ax=imp_tot.plot_hexbin_eai_exposure(gridsize=100,adapt_fontsize=False)\n", - "ax.set_title('Expected annual impact: Total')\n" + "\n", + "ax = imp_preh.plot_hexbin_eai_exposure(gridsize=100, adapt_fontsize=False)\n", + "ax.set_title(\"Expected annual impact: Pre-Harvest\")\n", + "ax = imp_posth.plot_hexbin_eai_exposure(gridsize=100, adapt_fontsize=False)\n", + "ax.set_title(\"Expected annual impact: Post-Harvest\")\n", + "ax = imp_tot.plot_hexbin_eai_exposure(gridsize=100, adapt_fontsize=False)\n", + "ax.set_title(\"Expected annual impact: Total\")" ] }, { @@ -1459,22 +1473,34 @@ "from climada.engine import ImpactCalc\n", "\n", "# Set Exposures in points\n", - "exp_pnt = Exposures(crs='epsg:4326') #set coordinate system\n", - "exp_pnt.gdf['latitude'] = np.array([21.899326, 21.960728, 22.220574, 22.298390, 21.787977, 21.787977, 21.981732])\n", - "exp_pnt.gdf['longitude'] = np.array([88.307422, 88.565362, 88.378337, 87.806356, 88.348835, 88.348835, 89.246521])\n", - "exp_pnt.gdf['value'] = np.array([1.0e5, 1.2e5, 1.1e5, 1.1e5, 2.0e5, 2.5e5, 0.5e5])\n", + "exp_pnt = Exposures(crs=\"epsg:4326\") # set coordinate system\n", + "exp_pnt.gdf[\"latitude\"] = np.array(\n", + " [21.899326, 21.960728, 22.220574, 22.298390, 21.787977, 21.787977, 21.981732]\n", + ")\n", + "exp_pnt.gdf[\"longitude\"] = np.array(\n", + " [88.307422, 88.565362, 88.378337, 87.806356, 88.348835, 88.348835, 89.246521]\n", + ")\n", + "exp_pnt.gdf[\"value\"] = np.array([1.0e5, 1.2e5, 1.1e5, 1.1e5, 2.0e5, 2.5e5, 0.5e5])\n", "exp_pnt.check()\n", "exp_pnt.plot_scatter(buffer=0.05)\n", "\n", "# Set Hazard in Exposures points\n", "# set centroids from exposures coordinates\n", - "centr_pnt = Centroids.from_lat_lon(exp_pnt.gdf['latitude'].values, exp_pnt.gdf['longitude'].values, exp_pnt.crs)\n", + "centr_pnt = Centroids.from_lat_lon(\n", + " exp_pnt.gdf[\"latitude\"].values, exp_pnt.gdf[\"longitude\"].values, exp_pnt.crs\n", + ")\n", "# compute Hazard in that centroids\n", - "tr_pnt = TCTracks.from_ibtracs_netcdf(storm_id='2007314N10093')\n", + "tr_pnt = TCTracks.from_ibtracs_netcdf(storm_id=\"2007314N10093\")\n", "tc_pnt = TropCyclone.from_tracks(tr_pnt, centroids=centr_pnt)\n", "tc_pnt.check()\n", - "ax_pnt = tc_pnt.centroids.plot(c=np.array(tc_pnt.intensity[0,:].todense()).squeeze()) # plot intensity per point\n", - "ax_pnt.get_figure().colorbar(ax_pnt.collections[0], fraction=0.0175, pad=0.02).set_label('Intensity (m/s)') # add colorbar\n", + "ax_pnt = tc_pnt.centroids.plot(\n", + " c=np.array(tc_pnt.intensity[0, :].todense()).squeeze()\n", + ") # plot intensity per point\n", + "ax_pnt.get_figure().colorbar(\n", + " 
ax_pnt.collections[0], fraction=0.0175, pad=0.02\n", + ").set_label(\n", + "    \"Intensity (m/s)\"\n", + ")  # add colorbar\n", "\n", "# Set impact function\n", "impf_tc = ImpfTropCyclone.from_emanuel_usa()\n", @@ -1486,13 +1512,16 @@ "[haz_id] = impf_set.get_ids()[haz_type]\n", "# Exposures: rename column and assign id\n", "exp_lp.gdf.rename(columns={\"impf_\": \"impf_\" + haz_type}, inplace=True)\n", - "exp_lp.gdf['impf_' + haz_type] = haz_id\n", + "exp_lp.gdf[\"impf_\" + haz_type] = haz_id\n", "exp_lp.gdf.head()\n", "\n", "# Compute Impact\n", "imp_pnt = ImpactCalc(exp_pnt, impf_pnt, tc_pnt).impact()\n", "# nearest neighbor of exposures to centroids gives identity\n", - "print('Nearest neighbor hazard.centroids indexes for each exposure:', exp_pnt.gdf['centr_TC'].values)\n", + "print(\n", + "    \"Nearest neighbor hazard.centroids indexes for each exposure:\",\n", + "    exp_pnt.gdf[\"centr_TC\"].values,\n", + ")\n", "imp_pnt.plot_scatter_eai_exposure(ignore_zero=False, buffer=0.05);" ] }, @@ -1680,24 +1709,32 @@ "from climada.util.constants import HAZ_DEMO_FL\n", "\n", "# Exposures belonging to a raster (the raster information is contained in the meta attribute)\n", - "exp_ras = LitPop.from_countries(countries=['VEN'], res_arcsec=300, fin_mode='income_group')\n", + "exp_ras = LitPop.from_countries(\n", + "    countries=[\"VEN\"], res_arcsec=300, fin_mode=\"income_group\"\n", + ")\n", "exp_ras.gdf.reset_index()\n", "exp_ras.check()\n", "exp_ras.plot_raster()\n", - "print('\\n Raster properties exposures:', exp_ras.meta)\n", + "print(\"\\n Raster properties exposures:\", exp_ras.meta)\n", "\n", "# Initialize hazard object with haz_type = 'FL' (for Flood)\n", - "hazard_type='FL'\n", + "hazard_type = \"FL\"\n", "# Load a previously generated (either with CLIMADA or other means) hazard\n", "# from file (HAZ_DEMO_FL) and resample the hazard raster to the exposures' ones\n", "# Hint: check how other resampling methods affect the final impact\n", - "haz_ras = Hazard.from_raster([HAZ_DEMO_FL], haz_type=hazard_type, dst_crs=exp_ras.meta['crs'], transform=exp_ras.meta['transform'],\n", - "                             width=exp_ras.meta['width'], height=exp_ras.meta['height'],\n", - "                             resampling=Resampling.nearest)\n", - "haz_ras.intensity[haz_ras.intensity==-9999] = 0 # correct no data values\n", + "haz_ras = Hazard.from_raster(\n", + "    [HAZ_DEMO_FL],\n", + "    haz_type=hazard_type,\n", + "    dst_crs=exp_ras.meta[\"crs\"],\n", + "    transform=exp_ras.meta[\"transform\"],\n", + "    width=exp_ras.meta[\"width\"],\n", + "    height=exp_ras.meta[\"height\"],\n", + "    resampling=Resampling.nearest,\n", + ")\n", + "haz_ras.intensity[haz_ras.intensity == -9999] = 0  # correct no data values\n", "haz_ras.check()\n", "haz_ras.plot_intensity(1)\n", - "print('Raster properties centroids:', haz_ras.centroids.meta)\n", + "print(\"Raster properties centroids:\", haz_ras.centroids.meta)\n", "\n", "# Set dummy impact function\n", "intensity = np.linspace(0, 10, 100)\n", @@ -1710,13 +1747,16 @@ "\n", "# Exposures: rename column and assign id\n", "exp_lp.gdf.rename(columns={\"impf_\": \"impf_\" + hazard_type}, inplace=True)\n", - "exp_lp.gdf['impf_' + haz_type] = haz_id\n", + "exp_lp.gdf[\"impf_\" + haz_type] = haz_id\n", "exp_lp.gdf.head()\n", "\n", "# Compute impact\n", "imp_ras = ImpactCalc(exp_ras, impf_ras, haz_ras).impact(save_mat=False)\n", "# nearest neighbor of exposures to centroids is not identity because litpop does not contain data outside the country polygon\n", - "print('\\n Nearest neighbor hazard.centroids indexes for each exposure:', 
exp_ras.gdf['centr_FL'].values)\n", + "print(\n", + "    \"\\n Nearest neighbor hazard.centroids indexes for each exposure:\",\n", + "    exp_ras.gdf[\"centr_FL\"].values,\n", + ")\n", "imp_ras.plot_raster_eai_exposure();" ] }, @@ -1957,7 +1997,7 @@ "from climada_petals.entity import BlackMarble\n", "\n", "exp_video = BlackMarble()\n", - "exp_video.set_countries(['Cuba'], 2016, res_km=2.5)\n", + "exp_video.set_countries([\"Cuba\"], 2016, res_km=2.5)\n", "exp_video.check()\n", "\n", "# impact function\n", @@ -1967,19 +2007,23 @@ "\n", "# compute sequence of hazards using TropCyclone video_intensity method\n", "exp_sea = add_sea(exp_video, (100, 5))\n", - "centr_video = Centroids.from_lat_lon(exp_sea.gdf['latitude'].values, exp_sea.gdf['longitude'].values)\n", + "centr_video = Centroids.from_lat_lon(\n", + "    exp_sea.gdf[\"latitude\"].values, exp_sea.gdf[\"longitude\"].values\n", + ")\n", "centr_video.check()\n", "\n", - "track_name = '2017242N16333'\n", - "tr_irma = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id=track_name) # IRMA 2017\n", + "track_name = \"2017242N16333\"\n", + "tr_irma = TCTracks.from_ibtracs_netcdf(provider=\"usa\", storm_id=track_name)  # IRMA 2017\n", "\n", "tc_video = TropCyclone()\n", - "tc_list, _ = tc_video.video_intensity(track_name, tr_irma, centr_video) # empty file name to not to write the video\n", + "tc_list, _ = tc_video.video_intensity(\n", + "    track_name, tr_irma, centr_video\n", + ")  # empty file name to not write the video\n", "\n", "# generate video of impacts\n", - "file_name='./results/irma_imp_fl.gif'\n", + "file_name = \"./results/irma_imp_fl.gif\"\n", "imp_video = Impact()\n", - "imp_list = imp_video.video_direct_impact(exp_video, impfs_video, tc_list, file_name)\n" + "imp_list = imp_video.video_direct_impact(exp_video, impfs_video, tc_list, file_name)" ] } ], diff --git a/doc/tutorial/climada_engine_impact_data.ipynb b/doc/tutorial/climada_engine_impact_data.ipynb index 443a6f4141..40ead3d807 100644 --- a/doc/tutorial/climada_engine_impact_data.ipynb +++ b/doc/tutorial/climada_engine_impact_data.ipynb @@ -46,11 +46,15 @@ "from matplotlib import pyplot as plt\n", "\n", "from climada.util.constants import DEMO_DIR\n", - "from climada.engine.impact_data import emdat_countries_by_hazard, \\\n", - "    emdat_impact_yearlysum, emdat_to_impact, clean_emdat_df\n", + "from climada.engine.impact_data import (\n", + "    emdat_countries_by_hazard,\n", + "    emdat_impact_yearlysum,\n", + "    emdat_to_impact,\n", + "    clean_emdat_df,\n", + ")\n", "\n", "# set path to CSV file downloaded from https://public.emdat.be :\n", - "emdat_file_path = DEMO_DIR.joinpath('demo_emdat_impact_data_2020.csv')" + "emdat_file_path = DEMO_DIR.joinpath(\"demo_emdat_impact_data_2020.csv\")" ] }, { @@ -129,8 +133,12 @@ "source": [ "\"\"\"Create DataFrame df with EM-DAT entries of tropical cyclones in Thailand and Viet Nam in the years 2005 and 2006\"\"\"\n", "\n", - "df = clean_emdat_df(emdat_file_path, countries=['THA', 'Viet Nam'], hazard=['TC'], \\\n", - "                    year_range=[2005, 2006])\n", + "df = clean_emdat_df(\n", + "    emdat_file_path,\n", + "    countries=[\"THA\", \"Viet Nam\"],\n", + "    hazard=[\"TC\"],\n", + "    year_range=[2005, 2006],\n", + ")\n", "print(df)" ] }, { @@ -160,7 +168,9 @@ "source": [ "\"\"\"emdat_countries_by_hazard: get lists of countries impacted by tropical cyclones from 2010 to 2019\"\"\"\n", "\n", - "iso3_codes, country_names = emdat_countries_by_hazard(emdat_file_path, hazard='TC', year_range=(2010, 2019))\n", + "iso3_codes, country_names = emdat_countries_by_hazard(\n", + 
" emdat_file_path, hazard=\"TC\", year_range=(2010, 2019)\n", + ")\n", "\n", "print(country_names)\n", "\n", @@ -214,11 +224,18 @@ "source": [ "\"\"\"Global TC damages 2000 to 2009\"\"\"\n", "\n", - "impact_emdat, countries = emdat_to_impact(emdat_file_path, 'TC', year_range=(2000,2009))\n", - "\n", - "print('Number of TC events in EM-DAT 2000 to 2009 globally: %i' %(impact_emdat.event_id.size))\n", - "print('Global annual average monetary damage (AAI) from TCs as reported in EM-DAT 2000 to 2009: USD billion %2.2f' \\\n", - " %(impact_emdat.aai_agg/1e9))\n" + "impact_emdat, countries = emdat_to_impact(\n", + " emdat_file_path, \"TC\", year_range=(2000, 2009)\n", + ")\n", + "\n", + "print(\n", + " \"Number of TC events in EM-DAT 2000 to 2009 globally: %i\"\n", + " % (impact_emdat.event_id.size)\n", + ")\n", + "print(\n", + " \"Global annual average monetary damage (AAI) from TCs as reported in EM-DAT 2000 to 2009: USD billion %2.2f\"\n", + " % (impact_emdat.aai_agg / 1e9)\n", + ")" ] }, { @@ -267,26 +284,34 @@ "\"\"\"Total people affected by TCs in the Philippines in 2013:\"\"\"\n", "\n", "# People affected\n", - "impact_emdat_PHL, countries = emdat_to_impact(emdat_file_path, 'TC', countries='PHL', \\\n", - " year_range=(2013,2013), imp_str=\"Total Affected\")\n", - "\n", - "print('Number of TC events in EM-DAT in the Philipppines, 2013: %i' \\\n", - " %(impact_emdat_PHL.event_id.size))\n", - "print('\\nPeople affected by TC events in the Philippines in 2013 (per event):')\n", + "impact_emdat_PHL, countries = emdat_to_impact(\n", + " emdat_file_path,\n", + " \"TC\",\n", + " countries=\"PHL\",\n", + " year_range=(2013, 2013),\n", + " imp_str=\"Total Affected\",\n", + ")\n", + "\n", + "print(\n", + " \"Number of TC events in EM-DAT in the Philipppines, 2013: %i\"\n", + " % (impact_emdat_PHL.event_id.size)\n", + ")\n", + "print(\"\\nPeople affected by TC events in the Philippines in 2013 (per event):\")\n", "print(impact_emdat_PHL.at_event)\n", - "print('\\nPeople affected by TC events in the Philippines in 2013 (total):')\n", + "print(\"\\nPeople affected by TC events in the Philippines in 2013 (total):\")\n", "print(int(impact_emdat_PHL.aai_agg))\n", "\n", "# Comparison to monetary damages:\n", - "impact_emdat_PHL_USD, _ = emdat_to_impact(emdat_file_path, 'TC', countries='PHL', \\\n", - " year_range=(2013,2013))\n", + "impact_emdat_PHL_USD, _ = emdat_to_impact(\n", + " emdat_file_path, \"TC\", countries=\"PHL\", year_range=(2013, 2013)\n", + ")\n", "\n", "ax = plt.scatter(impact_emdat_PHL_USD.at_event, impact_emdat_PHL.at_event)\n", - "plt.title('Typhoon impacts in the Philippines, 2013')\n", - "plt.xlabel('Total Damage [USD]')\n", - "plt.ylabel('People Affected');\n", - "#plt.xscale('log')\n", - "#plt.yscale('log')" + "plt.title(\"Typhoon impacts in the Philippines, 2013\")\n", + "plt.xlabel(\"Total Damage [USD]\")\n", + "plt.ylabel(\"People Affected\");\n", + "# plt.xscale('log')\n", + "# plt.yscale('log')" ] }, { @@ -352,23 +377,40 @@ "source": [ "\"\"\"Yearly TC damages in the USA, normalized and current\"\"\"\n", "\n", - "yearly_damage_normalized_to_2019 = emdat_impact_yearlysum(emdat_file_path, countries='USA', \\\n", - " hazard='Tropical cyclone', year_range=None, \\\n", - " reference_year=2019)\n", + "yearly_damage_normalized_to_2019 = emdat_impact_yearlysum(\n", + " emdat_file_path,\n", + " countries=\"USA\",\n", + " hazard=\"Tropical cyclone\",\n", + " year_range=None,\n", + " reference_year=2019,\n", + ")\n", "\n", - "yearly_damage_current = emdat_impact_yearlysum(emdat_file_path, 
countries=['USA'], hazard='TC',)\n", + "yearly_damage_current = emdat_impact_yearlysum(\n", + "    emdat_file_path,\n", + "    countries=[\"USA\"],\n", + "    hazard=\"TC\",\n", + ")\n", "\n", "import matplotlib.pyplot as plt\n", "\n", "fig, axis = plt.subplots(1, 1)\n", - "axis.plot(yearly_damage_current.year, yearly_damage_current.impact, 'b', label='USD current value')\n", - "axis.plot(yearly_damage_normalized_to_2019.year, yearly_damage_normalized_to_2019.impact_scaled, \\\n", - "          'r--', label='USD normalized to 2019')\n", + "axis.plot(\n", + "    yearly_damage_current.year,\n", + "    yearly_damage_current.impact,\n", + "    \"b\",\n", + "    label=\"USD current value\",\n", + ")\n", + "axis.plot(\n", + "    yearly_damage_normalized_to_2019.year,\n", + "    yearly_damage_normalized_to_2019.impact_scaled,\n", + "    \"r--\",\n", + "    label=\"USD normalized to 2019\",\n", + ")\n", "plt.legend()\n", - "axis.set_title('TC damage reported in EM-DAT in the USA')\n", + "axis.set_title(\"TC damage reported in EM-DAT in the USA\")\n", "axis.set_xticks([2000, 2004, 2008, 2012, 2016])\n", - "axis.set_xlabel('year')\n", - "axis.set_ylabel('Total Damage [USD]');\n" + "axis.set_xlabel(\"year\")\n", + "axis.set_ylabel(\"Total Damage [USD]\");" ] } ], diff --git a/doc/tutorial/climada_engine_unsequa.ipynb b/doc/tutorial/climada_engine_unsequa.ipynb index 08558632ef..a7f6fabd6c 100644 --- a/doc/tutorial/climada_engine_unsequa.ipynb +++ b/doc/tutorial/climada_engine_unsequa.ipynb @@ -154,11 +154,13 @@ "outputs": [], "source": [ "import warnings\n", - "warnings.filterwarnings('ignore') #Ignore warnings for making the tutorial's pdf.\n", "\n", + "warnings.filterwarnings(\"ignore\")  # Ignore warnings for making the tutorial's pdf.\n", + "\n", - "#Define the base exposure\n", + "# Define the base exposure\n", "from climada.util.constants import EXP_DEMO_H5\n", "from climada.entity import Exposures\n", + "\n", "exp_base = Exposures.from_hdf5(EXP_DEMO_H5)" ] }, @@ -177,7 +179,7 @@ "# Here x_exp is the input uncertainty parameter and exp_func the inputvar.func.\n", "def exp_func(x_exp, exp_base=exp_base):\n", "    exp = exp_base.copy()\n", - "    exp.gdf['value'] *= x_exp\n", + "    exp.gdf[\"value\"] *= x_exp\n", "    return exp" ] }, @@ -197,8 +199,9 @@ "from climada.engine.unsequa import InputVar\n", "import scipy as sp\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.uniform(0.9, 0.2),\n", - "              }\n", + "exp_distr = {\n", + "    \"x_exp\": sp.stats.uniform(0.9, 0.2),\n", + "}\n", "exp_iv = InputVar(exp_func, exp_distr)" ] }, @@ -249,8 +252,10 @@ ], "source": [ "# Evaluate for a given value of the uncertainty parameters\n", - "exp095 = exp_iv.func(x_exp = 0.95)\n", - "print(f\"Base value is {exp_base.gdf['value'].sum()}, and the value for x_exp=0.95 is {exp095.gdf['value'].sum()}\")" + "exp095 = exp_iv.func(x_exp=0.95)\n", + "print(\n", + "    f\"Base value is {exp_base.gdf['value'].sum()}, and the value for x_exp=0.95 is {exp095.gdf['value'].sum()}\"\n", + ")" ] }, @@ -315,11 +320,12 @@ "m_min, m_max = (1, 2)\n", "n_min, n_max = (1, 2)\n", "\n", + "\n", "# Define the function\n", "# Note that this here works, but might be slow because the method LitPop is called every time the function\n", "# is evaluated, and LitPop is relatively slow.\n", "def litpop_cat(m, n):\n", "    exp = LitPop.from_countries('CHE', res_arcsec=150, exponents=[m, n])\n", "    return exp" ] }, @@ -341,9 +347,10 @@ "litpop_dict = {}\n", "for m in range(m_min, m_max + 1):\n", "    for n in range(n_min, n_max + 
1):\n", - "        exp_mn = LitPop.from_countries('CHE', res_arcsec=150, exponents=[m, n]);\n", + "        exp_mn = LitPop.from_countries(\"CHE\", res_arcsec=150, exponents=[m, n])\n", "        litpop_dict[(m, n)] = exp_mn\n", "\n", + "\n", "def litpop_cat(m, n, litpop_dict=litpop_dict):\n", "    return litpop_dict[(m, n)]" ] @@ -360,16 +367,18 @@ }, "outputs": [], "source": [ - "#Define the distribution dictionnary\n", + "# Define the distribution dictionary\n", "import scipy as sp\n", "from climada.engine.unsequa import InputVar\n", "\n", "distr_dict = {\n", - "    'm': sp.stats.randint(low=m_min, high=m_max+1),\n", - "    'n': sp.stats.randint(low=n_min, high=n_max+1)\n", - "    }\n", + "    \"m\": sp.stats.randint(low=m_min, high=m_max + 1),\n", + "    \"n\": sp.stats.randint(low=n_min, high=n_max + 1),\n", + "}\n", "\n", - "cat_iv = InputVar(litpop_cat, distr_dict) # One can use either of the above definitions of litpop_cat" + "cat_iv = InputVar(\n", + "    litpop_cat, distr_dict\n", + ")  # One can use either of the above definitions of litpop_cat" ] }, { @@ -578,8 +587,9 @@ "# Requires internet connection\n", "from climada.util.constants import TEST_UNC_OUTPUT_IMPACT\n", "from climada.util.api_client import Client\n", + "\n", "apiclient = Client()\n", - "ds = apiclient.get_dataset_info(name=TEST_UNC_OUTPUT_IMPACT, status='test_dataset')\n", + "ds = apiclient.get_dataset_info(name=TEST_UNC_OUTPUT_IMPACT, status=\"test_dataset\")\n", "_target_dir, [filename] = apiclient.download_dataset(ds)" ] }, { @@ -597,6 +607,7 @@ "source": [ "# If you produced your own data, you do not need the API. Just replace 'filename' with the path to your file.\n", "from climada.engine.unsequa import UncOutput\n", + "\n", "unc_imp = UncOutput.from_hdf5(filename)" ] }, { @@ -623,7 +634,7 @@ } ], "source": [ - "unc_imp.plot_uncertainty(metric_list=['aai_agg'], figsize=(12,5));" + "unc_imp.plot_uncertainty(metric_list=[\"aai_agg\"], figsize=(12, 5));" ] }, { @@ -642,8 +653,9 @@ "# Requires internet connection\n", "from climada.util.constants import TEST_UNC_OUTPUT_COSTBEN\n", "from climada.util.api_client import Client\n", + "\n", "apiclient = Client()\n", - "ds = apiclient.get_dataset_info(name=TEST_UNC_OUTPUT_COSTBEN, status='test_dataset')\n", + "ds = apiclient.get_dataset_info(name=TEST_UNC_OUTPUT_COSTBEN, status=\"test_dataset\")\n", "_target_dir, [filename] = apiclient.download_dataset(ds)" ] }, { @@ -661,6 +673,7 @@ "source": [ "# If you produced your own data, you do not need the API. 
Just replace 'filename' with the path to your file.\n", "from climada.engine.unsequa import UncOutput\n", + "\n", "unc_cb = UncOutput.from_hdf5(filename)" ] }, @@ -955,25 +968,27 @@ }, "outputs": [], "source": [ - "#Define the input variable functions\n", + "# Define the input variable functions\n", "import numpy as np\n", "\n", "from climada.entity import ImpactFunc, ImpactFuncSet, Exposures\n", "from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", "\n", + "\n", "def impf_func(G=1, v_half=84.7, vmin=25.7, k=3, _id=1):\n", "\n", " def xhi(v, v_half, vmin):\n", " return max([(v - vmin), 0]) / (v_half - vmin)\n", "\n", " def sigmoid_func(v, G, v_half, vmin, k):\n", - " return G * xhi(v, v_half, vmin)**k / (1 + xhi(v, v_half, vmin)**k)\n", + " return G * xhi(v, v_half, vmin) ** k / (1 + xhi(v, v_half, vmin) ** k)\n", "\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " import numpy as np\n", " from climada.entity import ImpactFunc, ImpactFuncSet\n", - " intensity_unit = 'm/s'\n", + "\n", + " intensity_unit = \"m/s\"\n", " intensity = np.linspace(0, 150, num=100)\n", " mdd = np.repeat(1, len(intensity))\n", " paa = np.array([sigmoid_func(v, G, v_half, vmin, k) for v in intensity])\n", @@ -982,16 +997,22 @@ " impf_set = ImpactFuncSet([imp_fun])\n", " return impf_set\n", "\n", + "\n", "haz = Hazard.from_hdf5(HAZ_DEMO_H5)\n", "exp_base = Exposures.from_hdf5(EXP_DEMO_H5)\n", - "#It is a good idea to assign the centroids to the base exposures in order to avoid repeating this\n", + "# It is a good idea to assign the centroids to the base exposures in order to avoid repeating this\n", "# potentially costly operation for each sample.\n", "exp_base.assign_centroids(haz)\n", + "\n", + "\n", "def exp_base_func(x_exp, exp_base):\n", " exp = exp_base.copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp\n", + "\n", + "\n", "from functools import partial\n", + "\n", "exp_func = partial(exp_base_func, exp_base=exp_base)" ] }, @@ -1018,7 +1039,7 @@ ], "source": [ "# Visualization of the parametrized impact function\n", - "impf_func(G=0.8, v_half=80, vmin=30,k=5).plot();" + "impf_func(G=0.8, v_half=80, vmin=30, k=5).plot();" ] }, { @@ -1032,13 +1053,15 @@ }, "outputs": [], "source": [ - "#Define the InputVars\n", + "# Define the InputVars\n", "\n", "import scipy as sp\n", "from climada.engine.unsequa import InputVar\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.beta(10, 1.1)} #This is not really a reasonable distribution but is used\n", - " #here to show that you can use any scipy distribution.\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.beta(10, 1.1)\n", + "} # This is not really a reasonable distribution but is used\n", + "# here to show that you can use any scipy distribution.\n", "\n", "exp_iv = InputVar(exp_func, exp_distr)\n", "\n", @@ -1046,8 +1069,8 @@ " \"G\": sp.stats.truncnorm(0.5, 1.5),\n", " \"v_half\": sp.stats.uniform(35, 65),\n", " \"vmin\": sp.stats.uniform(0, 15),\n", - " \"k\": sp.stats.uniform(1, 4)\n", - " }\n", + " \"k\": sp.stats.uniform(1, 4),\n", + "}\n", "impf_iv = InputVar(impf_func, impf_distr)" ] }, @@ -1074,8 +1097,9 @@ ], "source": [ "import matplotlib.pyplot as plt\n", - "ax = exp_iv.plot(figsize=(6,4));\n", - "plt.yticks(fontsize=16);\n", + "\n", + "ax = exp_iv.plot(figsize=(6, 4))\n", + "plt.yticks(fontsize=16)\n", "plt.xticks(fontsize=16);" ] }, @@ -1215,7 +1239,7 @@ } ], 
"source": [ - "output_imp = calc_imp.make_sample(N=2**7, sampling_kwargs={'skip_values': 2**8})\n", + "output_imp = calc_imp.make_sample(N=2**7, sampling_kwargs={\"skip_values\": 2**8})\n", "output_imp.get_samples_df().tail()" ] }, @@ -1248,7 +1272,7 @@ } ], "source": [ - "output_imp.plot_sample(figsize=(15,8));" + "output_imp.plot_sample(figsize=(15, 8));" ] }, { @@ -1269,7 +1293,7 @@ }, "outputs": [], "source": [ - "output_imp = calc_imp.uncertainty(output_imp, rp = [50, 100, 250])" + "output_imp = calc_imp.uncertainty(output_imp, rp=[50, 100, 250])" ] }, { @@ -1306,7 +1330,7 @@ } ], "source": [ - "#All the computed uncertainty metrics attribute\n", + "# All the computed uncertainty metrics attribute\n", "output_imp.uncertainty_metrics" ] }, @@ -1384,8 +1408,8 @@ } ], "source": [ - "#One uncertainty dataframe\n", - "output_imp.get_unc_df('aai_agg').tail()" + "# One uncertainty dataframe\n", + "output_imp.get_unc_df(\"aai_agg\").tail()" ] }, { @@ -1519,7 +1543,7 @@ } ], "source": [ - "output_imp.plot_uncertainty(figsize=(12,12));" + "output_imp.plot_uncertainty(figsize=(12, 12));" ] }, { @@ -1552,7 +1576,7 @@ ], "source": [ "# Specific plot for the return period distributions\n", - "output_imp.plot_rp_uncertainty(figsize=(14.3,8));" + "output_imp.plot_rp_uncertainty(figsize=(14.3, 8));" ] }, { @@ -1704,7 +1728,7 @@ } ], "source": [ - "output_imp.get_sens_df('aai_agg').tail()" + "output_imp.get_sens_df(\"aai_agg\").tail()" ] }, { @@ -1824,7 +1848,7 @@ } ], "source": [ - "output_imp.get_sensitivity('S1')" + "output_imp.get_sensitivity(\"S1\")" ] }, { @@ -1918,7 +1942,7 @@ } ], "source": [ - "output_imp.get_largest_si(salib_si='S1')" + "output_imp.get_largest_si(salib_si=\"S1\")" ] }, { @@ -1953,7 +1977,7 @@ ], "source": [ "# Default for 'sobol' is to plot 'S1' sensitivity index.\n", - "output_imp.plot_sensitivity(figsize=(12,8));" + "output_imp.plot_sensitivity(figsize=(12, 8));" ] }, { @@ -1985,7 +2009,7 @@ } ], "source": [ - "output_imp.plot_sensitivity(salib_si = 'ST', figsize=(12,8));" + "output_imp.plot_sensitivity(salib_si=\"ST\", figsize=(12, 8));" ] }, { @@ -2017,7 +2041,7 @@ } ], "source": [ - "output_imp.plot_sensitivity_second_order(figsize=(12,8));" + "output_imp.plot_sensitivity_second_order(figsize=(12, 8));" ] }, { @@ -2050,7 +2074,7 @@ "from climada.engine.unsequa import CalcImpact\n", "\n", "calc_imp2 = CalcImpact(exp_iv, impf_iv, haz)\n", - "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method='latin')" + "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method=\"latin\")" ] }, { @@ -2075,7 +2099,7 @@ } ], "source": [ - "output_imp2.plot_sample(figsize=(15,8));" + "output_imp2.plot_sample(figsize=(15, 8));" ] }, { @@ -2104,13 +2128,15 @@ "import time\n", "\n", "calc_imp2 = CalcImpact(exp_iv, impf_iv, haz)\n", - "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method='latin')\n", + "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method=\"latin\")\n", "\n", "start = time.time()\n", - "output_imp2 = calc_imp2.uncertainty(output_imp2, rp = [50, 100, 250], calc_eai_exp=True, calc_at_event=True, processes=4)\n", + "output_imp2 = calc_imp2.uncertainty(\n", + " output_imp2, rp=[50, 100, 250], calc_eai_exp=True, calc_at_event=True, processes=4\n", + ")\n", "end = time.time()\n", - "time_passed = end-start\n", - "print(f'Time passed with pool: {time_passed}')" + "time_passed = end - start\n", + "print(f\"Time passed with pool: {time_passed}\")" ] }, { @@ -2148,13 +2174,15 @@ ], "source": [ "calc_imp2 = CalcImpact(exp_iv, impf_iv, haz)\n", - "output_imp2 = 
calc_imp2.make_sample(N=1000, sampling_method='latin')\n", + "output_imp2 = calc_imp2.make_sample(N=1000, sampling_method=\"latin\")\n", "\n", "start2 = time.time()\n", - "output_imp2 = calc_imp2.uncertainty(output_imp2, rp = [50, 100, 250], calc_eai_exp=True, calc_at_event=True)\n", + "output_imp2 = calc_imp2.uncertainty(\n", + "    output_imp2, rp=[50, 100, 250], calc_eai_exp=True, calc_at_event=True\n", + ")\n", "end2 = time.time()\n", - "time_passed_nopool = end2-start2\n", - "print(f'Time passed without pool: {time_passed_nopool}')" + "time_passed_nopool = end2 - start2\n", + "print(f\"Time passed without pool: {time_passed_nopool}\")" ] }, { @@ -2170,10 +2198,11 @@ "source": [ "# Add the original value of the impacts (without uncertainty) to the uncertainty plot\n", "from climada.engine import ImpactCalc\n", + "\n", "imp = ImpactCalc(exp_base, impf_func(), haz).impact(assign_centroids=False)\n", "aai_agg_o = imp.aai_agg\n", "freq_curve_o = imp.calc_freq_curve([50, 100, 250]).impact\n", - "orig_list = [aai_agg_o] + list(freq_curve_o) +[1]" + "orig_list = [aai_agg_o] + list(freq_curve_o) + [1]" ] }, { @@ -2201,7 +2230,12 @@ "source": [ "# plot the aai_agg and freq_curve uncertainty only\n", "# use logarithmic x-scale\n", - "output_imp2.plot_uncertainty(metric_list=['aai_agg', 'freq_curve'], orig_list=orig_list, log=True, figsize=(12,8));" + "output_imp2.plot_uncertainty(\n", + "    metric_list=[\"aai_agg\", \"freq_curve\"],\n", + "    orig_list=orig_list,\n", + "    log=True,\n", + "    figsize=(12, 8),\n", + ");" ] }, { @@ -2217,7 +2251,9 @@ "source": [ "# Use the method 'rbd_fast' which is recommended in pair with 'latin'. In addition, change one of the kwargs\n", "# (M=15) of the salib sampling method.\n", - "output_imp2 = calc_imp2.sensitivity(output_imp2, sensitivity_method='rbd_fast', sensitivity_kwargs = {'M': 15})" + "output_imp2 = calc_imp2.sensitivity(\n", + "    output_imp2, sensitivity_method=\"rbd_fast\", sensitivity_kwargs={\"M\": 15}\n", + ")" ] }, { @@ -2345,7 +2381,7 @@ } ], "source": [ - "output_imp2.get_largest_si(salib_si='S1', metric_list=['eai_exp']).tail()" + "output_imp2.get_largest_si(salib_si=\"S1\", metric_list=[\"eai_exp\"]).tail()" ] }, { @@ -2401,16 +2437,17 @@ "from climada.util.constants import EXP_DEMO_H5, HAZ_DEMO_H5\n", "from climada.hazard import Centroids, TCTracks, Hazard, TropCyclone\n", "\n", + "\n", "def impf_func(G=1, v_half=84.7, vmin=25.7, k=3, _id=1):\n", "\n", "    def xhi(v, v_half, vmin):\n", "        return max([(v - vmin), 0]) / (v_half - vmin)\n", "\n", "    def sigmoid_func(v, G, v_half, vmin, k):\n", - "        return G * xhi(v, v_half, vmin)**k / (1 + xhi(v, v_half, vmin)**k)\n", + "        return G * xhi(v, v_half, vmin) ** k / (1 + xhi(v, v_half, vmin) ** k)\n", "\n", - "    #In-function imports needed only for parallel computing on Windows\n", - "    intensity_unit = 'm/s'\n", + "    # In-function imports needed only for parallel computing on Windows\n", + "    intensity_unit = \"m/s\"\n", "    intensity = np.linspace(0, 150, num=100)\n", "    mdd = np.repeat(1, len(intensity))\n", "    paa = np.array([sigmoid_func(v, G, v_half, vmin, k) for v in intensity])\n", @@ -2446,7 +2483,7 @@ "# pack future hazard sets into dictionary - we want to sample from this dictionary later\n", "haz_fut_list = [haz_26, haz_45, haz_60, haz_85]\n", "tc_haz_fut_dict = {}\n", - "for r, rcp in enumerate(['26', '45', '60', '85']):\n", + "for r, rcp in enumerate([\"26\", \"45\", \"60\", \"85\"]):\n", "    tc_haz_fut_dict[rcp] = haz_fut_list[r]" ] }, { @@ -2457,14 +2494,19 @@ "outputs": [], "source": [ "exp_base = 
Exposures.from_hdf5(EXP_DEMO_H5)\n", - "#It is a good idea to assign the centroids to the base exposures in order to avoid repeating this\n", + "# It is a good idea to assign the centroids to the base exposures in order to avoid repeating this\n", "# potentially costly operation for each sample.\n", "exp_base.assign_centroids(haz)\n", + "\n", + "\n", "def exp_base_func(x_exp, exp_base):\n", " exp = exp_base.copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp\n", + "\n", + "\n", "from functools import partial\n", + "\n", "exp_func = partial(exp_base_func, exp_base=exp_base)" ] }, @@ -2477,8 +2519,10 @@ "import scipy as sp\n", "from climada.engine.unsequa import InputVar\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.beta(10, 1.1)} #This is not really a reasonable distribution but is used\n", - " #here to show that you can use any scipy distribution.\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.beta(10, 1.1)\n", + "} # This is not really a reasonable distribution but is used\n", + "# here to show that you can use any scipy distribution.\n", "\n", "exp_iv = InputVar(exp_func, exp_distr)\n", "\n", @@ -2486,8 +2530,8 @@ " \"G\": sp.stats.truncnorm(0.5, 1.5),\n", " \"v_half\": sp.stats.uniform(35, 65),\n", " \"vmin\": sp.stats.uniform(0, 15),\n", - " \"k\": sp.stats.uniform(1, 4)\n", - " }\n", + " \"k\": sp.stats.uniform(1, 4),\n", + "}\n", "impf_iv = InputVar(impf_func, impf_distr)" ] }, @@ -2504,16 +2548,15 @@ "metadata": {}, "outputs": [], "source": [ - "rcp_key = {0: '26',\n", - " 1: '45',\n", - " 2: '60',\n", - " 3: '85'}\n", + "rcp_key = {0: \"26\", 1: \"45\", 2: \"60\", 3: \"85\"}\n", + "\n", "\n", "# future\n", "def haz_fut_func(rcp_scenario):\n", " haz_fut = tc_haz_fut_dict[rcp_key[rcp_scenario]]\n", " return haz_fut\n", "\n", + "\n", "haz_fut_distr = {\"rcp_scenario\": sp.stats.randint(0, 4)}\n", "\n", "haz_fut_iv = InputVar(haz_fut_func, haz_fut_distr)" @@ -2573,8 +2616,8 @@ ], "source": [ "from climada.engine.unsequa import CalcDeltaImpact\n", - "calc_imp = CalcDeltaImpact(exp_iv, impf_iv, haz,\n", - " exp_iv, impf_iv, haz_fut_iv)" + "\n", + "calc_imp = CalcDeltaImpact(exp_iv, impf_iv, haz, exp_iv, impf_iv, haz_fut_iv)" ] }, { @@ -2639,6 +2682,7 @@ ], "source": [ "from climada.engine.unsequa import UncOutput\n", + "\n", "output_imp.plot_uncertainty(calc_delta=True)" ] }, @@ -2687,6 +2731,7 @@ ], "source": [ "from climada.engine.unsequa import UncOutput\n", + "\n", "output_imp.plot_rp_uncertainty(calc_delta=True)" ] }, @@ -2790,39 +2835,50 @@ "from climada.entity import Entity\n", "from climada.hazard import Hazard\n", "\n", + "\n", "# Entity today has an uncertainty in the total asset value\n", "def ent_today_func(x_ent):\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import Entity\n", " from climada.util.constants import ENT_DEMO_TODAY\n", + "\n", " entity = Entity.from_excel(ENT_DEMO_TODAY)\n", " entity.exposures.ref_year = 2018\n", - " entity.exposures.gdf['value'] *= x_ent\n", + " entity.exposures.gdf[\"value\"] *= x_ent\n", " return entity\n", "\n", + "\n", "# Entity in the future has a +- 10% uncertainty in the cost of all the adapatation measures\n", "def ent_fut_func(m_fut_cost):\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import Entity\n", " from climada.util.constants 
import ENT_DEMO_FUTURE\n", + "\n", " entity = Entity.from_excel(ENT_DEMO_FUTURE)\n", " entity.exposures.ref_year = 2040\n", - " for meas in entity.measures.get_measure('TC'):\n", + " for meas in entity.measures.get_measure(\"TC\"):\n", " meas.cost *= m_fut_cost\n", " return entity\n", "\n", + "\n", "haz_base = Hazard.from_hdf5(HAZ_DEMO_H5)\n", + "\n", + "\n", "# The hazard intensity in the future is also uncertainty by a multiplicative factor\n", "def haz_fut(x_haz_fut, haz_base):\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " import copy\n", " from climada.hazard import Hazard\n", " from climada.util.constants import HAZ_DEMO_H5\n", + "\n", " haz = copy.deepcopy(haz_base)\n", " haz.intensity = haz.intensity.multiply(x_haz_fut)\n", " return haz\n", + "\n", + "\n", "from functools import partial\n", - "haz_fut_func = partial(haz_fut, haz_base=haz_base)\n" + "\n", + "haz_fut_func = partial(haz_fut, haz_base=haz_base)" ] }, { @@ -2853,10 +2909,12 @@ } ], "source": [ - "costs_1 = [meas.cost for meas in ent_fut_func(1).measures.get_measure('TC')]\n", - "costs_05 = [meas.cost for meas in ent_fut_func(0.5).measures.get_measure('TC')]\n", - "print(f\"\\nThe cost for m_fut_cost=1 are {costs_1}\\n\"\n", - " f\"The cost for m_fut_cost=0.5 are {costs_05}\");" + "costs_1 = [meas.cost for meas in ent_fut_func(1).measures.get_measure(\"TC\")]\n", + "costs_05 = [meas.cost for meas in ent_fut_func(0.5).measures.get_measure(\"TC\")]\n", + "print(\n", + " f\"\\nThe cost for m_fut_cost=1 are {costs_1}\\n\"\n", + " f\"The cost for m_fut_cost=0.5 are {costs_05}\"\n", + ");" ] }, { @@ -2882,14 +2940,15 @@ "\n", "haz_today = haz_base\n", "\n", - "haz_fut_distr = {\"x_haz_fut\": sp.stats.uniform(1, 3),\n", - " }\n", + "haz_fut_distr = {\n", + " \"x_haz_fut\": sp.stats.uniform(1, 3),\n", + "}\n", "haz_fut_iv = InputVar(haz_fut_func, haz_fut_distr)\n", "\n", - "ent_today_distr = {\"x_ent\": sp.stats.uniform(0.7, 1)}\n", + "ent_today_distr = {\"x_ent\": sp.stats.uniform(0.7, 1)}\n", "ent_today_iv = InputVar(ent_today_func, ent_today_distr)\n", "\n", - "ent_fut_distr = {\"m_fut_cost\": sp.stats.norm(1, 0.1)}\n", + "ent_fut_distr = {\"m_fut_cost\": sp.stats.norm(1, 0.1)}\n", "ent_fut_iv = InputVar(ent_fut_func, ent_fut_distr)" ] }, @@ -3042,8 +3101,12 @@ "source": [ "from climada.engine.unsequa import CalcCostBenefit\n", "\n", - "unc_cb = CalcCostBenefit(haz_input_var=haz_today, ent_input_var=ent_today_iv,\n", - " haz_fut_input_var=haz_fut_iv, ent_fut_input_var=ent_fut_iv)" + "unc_cb = CalcCostBenefit(\n", + " haz_input_var=haz_today,\n", + " ent_input_var=ent_today_iv,\n", + " haz_fut_input_var=haz_fut_iv,\n", + " ent_fut_input_var=ent_fut_iv,\n", + ")" ] }, { @@ -3132,7 +3195,7 @@ } ], "source": [ - "output_cb= unc_cb.make_sample(N=10, sampling_kwargs={'calc_second_order':False})\n", + "output_cb = unc_cb.make_sample(N=10, sampling_kwargs={\"calc_second_order\": False})\n", "output_cb.get_samples_df().tail()" ] }, @@ -4626,12 +4689,11 @@ } ], "source": [ - "\n", - "#without pool\n", + "# without pool\n", "output_cb = unc_cb.uncertainty(output_cb)\n", "\n", - "#with pool\n", - "output_cb = unc_cb.uncertainty(output_cb, processes=4)\n" + "# with pool\n", + "output_cb = unc_cb.uncertainty(output_cb, processes=4)" ] }, { @@ -4667,7 +4729,7 @@ } ], "source": [ - "#Top level metrics keys\n", + "# Top level metrics keys\n", "macro_metrics = output_cb.uncertainty_metrics\n", "macro_metrics" ] @@ -4803,7 +4865,7 @@ ], 
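The cost-benefit cells above chain the same three unsequa steps (sample, uncertainty, sensitivity) as the impact example earlier in the tutorial. A condensed sketch, assuming the input variables haz_today, ent_today_iv, haz_fut_iv and ent_fut_iv defined in the preceding cells; the sample-count remark is an assumption based on SALib's documented Saltelli scheme, not something CLIMADA guarantees:

```python
# Condensed unsequa cost-benefit workflow (illustrative sketch, not a new API).
from climada.engine.unsequa import CalcCostBenefit

unc_cb = CalcCostBenefit(
    haz_input_var=haz_today,
    ent_input_var=ent_today_iv,
    haz_fut_input_var=haz_fut_iv,
    ent_fut_input_var=ent_fut_iv,
)
# With calc_second_order=False, Saltelli sampling draws roughly N * (D + 2)
# samples for D uncertain parameters (assumed SALib behaviour).
output_cb = unc_cb.make_sample(N=10, sampling_kwargs={"calc_second_order": False})
output_cb = unc_cb.uncertainty(output_cb, processes=4)  # processes=4 uses a pool
output_cb = unc_cb.sensitivity(
    output_cb, sensitivity_kwargs={"calc_second_order": False}
)
```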
"source": [ "# The benefits and cost_ben_ratio are available for each measure\n", - "output_cb.get_uncertainty(metric_list=['benefit', 'cost_ben_ratio']).tail()" + "output_cb.get_uncertainty(metric_list=[\"benefit\", \"cost_ben_ratio\"]).tail()" ] }, { @@ -5073,7 +5135,7 @@ "source": [ "# The impact_meas_present and impact_meas_future provide values of the cost_meas, risk_transf, risk,\n", "# and cost_ins for each measure\n", - "output_cb.get_uncertainty(metric_list=['imp_meas_present']).tail()" + "output_cb.get_uncertainty(metric_list=[\"imp_meas_present\"]).tail()" ] }, { @@ -5106,7 +5168,7 @@ ], "source": [ "# tot_climate_risk and benefit\n", - "output_cb.plot_uncertainty(metric_list=['benefit'], figsize=(12,8));" + "output_cb.plot_uncertainty(metric_list=[\"benefit\"], figsize=(12, 8));" ] }, { @@ -5127,7 +5189,9 @@ }, "outputs": [], "source": [ - "output_cb = unc_cb.sensitivity(output_cb, sensitivity_kwargs={'calc_second_order':False})" + "output_cb = unc_cb.sensitivity(\n", + " output_cb, sensitivity_kwargs={\"calc_second_order\": False}\n", + ")" ] }, { @@ -5161,8 +5225,10 @@ } ], "source": [ - "#plot only certain metrics\n", - "axes = output_cb.plot_sensitivity(metric_list=['cost_ben_ratio','tot_climate_risk','benefit'], figsize=(12,8));" + "# plot only certain metrics\n", + "axes = output_cb.plot_sensitivity(\n", + " metric_list=[\"cost_ben_ratio\", \"tot_climate_risk\", \"benefit\"], figsize=(12, 8)\n", + ");" ] }, { @@ -5216,6 +5282,7 @@ "outputs": [], "source": [ "from climada.util.api_client import Client\n", + "\n", "client = Client()" ] }, @@ -5231,9 +5298,9 @@ "\n", "def get_ws(iso):\n", " properties = {\n", - " 'country_iso3alpha': iso,\n", + " \"country_iso3alpha\": iso,\n", " }\n", - " return client.get_hazard('storm_europe', properties=properties)\n" + " return client.get_hazard(\"storm_europe\", properties=properties)" ] }, { @@ -5242,12 +5309,12 @@ "metadata": {}, "outputs": [], "source": [ - "#Define list of exposures and/or of hazard files\n", + "# Define list of exposures and/or of hazard files\n", "\n", - "exp_list = [get_litpop(iso) for iso in ['CHE', 'DEU', 'ITA']]\n", - "haz_list = [get_ws(iso) for iso in ['CHE', 'DEU', 'ITA']]\n", + "exp_list = [get_litpop(iso) for iso in [\"CHE\", \"DEU\", \"ITA\"]]\n", + "haz_list = [get_ws(iso) for iso in [\"CHE\", \"DEU\", \"ITA\"]]\n", "for exp, haz in zip(exp_list, haz_list):\n", - " exp.gdf['impf_WS'] = 1\n", + " exp.gdf[\"impf_WS\"] = 1\n", " exp.assign_centroids(haz)" ] }, @@ -5257,7 +5324,7 @@ "metadata": {}, "outputs": [], "source": [ - "#Define the input variable\n", + "# Define the input variable\n", "from climada.entity import ImpactFuncSet, Exposures\n", "from climada.entity.impact_funcs.storm_europe import ImpfStormEurope\n", "from climada.hazard import Hazard\n", @@ -5265,31 +5332,40 @@ "import scipy as sp\n", "import copy\n", "\n", + "\n", "def exp_func(cnt, x_exp, exp_list=exp_list):\n", " exp = exp_list[int(cnt)].copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.uniform(0.9, 0.2),\n", - " \"cnt\": sp.stats.randint(low=0, high=len(exp_list)) #use the same parameter name accross input variables\n", - " }\n", + "\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.uniform(0.9, 0.2),\n", + " \"cnt\": sp.stats.randint(\n", + " low=0, high=len(exp_list)\n", + " ), # use the same parameter name accross input variables\n", + "}\n", "exp_iv = InputVar(exp_func, exp_distr)\n", "\n", "\n", "def haz_func(cnt, i_haz, 
haz_list=haz_list):\n", - "    haz = copy.deepcopy(haz_list[int(cnt)]) #use the same parameter name accross input variables\n", + "    haz = copy.deepcopy(\n", + "        haz_list[int(cnt)]\n", + "    )  # use the same parameter name across input variables\n", "    haz.intensity *= i_haz\n", "    return haz\n", "\n", - "haz_distr = {\"i_haz\": sp.stats.norm(1, 0.2),\n", - "             \"cnt\": sp.stats.randint(low=0, high=len(haz_list))\n", - "            }\n", + "\n", + "haz_distr = {\n", + "    \"i_haz\": sp.stats.norm(1, 0.2),\n", + "    \"cnt\": sp.stats.randint(low=0, high=len(haz_list)),\n", + "}\n", "haz_iv = InputVar(haz_func, haz_distr)\n", "\n", "impf = ImpfStormEurope.from_schwierz()\n", "impf_set = ImpactFuncSet()\n", "impf_set.append(impf)\n", - "impf_iv = InputVar.impfset([impf_set], bounds_mdd = [0.9, 1.1])" + "impf_iv = InputVar.impfset([impf_set], bounds_mdd=[0.9, 1.1])" ] }, { @@ -5321,7 +5397,7 @@ "metadata": {}, "outputs": [], "source": [ - "output_imp = calc_imp.make_sample(N=2**2, sampling_kwargs={'skip_values': 2**3})\n" + "output_imp = calc_imp.make_sample(N=2**2, sampling_kwargs={\"skip_values\": 2**3})" ] }, { @@ -5457,6 +5533,7 @@ "outputs": [], "source": [ "from climada.util.api_client import Client\n", + "\n", "client = Client()" ] }, @@ -5468,21 +5545,26 @@ "source": [ "def get_litpop_path(iso):\n", "    properties = {\n", - "        'country_iso3alpha': iso,\n", - "        'res_arcsec': '150',\n", - "        'exponents': '(1,1)',\n", - "        'fin_mode': 'pc'\n", + "        \"country_iso3alpha\": iso,\n", + "        \"res_arcsec\": \"150\",\n", + "        \"exponents\": \"(1,1)\",\n", + "        \"fin_mode\": \"pc\",\n", "    }\n", - "    litpop_datasets = client.list_dataset_infos(data_type='litpop', properties=properties)\n", + "    litpop_datasets = client.list_dataset_infos(\n", + "        data_type=\"litpop\", properties=properties\n", + "    )\n", "    ds = litpop_datasets[0]\n", "    download_dir, ds_files = client.download_dataset(ds)\n", "    return ds_files[0]\n", "\n", + "\n", "def get_ws_path(iso):\n", "    properties = {\n", - "        'country_iso3alpha': iso,\n", + "        \"country_iso3alpha\": iso,\n", "    }\n", - "    hazard_datasets = client.list_dataset_infos(data_type='storm_europe', properties=properties)\n", + "    hazard_datasets = client.list_dataset_infos(\n", + "        data_type=\"storm_europe\", properties=properties\n", + "    )\n", "    ds = hazard_datasets[0]\n", "    download_dir, ds_files = client.download_dataset(ds)\n", "    return ds_files[0]" @@ -5494,10 +5576,10 @@ "metadata": {}, "outputs": [], "source": [ - "#Define list of exposures and/or of hazard files\n", + "# Define list of exposures and/or of hazard files\n", "\n", - "f_exp_list = [get_litpop_path(iso) for iso in ['CHE', 'DEU', 'ITA']]\n", - "f_haz_list = [get_ws_path(iso) for iso in ['CHE', 'DEU', 'ITA']]" + "f_exp_list = [get_litpop_path(iso) for iso in [\"CHE\", \"DEU\", \"ITA\"]]\n", + "f_haz_list = [get_ws_path(iso) for iso in [\"CHE\", \"DEU\", \"ITA\"]]" ] }, { @@ -5506,40 +5588,43 @@ "metadata": {}, "outputs": [], "source": [ - "#Define the input variable for the loading files\n", - "#The trick is to not reload a file if it is already in memory. 
This is done using a global variable.\n", "from climada.entity import ImpactFunc, ImpactFuncSet, Exposures\n", "from climada.hazard import Hazard\n", "from climada.engine.unsequa import InputVar\n", "import scipy as sp\n", "import copy\n", "\n", + "\n", "def exp_func(f_exp, x_exp, filename_list=f_exp_list):\n", " filename = filename_list[int(f_exp)]\n", " global exp_base\n", - " if 'exp_base' in globals():\n", + " if \"exp_base\" in globals():\n", " if isinstance(exp_base, Exposures):\n", - " if exp_base.gdf['filename'] != str(filename):\n", + " if exp_base.gdf[\"filename\"] != str(filename):\n", " exp_base = Exposures.from_hdf5(filename)\n", - " exp_base.gdf['filename'] = str(filename)\n", + " exp_base.gdf[\"filename\"] = str(filename)\n", " else:\n", " exp_base = Exposures.from_hdf5(filename)\n", - " exp_base.gdf['filename'] = str(filename)\n", + " exp_base.gdf[\"filename\"] = str(filename)\n", "\n", " exp = exp_base.copy()\n", - " exp.gdf['value'] *= x_exp\n", + " exp.gdf[\"value\"] *= x_exp\n", " return exp\n", "\n", - "exp_distr = {\"x_exp\": sp.stats.uniform(0.9, 0.2),\n", - " \"f_exp\": sp.stats.randint(low=0, high=len(f_exp_list))\n", - " }\n", + "\n", + "exp_distr = {\n", + " \"x_exp\": sp.stats.uniform(0.9, 0.2),\n", + " \"f_exp\": sp.stats.randint(low=0, high=len(f_exp_list)),\n", + "}\n", "exp_iv = InputVar(exp_func, exp_distr)\n", "\n", "\n", "def haz_func(f_haz, i_haz, filename_list=f_haz_list):\n", " filename = filename_list[int(f_haz)]\n", " global haz_base\n", - " if 'haz_base' in globals():\n", + " if \"haz_base\" in globals():\n", " if isinstance(haz_base, Hazard):\n", " if haz_base.filename != str(filename):\n", " haz_base = Hazard.from_hdf5(filename)\n", @@ -5552,9 +5637,11 @@ " haz.intensity *= i_haz\n", " return haz\n", "\n", - "haz_distr = {\"i_haz\": sp.stats.norm(1, 0.2),\n", - " \"f_haz\": sp.stats.randint(low=0, high=len(f_haz_list))\n", - " }\n", + "\n", + "haz_distr = {\n", + " \"i_haz\": sp.stats.norm(1, 0.2),\n", + " \"f_haz\": sp.stats.randint(low=0, high=len(f_haz_list)),\n", + "}\n", "haz_iv = InputVar(haz_func, haz_distr)\n", "\n", "\n", @@ -5564,29 +5651,33 @@ " return max([(v - vmin), 0]) / (v_half - vmin)\n", "\n", " def sigmoid_func(v, G, v_half, vmin, k):\n", - " return G * xhi(v, v_half, vmin)**k / (1 + xhi(v, v_half, vmin)**k)\n", + " return G * xhi(v, v_half, vmin) ** k / (1 + xhi(v, v_half, vmin) ** k)\n", "\n", - " #In-function imports needed only for parallel computing on Windows\n", + " # In-function imports needed only for parallel computing on Windows\n", " import numpy as np\n", " from climada.entity import ImpactFunc, ImpactFuncSet\n", + "\n", " imp_fun = ImpactFunc()\n", - " imp_fun.haz_type = 'WS'\n", + " imp_fun.haz_type = \"WS\"\n", " imp_fun.id = _id\n", - " imp_fun.intensity_unit = 'm/s'\n", + " imp_fun.intensity_unit = \"m/s\"\n", " imp_fun.intensity = np.linspace(0, 150, num=100)\n", " imp_fun.mdd = np.repeat(1, len(imp_fun.intensity))\n", - " imp_fun.paa = np.array([sigmoid_func(v, G, v_half, vmin, k) for v in imp_fun.intensity])\n", + " imp_fun.paa = np.array(\n", + " [sigmoid_func(v, G, v_half, vmin, k) for v in imp_fun.intensity]\n", + " )\n", " imp_fun.check()\n", " impf_set = ImpactFuncSet()\n", " impf_set.append(imp_fun)\n", " return impf_set\n", "\n", + "\n", "impf_distr = {\n", " \"G\": sp.stats.truncnorm(0.5, 1.5),\n", " \"v_half\": sp.stats.uniform(35, 65),\n", " \"vmin\": sp.stats.uniform(0, 15),\n", - " \"k\": sp.stats.uniform(1, 4)\n", - " }\n", + " \"k\": sp.stats.uniform(1, 4),\n", + "}\n", "impf_iv = 
InputVar(impf_func, impf_distr)" ] }, @@ -5615,8 +5706,8 @@ "outputs": [], "source": [ "# Ordering of the samples by hazard first and exposures second\n", - "output_imp = calc_imp.make_sample(N=2**2, sampling_kwargs={'skip_values': 2**3})\n", - "output_imp.order_samples(by=['f_haz', 'f_exp'])" + "output_imp = calc_imp.make_sample(N=2**2, sampling_kwargs={\"skip_values\": 2**3})\n", + "output_imp.order_samples(by=[\"f_haz\", \"f_exp\"])" ] }, { @@ -5633,8 +5724,9 @@ "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", - "e = output_imp.samples_df['f_exp'].values\n", - "h = output_imp.samples_df['f_haz'].values" + "\n", + "e = output_imp.samples_df[\"f_exp\"].values\n", + "h = output_imp.samples_df[\"f_haz\"].values" ] }, { @@ -5650,12 +5742,12 @@ "metadata": {}, "outputs": [], "source": [ - "plt.plot(e, label='exposures');\n", - "plt.plot(h, label='hazards');\n", - "plt.xlabel('samples');\n", - "plt.ylabel('file number');\n", - "plt.title('Order of exposures and hazards files in samples');\n", - "plt.legend(loc='upper right');" + "plt.plot(e, label=\"exposures\")\n", + "plt.plot(h, label=\"hazards\")\n", + "plt.xlabel(\"samples\")\n", + "plt.ylabel(\"file number\")\n", + "plt.title(\"Order of exposures and hazards files in samples\")\n", + "plt.legend(loc=\"upper right\");" ] }, { @@ -5727,4 +5819,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/doc/tutorial/climada_engine_unsequa_helper.ipynb b/doc/tutorial/climada_engine_unsequa_helper.ipynb index 831f5f4bdd..adad223232 100644 --- a/doc/tutorial/climada_engine_unsequa_helper.ipynb +++ b/doc/tutorial/climada_engine_unsequa_helper.ipynb @@ -37,7 +37,8 @@ "outputs": [], "source": [ "import warnings\n", - "warnings.filterwarnings('ignore') #Ignore warnings for making the tutorial's pdf." + "\n", + "warnings.filterwarnings(\"ignore\") # Ignore warnings for making the tutorial's pdf." 
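The point of order_samples above: with input-variable functions that cache the last loaded file, as in the list-of-files pattern of this tutorial, a file is only (re)loaded when the file index changes from one sample to the next, so sorting the samples by file index minimises the number of loads. A minimal self-contained illustration (toy numbers, not CLIMADA output):

```python
import numpy as np


def n_loads(idx):
    # One initial load plus one reload per change of the file index.
    return 1 + int(np.sum(idx[1:] != idx[:-1]))


h = np.array([2, 0, 1, 0, 2, 1, 0, 2])  # toy 'f_haz' column of samples_df
print(n_loads(h))  # unordered: 8 loads
print(n_loads(np.sort(h)))  # ordered as by order_samples: 3 loads, one per file
```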
] }, { @@ -101,9 +102,10 @@ } ], "source": [ - "#Define the base exposure\n", + "# Define the base exposure\n", "from climada.util.constants import EXP_DEMO_H5\n", "from climada.entity import Exposures\n", + "\n", "exp_base = Exposures.from_hdf5(EXP_DEMO_H5)" ] }, @@ -120,8 +122,9 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", - "bounds_totval = [0.9, 1.1] #+- 10% noise on the total exposures value\n", - "bounds_noise = [0.9, 1.2] #-10% - +20% noise each exposures point\n", + "\n", + "bounds_totval = [0.9, 1.1]  # +- 10% noise on the total exposures value\n", + "bounds_noise = [0.9, 1.2]  # -10% - +20% noise on each exposures point\n", "exp_iv = InputVar.exp([exp_base], bounds_totval, bounds_noise)" ] }, @@ -148,10 +151,10 @@ } ], "source": [ - "#The difference in total value between the base exposure and the average input uncertainty exposure\n", - "#due to the random noise on each exposures point (the average change in the total value is 1.0).\n", + "# The difference in total value between the base exposure and the average input uncertainty exposure\n", + "# due to the random noise on each exposures point (the average change in the total value is 1.0).\n", "avg_exp = exp_iv.evaluate()\n", - "(sum(avg_exp.gdf['value']) - sum(exp_base.gdf['value'])) / sum(exp_base.gdf['value'])" + "(sum(avg_exp.gdf[\"value\"]) - sum(exp_base.gdf[\"value\"])) / sum(exp_base.gdf[\"value\"])" ] }, { @@ -177,8 +180,8 @@ } ], "source": [ - "#The values for EN are seeds for the random number generator for the noise sampling and\n", - "#thus are uniformly sampled numbers between (0, 2**32-1)\n", + "# The values for EN are seeds for the random number generator for the noise sampling and\n", + "# thus are uniformly sampled numbers between (0, 2**32-1)\n", "exp_iv.plot();" ] }, @@ -208,19 +211,23 @@ }, "outputs": [], "source": [ - "#Define a generic method to make litpop instances with different exponent pairs.\n", + "# Define a generic method to make litpop instances with different exponent pairs.\n", "from climada.entity import LitPop\n", - "def generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs,\n", - "                        choice_mn, **litpop_kwargs):\n", - "    #In-function imports needed only for parallel computing on Windows\n", + "\n", + "\n", + "def generate_litpop_base(\n", + "    impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + "):\n", + "    # In-function imports needed only for parallel computing on Windows\n", "    from climada.entity import LitPop\n", + "\n", "    litpop_base = []\n", "    for [m, n] in choice_mn:\n", - "        print('\\n Computing litpop for m=%d, n=%d \\n' %(m, n))\n", - "        litpop_kwargs['exponents'] = (m, n)\n", + "        print(\"\\n Computing litpop for m=%d, n=%d \\n\" % (m, n))\n", + "        litpop_kwargs[\"exponents\"] = (m, n)\n", "        exp = LitPop.from_countries(**litpop_kwargs)\n", - "        exp.gdf['impf_' + haz.haz_type] = impf_id\n", - "        exp.gdf.drop('impf_', axis=1, inplace=True)\n", + "        exp.gdf[\"impf_\" + haz.haz_type] = impf_id\n", + "        exp.gdf.drop(\"impf_\", axis=1, inplace=True)\n", "        if value_unit is not None:\n", "            exp.value_unit = value_unit\n", "        exp.assign_centroids(haz, **assign_centr_kwargs)\n", @@ -248,22 +255,23 @@ } ], "source": [ - "#Define the parameters of the LitPop instances\n", + "# Define the parameters of the LitPop instances\n", "tot_pop = 11.317e6\n", "impf_id = 1\n", - "value_unit = 'people'\n", + "value_unit = \"people\"\n", "litpop_kwargs = {\n", - "    'countries' : ['CUB'],\n", - "    'res_arcsec' : 150,\n", - "    'reference_year' : 2020,\n", 
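A detail that applies to every distribution dictionary in these cells: scipy's uniform takes loc and scale rather than lower and upper bounds, so uniform(a, s) is supported on [a, a + s]. That is why bounds of [0.9, 1.1] translate to sp.stats.uniform(0.9, 0.2). A quick sanity check in plain scipy:

```python
import scipy as sp

# uniform(loc, scale) is supported on [loc, loc + scale]:
# the bounds [0.9, 1.1] used above correspond to uniform(0.9, 0.2).
dist = sp.stats.uniform(0.9, 0.2)
print(dist.support())  # (0.9, 1.1)
print(dist.rvs(size=3))  # three draws, all within [0.9, 1.1]
```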
- " 'fin_mode' : 'norm',\n", - " 'total_values' : [tot_pop]\n", + " \"countries\": [\"CUB\"],\n", + " \"res_arcsec\": 150,\n", + " \"reference_year\": 2020,\n", + " \"fin_mode\": \"norm\",\n", + " \"total_values\": [tot_pop],\n", "}\n", - "assign_centr_kwargs={}\n", + "assign_centr_kwargs = {}\n", "\n", "# The hazard is needed to assign centroids\n", "from climada.util.constants import HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", + "\n", "haz = Hazard.from_hdf5(HAZ_DEMO_H5)" ] }, @@ -460,11 +468,13 @@ } ], "source": [ - "#Generate the LitPop list\n", + "# Generate the LitPop list\n", "\n", - "choice_mn = [[0, 0.5], [0, 1], [0, 2]] #Choice of exponents m,n\n", + "choice_mn = [[0, 0.5], [0, 1], [0, 2]] # Choice of exponents m,n\n", "\n", - "litpop_list = generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs)\n" + "litpop_list = generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + ")" ] }, { @@ -480,9 +490,9 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", - "bounds_totval = [0.9, 1.1] #+- 10% noise on the total exposures value\n", - "litpop_iv = InputVar.exp(exp_list = litpop_list,\n", - " bounds_totval=bounds_totval)" + "\n", + "bounds_totval = [0.9, 1.1] # +- 10% noise on the total exposures value\n", + "litpop_iv = InputVar.exp(exp_list=litpop_list, bounds_totval=bounds_totval)" ] }, { @@ -848,8 +858,8 @@ } ], "source": [ - "#The values for EN are seeds for the random number generator for the noise sampling and\n", - "#thus are uniformly sampled numbers between (0, 2**32-1)\n", + "# The values for EN are seeds for the random number generator for the noise sampling and\n", + "# thus are uniformly sampled numbers between (0, 2**32-1)\n", "litpop_iv.plot();" ] }, @@ -912,9 +922,10 @@ } ], "source": [ - "#Define the base exposure\n", + "# Define the base exposure\n", "from climada.util.constants import HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", + "\n", "haz_base = Hazard.from_hdf5(HAZ_DEMO_H5)" ] }, @@ -931,10 +942,13 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", - "bounds_freq = [0.9, 1.1] #+- 10% noise on the frequency of all events\n", - "bounds_int = None #No uncertainty on the intensity\n", + "\n", + "bounds_freq = [0.9, 1.1] # +- 10% noise on the frequency of all events\n", + "bounds_int = None # No uncertainty on the intensity\n", "n_ev = None\n", - "haz_iv = InputVar.haz([haz_base], n_ev=n_ev, bounds_freq=bounds_freq, bounds_int=bounds_int)" + "haz_iv = InputVar.haz(\n", + " [haz_base], n_ev=n_ev, bounds_freq=bounds_freq, bounds_int=bounds_int\n", + ")" ] }, { @@ -960,8 +974,8 @@ } ], "source": [ - "#The difference in frequency for HF=1.1 is indeed 10%.\n", - "haz_high_freq = haz_iv.evaluate(HE=n_ev, HI=None, HF = 1.1)\n", + "# The difference in frequency for HF=1.1 is indeed 10%.\n", + "haz_high_freq = haz_iv.evaluate(HE=n_ev, HI=None, HF=1.1)\n", "(sum(haz_high_freq.frequency) - sum(haz_base.frequency)) / sum(haz_base.frequency)" ] }, @@ -977,12 +991,18 @@ }, "outputs": [], "source": [ - "bounds_freq = [0.9, 1.1] #+- 10% noise on the frequency of all events\n", - "bounds_int = None #No uncertainty on the intensity\n", - "bounds_frac = [0.7, 1.1] #noise on the fraction of all events\n", - "n_ev = round(0.8 * haz_base.size) #sub-sample with re-draw events to obtain hazards with n=0.8*tot_number_events\n", + "bounds_freq = [0.9, 1.1] # +- 10% noise on the frequency of all events\n", + "bounds_int = None # 
No uncertainty on the intensity\n", + "bounds_frac = [0.7, 1.1]  # noise on the fraction of all events\n", + "n_ev = round(\n", + "    0.8 * haz_base.size\n", + ")  # sub-sample with re-draw events to obtain hazards with n=0.8*tot_number_events\n", "haz_iv = InputVar.haz(\n", - "    [haz_base], n_ev=n_ev, bounds_freq=bounds_freq, bounds_int=bounds_int, bounds_frac=bounds_frac\n", + "    [haz_base],\n", + "    n_ev=n_ev,\n", + "    bounds_freq=bounds_freq,\n", + "    bounds_int=bounds_int,\n", + "    bounds_frac=bounds_frac,\n", ")" ] }, @@ -1007,9 +1027,12 @@ "outputs": [], "source": [ "import numpy as np\n", + "\n", - "HE = 2618981871 #The random seed (number between 0 and 2**32)\n", - "rng = np.random.RandomState(int(HE)) #Initialize a random state with the seed\n", - "chosen_ev = list(rng.choice(haz_base.event_name, int(n_ev))) #Obtain the corresponding events" + "HE = 2618981871  # The random seed (number between 0 and 2**32)\n", + "rng = np.random.RandomState(int(HE))  # Initialize a random state with the seed\n", + "chosen_ev = list(\n", + "    rng.choice(haz_base.event_name, int(n_ev))\n", + ")  # Obtain the corresponding events" ] }, { @@ -1035,7 +1058,7 @@ } ], "source": [ - "#The first event is\n", + "# The first event is\n", "chosen_ev[0]" ] }, @@ -1062,8 +1085,8 @@ } ], "source": [ - "#The values for HE are seeds for the random number generator for the noise sampling and\n", - "#thus are uniformly sampled numbers between (0, 2**32-1)\n", + "# The values for HE are seeds for the random number generator for the noise sampling and\n", + "# thus are uniformly sampled numbers between (0, 2**32-1)\n", "haz_iv.plot();" ] }, @@ -1098,9 +1121,9 @@ } ], "source": [ - "#The number of events per sample is equal to n_ev\n", - "haz_sub = haz_iv.evaluate(HE=928165924, HI=None, HF = 1.1, HA=None)\n", - "#The number for HE is irrelevant, as all samples have the same n_Ev\n", + "# The number of events per sample is equal to n_ev\n", + "haz_sub = haz_iv.evaluate(HE=928165924, HI=None, HF=1.1, HA=None)\n", + "# The number for HE is irrelevant, as all samples have the same n_ev\n", "haz_sub.size - n_ev" ] }, @@ -1149,6 +1172,7 @@ "outputs": [], "source": [ "from climada.entity import ImpactFuncSet, ImpfTropCyclone\n", + "\n", "impf = ImpfTropCyclone.from_emanuel_usa()\n", "impf_set_base = ImpactFuncSet([impf])" ] }, @@ -1174,14 +1198,17 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", - "bounds_impfi = [-10, 10] #-10 m/s ; +10m/s uncertainty on the intensity\n", - "bounds_mdd = [0.7, 1.1] #-30% - +10% uncertainty on the mdd\n", - "bounds_paa = None #No uncertainty in the paa\n", - "impf_iv = InputVar.impfset(impf_set_list=[impf_set_base],\n", - "                           bounds_impfi=bounds_impfi,\n", - "                           bounds_mdd=bounds_mdd,\n", - "                           bounds_paa=bounds_paa,\n", - "                           haz_id_dict={'TC': [1]})" + "\n", + "bounds_impfi = [-10, 10]  # -10 m/s ; +10m/s uncertainty on the intensity\n", + "bounds_mdd = [0.7, 1.1]  # -30% - +10% uncertainty on the mdd\n", + "bounds_paa = None  # No uncertainty in the paa\n", + "impf_iv = InputVar.impfset(\n", + "    impf_set_list=[impf_set_base],\n", + "    bounds_impfi=bounds_impfi,\n", + "    bounds_mdd=bounds_mdd,\n", + "    bounds_paa=bounds_paa,\n", + "    haz_id_dict={\"TC\": [1]},\n", + ")" ] }, { @@ -1207,11 +1234,11 @@ } ], "source": [ - "#Plot the impact function for 50 random samples (note for the expert, these are not global)\n", + "# Plot the impact function for 50 random samples (note for the expert, these are not global)\n", "n = 50\n", "ax = impf_iv.evaluate().plot()\n", - "inten = 
impf_iv.distr_dict['IFi'].rvs(size=n)\n", - "mdd = impf_iv.distr_dict['MDD'].rvs(size=n)\n", + "inten = impf_iv.distr_dict[\"IFi\"].rvs(size=n)\n", + "mdd = impf_iv.distr_dict[\"MDD\"].rvs(size=n)\n", "for i, m in zip(inten, mdd):\n", " impf_iv.evaluate(IFi=i, MDD=m).plot(axis=ax)\n", "ax.get_legend().remove()" @@ -1286,6 +1313,7 @@ "source": [ "from climada.entity import Entity\n", "from climada.util.constants import ENT_DEMO_TODAY\n", + "\n", "ent = Entity.from_excel(ENT_DEMO_TODAY)\n", "ent.exposures.ref_year = 2018\n", "ent.check()" @@ -1304,11 +1332,12 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", + "\n", "ent_iv = InputVar.ent(\n", - " impf_set_list = [ent.impact_funcs],\n", - " disc_rate = ent.disc_rates,\n", - " exp_list = [ent.exposures],\n", - " meas_set = ent.measures,\n", + " impf_set_list=[ent.impact_funcs],\n", + " disc_rate=ent.disc_rates,\n", + " exp_list=[ent.exposures],\n", + " meas_set=ent.measures,\n", " bounds_disc=[0, 0.08],\n", " bounds_cost=[0.5, 1.5],\n", " bounds_totval=[0.9, 1.1],\n", @@ -1316,8 +1345,8 @@ " bounds_mdd=[0.9, 1.05],\n", " bounds_paa=None,\n", " bounds_impfi=[-2, 5],\n", - " haz_id_dict={'TC': [1]}\n", - " )" + " haz_id_dict={\"TC\": [1]},\n", + ")" ] }, { @@ -1367,19 +1396,23 @@ }, "outputs": [], "source": [ - "#Define a generic method to make litpop instances with different exponent pairs.\n", + "# Define a generic method to make litpop instances with different exponent pairs.\n", "from climada.entity import LitPop\n", - "def generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs,\n", - " choice_mn, **litpop_kwargs):\n", - " #In-function imports needed only for parallel computing on Windows\n", + "\n", + "\n", + "def generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + "):\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import LitPop\n", + "\n", " litpop_base = []\n", " for [m, n] in choice_mn:\n", - " print('\\n Computing litpop for m=%d, n=%d \\n' %(m, n))\n", - " litpop_kwargs['exponents'] = (m, n)\n", + " print(\"\\n Computing litpop for m=%d, n=%d \\n\" % (m, n))\n", + " litpop_kwargs[\"exponents\"] = (m, n)\n", " exp = LitPop.from_countries(**litpop_kwargs)\n", - " exp.gdf['impf_' + haz.haz_type] = impf_id\n", - " exp.gdf.drop('impf_', axis=1, inplace=True)\n", + " exp.gdf[\"impf_\" + haz.haz_type] = impf_id\n", + " exp.gdf.drop(\"impf_\", axis=1, inplace=True)\n", " if value_unit is not None:\n", " exp.value_unit = value_unit\n", " exp.assign_centroids(haz, **assign_centr_kwargs)\n", @@ -1407,19 +1440,20 @@ } ], "source": [ - "#Define the parameters of the LitPop instances\n", + "# Define the parameters of the LitPop instances\n", "impf_id = 1\n", "value_unit = None\n", "litpop_kwargs = {\n", - " 'countries' : ['CUB'],\n", - " 'res_arcsec' : 300,\n", - " 'reference_year' : 2020,\n", + " \"countries\": [\"CUB\"],\n", + " \"res_arcsec\": 300,\n", + " \"reference_year\": 2020,\n", "}\n", - "assign_centr_kwargs={}\n", + "assign_centr_kwargs = {}\n", "\n", "# The hazard is needed to assign centroids\n", "from climada.util.constants import HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", + "\n", "haz = Hazard.from_hdf5(HAZ_DEMO_H5)" ] }, @@ -1661,11 +1695,13 @@ } ], "source": [ - "#Generate the LitPop list\n", + "# Generate the LitPop list\n", "\n", - "choice_mn = [[1, 0.5], [0.5, 1], [1, 1]] #Choice of exponents m,n\n", + "choice_mn = [[1, 0.5], [0.5, 1], [1, 1]] # Choice of exponents 
m,n\n", "\n", - "litpop_list = generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs)\n" + "litpop_list = generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + ")" ] }, { @@ -1693,6 +1729,7 @@ "source": [ "from climada.entity import Entity\n", "from climada.util.constants import ENT_DEMO_TODAY\n", + "\n", "ent = Entity.from_excel(ENT_DEMO_TODAY)\n", "ent.exposures.ref_year = 2020\n", "ent.check()" @@ -1711,11 +1748,12 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", + "\n", "ent_iv = InputVar.ent(\n", - " impf_set_list = [ent.impact_funcs],\n", - " disc_rate = ent.disc_rates,\n", - " exp_list = litpop_list,\n", - " meas_set = ent.measures,\n", + " impf_set_list=[ent.impact_funcs],\n", + " disc_rate=ent.disc_rates,\n", + " exp_list=litpop_list,\n", + " meas_set=ent.measures,\n", " bounds_disc=[0, 0.08],\n", " bounds_cost=[0.5, 1.5],\n", " bounds_totval=[0.9, 1.1],\n", @@ -1723,8 +1761,8 @@ " bounds_mdd=[0.9, 1.05],\n", " bounds_paa=None,\n", " bounds_impfi=[-2, 5],\n", - " haz_id_dict={'TC': [1]}\n", - " )" + " haz_id_dict={\"TC\": [1]},\n", + ")" ] }, { @@ -1847,16 +1885,16 @@ "outputs": [], "source": [ "entfut_iv = InputVar.entfut(\n", - " impf_set_list = [ent_fut.impact_funcs],\n", - " exp_list = [ent_fut.exposures],\n", - " meas_set = ent_fut.measures,\n", + " impf_set_list=[ent_fut.impact_funcs],\n", + " exp_list=[ent_fut.exposures],\n", + " meas_set=ent_fut.measures,\n", " bounds_cost=[0.6, 1.2],\n", " bounds_eg=[0.8, 1.5],\n", " bounds_noise=None,\n", " bounds_mdd=[0.7, 0.9],\n", " bounds_paa=[1.3, 2],\n", - " haz_id_dict={'TC': [1]}\n", - " )" + " haz_id_dict={\"TC\": [1]},\n", + ")" ] }, { @@ -1879,19 +1917,23 @@ }, "outputs": [], "source": [ - "#Define a generic method to make litpop instances with different exponent pairs.\n", + "# Define a generic method to make litpop instances with different exponent pairs.\n", "from climada.entity import LitPop\n", - "def generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs,\n", - " choice_mn, **litpop_kwargs):\n", - " #In-function imports needed only for parallel computing on Windows\n", + "\n", + "\n", + "def generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + "):\n", + " # In-function imports needed only for parallel computing on Windows\n", " from climada.entity import LitPop\n", + "\n", " litpop_base = []\n", " for [m, n] in choice_mn:\n", - " print('\\n Computing litpop for m=%d, n=%d \\n' %(m, n))\n", - " litpop_kwargs['exponents'] = (m, n)\n", + " print(\"\\n Computing litpop for m=%d, n=%d \\n\" % (m, n))\n", + " litpop_kwargs[\"exponents\"] = (m, n)\n", " exp = LitPop.from_countries(**litpop_kwargs)\n", - " exp.gdf['impf_' + haz.haz_type] = impf_id\n", - " exp.gdf.drop('impf_', axis=1, inplace=True)\n", + " exp.gdf[\"impf_\" + haz.haz_type] = impf_id\n", + " exp.gdf.drop(\"impf_\", axis=1, inplace=True)\n", " if value_unit is not None:\n", " exp.value_unit = value_unit\n", " exp.assign_centroids(haz, **assign_centr_kwargs)\n", @@ -1919,19 +1961,20 @@ } ], "source": [ - "#Define the parameters of the LitPop instances\n", + "# Define the parameters of the LitPop instances\n", "impf_id = 1\n", "value_unit = None\n", "litpop_kwargs = {\n", - " 'countries' : ['CUB'],\n", - " 'res_arcsec' : 300,\n", - " 'reference_year' : 2040,\n", + " \"countries\": [\"CUB\"],\n", + " \"res_arcsec\": 300,\n", + " \"reference_year\": 2040,\n", "}\n", - 
"assign_centr_kwargs={}\n", + "assign_centr_kwargs = {}\n", "\n", "# The hazard is needed to assign centroids\n", "from climada.util.constants import HAZ_DEMO_H5\n", "from climada.hazard import Hazard\n", + "\n", "haz = Hazard.from_hdf5(HAZ_DEMO_H5)" ] }, @@ -2306,11 +2349,13 @@ } ], "source": [ - "#Generate the LitPop list\n", + "# Generate the LitPop list\n", "\n", - "choice_mn = [[1, 0.5], [0.5, 1], [1, 1]] #Choice of exponents m,n\n", + "choice_mn = [[1, 0.5], [0.5, 1], [1, 1]] # Choice of exponents m,n\n", "\n", - "litpop_list = generate_litpop_base(impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs)\n" + "litpop_list = generate_litpop_base(\n", + " impf_id, value_unit, haz, assign_centr_kwargs, choice_mn, **litpop_kwargs\n", + ")" ] }, { @@ -2358,17 +2403,18 @@ "outputs": [], "source": [ "from climada.engine.unsequa import InputVar\n", + "\n", "entfut_iv = InputVar.entfut(\n", - " impf_set_list = [ent_fut.impact_funcs],\n", - " exp_list = litpop_list,\n", - " meas_set = ent_fut.measures,\n", + " impf_set_list=[ent_fut.impact_funcs],\n", + " exp_list=litpop_list,\n", + " meas_set=ent_fut.measures,\n", " bounds_cost=[0.6, 1.2],\n", " bounds_eg=[0.8, 1.5],\n", " bounds_noise=None,\n", " bounds_mdd=[0.7, 0.9],\n", " bounds_paa=[1.3, 2],\n", - " haz_id_dict={'TC': [1]}\n", - " )" + " haz_id_dict={\"TC\": [1]},\n", + ")" ] } ], diff --git a/doc/tutorial/climada_entity_DiscRates.ipynb b/doc/tutorial/climada_entity_DiscRates.ipynb index acb33de016..375e2167fc 100644 --- a/doc/tutorial/climada_entity_DiscRates.ipynb +++ b/doc/tutorial/climada_entity_DiscRates.ipynb @@ -74,11 +74,11 @@ "# Compute net present value between present year and future year.\n", "ini_year = 2019\n", "end_year = 2050\n", - "val_years = np.zeros(end_year-ini_year+1)\n", - "val_years[0] = 100000000 # initial investment\n", - "val_years[10:] = 75000 # maintenance from 10th year\n", + "val_years = np.zeros(end_year - ini_year + 1)\n", + "val_years[0] = 100000000 # initial investment\n", + "val_years[10:] = 75000 # maintenance from 10th year\n", "npv = disc.net_present_value(ini_year, end_year, val_years)\n", - "print('net present value: {:.5e}'.format(npv))" + "print(\"net present value: {:.5e}\".format(npv))" ] }, { @@ -135,8 +135,8 @@ "from climada.util import ENT_TEMPLATE_XLS\n", "\n", "# Fill DataFrame from Excel file\n", - "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", - "print('Read file:', ENT_TEMPLATE_XLS)\n", + "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", + "print(\"Read file:\", ENT_TEMPLATE_XLS)\n", "disc = DiscRates.from_excel(file_name)\n", "disc.plot();" ] @@ -170,11 +170,11 @@ "from climada.util import ENT_TEMPLATE_XLS\n", "\n", "# Fill DataFrame from Excel file\n", - "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", + "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", "disc = DiscRates.from_excel(file_name)\n", "\n", "# write file\n", - "disc.write_excel('results/tutorial_disc.xlsx')" + "disc.write_excel(\"results/tutorial_disc.xlsx\")" ] }, { @@ -192,8 +192,9 @@ "outputs": [], "source": [ "from climada.util.save import save\n", + "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('tutorial_disc.p', disc)" + "save(\"tutorial_disc.p\", disc)" ] } ], diff --git a/doc/tutorial/climada_entity_Exposures.ipynb b/doc/tutorial/climada_entity_Exposures.ipynb index b5db1520ec..d46903e8f2 100644 --- 
a/doc/tutorial/climada_entity_Exposures.ipynb +++ b/doc/tutorial/climada_entity_Exposures.ipynb @@ -112,13 +112,15 @@ "# Fill a pandas DataFrame with the 3 mandatory variables (latitude, longitude, value) for a number of assets (10'000).\n", "# We will do this with random dummy data for purely illustrative reasons:\n", "exp_df = DataFrame()\n", - "n_exp = 100*100\n", + "n_exp = 100 * 100\n", "# provide value\n", - "exp_df['value'] = np.arange(n_exp)\n", + "exp_df[\"value\"] = np.arange(n_exp)\n", "# provide latitude and longitude\n", - "lat, lon = np.mgrid[15 : 35 : complex(0, np.sqrt(n_exp)), 20 : 40 : complex(0, np.sqrt(n_exp))]\n", - "exp_df['latitude'] = lat.flatten()\n", - "exp_df['longitude'] = lon.flatten()" + "lat, lon = np.mgrid[\n", + " 15 : 35 : complex(0, np.sqrt(n_exp)), 20 : 40 : complex(0, np.sqrt(n_exp))\n", + "]\n", + "exp_df[\"latitude\"] = lat.flatten()\n", + "exp_df[\"longitude\"] = lon.flatten()" ] }, { @@ -131,7 +133,7 @@ "# In this case, we only specify the IDs for tropical cyclone (TC); here, each exposure entry will be treated with\n", "# the same impact function: the one that has ID '1':\n", "# Of course, this will only be relevant at later steps during impact calculations.\n", - "exp_df['impf_TC'] = np.ones(n_exp, int)" + "exp_df[\"impf_TC\"] = np.ones(n_exp, int)" ] }, { @@ -156,8 +158,8 @@ ], "source": [ "# Let's have a look at the pandas DataFrame\n", - "print('exp_df is a DataFrame:', str(type(exp_df)))\n", - "print('exp_df looks like:')\n", + "print(\"exp_df is a DataFrame:\", str(type(exp_df)))\n", + "print(\"exp_df looks like:\")\n", "print(exp_df.head())" ] }, @@ -195,12 +197,12 @@ "# Generate Exposures from the pandas DataFrame. This step converts the DataFrame into\n", "# a CLIMADA Exposures instance!\n", "exp = Exposures(exp_df)\n", - "print('exp has the type:', str(type(exp)))\n", - "print('and contains a GeoDataFrame exp.gdf:', str(type(exp.gdf)))\n", + "print(\"exp has the type:\", str(type(exp)))\n", + "print(\"and contains a GeoDataFrame exp.gdf:\", str(type(exp.gdf)))\n", "\n", "# set geometry attribute (shapely Points) from GeoDataFrame from latitude and longitude\n", "exp.set_geometry_points()\n", - "print('\\n' + 'check method logs:')\n", + "print(\"\\n\" + \"check method logs:\")\n", "\n", "# always apply the check() method in the end. 
It puts metadata that has not been assigned,\n", "# and points out missing mandatory data\n", @@ -243,7 +245,7 @@ ], "source": [ "# let's have a look at the Exposures instance we created!\n", - "print('\\n' + 'exp looks like:')\n", + "print(\"\\n\" + \"exp looks like:\")\n", "print(exp)" ] }, @@ -292,9 +294,9 @@ "from climada.entity import Exposures\n", "\n", "# Read spatial info from an external file into GeoDataFrame\n", - "world = gpd.read_file(gpd.datasets.get_path('naturalearth_cities'))\n", - "print('World is a GeoDataFrame:', str(type(world)))\n", - "print('World looks like:')\n", + "world = gpd.read_file(gpd.datasets.get_path(\"naturalearth_cities\"))\n", + "print(\"World is a GeoDataFrame:\", str(type(world)))\n", + "print(\"World looks like:\")\n", "print(world.head())" ] }, @@ -317,9 +319,9 @@ "# Generate Exposures: value, latitude and longitude for each exposure entry.\n", "# Convert GeoDataFrame into Exposure instance\n", "exp_gpd = Exposures(world)\n", - "print('\\n' + 'exp_gpd is an Exposures:', str(type(exp_gpd)))\n", + "print(\"\\n\" + \"exp_gpd is an Exposures:\", str(type(exp_gpd)))\n", "# add random values to entries\n", - "exp_gpd.gdf['value'] = np.arange(world.shape[0])\n", + "exp_gpd.gdf[\"value\"] = np.arange(world.shape[0])\n", "# set latitude and longitude attributes from geometry\n", "exp_gpd.set_lat_lon()" ] @@ -348,8 +350,8 @@ "# In this case, we only specify the IDs for tropical cyclone (TC); here, each exposure entry will be treated with\n", "# the same impact function: the one that has ID '1':\n", "# Of course, this will only be relevant at later steps during impact calculations.\n", - "exp_gpd.gdf['impf_TC'] = np.ones(world.shape[0], int)\n", - "print('\\n' + 'check method logs:')\n", + "exp_gpd.gdf[\"impf_TC\"] = np.ones(world.shape[0], int)\n", + "print(\"\\n\" + \"check method logs:\")\n", "\n", "# as always, run check method to assign meta-data and check for missing mandatory variables.\n", "exp_gpd.check()" @@ -414,7 +416,7 @@ ], "source": [ "# let's have a look at the Exposures instance we created!\n", - "print('\\n' + '\\x1b[1;03;30;30m' + 'exp_gpd looks like:' + '\\x1b[0m')\n", + "print(\"\\n\" + \"\\x1b[1;03;30;30m\" + \"exp_gpd looks like:\" + \"\\x1b[0m\")\n", "print(exp_gpd)" ] }, @@ -536,7 +538,7 @@ "sel_exp = exp_gpd.copy() # to keep the original exp_gpd Exposures data\n", "sel_exp.gdf = sel_exp.gdf.cx[:, -5:5]\n", "\n", - "print('\\n' + 'sel_exp contains a subset of the original data')\n", + "print(\"\\n\" + \"sel_exp contains a subset of the original data\")\n", "sel_exp.gdf.head()" ] }, @@ -659,13 +661,14 @@ "source": [ "# Example 2: extract data in a polygon\n", "from shapely.geometry import Polygon\n", + "\n", "sel_polygon = exp_gpd.copy() # to keep the original exp_gpd Exposures data\n", "\n", "poly = Polygon([(0, -10), (0, 10), (10, 5)])\n", "sel_polygon.gdf = sel_polygon.gdf[sel_polygon.gdf.intersects(poly)]\n", "\n", "# Let's have a look. 
Again, the sub-selection is a GeoDataFrame!\n", - "print('\\n' + 'sel_exp contains a subset of the original data')\n", + "print(\"\\n\" + \"sel_exp contains a subset of the original data\")\n", "sel_polygon.gdf" ] }, @@ -799,8 +802,10 @@ "# Example 3: change coordinate reference system\n", "# use help to see more options: help(sel_exp.to_crs)\n", "sel_polygon.to_crs(epsg=3395, inplace=True)\n", - "print('\\n' + 'the crs has changed to ' +str(sel_polygon.crs))\n", - "print('the values for latitude and longitude are now according to the new coordinate system: ')\n", + "print(\"\\n\" + \"the crs has changed to \" + str(sel_polygon.crs))\n", + "print(\n", + " \"the values for latitude and longitude are now according to the new coordinate system: \"\n", + ")\n", "sel_polygon.gdf" ] }, @@ -922,8 +927,8 @@ "exp_all = Exposures.concat([sel_polygon, sel_exp.to_crs(epsg=3395)])\n", "\n", "# the output is of type Exposures\n", - "print('exp_all type and number of rows:', type(exp_all), exp_all.gdf.shape[0])\n", - "print('number of unique rows:', exp_all.gdf.drop_duplicates().shape[0])\n", + "print(\"exp_all type and number of rows:\", type(exp_all), exp_all.gdf.shape[0])\n", + "print(\"number of unique rows:\", exp_all.gdf.drop_duplicates().shape[0])\n", "\n", "# NaNs will appear in the missing values\n", "exp_all.gdf.head()" @@ -1103,8 +1108,8 @@ "exp_templ = pd.read_excel(file_name)\n", "\n", "# Let's have a look at the data:\n", - "print('exp_templ is a DataFrame:', str(type(exp_templ)))\n", - "print('exp_templ looks like:')\n", + "print(\"exp_templ is a DataFrame:\", str(type(exp_templ)))\n", + "print(\"exp_templ looks like:\")\n", "exp_templ.head()" ] }, @@ -1145,14 +1150,14 @@ "source": [ "# Generate an Exposures instance from the dataframe.\n", "exp_templ = Exposures(exp_templ)\n", - "print('\\n' + 'exp_templ is now an Exposures:', str(type(exp_templ)))\n", + "print(\"\\n\" + \"exp_templ is now an Exposures:\", str(type(exp_templ)))\n", "\n", "# set geometry attribute (shapely Points) from GeoDataFrame from latitude and longitude\n", - "print('\\n' + 'set_geometry logs:')\n", + "print(\"\\n\" + \"set_geometry logs:\")\n", "exp_templ.set_geometry_points()\n", "# as always, run check method to include metadata and check for missing mandatory parameters\n", "\n", - "print('\\n' + 'check exp_templ:')\n", + "print(\"\\n\" + \"check exp_templ:\")\n", "exp_templ.check()" ] }, @@ -1314,7 +1319,7 @@ ], "source": [ "# Let's have a look at our Exposures instance!\n", - "print('\\n' + 'exp_templ.gdf looks like:')\n", + "print(\"\\n\" + \"exp_templ.gdf looks like:\")\n", "exp_templ.gdf.head()" ] }, @@ -1347,7 +1352,7 @@ "\n", "# We take an example with a dummy raster file (HAZ_DEMO_FL), running the method set_from_raster directly loads the\n", "# necessary info from the file into an Exposures instance.\n", - "exp_raster = Exposures.from_raster(HAZ_DEMO_FL, window= Window(10, 20, 50, 60))\n", + "exp_raster = Exposures.from_raster(HAZ_DEMO_FL, window=Window(10, 20, 50, 60))\n", "# There are several keyword argument options that come with the set_from_raster method (such as\n", "# specifying a window, if not the entire file should be read, or a bounding box. Check them out." 
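rasterio windows are given as Window(col_off, row_off, width, height), so the call above reads a 50 by 60 pixel block starting at column 10, row 20 of the demo raster. A short sketch reading a different, arbitrarily chosen subset of the same file:

```python
from rasterio.windows import Window

from climada.entity import Exposures
from climada.util.constants import HAZ_DEMO_FL

# A 30 x 40 pixel block anchored at the raster origin (illustrative size).
exp_sub = Exposures.from_raster(HAZ_DEMO_FL, window=Window(0, 0, 30, 40))
exp_sub.check()
print(exp_sub.gdf.shape[0])  # one point per raster cell: 30 * 40 = 1200
```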
] @@ -1376,7 +1381,7 @@ "source": [ "# As always, run the check method, such that metadata can be assigned and checked for missing mandatory parameters.\n", "exp_raster.check()\n", - "print('Meta:', exp_raster.meta)" + "print(\"Meta:\", exp_raster.meta)" ] }, { @@ -1475,7 +1480,7 @@ ], "source": [ "# Let's have a look at the Exposures instance!\n", - "print('\\n' + 'exp_raster looks like:')\n", + "print(\"\\n\" + \"exp_raster looks like:\")\n", "exp_raster.gdf.head()" ] }, @@ -1567,7 +1572,7 @@ ], "source": [ "# Example 1: plot_hexbin method\n", - "print('Plotting exp_df.')\n", + "print(\"Plotting exp_df.\")\n", "axs = exp.plot_hexbin();\n", "\n", "# further methods to check out:\n", @@ -1606,7 +1611,7 @@ "source": [ "# Example 2: plot_scatter method\n", "\n", - "exp_gpd.to_crs('epsg:3035', inplace=True)\n", + "exp_gpd.to_crs(\"epsg:3035\", inplace=True)\n", "exp_gpd.plot_scatter(pop_name=False);" ] }, @@ -1637,9 +1642,19 @@ ], "source": [ "# Example 3: plot_raster method\n", - "from climada.util.plot import add_cntry_names # use climada's plotting utilities\n", - "ax = exp.plot_raster(); # plot with same resolution as data\n", - "add_cntry_names(ax, [exp.gdf['longitude'].min(), exp.gdf['longitude'].max(), exp.gdf['latitude'].min(), exp.gdf['latitude'].max()])\n", + "from climada.util.plot import add_cntry_names # use climada's plotting utilities\n", + "\n", + "ax = exp.plot_raster()\n", + "# plot with same resolution as data\n", + "add_cntry_names(\n", + " ax,\n", + " [\n", + " exp.gdf[\"longitude\"].min(),\n", + " exp.gdf[\"longitude\"].max(),\n", + " exp.gdf[\"latitude\"].min(),\n", + " exp.gdf[\"latitude\"].max(),\n", + " ],\n", + ")\n", "\n", "# use keyword argument save_tiff='filepath.tiff' to save the corresponding raster in tiff format\n", "# use keyword argument raster_res='desired number' to change resolution of the raster." @@ -1674,11 +1689,16 @@ "source": [ "# Example 4: plot_basemap method\n", "import contextily as ctx\n", + "\n", "# select the background image from the available ctx.providers\n", - "ax = exp_templ.plot_basemap(buffer=30000, cmap='brg'); # using Positron from CartoDB\n", - "ax = exp_templ.plot_basemap(buffer=30000, cmap='brg',\n", - " url=ctx.providers.OpenStreetMap.Mapnik, # Using OpenStreetmap,\n", - " zoom=9); # select the zoom level of the map, affects the font size of labelled objects" + "ax = exp_templ.plot_basemap(buffer=30000, cmap=\"brg\")\n", + "# using Positron from CartoDB\n", + "ax = exp_templ.plot_basemap(\n", + " buffer=30000,\n", + " cmap=\"brg\",\n", + " url=ctx.providers.OpenStreetMap.Mapnik, # Using OpenStreetmap,\n", + " zoom=9,\n", + "); # select the zoom level of the map, affects the font size of labelled objects" ] }, { @@ -1718,7 +1738,7 @@ ], "source": [ "# other visualization types\n", - "exp_templ.gdf.hist(column='value');" + "exp_templ.gdf.hist(column=\"value\");" ] }, { @@ -1737,12 +1757,15 @@ "metadata": {}, "outputs": [], "source": [ - "import fiona; fiona.supported_drivers\n", + "import fiona\n", + "\n", + "fiona.supported_drivers\n", "from climada import CONFIG\n", + "\n", "results = CONFIG.local_data.save_dir.dir()\n", "\n", "# DataFrame save to csv format. 
geometry writen as string, metadata not saved!\n", - "exp_templ.gdf.to_csv(results.joinpath('exp_templ.csv'), sep='\\t')" + "exp_templ.gdf.to_csv(results.joinpath(\"exp_templ.csv\"), sep=\"\\t\")" ] }, { @@ -1752,7 +1775,7 @@ "outputs": [], "source": [ "# write as hdf5 file\n", - "exp_templ.write_hdf5(results.joinpath('exp_temp.h5'))" + "exp_templ.write_hdf5(results.joinpath(\"exp_temp.h5\"))" ] }, { @@ -1771,8 +1794,9 @@ "source": [ "# save in pickle format\n", "from climada.util.save import save\n", + "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('exp_templ.pkl.p', exp_templ) # creates results folder and stores there" + "save(\"exp_templ.pkl.p\", exp_templ) # creates results folder and stores there" ] }, { @@ -1814,7 +1838,7 @@ "source": [ "# set_geometry_points is expensive for big exposures\n", "# for small amount of data, the execution time might be even greater when using dask\n", - "exp.gdf.drop(columns=['geometry'], inplace=True)\n", + "exp.gdf.drop(columns=[\"geometry\"], inplace=True)\n", "print(exp.gdf.head())\n", "%time exp.set_geometry_points(scheduler='processes')\n", "print(exp.gdf.head())" diff --git a/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb b/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb index 22c5827f2c..904d00f4d4 100644 --- a/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb +++ b/doc/tutorial/climada_entity_Exposures_polygons_lines.ipynb @@ -58,11 +58,13 @@ "from climada.entity.impact_funcs.storm_europe import ImpfStormEurope\n", "from climada.entity import Exposures\n", "\n", - "HAZ = Client().get_hazard('storm_europe', name='test_haz_WS_nl', status='test_dataset');\n", + "HAZ = Client().get_hazard(\"storm_europe\", name=\"test_haz_WS_nl\", status=\"test_dataset\")\n", "\n", - "EXP_POLY = Client().get_exposures('base', name='test_polygon_exp', status='test_dataset');\n", - "EXP_LINE = Client().get_exposures('base', name='test_line_exp', status='test_dataset');\n", - "EXP_POINT = Client().get_exposures('base', name='test_point_exp', status='test_dataset');\n", + "EXP_POLY = Client().get_exposures(\n", + " \"base\", name=\"test_polygon_exp\", status=\"test_dataset\"\n", + ")\n", + "EXP_LINE = Client().get_exposures(\"base\", name=\"test_line_exp\", status=\"test_dataset\")\n", + "EXP_POINT = Client().get_exposures(\"base\", name=\"test_point_exp\", status=\"test_dataset\")\n", "\n", "EXP_MIX = Exposures.concat([EXP_POLY, EXP_LINE, EXP_POINT])\n", "\n", @@ -109,15 +111,20 @@ } ], "source": [ - "#disaggregate in the same CRS as the exposures are defined (here degrees), resolution 1degree\n", - "#divide values on points\n", - "#aggregate by summing\n", + "# disaggregate in the same CRS as the exposures are defined (here degrees), resolution 1degree\n", + "# divide values on points\n", + "# aggregate by summing\n", "\n", "impact = u_lp.calc_geom_impact(\n", - " exp=EXP_MIX, impf_set=IMPF_SET, haz=HAZ,\n", - " res=0.2, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", - " )" + " exp=EXP_MIX,\n", + " impf_set=IMPF_SET,\n", + " haz=HAZ,\n", + " res=0.2,\n", + " to_meters=False,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", + ")" ] }, { @@ -170,15 +177,20 @@ } ], "source": [ - "#disaggregate in meters\n", - "#same value for each point, fixed to 1 (allows to get percentages of affected surface/distance)\n", - "#aggregate by summing\n", + "# disaggregate in meters\n", + 
"# same value for each point, fixed to 1 (allows to get percentages of affected surface/distance)\n", + "# aggregate by summing\n", "\n", "impact = u_lp.calc_geom_impact(\n", - " exp=EXP_MIX, impf_set=IMPF_SET, haz=HAZ,\n", - " res=1000, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, disagg_val=1.0,\n", - " agg_met=u_lp.AggMethod.SUM\n", - " );" + " exp=EXP_MIX,\n", + " impf_set=IMPF_SET,\n", + " haz=HAZ,\n", + " res=1000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.FIX,\n", + " disagg_val=1.0,\n", + " agg_met=u_lp.AggMethod.SUM,\n", + ");" ] }, { @@ -206,7 +218,10 @@ ], "source": [ "import matplotlib.pyplot as plt\n", - "ax = u_lp.plot_eai_exp_geom(impact, legend_kwds={'label': 'percentage', 'orientation': 'horizontal'})" + "\n", + "ax = u_lp.plot_eai_exp_geom(\n", + " impact, legend_kwds={\"label\": \"percentage\", \"orientation\": \"horizontal\"}\n", + ")" ] }, { @@ -282,36 +297,60 @@ " from climada_petals.entity.exposures.black_marble import country_iso_geom\n", "\n", " # open the file containing the Netherlands admin-1 polygons\n", - " shp_file = shapereader.natural_earth(resolution='10m',\n", - " category='cultural',\n", - " name='admin_0_countries')\n", + " shp_file = shapereader.natural_earth(\n", + " resolution=\"10m\", category=\"cultural\", name=\"admin_0_countries\"\n", + " )\n", " shp_file = shapereader.Reader(shp_file)\n", "\n", " # extract the NL polygons\n", - " prov_names = {'Netherlands': ['Groningen', 'Drenthe',\n", - " 'Overijssel', 'Gelderland',\n", - " 'Limburg', 'Zeeland',\n", - " 'Noord-Brabant', 'Zuid-Holland',\n", - " 'Noord-Holland', 'Friesland',\n", - " 'Flevoland', 'Utrecht']\n", - " }\n", - " polygon_Netherlands, polygons_prov_NL = country_iso_geom(prov_names,\n", - " shp_file)\n", - " prov_geom_NL = {prov: geom for prov, geom in zip(list(prov_names.values())[0], list(polygons_prov_NL.values())[0])}\n", + " prov_names = {\n", + " \"Netherlands\": [\n", + " \"Groningen\",\n", + " \"Drenthe\",\n", + " \"Overijssel\",\n", + " \"Gelderland\",\n", + " \"Limburg\",\n", + " \"Zeeland\",\n", + " \"Noord-Brabant\",\n", + " \"Zuid-Holland\",\n", + " \"Noord-Holland\",\n", + " \"Friesland\",\n", + " \"Flevoland\",\n", + " \"Utrecht\",\n", + " ]\n", + " }\n", + " polygon_Netherlands, polygons_prov_NL = country_iso_geom(prov_names, shp_file)\n", + " prov_geom_NL = {\n", + " prov: geom\n", + " for prov, geom in zip(\n", + " list(prov_names.values())[0], list(polygons_prov_NL.values())[0]\n", + " )\n", + " }\n", "\n", " # assign a value to each admin-1 area (assumption 100'000 USD per inhabitant)\n", - " population_prov_NL = {'Drenthe':493449, 'Flevoland':422202,\n", - " 'Friesland':649988, 'Gelderland':2084478,\n", - " 'Groningen':585881, 'Limburg':1118223,\n", - " 'Noord-Brabant':2562566, 'Noord-Holland':2877909,\n", - " 'Overijssel':1162215, 'Zuid-Holland':3705625,\n", - " 'Utrecht':1353596, 'Zeeland':383689}\n", - " value_prov_NL = {n: 100000 * population_prov_NL[n] for n in population_prov_NL.keys()}\n", + " population_prov_NL = {\n", + " \"Drenthe\": 493449,\n", + " \"Flevoland\": 422202,\n", + " \"Friesland\": 649988,\n", + " \"Gelderland\": 2084478,\n", + " \"Groningen\": 585881,\n", + " \"Limburg\": 1118223,\n", + " \"Noord-Brabant\": 2562566,\n", + " \"Noord-Holland\": 2877909,\n", + " \"Overijssel\": 1162215,\n", + " \"Zuid-Holland\": 3705625,\n", + " \"Utrecht\": 1353596,\n", + " \"Zeeland\": 383689,\n", + " }\n", + " value_prov_NL = {\n", + " n: 100000 * population_prov_NL[n] for n in population_prov_NL.keys()\n", + " }\n", "\n", " # combine 
into GeoDataFrame and add a coordinate reference system to it:\n", - " df1 = pd.DataFrame.from_dict(population_prov_NL, orient='index', columns=['population']).join(\n", - " pd.DataFrame.from_dict(value_prov_NL, orient='index', columns=['value']))\n", - " df1['geometry'] = [prov_geom_NL[prov] for prov in df1.index]\n", + " df1 = pd.DataFrame.from_dict(\n", + " population_prov_NL, orient=\"index\", columns=[\"population\"]\n", + " ).join(pd.DataFrame.from_dict(value_prov_NL, orient=\"index\", columns=[\"value\"]))\n", + " df1[\"geometry\"] = [prov_geom_NL[prov] for prov in df1.index]\n", " gdf_polys = gpd.GeoDataFrame(df1)\n", " gdf_polys = gdf_polys.set_crs(epsg=4326)\n", " return gdf_polys" @@ -417,7 +456,7 @@ ], "source": [ "exp_nl_poly = Exposures(gdf_poly())\n", - "exp_nl_poly.gdf['impf_WS'] = 1\n", + "exp_nl_poly.gdf[\"impf_WS\"] = 1\n", "exp_nl_poly.gdf.head()" ] }, @@ -456,7 +495,7 @@ ], "source": [ "# take a look\n", - "exp_nl_poly.gdf.plot('value', legend=True, cmap='OrRd')" + "exp_nl_poly.gdf.plot(\"value\", legend=True, cmap=\"OrRd\")" ] }, { @@ -557,9 +596,13 @@ ], "source": [ "imp_deg = u_lp.calc_geom_impact(\n", - " exp=exp_nl_poly, impf_set=impf_set, haz=storms,\n", - " res=0.005, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_poly,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " res=0.005,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -621,9 +664,14 @@ ], "source": [ "imp_m = u_lp.calc_geom_impact(\n", - " exp=exp_nl_poly, impf_set=impf_set, haz=storms,\n", - " res=500, to_meters=True, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_poly,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " res=500,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -711,15 +759,14 @@ }, "outputs": [], "source": [ - "#regular grid from exposures bounds\n", + "# regular grid from exposures bounds\n", "import climada.util.coordinates as u_coord\n", + "\n", "res = 0.1\n", "(_, _, xmax, ymax) = exp_nl_poly.gdf.geometry.bounds.max()\n", "(xmin, ymin, _, _) = exp_nl_poly.gdf.geometry.bounds.min()\n", "bounds = (xmin, ymin, xmax, ymax)\n", - "height, width, trafo = u_coord.pts_to_raster_meta(\n", - " bounds, (res, res)\n", - " )\n", + "height, width, trafo = u_coord.pts_to_raster_meta(bounds, (res, res))\n", "x_grid, y_grid = u_coord.raster_to_meshgrid(trafo, width, height)" ] }, @@ -747,9 +794,13 @@ ], "source": [ "imp_g = u_lp.calc_grid_impact(\n", - " exp=exp_nl_poly, impf_set=impf_set, haz=storms,\n", - " grid=(x_grid, y_grid), disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n", - " agg_met=u_lp.AggMethod.SUM\n", + " exp=exp_nl_poly,\n", + " impf_set=impf_set,\n", + " haz=storms,\n", + " grid=(x_grid, y_grid),\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", + " agg_met=u_lp.AggMethod.SUM,\n", ")" ] }, @@ -931,8 +982,11 @@ "source": [ "# Disaggregate exposure to 10'000 metre grid, each point gets average value within polygon.\n", "exp_pnt = u_lp.exp_geom_to_pnt(\n", - " exp_nl_poly, res=10000, to_meters=True,\n", - " disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None\n", + " exp_nl_poly,\n", + " res=10000,\n", + " to_meters=True,\n", + " disagg_met=u_lp.DisaggMethod.DIV,\n", + " disagg_val=None,\n", ")\n", "exp_pnt.gdf.head()" ] @@ -1073,8 +1127,12 @@ "source": [ "# Disaggregate 
exposure to 0.1° grid, no value disaggregation specified --> replicate initial value\n",
 "exp_pnt2 = u_lp.exp_geom_to_pnt(\n",
- "    exp_nl_poly, res=0.1, to_meters=False,\n",
- "    disagg_met=u_lp.DisaggMethod.FIX, disagg_val=None)\n",
+ "    exp_nl_poly,\n",
+ "    res=0.1,\n",
+ "    to_meters=False,\n",
+ "    disagg_met=u_lp.DisaggMethod.FIX,\n",
+ "    disagg_val=None,\n",
+ ")\n",
 "exp_pnt2.gdf.head()"
 ]
 },
@@ -1214,8 +1272,12 @@
 "# Disaggregate exposure to 1'000 metre grid, each point gets value corresponding to\n",
 "# its representative area (1'000^2).\n",
 "exp_pnt3 = u_lp.exp_geom_to_pnt(\n",
- "    exp_nl_poly, res=1000, to_meters=True,\n",
- "    disagg_met=u_lp.DisaggMethod.FIX, disagg_val=10e6)\n",
+ "    exp_nl_poly,\n",
+ "    res=1000,\n",
+ "    to_meters=True,\n",
+ "    disagg_met=u_lp.DisaggMethod.FIX,\n",
+ "    disagg_val=10e6,\n",
+ ")\n",
 "exp_pnt3.gdf.head()"
 ]
 },
@@ -1355,8 +1417,12 @@
 "# Disaggregate exposure to 1'000 metre grid, each point gets value corresponding to 1\n",
 "# After disaggregation, each point has a value equal to the percentage of area of the polygon\n",
 "exp_pnt4 = u_lp.exp_geom_to_pnt(\n",
- "    exp_nl_poly, res=1000, to_meters=True,\n",
- "    disagg_met=u_lp.DisaggMethod.DIV, disagg_val=1)\n",
+ "    exp_nl_poly,\n",
+ "    res=1000,\n",
+ "    to_meters=True,\n",
+ "    disagg_met=u_lp.DisaggMethod.DIV,\n",
+ "    disagg_val=1,\n",
+ ")\n",
 "exp_pnt4.gdf.tail()"
 ]
 },
@@ -1494,19 +1560,18 @@
 ],
 "source": [
 "# disaggregate on pre-defined grid\n",
- "#regular grid from exposures bounds\n",
+ "# regular grid from exposures bounds\n",
 "import climada.util.coordinates as u_coord\n",
+ "\n",
 "res = 0.1\n",
 "(_, _, xmax, ymax) = exp_nl_poly.gdf.geometry.bounds.max()\n",
 "(xmin, ymin, _, _) = exp_nl_poly.gdf.geometry.bounds.min()\n",
 "bounds = (xmin, ymin, xmax, ymax)\n",
- "height, width, trafo = u_coord.pts_to_raster_meta(\n",
- "    bounds, (res, res)\n",
- "    )\n",
+ "height, width, trafo = u_coord.pts_to_raster_meta(bounds, (res, res))\n",
 "x_grid, y_grid = u_coord.raster_to_meshgrid(trafo, width, height)\n",
 "exp_pnt5 = u_lp.exp_geom_to_grid(\n",
- "    exp_nl_poly, grid=(x_grid, y_grid),\n",
- "    disagg_met=u_lp.DisaggMethod.DIV, disagg_val=1)\n",
+ "    exp_nl_poly, grid=(x_grid, y_grid), disagg_met=u_lp.DisaggMethod.DIV, disagg_val=1\n",
+ ")\n",
 "exp_pnt5.gdf.tail()"
 ]
 },
@@ -1589,7 +1654,7 @@
 ],
 "source": [
 "# Plot point-impacts and aggregated impacts\n",
- "imp_pnt.plot_hexbin_eai_exposure();\n",
+ "imp_pnt.plot_hexbin_eai_exposure()\n",
 "u_lp.plot_eai_exp_geom(imp_geom);"
 ]
 },
@@ -1727,7 +1792,7 @@
 "outputs": [],
 "source": [
 "def gdf_lines():\n",
- "    gdf_lines = gpd.read_file(Path(DEMO_DIR,'nl_rails.gpkg'))\n",
+ "    gdf_lines = gpd.read_file(Path(DEMO_DIR, \"nl_rails.gpkg\"))\n",
 "    gdf_lines = gdf_lines.to_crs(epsg=4326)\n",
 "    return gdf_lines"
 ]
@@ -1832,8 +1897,8 @@
 ],
 "source": [
 "exp_nl_lines = Exposures(gdf_lines())\n",
- "exp_nl_lines.gdf['impf_WS'] = 1\n",
- "exp_nl_lines.gdf['value'] = 1\n",
+ "exp_nl_lines.gdf[\"impf_WS\"] = 1\n",
+ "exp_nl_lines.gdf[\"value\"] = 1\n",
 "exp_nl_lines.gdf.head()"
 ]
 },
@@ -1861,7 +1926,7 @@
 }
 ],
 "source": [
- "exp_nl_lines.gdf.plot('value', cmap='inferno');"
+ "exp_nl_lines.gdf.plot(\"value\", cmap=\"inferno\");"
 ]
 },
 {
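Throughout these cells, the two disaggregation methods differ only in how a geometry's value reaches its interpolated points: `DisaggMethod.DIV` splits one total evenly across the points, while `DisaggMethod.FIX` gives every point the same fixed value. A minimal sketch of that arithmetic in plain Python (a hypothetical helper, simplified from the behaviour described in the tutorial text — not CLIMADA's implementation):

```python
# Sketch: how DIV and FIX assign values to the n interpolated points of one
# geometry (assumed, simplified semantics; not the library's actual code).
def disaggregate(value, n_points, method="DIV", disagg_val=None):
    if method == "DIV":
        # divide the original (or the provided) total evenly among the points
        total = value if disagg_val is None else disagg_val
        return [total / n_points] * n_points
    if method == "FIX":
        # assign the fixed value (or the original value) to every point
        fixed = value if disagg_val is None else disagg_val
        return [fixed] * n_points
    raise ValueError(f"unknown method: {method}")

print(disaggregate(100.0, 4, "DIV"))                   # [25.0, 25.0, 25.0, 25.0]
print(disaggregate(100.0, 4, "FIX", disagg_val=1.0))   # [1.0, 1.0, 1.0, 1.0]
```

@@ -1911,9 +1976,13 @@
 ],
 "source": [
 "imp_deg = u_lp.calc_geom_impact(\n",
- "    exp=exp_nl_lines, impf_set=impf_set, haz=storms,\n",
- "    res=0.005, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n",
- "    agg_met=u_lp.AggMethod.SUM\n",
+ "    exp=exp_nl_lines,\n",
+ "    impf_set=impf_set,\n",
+ "    haz=storms,\n",
+ "    res=0.005,\n",
+ "    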
disagg_met=u_lp.DisaggMethod.DIV,\n",
+ "    disagg_val=None,\n",
+ "    agg_met=u_lp.AggMethod.SUM,\n",
 ")"
 ]
 },
@@ -1975,9 +2044,14 @@
 ],
 "source": [
 "imp_m = u_lp.calc_geom_impact(\n",
- "    exp=exp_nl_lines, impf_set=impf_set, haz=storms,\n",
- "    res=500, to_meters=True, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None,\n",
- "    agg_met=u_lp.AggMethod.SUM\n",
+ "    exp=exp_nl_lines,\n",
+ "    impf_set=impf_set,\n",
+ "    haz=storms,\n",
+ "    res=500,\n",
+ "    to_meters=True,\n",
+ "    disagg_met=u_lp.DisaggMethod.DIV,\n",
+ "    disagg_val=None,\n",
+ "    agg_met=u_lp.AggMethod.SUM,\n",
 ")"
 ]
 },
@@ -2028,8 +2102,11 @@
 ],
 "source": [
 "import numpy as np\n",
+ "\n",
 "diff = np.max((imp_deg.eai_exp - imp_m.eai_exp) / imp_deg.eai_exp)\n",
- "print(f\"The largest relative different between degrees and meters impact in this example is {diff}\")"
+ "print(\n",
+ "    f\"The largest relative difference between degrees and meters impact in this example is {diff}\"\n",
+ ")"
 ]
 },
 {
@@ -2184,7 +2261,11 @@
 "source": [
 "# 0.1° distance between points, average value disaggregation\n",
 "exp_pnt = u_lp.exp_geom_to_pnt(\n",
- "    exp_nl_lines, res=0.1, to_meters=False, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None\n",
+ "    exp_nl_lines,\n",
+ "    res=0.1,\n",
+ "    to_meters=False,\n",
+ "    disagg_met=u_lp.DisaggMethod.DIV,\n",
+ "    disagg_val=None,\n",
 ")\n",
 "exp_pnt.gdf.head()"
 ]
@@ -2317,7 +2398,11 @@
 "source": [
 "# 1000m distance between points, no value disaggregation\n",
 "exp_pnt2 = u_lp.exp_geom_to_pnt(\n",
- "    exp_nl_lines, res=1000, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, disagg_val=None\n",
+ "    exp_nl_lines,\n",
+ "    res=1000,\n",
+ "    to_meters=True,\n",
+ "    disagg_met=u_lp.DisaggMethod.FIX,\n",
+ "    disagg_val=None,\n",
 ")\n",
 "exp_pnt2.gdf.head()"
 ]
@@ -2450,7 +2535,11 @@
 "source": [
 "# 1000m distance between points, equal value disaggregation\n",
 "exp_pnt3 = u_lp.exp_geom_to_pnt(\n",
- "    exp_nl_lines, res=1000, to_meters=True, disagg_met=u_lp.DisaggMethod.DIV, disagg_val=None\n",
+ "    exp_nl_lines,\n",
+ "    res=1000,\n",
+ "    to_meters=True,\n",
+ "    disagg_met=u_lp.DisaggMethod.DIV,\n",
+ "    disagg_val=None,\n",
 ")\n",
 "exp_pnt3.gdf.head()"
 ]
@@ -2583,7 +2672,11 @@
 "source": [
 "# 1000m distance between points, disaggregation of value according to representative distance\n",
 "exp_pnt4 = u_lp.exp_geom_to_pnt(\n",
- "    exp_nl_lines, res=1000, to_meters=True, disagg_met=u_lp.DisaggMethod.FIX, disagg_val=1000\n",
+ "    exp_nl_lines,\n",
+ "    res=1000,\n",
+ "    to_meters=True,\n",
+ "    disagg_met=u_lp.DisaggMethod.FIX,\n",
+ "    disagg_val=1000,\n",
 ")\n",
 "exp_pnt4.gdf.head()"
 ]
diff --git a/doc/tutorial/climada_entity_ImpactFuncSet.ipynb b/doc/tutorial/climada_entity_ImpactFuncSet.ipynb
index 2702aa60f8..6df482925f 100644
--- a/doc/tutorial/climada_entity_ImpactFuncSet.ipynb
+++ b/doc/tutorial/climada_entity_ImpactFuncSet.ipynb
@@ -113,7 +113,7 @@
 ")\n",
 "\n",
 "# check if all the attributes are set correctly\n",
- "imp_fun.check()\n"
+ "imp_fun.check()"
 ]
 },
 {
@@ -131,7 +131,7 @@
 ],
 "source": [
 "# Calculate the mdr at hazard intensity 18.7 m/s\n",
- "print('Mean damage ratio at intensity 18.7 m/s: ', imp_fun.calc_mdr(18.7))"
+ "print(\"Mean damage ratio at intensity 18.7 m/s: \", imp_fun.calc_mdr(18.7))"
 ]
 },
 {
@@ -282,7 +282,7 @@
 "imp_fun_3.check()\n",
 "\n",
 "# add the 2 impact functions into ImpactFuncSet\n",
- "imp_fun_set = ImpactFuncSet([imp_fun_1, imp_fun_3])\n"
+ "imp_fun_set = ImpactFuncSet([imp_fun_1, imp_fun_3])"
 ]
 },
 {
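`calc_mdr` evaluates an impact function at a given hazard intensity; conceptually, the mean damage ratio is the interpolated MDD (mean damage degree) multiplied by the interpolated PAA (percentage of affected assets). A self-contained sketch of that relationship with NumPy (illustrative curve values; a sketch of the idea, not the library's exact code):

```python
import numpy as np

# Illustrative impact function: intensity (m/s), MDD and PAA at those intensities.
intensity = np.array([0.0, 20.0, 40.0, 60.0])
mdd = np.array([0.0, 0.1, 0.5, 1.0])
paa = np.array([0.0, 0.5, 1.0, 1.0])

def mdr(x):
    # mean damage ratio = interpolated MDD * interpolated PAA (schematic of the
    # relationship described in the tutorial, not CLIMADA's implementation)
    return np.interp(x, intensity, mdd) * np.interp(x, intensity, paa)

print(mdr(18.7))
```

@@ -345,7 +345,7 @@
 ],
 "source": [
 "# extract the TC impact function with id 1\n",
- "impf_tc_1 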
= imp_fun_set.get_func('TC', 1)\n",
+ "impf_tc_1 = imp_fun_set.get_func(\"TC\", 1)\n",
 "# plot the impact function\n",
 "impf_tc_1.plot();"
 ]
@@ -404,7 +404,7 @@
 ],
 "source": [
 "# removing the TC impact function with id 3\n",
- "imp_fun_set.remove_func('TC', 3)\n",
+ "imp_fun_set.remove_func(\"TC\", 3)\n",
 "# plot all the remaining impact functions in imp_fun_set\n",
 "imp_fun_set.plot();"
 ]
 },
@@ -464,7 +464,7 @@
 "# plot all the impact functions from the ImpactFuncSet\n",
 "imp_set_xlsx.plot()\n",
 "# adjust the plots\n",
- "plt.subplots_adjust(right=1., top=4., hspace=0.4, wspace=0.4)"
+ "plt.subplots_adjust(right=1.0, top=4.0, hspace=0.4, wspace=0.4)"
 ]
 },
 {
@@ -483,7 +483,7 @@
 "outputs": [],
 "source": [
 "# write imp_set_xlsx into an excel file\n",
- "imp_set_xlsx.write_excel('tutorial_impf_set.xlsx')"
+ "imp_set_xlsx.write_excel(\"tutorial_impf_set.xlsx\")"
 ]
 },
 {
@@ -512,7 +512,7 @@
 "from climada.util.save import save\n",
 "\n",
 "# this generates a results folder in the current path and stores the output there\n",
- "save('tutorial_impf_set.p', imp_set_xlsx)"
+ "save(\"tutorial_impf_set.p\", imp_set_xlsx)"
 ]
 },
 {
@@ -563,7 +563,7 @@
 "# plot all the impact functions\n",
 "imp_fun_set_TC.plot()\n",
 "# adjust the plots\n",
- "plt.subplots_adjust(right=1., top=4., hspace=0.4, wspace=0.4)"
+ "plt.subplots_adjust(right=1.0, top=4.0, hspace=0.4, wspace=0.4)"
 ]
 }
 ],
diff --git a/doc/tutorial/climada_entity_LitPop.ipynb b/doc/tutorial/climada_entity_LitPop.ipynb
index 8625fe394c..56c2d065a0 100644
--- a/doc/tutorial/climada_entity_LitPop.ipynb
+++ b/doc/tutorial/climada_entity_LitPop.ipynb
@@ -155,15 +155,19 @@
 "source": [
 "# Initiate a default LitPop exposure entity for Switzerland and Liechtenstein (ISO3-Codes 'CHE' and 'LIE'):\n",
 "try:\n",
- "    exp = LitPop.from_countries(['CHE', 'Liechtenstein']) # you can provide either single countries or a list of countries\n",
+ "    exp = LitPop.from_countries(\n",
+ "        [\"CHE\", \"Liechtenstein\"]\n",
+ "    )  # you can provide either a single country or a list of countries\n",
 "except FileExistsError as err:\n",
- "    print(\"Reason for error: The GPW population data has not been downloaded, c.f. section 'Input data' above.\")\n",
+ "    print(\n",
+ "        \"Reason for error: The GPW population data has not been downloaded, cf. 
section 'Input data' above.\"\n",
+ "    )\n",
 "    raise err\n",
- "exp.plot_scatter();\n",
+ "exp.plot_scatter()\n",
 "\n",
 "# Note that `exp.gdf['region_id']` is a number identifying each country:\n",
- "print('\\n Region IDs (`region_id`) in this exposure:')\n",
- "print(exp.gdf['region_id'].unique())"
+ "print(\"\\n Region IDs (`region_id`) in this exposure:\")\n",
+ "print(exp.gdf[\"region_id\"].unique())"
 ]
 },
 {
@@ -240,9 +244,12 @@
 ],
 "source": [
 "# Initiate a LitPop exposure entity for Costa Rica with varied resolution, fin_mode, and exponents:\n",
- "exp = LitPop.from_countries('Costa Rica', fin_mode='income_group', res_arcsec=120, exponents=(1,1)) # change the parameters and see what happens...\n",
+ "exp = LitPop.from_countries(\n",
+ "    \"Costa Rica\", fin_mode=\"income_group\", res_arcsec=120, exponents=(1, 1)\n",
+ ")  # change the parameters and see what happens...\n",
 "# exp = LitPop.from_countries('Costa Rica', fin_mode='gdp', res_arcsec=90, exponents=(3,0)) # example of variation\n",
- "exp.plot_raster(); # note the log scale of the colorbar\n",
+ "exp.plot_raster()\n",
+ "# note the log scale of the colorbar\n",
 "exp.plot_scatter();"
 ]
 },
@@ -312,12 +319,16 @@
 "source": [
 "# You may want to check if you have downloaded dataset Gridded Population of the World (GPW), v4: Population Count, v4.11\n",
 "# (2000 and 2020) first\n",
- "pop_2000 = LitPop.from_countries('CHE', fin_mode='pop', res_arcsec=300, exponents=(0,1), reference_year=2000)\n",
+ "pop_2000 = LitPop.from_countries(\n",
+ "    \"CHE\", fin_mode=\"pop\", res_arcsec=300, exponents=(0, 1), reference_year=2000\n",
+ ")\n",
 "# Alternatively, we can use `from_population`:\n",
- "pop_2021 = LitPop.from_population(countries='Switzerland', res_arcsec=300, reference_year=2021)\n",
+ "pop_2021 = LitPop.from_population(\n",
+ "    countries=\"Switzerland\", res_arcsec=300, reference_year=2021\n",
+ ")\n",
 "# Since no population data for 2021 is available, the closest data point, 2020, is used (see LOGGER.warning)\n",
- "pop_2000.plot_scatter();\n",
- "pop_2021.plot_scatter();\n",
+ "pop_2000.plot_scatter()\n",
+ "pop_2021.plot_scatter()\n",
 "\"\"\"Note the difference in total values on the color bar.\"\"\""
 ]
 },
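The `exponents=(m, n)` argument steers how nightlights and population are blended: each grid cell is weighted by Lit^m · Pop^n, and the country total is then distributed proportionally to those weights. A schematic of that weighting on toy arrays (an assumed simplification of the documented idea, not the actual LitPop code):

```python
import numpy as np

# Toy grids: nightlight intensity and population count per cell (made-up values).
lit = np.array([0.0, 2.0, 8.0, 4.0])
pop = np.array([10.0, 0.0, 5.0, 20.0])
total_value = 1_000_000.0  # e.g. a country total to be distributed

def litpop_weights(lit, pop, exponents=(1, 1)):
    # weight each cell by Lit^m * Pop^n and normalize; exponents (0, 1) would
    # reduce to pure population, (3, 0) to cubed nightlights (cf. the cells above)
    m, n = exponents
    w = lit**m * pop**n
    return w / w.sum()

values = total_value * litpop_weights(lit, pop, exponents=(1, 1))
print(values)        # per-cell values, summing to total_value
print(values.sum())
```

@@ -398,16 +409,18 @@
 }
 ],
 "source": [
- "res = 30 # If you don't get an output after a very long time with country = \"MEX\", try with res = 100\n",
- "country = 'JAM' # Try different countries, i.e. 'JAM', 'CHE', 'RWA', 'MEX'\n",
- "markersize = 4 # for plotting\n",
- "buffer_deg=.04\n",
- "\n",
- "exp_nightlights = LitPop.from_nightlight_intensity(countries=country, res_arcsec=res) # nightlight intensity\n",
- "exp_nightlights.plot_hexbin(linewidth=markersize, buffer=buffer_deg);\n",
+ "res = 30  # If you don't get an output after a very long time with country = \"MEX\", try with res = 100\n",
+ "country = \"JAM\"  # Try different countries, e.g. 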
'JAM', 'CHE', 'RWA', 'MEX'\n", + "markersize = 4 # for plotting\n", + "buffer_deg = 0.04\n", + "\n", + "exp_nightlights = LitPop.from_nightlight_intensity(\n", + " countries=country, res_arcsec=res\n", + ") # nightlight intensity\n", + "exp_nightlights.plot_hexbin(linewidth=markersize, buffer=buffer_deg)\n", "# Compare to the population map:\n", "exp_population = LitPop().from_population(countries=country, res_arcsec=res)\n", - "exp_population.plot_hexbin(linewidth=markersize, buffer=buffer_deg);\n", + "exp_population.plot_hexbin(linewidth=markersize, buffer=buffer_deg)\n", "# Compare to default LitPop exposures:\n", "exp = LitPop.from_countries(countries=country, res_arcsec=res)\n", "exp.plot_hexbin(linewidth=markersize, buffer=buffer_deg);" @@ -495,29 +508,31 @@ "import climada.util.coordinates as u_coord\n", "import climada.entity.exposures.litpop as lp\n", "\n", - "country_iso3a = 'USA'\n", - "state_name = 'Florida'\n", + "country_iso3a = \"USA\"\n", + "state_name = \"Florida\"\n", "reslution_arcsec = 600\n", "\"\"\"First, we need to get the shape of Florida:\"\"\"\n", "admin1_info, admin1_shapes = u_coord.get_admin1_info(country_iso3a)\n", "admin1_info = admin1_info[country_iso3a]\n", "admin1_shapes = admin1_shapes[country_iso3a]\n", - "admin1_names = [record['name'] for record in admin1_info]\n", + "admin1_names = [record[\"name\"] for record in admin1_info]\n", "print(admin1_names)\n", "for idx, name in enumerate(admin1_names):\n", - " if admin1_names[idx]==state_name:\n", + " if admin1_names[idx] == state_name:\n", " break\n", - "print('Florida index: ' + str(idx))\n", + "print(\"Florida index: \" + str(idx))\n", "\n", "\"\"\"Secondly, we estimate the `total_value`\"\"\"\n", "# `total_value` required user input for `from_shape`, here we assume 5% of total value of the whole USA:\n", - "total_value = 0.05 * lp._get_total_value_per_country(country_iso3a, 'pc', 2020)\n", + "total_value = 0.05 * lp._get_total_value_per_country(country_iso3a, \"pc\", 2020)\n", "\n", "\"\"\"Then, we can initiate the exposures for Florida:\"\"\"\n", "start = time.process_time()\n", - "exp = LitPop.from_shape(admin1_shapes[idx], total_value, res_arcsec=600, reference_year=2020)\n", - "print(f'\\n Runtime `from_shape` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp.plot_scatter(vmin=100, buffer=.5);\n" + "exp = LitPop.from_shape(\n", + " admin1_shapes[idx], total_value, res_arcsec=600, reference_year=2020\n", + ")\n", + "print(f\"\\n Runtime `from_shape` : {time.process_time() - start:1.2f} sec.\\n\")\n", + "exp.plot_scatter(vmin=100, buffer=0.5);" ] }, { @@ -561,9 +576,13 @@ "# `from_shape_and_countries` does not require `total_value`, but is slower to compute than `from_shape`,\n", "# because first, the exposure for the whole USA is initiated:\n", "start = time.process_time()\n", - "exp = LitPop.from_shape_and_countries(admin1_shapes[idx], country_iso3a, res_arcsec=600, reference_year=2020)\n", - "print(f'\\n Runtime `from_shape_and_countries` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp.plot_scatter(vmin=100, buffer=.5);\n", + "exp = LitPop.from_shape_and_countries(\n", + " admin1_shapes[idx], country_iso3a, res_arcsec=600, reference_year=2020\n", + ")\n", + "print(\n", + " f\"\\n Runtime `from_shape_and_countries` : {time.process_time() - start:1.2f} sec.\\n\"\n", + ")\n", + "exp.plot_scatter(vmin=100, buffer=0.5)\n", "\"\"\"Note the differences in computational speed and total value between the two approaches\"\"\"" ] }, @@ -655,31 +674,36 @@ "from shapely.geometry import 
Polygon\n", "\n", "\"\"\"initiate LitPop exposures for a geographical box around the city of Zurich:\"\"\"\n", - "bounds = (8.41, 47.25, 8.70, 47.47) # (min_lon, max_lon, min_lat, max_lat)\n", - "total_value=1000 # required user input for `from_shape`, here we just assume USD 1000 of total value\n", - "shape = Polygon([\n", - " (bounds[0], bounds[3]),\n", - " (bounds[2], bounds[3]),\n", - " (bounds[2], bounds[1]),\n", - " (bounds[0], bounds[1])\n", - " ])\n", + "bounds = (8.41, 47.25, 8.70, 47.47) # (min_lon, max_lon, min_lat, max_lat)\n", + "total_value = 1000 # required user input for `from_shape`, here we just assume USD 1000 of total value\n", + "shape = Polygon(\n", + " [\n", + " (bounds[0], bounds[3]),\n", + " (bounds[2], bounds[3]),\n", + " (bounds[2], bounds[1]),\n", + " (bounds[0], bounds[1]),\n", + " ]\n", + ")\n", "import time\n", + "\n", "start = time.process_time()\n", "exp = LitPop.from_shape(shape, total_value)\n", - "print(f'\\n Runtime `from_shape` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp.plot_scatter();\n", + "print(f\"\\n Runtime `from_shape` : {time.process_time() - start:1.2f} sec.\\n\")\n", + "exp.plot_scatter()\n", "# `from_shape_and_countries` does not require `total_value`, but is slower to compute:\n", "start = time.process_time()\n", - "exp = LitPop.from_shape_and_countries(shape, 'Switzerland')\n", - "print(f'\\n Runtime `from_shape_and_countries` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp.plot_scatter();\n", + "exp = LitPop.from_shape_and_countries(shape, \"Switzerland\")\n", + "print(\n", + " f\"\\n Runtime `from_shape_and_countries` : {time.process_time() - start:1.2f} sec.\\n\"\n", + ")\n", + "exp.plot_scatter()\n", "\"\"\"Note the difference in total value between the two exposure sets!\"\"\"\n", "\n", "\"\"\"For comparison, initiate population exposure for a geographical box around the city of Zurich:\"\"\"\n", "start = time.process_time()\n", "exp_pop = LitPop.from_population(shape=shape)\n", - "print(f'\\n Runtime `from_population` : {time.process_time() - start:1.2f} sec.\\n')\n", - "exp_pop.plot_scatter();\n", + "print(f\"\\n Runtime `from_population` : {time.process_time() - start:1.2f} sec.\\n\")\n", + "exp_pop.plot_scatter()\n", "\n", "\"\"\"Population exposure for a custom shape can be initiated directly via `set_population` without providing `total_value`\"\"\"" ] @@ -727,14 +751,18 @@ "source": [ "# Initiate GDP-Entity for Switzerland, with and without admin1_calc:\n", "\n", - "ent_adm0 = LitPop.from_countries('CHE', res_arcsec=120, fin_mode='gdp', admin1_calc=False)\n", + "ent_adm0 = LitPop.from_countries(\n", + " \"CHE\", res_arcsec=120, fin_mode=\"gdp\", admin1_calc=False\n", + ")\n", "ent_adm0.set_geometry_points()\n", "\n", - "ent_adm1 = LitPop.from_countries('CHE', res_arcsec=120, fin_mode='gdp', admin1_calc=True)\n", + "ent_adm1 = LitPop.from_countries(\n", + " \"CHE\", res_arcsec=120, fin_mode=\"gdp\", admin1_calc=True\n", + ")\n", "\n", "ent_adm0.check()\n", "ent_adm1.check()\n", - "print('Done.')" + "print(\"Done.\")" ] }, { @@ -788,14 +816,15 @@ "source": [ "# Plotting:\n", "from matplotlib import colors\n", - "norm=colors.LogNorm(vmin=1e5, vmax=1e9) # setting range for the log-normal scale\n", + "\n", + "norm = colors.LogNorm(vmin=1e5, vmax=1e9) # setting range for the log-normal scale\n", "markersize = 5\n", - "ent_adm0.plot_hexbin(buffer=.3, norm=norm, linewidth=markersize);\n", - "ent_adm1.plot_hexbin(buffer=.3, norm=norm, linewidth=markersize);\n", + "ent_adm0.plot_hexbin(buffer=0.3, norm=norm, 
linewidth=markersize)\n", + "ent_adm1.plot_hexbin(buffer=0.3, norm=norm, linewidth=markersize)\n", "\n", - "print('admin-0: First figure')\n", - "print('admin-1: Second figure')\n", - "'''Do you spot the small differences in Graubünden (eastern Switzerland)?'''" + "print(\"admin-0: First figure\")\n", + "print(\"admin-1: Second figure\")\n", + "\"\"\"Do you spot the small differences in Graubünden (eastern Switzerland)?\"\"\"" ] } ], diff --git a/doc/tutorial/climada_entity_MeasureSet.ipynb b/doc/tutorial/climada_entity_MeasureSet.ipynb index e1b93a1035..812198362b 100644 --- a/doc/tutorial/climada_entity_MeasureSet.ipynb +++ b/doc/tutorial/climada_entity_MeasureSet.ipynb @@ -127,28 +127,28 @@ "\n", "# define measure\n", "meas = Measure(\n", - " name='Mangrove',\n", - " haz_type='TC',\n", + " name=\"Mangrove\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " mdd_impact=(1, 0),\n", " paa_impact=(1, -0.15),\n", - " hazard_inten_imp=(1, -10), # reduces intensity by 10\n", + " hazard_inten_imp=(1, -10), # reduces intensity by 10\n", ")\n", "\n", "# impact functions\n", "impf_tc = ImpfTropCyclone.from_emanuel_usa()\n", "impf_all = ImpactFuncSet([impf_tc])\n", - "impf_all.plot();\n", + "impf_all.plot()\n", "\n", "# dummy Hazard and Exposures\n", - "haz = Hazard('TC') # this measure does not change hazard\n", - "exp = Exposures() # this measure does not change exposures\n", + "haz = Hazard(\"TC\") # this measure does not change hazard\n", + "exp = Exposures() # this measure does not change exposures\n", "\n", "# new impact functions\n", "new_exp, new_impfs, new_haz = meas.apply(exp, impf_all, haz)\n", - "axes = new_impfs.plot();\n", - "axes.set_title('TC: Modified impact function')" + "axes = new_impfs.plot()\n", + "axes.set_title(\"TC: Modified impact function\")" ] }, { @@ -228,8 +228,8 @@ "\n", "# define measure\n", "meas = Measure(\n", - " name='Mangrove',\n", - " haz_type='TC',\n", + " name=\"Mangrove\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " hazard_freq_cutoff=0.0255,\n", @@ -250,14 +250,16 @@ "# new hazard\n", "new_exp, new_impfs, new_haz = meas.apply(exp, impf_all, haz)\n", "# if you look at the maximum intensity per centroid: new_haz does not contain the event with smaller impact (the most frequent)\n", - "haz.plot_intensity(0);\n", - "new_haz.plot_intensity(0);\n", + "haz.plot_intensity(0)\n", + "new_haz.plot_intensity(0)\n", "# you might also compute the exceedance frequency curve of both hazard\n", "imp = ImpactCalc(exp, impf_all, haz).impact()\n", - "ax = imp.calc_freq_curve().plot(label='original');\n", + "ax = imp.calc_freq_curve().plot(label=\"original\")\n", "\n", "new_imp = ImpactCalc(new_exp, new_impfs, new_haz).impact()\n", - "new_imp.calc_freq_curve().plot(axis=ax, label='measure'); # the damages for events with return periods > 1/0.0255 ~ 40 are 0" + "new_imp.calc_freq_curve().plot(\n", + " axis=ax, label=\"measure\"\n", + "); # the damages for events with return periods > 1/0.0255 ~ 40 are 0" ] }, { @@ -361,12 +363,12 @@ "\n", "# define measure\n", "meas = Measure(\n", - " name='Building code',\n", - " haz_type='TC',\n", + " name=\"Building code\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " hazard_freq_cutoff=0.00455,\n", - " exp_region_id=[1], # apply measure to points close to exposures with region_id=1\n", + " exp_region_id=[1], # apply measure to points close to exposures with region_id=1\n", ")\n", "\n", "# impact functions\n", @@ 
-379,7 +381,7 @@ "\n", "# Exposures\n", "exp = Exposures.from_hdf5(EXP_DEMO_H5)\n", - "#exp['region_id'] = np.ones(exp.shape[0])\n", + "# exp['region_id'] = np.ones(exp.shape[0])\n", "exp.check()\n", "# all exposures have region_id=1\n", "exp.plot_hexbin(buffer=1.0)\n", @@ -449,8 +451,8 @@ "\n", "# define measure\n", "meas = Measure(\n", - " name='Insurance',\n", - " haz_type='TC',\n", + " name=\"Insurance\",\n", + " haz_type=\"TC\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " risk_transf_attach=5.0e8,\n", @@ -471,12 +473,12 @@ "\n", "# impact before\n", "imp = ImpactCalc(exp, impf_all, haz).impact()\n", - "ax = imp.calc_freq_curve().plot(label='original');\n", + "ax = imp.calc_freq_curve().plot(label=\"original\")\n", "\n", "# impact after. risk_transf will be added to the cost of the measure\n", "imp_new, risk_transf = meas.calc_impact(exp, impf_all, haz)\n", - "imp_new.calc_freq_curve().plot(axis=ax, label='measure');\n", - "print('risk_transfer {:.3}'.format(risk_transf.aai_agg))" + "imp_new.calc_freq_curve().plot(axis=ax, label=\"measure\")\n", + "print(\"risk_transfer {:.3}\".format(risk_transf.aai_agg))" ] }, { @@ -515,8 +517,8 @@ "from climada.entity.measures import Measure, MeasureSet\n", "\n", "meas_1 = Measure(\n", - " haz_type='TC',\n", - " name='Mangrove',\n", + " haz_type=\"TC\",\n", + " name=\"Mangrove\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=500000000,\n", " mdd_impact=(1, 2),\n", @@ -526,8 +528,8 @@ ")\n", "\n", "meas_2 = Measure(\n", - " haz_type='TC',\n", - " name='Sandbags',\n", + " haz_type=\"TC\",\n", + " name=\"Sandbags\",\n", " color_rgb=np.array([1, 1, 1]),\n", " cost=22000000,\n", " mdd_impact=(1, 2),\n", @@ -543,7 +545,7 @@ "meas_set.check()\n", "\n", "# select one measure\n", - "meas_sel = meas_set.get_measure(name='Sandbags')\n", + "meas_sel = meas_set.get_measure(name=\"Sandbags\")\n", "print(meas_sel[0].name, meas_sel[0].cost)" ] }, @@ -582,7 +584,7 @@ "from climada.util import ENT_TEMPLATE_XLS\n", "\n", "# Fill DataFrame from Excel file\n", - "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", + "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", "meas_set = MeasureSet.from_excel(file_name)\n", "meas_set" ] @@ -611,11 +613,11 @@ "from climada.util import ENT_TEMPLATE_XLS\n", "\n", "# Fill DataFrame from Excel file\n", - "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", + "file_name = ENT_TEMPLATE_XLS # provide absolute path of the excel file\n", "meas_set = MeasureSet.from_excel(file_name)\n", "\n", "# write file\n", - "meas_set.write_excel('results/tutorial_meas_set.xlsx')" + "meas_set.write_excel(\"results/tutorial_meas_set.xlsx\")" ] }, { @@ -638,8 +640,9 @@ "outputs": [], "source": [ "from climada.util.save import save\n", + "\n", "# this generates a results folder in the current path and stores the output there\n", - "save('tutorial_meas_set.p', meas_set)" + "save(\"tutorial_meas_set.p\", meas_set)" ] } ], diff --git a/doc/tutorial/climada_hazard_Hazard.ipynb b/doc/tutorial/climada_hazard_Hazard.ipynb index 94dd517dc5..19cc98a0f7 100644 --- a/doc/tutorial/climada_hazard_Hazard.ipynb +++ b/doc/tutorial/climada_hazard_Hazard.ipynb @@ -95,27 +95,33 @@ "import numpy as np\n", "from climada.hazard import Hazard\n", "from climada.util.constants import HAZ_DEMO_FL\n", + "\n", "# to hide the warnings\n", "import warnings\n", - "warnings.filterwarnings('ignore')\n", "\n", - "# read intensity from raster file HAZ_DEMO_FL and set frequency for the contained 
event\n",
- "haz_ven = Hazard.from_raster([HAZ_DEMO_FL], attrs={'frequency':np.ones(1)/2}, haz_type='FL')\n",
+ "warnings.filterwarnings(\"ignore\")\n",
+ "\n",
+ "# read intensity from raster file HAZ_DEMO_FL and set frequency for the contained event\n",
+ "haz_ven = Hazard.from_raster(\n",
+ "    [HAZ_DEMO_FL], attrs={\"frequency\": np.ones(1) / 2}, haz_type=\"FL\"\n",
+ ")\n",
 "haz_ven.check()\n",
 "\n",
 "# The masked values of the raster are set to 0\n",
 "# Sometimes the raster file does not contain all the information, as in this case the mask value -9999\n",
 "# We mask it manually and plot it using plot_intensity()\n",
- "haz_ven.intensity[haz_ven.intensity==-9999] = 0\n",
- "haz_ven.plot_intensity(1, smooth=False) # if smooth=True (default value) is used, the computation time might increase\n",
+ "haz_ven.intensity[haz_ven.intensity == -9999] = 0\n",
+ "haz_ven.plot_intensity(\n",
+ "    1, smooth=False\n",
+ ")  # if smooth=True (default value) is used, the computation time might increase\n",
 "\n",
 "# by default the following attributes have been set\n",
- "print('event_id: ', haz_ven.event_id)\n",
- "print('event_name: ', haz_ven.event_name)\n",
- "print('date: ', haz_ven.date)\n",
- "print('frequency: ', haz_ven.frequency)\n",
- "print('orig: ', haz_ven.orig)\n",
- "print('min, max fraction: ', haz_ven.fraction.min(), haz_ven.fraction.max())"
+ "print(\"event_id: \", haz_ven.event_id)\n",
+ "print(\"event_name: \", haz_ven.event_name)\n",
+ "print(\"date: \", haz_ven.date)\n",
+ "print(\"frequency: \", haz_ven.frequency)\n",
+ "print(\"orig: \", haz_ven.orig)\n",
+ "print(\"min, max fraction: \", haz_ven.fraction.min(), haz_ven.fraction.max())"
 ]
 },
 {
@@ -135,10 +141,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "# Put your code here\n",
- "\n",
- "\n",
- "\n"
+ "# Put your code here"
 ]
 },
 {
@@ -212,30 +215,42 @@
 "# Solution:\n",
 "\n",
 "# 1. The CRS can be reprojected using dst_crs option\n",
- "haz = Hazard.from_raster([HAZ_DEMO_FL], dst_crs='epsg:2201', haz_type='FL')\n",
+ "haz = Hazard.from_raster([HAZ_DEMO_FL], dst_crs=\"epsg:2201\", haz_type=\"FL\")\n",
 "haz.check()\n",
- "print('\\n Solution 1:')\n",
- "print('centroids CRS:', haz.centroids.crs)\n",
- "print('raster info:', haz.centroids.get_meta())\n",
+ "print(\"\\n Solution 1:\")\n",
+ "print(\"centroids CRS:\", haz.centroids.crs)\n",
+ "print(\"raster info:\", haz.centroids.get_meta())\n",
 "\n",
 "# 2. Transformations of the coordinates can be set using the transform option and Affine\n",
 "from rasterio import Affine\n",
- "haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type='FL',\n",
- "                         transform=Affine(0.009000000000000341, 0.0, -69.33714959699981, \\\n",
- "                                          0.0, -0.009000000000000341, 10.42822096697894),\n",
- "                         height=500, width=501)\n",
+ "\n",
+ "haz = Hazard.from_raster(\n",
+ "    [HAZ_DEMO_FL],\n",
+ "    haz_type=\"FL\",\n",
+ "    transform=Affine(\n",
+ "        0.009000000000000341,\n",
+ "        0.0,\n",
+ "        -69.33714959699981,\n",
+ "        0.0,\n",
+ "        -0.009000000000000341,\n",
+ "        10.42822096697894,\n",
+ "    ),\n",
+ "    height=500,\n",
+ "    width=501,\n",
+ ")\n",
 "haz.check()\n",
- "print('\\n Solution 2:')\n",
- "print('raster info:', haz.centroids.get_meta())\n",
- "print('intensity size:', haz.intensity.shape)\n",
+ "print(\"\\n Solution 2:\")\n",
+ "print(\"raster info:\", haz.centroids.get_meta())\n",
+ "print(\"intensity size:\", haz.intensity.shape)\n",
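The `Affine` object used in Solution 2 maps a pixel index (column, row) to map coordinates via x = c + a·col and y = f + e·row for an axis-aligned grid (b = d = 0). A quick corner check with the transform values from the cell above (illustration only):

```python
from rasterio import Affine

# Transform from Solution 2: 0.009° pixels, origin at the upper-left corner.
trafo = Affine(0.009000000000000341, 0.0, -69.33714959699981,
               0.0, -0.009000000000000341, 10.42822096697894)

# Affine maps (col, row) -> (x, y); row 0 / col 0 is the upper-left pixel.
print(trafo * (0, 0))      # upper-left corner
print(trafo * (501, 500))  # lower-right corner for width=501, height=500
```

 "\n",
 "# 3. 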
A partial part of the raster can be loaded using the window or geometry\n", "from rasterio.windows import Window\n", - "haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type='FL', window=Window(10, 10, 20, 30))\n", + "\n", + "haz = Hazard.from_raster([HAZ_DEMO_FL], haz_type=\"FL\", window=Window(10, 10, 20, 30))\n", "haz.check()\n", - "print('\\n Solution 3:')\n", - "print('raster info:', haz.centroids.get_meta())\n", - "print('intensity size:', haz.intensity.shape)" + "print(\"\\n Solution 3:\")\n", + "print(\"raster info:\", haz.centroids.get_meta())\n", + "print(\"intensity size:\", haz.intensity.shape)" ] }, { @@ -266,10 +281,13 @@ ], "source": [ "from climada.hazard import Hazard, Centroids\n", - "from climada.util import HAZ_DEMO_H5 # CLIMADA's Python file\n", + "from climada.util import HAZ_DEMO_H5 # CLIMADA's Python file\n", + "\n", "# Hazard needs to know the acronym of the hazard type to be constructed!!! Use 'NA' if not known.\n", - "haz_tc_fl = Hazard.from_hdf5(HAZ_DEMO_H5) # Historic tropical cyclones in Florida from 1990 to 2004\n", - "haz_tc_fl.check() # Use always the check() method to see if the hazard has been loaded correctly" + "haz_tc_fl = Hazard.from_hdf5(\n", + " HAZ_DEMO_H5\n", + ") # Historic tropical cyclones in Florida from 1990 to 2004\n", + "haz_tc_fl.check() # Use always the check() method to see if the hazard has been loaded correctly" ] }, { @@ -298,50 +316,152 @@ } ], "source": [ - "# setting points\n", + "# setting points\n", "import numpy as np\n", "from scipy import sparse\n", "\n", - "lat = np.array([26.933899, 26.957203, 26.783846, 26.645524, 26.897796, 26.925359, \\\n", - " 26.914768, 26.853491, 26.845099, 26.82651 , 26.842772, 26.825905, \\\n", - " 26.80465 , 26.788649, 26.704277, 26.71005 , 26.755412, 26.678449, \\\n", - " 26.725649, 26.720599, 26.71255 , 26.6649 , 26.664699, 26.663149, \\\n", - " 26.66875 , 26.638517, 26.59309 , 26.617449, 26.620079, 26.596795, \\\n", - " 26.577049, 26.524585, 26.524158, 26.523737, 26.520284, 26.547349, \\\n", - " 26.463399, 26.45905 , 26.45558 , 26.453699, 26.449999, 26.397299, \\\n", - " 26.4084 , 26.40875 , 26.379113, 26.3809 , 26.349068, 26.346349, \\\n", - " 26.348015, 26.347957])\n", - "\n", - "lon = np.array([-80.128799, -80.098284, -80.748947, -80.550704, -80.596929, \\\n", - " -80.220966, -80.07466 , -80.190281, -80.083904, -80.213493, \\\n", - " -80.0591 , -80.630096, -80.075301, -80.069885, -80.656841, \\\n", - " -80.190085, -80.08955 , -80.041179, -80.1324 , -80.091746, \\\n", - " -80.068579, -80.090698, -80.1254 , -80.151401, -80.058749, \\\n", - " -80.283371, -80.206901, -80.090649, -80.055001, -80.128711, \\\n", - " -80.076435, -80.080105, -80.06398 , -80.178973, -80.110519, \\\n", - " -80.057701, -80.064251, -80.07875 , -80.139247, -80.104316, \\\n", - " -80.188545, -80.21902 , -80.092391, -80.1575 , -80.102028, \\\n", - " -80.16885 , -80.116401, -80.08385 , -80.241305, -80.158855])\n", - "\n", - "n_cen = lon.size # number of centroids\n", - "n_ev = 10 # number of events\n", + "lat = np.array(\n", + " [\n", + " 26.933899,\n", + " 26.957203,\n", + " 26.783846,\n", + " 26.645524,\n", + " 26.897796,\n", + " 26.925359,\n", + " 26.914768,\n", + " 26.853491,\n", + " 26.845099,\n", + " 26.82651,\n", + " 26.842772,\n", + " 26.825905,\n", + " 26.80465,\n", + " 26.788649,\n", + " 26.704277,\n", + " 26.71005,\n", + " 26.755412,\n", + " 26.678449,\n", + " 26.725649,\n", + " 26.720599,\n", + " 26.71255,\n", + " 26.6649,\n", + " 26.664699,\n", + " 26.663149,\n", + " 26.66875,\n", + " 26.638517,\n", + " 26.59309,\n", + 
" 26.617449,\n", + " 26.620079,\n", + " 26.596795,\n", + " 26.577049,\n", + " 26.524585,\n", + " 26.524158,\n", + " 26.523737,\n", + " 26.520284,\n", + " 26.547349,\n", + " 26.463399,\n", + " 26.45905,\n", + " 26.45558,\n", + " 26.453699,\n", + " 26.449999,\n", + " 26.397299,\n", + " 26.4084,\n", + " 26.40875,\n", + " 26.379113,\n", + " 26.3809,\n", + " 26.349068,\n", + " 26.346349,\n", + " 26.348015,\n", + " 26.347957,\n", + " ]\n", + ")\n", + "\n", + "lon = np.array(\n", + " [\n", + " -80.128799,\n", + " -80.098284,\n", + " -80.748947,\n", + " -80.550704,\n", + " -80.596929,\n", + " -80.220966,\n", + " -80.07466,\n", + " -80.190281,\n", + " -80.083904,\n", + " -80.213493,\n", + " -80.0591,\n", + " -80.630096,\n", + " -80.075301,\n", + " -80.069885,\n", + " -80.656841,\n", + " -80.190085,\n", + " -80.08955,\n", + " -80.041179,\n", + " -80.1324,\n", + " -80.091746,\n", + " -80.068579,\n", + " -80.090698,\n", + " -80.1254,\n", + " -80.151401,\n", + " -80.058749,\n", + " -80.283371,\n", + " -80.206901,\n", + " -80.090649,\n", + " -80.055001,\n", + " -80.128711,\n", + " -80.076435,\n", + " -80.080105,\n", + " -80.06398,\n", + " -80.178973,\n", + " -80.110519,\n", + " -80.057701,\n", + " -80.064251,\n", + " -80.07875,\n", + " -80.139247,\n", + " -80.104316,\n", + " -80.188545,\n", + " -80.21902,\n", + " -80.092391,\n", + " -80.1575,\n", + " -80.102028,\n", + " -80.16885,\n", + " -80.116401,\n", + " -80.08385,\n", + " -80.241305,\n", + " -80.158855,\n", + " ]\n", + ")\n", + "\n", + "n_cen = lon.size # number of centroids\n", + "n_ev = 10 # number of events\n", "\n", "intensity = sparse.csr_matrix(np.random.random((n_ev, n_cen)))\n", "fraction = intensity.copy()\n", "fraction.data.fill(1)\n", "\n", - "haz = Hazard(haz_type='TC',\n", - " intensity=intensity,\n", - " fraction=fraction,\n", - " centroids=Centroids(lat=lat, lon=lon), # default crs used\n", - " units='m',\n", - " event_id=np.arange(n_ev, dtype=int),\n", - " event_name=['ev_12', 'ev_21', 'Maria', 'ev_35',\n", - " 'Irma', 'ev_16', 'ev_15', 'Edgar', 'ev_1', 'ev_9'],\n", - " date=np.array([721166, 734447, 734447, 734447, 721167,\n", - " 721166, 721167, 721200, 721166, 721166]),\n", - " orig=np.zeros(n_ev, bool),\n", - " frequency=np.ones(n_ev)/n_ev,)\n", + "haz = Hazard(\n", + " haz_type=\"TC\",\n", + " intensity=intensity,\n", + " fraction=fraction,\n", + " centroids=Centroids(lat=lat, lon=lon), # default crs used\n", + " units=\"m\",\n", + " event_id=np.arange(n_ev, dtype=int),\n", + " event_name=[\n", + " \"ev_12\",\n", + " \"ev_21\",\n", + " \"Maria\",\n", + " \"ev_35\",\n", + " \"Irma\",\n", + " \"ev_16\",\n", + " \"ev_15\",\n", + " \"Edgar\",\n", + " \"ev_1\",\n", + " \"ev_9\",\n", + " ],\n", + " date=np.array(\n", + " [721166, 734447, 734447, 734447, 721167, 721166, 721167, 721200, 721166, 721166]\n", + " ),\n", + " orig=np.zeros(n_ev, bool),\n", + " frequency=np.ones(n_ev) / n_ev,\n", + ")\n", "\n", "haz.check()\n", "haz.centroids.plot();" @@ -363,10 +483,17 @@ "# using from_pnt_bounds\n", "\n", "# bounds\n", - "left, bottom, right, top = -72, -3.0, -52.0, 22 # the bounds refer to the bounds of the center of the pixel\n", + "left, bottom, right, top = (\n", + " -72,\n", + " -3.0,\n", + " -52.0,\n", + " 22,\n", + ") # the bounds refer to the bounds of the center of the pixel\n", "# resolution\n", "res = 0.5\n", - "centroids = Centroids.from_pnt_bounds((left, bottom, right, top), res) # default crs used" + "centroids = Centroids.from_pnt_bounds(\n", + " (left, bottom, right, top), res\n", + ") # default crs used" ] }, { @@ -393,26 
+520,24 @@ "\n", "# raster info:\n", "# border upper left corner (of the pixel, not of the center of the pixel)\n", - "max_lat = top + res/2\n", - "min_lon = left - res/2\n", + "max_lat = top + res / 2\n", + "min_lon = left - res / 2\n", "# resolution in lat and lon\n", - "d_lat = -res # negative because starting in upper corner\n", - "d_lon = res # same step as d_lat\n", + "d_lat = -res # negative because starting in upper corner\n", + "d_lon = res # same step as d_lat\n", "# number of points\n", "n_lat, n_lon = centroids.shape\n", "\n", "# meta: raster specification\n", "meta = {\n", - " 'dtype': 'float32',\n", - " 'width': n_lon,\n", - " 'height': n_lat,\n", - " 'crs': DEF_CRS,\n", - " 'transform': rasterio.Affine(\n", - " a=d_lon, b=0.0, c=min_lon,\n", - " d=0.0, e=d_lat, f=max_lat),\n", + " \"dtype\": \"float32\",\n", + " \"width\": n_lon,\n", + " \"height\": n_lat,\n", + " \"crs\": DEF_CRS,\n", + " \"transform\": rasterio.Affine(a=d_lon, b=0.0, c=min_lon, d=0.0, e=d_lat, f=max_lat),\n", "}\n", "\n", - "centroids_from_meta = Centroids.from_meta(meta) # default crs used\n", + "centroids_from_meta = Centroids.from_meta(meta) # default crs used\n", "\n", "centroids_from_meta == centroids" ] @@ -446,27 +571,40 @@ "import numpy as np\n", "from scipy import sparse\n", "\n", - "n_ev = 10 # number of events\n", + "n_ev = 10 # number of events\n", "\n", "intensity = sparse.csr_matrix(np.random.random((n_ev, centroids.size)))\n", "fraction = intensity.copy()\n", "fraction.data.fill(1)\n", "\n", - "haz = Hazard('TC',\n", - " centroids=centroids,\n", - " intensity=intensity,\n", - " fraction=fraction,\n", - " units='m',\n", - " event_id=np.arange(n_ev, dtype=int),\n", - " event_name=['ev_12', 'ev_21', 'Maria', 'ev_35',\n", - " 'Irma', 'ev_16', 'ev_15', 'Edgar', 'ev_1', 'ev_9'],\n", - " date=np.array([721166, 734447, 734447, 734447, 721167,\n", - " 721166, 721167, 721200, 721166, 721166]),\n", - " orig=np.zeros(n_ev, bool),\n", - " frequency=np.ones(n_ev)/n_ev,)\n", + "haz = Hazard(\n", + " \"TC\",\n", + " centroids=centroids,\n", + " intensity=intensity,\n", + " fraction=fraction,\n", + " units=\"m\",\n", + " event_id=np.arange(n_ev, dtype=int),\n", + " event_name=[\n", + " \"ev_12\",\n", + " \"ev_21\",\n", + " \"Maria\",\n", + " \"ev_35\",\n", + " \"Irma\",\n", + " \"ev_16\",\n", + " \"ev_15\",\n", + " \"Edgar\",\n", + " \"ev_1\",\n", + " \"ev_9\",\n", + " ],\n", + " date=np.array(\n", + " [721166, 734447, 734447, 734447, 721167, 721166, 721167, 721200, 721166, 721166]\n", + " ),\n", + " orig=np.zeros(n_ev, bool),\n", + " frequency=np.ones(n_ev) / n_ev,\n", + ")\n", "\n", "haz.check()\n", - "print('Check centroids borders:', haz.centroids.total_bounds)\n", + "print(\"Check centroids borders:\", haz.centroids.total_bounds)\n", "haz.centroids.plot();" ] }, @@ -512,8 +650,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Put your code here:\n", - "\n" + "# Put your code here:" ] }, { @@ -522,7 +659,7 @@ "metadata": {}, "outputs": [], "source": [ - "#help(hist_tc.centroids) # If you want to run it, do it after you execute the next block" + "# help(hist_tc.centroids) # If you want to run it, do it after you execute the next block" ] }, { @@ -548,26 +685,26 @@ "# SOLUTION:\n", "\n", "# 1.How many synthetic events are contained?\n", - "print('Number of total events:', haz_tc_fl.size)\n", - "print('Number of synthetic events:', np.logical_not(haz_tc_fl.orig).astype(int).sum())\n", + "print(\"Number of total events:\", haz_tc_fl.size)\n", + "print(\"Number of synthetic events:\", 
np.logical_not(haz_tc_fl.orig).astype(int).sum())\n",
 "\n",
 "# 2. Generate a hazard with historical hurricanes occurring between 1995 and 2001.\n",
- "hist_tc = haz_tc_fl.select(date=('1995-01-01', '2001-12-31'), orig=True)\n",
- "print('Number of historical events between 1995 and 2001:', hist_tc.size)\n",
+ "hist_tc = haz_tc_fl.select(date=(\"1995-01-01\", \"2001-12-31\"), orig=True)\n",
+ "print(\"Number of historical events between 1995 and 2001:\", hist_tc.size)\n",
 "\n",
 "# 3. How many historical hurricanes occurred in 1999? Which was the year with most hurricanes between 1995 and 2001?\n",
- "ev_per_year = hist_tc.calc_year_set() # events ids per year\n",
- "print('Number of events in 1999:', ev_per_year[1999].size)\n",
+ "ev_per_year = hist_tc.calc_year_set()  # events ids per year\n",
+ "print(\"Number of events in 1999:\", ev_per_year[1999].size)\n",
 "max_year = 1995\n",
 "max_ev = ev_per_year[1995].size\n",
 "for year, ev in ev_per_year.items():\n",
 "    if ev.size > max_ev:\n",
 "        max_year = year\n",
 "        max_ev = ev.size\n",
- "print('Year with most hurricanes between 1995 and 2001:', max_year)\n",
+ "print(\"Year with most hurricanes between 1995 and 2001:\", max_year)\n",
 "\n",
- "# 4. What is the number of centroids with distance to coast smaller than 1km?\n",
+ "# 4. What is the number of centroids with distance to coast smaller than 1km?\n",
 "num_cen_coast = np.argwhere(hist_tc.centroids.get_dist_coast() < 1000).size\n",
- "print('Number of centroids close to coast: ', num_cen_coast)"
+ "print(\"Number of centroids close to coast: \", num_cen_coast)"
 ]
 },
 {
@@ -745,14 +882,16 @@
 ],
 "source": [
 "# 1. intensities of the largest event (defined as greater sum of intensities):\n",
- "# all events:\n",
- "haz_tc_fl.plot_intensity(event=-1) # largest historical event: 1992230N11325 hurricane ANDREW\n",
+ "# all events:\n",
+ "haz_tc_fl.plot_intensity(\n",
+ "    event=-1\n",
+ ")  # largest historical event: 1992230N11325 hurricane ANDREW\n",
 "\n",
 "# 2. maximum intensities at each centroid:\n",
 "haz_tc_fl.plot_intensity(event=0)\n",
 "\n",
 "# 3. intensities of hurricane 1998295N12284:\n",
- "haz_tc_fl.plot_intensity(event='1998295N12284', cmap='BuGn') # setting color map\n",
+ "haz_tc_fl.plot_intensity(event=\"1998295N12284\", cmap=\"BuGn\")  # setting color map\n",
 "\n",
 "# 4. tropical cyclone intensities maps for the return periods [10, 50, 75, 100]\n",
 "_, res = haz_tc_fl.plot_rp_intensity([10, 50, 75, 100])\n",
@@ -760,6 +899,7 @@
 "# 5. tropical cyclone return period maps for the threshold intensities [30, 40]\n",
 "return_periods, label, column_label = haz_tc_fl.local_return_period([30, 40])\n",
 "from climada.util.plot import plot_from_gdf\n",
+ "\n",
 "plot_from_gdf(return_periods, colorbar_name=label, title_subplots=column_label)\n",
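The return-period maps in steps 4 and 5 invert local exceedance frequencies: sum the frequencies of all events whose intensity exceeds a threshold at a centroid, then take the reciprocal. A compact single-centroid sketch (toy values; schematic, not CLIMADA's exact estimator):

```python
import numpy as np

# Event intensities at one centroid and their annual frequencies (toy values).
inten = np.array([42.0, 31.0, 55.0, 18.0])
freq = np.array([0.02, 0.05, 0.01, 0.10])

def local_return_period_sketch(threshold):
    # exceedance frequency = summed frequency of events above the threshold;
    # the return period is its reciprocal (np.inf if never exceeded)
    f_exc = freq[inten > threshold].sum()
    return 1.0 / f_exc if f_exc > 0 else np.inf

for thr in (30, 40):
    print(thr, local_return_period_sketch(thr))  # 12.5 and ~33.3 years
```

 "\n",
 "# 6. 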
intensities of all the events in centroid with id 50\n",
@@ -791,9 +931,9 @@
 "import matplotlib.pyplot as plt\n",
 "\n",
 "fig, ax1, fontsize = make_map(1) # map\n",
- "ax2 = fig.add_subplot(2, 1, 2) # add regular axes\n",
- "haz_tc_fl.plot_intensity(axis=ax1, event=0) # plot original resolution\n",
- "ax1.plot(-80, 26, 'or', mfc='none', markersize=12)\n",
+ "ax2 = fig.add_subplot(2, 1, 2)  # add regular axes\n",
+ "haz_tc_fl.plot_intensity(axis=ax1, event=0)  # plot original resolution\n",
+ "ax1.plot(-80, 26, \"or\", mfc=\"none\", markersize=12)\n",
 "haz_tc_fl.plot_intensity(axis=ax2, centr=(26, -80))\n",
 "fig.subplots_adjust(hspace=6.5)"
 ]
@@ -830,9 +970,9 @@
 ],
 "source": [
 "# If you see an error message, try to create a directory named results in the tutorial folder.\n",
- "haz_tc_fl.write_hdf5('results/haz_tc_fl.h5')\n",
+ "haz_tc_fl.write_hdf5(\"results/haz_tc_fl.h5\")\n",
 "\n",
- "haz = Hazard.from_hdf5('results/haz_tc_fl.h5')\n",
+ "haz = Hazard.from_hdf5(\"results/haz_tc_fl.h5\")\n",
 "haz.check()"
 ]
 },
@@ -857,7 +997,7 @@
 }
 ],
 "source": [
- "haz_ven.write_raster('results/haz_ven.tif') # each event is a band of the tif file"
+ "haz_ven.write_raster(\"results/haz_ven.tif\")  # each event is a band of the tif file"
 ]
 },
 {
@@ -882,8 +1022,8 @@
 ],
 "source": [
 "from climada.util.save import save\n",
+ "\n",
 "# this generates a results folder in the current path and stores the output there\n",
- "save('tutorial_haz_tc_fl.p', haz_tc_fl)"
+ "save(\"tutorial_haz_tc_fl.p\", haz_tc_fl)"
 ]
 }
 ],
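As the comment above notes, `save()` drops a pickle file into a `results` folder under the current path, so the stored object can also be restored with the standard library. A minimal sketch, assuming that default location and the file name used in the cell above:

```python
import pickle
from pathlib import Path

# Restore the hazard pickled by save() (path assumed from the tutorial comment:
# a `results` folder created in the current working directory).
with Path("results", "tutorial_haz_tc_fl.p").open("rb") as fp:
    haz_restored = pickle.load(fp)

print(type(haz_restored))
```

diff --git a/doc/tutorial/climada_hazard_StormEurope.ipynb b/doc/tutorial/climada_hazard_StormEurope.ipynb
index 3c0ba68658..7772d60579 100644
--- a/doc/tutorial/climada_hazard_StormEurope.ipynb
+++ b/doc/tutorial/climada_hazard_StormEurope.ipynb
@@ -21,7 +21,8 @@
 "source": [
 "%matplotlib inline\n",
 "import matplotlib.pyplot as plt\n",
- "plt.rcParams['figure.figsize'] = [15, 10]"
+ "\n",
+ "plt.rcParams[\"figure.figsize\"] = [15, 10]"
 ]
 },
 {
@@ -107,7 +108,7 @@
 }
 ],
 "source": [
- "storm_instance?"
+ "?storm_instance"
 ]
 },
 {
@@ -144,12 +145,12 @@
 "outputs": [],
 "source": [
 "storm_instance.set_ssi(\n",
- "    method = 'wind_gust',\n",
- "    intensity = storm_instance.intensity,\n",
+ "    method=\"wind_gust\",\n",
+ "    intensity=storm_instance.intensity,\n",
 "    # the above is just a more explicit way of passing the default\n",
- "    on_land = True,\n",
- "    threshold = 25,\n",
- "    sel_cen = None\n",
+ "    on_land=True,\n",
+ "    threshold=25,\n",
+ "    sel_cen=None,\n",
 "    # None is default. 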
sel_cen could be used to subset centroids\n", ")" ] @@ -244,16 +245,16 @@ "outputs": [], "source": [ "ssi_args = {\n", - " 'on_land': True,\n", - " 'threshold': 25,\n", + " \"on_land\": True,\n", + " \"threshold\": 25,\n", "}\n", "\n", "storm_prob_xtreme = storm_instance.generate_prob_storms(\n", - " reg_id=[56, 528], # BEL and NLD\n", + " reg_id=[56, 528], # BEL and NLD\n", " spatial_shift=2,\n", " ssi_args=ssi_args,\n", " power=1.5,\n", - " scale=0.3\n", + " scale=0.3,\n", ")" ] }, @@ -306,7 +307,7 @@ } ], "source": [ - "storm_prob_xtreme.plot_ssi(full_area=True);\n", + "storm_prob_xtreme.plot_ssi(full_area=True)\n", "storm_prob.plot_ssi(full_area=True);" ] } diff --git a/doc/tutorial/climada_hazard_TropCyclone.ipynb b/doc/tutorial/climada_hazard_TropCyclone.ipynb index 79b63981a0..480d5c0b49 100644 --- a/doc/tutorial/climada_hazard_TropCyclone.ipynb +++ b/doc/tutorial/climada_hazard_TropCyclone.ipynb @@ -142,26 +142,35 @@ "%matplotlib inline\n", "from climada.hazard import TCTracks\n", "\n", - "tr_irma = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='2017242N16333') # IRMA 2017\n", - "ax = tr_irma.plot();\n", - "ax.set_title('IRMA') # set title\n", + "tr_irma = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", storm_id=\"2017242N16333\"\n", + ") # IRMA 2017\n", + "ax = tr_irma.plot()\n", + "ax.set_title(\"IRMA\") # set title\n", "\n", "# other ibtracs selection options\n", "from climada.hazard import TCTracks\n", + "\n", "# years 1993 and 1994 in basin EP.\n", "# correct_pres ignores tracks with not enough data. For statistics (frequency of events), these should be considered as well\n", - "sel_ibtracs = TCTracks.from_ibtracs_netcdf(provider='usa', year_range=(1993, 1994), basin='EP', correct_pres=False)\n", - "print('Number of tracks:', sel_ibtracs.size)\n", - "ax = sel_ibtracs.plot();\n", - "ax.get_legend()._loc = 2 # correct legend location\n", - "ax.set_title('1993-1994, EP') # set title\n", + "sel_ibtracs = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", year_range=(1993, 1994), basin=\"EP\", correct_pres=False\n", + ")\n", + "print(\"Number of tracks:\", sel_ibtracs.size)\n", + "ax = sel_ibtracs.plot()\n", + "ax.get_legend()._loc = 2 # correct legend location\n", + "ax.set_title(\"1993-1994, EP\") # set title\n", "\n", - "track1 = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='2007314N10093') # SIDR 2007\n", - "track2 = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='2016138N10081') # ROANU 2016\n", - "track1.append(track2.data) # put both tracks together\n", - "ax = track1.plot();\n", - "ax.get_legend()._loc = 2 # correct legend location\n", - "ax.set_title('SIDR and ROANU'); # set title" + "track1 = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", storm_id=\"2007314N10093\"\n", + ") # SIDR 2007\n", + "track2 = TCTracks.from_ibtracs_netcdf(\n", + " provider=\"usa\", storm_id=\"2016138N10081\"\n", + ") # ROANU 2016\n", + "track1.append(track2.data) # put both tracks together\n", + "ax = track1.plot()\n", + "ax.get_legend()._loc = 2 # correct legend location\n", + "ax.set_title(\"SIDR and ROANU\"); # set title" ] }, { @@ -781,7 +790,7 @@ } ], "source": [ - "tr_irma.get_track('2017242N16333')" + "tr_irma.get_track(\"2017242N16333\")" ] }, { @@ -1675,7 +1684,7 @@ } ], "source": [ - "tr_irma.data[-1] # last synthetic track. notice the value of orig_event_flag and name" + "tr_irma.data[-1] # last synthetic track. 
notice the value of orig_event_flag and name" ] }, { @@ -1701,10 +1710,7 @@ }, "outputs": [], "source": [ - "# Put your code here\n", - "\n", - "\n", - "\n" + "# Put your code here" ] }, { @@ -1737,20 +1743,23 @@ "source": [ "# SOLUTION:\n", "import numpy as np\n", + "\n", "# select the track\n", - "tc_syn = tr_irma.get_track('2017242N16333_gen1')\n", + "tc_syn = tr_irma.get_track(\"2017242N16333_gen1\")\n", "\n", "# 1. Which is the time frequency of the data?\n", "# The values of a DataArray are numpy.arrays.\n", "# The nummpy.ediff1d computes the different between elements in an array\n", "diff_time_ns = np.ediff1d(tc_syn[\"time\"])\n", - "diff_time_h = diff_time_ns.astype(int)/1000/1000/1000/60/60\n", - "print('Mean time frequency in hours:', diff_time_h.mean())\n", - "print('Std time frequency in hours:', diff_time_h.std())\n", + "diff_time_h = diff_time_ns.astype(int) / 1000 / 1000 / 1000 / 60 / 60\n", + "print(\"Mean time frequency in hours:\", diff_time_h.mean())\n", + "print(\"Std time frequency in hours:\", diff_time_h.std())\n", "print()\n", "\n", "# 2. Compute the maximum sustained wind for each day.\n", - "print('Daily max sustained wind:', tc_syn[\"max_sustained_wind\"].groupby('time.day').max())" + "print(\n", + " \"Daily max sustained wind:\", tc_syn[\"max_sustained_wind\"].groupby(\"time.day\").max()\n", + ")" ] }, { @@ -1887,15 +1896,16 @@ "min_lat, max_lat, min_lon, max_lon = 16.99375, 21.95625, -72.48125, -61.66875\n", "cent = Centroids.from_pnt_bounds((min_lon, min_lat, max_lon, max_lat), res=0.12)\n", "cent.check()\n", - "cent.plot();\n", + "cent.plot()\n", "\n", "# construct tropical cyclones\n", "tc_irma = TropCyclone.from_tracks(tr_irma, centroids=cent)\n", "# tc_irma = TropCyclone.from_tracks(tr_irma) # try without given centroids. 
It might take too much space of your memory\n", "# and then the kernel will be killed: So, don't use this function without given centroids!\n", "tc_irma.check()\n", - "tc_irma.plot_intensity('2017242N16333'); # IRMA\n", - "tc_irma.plot_intensity('2017242N16333_gen2'); # IRMA's synthetic track 2" + "tc_irma.plot_intensity(\"2017242N16333\")\n", + "# IRMA\n", + "tc_irma.plot_intensity(\"2017242N16333_gen2\"); # IRMA's synthetic track 2" ] }, { @@ -1944,13 +1954,18 @@ "source": [ "# an Irma event-like in 2055 under RCP 4.5:\n", "tc_irma = TropCyclone.from_tracks(tr_irma, centroids=cent)\n", - "tc_irma_cc = tc_irma.apply_climate_scenario_knu(target_year=2055, scenario='4.5')\n", + "tc_irma_cc = tc_irma.apply_climate_scenario_knu(target_year=2055, scenario=\"4.5\")\n", "\n", "rel_freq_incr = np.round(\n", - " (np.mean(tc_irma_cc.frequency) - np.mean(tc_irma.frequency)\n", - " ) / np.mean(tc_irma.frequency)*100, 0)\n", + " (np.mean(tc_irma_cc.frequency) - np.mean(tc_irma.frequency))\n", + " / np.mean(tc_irma.frequency)\n", + " * 100,\n", + " 0,\n", + ")\n", "\n", - "print(f\"\\nA TC like Irma would undergo a frequency increase of about {rel_freq_incr} % in 2055 under RCP 45\")" + "print(\n", + " f\"\\nA TC like Irma would undergo a frequency increase of about {rel_freq_incr} % in 2055 under RCP 45\"\n", + ")" ] }, { @@ -2067,9 +2082,9 @@ "\n", "from climada.hazard import Centroids, TropCyclone, TCTracks\n", "\n", - "track_name = '2017242N16333' #'2016273N13300' #'1992230N11325'\n", + "track_name = \"2017242N16333\" #'2016273N13300' #'1992230N11325'\n", "\n", - "tr_irma = TCTracks.from_ibtracs_netcdf(provider='usa', storm_id='2017242N16333')\n", + "tr_irma = TCTracks.from_ibtracs_netcdf(provider=\"usa\", storm_id=\"2017242N16333\")\n", "\n", "lon_min, lat_min, lon_max, lat_max = -83.5, 24.4, -79.8, 29.6\n", "centr_video = Centroids.from_pnt_bounds((lon_min, lat_min, lon_max, lat_max), 0.04)\n", @@ -2077,7 +2092,9 @@ "\n", "tc_video = TropCyclone()\n", "\n", - "tc_list, tr_coord = tc_video.video_intensity(track_name, tr_irma, centr_video, file_name='results/irma_tc_fl.gif')" + "tc_list, tr_coord = tc_video.video_intensity(\n", + " track_name, tr_irma, centr_video, file_name=\"results/irma_tc_fl.gif\"\n", + ")" ] }, { @@ -2156,9 +2173,11 @@ "from matplotlib import animation\n", "from matplotlib.pyplot import rcParams\n", "\n", - "rcParams['animation.ffmpeg_path'] = shutil.which('ffmpeg')\n", + "rcParams[\"animation.ffmpeg_path\"] = shutil.which(\"ffmpeg\")\n", "writer = animation.FFMpegWriter(bitrate=500)\n", - "tc_list, tr_coord = tc_video.video_intensity(track_name, tr_irma, centr_video, file_name='results/irma_tc_fl.mp4', writer=writer)" + "tc_list, tr_coord = tc_video.video_intensity(\n", + " track_name, tr_irma, centr_video, file_name=\"results/irma_tc_fl.mp4\", writer=writer\n", + ")" ] }, { diff --git a/doc/tutorial/climada_util_api_client.ipynb b/doc/tutorial/climada_util_api_client.ipynb index 580e0b08da..215f8b6d0f 100644 --- a/doc/tutorial/climada_util_api_client.ipynb +++ b/doc/tutorial/climada_util_api_client.ipynb @@ -28,6 +28,7 @@ "outputs": [], "source": [ "from climada.util.api_client import Client\n", + "\n", "client = Client()" ] }, @@ -150,10 +151,11 @@ ], "source": [ "import pandas as pd\n", + "\n", "data_types = client.list_data_type_infos()\n", "\n", "dtf = pd.DataFrame(data_types)\n", - "dtf.sort_values(['data_type_group', 'data_type'])" + "dtf.sort_values([\"data_type_group\", \"data_type\"])" ] }, { @@ -170,7 +172,7 @@ "metadata": {}, "outputs": [], "source": [ - 
"litpop_dataset_infos = client.list_dataset_infos(data_type='litpop')" + "litpop_dataset_infos = client.list_dataset_infos(data_type=\"litpop\")" ] }, { @@ -233,7 +235,9 @@ "source": [ "# as datasets are usually available per country, chosing a country or global dataset reduces the options\n", "# here we want to see which datasets are available for litpop globally:\n", - "client.get_property_values(litpop_dataset_infos, known_property_values = {'spatial_coverage':'global'})" + "client.get_property_values(\n", + " litpop_dataset_infos, known_property_values={\"spatial_coverage\": \"global\"}\n", + ")" ] }, { @@ -259,8 +263,10 @@ } ], "source": [ - "#and here for Switzerland:\n", - "client.get_property_values(litpop_dataset_infos, known_property_values = {'country_name':'Switzerland'})" + "# and here for Switzerland:\n", + "client.get_property_values(\n", + " litpop_dataset_infos, known_property_values={\"country_name\": \"Switzerland\"}\n", + ")" ] }, { @@ -313,8 +319,10 @@ } ], "source": [ - "tc_dataset_infos = client.list_dataset_infos(data_type='tropical_cyclone')\n", - "client.get_property_values(tc_dataset_infos, known_property_values = {'country_name':'Haiti'})" + "tc_dataset_infos = client.list_dataset_infos(data_type=\"tropical_cyclone\")\n", + "client.get_property_values(\n", + " tc_dataset_infos, known_property_values={\"country_name\": \"Haiti\"}\n", + ")" ] }, { @@ -347,7 +355,15 @@ ], "source": [ "client = Client()\n", - "tc_haiti = client.get_hazard('tropical_cyclone', properties={'country_name': 'Haiti', 'climate_scenario': 'rcp45', 'ref_year':'2040', 'nb_synth_tracks':'10'})\n", + "tc_haiti = client.get_hazard(\n", + " \"tropical_cyclone\",\n", + " properties={\n", + " \"country_name\": \"Haiti\",\n", + " \"climate_scenario\": \"rcp45\",\n", + " \"ref_year\": \"2040\",\n", + " \"nb_synth_tracks\": \"10\",\n", + " },\n", + ")\n", "tc_haiti.plot_intensity(0);" ] }, @@ -365,7 +381,9 @@ "metadata": {}, "outputs": [], "source": [ - "litpop_default = client.get_property_values(litpop_dataset_infos, known_property_values = {'fin_mode':'pc', 'exponents':'(1,1)'})" + "litpop_default = client.get_property_values(\n", + " litpop_dataset_infos, known_property_values={\"fin_mode\": \"pc\", \"exponents\": \"(1,1)\"}\n", + ")" ] }, { @@ -385,7 +403,7 @@ } ], "source": [ - "litpop = client.get_litpop(country='Haiti')" + "litpop = client.get_litpop(country=\"Haiti\")" ] }, { @@ -446,6 +464,7 @@ "outputs": [], "source": [ "from climada.engine import ImpactCalc\n", + "\n", "impact = ImpactCalc(litpop, imp_fun_set, tc_haiti).impact()" ] }, @@ -476,7 +495,7 @@ } ], "source": [ - "crop_dataset_infos = client.list_dataset_infos(data_type='crop_production')\n", + "crop_dataset_infos = client.list_dataset_infos(data_type=\"crop_production\")\n", "\n", "client.get_property_values(crop_dataset_infos)" ] @@ -487,7 +506,10 @@ "metadata": {}, "outputs": [], "source": [ - "rice_exposure = client.get_exposures(exposures_type='crop_production', properties = {'crop':'ric', 'unit': 'USD','irrigation_status': 'noirr'})" + "rice_exposure = client.get_exposures(\n", + " exposures_type=\"crop_production\",\n", + " properties={\"crop\": \"ric\", \"unit\": \"USD\", \"irrigation_status\": \"noirr\"},\n", + ")" ] }, { @@ -584,7 +606,7 @@ } ], "source": [ - "centroids_nopoles = client.get_centroids(extent=[-180,180,-60,50])\n", + "centroids_nopoles = client.get_centroids(extent=[-180, 180, -60, 50])\n", "centroids_nopoles.plot()" ] }, @@ -612,7 +634,7 @@ } ], "source": [ - "centroids_hti = 
client.get_centroids(country='HTI')" + "centroids_hti = client.get_centroids(country=\"HTI\")" ] }, { @@ -667,7 +689,7 @@ } ], "source": [ - "Client?" + "?Client" ] }, { @@ -741,7 +763,7 @@ } ], "source": [ - "client.get_dataset_info_by_uuid('b1c76120-4e60-4d8f-99c0-7e1e7b7860ec')" + "client.get_dataset_info_by_uuid(\"b1c76120-4e60-4d8f-99c0-7e1e7b7860ec\")" ] }, { @@ -810,7 +832,8 @@ ], "source": [ "from climada.util.api_client import DatasetInfo\n", - "DatasetInfo?" + "\n", + "?DatasetInfo" ] }, { @@ -849,7 +872,8 @@ ], "source": [ "from climada.util.api_client import FileInfo\n", - "FileInfo?" + "\n", + "?FileInfo" ] }, { @@ -890,7 +914,7 @@ } ], "source": [ - "client.into_datasets_df?" + "?client.into_datasets_df" ] }, { @@ -1059,8 +1083,12 @@ ], "source": [ "from climada.util.api_client import Client\n", + "\n", "client = Client()\n", - "litpop_datasets = client.list_dataset_infos(data_type='litpop', properties={'country_name': 'South Georgia and the South Sandwich Islands'})\n", + "litpop_datasets = client.list_dataset_infos(\n", + " data_type=\"litpop\",\n", + " properties={\"country_name\": \"South Georgia and the South Sandwich Islands\"},\n", + ")\n", "litpop_df = client.into_datasets_df(litpop_datasets)\n", "litpop_df" ] @@ -1127,7 +1155,7 @@ } ], "source": [ - "client.download_dataset?" + "?client.download_dataset" ] }, { @@ -1161,7 +1189,9 @@ ], "source": [ "# Let's have a look at an example for downloading a litpop dataset first\n", - "ds = litpop_datasets[0] # litpop_datasets is a list and download_dataset expects a single object as argument.\n", + "ds = litpop_datasets[\n", + " 0\n", + "] # litpop_datasets is a list and download_dataset expects a single object as argument.\n", "download_dir, ds_files = client.download_dataset(ds)\n", "ds_files[0], ds_files[0].is_file()" ] @@ -1214,9 +1244,14 @@ ], "source": [ "from climada.util.api_client import Client\n", + "\n", "Client().get_dataset_file(\n", - " data_type='litpop',\n", - " properties={'country_name': 'South Georgia and the South Sandwich Islands', 'fin_mode': 'pop'})" + " data_type=\"litpop\",\n", + " properties={\n", + " \"country_name\": \"South Georgia and the South Sandwich Islands\",\n", + " \"fin_mode\": \"pop\",\n", + " },\n", + ")" ] }, { diff --git a/doc/tutorial/climada_util_earth_engine.ipynb b/doc/tutorial/climada_util_earth_engine.ipynb index d6ca785cec..10811ce4d7 100644 --- a/doc/tutorial/climada_util_earth_engine.ipynb +++ b/doc/tutorial/climada_util_earth_engine.ipynb @@ -53,8 +53,9 @@ "import webbrowser\n", "\n", "import ee\n", + "\n", "ee.Initialize()\n", - "image = ee.Image('srtm90_v4')\n", + "image = ee.Image(\"srtm90_v4\")\n", "print(image.getInfo())" ] }, @@ -75,10 +76,11 @@ "outputs": [], "source": [ "# Access a specific image\n", - "image = ee.Image('LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318'); #Landsat 8 image, with Top of Atmosphere processing, on 2014/03/18 \n", + "image = ee.Image(\"LANDSAT/LC08/C01/T1_TOA/LC08_044034_20140318\")\n", + "# Landsat 8 image, with Top of Atmosphere processing, on 2014/03/18\n", "\n", "# Access a collection\n", - "collection = 'LANDSAT/LE07/C01/T1' #Landsat 7 raw images collection" + "collection = \"LANDSAT/LE07/C01/T1\" # Landsat 7 raw images collection" ] }, { @@ -109,32 +111,38 @@ } ], "source": [ - "#Landsat_composite in Dresden area\n", - "area_dresden = list([(13.6, 50.96), (13.9, 50.96), (13.9, 51.12), (13.6, 51.12), (13.6, 50.96)])\n", - "area_dresden = ee.Geometry.Polygon(area_dresden) \n", - "time_range_dresden = ['2002-07-28', '2002-08-05']\n", - 
"\n", - "collection_dresden = ('LANDSAT/LE07/C01/T1')\n", + "# Landsat_composite in Dresden area\n", + "area_dresden = list(\n", + " [(13.6, 50.96), (13.9, 50.96), (13.9, 51.12), (13.6, 51.12), (13.6, 50.96)]\n", + ")\n", + "area_dresden = ee.Geometry.Polygon(area_dresden)\n", + "time_range_dresden = [\"2002-07-28\", \"2002-08-05\"]\n", + "\n", + "collection_dresden = \"LANDSAT/LE07/C01/T1\"\n", "print(type(area_dresden))\n", "\n", - "#Population density in Switzerland\n", - "list_swiss = list([(6.72, 47.88),(6.72, 46.55),(9.72, 46.55),(9.72, 47.88),(6.72, 47.88)]) \n", - "area_swiss = ee.Geometry.Polygon(list_swiss) \n", - "time_range_swiss=['2002-01-01', '2005-12-30']\n", + "# Population density in Switzerland\n", + "list_swiss = list(\n", + " [(6.72, 47.88), (6.72, 46.55), (9.72, 46.55), (9.72, 47.88), (6.72, 47.88)]\n", + ")\n", + "area_swiss = ee.Geometry.Polygon(list_swiss)\n", + "time_range_swiss = [\"2002-01-01\", \"2005-12-30\"]\n", "\n", - "collection_swiss = ee.ImageCollection('CIESIN/GPWv4/population-density')\n", + "collection_swiss = ee.ImageCollection(\"CIESIN/GPWv4/population-density\")\n", "print(type(collection_swiss))\n", "\n", - "#Sentinel 2 cloud-free image in Zürich\n", - "collection_zurich = ('COPERNICUS/S2')\n", - "list_zurich = list([(8.53, 47.355),(8.55, 47.355),(8.55, 47.376),(8.53, 47.376),(8.53, 47.355)]) \n", - "area_zurich = ee.Geometry.Polygon(list_swiss) \n", - "time_range_zurich = ['2018-05-01', '2018-07-30']\n", + "# Sentinel 2 cloud-free image in Zürich\n", + "collection_zurich = \"COPERNICUS/S2\"\n", + "list_zurich = list(\n", + " [(8.53, 47.355), (8.55, 47.355), (8.55, 47.376), (8.53, 47.376), (8.53, 47.355)]\n", + ")\n", + "area_zurich = ee.Geometry.Polygon(list_swiss)\n", + "time_range_zurich = [\"2018-05-01\", \"2018-07-30\"]\n", "\n", "\n", - "#Landcover in Europe with CORINE dataset\n", - "dataset_landcover = ee.Image('COPERNICUS/CORINE/V18_5_1/100m/2012')\n", - "landCover_layer = dataset_landcover.select('landcover')\n", + "# Landcover in Europe with CORINE dataset\n", + "dataset_landcover = ee.Image(\"COPERNICUS/CORINE/V18_5_1/100m/2012\")\n", + "landCover_layer = dataset_landcover.select(\"landcover\")\n", "print(type(landCover_layer))" ] }, @@ -144,9 +152,9 @@ "metadata": {}, "outputs": [], "source": [ - "#Methods from climada.util.earth_engine module\n", + "# Methods from climada.util.earth_engine module\n", "def obtain_image_landsat_composite(collection, time_range, area):\n", - " \"\"\" Selection of Landsat cloud-free composites in the Earth Engine library\n", + " \"\"\"Selection of Landsat cloud-free composites in the Earth Engine library\n", " See also: https://developers.google.com/earth-engine/landsat\n", "\n", " Parameters:\n", @@ -156,7 +164,7 @@ "\n", " Returns:\n", " image_composite (ee.image.Image)\n", - " \"\"\"\n", + " \"\"\"\n", " collection = ee.ImageCollection(collection)\n", "\n", " ## Filter by time range and location\n", @@ -165,8 +173,9 @@ " image_composite = ee.Algorithms.Landsat.simpleComposite(image_area, 75, 3)\n", " return image_composite\n", "\n", + "\n", "def obtain_image_median(collection, time_range, area):\n", - " \"\"\" Selection of median from a collection of images in the Earth Engine library\n", + " \"\"\"Selection of median from a collection of images in the Earth Engine library\n", " See also: https://developers.google.com/earth-engine/reducers_image_collection\n", "\n", " Parameters:\n", @@ -176,7 +185,7 @@ "\n", " Returns:\n", " image_median (ee.image.Image)\n", - " \"\"\"\n", + " \"\"\"\n", " collection 
= ee.ImageCollection(collection)\n", "\n", " ## Filter by time range and location\n", @@ -185,8 +194,9 @@ " image_median = image_area.median()\n", " return image_median\n", "\n", + "\n", "def obtain_image_sentinel(collection, time_range, area):\n", - " \"\"\" Selection of median, cloud-free image from a collection of images in the Sentinel 2 dataset\n", + " \"\"\"Selection of median, cloud-free image from a collection of images in the Sentinel 2 dataset\n", " See also: https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S2\n", "\n", " Parameters:\n", @@ -196,24 +206,28 @@ "\n", " Returns:\n", " sentinel_median (ee.image.Image)\n", - " \"\"\"\n", - "#First, method to remove cloud from the image\n", + " \"\"\"\n", + "\n", + " # First, a method to remove clouds from the image\n", " def maskclouds(image):\n", - " band_qa = image.select('QA60')\n", + " band_qa = image.select(\"QA60\")\n", " cloud_mask = ee.Number(2).pow(10).int()\n", " cirrus_mask = ee.Number(2).pow(11).int()\n", - " mask = band_qa.bitwiseAnd(cloud_mask).eq(0) and(\n", - " band_qa.bitwiseAnd(cirrus_mask).eq(0))\n", + " # ee.Image.And gives the per-pixel logical AND of both masks\n", + " mask = band_qa.bitwiseAnd(cloud_mask).eq(0).And(\n", + " band_qa.bitwiseAnd(cirrus_mask).eq(0)\n", + " )\n", " return image.updateMask(mask).divide(10000)\n", "\n", - " sentinel_filtered = (ee.ImageCollection(collection).\n", - " filterBounds(area).\n", - " filterDate(time_range[0], time_range[1]).\n", - " filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 20)).\n", - " map(maskclouds))\n", + " sentinel_filtered = (\n", + " ee.ImageCollection(collection)\n", + " .filterBounds(area)\n", + " .filterDate(time_range[0], time_range[1])\n", + " .filter(ee.Filter.lt(\"CLOUDY_PIXEL_PERCENTAGE\", 20))\n", + " .map(maskclouds)\n", + " )\n", "\n", " sentinel_median = sentinel_filtered.median()\n", - " return sentinel_median\n" + " return sentinel_median" ] }, { @@ -232,13 +246,15 @@ } ], "source": [ - "#Application to examples\n", - "composite_dresden = obtain_image_landsat_composite(collection_dresden, time_range_dresden, area_dresden)\n", + "# Application to examples\n", + "composite_dresden = obtain_image_landsat_composite(\n", + " collection_dresden, time_range_dresden, area_dresden\n", + ")\n", "median_swiss = obtain_image_median(collection_swiss, time_range_swiss, area_swiss)\n", "zurich_median = obtain_image_sentinel(collection_zurich, time_range_zurich, area_zurich)\n", "\n", - "#Selection of specific bands from an image\n", - "zurich_band = zurich_median.select(['B4','B3','B2']) \n", + "# Selection of specific bands from an image\n", + "zurich_band = zurich_median.select([\"B4\", \"B3\", \"B2\"])\n", "\n", "\n", "print(composite_dresden.getInfo())\n", @@ -279,7 +295,7 @@ "\n", "region_dresden = get_region(area_dresden)\n", "region_swiss = get_region(area_swiss)\n", - "region_zurich= get_region(area_zurich)" + "region_zurich = get_region(area_zurich)" ] }, { @@ -321,24 +337,19 @@ "\n", " Returns:\n", " path (str)\n", - " \"\"\"\n", - " path = image.getDownloadURL({\n", - " 'name':(name),\n", - " 'scale': scale,\n", - " 'region':(region)\n", - " })\n", + " \"\"\"\n", + " path = image.getDownloadURL({\"name\": (name), \"scale\": scale, \"region\": (region)})\n", "\n", " webbrowser.open_new_tab(path)\n", " return path\n", "\n", - " \n", - " \n", - "url_swiss = get_url('swiss_pop', median_swiss, 900, region_swiss)\n", - "url_dresden = get_url('dresden', composite_dresden, 30, region_dresden)\n", - "url_landcover = get_url('landcover_swiss', landCover_layer, 100, region_swiss)\n", "\n", - "#For the example 
of Zürich, due to size, it doesn't work on Jupyter Notebook but it works on Python\n", - "#url_zurich = get_url('sentinel', zurich_band, 10, region_zurich)\n", + "url_swiss = get_url(\"swiss_pop\", median_swiss, 900, region_swiss)\n", + "url_dresden = get_url(\"dresden\", composite_dresden, 30, region_dresden)\n", + "url_landcover = get_url(\"landcover_swiss\", landCover_layer, 100, region_swiss)\n", + "\n", + "# The Zürich example is too large to run in a Jupyter notebook, but it works in a plain Python script\n", + "# url_zurich = get_url('sentinel', zurich_band, 10, region_zurich)\n", "\n", "print(url_swiss)\n", "print(url_dresden)\n", @@ -387,7 +398,7 @@ "from skimage.filters import try_all_threshold\n", "from skimage.filters import threshold_otsu, threshold_local\n", "from skimage import measure\n", - "from skimage import feature\n" + "from skimage import feature" ] }, { @@ -398,8 +409,8 @@ "source": [ "from climada.util import DEMO_DIR\n", "\n", - "swiss_pop = DEMO_DIR.joinpath('earth_engine', 'population-density_median.tif')\n", - "dresden = DEMO_DIR.joinpath('earth_engine', 'dresden.tif') #B4 of Dresden example\n" + "swiss_pop = DEMO_DIR.joinpath(\"earth_engine\", \"population-density_median.tif\")\n", + "dresden = DEMO_DIR.joinpath(\"earth_engine\", \"dresden.tif\") # B4 of Dresden example" ] }, { @@ -433,19 +444,19 @@ } ], "source": [ - "#Read a tif in python and Visualize the image\n", + "# Read a tif in Python and visualize the image\n", "image_dresden = imread(dresden)\n", "plt.figure(figsize=(10, 10))\n", - "plt.imshow(image_dresden, cmap='gray', interpolation='nearest')\n", + "plt.imshow(image_dresden, cmap=\"gray\", interpolation=\"nearest\")\n", "plt.axis()\n", "plt.show()\n", "\n", - "#Crop the image\n", - "image_dresden_crop=image_dresden[300:700,600:1400]\n", + "# Crop the image\n", + "image_dresden_crop = image_dresden[300:700, 600:1400]\n", "plt.figure(figsize=(10, 10))\n", - "plt.imshow(image_dresden_crop, cmap='gray', interpolation='nearest')\n", + "plt.imshow(image_dresden_crop, cmap=\"gray\", interpolation=\"nearest\")\n", "plt.axis()\n", - "plt.show()\n" + "plt.show()" ] }, { @@ -467,12 +478,12 @@ } ], "source": [ - "image_pop= imread(swiss_pop)\n", + "image_pop = imread(swiss_pop)\n", "plt.figure(figsize=(12, 12))\n", - "plt.imshow(image_pop, cmap='Reds', interpolation='nearest')\n", + "plt.imshow(image_pop, cmap=\"Reds\", interpolation=\"nearest\")\n", "plt.colorbar()\n", "plt.axis()\n", - "plt.show()\n" + "plt.show()" ] }, { @@ -501,7 +512,7 @@ } ], "source": [ - "#Thresholding: Selection of pixels with regards with their value\n", + "# Thresholding: selection of pixels according to their value\n", "\n", "global_thresh = threshold_otsu(image_dresden_crop)\n", "binary_global = image_dresden_crop > global_thresh\n", @@ -515,19 +526,19 @@ "plt.gray()\n", "\n", "ax[0].imshow(image_dresden_crop)\n", - "ax[0].set_title('Original')\n", + "ax[0].set_title(\"Original\")\n", "\n", "ax[1].imshow(binary_global)\n", - "ax[1].set_title('Global thresholding')\n", + "ax[1].set_title(\"Global thresholding\")\n", "\n", "ax[2].imshow(binary_adaptive)\n", - "ax[2].set_title('Adaptive thresholding')\n", + "ax[2].set_title(\"Adaptive thresholding\")\n", "\n", "for a in ax:\n", - " a.axis('off')\n", + " a.axis(\"off\")\n", "plt.show()\n", "\n", - "print(np.sum(binary_global))\n" + "print(np.sum(binary_global))" ] } ], diff --git a/doc/tutorial/climada_util_yearsets.ipynb b/doc/tutorial/climada_util_yearsets.ipynb index 747d29fcf2..9ead010193 100644 --- 
a/doc/tutorial/climada_util_yearsets.ipynb +++ b/doc/tutorial/climada_util_yearsets.ipynb @@ -40,11 +40,11 @@ "import climada.util.yearsets as yearsets\n", "from climada.engine import Impact\n", "\n", - "# dummy event_impacts object containing 10 event_impacts with the values 10-110 \n", + "# dummy event_impacts object containing 10 event_impacts with the values 10-100\n", "# and the frequency 0.2 (Return period of 5 years)\n", "imp = Impact()\n", - "imp.at_event = np.arange(10,110,10)\n", - "imp.frequency = np.array(np.ones(10)*0.2)\n", + "imp.at_event = np.arange(10, 110, 10)\n", + "imp.frequency = np.array(np.ones(10) * 0.2)\n", "\n", "# the number of years to sample impacts for (length(yimp.at_event) = sampled_years)\n", "sampled_years = 10\n", @@ -147,11 +147,13 @@ ], "source": [ "# compare the resulting yimp with our step-by-step computation without applying the correction factor:\n", - "yimp, sampling_vect = yearsets.impact_yearset(imp, sampled_years=list(range(1,11)), correction_fac=False)\n", + "yimp, sampling_vect = yearsets.impact_yearset(\n", + " imp, sampled_years=list(range(1, 11)), correction_fac=False\n", + ")\n", "\n", - "print('The yimp.at_event values equal our step-by-step computed imp_per_year:')\n", - "print('yimp.at_event = ', yimp.at_event)\n", - "print('imp_per_year = ', imp_per_year)" + "print(\"The yimp.at_event values equal our step-by-step computed imp_per_year:\")\n", + "print(\"yimp.at_event = \", yimp.at_event)\n", + "print(\"imp_per_year = \", imp_per_year)" ] }, { @@ -173,12 +175,14 @@ ], "source": [ "# and here the same comparison with applying the correction factor (default settings):\n", - "yimp, sampling_vect = yearsets.impact_yearset(imp, sampled_years=list(range(1,11)))\n", + "yimp, sampling_vect = yearsets.impact_yearset(imp, sampled_years=list(range(1, 11)))\n", "\n", - "print('The same can be shown for the case of applying the correction factor.' \n", - " 'The yimp.at_event values equal our step-by-step computed imp_per year:')\n", - "print('yimp.at_event = ', yimp.at_event)\n", - "print('imp_per_year = ', imp_per_year/correction_factor)" + "print(\n", + " \"The same can be shown for the case of applying the correction factor. \"\n", + " \"The yimp.at_event values equal our step-by-step computed imp_per_year:\"\n", + ")\n", + "print(\"yimp.at_event = \", yimp.at_event)\n", + "print(\"imp_per_year = \", imp_per_year / correction_factor)" ] } ], diff --git a/script/applications/eca_san_salvador/README.txt b/script/applications/eca_san_salvador/README.txt index e81b3188ee..7b3fa3df35 100644 --- a/script/applications/eca_san_salvador/README.txt +++ b/script/applications/eca_san_salvador/README.txt @@ -2,4 +2,4 @@ These notebooks show how to use CLIMADA in local case studies. The data shown was generated for the Economics of Climate Adaptation study developed with KfW in San Salvador, El Salvador. These represent only a partial outcome of the project. Execute first San_Salvador_Risk.ipynb and then San_Salvador_Adaptation.ipynb. -Contact Gabriela Aznar Siguan for any questions. +Contact Gabriela Aznar Siguan for any questions. 
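For quick reference, the yearsets cells reformatted above combine into the following minimal, self-contained sketch. It only restates the tutorial's own calls (the dummy Impact with ten events, impacts 10-100 and frequency 0.2, sampled into a ten-year yearset without the correction factor) and assumes a working CLIMADA installation:

    import numpy as np

    import climada.util.yearsets as yearsets
    from climada.engine import Impact

    # dummy impact set: 10 events with impacts 10, 20, ..., 100
    # and frequency 0.2 each, i.e. a 5-year return period
    imp = Impact()
    imp.at_event = np.arange(10, 110, 10)
    imp.frequency = np.ones(10) * 0.2

    # sample a 10-year yearset; sampling_vect records which events were
    # drawn into each sampled year
    yimp, sampling_vect = yearsets.impact_yearset(
        imp, sampled_years=list(range(1, 11)), correction_fac=False
    )
    print("sampled annual impacts:", yimp.at_event)
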
diff --git a/script/applications/eca_san_salvador/San_Salvador_Adaptacion.ipynb b/script/applications/eca_san_salvador/San_Salvador_Adaptacion.ipynb index 21fb05cdbf..5a50f09d50 100644 --- a/script/applications/eca_san_salvador/San_Salvador_Adaptacion.ipynb +++ b/script/applications/eca_san_salvador/San_Salvador_Adaptacion.ipynb @@ -128,18 +128,20 @@ "import contextily as ctx\n", "from climada.entity import Entity\n", "\n", - "ent_2015 = Entity.from_excel('FL_entity_Acelhuate_houses.xlsx')\n", - "ent_2015.exposures.ref_year = 2015 # fijamos el año de referencia\n", + "ent_2015 = Entity.from_excel(\"FL_entity_Acelhuate_houses.xlsx\")\n", + "ent_2015.exposures.ref_year = 2015 # fijamos el año de referencia\n", "ent_2015.check()\n", "\n", "# Exposures (bienes): los utilizados en el script San Salvador Risk\n", - "print('Total value in 2015: {:.3e}'.format(ent_2015.exposures.gdf.value.sum()))\n", - "ax = ent_2015.exposures.plot_basemap(s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap='autumn')\n", - "ax.set_title('Exposure 2015')\n", + "print(\"Total value in 2015: {:.3e}\".format(ent_2015.exposures.gdf.value.sum()))\n", + "ax = ent_2015.exposures.plot_basemap(\n", + " s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap=\"autumn\"\n", + ")\n", + "ax.set_title(\"Exposure 2015\")\n", "\n", "# Impact Functions (funciones de impacto): los utilizados en el script San Salvador Risk\n", - "ent_2015.impact_funcs.get_func('FL', 101).plot()\n", - "ent_2015.impact_funcs.get_func('FL', 102).plot()\n", + "ent_2015.impact_funcs.get_func(\"FL\", 101).plot()\n", + "ent_2015.impact_funcs.get_func(\"FL\", 102).plot()\n", "\n", "# Discount rate (tasas de descuento): 2% anual hasta 2040\n", "ent_2015.disc_rates.plot();\n", @@ -230,12 +232,16 @@ "# Exposures (bienes): crecimiento anual del 2%\n", "ent_2040.exposures.ref_year = 2040\n", "growth = 0.02\n", - "ent_2040.exposures.gdf['value'] = ent_2040.exposures.gdf.value.values*(1 + growth)**(ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", - "ent_2040.check() # check values are well set and assignes default values\n", + "ent_2040.exposures.gdf[\"value\"] = ent_2040.exposures.gdf.value.values * (\n", + " 1 + growth\n", + ") ** (ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", + "ent_2040.check() # check values are well set and assigns default values\n", "\n", - "print('Valor total en 2040: {:.3e}'.format(ent_2040.exposures.gdf.value.sum()))\n", - "ax = ent_2040.exposures.plot_basemap(s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap='autumn')\n", - "ax.set_title('Exposure 2040');" + "print(\"Valor total en 2040: {:.3e}\".format(ent_2040.exposures.gdf.value.sum()))\n", + "ax = ent_2040.exposures.plot_basemap(\n", + " s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap=\"autumn\"\n", + ")\n", + "ax.set_title(\"Exposure 2040\");" ] }, { @@ -286,11 +292,13 @@ "import matplotlib.patches as patches\n", "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2015.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2015.mat\"\n", "\n", - "haz_2015 = Hazard.from_mat(HAZ_FILE) # cargamos el fichero\n", + "haz_2015 = Hazard.from_mat(HAZ_FILE) # cargamos el fichero\n", "ax = haz_2015.plot_intensity(0) # intensidad máxima alcanzada en cada punto\n", - "rect = patches.Rectangle((-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor='r', facecolor='none') # add exposures focus\n", + "rect = patches.Rectangle(\n", + " (-0.0027, 13.6738), 0.0355, 0.0233, 
linewidth=1, edgecolor=\"r\", facecolor=\"none\"\n", + ") # add exposures focus\n", "ax.add_patch(rect);" ] }, @@ -348,11 +356,13 @@ "# inundaciones en 2040 bajo un fuerte cambio climático\n", "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2040_extreme_cc.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2040_extreme_cc.mat\"\n", "\n", - "haz_2040 = Hazard.from_mat(HAZ_FILE) # cargamos el fichero\n", + "haz_2040 = Hazard.from_mat(HAZ_FILE) # cargamos el fichero\n", "ax = haz_2040.plot_intensity(0) # intensidad máxima alcanzada en cada punto\n", - "rect = patches.Rectangle((-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor='r', facecolor='none') # add exposures focus\n", + "rect = patches.Rectangle(\n", + " (-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor=\"r\", facecolor=\"none\"\n", + ") # add exposures focus\n", "ax.add_patch(rect);" ] }, @@ -410,7 +420,7 @@ "\n", "cb_acel = CostBenefit()\n", "ax = cb_acel.plot_waterfall(haz_2015, ent_2015, haz_2040, ent_2040)\n", - "ax.set_title('Expected Annual Impact');" + "ax.set_title(\"Expected Annual Impact\");" ] }, { @@ -460,8 +470,10 @@ "from climada.engine import risk_rp_100\n", "\n", "cb_acel = CostBenefit()\n", - "ax = cb_acel.plot_waterfall(haz_2015, ent_2015, haz_2040, ent_2040, risk_func=risk_rp_100)\n", - "ax.set_title('Impact Exceedance at 100 years Return Period');" + "ax = cb_acel.plot_waterfall(\n", + " haz_2015, ent_2015, haz_2040, ent_2040, risk_func=risk_rp_100\n", + ")\n", + "ax.set_title(\"Impact Exceedance at 100-year Return Period\");" ] }, { @@ -524,22 +536,25 @@ ], "source": [ "# impacto de la medida en 2015: No descargas en Lluvia\n", - "meas = ent_2015.measures.get_measure('FL', 'No descargas en Lluvia')\n", - "print('Measure cost {:.3e} USD'.format(meas.cost))\n", - "meas_exp_2015, meas_impf_2015, meas_haz_2015 = meas.apply(ent_2015.exposures, ent_2015.impact_funcs, haz_2015)\n", + "meas = ent_2015.measures.get_measure(\"FL\", \"No descargas en Lluvia\")\n", + "print(\"Measure cost {:.3e} USD\".format(meas.cost))\n", + "meas_exp_2015, meas_impf_2015, meas_haz_2015 = meas.apply(\n", + " ent_2015.exposures, ent_2015.impact_funcs, haz_2015\n", + ")\n", "\n", "# los bienes no cambian\n", "\n", "# las funciones de impacto mejoran ligeramente:\n", - "ax = meas_impf_2015.get_func('FL', 101).plot()\n", - "ax.set_title('Flooding AUP House with measure')\n", + "ax = meas_impf_2015.get_func(\"FL\", 101).plot()\n", + "ax.set_title(\"Flooding AUP House with measure\")\n", "\n", "# inundación reducida hasta 4.8 metros en los eventos más graves:\n", "import numpy as np\n", + "\n", "haz_diff = copy.deepcopy(haz_2015)\n", - "haz_diff.intensity = (haz_2015.intensity - meas_haz_2015.intensity)\n", - "ax = haz_diff.plot_intensity(0) # maximum intensity difference at each point\n", - "ax.set_title('Maximumm reduced intensity with measure');" + "haz_diff.intensity = haz_2015.intensity - meas_haz_2015.intensity\n", + "ax = haz_diff.plot_intensity(0) # maximum intensity difference at each point\n", + "ax.set_title(\"Maximum reduced intensity with measure\");" ] }, { @@ -569,7 +584,7 @@ ], "source": [ "# nombre de cada medida considerada\n", - "for meas in ent_2040.measures.get_measure('FL'): # measures related to flood (FL)\n", + "for meas in ent_2040.measures.get_measure(\"FL\"): # measures related to flood (FL)\n", " print(meas.name)" ] }, @@ -668,8 +683,8 @@ ], "source": [ "cb_acel = CostBenefit()\n", - "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # calcula\n", - 
"cb_acel.plot_cost_benefit(); # dibuja el cociente beneficio/costo por medida" + "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # calcula\n", + "cb_acel.plot_cost_benefit(); # dibuja el cociente beneficio/costo por medida" ] }, { @@ -718,8 +733,13 @@ ], "source": [ "import matplotlib.colors as colors\n", - "cb_comb = cb_acel.combine_measures(['No descargas en Lluvia', 'Ahorradores en Agua en casas'],\n", - " 'Domestico', colors.to_rgb('lightcoral'), ent_2015.disc_rates)" + "\n", + "cb_comb = cb_acel.combine_measures(\n", + " [\"No descargas en Lluvia\", \"Ahorradores en Agua en casas\"],\n", + " \"Domestico\",\n", + " colors.to_rgb(\"lightcoral\"),\n", + " ent_2015.disc_rates,\n", + ")" ] }, { @@ -765,8 +785,14 @@ } ], "source": [ - "cb_comb.apply_risk_transfer('Domestico', attachment=1000, cover=22000000, \n", - " disc_rates=ent_2015.disc_rates, cost_fix=0, cost_factor=1.5)" + "cb_comb.apply_risk_transfer(\n", + " \"Domestico\",\n", + " attachment=1000,\n", + " cover=22000000,\n", + " disc_rates=ent_2015.disc_rates,\n", + " cost_fix=0,\n", + " cost_factor=1.5,\n", + ")" ] }, { @@ -859,7 +885,9 @@ ], "source": [ "ax = cb_acel.plot_waterfall_accumulated(haz_2015, ent_2015, ent_2040)\n", - "cb_acel.plot_arrow_averted(ax, accumulate=True, combine=True, disc_rates=ent_2015.disc_rates) # plot total averted damages" + "cb_acel.plot_arrow_averted(\n", + " ax, accumulate=True, combine=True, disc_rates=ent_2015.disc_rates\n", + ") # plot total averted damages" ] }, { @@ -893,6 +921,7 @@ "source": [ "# show difference between sublinear, linear and superlinear expected annual damage growth\n", "import functions_ss\n", + "\n", "functions_ss.non_linear_growth(cb_acel)" ] }, @@ -996,10 +1025,14 @@ ], "source": [ "# change growth\n", - "growth_fact = 0.5 # < 1: sublinear, >1: superlinear\n", + "growth_fact = 0.5 # < 1: sublinear, >1: superlinear\n", "cb_acel_sub = CostBenefit()\n", - "cb_acel_sub.calc(haz_2015, ent_2015, haz_2040, ent_2040, imp_time_depen=growth_fact, save_imp=True)\n", - "cb_acel_sub.plot_waterfall_accumulated(haz_2015, ent_2015, ent_2040, imp_time_depen=growth_fact);" + "cb_acel_sub.calc(\n", + " haz_2015, ent_2015, haz_2040, ent_2040, imp_time_depen=growth_fact, save_imp=True\n", + ")\n", + "cb_acel_sub.plot_waterfall_accumulated(\n", + " haz_2015, ent_2015, ent_2040, imp_time_depen=growth_fact\n", + ");" ] }, { diff --git a/script/applications/eca_san_salvador/San_Salvador_Adaptation.ipynb b/script/applications/eca_san_salvador/San_Salvador_Adaptation.ipynb index 0701e47598..98388d9913 100644 --- a/script/applications/eca_san_salvador/San_Salvador_Adaptation.ipynb +++ b/script/applications/eca_san_salvador/San_Salvador_Adaptation.ipynb @@ -92,22 +92,25 @@ ], "source": [ "from warnings import simplefilter\n", - "simplefilter(action='ignore')\n", + "\n", + "simplefilter(action=\"ignore\")\n", "import contextily as ctx\n", "from climada.entity import Entity\n", "\n", - "ent_2015 = Entity.from_excel('FL_entity_Acelhuate_houses.xlsx')\n", - "ent_2015.exposures.ref_year = 2015 # fix reference year\n", + "ent_2015 = Entity.from_excel(\"FL_entity_Acelhuate_houses.xlsx\")\n", + "ent_2015.exposures.ref_year = 2015 # fix reference year\n", "ent_2015.check()\n", "\n", "# Exposures: the ones we used in San Salvador Risk script\n", - "print('Total value in 2015: {:.3e}'.format(ent_2015.exposures.gdf.value.sum()))\n", - "ax = ent_2015.exposures.plot_basemap(s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap='autumn')\n", - "ax.set_title('Exposure 2015')\n", 
+ "print(\"Total value in 2015: {:.3e}\".format(ent_2015.exposures.gdf.value.sum()))\n", + "ax = ent_2015.exposures.plot_basemap(\n", + " s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap=\"autumn\"\n", + ")\n", + "ax.set_title(\"Exposure 2015\")\n", "\n", "# Impact Functions: the ones we used in San Salvador Risk script\n", - "ent_2015.impact_funcs.get_func('FL', 101).plot()\n", - "ent_2015.impact_funcs.get_func('FL', 102).plot()\n", + "ent_2015.impact_funcs.get_func(\"FL\", 101).plot()\n", + "ent_2015.impact_funcs.get_func(\"FL\", 102).plot()\n", "\n", "# Discount rate: 2% yearly discount year until 2040\n", "ent_2015.disc_rates.plot();\n", @@ -165,12 +168,16 @@ "# Exposures: yearl economic growth of 2% in exposures\n", "ent_2040.exposures.ref_year = 2040\n", "growth = 0.02\n", - "ent_2040.exposures.gdf['value'] = ent_2040.exposures.gdf.value.values*(1 + growth)**(ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", - "ent_2040.check() # check values are well set and assignes default values\n", + "ent_2040.exposures.gdf[\"value\"] = ent_2040.exposures.gdf.value.values * (\n", + " 1 + growth\n", + ") ** (ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", + "ent_2040.check() # check values are well set and assignes default values\n", "\n", - "print('Total value in 2040: {:.3e}'.format(ent_2040.exposures.gdf.value.sum()))\n", - "ax = ent_2040.exposures.plot_basemap(s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap='autumn')\n", - "ax.set_title('Exposure 2040');" + "print(\"Total value in 2040: {:.3e}\".format(ent_2040.exposures.gdf.value.sum()))\n", + "ax = ent_2040.exposures.plot_basemap(\n", + " s=1, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, vmax=60000, cmap=\"autumn\"\n", + ")\n", + "ax.set_title(\"Exposure 2040\");" ] }, { @@ -212,11 +219,13 @@ "import matplotlib.patches as patches\n", "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2015.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2015.mat\"\n", "\n", - "haz_2015 = Hazard.from_mat(HAZ_FILE) # load file\n", + "haz_2015 = Hazard.from_mat(HAZ_FILE) # load file\n", "ax = haz_2015.plot_intensity(0) # maximum intensity reached at each point\n", - "rect = patches.Rectangle((-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor='r', facecolor='none') # add exposures focus\n", + "rect = patches.Rectangle(\n", + " (-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor=\"r\", facecolor=\"none\"\n", + ") # add exposures focus\n", "ax.add_patch(rect);" ] }, @@ -265,11 +274,13 @@ "# flood as for 2040 with extreme climate change\n", "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2040_extreme_cc.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2040_extreme_cc.mat\"\n", "\n", - "haz_2040 = Hazard.from_mat(HAZ_FILE) # load file\n", + "haz_2040 = Hazard.from_mat(HAZ_FILE) # load file\n", "ax = haz_2040.plot_intensity(0) # maximum intensity reached at each point\n", - "rect = patches.Rectangle((-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor='r', facecolor='none') # add exposures focus\n", + "rect = patches.Rectangle(\n", + " (-0.0027, 13.6738), 0.0355, 0.0233, linewidth=1, edgecolor=\"r\", facecolor=\"none\"\n", + ") # add exposures focus\n", "ax.add_patch(rect);" ] }, @@ -310,7 +321,7 @@ "\n", "cb_acel = CostBenefit()\n", "ax = cb_acel.plot_waterfall(haz_2015, ent_2015, haz_2040, ent_2040)\n", - "ax.set_title('Expected Annual Impact');" + "ax.set_title(\"Expected Annual Impact\");" ] }, { @@ -345,8 +356,10 @@ 
"from climada.engine import risk_rp_100\n", "\n", "cb_acel = CostBenefit()\n", - "ax = cb_acel.plot_waterfall(haz_2015, ent_2015, haz_2040, ent_2040, risk_func=risk_rp_100)\n", - "ax.set_title('Impact Exceedance at 100 years Return Period');" + "ax = cb_acel.plot_waterfall(\n", + " haz_2015, ent_2015, haz_2040, ent_2040, risk_func=risk_rp_100\n", + ")\n", + "ax.set_title(\"Impact Exceedance at 100 years Return Period\");" ] }, { @@ -400,22 +413,25 @@ ], "source": [ "# Measure impact in 2015: No descargas en Lluvia\n", - "meas = ent_2015.measures.get_measure('FL', 'No descargas en Lluvia')\n", - "print('Measure cost {:.3e} USD'.format(meas.cost))\n", - "meas_exp_2015, meas_impf2015, meas_haz_2015 = meas.apply(ent_2015.exposures, ent_2015.impact_funcs, haz_2015)\n", + "meas = ent_2015.measures.get_measure(\"FL\", \"No descargas en Lluvia\")\n", + "print(\"Measure cost {:.3e} USD\".format(meas.cost))\n", + "meas_exp_2015, meas_impf2015, meas_haz_2015 = meas.apply(\n", + " ent_2015.exposures, ent_2015.impact_funcs, haz_2015\n", + ")\n", "\n", "# exposures stay the same\n", "\n", "# impact functions slightly improved:\n", - "ax = meas_impf2015.get_func('FL', 101).plot()\n", - "ax.set_title('Flooding AUP House with measure')\n", + "ax = meas_impf2015.get_func(\"FL\", 101).plot()\n", + "ax.set_title(\"Flooding AUP House with measure\")\n", "\n", "# flood reduced up to 4.8 meters in worst events:\n", "import numpy as np\n", + "\n", "haz_diff = copy.deepcopy(haz_2015)\n", - "haz_diff.intensity = (haz_2015.intensity - meas_haz_2015.intensity)\n", - "ax = haz_diff.plot_intensity(0) # maximum intensity difference at each point\n", - "ax.set_title('Maximumm reduced intensity with measure');" + "haz_diff.intensity = haz_2015.intensity - meas_haz_2015.intensity\n", + "ax = haz_diff.plot_intensity(0) # maximum intensity difference at each point\n", + "ax.set_title(\"Maximumm reduced intensity with measure\");" ] }, { @@ -445,7 +461,7 @@ ], "source": [ "# name of every considered measure\n", - "for meas in ent_2040.measures.get_measure('FL'): # measures related to flood (FL)\n", + "for meas in ent_2040.measures.get_measure(\"FL\"): # measures related to flood (FL)\n", " print(meas.name)" ] }, @@ -491,8 +507,8 @@ ], "source": [ "cb_acel = CostBenefit()\n", - "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # compute\n", - "cb_acel.plot_cost_benefit(); # plot benefit/cost ratio per measure" + "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # compute\n", + "cb_acel.plot_cost_benefit(); # plot benefit/cost ratio per measure" ] }, { @@ -541,8 +557,13 @@ ], "source": [ "import matplotlib.colors as colors\n", - "cb_comb = cb_acel.combine_measures(['No descargas en Lluvia', 'Ahorradores en Agua en casas'],\n", - " 'Domestico', colors.to_rgb('lightcoral'), ent_2015.disc_rates)" + "\n", + "cb_comb = cb_acel.combine_measures(\n", + " [\"No descargas en Lluvia\", \"Ahorradores en Agua en casas\"],\n", + " \"Domestico\",\n", + " colors.to_rgb(\"lightcoral\"),\n", + " ent_2015.disc_rates,\n", + ")" ] }, { @@ -588,8 +609,14 @@ } ], "source": [ - "cb_comb.apply_risk_transfer('Domestico', attachment=1000, cover=22000000, \n", - " disc_rates=ent_2015.disc_rates, cost_fix=0, cost_factor=1.5)" + "cb_comb.apply_risk_transfer(\n", + " \"Domestico\",\n", + " attachment=1000,\n", + " cover=22000000,\n", + " disc_rates=ent_2015.disc_rates,\n", + " cost_fix=0,\n", + " cost_factor=1.5,\n", + ")" ] }, { @@ -674,7 +701,9 @@ ], "source": [ "ax = cb_acel.plot_waterfall_accumulated(haz_2015, 
ent_2015, ent_2040)\n", - "cb_acel.plot_arrow_averted(ax, accumulate=True, combine=True, disc_rates=ent_2015.disc_rates); # plot total averted damages" + "cb_acel.plot_arrow_averted(\n", + " ax, accumulate=True, combine=True, disc_rates=ent_2015.disc_rates\n", + "); # plot total averted damages" ] }, { @@ -705,6 +734,7 @@ "source": [ "# show difference between sublinear, linear and superlinear expected annual damage growth\n", "import functions_ss\n", + "\n", "functions_ss.non_linear_growth(cb_acel)" ] }, @@ -750,10 +780,14 @@ ], "source": [ "# change growth\n", - "growth_fact = 0.5 # < 1: sublinear, >1: superlinear\n", + "growth_fact = 0.5 # < 1: sublinear, >1: superlinear\n", "cb_acel_sub = CostBenefit()\n", - "cb_acel_sub.calc(haz_2015, ent_2015, haz_2040, ent_2040, imp_time_depen=growth_fact, save_imp=True)\n", - "cb_acel_sub.plot_waterfall_accumulated(haz_2015, ent_2015, ent_2040, imp_time_depen=growth_fact);" + "cb_acel_sub.calc(\n", + " haz_2015, ent_2015, haz_2040, ent_2040, imp_time_depen=growth_fact, save_imp=True\n", + ")\n", + "cb_acel_sub.plot_waterfall_accumulated(\n", + " haz_2015, ent_2015, ent_2040, imp_time_depen=growth_fact\n", + ");" ] }, { diff --git a/script/applications/eca_san_salvador/San_Salvador_Parametric.ipynb b/script/applications/eca_san_salvador/San_Salvador_Parametric.ipynb index 3cafb8b3c5..360be75115 100644 --- a/script/applications/eca_san_salvador/San_Salvador_Parametric.ipynb +++ b/script/applications/eca_san_salvador/San_Salvador_Parametric.ipynb @@ -44,13 +44,13 @@ "import contextily as ctx\n", "from climada.engine import Impact\n", "\n", - "ent_2015_param = Entity.from_excel('FL_entity_Acelhuate_parametric.xlsx')\n", - "ent_2015_param.exposures.ref_year = 2015 # fix reference year\n", + "ent_2015_param = Entity.from_excel(\"FL_entity_Acelhuate_parametric.xlsx\")\n", + "ent_2015_param.exposures.ref_year = 2015 # fix reference year\n", "ent_2015_param.check()\n", "\n", "# flood as for 2015\n", - "HAZ_FILE = 'Salvador_hazard_FL_2015.mat'\n", - "haz_2015 = Hazard.from_mat(HAZ_FILE) # load file" + "HAZ_FILE = \"Salvador_hazard_FL_2015.mat\"\n", + "haz_2015 = Hazard.from_mat(HAZ_FILE) # load file" ] }, { @@ -97,9 +97,13 @@ ], "source": [ "param_payout = Impact()\n", - "param_payout.calc(ent_2015_param.exposures, ent_2015_param.impact_funcs, haz_2015) # compute parametric payout\n", - "print('Annual expected payout: {:} USD'.format(param_payout.aai_agg)) # get average annual payout\n", - "param_payout.calc_freq_curve().plot() " + "param_payout.calc(\n", + " ent_2015_param.exposures, ent_2015_param.impact_funcs, haz_2015\n", + ") # compute parametric payout\n", + "print(\n", + " \"Annual expected payout: {:} USD\".format(param_payout.aai_agg)\n", + ") # get average annual payout\n", + "param_payout.calc_freq_curve().plot()" ] }, { @@ -163,8 +167,8 @@ } ], "source": [ - "ent_2015 = Entity.from_excel('FL_entity_Acelhuate_houses.xlsx')\n", - "ent_2015.exposures.ref_year = 2015 # fix reference year\n", + "ent_2015 = Entity.from_excel(\"FL_entity_Acelhuate_houses.xlsx\")\n", + "ent_2015.exposures.ref_year = 2015 # fix reference year\n", "ent_2015.check()\n", "\n", "ent_2040 = copy.deepcopy(ent_2015)\n", @@ -172,19 +176,25 @@ "# Exposures: yearly economic growth of 2% in exposures\n", "ent_2040.exposures.ref_year = 2040\n", "growth = 0.02\n", - "ent_2040.exposures.gdf['value'] = ent_2040.exposures.gdf.value.values*(1 + growth)**(ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", - "ent_2040.check() # check values are well set and assignes default 
values\n", + "ent_2040.exposures.gdf[\"value\"] = ent_2040.exposures.gdf.value.values * (\n", + " 1 + growth\n", + ") ** (ent_2040.exposures.ref_year - ent_2015.exposures.ref_year)\n", + "ent_2040.check() # check values are well set and assignes default values\n", "\n", "# flood as for 2040 with extreme climate change\n", - "HAZ_FILE = 'Salvador_hazard_FL_2040_extreme_cc.mat'\n", - "haz_2040 = Hazard.from_mat(HAZ_FILE) # load file\n", + "HAZ_FILE = \"Salvador_hazard_FL_2040_extreme_cc.mat\"\n", + "haz_2040 = Hazard.from_mat(HAZ_FILE) # load file\n", "\n", "# expected annual impact\n", "cb_acel = CostBenefit()\n", - "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # compute\n", + "cb_acel.calc(haz_2015, ent_2015, haz_2040, ent_2040, save_imp=True) # compute\n", "\n", - "cb_comb = cb_acel.combine_measures(['No descargas en Lluvia', 'Ahorradores en Agua en casas'],\n", - " 'Domestico', colors.to_rgb('lightcoral'), ent_2015.disc_rates)" + "cb_comb = cb_acel.combine_measures(\n", + " [\"No descargas en Lluvia\", \"Ahorradores en Agua en casas\"],\n", + " \"Domestico\",\n", + " colors.to_rgb(\"lightcoral\"),\n", + " ent_2015.disc_rates,\n", + ")" ] }, { @@ -208,10 +218,12 @@ } ], "source": [ - "damage_after_measures=cb_comb.imp_meas_present['Domestico']['impact'].at_event\n", - "paramteric_payout=param_payout.at_event\n", - "residual_damage=np.sum((damage_after_measures-paramteric_payout)*haz_2015.frequency)\n", - "print('residual damage: {:.3e} USD'.format(residual_damage))" + "damage_after_measures = cb_comb.imp_meas_present[\"Domestico\"][\"impact\"].at_event\n", + "paramteric_payout = param_payout.at_event\n", + "residual_damage = np.sum(\n", + " (damage_after_measures - paramteric_payout) * haz_2015.frequency\n", + ")\n", + "print(\"residual damage: {:.3e} USD\".format(residual_damage))" ] } ], diff --git a/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb b/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb index 29da95b789..b73180b385 100644 --- a/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb +++ b/script/applications/eca_san_salvador/San_Salvador_Risk.ipynb @@ -16,6 +16,7 @@ "%%capture\n", "# generate plots used in this script\n", "import functions_ss\n", + "\n", "fig_ma, fig_point, fig_houses, fig_if = functions_ss.generate_plots_risk()" ] }, @@ -245,7 +246,7 @@ } ], "source": [ - "acc_df = functions_ss.load_accounting() # load accounting.xlsx\n", + "acc_df = functions_ss.load_accounting() # load accounting.xlsx\n", "acc_df.head()" ] }, @@ -424,10 +425,10 @@ "import pandas as pd\n", "from climada.entity import Exposures\n", "\n", - "ENT_FILE = 'FL_entity_Acelhuate_houses.xlsx' # entity file name\n", + "ENT_FILE = \"FL_entity_Acelhuate_houses.xlsx\" # entity file name\n", "\n", "exp_acel = Exposures(pd.read_excel(ENT_FILE))\n", - "exp_acel.check() # check values are well set and assigns default values\n", + "exp_acel.check() # check values are well set and assigns default values\n", "exp_acel.gdf.head() # show first 5 rows" ] }, @@ -459,8 +460,12 @@ ], "source": [ "# some statistics on AUPs and non AUPs\n", - "print('Number of houses, mean and total value of AUP and non AUP: \\n')\n", - "print(exp_acel.gdf[['category', 'value']].groupby('category').agg(['count', 'mean', 'sum']))" + "print(\"Number of houses, mean and total value of AUP and non AUP: \\n\")\n", + "print(\n", + " exp_acel.gdf[[\"category\", \"value\"]]\n", + " .groupby(\"category\")\n", + " .agg([\"count\", \"mean\", \"sum\"])\n", + ")" ] }, { @@ -488,7 +493,7 @@ } ], 
"source": [ - "print(exp_acel.gdf[['category', 'impf_FL']].groupby('category').agg(['unique']))" + "print(exp_acel.gdf[[\"category\", \"impf_FL\"]].groupby(\"category\").agg([\"unique\"]))" ] }, { @@ -551,9 +556,11 @@ "impf_acel = ImpactFuncSet.from_excel(ENT_FILE)\n", "impf_acel.check()\n", "\n", - "print('MDD: mean damage ratio; PAA: percentage of afected assets; MDR = PAA*MDD: mean damage ratio:')\n", - "impf_acel.get_func('FL', 101).plot() # plot flood function 101\n", - "impf_acel.get_func('FL', 102).plot(); # plot flood function 102" + "print(\n", + " \"MDD: mean damage ratio; PAA: percentage of afected assets; MDR = PAA*MDD: mean damage ratio:\"\n", + ")\n", + "impf_acel.get_func(\"FL\", 101).plot() # plot flood function 101\n", + "impf_acel.get_func(\"FL\", 102).plot(); # plot flood function 102" ] }, { @@ -573,9 +580,9 @@ "source": [ "from climada.hazard import Hazard\n", "\n", - "HAZ_FILE = 'Salvador_hazard_FL_2015.mat'\n", + "HAZ_FILE = \"Salvador_hazard_FL_2015.mat\"\n", "\n", - "haz_acel = Hazard.from_mat(HAZ_FILE) # load file" + "haz_acel = Hazard.from_mat(HAZ_FILE) # load file" ] }, { @@ -680,7 +687,7 @@ "from climada.engine import Impact\n", "\n", "imp_acel = Impact()\n", - "imp_acel.calc(exp_acel, impf_acel, haz_acel) # compute hazard's impact over exposure" + "imp_acel.calc(exp_acel, impf_acel, haz_acel) # compute hazard's impact over exposure" ] }, { @@ -719,8 +726,10 @@ } ], "source": [ - "print('Annual expected impact: {:.3e} USD'.format(imp_acel.aai_agg)) # get average annual impact\n", - "imp_acel.calc_freq_curve().plot(); # plot exceedance frequency curve" + "print(\n", + " \"Annual expected impact: {:.3e} USD\".format(imp_acel.aai_agg)\n", + ") # get average annual impact\n", + "imp_acel.calc_freq_curve().plot(); # plot exceedance frequency curve" ] }, { @@ -748,7 +757,11 @@ "point_lat = exp_acel.gdf.latitude.values[point_idx]\n", "point_lon = exp_acel.gdf.longitude.values[point_idx]\n", "point_eai = imp_acel.eai_exp[point_idx]\n", - "print('Annual expected impact in {:.4f}° N {:.4f}° W is {:.0f} USD.'.format(-point_lat, point_lon, point_eai))" + "print(\n", + " \"Annual expected impact in {:.4f}° N {:.4f}° W is {:.0f} USD.\".format(\n", + " -point_lat, point_lon, point_eai\n", + " )\n", + ")" ] }, { @@ -796,7 +809,10 @@ ], "source": [ "import contextily as ctx\n", - "imp_acel.plot_basemap_eai_exposure(url=ctx.providers.OpenStreetMap.Mapnik, zoom=15, s=2, cmap='gnuplot');" + "\n", + "imp_acel.plot_basemap_eai_exposure(\n", + " url=ctx.providers.OpenStreetMap.Mapnik, zoom=15, s=2, cmap=\"gnuplot\"\n", + ");" ] }, { @@ -837,8 +853,15 @@ ], "source": [ "import numpy as np\n", - "ax = imp_acel.plot_basemap_eai_exposure(mask=np.argwhere((exp_acel.gdf.category==2).to_numpy()).reshape(-1), url=ctx.providers.OpenStreetMap.Mapnik, zoom=15, s=2, cmap='gnuplot')\n", - "ax.set_title('Expected Annual Impact - no AUP');" + "\n", + "ax = imp_acel.plot_basemap_eai_exposure(\n", + " mask=np.argwhere((exp_acel.gdf.category == 2).to_numpy()).reshape(-1),\n", + " url=ctx.providers.OpenStreetMap.Mapnik,\n", + " zoom=15,\n", + " s=2,\n", + " cmap=\"gnuplot\",\n", + ")\n", + "ax.set_title(\"Expected Annual Impact - no AUP\");" ] }, { @@ -878,8 +901,14 @@ } ], "source": [ - "ax = imp_acel.plot_basemap_eai_exposure(mask=np.argwhere((exp_acel.gdf.category==1).to_numpy()).reshape(-1), url=ctx.providers.OpenStreetMap.Mapnik, zoom=15, s=2, cmap='gnuplot')\n", - "ax.set_title('Expected Annual Impact - AUP');" + "ax = imp_acel.plot_basemap_eai_exposure(\n", + " 
mask=np.argwhere((exp_acel.gdf.category == 1).to_numpy()).reshape(-1),\n", + " url=ctx.providers.OpenStreetMap.Mapnik,\n", + " zoom=15,\n", + " s=2,\n", + " cmap=\"gnuplot\",\n", + ")\n", + "ax.set_title(\"Expected Annual Impact - AUP\");" ] }, { @@ -906,15 +935,21 @@ } ], "source": [ - "eai_aup = imp_acel.eai_exp[exp_acel.gdf[exp_acel.gdf.category==1].index].sum()\n", - "print('Annual expected impact of AUPs: {:.3e} USD.'.format(eai_aup))\n", - "eai_per_aup = eai_aup/exp_acel.gdf[exp_acel.gdf.category==1].value.sum()*100\n", - "print('Annual expected impact of AUPs over its total value: {:.2f}%.'.format(eai_per_aup))\n", + "eai_aup = imp_acel.eai_exp[exp_acel.gdf[exp_acel.gdf.category == 1].index].sum()\n", + "print(\"Annual expected impact of AUPs: {:.3e} USD.\".format(eai_aup))\n", + "eai_per_aup = eai_aup / exp_acel.gdf[exp_acel.gdf.category == 1].value.sum() * 100\n", + "print(\n", + " \"Annual expected impact of AUPs over their total value: {:.2f}%.\".format(eai_per_aup)\n", + ")\n", "\n", - "eai_no_aup = imp_acel.eai_exp[exp_acel.gdf[exp_acel.gdf.category==2].index].sum()\n", - "print('Annual expected impact of non AUPs: {:.3e} USD.'.format(eai_no_aup))\n", - "eai_per_no_aup = eai_no_aup/exp_acel.gdf[exp_acel.gdf.category==1].value.sum()*100\n", - "print('Annual expected impact of non AUPs over its total value: {:.2f}%.'.format(eai_per_no_aup))" + "eai_no_aup = imp_acel.eai_exp[exp_acel.gdf[exp_acel.gdf.category == 2].index].sum()\n", + "print(\"Annual expected impact of non AUPs: {:.3e} USD.\".format(eai_no_aup))\n", + "eai_per_no_aup = eai_no_aup / exp_acel.gdf[exp_acel.gdf.category == 2].value.sum() * 100\n", + "print(\n", + " \"Annual expected impact of non AUPs over their total value: {:.2f}%.\".format(\n", + " eai_per_no_aup\n", + " )\n", + ")" ] } ], diff --git a/script/applications/eca_san_salvador/functions_ss.py b/script/applications/eca_san_salvador/functions_ss.py index caee8a4f5f..3d04785589 100755 --- a/script/applications/eca_san_salvador/functions_ss.py +++ b/script/applications/eca_san_salvador/functions_ss.py @@ -19,47 +19,59 @@ Define WaterScarcity (WS) class. 
WORK IN PROGRESS """ + import contextily as ctx import geopandas as gpd import matplotlib.patches as patches from matplotlib import colormaps as cm from shapely import wkt + def plot_salvador_ma(): - risk_shape = 'POLYGON ((-89.25090785340315 13.671, -89.251 13.671, -89.251 13.67108933717579, -89.251 13.67117867435158, -89.251 13.67126801152738, -89.251 13.67135734870317, -89.251 13.67144668587896, -89.251 13.67153602305475, -89.251 13.67162536023055, -89.251 13.67171469740634, -89.251 13.67180403458213, -89.251 13.67189337175792, -89.251 13.67198270893372, -89.251 13.67207204610951, -89.251 13.6721613832853, -89.251 13.6722507204611, -89.251 13.67234005763689, -89.251 13.67242939481268, -89.251 13.67251873198847, -89.251 13.67260806916426, -89.251 13.67269740634006, -89.251 13.67278674351585, -89.251 13.67287608069164, -89.251 13.67296541786743, -89.251 13.67305475504323, -89.251 13.67314409221902, -89.251 13.67323342939481, -89.251 13.6733227665706, -89.251 13.6734121037464, -89.251 13.67350144092219, -89.251 13.67359077809798, -89.251 13.67368011527378, -89.251 13.67376945244957, -89.251 13.67385878962536, -89.251 13.67394812680115, -89.251 13.67403746397694, -89.251 13.67412680115274, -89.251 13.67421613832853, -89.251 13.67430547550432, -89.251 13.67439481268011, -89.251 13.67448414985591, -89.251 13.6745734870317, -89.251 13.67466282420749, -89.251 13.67475216138329, -89.251 13.67484149855908, -89.251 13.67493083573487, -89.251 13.67502017291066, -89.251 13.67510951008645, -89.251 13.67519884726225, -89.251 13.67528818443804, -89.251 13.67537752161383, -89.251 13.67546685878962, -89.251 13.67555619596542, -89.251 13.67564553314121, -89.251 13.675734870317, -89.251 13.67582420749279, -89.251 13.67591354466859, -89.251 13.67600288184438, -89.251 13.67609221902017, -89.251 13.67618155619597, -89.251 13.67627089337176, -89.251 13.67636023054755, -89.251 13.67644956772334, -89.251 13.67653890489913, -89.251 13.67662824207493, -89.251 13.67671757925072, -89.251 13.67680691642651, -89.251 13.6768962536023, -89.251 13.6769855907781, -89.251 13.67707492795389, -89.251 13.67716426512968, -89.251 13.67725360230548, -89.251 13.67734293948127, -89.251 13.67743227665706, -89.251 13.67752161383285, -89.251 13.67761095100865, -89.251 13.67770028818444, -89.251 13.67778962536023, -89.251 13.67787896253602, -89.251 13.67796829971181, -89.251 13.67805763688761, -89.25090785340315 13.67832564841498, -89.25081570680629 13.67850432276657, -89.25072356020942 13.67868299711816, -89.25063141361257 13.67886167146974, -89.250354973822 13.67921902017291, -89.25017068062827 13.67948703170029, -89.2498942408377 13.67984438040346, -89.24961780104712 13.68020172910663, -89.24934136125655 13.6805590778098, -89.24915706806283 13.68082708933717, -89.24888062827226 13.68118443804035, -89.24860418848168 13.68154178674352, -89.24832774869111 13.68189913544669, -89.24814345549738 13.68216714697406, -89.24786701570682 13.68252449567723, -89.24759057591623 13.6828818443804, -89.24740628272252 13.68314985590778, -89.24712984293194 13.68350720461095, -89.24685340314137 13.68386455331412, -89.24657696335079 13.68422190201729, -89.24639267015708 13.68448991354467, -89.24556335078535 13.68556195965418, -89.24510261780105 13.68609798270893, -89.2450104712042 13.68618731988473, -89.24491832460734 13.68627665706052, -89.24436544502618 13.68690201729107, -89.24427329842932 13.68699135446686, -89.24372041884817 13.68761671469741, -89.24362827225131 13.6877060518732, -89.24353612565446 13.68779538904899, -89.24298324607331 
13.68842074927954, -89.24289109947644 13.68851008645533, -89.24233821989529 13.68913544668588, -89.24224607329843 13.68922478386167, -89.24169319371728 13.68985014409222, -89.24160104712043 13.68993948126801, -89.24150890052357 13.6900288184438, -89.24095602094241 13.69065417867435, -89.24086387434555 13.69074351585014, -89.24077172774869 13.69083285302594, -89.24067958115184 13.69092219020173, -89.24058743455498 13.69101152737752, -89.24049528795813 13.69110086455331, -89.24040314136126 13.69119020172911, -89.2403109947644 13.6912795389049, -89.24021884816754 13.69136887608069, -89.23975811518325 13.69163688760807, -89.23929738219896 13.69190489913545, -89.23865235602095 13.69226224783862, -89.23819162303666 13.69253025936599, -89.23773089005236 13.69279827089337, -89.23708586387436 13.69315561959654, -89.23662513089006 13.69342363112392, -89.23616439790577 13.6936916426513, -89.23570366492147 13.69395965417867, -89.23505863874345 13.69431700288184, -89.23459790575916 13.69458501440922, -89.23413717277488 13.6948530259366, -89.23376858638744 13.69494236311239, -89.23321570680629 13.69503170028818, -89.23266282722513 13.69512103746398, -89.23220209424085 13.69521037463977, -89.23164921465968 13.69529971181556, -89.23109633507853 13.69538904899135, -89.23054345549738 13.69547838616715, -89.23008272251309 13.69556772334294, -89.22952984293194 13.69565706051873, -89.22897696335079 13.69574639769452, -89.22851623036649 13.69583573487032, -89.22741047120419 13.6960144092219, -89.22685759162304 13.69610374639769, -89.22621256544502 13.69619308357349, -89.22612041884817 13.69619308357349, -89.22602827225131 13.69619308357349, -89.22593612565446 13.69619308357349, -89.2258439790576 13.69619308357349, -89.22575183246073 13.69619308357349, -89.22565968586387 13.69619308357349, -89.22556753926702 13.69619308357349, -89.22547539267016 13.69619308357349, -89.22538324607331 13.69619308357349, -89.22529109947644 13.69619308357349, -89.22519895287958 13.69619308357349, -89.22510680628272 13.69619308357349, -89.22501465968587 13.69619308357349, -89.22492251308901 13.69619308357349, -89.22483036649214 13.69619308357349, -89.22473821989529 13.69619308357349, -89.22464607329843 13.69619308357349, -89.22455392670157 13.69619308357349, -89.22446178010472 13.69619308357349, -89.22436963350786 13.69619308357349, -89.22427748691099 13.69619308357349, -89.22418534031414 13.69619308357349, -89.22409319371728 13.69619308357349, -89.22400104712042 13.69619308357349, -89.22390890052357 13.69619308357349, -89.2238167539267 13.69619308357349, -89.22372460732984 13.69619308357349, -89.22363246073299 13.69619308357349, -89.22354031413613 13.69619308357349, -89.22344816753927 13.69619308357349, -89.2233560209424 13.69619308357349, -89.22326387434555 13.69619308357349, -89.22317172774869 13.69619308357349, -89.2222502617801 13.69628242074928, -89.22215811518325 13.69628242074928, -89.22206596858639 13.69628242074928, -89.22197382198954 13.69628242074928, -89.22188167539267 13.69628242074928, -89.22178952879581 13.69628242074928, -89.22169738219895 13.69628242074928, -89.2216052356021 13.69628242074928, -89.22151308900524 13.69628242074928, -89.22142094240837 13.69628242074928, -89.22132879581152 13.69628242074928, -89.22123664921466 13.69628242074928, -89.2211445026178 13.69628242074928, -89.22105235602095 13.69628242074928, -89.22096020942409 13.69628242074928, -89.22086806282722 13.69628242074928, -89.22077591623037 13.69628242074928, -89.22059162303665 13.69619308357349, -89.22031518324607 13.6960144092219, 
-89.21985445026178 13.69574639769452, -89.21957801047121 13.69556772334294, -89.21930157068063 13.69538904899135, -89.21902513089006 13.69521037463977, -89.21874869109948 13.69503170028818, -89.21828795811518 13.69476368876081, -89.21801151832462 13.69458501440922, -89.21773507853403 13.69440634005764, -89.21745863874345 13.69422766570605, -89.21699790575916 13.69395965417867, -89.21672146596859 13.69378097982709, -89.21644502617801 13.6936023054755, -89.21616858638744 13.69342363112392, -89.21589214659686 13.69324495677233, -89.21543141361256 13.69297694524496, -89.215154973822 13.69279827089337, -89.21091623036649 13.69226224783862, -89.21063979057591 13.69235158501441, -89.21036335078534 13.6924409221902, -89.21008691099476 13.69253025936599, -89.20981047120419 13.69261959654179, -89.2093497382199 13.69279827089337, -89.20907329842932 13.69288760806916, -89.20879685863875 13.69297694524496, -89.20852041884817 13.69306628242075, -89.2082439790576 13.69315561959654, -89.20815183246073 13.69315561959654, -89.20805968586387 13.69315561959654, -89.20796753926702 13.69315561959654, -89.20787539267016 13.69315561959654, -89.2077832460733 13.69315561959654, -89.20769109947643 13.69315561959654, -89.20695392670157 13.69306628242075, -89.20630890052357 13.69297694524496, -89.20557172774869 13.69288760806916, -89.20492670157068 13.69279827089337, -89.20418952879581 13.69270893371758, -89.2035445026178 13.69261959654179, -89.20280732984293 13.69253025936599, -89.19976649214659 13.69288760806916, -89.19912146596859 13.69315561959654, -89.19847643979058 13.69342363112392, -89.19829214659686 13.69351296829971, -89.19764712041885 13.69378097982709, -89.19700209424084 13.69404899135447, -89.19681780104712 13.69413832853026, -89.19617277486911 13.69440634005764, -89.19552774869111 13.69467435158501, -89.19534345549738 13.69476368876081, -89.19469842931937 13.69503170028818, -89.19405340314137 13.69529971181556, -89.19386910994764 13.69538904899135, -89.19322408376964 13.69565706051873, -89.19303979057591 13.69574639769452, -89.1923947643979 13.6960144092219, -89.1917497382199 13.69628242074928, -89.19156544502617 13.69637175792507, -89.19092041884817 13.69663976945245, -89.19027539267016 13.69690778097983, -89.19009109947643 13.69699711815562, -89.18944607329843 13.697265129683, -89.18880104712042 13.69753314121037, -89.18861675392669 13.69762247838617, -89.18797172774869 13.69789048991355, -89.18732670157068 13.69815850144092, -89.18714240837696 13.69824783861671, -89.18668167539266 13.6984265129683, -89.18658952879581 13.6984265129683, -89.18649738219895 13.6984265129683, -89.18640523560209 13.6984265129683, -89.18631308900524 13.6984265129683, -89.18566806282722 13.69815850144092, -89.18502303664921 13.69789048991355, -89.18456230366492 13.69771181556196, -89.18391727748691 13.69744380403458, -89.18327225130889 13.6971757925072, -89.18262722513089 13.69690778097983, -89.1790335078534 13.69672910662824, -89.17875706806282 13.69681844380403, -89.17848062827225 13.69690778097983, -89.17820418848167 13.69699711815562, -89.1779277486911 13.69708645533141, -89.17765130890052 13.6971757925072, -89.17700628272252 13.69735446685879, -89.17672984293193 13.69744380403458, -89.17645340314137 13.69753314121037, -89.17617696335078 13.69762247838617, -89.1759005235602 13.69771181556196, -89.17562408376963 13.69780115273775, -89.17534764397905 13.69789048991355, -89.17470261780105 13.69806916426513, -89.1725832460733 13.69931988472622, -89.17249109947643 13.69940922190202, -89.17193821989528 13.70003458213256, 
-89.17184607329843 13.70012391930836, -89.17175392670157 13.70021325648415, -89.17120104712042 13.7008386167147, -89.17110890052356 13.70092795389049, -89.17101675392669 13.70101729106628, -89.17092460732984 13.70110662824207, -89.17037172774869 13.70173198847262, -89.17027958115183 13.70182132564842, -89.17018743455498 13.70191066282421, -89.17009528795812 13.702, -89.17000314136125 13.702, -89.16991099476439 13.702, -89.16981884816754 13.702, -89.16972670157068 13.702, -89.16963455497383 13.702, -89.16954240837696 13.702, -89.1694502617801 13.702, -89.16935811518324 13.702, -89.16926596858639 13.702, -89.16917382198953 13.702, -89.16908167539266 13.702, -89.16898952879581 13.702, -89.16889738219895 13.702, -89.16880523560209 13.702, -89.16871308900524 13.702, -89.16862094240837 13.702, -89.16852879581151 13.702, -89.16843664921466 13.702, -89.1683445026178 13.702, -89.16825235602094 13.702, -89.16816020942409 13.702, -89.16806806282722 13.702, -89.16797591623036 13.702, -89.16788376963351 13.702, -89.16779162303665 13.702, -89.16769947643979 13.702, -89.16760732984292 13.702, -89.16751518324607 13.702, -89.16742303664921 13.702, -89.16733089005236 13.702, -89.1672387434555 13.702, -89.16714659685863 13.702, -89.16705445026177 13.702, -89.16696230366492 13.702, -89.16687015706806 13.702, -89.16677801047121 13.702, -89.16668586387435 13.702, -89.16659371727748 13.702, -89.16650157068062 13.702, -89.16640942408377 13.702, -89.16631727748691 13.702, -89.16622513089006 13.702, -89.16613298429318 13.702, -89.16604083769633 13.702, -89.16594869109947 13.702, -89.16585654450262 13.702, -89.16576439790576 13.702, -89.16567225130889 13.702, -89.16558010471203 13.702, -89.16548795811518 13.702, -89.16539581151832 13.702, -89.16530366492147 13.702, -89.16521151832461 13.702, -89.16511937172774 13.702, -89.16502722513088 13.702, -89.16493507853403 13.702, -89.16484293193717 13.702, -89.16475078534032 13.702, -89.16465863874345 13.702, -89.16456649214659 13.702, -89.16447434554973 13.702, -89.16438219895288 13.702, -89.16429005235602 13.702, -89.16419790575915 13.702, -89.1641057591623 13.702, -89.16401361256544 13.702, -89.16392146596858 13.702, -89.16382931937173 13.702, -89.16373717277487 13.702, -89.163645026178 13.702, -89.16355287958115 13.702, -89.16346073298429 13.702, -89.16336858638743 13.702, -89.16327643979058 13.702, -89.16318429319371 13.702, -89.16309214659685 13.702, -89.163 13.702, -89.163 13.70191066282421, -89.163 13.70182132564842, -89.163 13.70173198847262, -89.163 13.70164265129683, -89.163 13.70155331412104, -89.163 13.70146397694525, -89.163 13.70137463976945, -89.163 13.70128530259366, -89.163 13.70119596541787, -89.163 13.70110662824207, -89.163 13.70101729106628, -89.163 13.70092795389049, -89.163 13.7008386167147, -89.163 13.7007492795389, -89.163 13.70065994236311, -89.163 13.70057060518732, -89.163 13.70048126801153, -89.163 13.70039193083574, -89.163 13.70030259365994, -89.163 13.70021325648415, -89.163 13.70012391930836, -89.163 13.70003458213256, -89.163 13.69994524495677, -89.163 13.69985590778098, -89.163 13.69976657060519, -89.163 13.69967723342939, -89.163 13.6995878962536, -89.163 13.69949855907781, -89.163 13.69940922190202, -89.163 13.69931988472622, -89.163 13.69923054755043, -89.163 13.69914121037464, -89.163 13.69905187319885, -89.163 13.69896253602306, -89.163 13.69887319884726, -89.163 13.69878386167147, -89.163 13.69869452449568, -89.163 13.69860518731988, -89.163 13.69851585014409, -89.163 13.6984265129683, -89.163 13.69833717579251, -89.163 
13.69824783861671, -89.163 13.69815850144092, -89.163 13.69806916426513, -89.163 13.69797982708934, -89.163 13.69789048991355, -89.163 13.69780115273775, -89.163 13.69771181556196, -89.163 13.69762247838617, -89.163 13.69753314121037, -89.163 13.69744380403458, -89.163 13.69735446685879, -89.163 13.697265129683, -89.163 13.6971757925072, -89.163 13.69708645533141, -89.163 13.69699711815562, -89.163 13.69690778097983, -89.163 13.69681844380403, -89.163 13.69672910662824, -89.163 13.69663976945245, -89.163 13.69655043227666, -89.163 13.69646109510087, -89.163 13.69637175792507, -89.163 13.69628242074928, -89.163 13.69619308357349, -89.163 13.69610374639769, -89.163 13.6960144092219, -89.163 13.69592507204611, -89.163 13.69583573487032, -89.163 13.69574639769452, -89.163 13.69565706051873, -89.163 13.69556772334294, -89.163 13.69547838616715, -89.163 13.69538904899135, -89.163 13.69529971181556, -89.163 13.69521037463977, -89.163 13.69512103746398, -89.163 13.69503170028818, -89.163 13.69494236311239, -89.163 13.6948530259366, -89.163 13.69476368876081, -89.163 13.69467435158501, -89.163 13.69458501440922, -89.163 13.69449567723343, -89.163 13.69440634005764, -89.163 13.69431700288184, -89.163 13.69422766570605, -89.163 13.69413832853026, -89.163 13.69404899135447, -89.163 13.69395965417867, -89.163 13.69387031700288, -89.163 13.69378097982709, -89.163 13.6936916426513, -89.163 13.6936023054755, -89.163 13.69351296829971, -89.163 13.69342363112392, -89.163 13.69333429394813, -89.163 13.69324495677233, -89.16327643979058 13.69306628242075, -89.16355287958115 13.69288760806916, -89.16382931937173 13.69270893371758, -89.1641057591623 13.69253025936599, -89.16438219895288 13.69235158501441, -89.16465863874345 13.69217291066282, -89.16493507853403 13.69199423631124, -89.16521151832461 13.69181556195965, -89.16548795811518 13.69163688760807, -89.16576439790576 13.69145821325648, -89.16604083769633 13.6912795389049, -89.16631727748691 13.69110086455331, -89.16659371727748 13.69092219020173, -89.16677801047121 13.69083285302594, -89.16696230366492 13.69074351585014, -89.16714659685863 13.69065417867435, -89.16733089005236 13.69056484149856, -89.16797591623036 13.69020749279539, -89.16816020942409 13.6901181556196, -89.1683445026178 13.6900288184438, -89.16852879581151 13.68993948126801, -89.16871308900524 13.68985014409222, -89.16889738219895 13.68976080691643, -89.16908167539266 13.68967146974063, -89.16972670157068 13.68931412103746, -89.16991099476439 13.68922478386167, -89.17009528795812 13.68913544668588, -89.17027958115183 13.68904610951009, -89.17046387434554 13.68895677233429, -89.17064816753927 13.6888674351585, -89.17083246073298 13.68877809798271, -89.17147748691099 13.68842074927954, -89.17166178010471 13.68833141210375, -89.17350471204188 13.68681268011527, -89.17378115183246 13.6864553314121, -89.17405759162304 13.68609798270893, -89.17442617801046 13.68565129682997, -89.17470261780105 13.6852939481268, -89.17507120418848 13.68484726224784, -89.17534764397905 13.68448991354467, -89.17571623036649 13.68404322766571, -89.17580837696335 13.68395389048991, -89.17599267015707 13.68386455331412, -89.17617696335078 13.68377521613833, -89.1763612565445 13.68368587896254, -89.17654554973822 13.68359654178674, -89.17672984293193 13.68350720461095, -89.17691413612565 13.68341786743516, -89.17709842931937 13.68332853025936, -89.17728272251308 13.68323919308357, -89.17746701570681 13.68314985590778, -89.17765130890052 13.68306051873199, -89.17783560209423 13.6829711815562, -89.17801989528796 
13.6828818443804, -89.17820418848167 13.68279250720461, -89.17884921465968 13.68252449567723, -89.1790335078534 13.68243515850144, -89.17921780104712 13.68234582132565, -89.17940209424084 13.68225648414986, -89.17958638743455 13.68216714697406, -89.17977068062827 13.68207780979827, -89.17995497382199 13.68198847262248, -89.1801392670157 13.68189913544669, -89.18032356020942 13.68180979827089, -89.18050785340314 13.6817204610951, -89.18069214659685 13.68163112391931, -89.18087643979058 13.68154178674352, -89.18152146596859 13.68127377521614, -89.1817057591623 13.68118443804035, -89.18189005235602 13.68109510086455, -89.18198219895288 13.68109510086455, -89.18207434554974 13.68109510086455, -89.18216649214659 13.68109510086455, -89.18225863874345 13.68109510086455, -89.18235078534032 13.68109510086455, -89.18244293193717 13.68109510086455, -89.18253507853403 13.68109510086455, -89.18262722513089 13.68109510086455, -89.18271937172774 13.68109510086455, -89.18281151832461 13.68109510086455, -89.18290366492147 13.68109510086455, -89.18299581151832 13.68109510086455, -89.18308795811518 13.68109510086455, -89.18318010471204 13.68109510086455, -89.18327225130889 13.68109510086455, -89.18336439790576 13.68109510086455, -89.18345654450262 13.68109510086455, -89.18354869109947 13.68109510086455, -89.18364083769633 13.68109510086455, -89.18373298429319 13.68109510086455, -89.18382513089006 13.68109510086455, -89.18806387434554 13.67868299711816, -89.18843246073298 13.67805763688761, -89.18880104712042 13.67743227665706, -89.18889319371728 13.67725360230548, -89.18926178010472 13.67662824207493, -89.18963036649214 13.67600288184438, -89.18972251308901 13.67582420749279, -89.19009109947643 13.67519884726225, -89.19045968586387 13.6745734870317, -89.19055183246073 13.67439481268011, -89.19092041884817 13.67376945244957, -89.19119685863875 13.6733227665706, -89.19138115183246 13.67323342939481, -89.19156544502617 13.67314409221902, -89.1917497382199 13.67305475504323, -89.1923947643979 13.67278674351585, -89.19257905759163 13.67269740634006, -89.19276335078534 13.67260806916426, -89.19294764397905 13.67251873198847, -89.19313193717278 13.67242939481268, -89.19331623036649 13.67234005763689, -89.1935005235602 13.6722507204611, -89.19368481675393 13.6721613832853, -89.19386910994764 13.67207204610951, -89.19405340314137 13.67198270893372, -89.19469842931937 13.67171469740634, -89.19488272251309 13.67162536023055, -89.19506701570681 13.67153602305475, -89.19525130890052 13.67144668587896, -89.19543560209424 13.67135734870317, -89.19561989528796 13.67126801152738, -89.19580418848167 13.67117867435158, -89.19598848167539 13.67108933717579, -89.19617277486911 13.671, -89.19626492146597 13.671, -89.19635706806282 13.671, -89.19644921465968 13.671, -89.19654136125655 13.671, -89.19663350785341 13.671, -89.19672565445026 13.671, -89.19681780104712 13.671, -89.19690994764397 13.671, -89.19700209424084 13.671, -89.1970942408377 13.671, -89.19718638743456 13.671, -89.19727853403141 13.671, -89.19737068062827 13.671, -89.19746282722512 13.671, -89.19755497382199 13.671, -89.19764712041885 13.671, -89.19773926701571 13.671, -89.19783141361256 13.671, -89.19792356020942 13.671, -89.19801570680629 13.671, -89.19810785340314 13.671, -89.1982 13.671, -89.19829214659686 13.671, -89.19838429319371 13.671, -89.19847643979058 13.671, -89.19856858638744 13.671, -89.19866073298429 13.671, -89.19875287958115 13.671, -89.19884502617801 13.671, -89.19893717277488 13.671, -89.19902931937173 13.671, -89.19912146596859 13.671, 
-89.19921361256544 13.671, -89.1993057591623 13.671, -89.19939790575916 13.671, -89.19949005235603 13.671, -89.19958219895288 13.671, -89.19967434554974 13.671, -89.19976649214659 13.671, -89.19985863874345 13.671, -89.19995078534032 13.671, -89.20004293193718 13.671, -89.20013507853403 13.671, -89.20022722513089 13.671, -89.20031937172774 13.671, -89.20041151832461 13.671, -89.20050366492147 13.671, -89.20059581151833 13.671, -89.20068795811518 13.671, -89.20078010471204 13.671, -89.20087225130889 13.671, -89.20096439790576 13.671, -89.20105654450262 13.671, -89.20114869109948 13.671, -89.20124083769633 13.671, -89.20133298429319 13.671, -89.20142513089006 13.671, -89.20151727748691 13.671, -89.20160942408377 13.671, -89.20170157068063 13.671, -89.20179371727748 13.671, -89.20188586387435 13.671, -89.20197801047121 13.671, -89.20207015706806 13.671, -89.20216230366492 13.671, -89.20225445026178 13.671, -89.20234659685863 13.671, -89.2024387434555 13.671, -89.20253089005236 13.671, -89.20262303664921 13.671, -89.20271518324607 13.671, -89.20280732984293 13.671, -89.2028994764398 13.671, -89.20299162303665 13.671, -89.20308376963351 13.671, -89.20317591623036 13.671, -89.20326806282722 13.671, -89.20336020942409 13.671, -89.20345235602095 13.671, -89.2035445026178 13.671, -89.20363664921466 13.671, -89.20372879581151 13.671, -89.20382094240837 13.671, -89.20391308900524 13.671, -89.2040052356021 13.671, -89.20409738219895 13.671, -89.20418952879581 13.671, -89.20428167539266 13.671, -89.20437382198953 13.671, -89.20446596858639 13.671, -89.20455811518325 13.671, -89.2046502617801 13.671, -89.20474240837696 13.671, -89.20483455497383 13.671, -89.20492670157068 13.671, -89.20501884816754 13.671, -89.2051109947644 13.671, -89.20520314136125 13.671, -89.20529528795812 13.671, -89.20538743455498 13.671, -89.20547958115183 13.671, -89.20557172774869 13.671, -89.20566387434555 13.671, -89.2057560209424 13.671, -89.20584816753927 13.671, -89.20594031413613 13.671, -89.20603246073298 13.671, -89.20612460732984 13.671, -89.2062167539267 13.671, -89.20640104712042 13.67108933717579, -89.20658534031413 13.67117867435158, -89.20676963350786 13.67126801152738, -89.20695392670157 13.67135734870317, -89.20713821989528 13.67144668587896, -89.20723036649214 13.67153602305475, -89.20723036649214 13.67162536023055, -89.20723036649214 13.67171469740634, -89.20723036649214 13.67180403458213, -89.20704607329843 13.67242939481268, -89.20686178010472 13.67305475504323, -89.20667748691099 13.67368011527378, -89.20658534031413 13.67394812680115, -89.20640104712042 13.6745734870317, -89.21036335078534 13.68118443804035, -89.21091623036649 13.68127377521614, -89.21146910994764 13.68136311239193, -89.21211413612565 13.68145244956772, -89.21266701570681 13.68154178674352, -89.21321989528796 13.68163112391931, -89.21377277486911 13.6817204610951, -89.21441780104712 13.68180979827089, -89.2146942408377 13.68189913544669, -89.21487853403141 13.68198847262248, -89.215154973822 13.68216714697406, -89.21543141361256 13.68234582132565, -89.21589214659686 13.68261383285303, -89.21616858638744 13.68279250720461, -89.21644502617801 13.6829711815562, -89.21672146596859 13.68314985590778, -89.21699790575916 13.68332853025936, -89.21727434554974 13.68350720461095, -89.21773507853403 13.68377521613833, -89.21801151832462 13.68395389048991, -89.21828795811518 13.6841325648415, -89.21856439790577 13.68431123919308, -89.21884083769633 13.68448991354467, -89.21911727748692 13.68466858789625, -89.22031518324607 13.68520461095101, 
-89.22059162303665 13.6852939481268, -89.22086806282722 13.68538328530259, -89.2211445026178 13.68547262247839, -89.22142094240837 13.68556195965418, -89.22169738219895 13.68565129682997, -89.22197382198954 13.68574063400576, -89.2222502617801 13.68582997118156, -89.22252670157069 13.68591930835735, -89.22280314136125 13.68600864553314, -89.22307958115184 13.68609798270893, -89.2233560209424 13.68618731988473, -89.22363246073299 13.68627665706052, -89.22390890052357 13.68636599423631, -89.22768691099476 13.68618731988473, -89.22833193717278 13.68591930835735, -89.22851623036649 13.68582997118156, -89.22870052356021 13.68574063400576, -89.22888481675393 13.68565129682997, -89.22906910994764 13.68556195965418, -89.22925340314137 13.68547262247839, -89.22943769633508 13.68538328530259, -89.22962198952879 13.6852939481268, -89.22980628272252 13.68520461095101, -89.22999057591623 13.68511527377522, -89.23017486910994 13.68502593659942, -89.23035916230367 13.68493659942363, -89.23054345549738 13.68484726224784, -89.23072774869111 13.68475792507205, -89.23091204188482 13.68466858789625, -89.23109633507853 13.68457925072046, -89.23174136125655 13.68431123919308, -89.23192565445027 13.68422190201729, -89.23358429319372 13.6828818443804, -89.23413717277488 13.68225648414986, -89.23469005235603 13.68163112391931, -89.23478219895289 13.68154178674352, -89.23533507853404 13.68091642651297, -89.23588795811519 13.68029106628242, -89.23598010471204 13.68020172910663, -89.2360722513089 13.68011239193084, -89.23616439790577 13.68002305475504, -89.23625654450262 13.67993371757925, -89.23634869109948 13.67984438040346, -89.23644083769634 13.67975504322767, -89.23653298429319 13.67966570605187, -89.23662513089006 13.67957636887608, -89.23671727748692 13.67948703170029, -89.23680942408377 13.67939769452449, -89.23690157068063 13.6793083573487, -89.23699371727749 13.67921902017291, -89.23754659685864 13.67859365994236, -89.23763874345551 13.67850432276657, -89.23773089005236 13.67841498559078, -89.23782303664922 13.67832564841498, -89.23791518324607 13.67823631123919, -89.23800732984293 13.6781469740634, -89.2380994764398 13.67805763688761, -89.23819162303666 13.67796829971181, -89.23828376963351 13.67787896253602, -89.23837591623037 13.67778962536023, -89.23846806282722 13.67770028818444, -89.23856020942409 13.67761095100865, -89.23865235602095 13.67752161383285, -89.23874450261781 13.67743227665706, -89.23883664921466 13.67734293948127, -89.23938952879581 13.67671757925072, -89.23948167539267 13.67662824207493, -89.23957382198954 13.67653890489913, -89.23966596858639 13.67644956772334, -89.23975811518325 13.67636023054755, -89.23985026178011 13.67627089337176, -89.23994240837696 13.67618155619597, -89.24003455497383 13.67609221902017, -89.24012670157069 13.67600288184438, -89.24021884816754 13.67591354466859, -89.2403109947644 13.67582420749279, -89.24040314136126 13.675734870317, -89.24049528795813 13.67564553314121, -89.24058743455498 13.67555619596542, -89.24114031413613 13.67493083573487, -89.24123246073299 13.67484149855908, -89.24132460732984 13.67475216138329, -89.2414167539267 13.67466282420749, -89.24150890052357 13.6745734870317, -89.24160104712043 13.67448414985591, -89.24169319371728 13.67439481268011, -89.24178534031414 13.67430547550432, -89.24298324607331 13.67251873198847, -89.24316753926702 13.67207204610951, -89.24335183246073 13.67162536023055, -89.24353612565446 13.67117867435158, -89.24362827225131 13.671, -89.24372041884817 13.671, -89.24381256544503 13.671, -89.24390471204188 13.671, 
-89.24399685863875 13.671, -89.24408900523561 13.671, -89.24418115183246 13.671, -89.24427329842932 13.671, -89.24436544502618 13.671, -89.24445759162305 13.671, -89.2445497382199 13.671, -89.24464188481676 13.671, -89.24473403141361 13.671, -89.24482617801047 13.671, -89.24491832460734 13.671, -89.2450104712042 13.671, -89.24510261780105 13.671, -89.24519476439791 13.671, -89.24528691099476 13.671, -89.24537905759163 13.671, -89.24547120418849 13.671, -89.24556335078535 13.671, -89.2456554973822 13.671, -89.24574764397906 13.671, -89.24583979057591 13.671, -89.24593193717278 13.671, -89.24602408376964 13.671, -89.2461162303665 13.671, -89.24620837696335 13.671, -89.24630052356021 13.671, -89.24639267015708 13.671, -89.24648481675393 13.671, -89.24657696335079 13.671, -89.24666910994765 13.671, -89.2467612565445 13.671, -89.24685340314137 13.671, -89.24694554973823 13.671, -89.24703769633508 13.671, -89.24712984293194 13.671, -89.2472219895288 13.671, -89.24731413612565 13.671, -89.24740628272252 13.671, -89.24749842931938 13.671, -89.24759057591623 13.671, -89.24768272251309 13.671, -89.24777486910995 13.671, -89.24786701570682 13.671, -89.24795916230367 13.671, -89.24805130890053 13.671, -89.24814345549738 13.671, -89.24823560209424 13.671, -89.24832774869111 13.671, -89.24841989528797 13.671, -89.24851204188482 13.671, -89.24860418848168 13.671, -89.24869633507853 13.671, -89.24878848167539 13.671, -89.24888062827226 13.671, -89.24897277486912 13.671, -89.24906492146597 13.671, -89.24915706806283 13.671, -89.24924921465968 13.671, -89.24934136125655 13.671, -89.24943350785341 13.671, -89.24952565445027 13.671, -89.24961780104712 13.671, -89.24970994764398 13.671, -89.24980209424085 13.671, -89.2498942408377 13.671, -89.24998638743456 13.671, -89.25007853403142 13.671, -89.25017068062827 13.671, -89.25026282722513 13.671, -89.250354973822 13.671, -89.25044712041885 13.671, -89.25053926701571 13.671, -89.25063141361257 13.671, -89.25072356020942 13.671, -89.25081570680629 13.671, -89.25090785340315 13.671))'
+    risk_shape = "POLYGON ((-89.25090785340315 13.671, -89.251 13.671, -89.251 13.67108933717579, -89.251 13.67117867435158, -89.251 13.67126801152738, -89.251 13.67135734870317, -89.251 13.67144668587896, -89.251 13.67153602305475, -89.251 13.67162536023055, -89.251 13.67171469740634, -89.251 13.67180403458213, -89.251 13.67189337175792, -89.251 13.67198270893372, -89.251 13.67207204610951, -89.251 13.6721613832853, -89.251 13.6722507204611, -89.251 13.67234005763689, -89.251 13.67242939481268, -89.251 13.67251873198847, -89.251 13.67260806916426, -89.251 13.67269740634006, -89.251 13.67278674351585, -89.251 13.67287608069164, -89.251 13.67296541786743, -89.251 13.67305475504323, -89.251 13.67314409221902, -89.251 13.67323342939481, -89.251 13.6733227665706, -89.251 13.6734121037464, -89.251 13.67350144092219, -89.251 13.67359077809798, -89.251 13.67368011527378, -89.251 13.67376945244957, -89.251 13.67385878962536, -89.251 13.67394812680115, -89.251 13.67403746397694, -89.251 13.67412680115274, -89.251 13.67421613832853, -89.251 13.67430547550432, -89.251 13.67439481268011, -89.251 13.67448414985591, -89.251 13.6745734870317, -89.251 13.67466282420749, -89.251 13.67475216138329, -89.251 13.67484149855908, -89.251 13.67493083573487, -89.251 13.67502017291066, -89.251 13.67510951008645, -89.251 13.67519884726225, -89.251 13.67528818443804, -89.251 13.67537752161383, -89.251 13.67546685878962, -89.251 13.67555619596542, -89.251 13.67564553314121, -89.251 13.675734870317, -89.251
13.67582420749279, -89.251 13.67591354466859, -89.251 13.67600288184438, -89.251 13.67609221902017, -89.251 13.67618155619597, -89.251 13.67627089337176, -89.251 13.67636023054755, -89.251 13.67644956772334, -89.251 13.67653890489913, -89.251 13.67662824207493, -89.251 13.67671757925072, -89.251 13.67680691642651, -89.251 13.6768962536023, -89.251 13.6769855907781, -89.251 13.67707492795389, -89.251 13.67716426512968, -89.251 13.67725360230548, -89.251 13.67734293948127, -89.251 13.67743227665706, -89.251 13.67752161383285, -89.251 13.67761095100865, -89.251 13.67770028818444, -89.251 13.67778962536023, -89.251 13.67787896253602, -89.251 13.67796829971181, -89.251 13.67805763688761, -89.25090785340315 13.67832564841498, -89.25081570680629 13.67850432276657, -89.25072356020942 13.67868299711816, -89.25063141361257 13.67886167146974, -89.250354973822 13.67921902017291, -89.25017068062827 13.67948703170029, -89.2498942408377 13.67984438040346, -89.24961780104712 13.68020172910663, -89.24934136125655 13.6805590778098, -89.24915706806283 13.68082708933717, -89.24888062827226 13.68118443804035, -89.24860418848168 13.68154178674352, -89.24832774869111 13.68189913544669, -89.24814345549738 13.68216714697406, -89.24786701570682 13.68252449567723, -89.24759057591623 13.6828818443804, -89.24740628272252 13.68314985590778, -89.24712984293194 13.68350720461095, -89.24685340314137 13.68386455331412, -89.24657696335079 13.68422190201729, -89.24639267015708 13.68448991354467, -89.24556335078535 13.68556195965418, -89.24510261780105 13.68609798270893, -89.2450104712042 13.68618731988473, -89.24491832460734 13.68627665706052, -89.24436544502618 13.68690201729107, -89.24427329842932 13.68699135446686, -89.24372041884817 13.68761671469741, -89.24362827225131 13.6877060518732, -89.24353612565446 13.68779538904899, -89.24298324607331 13.68842074927954, -89.24289109947644 13.68851008645533, -89.24233821989529 13.68913544668588, -89.24224607329843 13.68922478386167, -89.24169319371728 13.68985014409222, -89.24160104712043 13.68993948126801, -89.24150890052357 13.6900288184438, -89.24095602094241 13.69065417867435, -89.24086387434555 13.69074351585014, -89.24077172774869 13.69083285302594, -89.24067958115184 13.69092219020173, -89.24058743455498 13.69101152737752, -89.24049528795813 13.69110086455331, -89.24040314136126 13.69119020172911, -89.2403109947644 13.6912795389049, -89.24021884816754 13.69136887608069, -89.23975811518325 13.69163688760807, -89.23929738219896 13.69190489913545, -89.23865235602095 13.69226224783862, -89.23819162303666 13.69253025936599, -89.23773089005236 13.69279827089337, -89.23708586387436 13.69315561959654, -89.23662513089006 13.69342363112392, -89.23616439790577 13.6936916426513, -89.23570366492147 13.69395965417867, -89.23505863874345 13.69431700288184, -89.23459790575916 13.69458501440922, -89.23413717277488 13.6948530259366, -89.23376858638744 13.69494236311239, -89.23321570680629 13.69503170028818, -89.23266282722513 13.69512103746398, -89.23220209424085 13.69521037463977, -89.23164921465968 13.69529971181556, -89.23109633507853 13.69538904899135, -89.23054345549738 13.69547838616715, -89.23008272251309 13.69556772334294, -89.22952984293194 13.69565706051873, -89.22897696335079 13.69574639769452, -89.22851623036649 13.69583573487032, -89.22741047120419 13.6960144092219, -89.22685759162304 13.69610374639769, -89.22621256544502 13.69619308357349, -89.22612041884817 13.69619308357349, -89.22602827225131 13.69619308357349, -89.22593612565446 13.69619308357349, -89.2258439790576 
13.69619308357349, -89.22575183246073 13.69619308357349, -89.22565968586387 13.69619308357349, -89.22556753926702 13.69619308357349, -89.22547539267016 13.69619308357349, -89.22538324607331 13.69619308357349, -89.22529109947644 13.69619308357349, -89.22519895287958 13.69619308357349, -89.22510680628272 13.69619308357349, -89.22501465968587 13.69619308357349, -89.22492251308901 13.69619308357349, -89.22483036649214 13.69619308357349, -89.22473821989529 13.69619308357349, -89.22464607329843 13.69619308357349, -89.22455392670157 13.69619308357349, -89.22446178010472 13.69619308357349, -89.22436963350786 13.69619308357349, -89.22427748691099 13.69619308357349, -89.22418534031414 13.69619308357349, -89.22409319371728 13.69619308357349, -89.22400104712042 13.69619308357349, -89.22390890052357 13.69619308357349, -89.2238167539267 13.69619308357349, -89.22372460732984 13.69619308357349, -89.22363246073299 13.69619308357349, -89.22354031413613 13.69619308357349, -89.22344816753927 13.69619308357349, -89.2233560209424 13.69619308357349, -89.22326387434555 13.69619308357349, -89.22317172774869 13.69619308357349, -89.2222502617801 13.69628242074928, -89.22215811518325 13.69628242074928, -89.22206596858639 13.69628242074928, -89.22197382198954 13.69628242074928, -89.22188167539267 13.69628242074928, -89.22178952879581 13.69628242074928, -89.22169738219895 13.69628242074928, -89.2216052356021 13.69628242074928, -89.22151308900524 13.69628242074928, -89.22142094240837 13.69628242074928, -89.22132879581152 13.69628242074928, -89.22123664921466 13.69628242074928, -89.2211445026178 13.69628242074928, -89.22105235602095 13.69628242074928, -89.22096020942409 13.69628242074928, -89.22086806282722 13.69628242074928, -89.22077591623037 13.69628242074928, -89.22059162303665 13.69619308357349, -89.22031518324607 13.6960144092219, -89.21985445026178 13.69574639769452, -89.21957801047121 13.69556772334294, -89.21930157068063 13.69538904899135, -89.21902513089006 13.69521037463977, -89.21874869109948 13.69503170028818, -89.21828795811518 13.69476368876081, -89.21801151832462 13.69458501440922, -89.21773507853403 13.69440634005764, -89.21745863874345 13.69422766570605, -89.21699790575916 13.69395965417867, -89.21672146596859 13.69378097982709, -89.21644502617801 13.6936023054755, -89.21616858638744 13.69342363112392, -89.21589214659686 13.69324495677233, -89.21543141361256 13.69297694524496, -89.215154973822 13.69279827089337, -89.21091623036649 13.69226224783862, -89.21063979057591 13.69235158501441, -89.21036335078534 13.6924409221902, -89.21008691099476 13.69253025936599, -89.20981047120419 13.69261959654179, -89.2093497382199 13.69279827089337, -89.20907329842932 13.69288760806916, -89.20879685863875 13.69297694524496, -89.20852041884817 13.69306628242075, -89.2082439790576 13.69315561959654, -89.20815183246073 13.69315561959654, -89.20805968586387 13.69315561959654, -89.20796753926702 13.69315561959654, -89.20787539267016 13.69315561959654, -89.2077832460733 13.69315561959654, -89.20769109947643 13.69315561959654, -89.20695392670157 13.69306628242075, -89.20630890052357 13.69297694524496, -89.20557172774869 13.69288760806916, -89.20492670157068 13.69279827089337, -89.20418952879581 13.69270893371758, -89.2035445026178 13.69261959654179, -89.20280732984293 13.69253025936599, -89.19976649214659 13.69288760806916, -89.19912146596859 13.69315561959654, -89.19847643979058 13.69342363112392, -89.19829214659686 13.69351296829971, -89.19764712041885 13.69378097982709, -89.19700209424084 13.69404899135447, 
-89.19681780104712 13.69413832853026, -89.19617277486911 13.69440634005764, -89.19552774869111 13.69467435158501, -89.19534345549738 13.69476368876081, -89.19469842931937 13.69503170028818, -89.19405340314137 13.69529971181556, -89.19386910994764 13.69538904899135, -89.19322408376964 13.69565706051873, -89.19303979057591 13.69574639769452, -89.1923947643979 13.6960144092219, -89.1917497382199 13.69628242074928, -89.19156544502617 13.69637175792507, -89.19092041884817 13.69663976945245, -89.19027539267016 13.69690778097983, -89.19009109947643 13.69699711815562, -89.18944607329843 13.697265129683, -89.18880104712042 13.69753314121037, -89.18861675392669 13.69762247838617, -89.18797172774869 13.69789048991355, -89.18732670157068 13.69815850144092, -89.18714240837696 13.69824783861671, -89.18668167539266 13.6984265129683, -89.18658952879581 13.6984265129683, -89.18649738219895 13.6984265129683, -89.18640523560209 13.6984265129683, -89.18631308900524 13.6984265129683, -89.18566806282722 13.69815850144092, -89.18502303664921 13.69789048991355, -89.18456230366492 13.69771181556196, -89.18391727748691 13.69744380403458, -89.18327225130889 13.6971757925072, -89.18262722513089 13.69690778097983, -89.1790335078534 13.69672910662824, -89.17875706806282 13.69681844380403, -89.17848062827225 13.69690778097983, -89.17820418848167 13.69699711815562, -89.1779277486911 13.69708645533141, -89.17765130890052 13.6971757925072, -89.17700628272252 13.69735446685879, -89.17672984293193 13.69744380403458, -89.17645340314137 13.69753314121037, -89.17617696335078 13.69762247838617, -89.1759005235602 13.69771181556196, -89.17562408376963 13.69780115273775, -89.17534764397905 13.69789048991355, -89.17470261780105 13.69806916426513, -89.1725832460733 13.69931988472622, -89.17249109947643 13.69940922190202, -89.17193821989528 13.70003458213256, -89.17184607329843 13.70012391930836, -89.17175392670157 13.70021325648415, -89.17120104712042 13.7008386167147, -89.17110890052356 13.70092795389049, -89.17101675392669 13.70101729106628, -89.17092460732984 13.70110662824207, -89.17037172774869 13.70173198847262, -89.17027958115183 13.70182132564842, -89.17018743455498 13.70191066282421, -89.17009528795812 13.702, -89.17000314136125 13.702, -89.16991099476439 13.702, -89.16981884816754 13.702, -89.16972670157068 13.702, -89.16963455497383 13.702, -89.16954240837696 13.702, -89.1694502617801 13.702, -89.16935811518324 13.702, -89.16926596858639 13.702, -89.16917382198953 13.702, -89.16908167539266 13.702, -89.16898952879581 13.702, -89.16889738219895 13.702, -89.16880523560209 13.702, -89.16871308900524 13.702, -89.16862094240837 13.702, -89.16852879581151 13.702, -89.16843664921466 13.702, -89.1683445026178 13.702, -89.16825235602094 13.702, -89.16816020942409 13.702, -89.16806806282722 13.702, -89.16797591623036 13.702, -89.16788376963351 13.702, -89.16779162303665 13.702, -89.16769947643979 13.702, -89.16760732984292 13.702, -89.16751518324607 13.702, -89.16742303664921 13.702, -89.16733089005236 13.702, -89.1672387434555 13.702, -89.16714659685863 13.702, -89.16705445026177 13.702, -89.16696230366492 13.702, -89.16687015706806 13.702, -89.16677801047121 13.702, -89.16668586387435 13.702, -89.16659371727748 13.702, -89.16650157068062 13.702, -89.16640942408377 13.702, -89.16631727748691 13.702, -89.16622513089006 13.702, -89.16613298429318 13.702, -89.16604083769633 13.702, -89.16594869109947 13.702, -89.16585654450262 13.702, -89.16576439790576 13.702, -89.16567225130889 13.702, -89.16558010471203 13.702, -89.16548795811518 
13.702, -89.16539581151832 13.702, -89.16530366492147 13.702, -89.16521151832461 13.702, -89.16511937172774 13.702, -89.16502722513088 13.702, -89.16493507853403 13.702, -89.16484293193717 13.702, -89.16475078534032 13.702, -89.16465863874345 13.702, -89.16456649214659 13.702, -89.16447434554973 13.702, -89.16438219895288 13.702, -89.16429005235602 13.702, -89.16419790575915 13.702, -89.1641057591623 13.702, -89.16401361256544 13.702, -89.16392146596858 13.702, -89.16382931937173 13.702, -89.16373717277487 13.702, -89.163645026178 13.702, -89.16355287958115 13.702, -89.16346073298429 13.702, -89.16336858638743 13.702, -89.16327643979058 13.702, -89.16318429319371 13.702, -89.16309214659685 13.702, -89.163 13.702, -89.163 13.70191066282421, -89.163 13.70182132564842, -89.163 13.70173198847262, -89.163 13.70164265129683, -89.163 13.70155331412104, -89.163 13.70146397694525, -89.163 13.70137463976945, -89.163 13.70128530259366, -89.163 13.70119596541787, -89.163 13.70110662824207, -89.163 13.70101729106628, -89.163 13.70092795389049, -89.163 13.7008386167147, -89.163 13.7007492795389, -89.163 13.70065994236311, -89.163 13.70057060518732, -89.163 13.70048126801153, -89.163 13.70039193083574, -89.163 13.70030259365994, -89.163 13.70021325648415, -89.163 13.70012391930836, -89.163 13.70003458213256, -89.163 13.69994524495677, -89.163 13.69985590778098, -89.163 13.69976657060519, -89.163 13.69967723342939, -89.163 13.6995878962536, -89.163 13.69949855907781, -89.163 13.69940922190202, -89.163 13.69931988472622, -89.163 13.69923054755043, -89.163 13.69914121037464, -89.163 13.69905187319885, -89.163 13.69896253602306, -89.163 13.69887319884726, -89.163 13.69878386167147, -89.163 13.69869452449568, -89.163 13.69860518731988, -89.163 13.69851585014409, -89.163 13.6984265129683, -89.163 13.69833717579251, -89.163 13.69824783861671, -89.163 13.69815850144092, -89.163 13.69806916426513, -89.163 13.69797982708934, -89.163 13.69789048991355, -89.163 13.69780115273775, -89.163 13.69771181556196, -89.163 13.69762247838617, -89.163 13.69753314121037, -89.163 13.69744380403458, -89.163 13.69735446685879, -89.163 13.697265129683, -89.163 13.6971757925072, -89.163 13.69708645533141, -89.163 13.69699711815562, -89.163 13.69690778097983, -89.163 13.69681844380403, -89.163 13.69672910662824, -89.163 13.69663976945245, -89.163 13.69655043227666, -89.163 13.69646109510087, -89.163 13.69637175792507, -89.163 13.69628242074928, -89.163 13.69619308357349, -89.163 13.69610374639769, -89.163 13.6960144092219, -89.163 13.69592507204611, -89.163 13.69583573487032, -89.163 13.69574639769452, -89.163 13.69565706051873, -89.163 13.69556772334294, -89.163 13.69547838616715, -89.163 13.69538904899135, -89.163 13.69529971181556, -89.163 13.69521037463977, -89.163 13.69512103746398, -89.163 13.69503170028818, -89.163 13.69494236311239, -89.163 13.6948530259366, -89.163 13.69476368876081, -89.163 13.69467435158501, -89.163 13.69458501440922, -89.163 13.69449567723343, -89.163 13.69440634005764, -89.163 13.69431700288184, -89.163 13.69422766570605, -89.163 13.69413832853026, -89.163 13.69404899135447, -89.163 13.69395965417867, -89.163 13.69387031700288, -89.163 13.69378097982709, -89.163 13.6936916426513, -89.163 13.6936023054755, -89.163 13.69351296829971, -89.163 13.69342363112392, -89.163 13.69333429394813, -89.163 13.69324495677233, -89.16327643979058 13.69306628242075, -89.16355287958115 13.69288760806916, -89.16382931937173 13.69270893371758, -89.1641057591623 13.69253025936599, -89.16438219895288 13.69235158501441, 
-89.16465863874345 13.69217291066282, -89.16493507853403 13.69199423631124, -89.16521151832461 13.69181556195965, -89.16548795811518 13.69163688760807, -89.16576439790576 13.69145821325648, -89.16604083769633 13.6912795389049, -89.16631727748691 13.69110086455331, -89.16659371727748 13.69092219020173, -89.16677801047121 13.69083285302594, -89.16696230366492 13.69074351585014, -89.16714659685863 13.69065417867435, -89.16733089005236 13.69056484149856, -89.16797591623036 13.69020749279539, -89.16816020942409 13.6901181556196, -89.1683445026178 13.6900288184438, -89.16852879581151 13.68993948126801, -89.16871308900524 13.68985014409222, -89.16889738219895 13.68976080691643, -89.16908167539266 13.68967146974063, -89.16972670157068 13.68931412103746, -89.16991099476439 13.68922478386167, -89.17009528795812 13.68913544668588, -89.17027958115183 13.68904610951009, -89.17046387434554 13.68895677233429, -89.17064816753927 13.6888674351585, -89.17083246073298 13.68877809798271, -89.17147748691099 13.68842074927954, -89.17166178010471 13.68833141210375, -89.17350471204188 13.68681268011527, -89.17378115183246 13.6864553314121, -89.17405759162304 13.68609798270893, -89.17442617801046 13.68565129682997, -89.17470261780105 13.6852939481268, -89.17507120418848 13.68484726224784, -89.17534764397905 13.68448991354467, -89.17571623036649 13.68404322766571, -89.17580837696335 13.68395389048991, -89.17599267015707 13.68386455331412, -89.17617696335078 13.68377521613833, -89.1763612565445 13.68368587896254, -89.17654554973822 13.68359654178674, -89.17672984293193 13.68350720461095, -89.17691413612565 13.68341786743516, -89.17709842931937 13.68332853025936, -89.17728272251308 13.68323919308357, -89.17746701570681 13.68314985590778, -89.17765130890052 13.68306051873199, -89.17783560209423 13.6829711815562, -89.17801989528796 13.6828818443804, -89.17820418848167 13.68279250720461, -89.17884921465968 13.68252449567723, -89.1790335078534 13.68243515850144, -89.17921780104712 13.68234582132565, -89.17940209424084 13.68225648414986, -89.17958638743455 13.68216714697406, -89.17977068062827 13.68207780979827, -89.17995497382199 13.68198847262248, -89.1801392670157 13.68189913544669, -89.18032356020942 13.68180979827089, -89.18050785340314 13.6817204610951, -89.18069214659685 13.68163112391931, -89.18087643979058 13.68154178674352, -89.18152146596859 13.68127377521614, -89.1817057591623 13.68118443804035, -89.18189005235602 13.68109510086455, -89.18198219895288 13.68109510086455, -89.18207434554974 13.68109510086455, -89.18216649214659 13.68109510086455, -89.18225863874345 13.68109510086455, -89.18235078534032 13.68109510086455, -89.18244293193717 13.68109510086455, -89.18253507853403 13.68109510086455, -89.18262722513089 13.68109510086455, -89.18271937172774 13.68109510086455, -89.18281151832461 13.68109510086455, -89.18290366492147 13.68109510086455, -89.18299581151832 13.68109510086455, -89.18308795811518 13.68109510086455, -89.18318010471204 13.68109510086455, -89.18327225130889 13.68109510086455, -89.18336439790576 13.68109510086455, -89.18345654450262 13.68109510086455, -89.18354869109947 13.68109510086455, -89.18364083769633 13.68109510086455, -89.18373298429319 13.68109510086455, -89.18382513089006 13.68109510086455, -89.18806387434554 13.67868299711816, -89.18843246073298 13.67805763688761, -89.18880104712042 13.67743227665706, -89.18889319371728 13.67725360230548, -89.18926178010472 13.67662824207493, -89.18963036649214 13.67600288184438, -89.18972251308901 13.67582420749279, -89.19009109947643 
13.67519884726225, -89.19045968586387 13.6745734870317, -89.19055183246073 13.67439481268011, -89.19092041884817 13.67376945244957, -89.19119685863875 13.6733227665706, -89.19138115183246 13.67323342939481, -89.19156544502617 13.67314409221902, -89.1917497382199 13.67305475504323, -89.1923947643979 13.67278674351585, -89.19257905759163 13.67269740634006, -89.19276335078534 13.67260806916426, -89.19294764397905 13.67251873198847, -89.19313193717278 13.67242939481268, -89.19331623036649 13.67234005763689, -89.1935005235602 13.6722507204611, -89.19368481675393 13.6721613832853, -89.19386910994764 13.67207204610951, -89.19405340314137 13.67198270893372, -89.19469842931937 13.67171469740634, -89.19488272251309 13.67162536023055, -89.19506701570681 13.67153602305475, -89.19525130890052 13.67144668587896, -89.19543560209424 13.67135734870317, -89.19561989528796 13.67126801152738, -89.19580418848167 13.67117867435158, -89.19598848167539 13.67108933717579, -89.19617277486911 13.671, -89.19626492146597 13.671, -89.19635706806282 13.671, -89.19644921465968 13.671, -89.19654136125655 13.671, -89.19663350785341 13.671, -89.19672565445026 13.671, -89.19681780104712 13.671, -89.19690994764397 13.671, -89.19700209424084 13.671, -89.1970942408377 13.671, -89.19718638743456 13.671, -89.19727853403141 13.671, -89.19737068062827 13.671, -89.19746282722512 13.671, -89.19755497382199 13.671, -89.19764712041885 13.671, -89.19773926701571 13.671, -89.19783141361256 13.671, -89.19792356020942 13.671, -89.19801570680629 13.671, -89.19810785340314 13.671, -89.1982 13.671, -89.19829214659686 13.671, -89.19838429319371 13.671, -89.19847643979058 13.671, -89.19856858638744 13.671, -89.19866073298429 13.671, -89.19875287958115 13.671, -89.19884502617801 13.671, -89.19893717277488 13.671, -89.19902931937173 13.671, -89.19912146596859 13.671, -89.19921361256544 13.671, -89.1993057591623 13.671, -89.19939790575916 13.671, -89.19949005235603 13.671, -89.19958219895288 13.671, -89.19967434554974 13.671, -89.19976649214659 13.671, -89.19985863874345 13.671, -89.19995078534032 13.671, -89.20004293193718 13.671, -89.20013507853403 13.671, -89.20022722513089 13.671, -89.20031937172774 13.671, -89.20041151832461 13.671, -89.20050366492147 13.671, -89.20059581151833 13.671, -89.20068795811518 13.671, -89.20078010471204 13.671, -89.20087225130889 13.671, -89.20096439790576 13.671, -89.20105654450262 13.671, -89.20114869109948 13.671, -89.20124083769633 13.671, -89.20133298429319 13.671, -89.20142513089006 13.671, -89.20151727748691 13.671, -89.20160942408377 13.671, -89.20170157068063 13.671, -89.20179371727748 13.671, -89.20188586387435 13.671, -89.20197801047121 13.671, -89.20207015706806 13.671, -89.20216230366492 13.671, -89.20225445026178 13.671, -89.20234659685863 13.671, -89.2024387434555 13.671, -89.20253089005236 13.671, -89.20262303664921 13.671, -89.20271518324607 13.671, -89.20280732984293 13.671, -89.2028994764398 13.671, -89.20299162303665 13.671, -89.20308376963351 13.671, -89.20317591623036 13.671, -89.20326806282722 13.671, -89.20336020942409 13.671, -89.20345235602095 13.671, -89.2035445026178 13.671, -89.20363664921466 13.671, -89.20372879581151 13.671, -89.20382094240837 13.671, -89.20391308900524 13.671, -89.2040052356021 13.671, -89.20409738219895 13.671, -89.20418952879581 13.671, -89.20428167539266 13.671, -89.20437382198953 13.671, -89.20446596858639 13.671, -89.20455811518325 13.671, -89.2046502617801 13.671, -89.20474240837696 13.671, -89.20483455497383 13.671, -89.20492670157068 13.671, 
-89.20501884816754 13.671, -89.2051109947644 13.671, -89.20520314136125 13.671, -89.20529528795812 13.671, -89.20538743455498 13.671, -89.20547958115183 13.671, -89.20557172774869 13.671, -89.20566387434555 13.671, -89.2057560209424 13.671, -89.20584816753927 13.671, -89.20594031413613 13.671, -89.20603246073298 13.671, -89.20612460732984 13.671, -89.2062167539267 13.671, -89.20640104712042 13.67108933717579, -89.20658534031413 13.67117867435158, -89.20676963350786 13.67126801152738, -89.20695392670157 13.67135734870317, -89.20713821989528 13.67144668587896, -89.20723036649214 13.67153602305475, -89.20723036649214 13.67162536023055, -89.20723036649214 13.67171469740634, -89.20723036649214 13.67180403458213, -89.20704607329843 13.67242939481268, -89.20686178010472 13.67305475504323, -89.20667748691099 13.67368011527378, -89.20658534031413 13.67394812680115, -89.20640104712042 13.6745734870317, -89.21036335078534 13.68118443804035, -89.21091623036649 13.68127377521614, -89.21146910994764 13.68136311239193, -89.21211413612565 13.68145244956772, -89.21266701570681 13.68154178674352, -89.21321989528796 13.68163112391931, -89.21377277486911 13.6817204610951, -89.21441780104712 13.68180979827089, -89.2146942408377 13.68189913544669, -89.21487853403141 13.68198847262248, -89.215154973822 13.68216714697406, -89.21543141361256 13.68234582132565, -89.21589214659686 13.68261383285303, -89.21616858638744 13.68279250720461, -89.21644502617801 13.6829711815562, -89.21672146596859 13.68314985590778, -89.21699790575916 13.68332853025936, -89.21727434554974 13.68350720461095, -89.21773507853403 13.68377521613833, -89.21801151832462 13.68395389048991, -89.21828795811518 13.6841325648415, -89.21856439790577 13.68431123919308, -89.21884083769633 13.68448991354467, -89.21911727748692 13.68466858789625, -89.22031518324607 13.68520461095101, -89.22059162303665 13.6852939481268, -89.22086806282722 13.68538328530259, -89.2211445026178 13.68547262247839, -89.22142094240837 13.68556195965418, -89.22169738219895 13.68565129682997, -89.22197382198954 13.68574063400576, -89.2222502617801 13.68582997118156, -89.22252670157069 13.68591930835735, -89.22280314136125 13.68600864553314, -89.22307958115184 13.68609798270893, -89.2233560209424 13.68618731988473, -89.22363246073299 13.68627665706052, -89.22390890052357 13.68636599423631, -89.22768691099476 13.68618731988473, -89.22833193717278 13.68591930835735, -89.22851623036649 13.68582997118156, -89.22870052356021 13.68574063400576, -89.22888481675393 13.68565129682997, -89.22906910994764 13.68556195965418, -89.22925340314137 13.68547262247839, -89.22943769633508 13.68538328530259, -89.22962198952879 13.6852939481268, -89.22980628272252 13.68520461095101, -89.22999057591623 13.68511527377522, -89.23017486910994 13.68502593659942, -89.23035916230367 13.68493659942363, -89.23054345549738 13.68484726224784, -89.23072774869111 13.68475792507205, -89.23091204188482 13.68466858789625, -89.23109633507853 13.68457925072046, -89.23174136125655 13.68431123919308, -89.23192565445027 13.68422190201729, -89.23358429319372 13.6828818443804, -89.23413717277488 13.68225648414986, -89.23469005235603 13.68163112391931, -89.23478219895289 13.68154178674352, -89.23533507853404 13.68091642651297, -89.23588795811519 13.68029106628242, -89.23598010471204 13.68020172910663, -89.2360722513089 13.68011239193084, -89.23616439790577 13.68002305475504, -89.23625654450262 13.67993371757925, -89.23634869109948 13.67984438040346, -89.23644083769634 13.67975504322767, -89.23653298429319 13.67966570605187, 
-89.23662513089006 13.67957636887608, -89.23671727748692 13.67948703170029, -89.23680942408377 13.67939769452449, -89.23690157068063 13.6793083573487, -89.23699371727749 13.67921902017291, -89.23754659685864 13.67859365994236, -89.23763874345551 13.67850432276657, -89.23773089005236 13.67841498559078, -89.23782303664922 13.67832564841498, -89.23791518324607 13.67823631123919, -89.23800732984293 13.6781469740634, -89.2380994764398 13.67805763688761, -89.23819162303666 13.67796829971181, -89.23828376963351 13.67787896253602, -89.23837591623037 13.67778962536023, -89.23846806282722 13.67770028818444, -89.23856020942409 13.67761095100865, -89.23865235602095 13.67752161383285, -89.23874450261781 13.67743227665706, -89.23883664921466 13.67734293948127, -89.23938952879581 13.67671757925072, -89.23948167539267 13.67662824207493, -89.23957382198954 13.67653890489913, -89.23966596858639 13.67644956772334, -89.23975811518325 13.67636023054755, -89.23985026178011 13.67627089337176, -89.23994240837696 13.67618155619597, -89.24003455497383 13.67609221902017, -89.24012670157069 13.67600288184438, -89.24021884816754 13.67591354466859, -89.2403109947644 13.67582420749279, -89.24040314136126 13.675734870317, -89.24049528795813 13.67564553314121, -89.24058743455498 13.67555619596542, -89.24114031413613 13.67493083573487, -89.24123246073299 13.67484149855908, -89.24132460732984 13.67475216138329, -89.2414167539267 13.67466282420749, -89.24150890052357 13.6745734870317, -89.24160104712043 13.67448414985591, -89.24169319371728 13.67439481268011, -89.24178534031414 13.67430547550432, -89.24298324607331 13.67251873198847, -89.24316753926702 13.67207204610951, -89.24335183246073 13.67162536023055, -89.24353612565446 13.67117867435158, -89.24362827225131 13.671, -89.24372041884817 13.671, -89.24381256544503 13.671, -89.24390471204188 13.671, -89.24399685863875 13.671, -89.24408900523561 13.671, -89.24418115183246 13.671, -89.24427329842932 13.671, -89.24436544502618 13.671, -89.24445759162305 13.671, -89.2445497382199 13.671, -89.24464188481676 13.671, -89.24473403141361 13.671, -89.24482617801047 13.671, -89.24491832460734 13.671, -89.2450104712042 13.671, -89.24510261780105 13.671, -89.24519476439791 13.671, -89.24528691099476 13.671, -89.24537905759163 13.671, -89.24547120418849 13.671, -89.24556335078535 13.671, -89.2456554973822 13.671, -89.24574764397906 13.671, -89.24583979057591 13.671, -89.24593193717278 13.671, -89.24602408376964 13.671, -89.2461162303665 13.671, -89.24620837696335 13.671, -89.24630052356021 13.671, -89.24639267015708 13.671, -89.24648481675393 13.671, -89.24657696335079 13.671, -89.24666910994765 13.671, -89.2467612565445 13.671, -89.24685340314137 13.671, -89.24694554973823 13.671, -89.24703769633508 13.671, -89.24712984293194 13.671, -89.2472219895288 13.671, -89.24731413612565 13.671, -89.24740628272252 13.671, -89.24749842931938 13.671, -89.24759057591623 13.671, -89.24768272251309 13.671, -89.24777486910995 13.671, -89.24786701570682 13.671, -89.24795916230367 13.671, -89.24805130890053 13.671, -89.24814345549738 13.671, -89.24823560209424 13.671, -89.24832774869111 13.671, -89.24841989528797 13.671, -89.24851204188482 13.671, -89.24860418848168 13.671, -89.24869633507853 13.671, -89.24878848167539 13.671, -89.24888062827226 13.671, -89.24897277486912 13.671, -89.24906492146597 13.671, -89.24915706806283 13.671, -89.24924921465968 13.671, -89.24934136125655 13.671, -89.24943350785341 13.671, -89.24952565445027 13.671, -89.24961780104712 13.671, -89.24970994764398 13.671, 
-89.24980209424085 13.671, -89.2498942408377 13.671, -89.24998638743456 13.671, -89.25007853403142 13.671, -89.25017068062827 13.671, -89.25026282722513 13.671, -89.250354973822 13.671, -89.25044712041885 13.671, -89.25053926701571 13.671, -89.25063141361257 13.671, -89.25072356020942 13.671, -89.25081570680629 13.671, -89.25090785340315 13.671))"
    shape_poly = wkt.loads(risk_shape)
    shape = gpd.GeoDataFrame()
-    shape['geometry'] = [shape_poly]
-    shape.crs = 'epsg:4326'
+    shape["geometry"] = [shape_poly]
+    shape.crs = "epsg:4326"
    shape.to_crs(epsg=3857, inplace=True)
    ax = shape.plot(figsize=(10, 10), alpha=0.5)
    ax.set_xlim(-9943223.896891385, -9911000.065720687)
    ax.set_ylim(1530712.637786494, 1555600.2891258441)
    ctx.add_basemap(ax, zoom=12, url=ctx.providers.Stamen.Terrain)
-    rect = patches.Rectangle((-9931038.907412536, 1536570.51725147), 4354.653554389253,
-                             2941.9125608841423, linewidth=1, edgecolor='r', facecolor='none')
+    rect = patches.Rectangle(
+        (-9931038.907412536, 1536570.51725147),
+        4354.653554389253,
+        2941.9125608841423,
+        linewidth=1,
+        edgecolor="r",
+        facecolor="none",
+    )
    ax.add_patch(rect)
    ax.set_axis_off()
    fig = ax.get_figure()
-    ax.set_title('Metropolitan Area of San Salvador', fontsize=10)
+    ax.set_title("Metropolitan Area of San Salvador", fontsize=10)
    fig.tight_layout()
    return fig

-from climada.entity import Exposures, Entity
+
+from climada.entity import Entity, Exposures
from climada.hazard import Hazard
+

def load_entity():
-    ent_file = 'FL_entity_Acelhuate_houses.xlsx'
+    ent_file = "FL_entity_Acelhuate_houses.xlsx"
    ent = Entity.from_excel(ent_file)
    ent.exposures.set_geometry_points()
    ent.check()
    return ent

+
+import cartopy.crs as ccrs
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
-import cartopy.crs as ccrs
+
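For orientation on the hunk above: `plot_salvador_ma()` builds its map by parsing the risk shape from WKT, wrapping it in a GeoDataFrame, and reprojecting it from WGS84 (EPSG:4326) to Web Mercator (EPSG:3857) so the geometry lines up with the contextily tiles and the axis limits given in metres. A minimal sketch of that pattern, with a small stand-in rectangle instead of the full risk polygon:

```python
# Sketch of the WKT -> GeoDataFrame -> Web Mercator pattern used above.
# The polygon here is a stand-in, not the real risk shape.
import geopandas as gpd
from shapely import wkt

poly = wkt.loads(
    "POLYGON ((-89.25 13.67, -89.16 13.67, -89.16 13.70, -89.25 13.70, -89.25 13.67))"
)
gdf = gpd.GeoDataFrame(geometry=[poly], crs="epsg:4326")  # WGS84 lon/lat
gdf = gdf.to_crs(epsg=3857)  # metres, matching the basemap tiles
print(gdf.total_bounds)  # extent in Web Mercator metres
```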
""" - #Get the limits of the axis in lat long + # Get the limits of the axis in lat long llx0, llx1, lly0, lly1 = ax.get_extent(ccrs.PlateCarree()) - #Make tmc horizontally centred on the middle of the map, - #vertically at scale bar location + # Make tmc horizontally centred on the middle of the map, + # vertically at scale bar location sbllx = (llx1 + llx0) / 2 sblly = lly0 + (lly1 - lly0) * location[1] tmc = ccrs.TransverseMercator(sbllx, sblly) - #Get the extent of the plotted area in coordinates in metres + # Get the extent of the plotted area in coordinates in metres x0, x1, y0, y1 = ax.get_extent(tmc) - #Turn the specified scalebar location into coordinates in metres + # Turn the specified scalebar location into coordinates in metres sbx = x0 + (x1 - x0) * location[0] sby = y0 + (y1 - y0) * location[1] - #Calculate a scale bar length if none has been given - #(Theres probably a more pythonic way of rounding the number but this works) + # Calculate a scale bar length if none has been given + # (Theres probably a more pythonic way of rounding the number but this works) if not length: - length = (x1 - x0) / 5000 #in km - ndim = int(np.floor(np.log10(length))) #number of digits in number - length = round(length, -ndim) #round to 1sf - #Returns numbers starting with the list + length = (x1 - x0) / 5000 # in km + ndim = int(np.floor(np.log10(length))) # number of digits in number + length = round(length, -ndim) # round to 1sf + + # Returns numbers starting with the list def scale_number(x): - if str(x)[0] in ['1', '2', '5']: return int(x) - else: return scale_number(x - 10 ** ndim) + if str(x)[0] in ["1", "2", "5"]: + return int(x) + else: + return scale_number(x - 10**ndim) + length = scale_number(length) - #Generate the x coordinate for the ends of the scalebar + # Generate the x coordinate for the ends of the scalebar bar_xs = [sbx - length * 500, sbx + length * 500] - #Plot the scalebar - ax.plot(bar_xs, [sby, sby], transform=tmc, color='k', linewidth=linewidth) - #Plot the scalebar label - ax.text(sbx, sby, str(int(length*1000)) + ' m', transform=tmc, - horizontalalignment='center', verticalalignment='bottom') + # Plot the scalebar + ax.plot(bar_xs, [sby, sby], transform=tmc, color="k", linewidth=linewidth) + # Plot the scalebar label + ax.text( + sbx, + sby, + str(int(length * 1000)) + " m", + transform=tmc, + horizontalalignment="center", + verticalalignment="bottom", + ) + def plot_exposure_ss(exposures, point=None): if point is not None: - fig, ax = plt.subplots(figsize=(15, 15), subplot_kw=dict(projection=ccrs.Mercator())) - ax.scatter(exposures.gdf[point:point+1].geometry[:].x, exposures.gdf[point:point+1].geometry[:].y, c='k', - marker='+', s=800) + fig, ax = plt.subplots( + figsize=(15, 15), subplot_kw=dict(projection=ccrs.Mercator()) + ) + ax.scatter( + exposures.gdf[point : point + 1].geometry[:].x, + exposures.gdf[point : point + 1].geometry[:].y, + c="k", + marker="+", + s=800, + ) ax.set_xlim(-9931038.907412536, -9926684.253858147) ax.set_ylim(1536680.51725147, 1539512.429812354) else: # create new map for viviendas - cmap_viv = cm.get_cmap('autumn').resampled(4) - cmap_viv = mpl.colors.LinearSegmentedColormap.from_list('Custom cmap', - [cmap_viv(i) for i in range(cmap_viv.N)], cmap_viv.N) + cmap_viv = cm.get_cmap("autumn").resampled(4) + cmap_viv = mpl.colors.LinearSegmentedColormap.from_list( + "Custom cmap", [cmap_viv(i) for i in range(cmap_viv.N)], cmap_viv.N + ) # create new map for aups - cmap_aup = cm.get_cmap('winter').resampled(4) - cmap_aup = 
-        cmap_aup = mpl.colors.LinearSegmentedColormap.from_list('Custom cmap',
-            [cmap_aup(i) for i in range(cmap_aup.N)], cmap_aup.N)
+        cmap_aup = cm.get_cmap("winter").resampled(4)
+        cmap_aup = mpl.colors.LinearSegmentedColormap.from_list(
+            "Custom cmap", [cmap_aup(i) for i in range(cmap_aup.N)], cmap_aup.N
+        )
 
         # define the bins and normalize
         bounds_aup = np.array([6000, 8800, 10000, 12000, 14600])
@@ -125,34 +157,79 @@ def plot_exposure_ss(exposures, point=None):
         bounds_viv = np.array([7500, 11000, 16500, 33000, 56300])
         norm_viv = mpl.colors.BoundaryNorm(bounds_viv, cmap_viv.N)
 
-        exp_merc_aup = exposures.gdf[exposures.gdf.category==1]
-        exp_merc_house = exposures.gdf[exposures.gdf.category==2]
-
-        fig, ax = plt.subplots(figsize=(15, 15), subplot_kw=dict(projection=ccrs.Mercator()))
-        clr_1 = ax.scatter(exp_merc_aup.geometry[:].x, exp_merc_aup.geometry[:].y, c=exp_merc_aup.value.values,
-                           marker='+', s=25, cmap=cmap_aup, norm=norm_aup)
-        clr_2 = ax.scatter(exp_merc_house.geometry[:].x, exp_merc_house.geometry[:].y, c=exp_merc_house.value.values,
-                           marker='o', s=8, cmap=cmap_viv, norm=norm_viv)
+        exp_merc_aup = exposures.gdf[exposures.gdf.category == 1]
+        exp_merc_house = exposures.gdf[exposures.gdf.category == 2]
+
+        fig, ax = plt.subplots(
+            figsize=(15, 15), subplot_kw=dict(projection=ccrs.Mercator())
+        )
+        clr_1 = ax.scatter(
+            exp_merc_aup.geometry[:].x,
+            exp_merc_aup.geometry[:].y,
+            c=exp_merc_aup.value.values,
+            marker="+",
+            s=25,
+            cmap=cmap_aup,
+            norm=norm_aup,
+        )
+        clr_2 = ax.scatter(
+            exp_merc_house.geometry[:].x,
+            exp_merc_house.geometry[:].y,
+            c=exp_merc_house.value.values,
+            marker="o",
+            s=8,
+            cmap=cmap_viv,
+            norm=norm_viv,
+        )
 
         lines_legend = []
         text_legend = []
         for i_col, x_col in enumerate(np.linspace(0, 1, 4)):
-            lines_legend.append(mpl.lines.Line2D(range(1), range(1), color='white', marker='o', markerfacecolor=cmap_viv(x_col)))
-            text_legend.append(str(bounds_viv[i_col]) + ' - ' + str(bounds_viv[i_col+1]))
-        legend1 = plt.legend(lines_legend, text_legend, numpoints=1, loc=4, title='no AUP housing')
+            lines_legend.append(
+                mpl.lines.Line2D(
+                    range(1),
+                    range(1),
+                    color="white",
+                    marker="o",
+                    markerfacecolor=cmap_viv(x_col),
+                )
+            )
+            text_legend.append(
+                str(bounds_viv[i_col]) + " - " + str(bounds_viv[i_col + 1])
+            )
+        legend1 = plt.legend(
+            lines_legend, text_legend, numpoints=1, loc=4, title="no AUP housing"
+        )
 
         lines_legend = []
         text_legend = []
         for i_col, x_col in enumerate(np.linspace(0, 1, 4)):
-            lines_legend.append(mpl.lines.Line2D(range(1), range(1), color=cmap_aup(x_col), marker='+', markerfacecolor=cmap_aup(x_col)))
-            text_legend.append(str(bounds_aup[i_col]) + ' - ' + str(bounds_aup[i_col+1]))
-        plt.legend(lines_legend, text_legend, numpoints=1, loc=3, title='AUP housing')
+            lines_legend.append(
+                mpl.lines.Line2D(
+                    range(1),
+                    range(1),
+                    color=cmap_aup(x_col),
+                    marker="+",
+                    markerfacecolor=cmap_aup(x_col),
+                )
+            )
+            text_legend.append(
+                str(bounds_aup[i_col]) + " - " + str(bounds_aup[i_col + 1])
+            )
+        plt.legend(lines_legend, text_legend, numpoints=1, loc=3, title="AUP housing")
         plt.gca().add_artist(legend1)
 
-    ctx.add_basemap(ax, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, origin='upper')
+    ctx.add_basemap(ax, zoom=15, url=ctx.providers.OpenStreetMap.Mapnik, origin="upper")
     scale_bar(ax, 0.5, location=(0.93, 0.4), linewidth=2)
-    rect = patches.Rectangle((-9931033.307412536, 1536686.51725147), 4345.053554389253,
-                             2934.0125608841423, linewidth=2, edgecolor='r', facecolor='none', zorder=200)
+    rect = patches.Rectangle(
+        (-9931033.307412536, 1536686.51725147),
+        4345.053554389253,
+        2934.0125608841423,
+        linewidth=2,
+        edgecolor="r",
+        facecolor="none",
+        zorder=200,
+    )
     ax.add_patch(rect)
     ax.set_axis_off()
     if point is not None:
@@ -162,37 +239,41 @@ def plot_exposure_ss(exposures, point=None):
     # fig.savefig('ss_points.png', format='png', bbox_inches='tight')
     return fig
 
+
 def flooding_aup_if(impact_funcs):
-    mdd = impact_funcs.get_func('FL', 101).mdd
-    intensity = impact_funcs.get_func('FL', 101).intensity
+    mdd = impact_funcs.get_func("FL", 101).mdd
+    intensity = impact_funcs.get_func("FL", 101).intensity
     fig, ax = plt.subplots()
-    ax.set_xlabel('Intensity (m)')
-    ax.set_ylabel('Mean Damage Ratio (%)')
-    ax.set_title('Impact Function - AUP flooding')
-    ax.plot(intensity, mdd*100)
+    ax.set_xlabel("Intensity (m)")
+    ax.set_ylabel("Mean Damage Ratio (%)")
+    ax.set_title("Impact Function - AUP flooding")
+    ax.plot(intensity, mdd * 100)
     fig.set_size_inches(4.5, 4.5)
-    #fig.savefig('if_house_aup.png', format='png', bbox_inches='tight')
+    # fig.savefig('if_house_aup.png', format='png', bbox_inches='tight')
     return fig
 
+
 import pandas as pd
 
+
 def load_accounting():
     acc = pd.DataFrame()
-    acc['Return Period (year)'] = np.array([10, 25, 50, 100])
-    acc['frequency (1/year)'] = np.array([1/10, 1/25, 1/50, 1/100])
-    acc['intensity (m)'] = np.array([0.7744541, 2.820973, 4.828216, 5.742804])
-    acc['Mean Damage Ration (%)'] = np.array([51.83603012, 100, 100, 100])
-    acc['impact (USD)'] = np.array([4786.95371, 9234.8, 9234.8, 9234.8])
-    acc['frequency * impact'] = np.array([478.695371, 369.392, 184.696, 92.348])
-    acc['Expected Annual Impact'] = np.ones(4)*np.nan
-    acc['Expected Annual Impact'].values[0] = 1125.131371
-    #acc_file = 'accounting.xlsx'
-    #acc_df = pd.read_excel(acc_file)
+    acc["Return Period (year)"] = np.array([10, 25, 50, 100])
+    acc["frequency (1/year)"] = np.array([1 / 10, 1 / 25, 1 / 50, 1 / 100])
+    acc["intensity (m)"] = np.array([0.7744541, 2.820973, 4.828216, 5.742804])
+    acc["Mean Damage Ratio (%)"] = np.array([51.83603012, 100, 100, 100])
+    acc["impact (USD)"] = np.array([4786.95371, 9234.8, 9234.8, 9234.8])
+    acc["frequency * impact"] = np.array([478.695371, 369.392, 184.696, 92.348])
+    # The expected annual impact is the frequency-weighted sum of the impacts:
+    # 478.695371 + 369.392 + 184.696 + 92.348 = 1125.131371
+    acc["Expected Annual Impact"] = np.ones(4) * np.nan
+    acc["Expected Annual Impact"].values[0] = 1125.131371
+    # acc_file = 'accounting.xlsx'
+    # acc_df = pd.read_excel(acc_file)
     acc.index += 1
     return acc
 
+
 def generate_plots_risk():
     fig_ma = plot_salvador_ma()
     ent = load_entity()
@@ -205,26 +286,26 @@ def generate_plots_risk():
     return fig_ma, fig_point, fig_houses, fig_if
 
+
 def non_linear_growth(cb_acel):
-    risk_present = 3.562753447707e+06
-    risk_future = 7.578426440635e+06
+    risk_present = 3.562753447707e06
+    risk_future = 7.578426440635e06
 
-    x_var = np.arange(cb_acel.present_year, cb_acel.future_year+1)
+    x_var = np.arange(cb_acel.present_year, cb_acel.future_year + 1)
 
     time_dep = cb_acel._time_dependency_array(0.5)
-    y_sqr = risk_present + (risk_future-risk_present) * time_dep
+    y_sqr = risk_present + (risk_future - risk_present) * time_dep
 
     time_dep = cb_acel._time_dependency_array(1.0)
-    y_lin = risk_present + (risk_future-risk_present) * time_dep
+    y_lin = risk_present + (risk_future - risk_present) * time_dep
 
     time_dep = cb_acel._time_dependency_array(2.0)
-    y_quad = risk_present + (risk_future-risk_present) * time_dep
+    y_quad = risk_present + (risk_future - risk_present) * time_dep
 
-    plt.bar(x_var, y_sqr, color='green', label='sublinear')
-    plt.bar(x_var, y_lin, color='blue', label='linear')
-    plt.bar(x_var, y_quad, color='red', label='superlinear')
-    plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
+    plt.bar(x_var, y_sqr, color="green", label="sublinear")
+    plt.bar(x_var, y_lin, color="blue", label="linear")
+    plt.bar(x_var, y_quad, color="red", label="superlinear")
+    plt.ticklabel_format(style="sci", axis="y", scilimits=(0, 0))
     plt.ylim(3.0e6, 7.8e6)
-    plt.xlabel('Year')
-    plt.ylabel('Expected Annual Impact')
+    plt.xlabel("Year")
+    plt.ylabel("Expected Annual Impact")
     plt.legend()
-
diff --git a/script/jenkins/set_config.py b/script/jenkins/set_config.py
index 406eabb5e9..75c4a16959 100644
--- a/script/jenkins/set_config.py
+++ b/script/jenkins/set_config.py
@@ -1,12 +1,12 @@
-import sys
 import json
+import sys
 
 key = sys.argv[1]
 val = sys.argv[2]
-jsonfile = 'climada.conf'
+jsonfile = "climada.conf"
 
-with open(jsonfile, encoding='UTF-8') as inf:
+with open(jsonfile, encoding="UTF-8") as inf:
     data = json.load(inf)
 data[key] = val
-with open(jsonfile, 'w', encoding='UTF-8') as outf:
+with open(jsonfile, "w", encoding="UTF-8") as outf:
     json.dump(data, outf)
diff --git a/script/jenkins/test_data_api.py b/script/jenkins/test_data_api.py
index 42e9103744..38eec4cd30 100644
--- a/script/jenkins/test_data_api.py
+++ b/script/jenkins/test_data_api.py
@@ -19,31 +19,36 @@
 Test files_handler module.
 """
+import datetime as dt
+import unittest
 from pathlib import Path
 from sys import dont_write_bytecode
 
-import pandas as pd
-import unittest
-import xmlrunner
-import datetime as dt
 import numpy as np
+import pandas as pd
+import xmlrunner
 from pandas_datareader import wb
 
 from climada import CONFIG
 from climada.entity.exposures.litpop.nightlight import BM_FILENAMES, download_nl_files
-from climada.hazard.tc_tracks import IBTRACS_URL, IBTRACS_FILE
-from climada.util.finance import WORLD_BANK_WEALTH_ACC, WORLD_BANK_INC_GRP
-from climada.util.dwd_icon_loader import (download_icon_grib,
-                                          delete_icon_grib,
-                                          download_icon_centroids_file)
+from climada.hazard.tc_tracks import IBTRACS_FILE, IBTRACS_URL
+from climada.util.dwd_icon_loader import (
+    delete_icon_grib,
+    download_icon_centroids_file,
+    download_icon_grib,
+)
 from climada.util.files_handler import download_file, download_ftp
+from climada.util.finance import WORLD_BANK_INC_GRP, WORLD_BANK_WEALTH_ACC
+
 
 class TestDataAvail(unittest.TestCase):
     """Test availability of data used through APIs"""
 
     def test_noaa_nl_pass(self):
         """Test NOAA nightlights used in BlackMarble."""
-        file_down = download_file(f'{CONFIG.exposures.litpop.nightlights.noaa_url.str()}/F101992.v4.tar')
+        file_down = download_file(
+            f"{CONFIG.exposures.litpop.nightlights.noaa_url.str()}/F101992.v4.tar"
+        )
         Path(file_down).unlink()
 
     def test_nasa_nl_pass(self):
@@ -72,11 +77,11 @@ def test_wb_lev_hist_pass(self):
 
     def test_wb_api_pass(self):
         """Test World Bank API"""
-        wb.download(indicator='NY.GDP.MKTP.CD', country='CHE', start=1960, end=2030)
+        wb.download(indicator="NY.GDP.MKTP.CD", country="CHE", start=1960, end=2030)
 
     def test_ne_api_pass(self):
         """Test Natural Earth API"""
-        url = 'https://naturalearth.s3.amazonaws.com/10m_cultural/ne_10m_admin_0_countries.zip'
+        url = "https://naturalearth.s3.amazonaws.com/10m_cultural/ne_10m_admin_0_countries.zip"
         file_down = download_file(url)
         Path(file_down).unlink()
 
@@ -87,41 +92,41 @@ def test_ibtracs_pass(self):
     def test_icon_eu_forecast_download(self):
         """Test availability of DWD icon forecast."""
         run_datetime = dt.datetime.utcnow() - dt.timedelta(hours=5)
-        run_datetime = run_datetime.replace(hour=run_datetime.hour//12*12,
-                                            minute=0,
-                                            second=0,
-                                            microsecond=0)
-        icon_file = download_icon_grib(run_datetime,max_lead_time=1)
+        run_datetime = run_datetime.replace(
+            hour=run_datetime.hour // 12 * 12, minute=0, second=0, microsecond=0
+        )
+        icon_file = download_icon_grib(run_datetime, max_lead_time=1)
         self.assertEqual(len(icon_file), 1)
-        delete_icon_grib(run_datetime,max_lead_time=1) #deletes icon_file
+        delete_icon_grib(run_datetime, max_lead_time=1)  # deletes icon_file
         self.assertFalse(Path(icon_file[0]).exists())
 
     def test_icon_d2_forecast_download(self):
         """Test availability of DWD icon forecast."""
         run_datetime = dt.datetime.utcnow() - dt.timedelta(hours=5)
-        run_datetime = run_datetime.replace(hour=run_datetime.hour//12*12,
-                                            minute=0,
-                                            second=0,
-                                            microsecond=0)
-        icon_file = download_icon_grib(run_datetime,
-                                       model_name='icon-d2-eps',
-                                       max_lead_time=1)
+        run_datetime = run_datetime.replace(
+            hour=run_datetime.hour // 12 * 12, minute=0, second=0, microsecond=0
+        )
+        icon_file = download_icon_grib(
+            run_datetime, model_name="icon-d2-eps", max_lead_time=1
+        )
         self.assertEqual(len(icon_file), 1)
-        delete_icon_grib(run_datetime,
-                         model_name='icon-d2-eps',
-                         max_lead_time=1) #deletes icon_file
+        delete_icon_grib(
+            run_datetime, model_name="icon-d2-eps", max_lead_time=1
+        )  # deletes icon_file
         self.assertFalse(Path(icon_file[0]).exists())
 
     def test_icon_centroids_download(self):
         """Test availability of DWD icon grid information."""
         grid_file = download_icon_centroids_file()
         Path(grid_file).unlink()
-        grid_file = download_icon_centroids_file(model_name='icon-d2-eps')
+        grid_file = download_icon_centroids_file(model_name="icon-d2-eps")
         Path(grid_file).unlink()
 
+
 # Execute Tests
-if __name__ == '__main__':
+if __name__ == "__main__":
     TESTS = unittest.TestLoader().loadTestsFromTestCase(TestDataAvail)
     from sys import argv
-    outputdir = argv[1] if len(argv) > 1 else str(Path.cwd().joinpath('tests_xml'))
+
+    outputdir = argv[1] if len(argv) > 1 else str(Path.cwd().joinpath("tests_xml"))
     xmlrunner.XMLTestRunner(output=outputdir).run(TESTS)
diff --git a/script/jenkins/test_notebooks.py b/script/jenkins/test_notebooks.py
index bb0420194c..f2e4fcdbcc 100644
--- a/script/jenkins/test_notebooks.py
+++ b/script/jenkins/test_notebooks.py
@@ -6,20 +6,20 @@
 import sys
 import unittest
 from pathlib import Path
+
 import nbformat
 
 import climada
 
+BOUND_TO_FAIL = "# Note: execution of this cell will fail"
+"""Cells containing this line will not be executed in the test"""
 
-BOUND_TO_FAIL = '# Note: execution of this cell will fail'
-'''Cells containing this line will not be executed in the test'''
-
-EXCLUDED_FROM_NOTEBOOK_TEST = ['climada_installation_step_by_step.ipynb']
-'''These notebooks are excluded from being tested'''
+EXCLUDED_FROM_NOTEBOOK_TEST = ["climada_installation_step_by_step.ipynb"]
+"""These notebooks are excluded from being tested"""
 
 
 class NotebookTest(unittest.TestCase):
-    '''Generic TestCase for testing the executability of notebooks
+    """Generic TestCase for testing the executability of notebooks
 
     Attributes
     ----------
@@ -28,7 +28,7 @@ class NotebookTest(unittest.TestCase):
     notebook : str
         File name of the notebook.
 
-    '''
+    """
 
     def __init__(self, methodName, wd=None, notebook=None):
         super(NotebookTest, self).__init__(methodName)
@@ -36,64 +36,81 @@ def __init__(self, methodName, wd=None, notebook=None):
         self.notebook = notebook
 
     def test_notebook(self):
-        '''Extracts code cells from the notebook and executes them one by one, using `exec`.
+        """Extracts code cells from the notebook and executes them one by one, using `exec`.
 
        Magic lines and help/? calls are eliminated.
        Cells containing `BOUND_TO_FAIL` are elided.
-        Cells doing multiprocessing are elided.'''
+        Cells doing multiprocessing are elided."""
 
         cwd = Path.cwd()
         try:
             # cd to the notebook directory
             os.chdir(self.wd)
-            print(f'start testing {self.notebook}')
+            print(f"start testing {self.notebook}")
 
             # read the notebook into a string
-            with open(self.notebook, encoding='utf8') as nb:
+            with open(self.notebook, encoding="utf8") as nb:
                 content = nb.read()
 
             # parse the string with nbformat.reads
-            cells = nbformat.reads(content, 4)['cells']
+            cells = nbformat.reads(content, 4)["cells"]
 
             # create namespace with IPython standards
             namespace = dict()
-            exec('from IPython.display import display', namespace)
+            exec("from IPython.display import display", namespace)
 
             # run all cells
             i = 0
             for c in cells:
 
                 # skip markdown cells
-                if c['cell_type'] != 'code': continue
+                if c["cell_type"] != "code":
+                    continue
                 i += 1
 
                 # skip deliberately failing cells
-                if BOUND_TO_FAIL in c['source']: continue
+                if BOUND_TO_FAIL in c["source"]:
+                    continue
 
                 # skip multiprocessing cells
-                if any([ tabu in c['source'].split() for tabu in [
-                    'import multiprocessing',
-                    'from multiprocessing import',
-                ]]):
-                    print('\n'.join([
-                        f'\nskip multiprocessing cell {i} in {self.notebook}',
-                        '+'+'-'*68+'+',
-                        c['source']
-                    ]))
+                if any(
+                    [
+                        tabu in c["source"].split()
+                        for tabu in [
+                            "import multiprocessing",
+                            "from multiprocessing import",
+                        ]
+                    ]
+                ):
+                    print(
+                        "\n".join(
+                            [
+                                f"\nskip multiprocessing cell {i} in {self.notebook}",
+                                "+" + "-" * 68 + "+",
+                                c["source"],
+                            ]
+                        )
+                    )
                     continue
 
                 # remove non python lines and help calls which require user input
                 # or involve pools being opened/closed
-                python_code = "\n".join([
-                    re.sub(r'pool=\w+', 'pool=None', ln)
-                    for ln in c['source'].split("\n")
-                    if not ln.startswith('%')
-                    and not ln.startswith('help(')
-                    and not ln.startswith('ask_ok(')
-                    and not ln.startswith('ask_ok(')
-                    and not ln.startswith('pool') # by convention Pool objects are called pool
-                    and not ln.strip().endswith('?')
-                    and not re.search(r'(\W|^)Pool\(', ln) # prevent Pool object creation
-                ])
+                python_code = "\n".join(
+                    [
+                        re.sub(r"pool=\w+", "pool=None", ln)
+                        for ln in c["source"].split("\n")
+                        if not ln.startswith("%")
+                        and not ln.startswith("help(")
+                        and not ln.startswith("ask_ok(")
+                        and not ln.startswith(
+                            "pool"
+                        )  # by convention Pool objects are called pool
+                        and not ln.strip().endswith("?")
+                        and not re.search(
+                            r"(\W|^)Pool\(", ln
+                        )  # prevent Pool object creation
+                    ]
+                )
 
                 # execute the python code
                 try:
@@ -101,53 +118,60 @@ def test_notebook(self):
 
                 # report failures
                 except Exception as e:
-                    failure = "\n".join([
-                        f"notebook {self.notebook} cell {i} failed with {e.__class__}",
-                        f"{e}",
-                        '+'+'-'*68+'+',
-                        c['source']
-                    ])
-                    print(f'failed {self.notebook}')
+                    failure = "\n".join(
+                        [
+                            f"notebook {self.notebook} cell {i} failed with {e.__class__}",
+                            f"{e}",
+                            "+" + "-" * 68 + "+",
+                            c["source"],
+                        ]
+                    )
+                    print(f"failed {self.notebook}")
                     print(failure)
                     self.fail(failure)
 
-            print(f'succeeded {self.notebook}')
+            print(f"succeeded {self.notebook}")
         finally:
             os.chdir(cwd)
 
 
 def main(install_dir):
     import xmlrunner
-
+
     sys.path.append(str(install_dir))
-
-    notebook_dir = install_dir.joinpath('doc', 'tutorial')
-    '''The path to the notebook directories.'''
+
+    notebook_dir = install_dir.joinpath("doc", "tutorial")
+    """The path to the notebook directories."""
 
     # list notebooks in the NOTEBOOK_DIR
-    notebooks = [f.absolute()
-                 for f in sorted(notebook_dir.iterdir())
-                 if os.path.splitext(f)[1] == ('.ipynb')
-                 and not f.name in EXCLUDED_FROM_NOTEBOOK_TEST]
+    notebooks = [
+        f.absolute()
+        for f in sorted(notebook_dir.iterdir())
+        if os.path.splitext(f)[1] == (".ipynb")
+        and not f.name in EXCLUDED_FROM_NOTEBOOK_TEST
+    ]
 
     # build a test suite with a test for each notebook
     suite = unittest.TestSuite()
     for notebook in notebooks:
-        class NBTest(NotebookTest): pass
+
+        class NBTest(NotebookTest):
+            pass
+
         test_name = "_".join(notebook.stem.split())
         setattr(NBTest, test_name, NBTest.test_notebook)
         suite.addTest(NBTest(test_name, notebook.parent, notebook.name))
 
     # run the tests and write xml reports to tests_xml
-    output_dir = install_dir.joinpath('tests_xml')
+    output_dir = install_dir.joinpath("tests_xml")
     xmlrunner.XMLTestRunner(output=str(output_dir)).run(suite)
 
 
-if __name__ == '__main__':
-    if sys.argv[1] == 'report':
+if __name__ == "__main__":
+    if sys.argv[1] == "report":
         install_dir = Path(sys.argv[2]) if len(sys.argv) > 2 else Path.cwd()
         main(install_dir)
-
+
     else:
         jd, nb = os.path.split(sys.argv[1])
-        unittest.TextTestRunner(verbosity=2).run(NotebookTest('test_notebook', jd, nb))
+        unittest.TextTestRunner(verbosity=2).run(NotebookTest("test_notebook", jd, nb))
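
For reference, the single-notebook branch of the `__main__` block above can also be driven from Python; a minimal sketch (the import assumes script/jenkins is on sys.path, and the notebook name is illustrative):

import unittest

from test_notebooks import NotebookTest  # assumes script/jenkins is on sys.path

# mirrors `python script/jenkins/test_notebooks.py doc/tutorial/<name>.ipynb`
runner = unittest.TextTestRunner(verbosity=2)
runner.run(NotebookTest("test_notebook", "doc/tutorial", "climada_overview.ipynb"))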