Commit
* formatted files, created a script to move the hydrolakes shp file into zarr
* added reservoir code
Showing 9 changed files with 203 additions and 64 deletions.
@@ -0,0 +1,62 @@
import logging
from pathlib import Path

import geopandas as gpd
import numpy as np
import zarr
from omegaconf import DictConfig
from tqdm import tqdm

log = logging.getLogger(name=__name__)


def _map_lake_points(cfg: DictConfig, edges: zarr.Group) -> None:
    """Read a GeoDataFrame of HydroLAKES information, find each lake's corresponding edge, then save the data.

    Parameters
    ----------
    cfg: DictConfig
        The configuration object
    edges: zarr.Group
        The zarr group containing the edges
    """
    data_path = Path(cfg.map_lake_points.lake_points)
    if not data_path.exists():
        msg = "Cannot find the lake points file"
        log.exception(msg)
        raise FileNotFoundError(msg)
    gdf = gpd.read_file(data_path)
    lake_comids = gdf["COMID"].astype(int).values
    edges_comids: np.ndarray = edges["merit_basin"][:].astype(np.int32)  # type: ignore

    # Lake attributes default to -1 for edges with no mapped lake
    hylak_id = np.full(len(edges_comids), -1, dtype=np.int32)
    grand_id = np.full_like(edges_comids, -1, dtype=np.int32)
    lake_area = np.full_like(edges_comids, -1, dtype=np.float32)
    vol_total = np.full_like(edges_comids, -1, dtype=np.float32)
    depth_avg = np.full_like(edges_comids, -1, dtype=np.float32)

    for idx, lake_id in enumerate(tqdm(
        lake_comids,
        desc="Mapping Lake COMIDS to edges",
        ncols=140,
        ascii=True,
    )):
        jdx = np.where(edges_comids == lake_id)[0]
        if not jdx.size:
            log.info(f"No lake found for COMID {lake_id}")
        else:
            # Assuming the pour point is at the end of the COMID
            edge_idx = jdx[-1]
            lake_row = gdf.iloc[idx]
            hylak_id[edge_idx] = lake_row["Hylak_id"]
            grand_id[edge_idx] = lake_row["Grand_id"]
            lake_area[edge_idx] = lake_row["Lake_area"]
            vol_total[edge_idx] = lake_row["Vol_total"]
            depth_avg[edge_idx] = lake_row["Depth_avg"]

    edges.array("hylak_id", data=hylak_id)
    edges.array("grand_id", data=grand_id)
    edges.array("lake_area", data=lake_area)
    edges.array("vol_total", data=vol_total)
    edges.array("depth_avg", data=depth_avg)

    log.info("Wrote Lake data for zones to zarr")
@@ -2,3 +2,5 @@
cudf-cu12==24.6.*
dask-cudf-cu12==24.6.*
cugraph-cu12==24.6.*
cuspatial-cu12==24.6.*
cuproj-cu12==24.6.*
@@ -0,0 +1,59 @@
import argparse
from pathlib import Path

import geopandas as gpd
import numcodecs
import zarr
from shapely.wkt import dumps
from tqdm import tqdm


def split_hydrolakes(input_path: Path, output_path: Path) -> None:
    print(f"Reading gdf file: {input_path}")
    gdf = gpd.read_file(filename=input_path)

    print("Writing geometries")
    geometries = gdf["geometry"].apply(lambda geom: dumps(geom)).values

    # Create a Zarr store
    root: zarr.Group = zarr.open_group(output_path, mode="a")

    # Create datasets for each column
    for column in tqdm(
        gdf.columns,
        desc="writing gdf to zarr",
        ncols=140,
        ascii=True,
    ):
        if column == "geometry":
            # Geometries are stored as WKT strings with a variable-length UTF-8 codec
            root.array(column, data=geometries, dtype=object, object_codec=numcodecs.VLenUTF8())
            root.attrs["crs"] = gdf.crs.to_string()
        else:
            data = gdf[column].values
            if data.dtype == "O":
                # Saving object (string) columns with the same variable-length codec
                root.array(column, data=data, dtype=object, object_codec=numcodecs.VLenUTF8())
            else:
                root.array(column, data=data)

    print(f"Processing complete! Zarr store saved to: {output_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Convert a shapefile to a Zarr store")
    parser.add_argument("input_shp", type=Path, help="Path to the input shapefile")
    parser.add_argument(
        "output_zarr", type=Path, help="Path to save the output Zarr store"
    )

    args = parser.parse_args()

    input_path = Path(args.input_shp)
    output_path = Path(args.output_zarr)
    if not input_path.exists():
        raise FileNotFoundError(f"Input shapefile not found: {input_path}")
    if output_path.exists():
        raise FileExistsError(f"Output Zarr store already exists: {output_path}")

    split_hydrolakes(input_path, output_path)
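
For a quick round-trip check, the Zarr store written by split_hydrolakes can be loaded back into a GeoDataFrame. This is a minimal sketch, assuming the store was produced by the script above; the store path is illustrative.

import geopandas as gpd
import pandas as pd
import shapely.wkt
import zarr

# Open the store written by split_hydrolakes (path is an assumption).
root = zarr.open_group("hydrolakes.zarr", mode="r")

# Rebuild the attribute columns, then parse the WKT geometry strings back to shapes.
columns = {name: root[name][:] for name in root.array_keys() if name != "geometry"}
geometry = [shapely.wkt.loads(wkt) for wkt in root["geometry"][:]]

gdf = gpd.GeoDataFrame(pd.DataFrame(columns), geometry=geometry, crs=root.attrs["crs"])
print(gdf.head())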
@@ -4,6 +4,7 @@
import numpy as np
import pytest


@pytest.fixture
def sample_gage_cfg():
    with hydra.initialize(