
Commit

Fix style and add opacity data (#34)
- Fix C++ and Python style
- Add stellar flux and absorption data read functions
chengcli committed Mar 9, 2024
1 parent b26bf40 commit 98ab555
Showing 53 changed files with 1,235 additions and 468 deletions.
3 changes: 1 addition & 2 deletions cmake/application.cmake
@@ -4,9 +4,8 @@ set(FETCHCONTENT_QUIET FALSE)

FetchContent_Declare(
  application
  # GIT_REPOSITORY https://github.com/chengcli/application/ GIT_TAG cli/flush)
  DOWNLOAD_EXTRACT_TIMESTAMP TRUE
-  URL https://github.com/chengcli/application/archive/refs/tags/v0.6.2.tar.gz)
+  URL https://github.com/chengcli/application/archive/refs/tags/v0.7.tar.gz)

FetchContent_MakeAvailable(application)

12 changes: 12 additions & 0 deletions data/.gitignore
@@ -0,0 +1,12 @@
# hitran par file
*.par

# datafile
*.txt
*.hit

# data folder
*/

# tmp files
.*
1 change: 1 addition & 0 deletions data/check_data_integrity.sh
@@ -5,3 +5,4 @@ $1/fetch_hitran2020.sh $1
$1/fetch_jup_midir_H2broaden.sh $1
$1/fetch_jup_atm_moses_modelc.sh $1
$1/fetch_H2-He-cia.sh $1
+$1/fetch_exogcm_opacity.sh $1
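Each fetch script takes the data directory as its only argument ($1). A minimal usage sketch, assuming the scripts live in data/ and are invoked from the repository root:

# Fetch and verify every dataset against data/checksums.txt
bash data/check_data_integrity.sh data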
1 change: 1 addition & 0 deletions data/checksums.txt
@@ -10,3 +10,4 @@ f522657383a3142d881ffc18814cc5b32b908068a77c28b3e6823292d1b624fd H2-He-eq.xiz.t
0f124edb384b24d84ab4c67f26cc5756130e6cbab2d804aa302168cd9c44dc19 H2-He-nm.orton.txt
d086c58045aeabd17f3d77efa78f477805785e7fd420e1f488fed8043f17b927 H2-He-nm.xiz.txt
e9da44380f416b2cff7f217abbe0d7c7c3e2123348919ec662facb183d7e5d1b HITRAN2020.par
+35cb867f737d50bb4a89e4cd46b37a6f3a33f487857944b107f24df5c825e98b ck_data_01242024
47 changes: 47 additions & 0 deletions data/fetch_exogcm_opacity.sh
@@ -0,0 +1,47 @@
#! /bin/bash
#
# Download exogcm opacity data from Dropbox
FILE="ck_data_01242024"
DATA_DIR=$1/

# Dropbox link
DROPBOX_LINK="https://www.dropbox.com/scl/fi/tt2vez9jadfcq3j0wdw8x/ck_data_01242024.tar.gz?rlkey=dv7tmqpfy2uvs0u5lt2wz3c4k&dl=0"

# Read the expected SHA256 from the checksum file
EXPECTED_SHA256=$(grep "$FILE" ${DATA_DIR}checksums.txt | awk '{print $1}')

# Check that the SHA256 was found
if [ -z "$EXPECTED_SHA256" ]; then
  echo "Cannot find SHA256 for $FILE in ${DATA_DIR}checksums.txt"
  exit 1
fi

# Check if the folder exists
if [ -d "${DATA_DIR}$FILE" ]; then
  echo "$FILE exists. Checking SHA256..."

  # Calculate the SHA256 of the existing folder
  SHA256=$(checksumdir -a sha256 ${DATA_DIR}$FILE)

  # Compare the calculated SHA256 with the expected one
  if [ "$SHA256" == "$EXPECTED_SHA256" ]; then
    echo "SHA256 is correct. No need to download the file."
  else
    echo "SHA256 is not correct. Need to download the file..."
  fi
else
  echo "$FILE does not exist. Need to download the file..."
fi

# Download and extract the file if it does not exist or its SHA256 is incorrect
if [ ! -d "${DATA_DIR}$FILE" ] || [ "$SHA256" != "$EXPECTED_SHA256" ]; then
  wget -q --show-progress -O ${DATA_DIR}${FILE}.tar.gz $DROPBOX_LINK
  if tar -xzvf ${DATA_DIR}${FILE}.tar.gz -C ${DATA_DIR}; then
    echo "Successfully extracted ${FILE}.tar.gz"
    rm ${DATA_DIR}${FILE}.tar.gz
    echo "Removed ${FILE}.tar.gz"
  else
    echo "Failed to extract ${FILE}.tar.gz"
    exit 1
  fi
fi
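The directory checksum above relies on the checksumdir command-line tool, a PyPI package that hashes a whole directory tree rather than a single file. A minimal standalone usage sketch, assuming the script is invoked from the repository root and data/ holds checksums.txt:

# Install the directory-hashing tool the script calls
pip install checksumdir
# $1 is the data directory; the script appends the trailing slash itself
bash data/fetch_exogcm_opacity.sh data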
2 changes: 1 addition & 1 deletion doc/README-exo2.md
@@ -88,4 +88,4 @@ ox3_bc = periodic # outer-X3 boundary flag

In the mesh, the values of x2min, x2max, x3min, and x3max are not important, but make sure that the boundary conditions for x2 and x3 are set to `periodic`.
The way we allocate the grids in Athena places the panels 2 by 3 in the mesh. So if you want to run a simulation with N by N grids in each panel, $`N\times N\times 6`$ cells in total, specify `nx2` to be $`2N`$ and `nx3` to be $`3N`$.
Each panel must be divided into an equal number of meshblocks along the `x2` and `x3` directions, because the `x2` direction in one panel potentially becomes the `x3` direction in another. Additionally, the number of meshblocks must equal the number of processors used; this requirement comes from the MPI communication in the implementation.
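To make the sizing concrete, here is a minimal sketch with hypothetical numbers (N = 48), assuming the standard Athena-style <mesh> input names; only ox3_bc appears verbatim above:

<mesh>
nx2 = 96    # 2N
nx3 = 144   # 3N
ix2_bc = periodic   # inner-X2 boundary flag
ox2_bc = periodic   # outer-X2 boundary flag
ix3_bc = periodic   # inner-X3 boundary flag
ox3_bc = periodic   # outer-X3 boundary flag

This yields 48 by 48 grids in each panel (48 x 48 x 6 cells in total), and the meshblock counts along x2 and x3 must split 96 and 144 into equal pieces, with one meshblock per MPI rank.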
132 changes: 81 additions & 51 deletions examples/2023-Chen-exo3/calc_avg.py
@@ -1,15 +1,14 @@
-
import numpy as np
from netCDF4 import Dataset
from scipy.interpolate import interp1d
import os
from tqdm import tqdm

# Set the filepath to your single combined .nc file
-filepath = 'cart_hotjupiter-main.nc' # Change this to your file path
+filepath = "cart_hotjupiter-main.nc" # Change this to your file path

# Set the name of the output file where the results will be saved
-output_file = 'averages_and_products.nc'
+output_file = "averages_and_products.nc"

# Variables to process
variables_to_process = ["temp", "vlat", "vlon", "vel1"]
@@ -21,55 +20,69 @@
uv_variance_sum = None
timestep_count = 0

-P0 = 1E5
+P0 = 1e5
R = 3779
g = 8.0

-pressure_levels = np.linspace(1E5, 0.01E5, 100) # Replace with actual pressure levels
+pressure_levels = np.linspace(1e5, 0.01e5, 100) # Replace with actual pressure levels

# Extract number of time steps in the file
-with Dataset(filepath, 'r') as nc:
-    num_time_steps = len(nc.variables['time'][:])
+with Dataset(filepath, "r") as nc:
+    num_time_steps = len(nc.variables["time"][:])


# Process each time step
start_t = 100
-Rp = 1E8
-for t in tqdm(range(start_t,num_time_steps), desc="Processing time steps"):
-    with Dataset(filepath, mode='r') as nc:
-        if t==start_t:
-            x1 = nc.variables['x1'][:]
-            latitudes = nc.variables['lat'][:]
+Rp = 1e8
+for t in tqdm(range(start_t, num_time_steps), desc="Processing time steps"):
+    with Dataset(filepath, mode="r") as nc:
+        if t == start_t:
+            x1 = nc.variables["x1"][:]
+            latitudes = nc.variables["lat"][:]
        # If this is the first time step, initialize data_sums and other accumulators
        if timestep_count == 0:
            for var in variables_to_process:
                # Initialize the sum arrays
-                data_sums[var] = np.zeros((nc.variables[var].shape[1], nc.variables[var].shape[2]))
+                data_sums[var] = np.zeros(
+                    (nc.variables[var].shape[1], nc.variables[var].shape[2])
+                )

+            upvp_sum = np.zeros(
+                (nc.variables["vlat"].shape[1], nc.variables["vlat"].shape[2])
+            )
+            upwp_sum = np.zeros(
+                (nc.variables["vlat"].shape[1], nc.variables["vlat"].shape[2])
+            )
+            uv_variance_sum = np.zeros(
+                (nc.variables["vlat"].shape[1], nc.variables["vlat"].shape[2])
+            )

-            upvp_sum = np.zeros((nc.variables['vlat'].shape[1], nc.variables['vlat'].shape[2]))
-            upwp_sum = np.zeros((nc.variables['vlat'].shape[1], nc.variables['vlat'].shape[2]))
-            uv_variance_sum = np.zeros((nc.variables['vlat'].shape[1], nc.variables['vlat'].shape[2]))

        # Update sums
-        mean_rho = np.mean(nc.variables['rho'][t], axis=2)
+        mean_rho = np.mean(nc.variables["rho"][t], axis=2)
        for var in variables_to_process:
            data = nc.variables[var][t] # We take the first index of the time dimension
-            zonal_mean = np.mean(data, axis=2) # Zonal mean over longitude, reducing dimension
+            zonal_mean = np.mean(
+                data, axis=2
+            ) # Zonal mean over longitude, reducing dimension
            data_sums[var] += zonal_mean # Summing up the zonal mean data
-            if var in ['vlat', 'vlon', 'vel1']:
-                prime = data - np.expand_dims(zonal_mean, axis=2) # Subtracting zonal mean, keeping dimensions consistent
+            if var in ["vlat", "vlon", "vel1"]:
+                prime = data - np.expand_dims(
+                    zonal_mean, axis=2
+                ) # Subtracting zonal mean, keeping dimensions consistent

-                if var == 'vlat':
+                if var == "vlat":
                    u_prime = prime
                else:
-                    if var == 'vlon':
+                    if var == "vlon":
                        v_prime = prime
                    else:
                        w_prime = prime
        # Calculate and sum u'v' and (u'^2 + v'^2) / 2 for each time step
        upvp = np.mean(u_prime * v_prime, axis=2) # Zonal mean of the product
        upwp = np.mean(u_prime * w_prime, axis=2) # Zonal mean of the product
-        uv_variance = np.mean((u_prime**2 + v_prime**2 + w_prime**2) / 2, axis=2) # Zonal mean of the variance
+        uv_variance = np.mean(
+            (u_prime**2 + v_prime**2 + w_prime**2) / 2, axis=2
+        ) # Zonal mean of the variance

        upvp_sum += upvp
        upwp_sum += upwp
@@ -78,13 +91,14 @@
        timestep_count += 1 # Update the timestep count

# After processing all files, calculate the averages
-averages = {var: data_sum / timestep_count for var, data_sum in data_sums.items()} # Averaging over all files
+averages = {
+    var: data_sum / timestep_count for var, data_sum in data_sums.items()
+} # Averaging over all files
upvp_avg = upvp_sum / timestep_count # Averaging over all files
upwp_avg = upwp_sum / timestep_count # Averaging over all files
uv_variance_avg = uv_variance_sum / timestep_count # Averaging over all files

-temp_avg = averages['temp'] # Temperature average over all files
-
+temp_avg = averages["temp"] # Temperature average over all files


# Calculate the pressure at each height level for each latitude using the hydrostatic equation
@@ -96,62 +110,78 @@
for j in range(temp_avg.shape[1]): # Loop over all latitudes
    for i in range(1, temp_avg.shape[0]): # Loop over all height levels
        dz = x1_levels[i] - x1_levels[i - 1] # Difference in height between two levels
-        T_avg = 0.5 * (temp_avg[i, j] + temp_avg[i - 1, j]) # Average temperature between two levels
-        pressures[i, j] = pressures[i - 1, j] * np.exp(-g * dz / (R * T_avg)) # Hydrostatic equation
+        T_avg = 0.5 * (
+            temp_avg[i, j] + temp_avg[i - 1, j]
+        ) # Average temperature between two levels
+        pressures[i, j] = pressures[i - 1, j] * np.exp(
+            -g * dz / (R * T_avg)
+        ) # Hydrostatic equation

# Interpolate the variables to the new pressure levels for each latitude
averages_interpolated = {}
for var, avg in averages.items():
-    averages_interpolated[var] = np.zeros((len(pressure_levels), temp_avg.shape[1])) # [pressure, lat]
+    averages_interpolated[var] = np.zeros(
+        (len(pressure_levels), temp_avg.shape[1])
+    ) # [pressure, lat]
    for j in range(temp_avg.shape[1]):
-        f = interp1d(pressures[:, j], avg[:, j], fill_value='extrapolate')
+        f = interp1d(pressures[:, j], avg[:, j], fill_value="extrapolate")
        averages_interpolated[var][:, j] = f(pressure_levels)

# Also interpolate 'upvp' and 'uv_variance'
-upvp_interpolated = np.zeros((len(pressure_levels), temp_avg.shape[1])) # [pressure, lat]
-upwp_interpolated = np.zeros((len(pressure_levels), temp_avg.shape[1])) # [pressure, lat]
-uv_variance_interpolated = np.zeros((len(pressure_levels), temp_avg.shape[1])) # [pressure, lat]
+upvp_interpolated = np.zeros(
+    (len(pressure_levels), temp_avg.shape[1])
+) # [pressure, lat]
+upwp_interpolated = np.zeros(
+    (len(pressure_levels), temp_avg.shape[1])
+) # [pressure, lat]
+uv_variance_interpolated = np.zeros(
+    (len(pressure_levels), temp_avg.shape[1])
+) # [pressure, lat]

for j in range(temp_avg.shape[1]):
-    upvp_function = interp1d(pressures[:, j], upvp_avg[:, j], fill_value='extrapolate')
-    upwp_function = interp1d(pressures[:, j], upwp_avg[:, j], fill_value='extrapolate')
-    uv_variance_function = interp1d(pressures[:, j], uv_variance_avg[:, j], fill_value='extrapolate')
+    upvp_function = interp1d(pressures[:, j], upvp_avg[:, j], fill_value="extrapolate")
+    upwp_function = interp1d(pressures[:, j], upwp_avg[:, j], fill_value="extrapolate")
+    uv_variance_function = interp1d(
+        pressures[:, j], uv_variance_avg[:, j], fill_value="extrapolate"
+    )
    upvp_interpolated[:, j] = upvp_function(pressure_levels)
    upwp_interpolated[:, j] = upwp_function(pressure_levels)
    uv_variance_interpolated[:, j] = uv_variance_function(pressure_levels)

# Save the results to a new .nc file
-with Dataset(output_file, mode='w') as new_nc:
+with Dataset(output_file, mode="w") as new_nc:
    # Create dimensions
-    new_nc.createDimension('pressure', len(pressure_levels))
-    new_nc.createDimension('lat', averages['temp'].shape[1])
+    new_nc.createDimension("pressure", len(pressure_levels))
+    new_nc.createDimension("lat", averages["temp"].shape[1])

    # Create pressure variable
-    pressure_var = new_nc.createVariable('pressure', np.float32, ('pressure',))
+    pressure_var = new_nc.createVariable("pressure", np.float32, ("pressure",))
    pressure_var[:] = pressure_levels
-    pressure_var.units = 'Pa'
+    pressure_var.units = "Pa"

    # Create latitude variable
-    lat_var = new_nc.createVariable('lat', np.float32, ('lat',))
-    lat_var[:] = np.linspace(-90, 90, averages['temp'].shape[1])
-    lat_var.units = 'degrees_north'
+    lat_var = new_nc.createVariable("lat", np.float32, ("lat",))
+    lat_var[:] = np.linspace(-90, 90, averages["temp"].shape[1])
+    lat_var.units = "degrees_north"

    # Create variables for the zonal means and other variables
    for var, data in averages_interpolated.items():
-        new_var = new_nc.createVariable(var + '_avg', np.float32, ('pressure', 'lat'))
+        new_var = new_nc.createVariable(var + "_avg", np.float32, ("pressure", "lat"))
        new_var[:] = data

    # Create variables for 'upvp' and 'uv_variance'
-    upvp_var = new_nc.createVariable('upvp', np.float32, ('pressure', 'lat'))
+    upvp_var = new_nc.createVariable("upvp", np.float32, ("pressure", "lat"))
    upvp_var[:] = upvp_interpolated

-    upwp_var = new_nc.createVariable('upwp', np.float32, ('pressure', 'lat'))
+    upwp_var = new_nc.createVariable("upwp", np.float32, ("pressure", "lat"))
    upwp_var[:] = upwp_interpolated

-    uv_variance_var = new_nc.createVariable('uv_variance', np.float32, ('pressure', 'lat'))
+    uv_variance_var = new_nc.createVariable(
+        "uv_variance", np.float32, ("pressure", "lat")
+    )
    uv_variance_var[:] = uv_variance_interpolated

    # Optionally, add descriptions, units, or other metadata as attributes to the variables

# Print out a message indicating the script has finished
-print(f"Data processing completed. Results saved to {output_file}.")
\ No newline at end of file
+print(f"Data processing completed. Results saved to {output_file}.")