diff --git a/.pep8speaks.yml b/.pep8speaks.yml index 506a199..bf7ae97 100644 --- a/.pep8speaks.yml +++ b/.pep8speaks.yml @@ -19,6 +19,9 @@ pycodestyle: ignore: # Errors and warnings to ignore - W391 - E203 + - W503 + - E731 + - E741 only_mention_files_with_errors: True # If False, a separate status comment for each file is made. -descending_issues_order: False # If True, PEP8 issues in message will be displayed in descending order of line numbers in the file \ No newline at end of file +descending_issues_order: False # If True, PEP8 issues in message will be displayed in descending order of line numbers in the file diff --git a/README.md b/README.md index 91d0ab6..726aba5 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,7 @@ [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.9991.svg)](https://doi.org/10.5281/zenodo.9991) [![Coverage Status](https://coveralls.io/repos/github/josephhardinee/PyDSD/badge.svg?branch=master)](https://coveralls.io/github/josephhardinee/PyDSD?branch=master) +[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/ambv/black) PyDSD is an open source Python package to work with disdrometer, particle probe, and DSD data. It is meant to make complex workflows easy. It includes support for different aspects of the research and operational workflow. This includes parametric fitting for different distributions, file reading, and scattering support. diff --git a/pydsd/DSDProcessor.py b/pydsd/DSDProcessor.py index dc22642..179304e 100644 --- a/pydsd/DSDProcessor.py +++ b/pydsd/DSDProcessor.py @@ -5,35 +5,38 @@ from pytmatrix import orientation, radar, tmatrix_aux, refractive from . import DSR + class DSDProcessor: - def calcParameters(self,D0, Nw, mu): + def calcParameters(self, D0, Nw, mu): self.moments = {} - self.scatterer.psd = GammaPSD(D0=D0, Nw=10**(Nw), mu=mu) + self.scatterer.psd = GammaPSD(D0=D0, Nw=10 ** (Nw), mu=mu) self.scatterer.set_geometry(tmatrix_aux.geom_horiz_back) - self.moments['Zh'] = 10*np.log10(radar.refl(self.scatterer)) - self.moments['Zdr'] = 10*np.log10(radar.Zdr(self.scatterer)) - self.moments['delta_hv'] = radar.delta_hv(self.scatterer) - self.moments['ldr_h'] = radar.ldr(self.scatterer) - self.moments['ldr_v'] = radar.ldr(self.scatterer, h_pol=False) + self.moments["Zh"] = 10 * np.log10(radar.refl(self.scatterer)) + self.moments["Zdr"] = 10 * np.log10(radar.Zdr(self.scatterer)) + self.moments["delta_hv"] = radar.delta_hv(self.scatterer) + self.moments["ldr_h"] = radar.ldr(self.scatterer) + self.moments["ldr_v"] = radar.ldr(self.scatterer, h_pol=False) self.scatterer.set_geometry(tmatrix_aux.geom_horiz_forw) - self.moments['Kdp'] = radar.Kdp(self.scatterer) - self.moments['Ah'] = radar.Ai(self.scatterer) - self.moments['Adr'] = self.moments['Ah']-radar.Ai(self.scatterer, h_pol=False) + self.moments["Kdp"] = radar.Kdp(self.scatterer) + self.moments["Ah"] = radar.Ai(self.scatterer) + self.moments["Adr"] = self.moments["Ah"] - radar.Ai(self.scatterer, h_pol=False) return self.moments - def __init__(self, wl=tmatrix_aux.wl_X, dr =1, shape='bc'): - DSR_list = {'tb':DSR.tb, 'bc': DSR.bc, 'pb': DSR.pb} + def __init__(self, wl=tmatrix_aux.wl_X, dr=1, shape="bc"): + DSR_list = {"tb": DSR.tb, "bc": DSR.bc, "pb": DSR.pb} self.scatterer = Scatterer(wavelength=wl, m=refractive.m_w_10C[wl]) self.scatterer.psd_integrator = PSDIntegrator() - self.scatterer.psd_integrator.axis_ratio_func = lambda D: 1.0/DSR_list[shape](D) + self.scatterer.psd_integrator.axis_ratio_func = lambda D: 1.0 / DSR_list[shape]( + D + ) 
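Note on the three new pycodestyle ignores: they are the codes that Black's output routinely trips. W503 flags a line break before a binary operator, E731 flags assigning a lambda expression to a name, and E741 flags ambiguous single-letter names such as `l`. A minimal illustration of the first two, with hypothetical names; this diff itself contains the lambda pattern (e.g. `axis_ratio_func = lambda D: ...`):

```python
# Hypothetical snippets showing what each newly ignored code would flag.

# E731: assigning a lambda to a name. Black leaves such assignments in place.
square = lambda x: x * x

# W503: line break *before* a binary operator, which is the layout Black
# produces when wrapping long expressions in parentheses.
total = (
    square(2)
    + square(3)
)
print(total)
```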
self.scatterer.psd_integrator.D_max = 10.0 - self.scatterer.psd_integrator.geometries = (tmatrix_aux.geom_horiz_back, - tmatrix_aux.geom_horiz_forw) + self.scatterer.psd_integrator.geometries = ( + tmatrix_aux.geom_horiz_back, tmatrix_aux.geom_horiz_forw + ) self.scatterer.or_pdf = orientation.gaussian_pdf(20.0) self.scatterer.orient = orientation.orient_averaged_fixed self.scatterer.psd_integrator.init_scatter_table(self.scatterer) - self.dr=dr - + self.dr = dr diff --git a/pydsd/DSR.py b/pydsd/DSR.py index 96346f6..ec48a18 100644 --- a/pydsd/DSR.py +++ b/pydsd/DSR.py @@ -1,15 +1,14 @@ from __future__ import division import numpy as np -''' +""" The DSR module contains different drop shape relationships used in PyDisdrometer for the scattering calculations. -''' - +""" def tb(D_eq): - '''Thurai and Bringi Drop Shape relationship model. + """Thurai and Bringi Drop Shape relationship model. Implementation of the Thurai and Bringi Drop Shape model given in [1] . This gives the ratio of the major to minor axis. @@ -34,19 +33,17 @@ def tb(D_eq): .. [1] Thurai, Merhala, V. N. Bringi, "Drop axis ratios from a 2D video disdrometer." J. Atmos. Oceanic Technol., 22, 966 - 978. 2005 - ''' + """ if D_eq < 0.7: return 1.0 elif D_eq < 1.5: - return 1.173 - 0.5165 * D_eq + 0.4698 * D_eq ** 2 - 0.1317 * \ - D_eq ** 3 - 8.5e-3 * D_eq ** 4 + return 1.173 - 0.5165 * D_eq + 0.4698 * D_eq ** 2 - 0.1317 * D_eq ** 3 - 8.5e-3 * D_eq ** 4 else: - return 1.065 - 6.25e-2 * D_eq - 3.99e-3 * D_eq ** 2 + 7.66e-4 * \ - D_eq ** 3 - 4.095e-5 * D_eq ** 4 + return 1.065 - 6.25e-2 * D_eq - 3.99e-3 * D_eq ** 2 + 7.66e-4 * D_eq ** 3 - 4.095e-5 * D_eq ** 4 def pb(D_eq): - '''Pruppacher and Beard Drop Shape relationship model. + """Pruppacher and Beard Drop Shape relationship model. Implementation of the Pruppacher and Beard Drop Shape model given in [1] . This gives the ratio of the major to minor axis. @@ -72,13 +69,13 @@ def pb(D_eq): investigation of the internal circulation and shape of water drops falling at terminal velocity in air. Q.J.R. Meteorol. Soc., 96: 247-256 - ''' + """ return 1.03 - 0.062 * D_eq def bc(D_eq): - '''Beard and Chuang Drop Shape relationship model. + """Beard and Chuang Drop Shape relationship model. Implementation of the Beard and Chuang Drop Shape model given in [1] . This gives the ratio of the major to minor axis. @@ -102,8 +99,12 @@ def bc(D_eq): ---------- ..[1] Beard, Kenneth V., Catherine Chuang, 1987: A New Model for the Equilibrium Shape of Raindrops. J. Atmos. Sci., 44, 1509-1524. - ''' - - return 1.0048 + 5.7e-04 * np.power(D_eq, 1) - \ - 2.628e-02 * np.power(D_eq, 2) + 3.682e-03 * np.power(D_eq, 3) - \ - 1.677e-04 * np.power(D_eq, 4) + """ + + return 1.0048 + 5.7e-04 * np.power(D_eq, 1) - 2.628e-02 * np.power( + D_eq, 2 + ) + 3.682e-03 * np.power( + D_eq, 3 + ) - 1.677e-04 * np.power( + D_eq, 4 + ) diff --git a/pydsd/DropSizeDistribution.py b/pydsd/DropSizeDistribution.py index 4777feb..3666f3a 100644 --- a/pydsd/DropSizeDistribution.py +++ b/pydsd/DropSizeDistribution.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- -''' +""" The Drop Size Distribution model contains the DropSizeDistribution class. This class represents drop size distributions returned from the various readers in the io module. The class knows how to perform scattering simulations on itself. -''' +""" import numpy as np import pytmatrix @@ -20,11 +20,13 @@ from . 
import DSR from .utility import dielectric from .utility import configuration -SPEED_OF_LIGHT=299792458 + +SPEED_OF_LIGHT = 299792458 + class DropSizeDistribution(object): - ''' + """ DropSizeDistribution class to hold DSD's and calculate parameters and relationships. Should be returned from the disdrometer*reader style functions. @@ -55,10 +57,10 @@ class DropSizeDistribution(object): diameter: array_like The center size for each dsd bin. - ''' + """ - def __init__(self, reader, time_start = None, location=None): - '''Initializer for the DropSizeDistribution class. + def __init__(self, reader, time_start=None, location=None): + """Initializer for the DropSizeDistribution class. The DropSizeDistribution class holds dsd's returned from the various readers in the io module. @@ -77,27 +79,27 @@ def __init__(self, reader, time_start = None, location=None): dsd: `DropSizeDistribution` instance Drop Size Distribution instance. - ''' + """ - self.config = configuration.Configuration() + self.config = configuration.Configuration() self.time = reader.time - self.Nd = reader.fields['Nd'] + self.Nd = reader.fields["Nd"] self.spread = reader.spread try: - self.rain_rate = reader.fields['rain_rate'] + self.rain_rate = reader.fields["rain_rate"] except: self.rain_rate = None try: - self.velocity = reader.fields['terminal_velocity'] + self.velocity = reader.fields["terminal_velocity"] except: self.velocity = None try: - self.Z = reader.fields['reflectivity'] + self.Z = reader.fields["reflectivity"] except: self.Z = None try: - self.num_particles = reader.fields['num_particles'] + self.num_particles = reader.fields["num_particles"] except: self.num_particles = None try: @@ -116,16 +118,18 @@ def __init__(self, reader, time_start = None, location=None): except: self.info = {} - self.numt = len(reader.time['data']) + self.numt = len(reader.time["data"]) location = {} if location: - self.location = {'latitude': location[0], 'longitude': location[1]} + self.location = {"latitude": location[0], "longitude": location[1]} self.set_scattering_temperature_and_frequency() - def set_scattering_temperature_and_frequency(self, scattering_temp=10, scattering_freq = 9.7e9): - ''' Change the scattering temperature. After use, re-run calculate_radar_parameters + def set_scattering_temperature_and_frequency( + self, scattering_temp=10, scattering_freq=9.7e9 + ): + """ Change the scattering temperature. After use, re-run calculate_radar_parameters to see the effect this has on the parameters. Temperatures are in Celsius. Defaults to 10C X-band. Parameters @@ -134,14 +138,13 @@ def set_scattering_temperature_and_frequency(self, scattering_temp=10, scatterin Scattering temperature [C]. scattering_freq: optional, float Scattering frequency [Hz]. - ''' + """ self.scattering_freq = scattering_freq self.scattering_temp = scattering_temp self.m_w = dielectric.get_refractivity(scattering_freq, scattering_temp) - - def calculate_radar_parameters(self, dsr_func = DSR.bc, scatter_time_range = None ): - ''' Calculates radar parameters for the Drop Size Distribution. + def calculate_radar_parameters(self, dsr_func=DSR.bc, scatter_time_range=None): + """ Calculates radar parameters for the Drop Size Distribution. Calculates the radar parameters and stores them in the object. Defaults to X-Band,Beard and Chuang 10C setup. @@ -159,8 +162,8 @@ def calculate_radar_parameters(self, dsr_func = DSR.bc, scatter_time_range = Non scatter_time_range: optional, tuple Parameter to restrict the scattering to a time interval. 
The first element is the start time, while the second is the end time.
-        '''
-        self._setup_scattering(SPEED_OF_LIGHT/self.scattering_freq*1000.0, dsr_func)
+        """
+        self._setup_scattering(SPEED_OF_LIGHT / self.scattering_freq * 1000.0, dsr_func)
         self._setup_empty_fields()

         if scatter_time_range is None:
@@ -174,40 +177,51 @@ def calculate_radar_parameters(self, dsr_func = DSR.bc, scatter_time_range = Non
             self.scatter_end_time = scatter_time_range[1]

             if scatter_time_range[1] > self.numt:
-                print("End of Scatter time is greater than end of file. Scattering to end of included time.")
+                print(
+                    "End of Scatter time is greater than end of file."
+                    + " Scattering to end of included time."
+                )
                 self.scatter_end_time = self.numt

-        self.scatterer.set_geometry(tmatrix_aux.geom_horiz_back) # We break up scattering to avoid regenerating table.
+        self.scatterer.set_geometry(
+            tmatrix_aux.geom_horiz_back
+        )  # We break up scattering to avoid regenerating table.

         for t in range(self.scatter_start_time, self.scatter_end_time):
-            if np.sum(self.Nd['data'][t]) is 0:
+            if np.sum(self.Nd["data"][t]) == 0:
                 continue
-            BinnedDSD = pytmatrix.psd.BinnedPSD(self.bin_edges['data'], self.Nd['data'][t])
+            BinnedDSD = pytmatrix.psd.BinnedPSD(
+                self.bin_edges["data"], self.Nd["data"][t]
+            )
             self.scatterer.psd = BinnedDSD
-            self.fields['Zh']['data'][t] = 10 * \
-                np.log10(radar.refl(self.scatterer))
-            self.fields['Zdr']['data'][t] = 10 * \
-                np.log10(radar.Zdr(self.scatterer))
+            self.fields["Zh"]["data"][t] = 10 * np.log10(radar.refl(self.scatterer))
+            self.fields["Zdr"]["data"][t] = 10 * np.log10(radar.Zdr(self.scatterer))

         self.scatterer.set_geometry(tmatrix_aux.geom_horiz_forw)

         for t in range(self.scatter_start_time, self.scatter_end_time):
-            BinnedDSD = pytmatrix.psd.BinnedPSD(self.bin_edges['data'], self.Nd['data'][t])
+            BinnedDSD = pytmatrix.psd.BinnedPSD(
+                self.bin_edges["data"], self.Nd["data"][t]
+            )
             self.scatterer.psd = BinnedDSD
-            self.fields['Kdp']['data'][t] = radar.Kdp(self.scatterer)
-            self.fields['Ai']['data'][t] = radar.Ai(self.scatterer)
-            self.fields['Adr']['data'][t] = radar.Ai(self.scatterer) -radar.Ai(self.scatterer, h_pol=False)
-
-    def _setup_empty_fields(self ):
-        ''' Preallocate arrays of zeros for the radar moments
-        '''
-        params_list = ['Zh', 'Zdr', 'Kdp', 'Ai', 'Adr']
+            self.fields["Kdp"]["data"][t] = radar.Kdp(self.scatterer)
+            self.fields["Ai"]["data"][t] = radar.Ai(self.scatterer)
+            self.fields["Adr"]["data"][t] = radar.Ai(self.scatterer) - radar.Ai(
+                self.scatterer, h_pol=False
+            )
+
+    def _setup_empty_fields(self):
+        """ Preallocate arrays of zeros for the radar moments
+        """
+        params_list = ["Zh", "Zdr", "Kdp", "Ai", "Adr"]

         for param in params_list:
-            self.fields[param] = self.config.fill_in_metadata(param, np.ma.zeros(self.numt))
+            self.fields[param] = self.config.fill_in_metadata(
+                param, np.ma.zeros(self.numt)
+            )

     def _setup_scattering(self, wavelength, dsr_func):
-        ''' Internal Function to create scattering tables.
+        """ Internal Function to create scattering tables.

         This internal function sets up the scattering table. It takes a
         wavelength as an argument where wavelength is one of the pytmatrix
         accepted wavelengths.

         Parameters:
         -----------
             wavelength : tmatrix wavelength
                 PyTmatrix wavelength.
             dsr_func : function
                 Drop Shape Relationship function. Several built-in are available in the `DSR` module.
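For orientation, a minimal sketch of how the scattering path above is driven end to end. The filename is a placeholder and the reader choice is illustrative; any reader that returns a `DropSizeDistribution` works the same way:

```python
# Minimal sketch of the scattering workflow (the filename is hypothetical).
from pydsd import DSR
from pydsd.aux_readers.ARM_APU_reader import read_parsivel_arm_netcdf

dsd = read_parsivel_arm_netcdf("example_parsivel.nc")  # placeholder file
dsd.set_scattering_temperature_and_frequency(scattering_temp=10, scattering_freq=9.7e9)
dsd.calculate_radar_parameters(dsr_func=DSR.bc)  # Beard-Chuang axis ratios

# Scattered moments land in the fields dictionary, one value per timestep.
print(dsd.fields["Zh"]["data"][:5], dsd.fields["Kdp"]["data"][:5])
```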
- ''' - self.scatterer = Scatterer(wavelength=wavelength, - m=self.m_w) + """ + self.scatterer = Scatterer(wavelength=wavelength, m=self.m_w) self.scatterer.psd_integrator = PSDIntegrator() - self.scatterer.psd_integrator.axis_ratio_func = lambda D: 1.0 / \ - dsr_func(D) + self.scatterer.psd_integrator.axis_ratio_func = lambda D: 1.0 / dsr_func(D) self.dsr_func = dsr_func self.scatterer.psd_integrator.D_max = 10.0 self.scatterer.psd_integrator.geometries = ( - tmatrix_aux.geom_horiz_back, tmatrix_aux.geom_horiz_forw) + tmatrix_aux.geom_horiz_back, tmatrix_aux.geom_horiz_forw + ) self.scatterer.or_pdf = orientation.gaussian_pdf(20.0) self.scatterer.orient = orientation.orient_averaged_fixed self.scatterer.psd_integrator.init_scatter_table(self.scatterer) def _calc_mth_moment(self, m): - '''Calculates the mth moment of the drop size distribution. + """Calculates the mth moment of the drop size distribution. Returns the mth moment of the drop size distribution E[D^m]. @@ -243,23 +256,25 @@ def _calc_mth_moment(self, m): ----------- m: float order of the moment - ''' + """ - if len(self.spread['data']) > 0: - bin_width = self.spread['data'] + if len(self.spread["data"]) > 0: + bin_width = self.spread["data"] else: - bin_width = [self.bin_edges['data'][i + 1] - self.bin_edges['data'][i] - for i in range(0, len(self.bin_edges['data']) - 1)] + bin_width = [ + self.bin_edges["data"][i + 1] - self.bin_edges["data"][i] + for i in range(0, len(self.bin_edges["data"]) - 1) + ] mth_moment = np.ma.zeros(self.numt) for t in range(0, self.numt): - dmth = np.power(self.diameter['data'], m) - mth_moment[t] = np.dot(np.multiply(dmth, self.Nd['data'][t]), bin_width) + dmth = np.power(self.diameter["data"], m) + mth_moment[t] = np.dot(np.multiply(dmth, self.Nd["data"][t]), bin_width) return mth_moment - def calculate_dsd_parameterization(self, method='bringi'): - '''Calculates DSD Parameterization. + def calculate_dsd_parameterization(self, method="bringi"): + """Calculates DSD Parameterization. This calculates the dsd parameterization and stores the result in the fields dictionary. This includes the following parameters: @@ -275,34 +290,43 @@ def calculate_dsd_parameterization(self, method='bringi'): ------ For D0 and Nw we use the method due to Bringi and Chandrasekar. 
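`_calc_mth_moment` above is the discrete moment sum M_m = sum_k D_k^m N(D_k) dD_k. A standalone sketch with synthetic bins shows the same computation, plus the Dm = M_4 / M_3 ratio used a few hunks below:

```python
import numpy as np

# Standalone version of the moment sum in _calc_mth_moment, on toy bins.
diameter = np.linspace(0.2, 6.0, 30)      # bin centers [mm]
spread = np.full_like(diameter, 0.2)      # bin widths [mm]
Nd = 8000.0 * np.exp(-2.0 * diameter)     # toy exponential DSD [m^-3 mm^-1]

def mth_moment(m):
    return np.dot(diameter ** m * Nd, spread)

# Mass-weighted mean diameter, as computed in calculate_dsd_parameterization.
Dm = mth_moment(4) / mth_moment(3)
print(f"Dm = {Dm:.2f} mm")
```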
- ''' + """ - params_list = ['D0', 'Dmax', 'Dm', 'Nt', 'Nw', 'N0', 'W', 'mu'] + params_list = ["D0", "Dmax", "Dm", "Nt", "Nw", "N0", "W", "mu"] for param in params_list: - self.fields[param] = self.config.fill_in_metadata(param, np.ma.zeros(self.numt)) + self.fields[param] = self.config.fill_in_metadata( + param, np.ma.zeros(self.numt) + ) rho_w = 1e-03 # grams per mm cubed Density of Water vol_constant = np.pi / 6.0 * rho_w - self.fields['Dm']['data'] = np.divide(self._calc_mth_moment(4), self._calc_mth_moment(3)) + self.fields["Dm"]["data"] = np.divide( + self._calc_mth_moment(4), self._calc_mth_moment(3) + ) for t in range(0, self.numt): - if np.sum(self.Nd['data'][t]) == 0: + if np.sum(self.Nd["data"][t]) == 0: continue - self.fields['Nt']['data'][t] = np.dot(self.spread['data'], self.Nd['data'][t]) - self.fields['W']['data'][t] = vol_constant * np.dot(np.multiply(self.Nd['data'][t], self.spread['data']), - np.array(self.diameter['data']) ** 3) - self.fields['D0']['data'][t] = self._calculate_D0(self.Nd['data'][t]) - self.fields['Nw']['data'][t] = 256.0 / \ - (np.pi * rho_w) * np.divide(self.fields['W']['data'][t], - self.fields['Dm']['data'][t] ** 4) - - self.fields['Dmax']['data'][t] = self.__get_last_nonzero(self.Nd['data'][t]) - - self.fields['mu']['data'][:] = list(map(self._estimate_mu, - list(range(0,self.numt)))) + self.fields["Nt"]["data"][t] = np.dot( + self.spread["data"], self.Nd["data"][t] + ) + self.fields["W"]["data"][t] = vol_constant * np.dot( + np.multiply(self.Nd["data"][t], self.spread["data"]), + np.array(self.diameter["data"]) ** 3, + ) + self.fields["D0"]["data"][t] = self._calculate_D0(self.Nd["data"][t]) + self.fields["Nw"]["data"][t] = 256.0 / (np.pi * rho_w) * np.divide( + self.fields["W"]["data"][t], self.fields["Dm"]["data"][t] ** 4 + ) + + self.fields["Dmax"]["data"][t] = self.__get_last_nonzero(self.Nd["data"][t]) + + self.fields["mu"]["data"][:] = list( + map(self._estimate_mu, list(range(0, self.numt))) + ) def __get_last_nonzero(self, N): - ''' Gets last nonzero entry in an array. Gets last non-zero entry in an array. + """ Gets last nonzero entry in an array. Gets last non-zero entry in an array. Parameters ---------- @@ -313,15 +337,15 @@ def __get_last_nonzero(self, N): ------- max: int last nonzero entry in an array. - ''' + """ if np.ma.count(N): - return self.diameter['data'][np.max(N.nonzero())] + return self.diameter["data"][np.max(N.nonzero())] else: return 0 def _calculate_D0(self, N): - ''' Calculate Median Drop diameter. + """ Calculate Median Drop diameter. Calculates the median drop diameter for the array N. This assumes diameter and bin widths in the dsd object have been properly set. @@ -335,7 +359,7 @@ def _calculate_D0(self, N): ------ This works by calculating the two bins where cumulative water content goes over 0.5, and then interpolates the correct D0 value between these two bins. 
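A standalone sketch of the median-volume-diameter interpolation described in the notes above, on the same toy bins: accumulate water content, find the bin where the cumulative sum crosses half its total, then interpolate linearly within that bin.

```python
import numpy as np

# Standalone sketch of the D0 interpolation in _calculate_D0.
diameter = np.linspace(0.2, 6.0, 30)
spread = np.full_like(diameter, 0.2)
Nd = 8000.0 * np.exp(-2.0 * diameter)

W_const = 1e-3 * np.pi / 6.0  # water density [g mm^-3] times pi/6
cum_W = W_const * np.cumsum(Nd * spread * diameter ** 3)

cross_pt = list(cum_W < (cum_W[-1] * 0.5)).index(False) - 1
slope = (cum_W[cross_pt + 1] - cum_W[cross_pt]) / (
    diameter[cross_pt + 1] - diameter[cross_pt]
)
D0 = diameter[cross_pt] + (0.5 * cum_W[-1] - cum_W[cross_pt]) / slope
print(f"D0 = {D0:.2f} mm")
```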
- ''' + """ rho_w = 1e-3 W_const = rho_w * np.pi / 6.0 @@ -343,43 +367,55 @@ def _calculate_D0(self, N): if np.sum(N) == 0: return 0 - cum_W = W_const * \ - np.cumsum([N[k] * self.spread['data'][k] * (self.diameter['data'][k] ** 3) - for k in range(0, len(N))]) + cum_W = W_const * np.cumsum( + [ + N[k] * self.spread["data"][k] * (self.diameter["data"][k] ** 3) + for k in range(0, len(N)) + ] + ) cross_pt = list(cum_W < (cum_W[-1] * 0.5)).index(False) - 1 - slope = (cum_W[cross_pt + 1] - cum_W[cross_pt]) / \ - (self.diameter['data'][cross_pt + 1] - self.diameter['data'][cross_pt]) + slope = (cum_W[cross_pt + 1] - cum_W[cross_pt]) / ( + self.diameter["data"][cross_pt + 1] - self.diameter["data"][cross_pt] + ) run = (0.5 * cum_W[-1] - cum_W[cross_pt]) / slope - return self.diameter['data'][cross_pt] + run + return self.diameter["data"][cross_pt] + run def calculate_RR(self): - '''Calculate instantaneous rain rate. + """Calculate instantaneous rain rate. This calculates instantaneous rain rate based on the flux of water. - ''' - self.fields['rain_rate'] = {'data': np.ma.zeros(self.numt)} + """ + self.fields["rain_rate"] = {"data": np.ma.zeros(self.numt)} for t in range(0, self.numt): # self.rain_rate['data'][t] = 0.6*3.1415 * 10**(-3) * np.dot(np.multiply(self.rain_rate['data'],np.multiply(self.Nd['data'][t],self.spread['data'] )), # np.array(self.diameter['data'])**3) - velocity = 9.65 - 10.3 * np.exp(-0.6 * self.diameter['data']) + velocity = 9.65 - 10.3 * np.exp(-0.6 * self.diameter["data"]) velocity[0] = 0.5 - self.fields['rain_rate']['data'][t] = 0.6 * np.pi * 1e-03 * np.sum(self._mmultiply( - velocity, self.Nd['data'][t], self.spread['data'], np.array(self.diameter['data']) ** 3)) + self.fields["rain_rate"]["data"][t] = 0.6 * np.pi * 1e-03 * np.sum( + self._mmultiply( + velocity, + self.Nd["data"][t], + self.spread["data"], + np.array(self.diameter["data"]) ** 3, + ) + ) def calculate_R_Kdp_relationship(self): - ''' + """ calculate_R_kdp_relationship calculates a power fit for the rainfall-kdp relationship based upon the calculated radar parameters(which should have already been run). It returns the scale and exponential parameter a and b in the first tuple, and the second returned argument gives the covariance matrix of the fit. - ''' + """ - if 'rain_rate' in list(self.fields.keys()): + if "rain_rate" in list(self.fields.keys()): filt = np.logical_and( - self.fields['Kdp']['data'] > 0, self.fields['rain_rate']['data'] > 0) - popt, pcov = expfit(self.fields['Kdp']['data'][filt], - self.fields['rain_rate']['data'][filt]) + self.fields["Kdp"]["data"] > 0, self.fields["rain_rate"]["data"] > 0 + ) + popt, pcov = expfit( + self.fields["Kdp"]["data"][filt], self.fields["rain_rate"]["data"][filt] + ) return popt, pcov else: @@ -387,7 +423,7 @@ def calculate_R_Kdp_relationship(self): return None def calculate_R_Zh_relationship(self): - ''' + """ calculate_R_Zh_relationship calculates the power law fit for Zh based upon scattered radar parameters. It returns the scale and exponential parameter a and b in the first tuple, and the second returned argument @@ -399,14 +435,16 @@ def calculate_R_Zh_relationship(self): a,b,c fits for relationship. pcov: array Covariance matrix of fits. 
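The rain-rate loop in `calculate_RR` above is a flux integral, RR = 0.6 pi 1e-3 sum( v(D) N(D) dD D^3 ), with the exponential fall-speed law v(D) = 9.65 - 10.3 exp(-0.6 D). A standalone sketch on the same toy bins:

```python
import numpy as np

# Standalone sketch of the rain-rate sum in calculate_RR.
diameter = np.linspace(0.2, 6.0, 30)
spread = np.full_like(diameter, 0.2)
Nd = 8000.0 * np.exp(-2.0 * diameter)

velocity = 9.65 - 10.3 * np.exp(-0.6 * diameter)  # fall speed [m s^-1]
velocity[0] = 0.5  # same small-drop floor applied in the method above

rain_rate = 0.6 * np.pi * 1e-3 * np.sum(velocity * Nd * spread * diameter ** 3)
print(f"rain rate = {rain_rate:.2f} mm/h")
```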
-        '''
+        """

-        popt, pcov = expfit(np.power(10, 0.1 * self.fields['Zh']['data'][self.rain_rate['data'] > 0]),
-                            self.fields['rain_rate']['data'][self.fields['rain_rate']['data'] > 0])
+        popt, pcov = expfit(
+            np.power(10, 0.1 * self.fields["Zh"]["data"][self.fields["rain_rate"]["data"] > 0]),
+            self.fields["rain_rate"]["data"][self.fields["rain_rate"]["data"] > 0],
+        )
         return popt, pcov

     def calculate_R_Zh_Zdr_relationship(self):
-        '''
+        """
         calculate_R_Zh_Zdr_relationship calculates the power law fit for Zh,Zdr
         based upon scattered radar parameters. It returns the scale and
         exponential parameters a, b, and c in the first tuple, and the
@@ -415,17 +453,26 @@
         rain_rate > 0
         Zdr > 0
         Kdp > 0
-        '''
+        """

         filt = np.logical_and(
-            np.logical_and(self.fields['rain_rate']['data'] > 0, np.greater(self.fields['Zdr']['data'], 0)), self.fields['Kdp']['data'] > 0)
-        popt, pcov = expfit2([self._idb(self.fields['Zh']['data'][filt]),
-                              self._idb(self.fields['Zdr']['data'][filt])],
-                             self.fields['rain_rate']['data'][filt])
+            np.logical_and(
+                self.fields["rain_rate"]["data"] > 0,
+                np.greater(self.fields["Zdr"]["data"], 0),
+            ),
+            self.fields["Kdp"]["data"] > 0,
+        )
+        popt, pcov = expfit2(
+            [
+                self._idb(self.fields["Zh"]["data"][filt]),
+                self._idb(self.fields["Zdr"]["data"][filt]),
+            ],
+            self.fields["rain_rate"]["data"][filt],
+        )
         return popt, pcov

     def calculate_R_Zh_Kdp_relationship(self):
-        '''
+        """
         calculate_R_Zh_Kdp_relationship calculates the power law fit for Zh,Kdp
         based upon scattered radar parameters. It returns the scale and
         exponential parameters a, b, and c in the first tuple, and the
@@ -433,17 +480,25 @@
         rain_rate > 0
         Zdr > 0
         Kdp > 0
-        '''
+        """
         filt = np.logical_and(
-            np.logical_and(self.fields['rain_rate']['data'] > 0, self.fields['Zdr']['data'] > 0), self.fields['Kdp']['data'] > 0)
-        popt, pcov = expfit2([self._idb(self.fields['Zh']['data'][filt]),
-                              self.fields['Kdp']['data'][filt]],
-                             self.fields['rain_rate']['data'][filt])
+            np.logical_and(
+                self.fields["rain_rate"]["data"] > 0, self.fields["Zdr"]["data"] > 0
+            ),
+            self.fields["Kdp"]["data"] > 0,
+        )
+        popt, pcov = expfit2(
+            [
+                self._idb(self.fields["Zh"]["data"][filt]),
+                self.fields["Kdp"]["data"][filt],
+            ],
+            self.fields["rain_rate"]["data"][filt],
+        )
         return popt, pcov

     def calculate_R_Zdr_Kdp_relationship(self):
-        '''
+        """
         calculate_R_Zdr_Kdp_relationship calculates the power law fit for Zdr,Kdp
         based upon scattered radar parameters. It returns the scale and
         exponential parameters a, b, and c in the first tuple, and the
@@ -451,24 +506,32 @@
         rain_rate > 0
         Zdr > 0
         Kdp > 0
-        '''
-
-        filt = np.logical_and(np.logical_and(self.fields['rain_rate']['data'] > 0, self.fields['Zdr']['data'] > 0),
-                              self.fields['Kdp']['data'] > 0)
+        """

-        popt, pcov = expfit2([self._idb(self.fields['Zdr']['data'][filt]),
-                              self.fields['Kdp']['data'][filt]],
-                             self.fields['rain_rate']['data'][filt])
+        filt = np.logical_and(
+            np.logical_and(
+                self.fields["rain_rate"]["data"] > 0, self.fields["Zdr"]["data"] > 0
+            ),
+            self.fields["Kdp"]["data"] > 0,
+        )
+
+        popt, pcov = expfit2(
+            [
+                self._idb(self.fields["Zdr"]["data"][filt]),
+                self.fields["Kdp"]["data"][filt],
+            ],
+            self.fields["rain_rate"]["data"][filt],
+        )
         return popt, pcov

     def _idb(self, db):
-        '''
+        """
         Converts dB to linear scale.
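`expfit` and `expfit2` are imported elsewhere in this module and are not part of this diff. Per the docstrings above they are power-law fitters; a minimal single-predictor stand-in using `scipy.optimize.curve_fit` (not the package's actual implementation) shows what the R(Kdp) style relationships reduce to:

```python
import numpy as np
from scipy.optimize import curve_fit

# Minimal stand-in for the expfit() helper used above: fit R = a * x**b.
def power_law(x, a, b):
    return a * np.power(x, b)

kdp = np.array([0.1, 0.3, 0.6, 1.2, 2.5])  # toy Kdp values [deg/km]
rr = 50.0 * np.power(kdp, 0.85) + np.random.normal(0.0, 1.0, kdp.size)

popt, pcov = curve_fit(power_law, kdp, rr, p0=(40.0, 0.8))
print("a, b =", popt)  # scale and exponent, covariance in pcov
```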
- ''' + """ return np.power(10, np.multiply(0.1, db)) def _mmultiply(self, *args): - ''' + """ _mmultiply extends numpy multiply to arbitrary number of same sized matrices. Multiplication is elementwise. @@ -476,7 +539,7 @@ def _mmultiply(self, *args): ----------- *args: matrices Matrices to multiply. Must be same shape. - ''' + """ i_value = np.ones(len(args[0])) for i in args: i_value = np.multiply(i_value, i) @@ -504,9 +567,11 @@ def _estimate_mu(self, idx): mu: integer Best estimate for DSD shape parameter $\mu$. """ - if np.sum(self.Nd['data'][idx]) == 0 : + if np.sum(self.Nd["data"][idx]) == 0: return np.nan - res = scipy.optimize.minimize_scalar(self._mu_cost, bounds = (-10,20), args = (idx,), method='bounded') + res = scipy.optimize.minimize_scalar( + self._mu_cost, bounds=(-10, 20), args=(idx,), method="bounded" + ) if self._mu_cost(res.x, idx) == np.nan or res.x > 20: return np.nan else: @@ -525,7 +590,11 @@ def _mu_cost(self, mu, idx): Potential Mu value """ - gdsd = pytmatrix.psd.GammaPSD(self.fields['D0']['data'][idx], self.fields['Nw']['data'][idx],mu) - return np.sqrt(np.nansum(np.power(np.abs(self.Nd['data'][idx] - gdsd(self.diameter['data'])),2))) - - + gdsd = pytmatrix.psd.GammaPSD( + self.fields["D0"]["data"][idx], self.fields["Nw"]["data"][idx], mu + ) + return np.sqrt( + np.nansum( + np.power(np.abs(self.Nd["data"][idx] - gdsd(self.diameter["data"])), 2) + ) + ) diff --git a/pydsd/__init__.py b/pydsd/__init__.py index 228f389..17b9663 100644 --- a/pydsd/__init__.py +++ b/pydsd/__init__.py @@ -19,5 +19,6 @@ from ._version import get_versions -__version__ = get_versions()['version'] + +__version__ = get_versions()["version"] del get_versions diff --git a/pydsd/_version.py b/pydsd/_version.py index 2ae69f4..5d85389 100644 --- a/pydsd/_version.py +++ b/pydsd/_version.py @@ -58,17 +58,18 @@ class NotThisMethod(Exception): def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" + def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f + return decorate -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): +def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None @@ -76,10 +77,13 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + p = subprocess.Popen( + [c] + args, + cwd=cwd, + env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr else None), + ) break except EnvironmentError: e = sys.exc_info()[1] @@ -116,16 +120,22 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): - return {"version": dirname[len(parentdir_prefix):], - "full-revisionid": None, - "dirty": False, "error": None, "date": None} + return { + "version": dirname[len(parentdir_prefix):], + "full-revisionid": None, + "dirty": False, + "error": None, + "date": None, + } else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: - print("Tried directories %s but none started with prefix %s" % - (str(rootdirs), parentdir_prefix)) + 
print( + "Tried directories %s but none started with prefix %s" + % (str(rootdirs), parentdir_prefix) + ) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @@ -190,7 +200,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = set([r for r in refs if re.search(r"\d", r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: @@ -201,16 +211,23 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) - return {"version": r, - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": None, - "date": date} + return { + "version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": None, + "date": date, + } # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") - return {"version": "0+unknown", - "full-revisionid": keywords["full"].strip(), - "dirty": False, "error": "no suitable tags", "date": None} + return { + "version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, + "error": "no suitable tags", + "date": None, + } @register_vcs_handler("git", "pieces_from_vcs") @@ -225,8 +242,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) + out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) @@ -234,10 +250,19 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) + describe_out, rc = run_command( + GITS, + [ + "describe", + "--tags", + "--dirty", + "--always", + "--long", + "--match", + "%s*" % tag_prefix, + ], + cwd=root, + ) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") @@ -266,11 +291,12 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if "-" in git_describe: # TAG-NUM-gHEX - mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
- pieces["error"] = ("unable to parse git-describe output: '%s'" - % describe_out) + pieces["error"] = ( + "unable to parse git-describe output: '%s'" % describe_out + ) return pieces # tag @@ -279,8 +305,9 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) - pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" - % (full_tag, tag_prefix)) + pieces["error"] = ( + "tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix) + ) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] @@ -293,13 +320,13 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) + count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() + date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[ + 0 + ].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces @@ -330,8 +357,7 @@ def render_pep440(pieces): rendered += ".dirty" else: # exception #1 - rendered = "0+untagged.%d.g%s" % (pieces["distance"], - pieces["short"]) + rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered @@ -445,11 +471,13 @@ def render_git_describe_long(pieces): def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: - return {"version": "unknown", - "full-revisionid": pieces.get("long"), - "dirty": None, - "error": pieces["error"], - "date": None} + return { + "version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None, + } if not style or style == "default": style = "pep440" # the default @@ -469,9 +497,13 @@ def render(pieces, style): else: raise ValueError("unknown style '%s'" % style) - return {"version": rendered, "full-revisionid": pieces["long"], - "dirty": pieces["dirty"], "error": None, - "date": pieces.get("date")} + return { + "version": rendered, + "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], + "error": None, + "date": pieces.get("date"), + } def get_versions(): @@ -485,8 +517,7 @@ def get_versions(): verbose = cfg.verbose try: - return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, - verbose) + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass @@ -495,13 +526,16 @@ def get_versions(): # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. 
- for i in cfg.versionfile_source.split('/'): + for i in cfg.versionfile_source.split("/"): root = os.path.dirname(root) except NameError: - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to find root of source tree", - "date": None} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + "date": None, + } try: pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) @@ -515,6 +549,10 @@ def get_versions(): except NotThisMethod: pass - return {"version": "0+unknown", "full-revisionid": None, - "dirty": None, - "error": "unable to compute version", "date": None} + return { + "version": "0+unknown", + "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", + "date": None, + } diff --git a/pydsd/aux_readers/ARM_APU_reader.py b/pydsd/aux_readers/ARM_APU_reader.py index d39c64f..0649d73 100644 --- a/pydsd/aux_readers/ARM_APU_reader.py +++ b/pydsd/aux_readers/ARM_APU_reader.py @@ -14,7 +14,7 @@ def read_parsivel_arm_netcdf(filename): - ''' + """ Takes a filename pointing to an ARM Parsivel netcdf file and returns a drop size distribution object. @@ -24,7 +24,7 @@ def read_parsivel_arm_netcdf(filename): Returns: DropSizeDistrometer object - ''' + """ reader = ARM_APU_reader(filename) @@ -34,18 +34,18 @@ def read_parsivel_arm_netcdf(filename): return None - class ARM_APU_reader(object): - ''' + """ This class reads and parses parsivel disdrometer data from ARM netcdf files. These conform to document (Need Document). Use the read_parsivel_arm_netcdf() function to interface with this. - ''' + """ + def __init__(self, filename): - ''' + """ Handles setting up a APU Reader. - ''' + """ self.fields = {} self.time = [] # Time in minutes from start of recording self.Nd = [] @@ -53,45 +53,52 @@ def __init__(self, filename): self.filename = filename self.nc_dataset = Dataset(filename) - time = np.ma.array(self.nc_dataset.variables['time'][:]) - base_time = datetime.strptime(self.nc_dataset['time'].units,"seconds since %Y-%m-%d %H:%M:%S 0:00") + time = np.ma.array(self.nc_dataset.variables["time"][:]) + base_time = datetime.strptime( + self.nc_dataset["time"].units, "seconds since %Y-%m-%d %H:%M:%S 0:00" + ) # Return a common epoch time dictionary - self.time = {'data': time + (base_time - datetime(1970,1,1)).total_seconds(), - 'units': common.EPOCH_UNITS, - 'standard_name': "Time", - 'long_name': "Time (UTC)" - } + self.time = { + "data": time + (base_time - datetime(1970, 1, 1)).total_seconds(), + "units": common.EPOCH_UNITS, + "standard_name": "Time", + "long_name": "Time (UTC)", + } # self.time = self._get_epoch_time(time, t_units) - Nd = np.ma.array( - self.nc_dataset.variables['number_density_drops'][:]) - velocity = np.ma.array( - self.nc_dataset.variables['fall_velocity_calculated'][:]) - rain_rate = np.ma.array( - self.nc_dataset.variables['precip_rate'][:]) + Nd = np.ma.array(self.nc_dataset.variables["number_density_drops"][:]) + velocity = np.ma.array(self.nc_dataset.variables["fall_velocity_calculated"][:]) + rain_rate = np.ma.array(self.nc_dataset.variables["precip_rate"][:]) self.diameter = common.var_to_dict( - 'diameter', self.nc_dataset.variables['particle_size'][:], - 'mm', 'Particle diameter of bins') + "diameter", + self.nc_dataset.variables["particle_size"][:], + "mm", + "Particle diameter of bins", + ) self.spread = common.var_to_dict( - 'spread', self.nc_dataset.variables['class_size_width'][:], - 'mm', 'Bin size spread of bins') + 
"spread", + self.nc_dataset.variables["class_size_width"][:], + "mm", + "Bin size spread of bins", + ) self.bin_edges = common.var_to_dict( - 'bin_edges', - np.hstack((0, self.diameter['data'] + np.array(self.spread['data']) / 2)), - 'mm', 'Boundaries of bin sizes') - - - self.fields['Nd'] = common.var_to_dict( - 'Nd', Nd, 'm^-3 mm^-1', - 'Liquid water particle concentration') - self.fields['velocity'] = common.var_to_dict( - 'velocity', velocity, 'm s^-1', - 'Terminal fall velocity for each bin') - self.fields['rain_rate'] = common.var_to_dict( - 'rain_rate', rain_rate, 'mm h^-1', - 'Rain rate') + "bin_edges", + np.hstack((0, self.diameter["data"] + np.array(self.spread["data"]) / 2)), + "mm", + "Boundaries of bin sizes", + ) + + self.fields["Nd"] = common.var_to_dict( + "Nd", Nd, "m^-3 mm^-1", "Liquid water particle concentration" + ) + self.fields["velocity"] = common.var_to_dict( + "velocity", velocity, "m s^-1", "Terminal fall velocity for each bin" + ) + self.fields["rain_rate"] = common.var_to_dict( + "rain_rate", rain_rate, "mm h^-1", "Rain rate" + ) def _get_epoch_time(self, sample_times, t_units): """Convert time to epoch time and return a dictionary.""" @@ -101,5 +108,9 @@ def _get_epoch_time(self, sample_times, t_units): timesec = date2num(dts, common.EPOCH_UNITS) # Now once again convert this data into a datetime instance time_unaware = num2date(timesec, common.EPOCH_UNITS) - eptime = {'data': time_unaware, 'units': common.EPOCH_UNITS, - 'standard_name': 'Time', 'long_name': 'Time (UTC)'} + eptime = { + "data": time_unaware, + "units": common.EPOCH_UNITS, + "standard_name": "Time", + "long_name": "Time (UTC)", + } diff --git a/pydsd/aux_readers/ARM_JWD_Reader.py b/pydsd/aux_readers/ARM_JWD_Reader.py index 9f516a4..5694159 100644 --- a/pydsd/aux_readers/ARM_JWD_Reader.py +++ b/pydsd/aux_readers/ARM_JWD_Reader.py @@ -14,7 +14,7 @@ def read_arm_jwd_b1(filename): - ''' + """ Takes a filename pointing to an ARM Parsivel netcdf file and returns a drop size distribution object. @@ -24,7 +24,7 @@ def read_arm_jwd_b1(filename): Returns: DropSizeDistrometer object - ''' + """ reader = ArmJwdReader(filename) @@ -37,81 +37,100 @@ def read_arm_jwd_b1(filename): class ArmJwdReader(object): - ''' + """ This class reads and parses parsivel disdrometer data from ARM netcdf files. These conform to document (Need Document). Use the read_arm_jwd_b1() function to interface with this. - ''' + """ def __init__(self, filename): - ''' + """ Handles setting up a reader. 
- ''' + """ self.fields = {} self.info = {} self.nc_dataset = Dataset(filename) self.filename = filename - time = np.ma.array(self.nc_dataset.variables['time_offset'][:] + self.nc_dataset.variables['base_time'][:]) + time = np.ma.array( + self.nc_dataset.variables["time_offset"][:] + + self.nc_dataset.variables["base_time"][:] + ) self.time = self._get_epoch_time(time) - Nd = np.ma.array( - self.nc_dataset.variables['nd'][:]) - velocity = np.ma.array( - self.nc_dataset.variables['fall_vel'][:]) - rain_rate = np.ma.array( - self.nc_dataset.variables['rain_rate'][:]) - self.diameter = np.ma.array(self.nc_dataset.variables['mean_diam_drop_class'][:]) - self.spread = np.ma.array(self.nc_dataset.variables['delta_diam'][:]) + Nd = np.ma.array(self.nc_dataset.variables["nd"][:]) + velocity = np.ma.array(self.nc_dataset.variables["fall_vel"][:]) + rain_rate = np.ma.array(self.nc_dataset.variables["rain_rate"][:]) + self.diameter = np.ma.array( + self.nc_dataset.variables["mean_diam_drop_class"][:] + ) + self.spread = np.ma.array(self.nc_dataset.variables["delta_diam"][:]) # TODO: Move this to new metadata utility, and just add information from raw netcdf where appropriate self.bin_edges = common.var_to_dict( - 'bin_edges', - np.hstack((0, self.diameter + np.array(self.spread) / 2)), - 'mm', 'Boundaries of bin sizes') + "bin_edges", + np.hstack((0, self.diameter + np.array(self.spread) / 2)), + "mm", + "Boundaries of bin sizes", + ) self.spread = common.var_to_dict( - 'spread', self.spread, - 'mm', 'Bin size spread of bins') + "spread", self.spread, "mm", "Bin size spread of bins" + ) self.diameter = common.var_to_dict( - 'diameter', self.diameter, - 'mm', 'Particle diameter of bins') - - self.fields['Nd'] = common.var_to_dict( - 'Nd', Nd, 'm^-3 mm^-1', - 'Liquid water particle concentration') - self.fields['velocity'] = common.var_to_dict( - 'velocity', velocity, 'm s^-1', - 'Terminal fall velocity for each bin') - self.fields['rain_rate'] = common.var_to_dict( - 'rain_rate', rain_rate, 'mm h^-1', - 'Rain rate') - - self.fields['num_drop'] = common.var_to_dict( - "num_drop", self.nc_dataset.variables['num_drop'][:], '#', - "Number of Drops") - - self.fields['d_max'] = common.var_to_dict( - "d_max", self.nc_dataset.variables['d_max'][:],"mm", - "Diameter of largest drop" + "diameter", self.diameter, "mm", "Particle diameter of bins" + ) + + self.fields["Nd"] = common.var_to_dict( + "Nd", Nd, "m^-3 mm^-1", "Liquid water particle concentration" + ) + self.fields["velocity"] = common.var_to_dict( + "velocity", velocity, "m s^-1", "Terminal fall velocity for each bin" + ) + self.fields["rain_rate"] = common.var_to_dict( + "rain_rate", rain_rate, "mm h^-1", "Rain rate" ) - self.fields['liq_water'] = common.var_to_dict( - "liq_water", self.nc_dataset.variables['liq_water'][:], - "gm/m^3", "Liquid water content") - self.fields['n_0'] = common.var_to_dict( - "n_0", self.nc_dataset.variables['n_0'][:], - "1/(m^3-mm)", "Distribution Intercept") - self.fields['lambda'] = common.var_to_dict( - "lambda", self.nc_dataset.variables['lambda'][:], - "1/mm", "Distribution Slope") + self.fields["num_drop"] = common.var_to_dict( + "num_drop", self.nc_dataset.variables["num_drop"][:], "#", "Number of Drops" + ) + + self.fields["d_max"] = common.var_to_dict( + "d_max", + self.nc_dataset.variables["d_max"][:], + "mm", + "Diameter of largest drop", + ) + self.fields["liq_water"] = common.var_to_dict( + "liq_water", + self.nc_dataset.variables["liq_water"][:], + "gm/m^3", + "Liquid water content", + ) + + 
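A usage sketch for the JWD reader above; the filename is a placeholder, not a real ARM product name:

```python
# Usage sketch for read_arm_jwd_b1 (the filename is hypothetical).
from pydsd.aux_readers.ARM_JWD_Reader import read_arm_jwd_b1

dsd = read_arm_jwd_b1("example_arm_jwd.b1.cdf")  # placeholder file
print(dsd.Nd["data"].shape)    # (time, diameter bin)
print(dsd.diameter["units"])   # "mm", per the metadata populated above

dsd.calculate_dsd_parameterization()
print(dsd.fields["D0"]["data"][:5])  # median volume diameter per timestep
```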
self.fields["n_0"] = common.var_to_dict( + "n_0", + self.nc_dataset.variables["n_0"][:], + "1/(m^3-mm)", + "Distribution Intercept", + ) + self.fields["lambda"] = common.var_to_dict( + "lambda", + self.nc_dataset.variables["lambda"][:], + "1/mm", + "Distribution Slope", + ) for key in self.nc_dataset.ncattrs(): - self.info[key] =self.nc_dataset.getncattr(key) + self.info[key] = self.nc_dataset.getncattr(key) def _get_epoch_time(self, sample_times): """Convert time to epoch time and return a dictionary.""" - eptime = {'data': sample_times, 'units': common.EPOCH_UNITS, - 'standard_name': 'Time', 'long_name': 'Time (UTC)'} + eptime = { + "data": sample_times, + "units": common.EPOCH_UNITS, + "standard_name": "Time", + "long_name": "Time (UTC)", + } return eptime diff --git a/pydsd/aux_readers/ARM_Vdis_Reader.py b/pydsd/aux_readers/ARM_Vdis_Reader.py index dac9964..1d03071 100644 --- a/pydsd/aux_readers/ARM_Vdis_Reader.py +++ b/pydsd/aux_readers/ARM_Vdis_Reader.py @@ -14,7 +14,7 @@ def read_arm_vdis_b1(filename): - ''' + """ Takes a filename pointing to an ARM vdis netcdf file and returns a drop size distribution object. Tested on MC3E data. @@ -24,7 +24,7 @@ def read_arm_vdis_b1(filename): Returns: DropSizeDistrometer object - ''' + """ reader = ArmVdisReader(filename) @@ -37,56 +37,60 @@ def read_arm_vdis_b1(filename): class ArmVdisReader(object): - ''' + """ This class reads and parses parsivel disdrometer data from ARM netcdf files. These conform to document (Need Document). Use the read_arm_jwd_b1() function to interface with this. - ''' + """ def __init__(self, filename): - ''' + """ Handles setting up a reader. - ''' + """ self.fields = {} self.info = {} self.nc_dataset = Dataset(filename) self.filename = filename - time = np.ma.array(self.nc_dataset.variables['time_offset'][:] + self.nc_dataset.variables['base_time'][:]) + time = np.ma.array( + self.nc_dataset.variables["time_offset"][:] + + self.nc_dataset.variables["base_time"][:] + ) self.time = self._get_epoch_time(time) - Nd = np.ma.array( - self.nc_dataset.variables['num_density'][:]) + Nd = np.ma.array(self.nc_dataset.variables["num_density"][:]) # velocity = np.ma.array( # self.nc_dataset.variables['fall_vel'][:]) # rain_rate = np.ma.array( # self.nc_dataset.variables['rain_rate'][:]) - self.diameter = np.ma.array(self.nc_dataset.variables['drop_diameter'][:]) - self.spread = np.ma.array(self.nc_dataset.variables['delta_diam'][:]) + self.diameter = np.ma.array(self.nc_dataset.variables["drop_diameter"][:]) + self.spread = np.ma.array(self.nc_dataset.variables["delta_diam"][:]) # TODO: Move this to new metadata utility, and just add information from raw netcdf where appropriate self.bin_edges = common.var_to_dict( - 'bin_edges', - np.hstack((0, self.diameter + np.array(self.spread) / 2)), - 'mm', 'Boundaries of bin sizes') + "bin_edges", + np.hstack((0, self.diameter + np.array(self.spread) / 2)), + "mm", + "Boundaries of bin sizes", + ) self.spread = common.var_to_dict( - 'spread', self.spread, - 'mm', 'Bin size spread of bins') + "spread", self.spread, "mm", "Bin size spread of bins" + ) self.diameter = common.var_to_dict( - 'diameter', self.diameter, - 'mm', 'Particle diameter of bins') - - self.fields['Nd'] = common.var_to_dict( - 'Nd', Nd, 'm^-3 mm^-1', - 'Liquid water particle concentration') - self.fields['velocity'] = common.var_to_dict( - 'velocity', velocity, 'm s^-1', - 'Terminal fall velocity for each bin') - self.fields['rain_rate'] = common.var_to_dict( - 'rain_rate', rain_rate, 'mm h^-1', - 'Rain rate') + 
"diameter", self.diameter, "mm", "Particle diameter of bins" + ) + + self.fields["Nd"] = common.var_to_dict( + "Nd", Nd, "m^-3 mm^-1", "Liquid water particle concentration" + ) + self.fields["velocity"] = common.var_to_dict( + "velocity", velocity, "m s^-1", "Terminal fall velocity for each bin" + ) + self.fields["rain_rate"] = common.var_to_dict( + "rain_rate", rain_rate, "mm h^-1", "Rain rate" + ) # # self.fields['num_drop'] = common.var_to_dict( # "num_drop", self.nc_dataset.variables['num_drop'][:], '#', @@ -100,18 +104,28 @@ def __init__(self, filename): # "liq_water", self.nc_dataset.variables['liq_water'][:], # "gm/m^3", "Liquid water content") - self.fields['n_0'] = common.var_to_dict( - "n_0", self.nc_dataset.variables['n_0'][:], - "1/(m^3-mm)", "Distribution Intercept") - self.fields['lambda'] = common.var_to_dict( - "lambda", self.nc_dataset.variables['lambda'][:], - "1/mm", "Distribution Slope") + self.fields["n_0"] = common.var_to_dict( + "n_0", + self.nc_dataset.variables["n_0"][:], + "1/(m^3-mm)", + "Distribution Intercept", + ) + self.fields["lambda"] = common.var_to_dict( + "lambda", + self.nc_dataset.variables["lambda"][:], + "1/mm", + "Distribution Slope", + ) for key in self.nc_dataset.ncattrs(): - self.info[key] =self.nc_dataset.getncattr(key) + self.info[key] = self.nc_dataset.getncattr(key) def _get_epoch_time(self, sample_times): """Convert time to epoch time and return a dictionary.""" - eptime = {'data': sample_times, 'units': common.EPOCH_UNITS, - 'standard_name': 'Time', 'long_name': 'Time (UTC)'} + eptime = { + "data": sample_times, + "units": common.EPOCH_UNITS, + "standard_name": "Time", + "long_name": "Time (UTC)", + } return eptime diff --git a/pydsd/aux_readers/GPMApuWallopsRawReader.py b/pydsd/aux_readers/GPMApuWallopsRawReader.py index c7ccc59..787f576 100644 --- a/pydsd/aux_readers/GPMApuWallopsRawReader.py +++ b/pydsd/aux_readers/GPMApuWallopsRawReader.py @@ -6,8 +6,9 @@ from ..DropSizeDistribution import DropSizeDistribution from ..io import common + def read_gpm_nasa_apu_raw_wallops(filename): - ''' + """ Takes a filename pointing to a parsivel NASA Field Campaign file with RAW Data and returns a drop size distribution object. @@ -20,10 +21,10 @@ def read_gpm_nasa_apu_raw_wallops(filename): Note: NASA's processing strips out rain rate so we have to recalculate it based upon a fall speed relationship. - ''' + """ reader = GPMApuWallopsRawReader(filename) - #return reader + # return reader if reader: return DropSizeDistribution(reader) else: @@ -32,26 +33,27 @@ def read_gpm_nasa_apu_raw_wallops(filename): class GPMApuWallopsRawReader(object): - ''' + """ This class reads and parses parsivel disdrometer data from nasa ground campaigns. These conform to document. 
- ''' + """ + def __init__(self, filename): - ''' + """ Handles setting up a NASA APU Reader for Raw 1024 Size Data - ''' + """ self.fields = {} time = [] # Time in minutes from start of recording self.raw = [] self.num_drops = [] self.rr = [] - self.f = open(filename, 'r') + self.f = open(filename, "r") reader = csv.reader(self.f) for row in reader: time.append(self._parse_time((row[0]))) - self.raw.append([float(x) for x in row[9:9+1024]]) + self.raw.append([float(x) for x in row[9:9 + 1024]]) self.num_drops.append(float(row[3])) self.rr.append(float(row[4])) @@ -60,47 +62,51 @@ def __init__(self, filename): yyyy = os.path.basename(self.filename).split(".")[1][0:4] mm = os.path.basename(self.filename).split(".")[1][4:6] dd = os.path.basename(self.filename).split(".")[1][6:8] - t_units = 'minutes since ' + "-".join([yyyy, mm, dd]) + 'T00:00:00' + t_units = "minutes since " + "-".join([yyyy, mm, dd]) + "T00:00:00" # Return a common epoch time dictionary self.time = _get_epoch_time(time, t_units) self.Md = np.reshape(self.raw, (-1, 32, 32)) self.bin_edges = common.var_to_dict( - 'bin_edges', - np.hstack( - (0, self.diameter + np.array(self.spread) / 2)), - 'mm', 'Boundaries of bin sizes') + "bin_edges", + np.hstack((0, self.diameter + np.array(self.spread) / 2)), + "mm", + "Boundaries of bin sizes", + ) Nd = conv_md_nd(Md) - self.fields['Nd'] = common.var_to_dict( - 'Nd', Nd, 'm^-3', 'Liquid water particle concentration') + self.fields["Nd"] = common.var_to_dict( + "Nd", Nd, "m^-3", "Liquid water particle concentration" + ) self.f.close() def _regenerate_rainfall(self): - ''' + """ The goal of this function is to recreate the rainfall that the NASA processing removes. The alternative is to merge the dsd and raintables files together. We might add that later - ''' - print('Not implemented yet') + """ + print("Not implemented yet") pass def _parse_time(self, time_vector): # For now we just drop the day stuff, Eventually we'll make this a # proper time - return float(time_vector[8:10]) * 60.0 +\ - float(time_vector[10:12]) + float(time_vector[12:14])/60.0 + return float(time_vector[8:10]) * 60.0 + float(time_vector[10:12]) + float( + time_vector[12:14] + ) / 60.0 - def conv_md_to_nd(self, Md, t=10/60.0): + def conv_md_to_nd(self, Md, t=10 / 60.0): F = 0.0054 - #v = 9.65 - 10.3 * np.exp(-0.6 * self.diameter) + # v = 9.65 - 10.3 * np.exp(-0.6 * self.diameter) vel_mat = np.tile(self.velocity, (32, 1)) spread_mat = np.tile(np.ma.array(self.spread).T, (32, 1)) self.Nd_mat = np.ma.zeros(np.shape(Md)) Nd_array = np.ma.zeros((len(self.time), 32)) for ti in range(0, len(self.time)): Nd_mat[ti] = np.divide( - Md[ti], (F * 60*t * np.multiply(vel_mat, spread_mat))) + Md[ti], (F * 60 * t * np.multiply(vel_mat, spread_mat)) + ) Nd_array[ti] = np.sum(self.Nd_mat[ti], axis=0) return Nd_array @@ -112,33 +118,137 @@ def _get_epoch_time(sample_times, t_units): timesec = date2num(dts, common.EPOCH_UNITS) # Now once again convert this data into a datetime instance time_unaware = num2date(timesec, common.EPOCH_UNITS) - eptime = {'data': time_unaware, 'units': common.EPOCH_UNITS, - 'standard_name': 'Time', 'long_name': 'Time (UTC)'} - + eptime = { + "data": time_unaware, + "units": common.EPOCH_UNITS, + "standard_name": "Time", + "long_name": "Time (UTC)", + } spread = common.var_to_dict( - 'spread', - np.array([ - 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.257, - 0.257, 0.257, 0.257, 0.257, 0.515, 0.515, 0.515, 0.515, 0.515, 1.030, 1.030, - 1.030, 1.030, 1.030, 2.060, 2.060, 2.060, 
2.060, 2.060, 3.090, 3.090]), - 'mm', 'Bin size spread of bins') + "spread", + np.array( + [ + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.257, + 0.257, + 0.257, + 0.257, + 0.257, + 0.515, + 0.515, + 0.515, + 0.515, + 0.515, + 1.030, + 1.030, + 1.030, + 1.030, + 1.030, + 2.060, + 2.060, + 2.060, + 2.060, + 2.060, + 3.090, + 3.090, + ] + ), + "mm", + "Bin size spread of bins", + ) diameter = common.var_to_dict( - 'diameter', + "diameter", np.array( - [0.06, 0.19, 0.32, 0.45, 0.58, 0.71, 0.84, 0.96, 1.09, 1.22, - 1.42, 1.67, 1.93, 2.19, 2.45, 2.83, 3.35, 3.86, 4.38, 4.89, - 5.66, 6.70, 7.72, 8.76, 9.78, 11.33, 13.39, 15.45, 17.51, - 19.57, 22.15, 25.24]), - 'mm', 'Particle diameter of bins') + [ + 0.06, + 0.19, + 0.32, + 0.45, + 0.58, + 0.71, + 0.84, + 0.96, + 1.09, + 1.22, + 1.42, + 1.67, + 1.93, + 2.19, + 2.45, + 2.83, + 3.35, + 3.86, + 4.38, + 4.89, + 5.66, + 6.70, + 7.72, + 8.76, + 9.78, + 11.33, + 13.39, + 15.45, + 17.51, + 19.57, + 22.15, + 25.24, + ] + ), + "mm", + "Particle diameter of bins", + ) velocity = common.var_to_dict( - 'velocity', + "velocity", np.array( - [0.05, 0.15, 0.25, 0.35, 0.45, 0.55, 0.65, 0.75, 0.85, 0.96, 1.13, - 1.35, 1.59, 1.83, 2.08, 2.40, 2.78, 3.15, 3.50, 3.84, 4.40, 5.20, - 6.00, 6.80, 7.60, 8.80, 10.40, 12.00, 13.60, 15.20, 17.60, 20.80]), - 'm s^-1', 'Terminal fall velocity for each bin') - - supported_campaigns = ['ifloods', 'mc3e_dsd', 'mc3e_raw'] + [ + 0.05, + 0.15, + 0.25, + 0.35, + 0.45, + 0.55, + 0.65, + 0.75, + 0.85, + 0.96, + 1.13, + 1.35, + 1.59, + 1.83, + 2.08, + 2.40, + 2.78, + 3.15, + 3.50, + 3.84, + 4.40, + 5.20, + 6.00, + 6.80, + 7.60, + 8.80, + 10.40, + 12.00, + 13.60, + 15.20, + 17.60, + 20.80, + ] + ), + "m s^-1", + "Terminal fall velocity for each bin", + ) + + supported_campaigns = ["ifloods", "mc3e_dsd", "mc3e_raw"] diff --git a/pydsd/aux_readers/NASA_2DVD_reader.py b/pydsd/aux_readers/NASA_2DVD_reader.py index 91e9b16..d8fe45f 100644 --- a/pydsd/aux_readers/NASA_2DVD_reader.py +++ b/pydsd/aux_readers/NASA_2DVD_reader.py @@ -9,9 +9,8 @@ from ..utility.configuration import Configuration - -def read_2dvd_sav_nasa_gv(filename, campaign='ifloods'): - ''' +def read_2dvd_sav_nasa_gv(filename, campaign="ifloods"): + """ Takes a filename pointing to a 2D-Video Disdrometer NASA Field Campaign file and returns a drop size distribution object. @@ -27,7 +26,7 @@ def read_2dvd_sav_nasa_gv(filename, campaign='ifloods'): Returns: DropSizeDistrometer object - ''' + """ reader = NASA_2DVD_sav_reader(filename, campaign) if reader: @@ -35,11 +34,11 @@ def read_2dvd_sav_nasa_gv(filename, campaign='ifloods'): else: return None - del(reader) + del (reader) def read_2dvd_dsd_nasa_gv(filename, skip_header=None): - ''' + """ Takes a filename pointing to a 2D-Video Disdrometer NASA Field Campaign _dsd file and returns a drop size distribution object. @@ -51,7 +50,7 @@ def read_2dvd_dsd_nasa_gv(filename, skip_header=None): Returns: DropSizeDistrometer object - ''' + """ reader = NASA_2DVD_dsd_reader(filename, skip_header) if reader: @@ -59,22 +58,22 @@ def read_2dvd_dsd_nasa_gv(filename, skip_header=None): else: return None - del(reader) + del (reader) class NASA_2DVD_sav_reader(object): - ''' + """ This class reads and parses 2dvd disdrometer data from nasa ground campaigns. Use the read_2dvd_sav_nasa_gv() function to interface with this. - ''' + """ def __init__(self, filename, campaign): - ''' + """ Handles setting up a NASA 2DVD Reader. 
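The raw Parsivel matrix handled by `conv_md_to_nd` above is a count per (velocity, size) class; the method divides by the effective sampling area F (0.0054 m^2 in the code), the sampling time, the fall speed, and the bin width, then sums over velocity classes. A standalone sketch of that intended conversion, with stand-in tables and the sampling time given directly in seconds rather than the method's minutes-times-60 form:

```python
import numpy as np

# Standalone sketch: N(D_j) = sum_i M[i, j] / (F * t * v_i * dD_j).
F = 0.0054        # effective sampling area [m^2], as in the reader above
t = 60.0          # sampling time [s]
velocity = np.linspace(0.5, 20.0, 32)   # stand-in fall speeds [m s^-1]
spread = np.full(32, 0.2)               # stand-in bin widths [mm]

Md = np.random.poisson(2.0, (32, 32)).astype(float)  # counts (velocity, size)

Nd = (Md / (F * t * velocity[:, None] * spread[None, :])).sum(axis=0)
print(Nd.shape)  # one concentration per size bin [m^-3 mm^-1]
```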
- ''' + """ self.fields = {} self.time = [] # Time in minutes from start of recording @@ -84,40 +83,51 @@ def __init__(self, filename, campaign): self.notes = [] if not campaign in self.supported_campaigns: - print('Campaign type not supported') + print("Campaign type not supported") return - record = scipy.io.readsav(filename)['dsd_struct'] + record = scipy.io.readsav(filename)["dsd_struct"] self.diameter = common.var_to_dict( - 'diameter', record.diam[0], 'mm', 'Particle diameter of bins') + "diameter", record.diam[0], "mm", "Particle diameter of bins" + ) self.velocity = 9.65 - 10.3 * np.exp(-0.6 * self.diameter) # Atlas1973 # The above equation not completely self.velocity = common.var_to_dict( - 'velocity', 0.808, 'm s^-1', 'Terminal fall velocity for each bin') - # stable so we use Atlas 1977 - self.notes.append('Velocities from formula, not disdrometer\n') + "velocity", 0.808, "m s^-1", "Terminal fall velocity for each bin" + ) + # stable so we use Atlas 1977 + self.notes.append("Velocities from formula, not disdrometer\n") time = self._parse_time(record) try: self.time = self._get_epoch_time(time) except: - raise ValueError('Conversion to Epoch did not work!') - self.time = {'data': np.array(time), 'units': None, - 'title': 'Time', 'full_name': 'Native file time'} - - self.fields['Nd'] = common.var_to_dict( - 'Nd', record.dsd[0].T, 'm^-3 mm^-1', - 'Liquid water particle concentration') + raise ValueError("Conversion to Epoch did not work!") + self.time = { + "data": np.array(time), + "units": None, + "title": "Time", + "full_name": "Native file time", + } + + self.fields["Nd"] = common.var_to_dict( + "Nd", record.dsd[0].T, "m^-3 mm^-1", "Liquid water particle concentration" + ) self.bin_edges = common.var_to_dict( - 'bin_edges', np.array(list(range(0, 42))) * 0.2, 'mm', - 'Boundaries of bin sizes') - self.fields['rain_rate'] = common.var_to_dict( - 'rain_rate', record.rain[0], 'mm h^-1', 'Rain rate') + "bin_edges", + np.array(list(range(0, 42))) * 0.2, + "mm", + "Boundaries of bin sizes", + ) + self.fields["rain_rate"] = common.var_to_dict( + "rain_rate", record.rain[0], "mm h^-1", "Rain rate" + ) self.spread = common.var_to_dict( - 'spread', np.array([0.2]*42), 'mm', 'Bin size spread of bins') + "spread", np.array([0.2] * 42), "mm", "Bin size spread of bins" + ) def _parse_time(self, record): # For now we just drop the day stuff, Eventually we'll make this a @@ -127,46 +137,50 @@ def _parse_time(self, record): return hour + minute def _get_epoch_time(self, sample_time): - ''' + """ Convert the time to an Epoch time using package standard. 
- ''' + """ # Convert the time array into a datetime instance - #dt_units = 'minutes since ' + StartDate + '00:00:00+0:00' - #dtMin = num2date(time, dt_units) + # dt_units = 'minutes since ' + StartDate + '00:00:00+0:00' + # dtMin = num2date(time, dt_units) # Convert this datetime instance into a number of seconds since Epoch - #timesec = date2num(dtMin, common.EPOCH_UNITS) + # timesec = date2num(dtMin, common.EPOCH_UNITS) # Once again convert this data into a datetime instance time_unaware = num2date(sample_time, common.EPOCH_UNITS) - eptime = {'data': time_unaware, 'units': common.EPOCH_UNITS, - 'title': 'Time', 'full_name': 'Time (UTC)'} + eptime = { + "data": time_unaware, + "units": common.EPOCH_UNITS, + "title": "Time", + "full_name": "Time (UTC)", + } return eptime - supported_campaigns = ['ifloods'] + supported_campaigns = ["ifloods"] class NASA_2DVD_dsd_reader(object): - ''' + """ This class reads and parses 2dvd disdrometer data from NASA ground campaigns. It works with the _dropCounts files from IFloodS. Use the read_2dvd_dsd_nasa_gv() function to interface with this. - ''' + """ def __init__(self, filename, skip_header): - ''' + """ Handles setting up a NASA 2DVD Reader Reader - ''' + """ self.config = Configuration() num_samples = self._get_number_of_samples(filename, skip_header) - self.Nd = np.ma.zeros((num_samples,50)) + self.Nd = np.ma.zeros((num_samples, 50)) self.notes = [] self.fields = {} # This part is troubling because time strings change in nasa files. So we'll go with what our e - # example files have. + # example files have. dt = [] with open(filename) as input: if skip_header is not None: @@ -180,36 +194,90 @@ def __init__(self, filename, skip_header): minute = float(data_array[3]) # TODO: Make this match time handling(units) from other readers. 
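Several readers funnel their native timestamps through the same epoch convention. A minimal sketch of that round trip, assuming `netCDF4` and the package-wide `EPOCH_UNITS` string defined in `pydsd/io/common.py` further down this diff; the helper name `to_epoch_dict` is illustrative, not part of the package:

```python
from netCDF4 import date2num, num2date

# Package-wide epoch convention (as in pydsd.io.common).
EPOCH_UNITS = "seconds since 1970-1-1 00:00:00+0:00"

def to_epoch_dict(sample_times, t_units):
    """Native file times -> common epoch-time dictionary (UTC seconds)."""
    dts = num2date(sample_times, t_units)   # native units -> datetimes
    timesec = date2num(dts, EPOCH_UNITS)    # datetimes -> seconds since epoch
    return {
        "data": timesec,
        "units": EPOCH_UNITS,
        "standard_name": "Time",
        "long_name": "Time (UTC)",
    }

# e.g. minutes after midnight on a hypothetical observation day:
times = to_epoch_dict([0.0, 1.0, 2.5], "minutes since 2013-05-01T00:00:00")
```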
- dt.append(datetime.datetime(year, 1, 1) + datetime.timedelta(DOY-1, hours=hour, minutes=minute)) + dt.append( + datetime.datetime(year, 1, 1) + + datetime.timedelta(DOY - 1, hours=hour, minutes=minute) + ) self.Nd[idx, :] = [float(value) for value in data_array[4:]] - # TODO: Convert this to use new metadata - self.fields['Nd'] = common.var_to_dict( - 'Nd', self.Nd, 'm^-3 mm^-1', - 'Liquid water particle concentration') + self.fields["Nd"] = common.var_to_dict( + "Nd", self.Nd, "m^-3 mm^-1", "Liquid water particle concentration" + ) self.time = self._get_epoch_time(dt) - velocity =[0.248, 1.144, 2.018, 2.858, 3.649, 4.349, 4.916, 5.424, 5.892, 6.324, - 6.721, 7.084, 7.411, 7.703, 7.961, 8.187, 8.382, 8.548, 8.688, 8.805, - 8.900, 8.977, 9.038, 9.084, 9.118, 9.143, 9.159, 9.169, 9.174, 9.175, - 9.385, 9.415, 9.442, 9.465, 9.486, 9.505, 9.521, 9.536, 9.549, 9.560, - 9.570, 9.570, 9.570, 9.570, 9.570, 9.570, 9.570, 9.570, 9.570, 9.570] + velocity = [ + 0.248, + 1.144, + 2.018, + 2.858, + 3.649, + 4.349, + 4.916, + 5.424, + 5.892, + 6.324, + 6.721, + 7.084, + 7.411, + 7.703, + 7.961, + 8.187, + 8.382, + 8.548, + 8.688, + 8.805, + 8.900, + 8.977, + 9.038, + 9.084, + 9.118, + 9.143, + 9.159, + 9.169, + 9.174, + 9.175, + 9.385, + 9.415, + 9.442, + 9.465, + 9.486, + 9.505, + 9.521, + 9.536, + 9.549, + 9.560, + 9.570, + 9.570, + 9.570, + 9.570, + 9.570, + 9.570, + 9.570, + 9.570, + 9.570, + 9.570, + ] self.velocity = common.var_to_dict( - 'velocity', velocity, 'm s^-1', 'Terminal fall velocity for each bin') + "velocity", velocity, "m s^-1", "Terminal fall velocity for each bin" + ) self.bin_edges = common.var_to_dict( - 'bin_edges', np.array(list(range(0, 51))) * 0.2, 'mm', - 'Boundaries of bin sizes') + "bin_edges", + np.array(list(range(0, 51))) * 0.2, + "mm", + "Boundaries of bin sizes", + ) self.spread = common.var_to_dict( - 'spread', np.array([0.2] * 50), 'mm', - 'Bin size spread of bins') + "spread", np.array([0.2] * 50), "mm", "Bin size spread of bins" + ) self.diameter = common.var_to_dict( - 'diameter', np.arange(0.1, 10.1, .2), 'mm', 'Particle diameter of bins') + "diameter", np.arange(0.1, 10.1, .2), "mm", "Particle diameter of bins" + ) def _get_number_of_samples(self, filename, skip_header): - ''' Loop through file counting number of lines to calculate number of samples.''' + """ Loop through file counting number of lines to calculate number of samples.""" num_samples = 0 with open(filename) as input: @@ -218,15 +286,20 @@ def _get_number_of_samples(self, filename, skip_header): input.readline() for line in input: - num_samples +=1 + num_samples += 1 return num_samples def _get_epoch_time(self, sample_time): - ''' + """ Convert the time to an Epoch time using package standard. - ''' - eptime = {'data': sample_time, 'units': common.EPOCH_UNITS, - 'title': 'Time', 'full_name': 'Time (UTC)'} + """ + eptime = { + "data": sample_time, + "units": common.EPOCH_UNITS, + "title": "Time", + "full_name": "Time (UTC)", + } return eptime - supported_campaigns = ['mc3e', 'ifloods'] + + supported_campaigns = ["mc3e", "ifloods"] diff --git a/pydsd/aux_readers/read_2ds.py b/pydsd/aux_readers/read_2ds.py index 83b208c..d9a4ce2 100644 --- a/pydsd/aux_readers/read_2ds.py +++ b/pydsd/aux_readers/read_2ds.py @@ -13,8 +13,8 @@ from ..io import common -def read_2ds(filename, campaign='acapex'): - '''Read a airborne 2DS Cloud Probe File into a DropSizeDistribution Object. +def read_2ds(filename, campaign="acapex"): + """Read a airborne 2DS Cloud Probe File into a DropSizeDistribution Object. 
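The `_dsd` reader above assembles timestamps from a year / day-of-year / hour / minute tuple. A small sketch of that construction; `doy_to_datetime` is a hypothetical name and the sample values are made up:

```python
import datetime

def doy_to_datetime(year, doy, hour, minute):
    # DOY is 1-based, so day 1 must map onto January 1st.
    return datetime.datetime(year, 1, 1) + datetime.timedelta(
        days=doy - 1, hours=hour, minutes=minute
    )

assert doy_to_datetime(2013, 1, 0, 0) == datetime.datetime(2013, 1, 1)
assert doy_to_datetime(2013, 32, 12, 30) == datetime.datetime(2013, 2, 1, 12, 30)
```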
Takes a filename pointing to a 2DS Horizontal G1 Aircraft file and returns a `DropSizeDistribution` object. @@ -40,7 +40,7 @@ def read_2ds(filename, campaign='acapex'): Note: ----- No rain rate in the raw file. - ''' + """ reader = TwoDSReader(filename, campaign) @@ -49,16 +49,17 @@ def read_2ds(filename, campaign='acapex'): else: return None - del(reader) + del (reader) class TwoDSReader(object): - ''' TwoDSReader class for 2DS Cloud Probe Data. + """ TwoDSReader class for 2DS Cloud Probe Data. This class reads and parses data from 2DS data files. Use the read_2ds() function to interface with this. - ''' - def __init__(self, filename, campaign='acapex'): - ''' Initializer for a 2DS Cloud Probe class. + """ + + def __init__(self, filename, campaign="acapex"): + """ Initializer for a 2DS Cloud Probe class. Read and process a 2DS Cloud Probe data file. This should only be called by the read_2ds() function. @@ -73,17 +74,17 @@ def __init__(self, filename, campaign='acapex'): -------- two_ds: TwoDSReader TwoDSReader class - ''' + """ self.fields = {} time = [] bins = [] Nd = [] - self.f = open(filename, 'rU') + self.f = open(filename, "rU") reader = csv.reader(self.f) - #Remove Header lines but save them to variables for use later + # Remove Header lines but save them to variables for use later next(self.f) next(self.f) next(self.f) @@ -91,7 +92,7 @@ def __init__(self, filename, campaign='acapex'): for row in reader: time.append(float(row[0].split()[0])) - Nd.append(list(map(float,row[10:71]))) + Nd.append(list(map(float, row[10:71]))) Nd = np.ma.array(Nd) time = np.ma.array(time) @@ -99,15 +100,15 @@ def __init__(self, filename, campaign='acapex'): header = header_l.split(",") bins = header[10:71] - #Loop over the bins, split them, remove the C, split again into bin edges + # Loop over the bins, split them, remove the C, split again into bin edges bin_edge_str = [] - for i,sbin in enumerate(bins): + for i, sbin in enumerate(bins): s = sbin.split(":") - bin_no_clist=s[1] - s1 = bin_no_clist.split('-') + bin_no_clist = s[1] + s1 = bin_no_clist.split("-") bin_edge_str.append(s1) - #Loop over the list of strings containing bin edges, turn them into integers + # Loop over the list of strings containing bin edges, turn them into integers bin_edge_int = [] bin_edge_int.append(float(bin_edge_str[0][0])) @@ -119,27 +120,33 @@ def __init__(self, filename, campaign='acapex'): spread = np.diff(bin_edges) - #diameter - diameter = bin_edges[0:-1] + spread/2.0 + # diameter + diameter = bin_edges[0:-1] + spread / 2.0 # NEED TO GRAB DATE FROM FILE yyyy = os.path.basename(self.filename).split(".")[1][0:4] mm = os.path.basename(self.filename).split(".")[1][4:6] dd = os.path.basename(self.filename).split(".")[1][6:8] - t_units = 'seconds since ' + "-".join([yyyy, mm, dd]) + 'T00:00:00' + t_units = "seconds since " + "-".join([yyyy, mm, dd]) + "T00:00:00" # Return a common epoch time dictionary self.time = _get_epoch_time(time, t_units) self.bin_edges = common.var_to_dict( - 'bin_edges', bin_edges/1000., 'mm', 'Boundaries of bin sizes') + "bin_edges", bin_edges / 1000., "mm", "Boundaries of bin sizes" + ) self.spread = common.var_to_dict( - 'spread', spread/1000., 'mm', 'Bin size spread of bins') + "spread", spread / 1000., "mm", "Bin size spread of bins" + ) self.diameter = common.var_to_dict( - 'diameter', diameter/1000., 'mm', 'Particle diameter of bins') + "diameter", diameter / 1000., "mm", "Particle diameter of bins" + ) # #/L/micrometer to #/m^3/mm - self.fields['Nd'] = common.var_to_dict( - 'Nd', 
np.ma.array(Nd *1000.*1000.), 'm^-3 mm^-1', - 'Liquid water particle concentration') + self.fields["Nd"] = common.var_to_dict( + "Nd", + np.ma.array(Nd * 1000. * 1000.), + "m^-3 mm^-1", + "Liquid water particle concentration", + ) def _get_epoch_time(sample_times, t_units): """Convert time to epoch time and return a dictionary.""" @@ -149,41 +156,9 @@ def _get_epoch_time(sample_times, t_units): timesec = date2num(dts, common.EPOCH_UNITS) # Now once again convert this data into a datetime instance time_unaware = num2date(timesec, common.EPOCH_UNITS) - eptime = {'data': time_unaware, 'units': common.EPOCH_UNITS, - 'standard_name': 'Time', 'long_name': 'Time (UTC)'} - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + eptime = { + "data": time_unaware, + "units": common.EPOCH_UNITS, + "standard_name": "Time", + "long_name": "Time (UTC)", + } diff --git a/pydsd/aux_readers/read_hvps.py b/pydsd/aux_readers/read_hvps.py index ba06a93..a6e3db2 100644 --- a/pydsd/aux_readers/read_hvps.py +++ b/pydsd/aux_readers/read_hvps.py @@ -12,8 +12,9 @@ from ..DropSizeDistribution import DropSizeDistribution from ..io import common + def read_hvps(filename): - ''' Read a airborne HVPS Cloud Probe File into a DropSizeDistribution Object. + """ Read a airborne HVPS Cloud Probe File into a DropSizeDistribution Object. Takes a filename pointing to a HVPS Horizontal G1 Aircraft file and returns a drop size distribution object. @@ -34,7 +35,7 @@ def read_hvps(filename): Note: ----- No rain rate in the raw file. - ''' + """ reader = HVPSReader(filename) @@ -44,68 +45,135 @@ def read_hvps(filename): else: return None - del(reader) + del (reader) + class HVPSReader(object): - ''' + """ This class reads and parses data from 2DS data files. Use the read_2ds_h() function to interface with this. 
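The 2DS reader recovers bin edges by string-splitting the CSV header cells and then rescales the concentrations from the probe's #/L/um to the package's m^-3 mm^-1 (a factor of 10^3 for L -> m^3 times 10^3 for um^-1 -> mm^-1). A sketch under the assumption that each header cell is shaped like `label:low-high`; the sample strings and numbers are hypothetical:

```python
import numpy as np

# Hypothetical header cells shaped like "label:low-high" [micrometers].
bins = ["C01:5-15", "C02:15-25", "C03:25-45"]

bin_edge_str = [sbin.split(":")[1].split("-") for sbin in bins]
bin_edges = [float(bin_edge_str[0][0])]                 # leading edge of bin 1
bin_edges += [float(pair[1]) for pair in bin_edge_str]  # trailing edges
assert bin_edges == [5.0, 15.0, 25.0, 45.0]

# #/L/um -> #/m^3/mm: 1 m^3 = 1000 L and 1 mm = 1000 um.
Nd_per_L_per_um = np.ma.array([[0.12, 0.05, 0.01]])
Nd = Nd_per_L_per_um * 1000. * 1000.   # [m^-3 mm^-1]
```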
- ''' + """ - def __init__(self, filename): #, campaign) - ''' + def __init__(self, filename): # , campaign) + """ Handles settuping up a 2DS H reader - ''' + """ self.fields = {} time = [] Nd = [] - bin_edges = np.array([75,225,375,525,675,825,975,1125,1275,1425,1575,1725,1875,2025,2175,2325, - 2475,2625,2775,2925,3075,3375,3675,3975,4275,4575,4875,5175,5475,5775, - 6075,6375,6675,6975,7275,7575,8325,9075,9825,10575,11325,12075,12825, - 13575,14325,15075,16575,18075,19575,21075,22575,24075,25575,27075,28575, - 30075,33075,36075,39075,42075,45075,48075]) - - self.f = open(filename, 'rU') + bin_edges = np.array( + [ + 75, + 225, + 375, + 525, + 675, + 825, + 975, + 1125, + 1275, + 1425, + 1575, + 1725, + 1875, + 2025, + 2175, + 2325, + 2475, + 2625, + 2775, + 2925, + 3075, + 3375, + 3675, + 3975, + 4275, + 4575, + 4875, + 5175, + 5475, + 5775, + 6075, + 6375, + 6675, + 6975, + 7275, + 7575, + 8325, + 9075, + 9825, + 10575, + 11325, + 12075, + 12825, + 13575, + 14325, + 15075, + 16575, + 18075, + 19575, + 21075, + 22575, + 24075, + 25575, + 27075, + 28575, + 30075, + 33075, + 36075, + 39075, + 42075, + 45075, + 48075, + ] + ) + + self.f = open(filename, "rU") reader = csv.reader(self.f) - #Remove Header lines + # Remove Header lines next(self.f) next(self.f) next(self.f) next(self.f) - for row in reader: time.append(float(row[0].split()[0])) - Nd.append(list(map(float,row[10:71]))) + Nd.append(list(map(float, row[10:71]))) Nd = np.ma.array(Nd) time = np.ma.array(time) - #spread + # spread spread = np.diff(bin_edges) - diameter = bin_edges[:-1] + spread/2.0 + diameter = bin_edges[:-1] + spread / 2.0 # NEED TO GRAB DATE FROM FILE yyyy = os.path.basename(self.filename).split(".")[1][0:4] mm = os.path.basename(self.filename).split(".")[1][4:6] dd = os.path.basename(self.filename).split(".")[1][6:8] - t_units = 'seconds since ' + "-".join([yyyy, mm, dd]) + 'T00:00:00' + t_units = "seconds since " + "-".join([yyyy, mm, dd]) + "T00:00:00" # Return a common epoch time dictionary self.time = _get_epoch_time(time, t_units) self.bin_edges = common.var_to_dict( - 'bin_edges', bin_edges/1000., 'mm', 'Boundaries of bin sizes') + "bin_edges", bin_edges / 1000., "mm", "Boundaries of bin sizes" + ) self.spread = common.var_to_dict( - 'spread', spread/1000., 'mm', 'Bin size spread of bins') + "spread", spread / 1000., "mm", "Bin size spread of bins" + ) self.diameter = common.var_to_dict( - 'diameter', diameter/1000., 'mm', 'Particle diameter of bins') + "diameter", diameter / 1000., "mm", "Particle diameter of bins" + ) # #/L/micrometer to #/m^3/mm - self.fields['Nd'] = common.var_to_dict( - 'Nd', np.ma.array(Nd *1000.*1000.), 'm^-3 mm^-1', - 'Liquid water particle concentration') + self.fields["Nd"] = common.var_to_dict( + "Nd", + np.ma.array(Nd * 1000. 
* 1000.), + "m^-3 mm^-1", + "Liquid water particle concentration", + ) def _get_epoch_time(sample_times, t_units): """Convert time to epoch time and return a dictionary.""" @@ -115,40 +183,9 @@ def _get_epoch_time(sample_times, t_units): timesec = date2num(dts, common.EPOCH_UNITS) # Now once again convert this data into a datetime instance time_unaware = num2date(timesec, common.EPOCH_UNITS) - eptime = {'data': time_unaware, 'units': common.EPOCH_UNITS, - 'standard_name': 'Time', 'long_name': 'Time (UTC)'} - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + eptime = { + "data": time_unaware, + "units": common.EPOCH_UNITS, + "standard_name": "Time", + "long_name": "Time (UTC)", + } diff --git a/pydsd/fit/ua98.py b/pydsd/fit/ua98.py index bf512fe..6c402d8 100644 --- a/pydsd/fit/ua98.py +++ b/pydsd/fit/ua98.py @@ -1,9 +1,9 @@ # -*- coding: utf-8 -*- -''' +""" The calc_dsd module contains various fitting functions to calculate dsd parameters based on measured drop size distributions. Many of these (all currently) were ported from the pyparticleprobe package. -''' +""" from __future__ import print_function, division @@ -52,10 +52,11 @@ """ # Define various constants that may be used for calculations -rho0=1.2 # reference density at the surface [kg m^-3] -rhoL=1000. # density of water [kg m^-3] +rho0 = 1.2 # reference density at the surface [kg m^-3] +rhoL = 1000. # density of water [kg m^-3] + -def eta_ratio(M2,M4,M6): +def eta_ratio(M2, M4, M6): """Compute the ratio eta using the method of moments Ulbrich and Atlas (JAM 1998), Eqn 8 @@ -79,10 +80,11 @@ def eta_ratio(M2,M4,M6): This particular methodology uses the 2nd, 4th, and 6th moments. """ - Eta = M4**2 / (M2 * M6) + Eta = M4 ** 2 / (M2 * M6) return Eta -def shape(M2,M4,M6): + +def shape(M2, M4, M6): """Compute the shape parameter mu using the method of moments Ulbrich and Atlas (JAM 1998), Eqns 8 and 9 @@ -111,21 +113,23 @@ def shape(M2,M4,M6): are not calculated. """ # Calculate the eta ratio [eq. 8] - eta = eta_ratio(M2,M4,M6) + eta = eta_ratio(M2, M4, M6) # Now calculate the shape parameter [eq. 3] - muNumer = (7.0 - 11.0 * eta) - np.sqrt((7.0 - 11.0 * eta)**2.0 - 4.0 * (eta - 1.0) - * (30. * eta - 12.0)) + muNumer = (7.0 - 11.0 * eta) - np.sqrt( + (7.0 - 11.0 * eta) ** 2.0 - 4.0 * (eta - 1.0) * (30. * eta - 12.0) + ) muDenom = 2.0 * (eta - 1.0) # Mask any zero or unrealistically low values in denominator - muDenom = np.ma.masked_inside(muDenom,-1E-1,1E-1) + muDenom = np.ma.masked_inside(muDenom, -1E-1, 1E-1) mu = muNumer / muDenom return mu -def slope(M2,M4,mu): + +def slope(M2, M4, mu): """Compute the slope (Lambda) using the method of moments Ulbrich and Atlas (JAM 1998), Eqn 10 @@ -151,7 +155,8 @@ def slope(M2,M4,mu): return Lambda -def intercept(M6,mu,Lambda): + +def intercept(M6, mu, Lambda): """Compute the intercept parameter (N0) using the method of moments Ulbrich and Atlas (JAM 1998), Eqn 6 solved for N0 @@ -176,25 +181,26 @@ def intercept(M6,mu,Lambda): """ # Calculate numerator - IntNumer = M6 * Lambda**(7. + mu) + IntNumer = M6 * Lambda ** (7. + mu) # Calculate denominator # Mask values near 0., otherwise gamma function returns "inf" - mucopy = np.ma.masked_inside(mu,-1E-1,1E-1) + mucopy = np.ma.masked_inside(mu, -1E-1, 1E-1) IntDenom = scifunct.gamma(7 + mucopy) # Mask any invalid values np.ma.masked_invalid(IntDenom) # Mask any zero values from Denom (-999. a problem) - IntDenom = np.ma.masked_equal(IntDenom,0.) 
- #print(mu(:,0)+" "+Lambda(:,0)+" "+IntNumer(:,0)+" "+IntDenom(:,0)) + IntDenom = np.ma.masked_equal(IntDenom, 0.) + # print(mu(:,0)+" "+Lambda(:,0)+" "+IntNumer(:,0)+" "+IntDenom(:,0)) # Calculate N0 N0 = IntNumer / IntDenom return N0 -def mom_d0(mu,Lambda): + +def mom_d0(mu, Lambda): """Compute the median volume diameter (D0) using the method of moments. Ulbrich and Atlas (JAM 1998), Eqn 11 @@ -215,7 +221,8 @@ def mom_d0(mu,Lambda): D0 = (3.67 + mu) / Lambda return D0 -def zr_a(mu,N0): + +def zr_a(mu, N0): """Assuming a rainfall parameter relationship of Z=AR^b, Compute the A prefactor using gamma distribution. @@ -235,20 +242,21 @@ def zr_a(mu,N0): """ # Mask 0. values, otherwise gamma function returns "inf" - mucopy = np.ma.masked_equal(mu,0.) + mucopy = np.ma.masked_equal(mu, 0.) # gamma_fix is a patch for versions earlier than NCL v6.2, # which cannot handle missing data in the gamma function - ANumer = 10E6 * scifunct.gamma(7 + mucopy) * N0**(-2.33/(4.67 + mu)) - ADenom = (33.31 * scifunct.gamma(4.67 + mucopy))**((7 + mu)/(4.67 + mu)) + ANumer = 10E6 * scifunct.gamma(7 + mucopy) * N0 ** (-2.33 / (4.67 + mu)) + ADenom = (33.31 * scifunct.gamma(4.67 + mucopy)) ** ((7 + mu) / (4.67 + mu)) # Mask any zero values from Denom - ADenom = np.ma.masked_equal(ADenom,0.) + ADenom = np.ma.masked_equal(ADenom, 0.) A = ANumer / ADenom return A + def zr_b(mu): """Assuming a rainfall parameter relationship of Z=AR^b, Compute the b exponent using gamma distribution. @@ -268,7 +276,8 @@ def zr_b(mu): b = (7 + mu) / (4.67 + mu) return b -def norm_intercept(LWC,Dm): + +def norm_intercept(LWC, Dm): """Calculates the normalized intercept parameter, which is more physically meaningful than N0. @@ -287,6 +296,6 @@ def norm_intercept(LWC,Dm): Normalized intercept parameter """ # The factor of 1000 converts the water density from kg/m^3 to g/m^3 - Nw = (256 / (np.pi * rhoL * 1000.)) * (LWC / Dm**4) + Nw = (256 / (np.pi * rhoL * 1000.)) * (LWC / Dm ** 4) return Nw diff --git a/pydsd/io/Image2DReader.py b/pydsd/io/Image2DReader.py index 9130ce2..1c4d5c7 100644 --- a/pydsd/io/Image2DReader.py +++ b/pydsd/io/Image2DReader.py @@ -11,7 +11,7 @@ def read_ucsc_netcdf(filename): - ''' + """ Takes a filename pointing to a probe data file and returns a drop size distribution object. @@ -21,9 +21,9 @@ def read_ucsc_netcdf(filename): Returns: DropSizeDistrometer object - ''' + """ - reader = Image2DReader(filename, file_type='ucsc_netcdf') + reader = Image2DReader(filename, file_type="ucsc_netcdf") if reader: dsd = DropSizeDistribution(reader) @@ -31,8 +31,9 @@ def read_ucsc_netcdf(filename): else: return None + def read_noaa_aoml_netcdf(filename): - ''' + """ Takes a filename pointing to a probe data file and returns a drop size distribution object. 
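The moment-method chain in `ua98.py` inverts gamma-DSD moments for mu, Lambda, and D0. A self-contained sanity check of the eta and mu formulas shown above — a sketch, assuming a synthetic discretized gamma DSD as input; the Lambda step uses the gamma-moment identity M4/M2 = (mu+3)(mu+4)/Lambda^2 rather than quoting the module's `slope` body, which this diff does not show:

```python
import numpy as np

# Synthetic gamma DSD: N(D) = N0 * D**mu * exp(-Lam * D), mu=2, Lam=2 mm^-1.
N0, mu_true, lam_true = 8000.0, 2.0, 2.0
D = np.linspace(0.01, 20.0, 20000)              # diameter grid [mm]
N = N0 * D ** mu_true * np.exp(-lam_true * D)

def moment(n):
    return np.trapz(N * D ** n, D)

M2, M4, M6 = moment(2), moment(4), moment(6)

# Ulbrich and Atlas (1998), Eqns 8, 9, and 11, as implemented above.
eta = M4 ** 2 / (M2 * M6)
mu = (
    (7.0 - 11.0 * eta)
    - np.sqrt((7.0 - 11.0 * eta) ** 2 - 4.0 * (eta - 1.0) * (30.0 * eta - 12.0))
) / (2.0 * (eta - 1.0))
Lam = np.sqrt((4.0 + mu) * (3.0 + mu) * M2 / M4)  # slope [mm^-1]
D0 = (3.67 + mu) / Lam                            # median volume diameter [mm]

assert abs(mu - mu_true) < 1e-2 and abs(Lam - lam_true) < 1e-2
```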
@@ -42,9 +43,9 @@ def read_noaa_aoml_netcdf(filename): Returns: DropSizeDistrometer object - ''' + """ - reader = Image2DReader(filename, file_type='noaa_aoml_netcdf') + reader = Image2DReader(filename, file_type="noaa_aoml_netcdf") if reader: dsd = DropSizeDistribution(reader) @@ -53,20 +54,24 @@ def read_noaa_aoml_netcdf(filename): class Image2DReader(object): + def __init__(self, filename, file_type): self.filename = filename self.fields = {} - if file_type is 'noaa_aoml_netcdf': + if file_type is "noaa_aoml_netcdf": self._read_noaa_aoml_netcdf() - if file_type is 'ucsc_netcdf': + if file_type is "ucsc_netcdf": self._read_ucsc_netcdf() - def _read_ucsc_netcdf(self, - flight_time_dict=None, flight_air_density_dict=None, - flight_vert_wind_dict=None, - flight_altitude_dict=None): + def _read_ucsc_netcdf( + self, + flight_time_dict=None, + flight_air_density_dict=None, + flight_vert_wind_dict=None, + flight_altitude_dict=None, + ): """ Read particle distribution NetCDF files generated by the University of California at Santa Cruz. @@ -97,7 +102,7 @@ def _read_ucsc_netcdf(self, disdrometer. """ # Read the NetCDF file - ncFile = netCDF4.Dataset(self.filename, 'r') + ncFile = netCDF4.Dataset(self.filename, "r") yyyy = os.path.basename(self.filename).split(".")[1][0:4] mm = os.path.basename(self.filename).split(".")[1][4:6] @@ -108,27 +113,35 @@ def _read_ucsc_netcdf(self, self.diameter = common.ncvar_to_dict(ncFile.variables[varmatch[0]]) varmin = [s for s in ncFile.variables.keys() if "corr_bin_min" in s] varmax = [s for s in ncFile.variables.keys() if "corr_bin_max" in s] - bin_edges = np.hstack((ncFile.variables[varmin[0]][0], - ncFile.variables[varmax[0]])) + bin_edges = np.hstack( + (ncFile.variables[varmin[0]][0], ncFile.variables[varmax[0]]) + ) self.bin_edges = common.var_to_dict( - 'bin_edges', bin_edges/1000., 'mm', 'Particle size bin edges') + "bin_edges", bin_edges / 1000., "mm", "Particle size bin edges" + ) self.spread = common.var_to_dict( - 'spread', np.diff(bin_edges/1000.), - self.bin_edges['units'], 'Bin spread size') + "spread", + np.diff(bin_edges / 1000.), + self.bin_edges["units"], + "Bin spread size", + ) # Retrieve concentration convert from cm^-3 to m^-3 varNd = [s for s in ncFile.variables.keys() if "corr_conc" in s] nd = ncFile.variables[varNd[0]][:] * 1E6 - self.fields['Nd'] = common.var_to_dict( - 'Nd', np.rollaxis(np.ma.array(nd), 1), 'm^-3', - 'Liquid water particle concentration') + self.fields["Nd"] = common.var_to_dict( + "Nd", + np.rollaxis(np.ma.array(nd), 1), + "m^-3", + "Liquid water particle concentration", + ) # First pull out the time variable - HHMMSS = np.ma.array(ncFile.variables['time'][:]) + HHMMSS = np.ma.array(ncFile.variables["time"][:]) # Make this an "unaware" datetime object converted back into a number # of seconds since beginning of day. 
Necessary because of the way the # variable is saved in NetCDF - t_units = 'seconds since ' + "-".join([yyyy, mm, dd]) + ' 00:00:00' + t_units = "seconds since " + "-".join([yyyy, mm, dd]) + " 00:00:00" # Return a common epoch time dictionary self.time = get_epoch_time(HHMMSS, t_units) @@ -137,36 +150,53 @@ def _read_ucsc_netcdf(self, if flight_time_dict is not None: if flight_air_density_dict is not None: air_density = np.ma.array( - sinterp.griddata(flight_time_dict['data'][:], - flight_air_density_dict['data'][:], - Time_unaware[:])) - self.fields['air_density'] = common.var_to_dict( - 'Air Density', air_density, - flight_air_density_dict['units'], 'Air Density') + sinterp.griddata( + flight_time_dict["data"][:], + flight_air_density_dict["data"][:], + Time_unaware[:], + ) + ) + self.fields["air_density"] = common.var_to_dict( + "Air Density", + air_density, + flight_air_density_dict["units"], + "Air Density", + ) if flight_vert_wind_dict is not None: vert_wind_velocity = np.ma.array( - sinterp.griddata(flight_time_dict['data'][:], - flight_vert_wind_dict['data'][:], - Time_unaware[:])) - self.fields['vert_wind_velocity'] = common.var_to_dict( - 'Vertical Wind Velocity', vert_wind_velocity, - flight_vert_wind_dict['units'], 'Vertical Wind Velocity') + sinterp.griddata( + flight_time_dict["data"][:], + flight_vert_wind_dict["data"][:], + Time_unaware[:], + ) + ) + self.fields["vert_wind_velocity"] = common.var_to_dict( + "Vertical Wind Velocity", + vert_wind_velocity, + flight_vert_wind_dict["units"], + "Vertical Wind Velocity", + ) if flight_altitude_dict is not None: altitude = np.ma.array( - sinterp.griddata(flight_time_dict['data'][:], - flight_altitude_dict['data'][:], - Time_unaware[:])) - self.fields['altitude'] = common.var_to_dict( - 'Altitude', altitude, - flight_altitude_dict['units'], 'Altitude') - - def _read_noaa_aoml_netcdf(self, - flight_time_dict=None, - flight_air_density_dict=None, - flight_vert_wind_dict=None, - flight_altitude_dict=None): + sinterp.griddata( + flight_time_dict["data"][:], + flight_altitude_dict["data"][:], + Time_unaware[:], + ) + ) + self.fields["altitude"] = common.var_to_dict( + "Altitude", altitude, flight_altitude_dict["units"], "Altitude" + ) + + def _read_noaa_aoml_netcdf( + self, + flight_time_dict=None, + flight_air_density_dict=None, + flight_vert_wind_dict=None, + flight_altitude_dict=None, + ): """ Read a NetCDF file containing distribution data generated by NOAA AOML. The original Fortran binary has been converted to NetCDF format. @@ -192,61 +222,62 @@ def _read_noaa_aoml_netcdf(self, Mid-point size of bin [micron]. 
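Both airborne readers derive the per-bin geometry from the edge list: widths are first differences and midpoints sit half a width above the lower edge, with a final um -> mm rescaling. A minimal sketch with hypothetical edge values:

```python
import numpy as np

# Edges in micrometers, as read from a probe file (hypothetical values).
bin_edges = np.array([75., 225., 375., 525.])

spread = np.diff(bin_edges)                 # bin widths
diameter = bin_edges[:-1] + spread / 2.0    # bin midpoints

# Convert probe units to the package convention [mm].
bin_edges_mm = bin_edges / 1000.
spread_mm = spread / 1000.
diameter_mm = diameter / 1000.
```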
""" # Read the NetCDF file - ncFile = netCDF4.Dataset(self.filename, 'r') + ncFile = netCDF4.Dataset(self.filename, "r") # Read the size bins - self.diameter = common.ncvar_to_dict(ncFile.variables['Sizebins']) - self.diameter['data'] = self.diameter['data'] / 1000.0 - self.diameter['units'] = 'mm' + self.diameter = common.ncvar_to_dict(ncFile.variables["Sizebins"]) + self.diameter["data"] = self.diameter["data"] / 1000.0 + self.diameter["units"] = "mm" # Retrieve the time variable - eptime = common.ncvar_to_dict(ncFile.variables['EpochTime']) + eptime = common.ncvar_to_dict(ncFile.variables["EpochTime"]) # Return a common epoch time dictionary - self.time = get_epoch_time(eptime['data'][:], eptime['units']) - self.spread = {'data': np.zeros(len(self.diameter['data'])), - 'units': 'mm', 'Description': 'Bin Width'} - self.spread['data'][:] = 0.1 #millimeters for now - - bin_edges = self.diameter['data'] - (self.spread['data'][0]/2.0) - bin_edges=np.append( - bin_edges, bin_edges[-1] + self.spread['data'][0]) + self.time = get_epoch_time(eptime["data"][:], eptime["units"]) + self.spread = { + "data": np.zeros(len(self.diameter["data"])), + "units": "mm", + "Description": "Bin Width", + } + self.spread["data"][:] = 0.1 # millimeters for now + + bin_edges = self.diameter["data"] - (self.spread["data"][0] / 2.0) + bin_edges = np.append(bin_edges, bin_edges[-1] + self.spread["data"][0]) self.bin_edges = common.var_to_dict( - 'bin_edges', bin_edges, 'mm', 'Particle size bin edges') - + "bin_edges", bin_edges, "mm", "Particle size bin edges" + ) # Retrieve other variables - self.fields['Nd_water'] = common.ncvar_to_dict( - ncFile.variables['Water']) - self.fields['Nd_ice'] = common.ncvar_to_dict( - ncFile.variables['Ice']) - self.fields['air_density'] = common.ncvar_to_dict( - ncFile.variables['RhoAir']) - self.fields['vert_wind_velocity'] = common.ncvar_to_dict( - ncFile.variables['vertVel']) - - #Now let's convert to drop counts by dividing by volume. - vol_per_bin = (1/3.0) * np.pi * np.power(self.diameter['data'], 3) - - self.fields['Nd'] = common.var_to_dict( - 'Nd', np.divide(self.fields['Nd_water']['data'], vol_per_bin), - '#/mm/m^3', 'Calculated Drop Counts') + self.fields["Nd_water"] = common.ncvar_to_dict(ncFile.variables["Water"]) + self.fields["Nd_ice"] = common.ncvar_to_dict(ncFile.variables["Ice"]) + self.fields["air_density"] = common.ncvar_to_dict(ncFile.variables["RhoAir"]) + self.fields["vert_wind_velocity"] = common.ncvar_to_dict( + ncFile.variables["vertVel"] + ) + + # Now let's convert to drop counts by dividing by volume. + vol_per_bin = (1 / 3.0) * np.pi * np.power(self.diameter["data"], 3) + + self.fields["Nd"] = common.var_to_dict( + "Nd", + np.divide(self.fields["Nd_water"]["data"], vol_per_bin), + "#/mm/m^3", + "Calculated Drop Counts", + ) def apply_running_average(self, array, dim=0, num=6): - ''' + """ Parameters ---------- num : int Number of points for running average dim : int Dimension to applay the averaging. 
- ''' + """ weights = np.repeat(1., num) / num if dim == 0: - array = np.convolve(array, weights, 'valid')[::num] + array = np.convolve(array, weights, "valid")[::num] else: tmp = np.empty(array.shape[0], array.shape[1] - num + 1) for index in range(array.shape[0]): - tmp[index,:] = np.convolve(array[index,:], weights, 'valid') + tmp[index, :] = np.convolve(array[index, :], weights, "valid") array = tmp[:, ::num] - - diff --git a/pydsd/io/JWDReader.py b/pydsd/io/JWDReader.py index 2b7fe3a..76ee5d0 100644 --- a/pydsd/io/JWDReader.py +++ b/pydsd/io/JWDReader.py @@ -5,8 +5,9 @@ from ..DropSizeDistribution import DropSizeDistribution from . import common + def read_jwd(filename): - ''' + """ Takes a filename pointing to a Joss-WaldVogel file and returns a drop size distribution object. @@ -16,30 +17,76 @@ def read_jwd(filename): Returns: DropSizeDistrometer object - ''' + """ reader = JWDReader(filename) return DropSizeDistribution(reader) class JWDReader(object): - ''' + """ JWDReader class takes takes a filename as it's only argument(for now). This should be a Joss-Waldvogel datafile. - ''' + """ diameter = common.var_to_dict( - 'diameter', - np.array([ - 0.359, 0.455, 0.551, 0.656, 0.771, 0.913, 1.116, 1.331, 1.506, 1.665, - 1.912, 2.259, 2.584, 2.869, 3.198, 3.544, 3.916, 4.350, 4.859, 5.373]), - 'mm', 'Particle diameter of bins') + "diameter", + np.array( + [ + 0.359, + 0.455, + 0.551, + 0.656, + 0.771, + 0.913, + 1.116, + 1.331, + 1.506, + 1.665, + 1.912, + 2.259, + 2.584, + 2.869, + 3.198, + 3.544, + 3.916, + 4.350, + 4.859, + 5.373, + ] + ), + "mm", + "Particle diameter of bins", + ) spread = common.var_to_dict( - 'spread', - np.array([ - 0.092, 0.100, 0.091, 0.119, 0.112, 0.172, 0.233, 0.197, 0.153, 0.166, - 0.329, 0.364, 0.286, 0.284, 0.374, 0.319, 0.423, 0.446, 0.572, 0.455]), - 'mm', 'Bin size spread of bins') + "spread", + np.array( + [ + 0.092, + 0.100, + 0.091, + 0.119, + 0.112, + 0.172, + 0.233, + 0.197, + 0.153, + 0.166, + 0.329, + 0.364, + 0.286, + 0.284, + 0.374, + 0.319, + 0.423, + 0.446, + 0.572, + 0.455, + ] + ), + "mm", + "Bin size spread of bins", + ) def __init__(self, filename): self.filename = filename @@ -52,13 +99,15 @@ def __init__(self, filename): self._prep_data() self.bin_edges = common.var_to_dict( - 'bin_edges', np.hstack( - (0, self.diameter['data'] + - np.array(self.spread['data']) / 2)) * 0.2, - 'mm', 'Boundaries of bin sizes') + "bin_edges", + np.hstack((0, self.diameter["data"] + np.array(self.spread["data"]) / 2)) + * 0.2, + "mm", + "Boundaries of bin sizes", + ) def getSec(self, s, start_hh, start_mm): - l = s.split(':') + l = s.split(":") if int(l[0]) < start_hh: return int(l[0]) * 3600 + int(l[1]) * 60 + int(l[2]) + 86400 elif int(l[0]) == start_hh and int(l[1]) < start_mm: @@ -76,46 +125,48 @@ def _read_file(self): with open(self.filename) as f: next(f) for i, line in enumerate(f): - if i==1: + if i == 1: start_time = line.split()[1] - t = start_time.split(':') + t = start_time.split(":") start_hh = int(t[0]) start_mm = int(t[1]) self.time.append( - float(self.getSec(line.split()[1],start_hh,start_mm))) + float(self.getSec(line.split()[1], start_hh, start_mm)) + ) md = line.split()[3:23] md_float = np.array(list(map(float, md))) - self.Nd.append( - self.conv_md_to_nd(md_float)) - self.rain_rate.append( - float(line.split()[24])) - elif i>1: + self.Nd.append(self.conv_md_to_nd(md_float)) + self.rain_rate.append(float(line.split()[24])) + elif i > 1: start_time = line.split()[1] - t = start_time.split(':') + t = start_time.split(":") start_hh = 
int(t[0]) start_mm = int(t[1]) self.time.append( - float(self.getSec(line.split()[1], start_hh, start_mm))) + float(self.getSec(line.split()[1], start_hh, start_mm)) + ) md = line.split()[3:23] md_float = np.array(list(map(float, md))) - self.Nd.append( - self.conv_md_to_nd(md_float)) - self.rain_rate.append( - float(line.split()[24])) + self.Nd.append(self.conv_md_to_nd(md_float)) + self.rain_rate.append(float(line.split()[24])) def _get_epoch_time(self): - ''' + """ Convert the time to an Epoch time using package standard. - ''' + """ # Convert the time array into a datetime instance - dt_units = 'seconds since ' + StartDate + '00:00:00+0:00' + dt_units = "seconds since " + StartDate + "00:00:00+0:00" dt_minutes = num2date(self.time, dt_units) # Convert this datetime instance into a number of seconds since Epoch timesec = date2num(dt_minutes, common.EPOCH_UNITS) # Once again convert this data into a datetime instance time_unaware = num2date(timesec, common.EPOCH_UNITS) - eptime = {'data': time_unaware, 'units': common.EPOCH_UNITS, - 'title': 'Time', 'full_name': 'Time (UTC)'} + eptime = { + "data": time_unaware, + "units": common.EPOCH_UNITS, + "title": "Time", + "full_name": "Time (UTC)", + } return eptime def _prep_data(self): @@ -123,15 +174,23 @@ def _prep_data(self): self.time = np.ma.array(self.time) self.time = self.time - self.time[0] - self.fields['Nd'] = common.var_to_dict( - 'Nd', np.ma.array(self.Nd), 'm^-3 mm^-1', - 'Liquid water particle concentration') - self.fields['rain_rate'] = common.var_to_dict( - 'Rain rate', np.ma.array(self.rain_rate), 'mm/h', 'Rain rate') + self.fields["Nd"] = common.var_to_dict( + "Nd", + np.ma.array(self.Nd), + "m^-3 mm^-1", + "Liquid water particle concentration", + ) + self.fields["rain_rate"] = common.var_to_dict( + "Rain rate", np.ma.array(self.rain_rate), "mm/h", "Rain rate" + ) try: self.time = self._get_epoch_time() except: - raise ValueError('Conversion to Epoch did not work!') - self.time = {'data': np.array(self.time), 'units': None, - 'title': 'Time', 'full_name': 'Native file time'} + raise ValueError("Conversion to Epoch did not work!") + self.time = { + "data": np.array(self.time), + "units": None, + "title": "Time", + "full_name": "Native file time", + } diff --git a/pydsd/io/ParsivelNasaGVReader.py b/pydsd/io/ParsivelNasaGVReader.py index f922f56..b84adb7 100644 --- a/pydsd/io/ParsivelNasaGVReader.py +++ b/pydsd/io/ParsivelNasaGVReader.py @@ -12,8 +12,8 @@ from . import common -def read_parsivel_nasa_gv(filename, campaign='ifloods', skip_header=None): - ''' +def read_parsivel_nasa_gv(filename, campaign="ifloods", skip_header=None): + """ Parameters ---------- filename: str @@ -41,7 +41,7 @@ def read_parsivel_nasa_gv(filename, campaign='ifloods', skip_header=None): Note: NASA's processing strips out rain rate so we have to recalculate it based upon a fall speed relationship. - ''' + """ reader = NASA_APU_reader(filename, campaign, skip_header) @@ -51,17 +51,17 @@ def read_parsivel_nasa_gv(filename, campaign='ifloods', skip_header=None): else: return None - del(reader) + del (reader) class NASA_APU_reader(object): - ''' + """ This class reads and parses parsivel disdrometer data from NASA ground validation campaigns. These conform to document ??? Use the read_parsivel_nasa_gv() function to interface with this. 
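`apply_running_average` above decimates a series by block-averaging with a length-`num` boxcar. A sketch of its 1-D branch, with a made-up input series:

```python
import numpy as np

def running_average_1d(array, num=6):
    # Boxcar weights that sum to one, then keep every num-th valid sample.
    weights = np.repeat(1.0, num) / num
    return np.convolve(array, weights, "valid")[::num]

x = np.arange(12, dtype=float)          # 0..11
print(running_average_1d(x, num=6))     # [2.5 8.5]
```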
- ''' + """ # Nt = [] # T = [] # W = [] @@ -71,98 +71,210 @@ class NASA_APU_reader(object): # rho_w = 1 def __init__(self, filename, campaign, skip_header): - ''' + """ Handles setting up a NASA APU Reader - ''' + """ self.time = [] # Time in minutes from start of recording self.Nd = [] if not campaign in self.supported_campaigns: - print('Campaign type not supported') + print("Campaign type not supported") return - self.f = open(filename, 'r') + self.f = open(filename, "r") reader = csv.reader(self.f) if skip_header is not None: next(reader, None) for row in reader: - self.time.append(self._parse_time(list(map(int, (row[0].split()[0:4]))))) - self.Nd.append([float(x) for x in row[0].split()[4:]]) + self.time.append(self._parse_time(list(map(int, (row[0].split()[0:4]))))) + self.Nd.append([float(x) for x in row[0].split()[4:]]) self._prep_data() self.bin_edges = common.var_to_dict( - 'bin_edges', - np.hstack( - (0, self.diameter['data'] + np.array(self.spread['data']) / 2)), - 'mm', 'Boundaries of bin sizes') - self.time['data'] = self._datetime_to_epoch_time(self.time['data']) + "bin_edges", + np.hstack((0, self.diameter["data"] + np.array(self.spread["data"]) / 2)), + "mm", + "Boundaries of bin sizes", + ) + self.time["data"] = self._datetime_to_epoch_time(self.time["data"]) self.f.close() def _prep_data(self): self.fields = {} - self.fields['Nd'] = common.var_to_dict( - 'Nd', np.ma.array(self.Nd), 'm^-3', - 'Liquid water particle concentration') + self.fields["Nd"] = common.var_to_dict( + "Nd", np.ma.array(self.Nd), "m^-3", "Liquid water particle concentration" + ) try: time_dict = self._get_epoch_time(self.time) except: - time_dict = {'data': np.array(self.time), 'units': None, - 'title': 'Time', 'full_name': 'Native file time'} + time_dict = { + "data": np.array(self.time), + "units": None, + "title": "Time", + "full_name": "Native file time", + } self.time = time_dict def _parse_time(self, time_vector): - epoch_time = datetime.datetime(time_vector[0], 1, 1) + datetime.timedelta(days=time_vector[1]-1, hours=time_vector[2], minutes=time_vector[3]) + epoch_time = datetime.datetime(time_vector[0], 1, 1) + datetime.timedelta( + days=time_vector[1] - 1, hours=time_vector[2], minutes=time_vector[3] + ) return time.mktime(epoch_time.timetuple()) def _get_epoch_time(self, sample_time): - ''' + """ Convert the time to an Epoch time using package standard. - ''' + """ # Convert the time array into a datetime instance time_unaware = num2date(sample_time, common.EPOCH_UNITS) - eptime = {'data': time_unaware, 'units': common.EPOCH_UNITS, - 'title': 'Time', 'full_name': 'Time (UTC)'} + eptime = { + "data": time_unaware, + "units": common.EPOCH_UNITS, + "title": "Time", + "full_name": "Time (UTC)", + } return eptime def _datetime_to_epoch_time(self, time_array): - ''' + """ Convert the time to an Epoch time using package standard. 
- ''' + """ epoch = datetime.datetime.utcfromtimestamp(0) - time_secs = [(timestamp-epoch).total_seconds() for timestamp in time_array] + time_secs = [(timestamp - epoch).total_seconds() for timestamp in time_array] return time_secs spread = common.var_to_dict( - 'spread', + "spread", np.array( - [0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.257, - 0.257, 0.257, 0.257, 0.257, 0.515, 0.515, 0.515, 0.515, 0.515, 1.030, 1.030, - 1.030, 1.030, 1.030, 2.060, 2.060, 2.060, 2.060, 2.060, 3.090, 3.090]), - 'mm', 'Bin size spread of bins') - - supported_campaigns = ['ifloods', 'mc3e_dsd', 'mc3e_raw'] + [ + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.257, + 0.257, + 0.257, + 0.257, + 0.257, + 0.515, + 0.515, + 0.515, + 0.515, + 0.515, + 1.030, + 1.030, + 1.030, + 1.030, + 1.030, + 2.060, + 2.060, + 2.060, + 2.060, + 2.060, + 3.090, + 3.090, + ] + ), + "mm", + "Bin size spread of bins", + ) + + supported_campaigns = ["ifloods", "mc3e_dsd", "mc3e_raw"] diameter = common.var_to_dict( - 'diameter', + "diameter", np.array( - [0.06, 0.19, 0.32, 0.45, 0.58, 0.71, 0.84, 0.96, 1.09, 1.22, - 1.42, 1.67, 1.93, 2.19, 2.45, 2.83, 3.35, 3.86, 4.38, 4.89, - 5.66, 6.70, 7.72, 8.76, 9.78, 11.33, 13.39, 15.45, 17.51, - 19.57, 22.15, 25.24]), - 'mm', 'Particle diameter of bins') + [ + 0.06, + 0.19, + 0.32, + 0.45, + 0.58, + 0.71, + 0.84, + 0.96, + 1.09, + 1.22, + 1.42, + 1.67, + 1.93, + 2.19, + 2.45, + 2.83, + 3.35, + 3.86, + 4.38, + 4.89, + 5.66, + 6.70, + 7.72, + 8.76, + 9.78, + 11.33, + 13.39, + 15.45, + 17.51, + 19.57, + 22.15, + 25.24, + ] + ), + "mm", + "Particle diameter of bins", + ) velocity = common.var_to_dict( - 'velocity', + "velocity", np.array( - [0.05, 0.15, 0.25, 0.35, 0.45, 0.55, 0.65, 0.75, 0.85, 0.96, 1.13, - 1.35, 1.59, 1.83, 2.08, 2.40, 2.78, 3.15, 3.50, 3.84, 4.40, 5.20, - 6.00, 6.80, 7.60, 8.80, 10.40, 12.00, 13.60, 15.20, 17.60, 20.80]), - 'm s^-1', 'Terminal fall velocity for each bin') + [ + 0.05, + 0.15, + 0.25, + 0.35, + 0.45, + 0.55, + 0.65, + 0.75, + 0.85, + 0.96, + 1.13, + 1.35, + 1.59, + 1.83, + 2.08, + 2.40, + 2.78, + 3.15, + 3.50, + 3.84, + 4.40, + 5.20, + 6.00, + 6.80, + 7.60, + 8.80, + 10.40, + 12.00, + 13.60, + 15.20, + 17.60, + 20.80, + ] + ), + "m s^-1", + "Terminal fall velocity for each bin", + ) diff --git a/pydsd/io/ParsivelReader.py b/pydsd/io/ParsivelReader.py index 1d0fddb..aa95fd5 100644 --- a/pydsd/io/ParsivelReader.py +++ b/pydsd/io/ParsivelReader.py @@ -9,7 +9,7 @@ def read_parsivel(filename): - ''' + """ Takes a filename pointing to a parsivel raw file and returns a drop size distribution object. 
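One subtlety in the APU reader: `time.mktime` in `_parse_time` interprets its argument in the machine's local timezone, whereas `_datetime_to_epoch_time` above measures seconds from the UTC epoch, so the two conventions can differ by the local UTC offset. A sketch of the UTC-consistent variant, assuming the file timestamps are meant as UTC; `datetime_to_epoch_utc` is a hypothetical name:

```python
import calendar
import datetime

def datetime_to_epoch_utc(timestamps):
    """Seconds since 1970-01-01T00:00:00 UTC for tz-naive UTC datetimes."""
    epoch = datetime.datetime.utcfromtimestamp(0)
    return [(ts - epoch).total_seconds() for ts in timestamps]

dt = datetime.datetime(2013, 5, 1, 12, 0)
# calendar.timegm is the UTC counterpart of time.mktime.
assert datetime_to_epoch_utc([dt])[0] == calendar.timegm(dt.timetuple())
```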
@@ -19,7 +19,7 @@ def read_parsivel(filename): Returns: DropSizeDistrometer object - ''' + """ reader = ParsivelReader(filename) dsd = DropSizeDistribution(reader) return dsd @@ -54,12 +54,12 @@ def __init__(self, filename): self._prep_data() self.bin_edges = np.hstack( - (0, self.diameter['data'] + np.array(self.spread['data']) / 2)) + (0, self.diameter["data"] + np.array(self.spread["data"]) / 2) + ) self.bin_edges = common.var_to_dict( - 'bin_edges', - self.bin_edges, - 'mm', 'Bin Edges') + "bin_edges", self.bin_edges, "mm", "Bin Edges" + ) self._apply_pcm_matrix() @@ -68,73 +68,93 @@ def _read_file(self): Returns: None """ - with io.open(self.filename, encoding='latin-1') as f: + with io.open(self.filename, encoding="latin-1") as f: for line in f: - line = line.rstrip('\n\r;') - code = line.split(':')[0] - if code == '01': # Rain Rate - self.rain_rate.append( - float(line.split(':')[1])) - elif code == '07': # Reflectivity - self.Z.append(float(line.split(':')[1])) - elif code == '11': # Num Particles - self.num_particles.append( - int(line.split(':')[1])) - elif code == '20': # Time string - self.time.append( - self.get_sec(line.split(':')[1:4])) - elif code == '21': # Date string - date_tuple = line.split(':')[1].split('.') - self._base_time.append(datetime(year=int(date_tuple[2]), - month=int(date_tuple[1]), - day=int(date_tuple[0]))) - elif code == '90': # Nd + line = line.rstrip("\n\r;") + code = line.split(":")[0] + if code == "01": # Rain Rate + self.rain_rate.append(float(line.split(":")[1])) + elif code == "07": # Reflectivity + self.Z.append(float(line.split(":")[1])) + elif code == "11": # Num Particles + self.num_particles.append(int(line.split(":")[1])) + elif code == "20": # Time string + self.time.append(self.get_sec(line.split(":")[1:4])) + elif code == "21": # Date string + date_tuple = line.split(":")[1].split(".") + self._base_time.append( + datetime( + year=int(date_tuple[2]), + month=int(date_tuple[1]), + day=int(date_tuple[0]), + ) + ) + elif code == "90": # Nd self.nd.append( - np.power(10, list(map(float, line.split(':')[1].split(';'))))) - elif code == '91': # Vd + np.power(10, list(map(float, line.split(":")[1].split(";")))) + ) + elif code == "91": # Vd self.vd.append( - list(map(float, line.split(':')[1].rstrip(';\r').split(';')))) - elif code == '93': # md - self.raw.append( - list(map(int, line.split(':')[1].split(';')))) + list(map(float, line.split(":")[1].rstrip(";\r").split(";"))) + ) + elif code == "93": # md + self.raw.append(list(map(int, line.split(":")[1].split(";")))) def _apply_pcm_matrix(self): """ Apply Data Quality matrix from Ali Tokay Returns: None """ - self.filtered_raw_matrix = np.ndarray(shape=(len(self.raw), - 32, 32), dtype=float) + self.filtered_raw_matrix = np.ndarray( + shape=(len(self.raw), 32, 32), dtype=float + ) for i in range(len(self.raw)): self.filtered_raw_matrix[i] = np.multiply( - self.pcm, np.reshape(self.raw[i], (32, 32))) + self.pcm, np.reshape(self.raw[i], (32, 32)) + ) def _prep_data(self): self.fields = {} - self.fields['rain_rate'] = common.var_to_dict( - 'Rain rate', np.ma.array(self.rain_rate), 'mm/h', 'Rain rate') - self.fields['reflectivity'] = common.var_to_dict( - 'Reflectivity', np.ma.masked_equal(self.Z, -9.999), 'dBZ', - 'Equivalent reflectivity factor') - self.fields['Nd'] = common.var_to_dict( - 'Nd', np.ma.masked_equal( - self.nd, np.power(10, -9.999)), 'm^-3 mm^-1', - 'Liquid water particle concentration') - self.fields['Nd']['data'].set_fill_value(0) - - self.fields['num_particles'] = 
common.var_to_dict( - 'Number of Particles', np.ma.array(self.num_particles), - '', 'Number of particles') - self.fields['terminal_velocity'] = common.var_to_dict( - 'Terminal Fall Velocity', self.vd, # np.ndarray(self.vd), - 'm/s', 'Terminal fall velocity for each bin') + self.fields["rain_rate"] = common.var_to_dict( + "Rain rate", np.ma.array(self.rain_rate), "mm/h", "Rain rate" + ) + self.fields["reflectivity"] = common.var_to_dict( + "Reflectivity", + np.ma.masked_equal(self.Z, -9.999), + "dBZ", + "Equivalent reflectivity factor", + ) + self.fields["Nd"] = common.var_to_dict( + "Nd", + np.ma.masked_equal(self.nd, np.power(10, -9.999)), + "m^-3 mm^-1", + "Liquid water particle concentration", + ) + self.fields["Nd"]["data"].set_fill_value(0) + + self.fields["num_particles"] = common.var_to_dict( + "Number of Particles", + np.ma.array(self.num_particles), + "", + "Number of particles", + ) + self.fields["terminal_velocity"] = common.var_to_dict( + "Terminal Fall Velocity", + self.vd, # np.ndarray(self.vd), + "m/s", + "Terminal fall velocity for each bin", + ) try: self.time = self._get_epoch_time() except: - self.time = {'data': np.array(self.time), 'units': None, - 'title': 'Time', 'full_name': 'Native file time'} + self.time = { + "data": np.array(self.time), + "units": None, + "title": "Time", + "full_name": "Native file time", + } print("Conversion to Epoch Time did not work.") def get_sec(self, s): @@ -145,72 +165,1204 @@ def _get_epoch_time(self): Convert the time to an Epoch time using package standard. """ time_unaware = np.array( - [self._base_time[i] + timedelta(seconds=self.time[i]) for i in range(0, len(self.time))]) + [ + self._base_time[i] + timedelta(seconds=self.time[i]) + for i in range(0, len(self.time)) + ] + ) epoch = datetime.utcfromtimestamp(0) - time_secs = [(timestamp - epoch).total_seconds() - for timestamp in time_unaware] + time_secs = [(timestamp - epoch).total_seconds() for timestamp in time_unaware] - eptime = {'data': time_secs, 'units': common.EPOCH_UNITS, - 'title': 'Time', 'long_name': 'time'} + eptime = { + "data": time_secs, + "units": common.EPOCH_UNITS, + "title": "Time", + "long_name": "time", + } return eptime diameter = common.var_to_dict( - 'diameter', + "diameter", np.array( - [0.06, 0.19, 0.32, 0.45, 0.58, 0.71, 0.84, 0.96, 1.09, 1.22, 1.42, 1.67, - 1.93, 2.19, 2.45, 2.83, 3.35, 3.86, 4.38, 4.89, 5.66, - 6.7, 7.72, 8.76, 9.78, 11.33, 13.39, 15.45, 17.51, 19.57, 22.15, 25.24]), - 'mm', 'Particle diameter of bins') + [ + 0.06, + 0.19, + 0.32, + 0.45, + 0.58, + 0.71, + 0.84, + 0.96, + 1.09, + 1.22, + 1.42, + 1.67, + 1.93, + 2.19, + 2.45, + 2.83, + 3.35, + 3.86, + 4.38, + 4.89, + 5.66, + 6.7, + 7.72, + 8.76, + 9.78, + 11.33, + 13.39, + 15.45, + 17.51, + 19.57, + 22.15, + 25.24, + ] + ), + "mm", + "Particle diameter of bins", + ) spread = common.var_to_dict( - 'spread', + "spread", [ - 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.129, 0.257, - 0.257, 0.257, 0.257, 0.257, 0.515, 0.515, 0.515, 0.515, 0.515, 1.030, 1.030, - 1.030, 1.030, 1.030, 2.060, 2.060, 2.060, 2.060, 2.060, 3.090, 3.090], - 'mm', 'Bin size spread of bins') + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.129, + 0.257, + 0.257, + 0.257, + 0.257, + 0.257, + 0.515, + 0.515, + 0.515, + 0.515, + 0.515, + 1.030, + 1.030, + 1.030, + 1.030, + 1.030, + 2.060, + 2.060, + 2.060, + 2.060, + 2.060, + 3.090, + 3.090, + ], + "mm", + "Bin size spread of bins", + ) velocity = common.var_to_dict( - 'velocity', + "velocity", np.array( - [0.05, 
0.15, 0.25, 0.35, 0.45, 0.55, 0.65, 0.75, 0.85, 0.95, 1.1, 1.3, 1.5, 1.7, 1.9, - 2.2, 2.6, 3, 3.4, 3.8, 4.4, 5.2, 6.0, 6.8, 7.6, 8.8, 10.4, 12.0, 13.6, 15.2, - 17.6, 20.8]), - 'm s^-1', 'Terminal fall velocity for each bin') + [ + 0.05, + 0.15, + 0.25, + 0.35, + 0.45, + 0.55, + 0.65, + 0.75, + 0.85, + 0.95, + 1.1, + 1.3, + 1.5, + 1.7, + 1.9, + 2.2, + 2.6, + 3, + 3.4, + 3.8, + 4.4, + 5.2, + 6.0, + 6.8, + 7.6, + 8.8, + 10.4, + 12.0, + 13.6, + 15.2, + 17.6, + 20.8, + ] + ), + "m s^-1", + "Terminal fall velocity for each bin", + ) - v_spread = [.1, .1, .1, .1, .1, .1, .1, .1, .1, .1, .2, .2, .2, .2, .2, .4, - .4, .4, .4, .4, .8, .8, .8, .8, .8, 1.6, 1.6, 1.6, 1.6, 1.6, 3.2, 3.2] + v_spread = [ + .1, + .1, + .1, + .1, + .1, + .1, + .1, + .1, + .1, + .1, + .2, + .2, + .2, + .2, + .2, + .4, + .4, + .4, + .4, + .4, + .8, + .8, + .8, + .8, + .8, + 1.6, + 1.6, + 1.6, + 1.6, + 1.6, + 3.2, + 3.2, + ] pcm_matrix = ( - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 
0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + [several hundred added lines follow in the original hunk: the same 0/1 values emitted one element per line by the Black reflow; the data itself is unchanged] + ) diff --git a/pydsd/io/common.py b/pydsd/io/common.py index 0848945..daf8059 100644 --- a/pydsd/io/common.py +++ b/pydsd/io/common.py @@ -3,7 +3,7 @@ import numpy as np from netCDF4 import num2date, date2num -EPOCH_UNITS = 'seconds since 1970-1-1 00:00:00+0:00' +EPOCH_UNITS = "seconds since 1970-1-1 00:00:00+0:00" def var_to_dict(standard_name, data, units, long_name): """ Convert variable information to a dictionary. """ d = {} - d['data'] = data[:] - d['units'] = units - d['long_name'] = long_name - d['standard_name'] = standard_name + d["data"] = data[:] + d["units"] = units + d["long_name"] = long_name + d["standard_name"] = standard_name return d + def ncvar_to_dict(ncvar): """ Convert a NetCDF Dataset variable to a dictionary. Appropriated from Py-Art package. """ d = dict((k, getattr(ncvar, k)) for k in ncvar.ncattrs()) - d['data'] = ncvar[:] - if np.isscalar(d['data']): + d["data"] = ncvar[:] + if np.isscalar(d["data"]): # netCDF4 1.1.0+ returns a scalar for 0-dim array, we always want # 1-dim+ arrays with a valid shape.
- d['data'] = np.array(d['data'][:]) - d['data'].shape = (1, ) + d["data"] = np.array(d["data"][:]) + d["data"].shape = (1,) return d @@ -43,6 +44,10 @@ def get_epoch_time(sample_times, t_units): # eptime = {'data': time_unaware, 'units': EPOCH_UNITS, # 'standard_name': 'Time', 'long_name': 'Time (UTC)'} - eptime = {'data': timesec, 'units': EPOCH_UNITS, - 'standard_name': 'Time', 'long_name': 'Time (UTC)'} + eptime = { + "data": timesec, + "units": EPOCH_UNITS, + "standard_name": "Time", + "long_name": "Time (UTC)", + } return eptime diff --git a/pydsd/partition/cs_partition.py b/pydsd/partition/cs_partition.py index c326fa7..3cc6a96 100644 --- a/pydsd/partition/cs_partition.py +++ b/pydsd/partition/cs_partition.py @@ -1,8 +1,11 @@ import numpy as np from ..utility import ts_utility -def cs_partition_bringi_2010(Nw, D0, slope=-1.65, intercept=6.5, c_thresh=0.1, s_thresh=-0.1): - '''Methodology put forth in Bringi et al. (JAOT 2009)[1], discussed in + +def cs_partition_bringi_2010( + Nw, D0, slope=-1.65, intercept=6.5, c_thresh=0.1, s_thresh=-0.1 +): + """Methodology put forth in Bringi et al. (JAOT 2009)[1], discussed in Thurai et al (JAOT 2010). This technique separates convective and stratiform based upon @@ -41,23 +44,24 @@ def cs_partition_bringi_2010(Nw, D0, slope=-1.65, intercept=6.5, s References: ----------- [1] Bringi et al. (JAOT 2009) - ''' + """ - nPts = len(Nw) + nPts = len(Nw) classification = np.zeros_like(Nw) Nw_D0_index = np.ma.log10(Nw) - (slope * np.array(D0) + intercept) classification[Nw_D0_index <= s_thresh] = 1 - classification[Nw_D0_index >=c_thresh] = 2 + classification[Nw_D0_index >= c_thresh] = 2 # We could probably remove this line by initializing to 3? classification[np.logical_and(Nw_D0_index > s_thresh, Nw_D0_index < c_thresh)] = 3 return classification + def cs_partition_islam_2012(rain_rate, r_thresh=10.0, sd_thresh=1.5, window=4): - '''Convective stratiform partitioning from Islam et al (Atmos Res 2012). The method + """Convective stratiform partitioning from Islam et al (Atmos Res 2012). The method combines Testud et al. 2011, and Bringi et al. 2003. Parameters: @@ -79,19 +83,18 @@ def cs_partition_islam_2012(rain_rate, r_thresh=10.0, sd_thresh=1.5, window=4): References: [1]: Testud et al. 2011 [2]: Bringi et al. 2003 - ''' + """ classification = np.zeros_like(rain_rate) - classification[:] = 2 #Defaults to convective for now. - padded_rain_rate = np.pad(rain_rate, int(window/2), 'reflect') - + classification[:] = 2 # Defaults to convective for now. + padded_rain_rate = np.pad(rain_rate, int(window / 2), "reflect") n_pts = len(rain_rate) thresh_left = padded_rain_rate < r_thresh windowed_thresh = ts_utility.rolling_window(thresh_left, window) - windowed_std = np.std(ts_utility.rolling_window(padded_rain_rate, window),1) + windowed_std = np.std(ts_utility.rolling_window(padded_rain_rate, window), 1) rain_thresh_mask = list(map(np.all, windowed_thresh)) std_thresh_mask = windowed_std < sd_thresh @@ -102,12 +105,8 @@ def cs_partition_islam_2012(rain_rate, r_thresh=10.0, sd_thresh=1.5, window=4): return classification - - - - def cs_partition_atlas_2000(vertical_velocity, w_thresh=1.0): - ''' Convective stratiform partitioning from Atlas et al. (JGR 2000). + """ Convective stratiform partitioning from Atlas et al. (JGR 2000). Convective stratiform partitioning based upon vertical velocity. This study was based on aircraft instrumentation. If used on non-aerial data, great care should be taken.
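For readers skimming the hunks above: cs_partition_bringi_2010 boils down to a single separation index in (D0, log10(Nw)) space. A minimal standalone sketch of that logic (the helper name bringi_index and the sample arrays are illustrative only; the slope, intercept, and thresholds mirror the defaults in the signature above):

    import numpy as np

    def bringi_index(Nw, D0, slope=-1.65, intercept=6.5):
        # Signed distance of each point from the separator line
        # log10(Nw) = slope * D0 + intercept.
        return np.ma.log10(Nw) - (slope * np.array(D0) + intercept)

    index = bringi_index(np.array([1.0e3, 1.0e5]), np.array([2.0, 1.2]))
    # index <= -0.1 -> stratiform (class 1); index >= 0.1 -> convective
    # (class 2); anything in between -> transition (class 3), exactly as
    # classified by cs_partition_bringi_2010 above.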
@@ -126,7 +125,7 @@ def cs_partition_atlas_2000(vertical_velocity, w_thresh=1.0): References: [1] Atlas et al. (JGR 2000) - ''' + """ classification = np.zeros_like(vertical_velocity) @@ -134,6 +133,3 @@ def cs_partition_atlas_2000(vertical_velocity, w_thresh=1.0): classification[np.array(vertical_velocity) < w_thresh] = 1 return classification - - - diff --git a/pydsd/plot/plot.py b/pydsd/plot/plot.py index bd785b9..df84edf 100644 --- a/pydsd/plot/plot.py +++ b/pydsd/plot/plot.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- -''' +""" Plotting routines for different aspects of the drop size distribution class. -''' +""" import numpy as np import matplotlib.pyplot as plt @@ -11,9 +11,19 @@ from matplotlib.dates import SecondLocator, MinuteLocator, HourLocator, DayLocator -def plot_dsd(dsd, xlims=None, ylims=None, log_scale=True, tighten=True, - vmin=None, vmax=None, cmap=None, ax=None, fig=None): - '''Plotting function for drop size distribution Nd +def plot_dsd( + dsd, + xlims=None, + ylims=None, + log_scale=True, + tighten=True, + vmin=None, + vmax=None, + cmap=None, + ax=None, + fig=None, +): + """Plotting function for drop size distribution Nd plot_dsd creates a pcolormesh based plot for a drop size distribution object's `Nd` field. @@ -35,20 +45,18 @@ def plot_dsd(dsd, xlims=None, ylims=None, log_scale=True, tighten=True, ------- fig: Figure instance - ''' + """ ax = parse_ax(ax) fig = parse_fig(fig) if cmap is None: - colors = [('white')] + [(cm.jet(i)) for i in range(1, 256)] - cmap = mpl.colors.LinearSegmentedColormap.from_list( - 'new_map', colors, N=256) - + colors = [("white")] + [(cm.jet(i)) for i in range(1, 256)] + cmap = mpl.colors.LinearSegmentedColormap.from_list("new_map", colors, N=256) if vmin is None: - vmin = np.nanmin(dsd.fields['Nd']['data']) + vmin = np.nanmin(dsd.fields["Nd"]["data"]) if vmax is None: - vmax = np.nanmax(dsd.fields['Nd']['data']) + vmax = np.nanmax(dsd.fields["Nd"]["data"]) if log_scale: if vmin == 0.: @@ -56,37 +64,48 @@ def plot_dsd(dsd, xlims=None, ylims=None, log_scale=True, tighten=True, norm = mpl.colors.LogNorm(vmin=vmin, vmax=vmax) else: norm = None - import pdb; pdb.set_trace() - plt.pcolormesh(dsd.time['data'].filled(), dsd.diameter['data'].filled(), dsd.fields['Nd']['data'].T, - vmin=vmin, vmax=vmax, - figure=fig, norm=norm, cmap=cmap) - - plt.axis('tight') + plt.pcolormesh( + dsd.time["data"].filled(), + dsd.diameter["data"].filled(), + dsd.fields["Nd"]["data"].T, + vmin=vmin, + vmax=vmax, + figure=fig, + norm=norm, + cmap=cmap, + ) + + plt.axis("tight") if xlims is not None: ax.set_xlim(xlims) else: - ax.set_xlim(dsd.time['data'][0], dsd.time['data'][-1]) + ax.set_xlim(dsd.time["data"][0], dsd.time["data"][-1]) if ylims is not None: ax.set_ylim(ylims) else: - ax.set_ylim(0., dsd.diameter['data'][-1]) + ax.set_ylim(0., dsd.diameter["data"][-1]) if tighten: - max_diameter = dsd.diameter['data'][ - len(dsd.diameter['data']) - - np.argmax(np.nansum(dsd.fields['Nd']['data'], axis=0)[::-1] > 0)] + max_diameter = dsd.diameter["data"][ + len(dsd.diameter["data"]) + - np.argmax(np.nansum(dsd.fields["Nd"]["data"], axis=0)[::-1] > 0) + ] plt.ylim(0, max_diameter) plt.colorbar() - plt.xlabel('Time(m)') - plt.ylabel('Diameter(mm)') + plt.xlabel("Time(m)") + plt.ylabel("Diameter(mm)") return fig, ax -def plot_NwD0(dsd, col='k', msize=20, edgecolors='none', title=None, - ax=None, fig=None, **kwargs): +def plot_NwD0( + dsd, col="k", msize=20, edgecolors="none", title=None, ax=None, fig=None, **kwargs +): """ Create
Normalized Intercept Parameter - median volume diameter scatterplot. @@ -106,18 +125,35 @@ def plot_NwD0(dsd, col='k', msize=20, edgecolors='none', title=None, ax = parse_ax(ax) fig = parse_fig(fig) - xlab = r'D$_0$ (mm)' - ylab = r'log$_{10}$[N$_w$] (mm$^{-1}$ m$^{-3}$)' - fig, ax = scatter(np.log10(dsd.fields['Nw']['data']), dsd.fields['D0']['data'], col=col, msize=msize, - edgecolors=edgecolors, title=title, ax=ax, fig=fig, - **kwargs) + xlab = r"D$_0$ (mm)" + ylab = r"log$_{10}$[N$_w$] (mm$^{-1}$ m$^{-3}$)" + fig, ax = scatter( + dsd.fields["D0"]["data"], + np.log10(dsd.fields["Nw"]["data"]), + col=col, + msize=msize, + edgecolors=edgecolors, + title=title, + ax=ax, + fig=fig, + **kwargs + ) ax.set_xlabel(xlab) ax.set_ylabel(ylab) return fig, ax -def plot_ZR(dsd, log_scale=False, col='k', msize=20, edgecolors='none', - title=None, ax=None, fig=None, **kwargs): +def plot_ZR( + dsd, + log_scale=False, + col="k", + msize=20, + edgecolors="none", + title=None, + ax=None, + fig=None, + **kwargs +): """ Create reflectivity - rainfall rate scatterplot. @@ -140,28 +176,47 @@ def plot_ZR(dsd, log_scale=False, col='k', msize=20, edgecolors='none', fig = parse_fig(fig) if log_scale: - z = dsd.fields['Zh']['data'] - rr = np.log10(dsd.fields['rain_rate']['data']) - xlab = r'Reflectivity (dBZ)' - ylab = r'log$_{10}$(Rainfall Rate (mm h$^{-1}$))' + z = dsd.fields["Zh"]["data"] + rr = np.log10(dsd.fields["rain_rate"]["data"]) + xlab = r"Reflectivity (dBZ)" + ylab = r"log$_{10}$(Rainfall Rate (mm h$^{-1}$))" else: - z = 10. ** (dsd.fields['Zh']['data'] / 10.) - rr = dsd.fields['rain_rate']['data'] - xlab = r'Reflectivity (mm$^{6}$ m$^{-3}$)' - ylab = r'Rainfall Rate (mm h$^{-1}$)' - - fig, ax = scatter(z, rr, col=col, - msize=msize, edgecolors=edgecolors, title=title, - ax=ax, fig=fig, **kwargs) + z = 10. ** (dsd.fields["Zh"]["data"] / 10.) + rr = dsd.fields["rain_rate"]["data"] + xlab = r"Reflectivity (mm$^{6}$ m$^{-3}$)" + ylab = r"Rainfall Rate (mm h$^{-1}$)" + + fig, ax = scatter( + z, + rr, + col=col, + msize=msize, + edgecolors=edgecolors, + title=title, + ax=ax, + fig=fig, + **kwargs + ) ax.set_xlabel(xlab) ax.set_ylabel(ylab) return fig, ax -def plot_ZR_hist2d(dsd, log_scale=False, bins=(80, 60), ranges=None, norm=None, - xlims=None, ylims=None, title=None, - colorbar=True, clabel='Normalized Counts', - ax=None, fig=None, **kwargs): +def plot_ZR_hist2d( + dsd, + log_scale=False, + bins=(80, 60), + ranges=None, + norm=None, + xlims=None, + ylims=None, + title=None, + colorbar=True, + clabel="Normalized Counts", + ax=None, + fig=None, + **kwargs +): """ Create reflectivity - rainfall rate 2D histogram.
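The log_scale branches in plot_ZR (and plot_ZR_hist2d below) hinge on one unit conversion: Zh is stored in dBZ, and the linear-units branch maps it back with 10 ** (Zh / 10). A self-contained round-trip check of that conversion (the sample values are illustrative):

    import numpy as np

    zh_dbz = np.array([20.0, 35.0, 50.0])  # reflectivity in dBZ
    z_linear = 10.0 ** (zh_dbz / 10.0)     # mm^6 m^-3, as in the else branch
    assert np.allclose(10.0 * np.log10(z_linear), zh_dbz)  # inverse mapping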
@@ -188,29 +243,48 @@ def plot_ZR_hist2d(dsd, log_scale=False, bins=(80, 60), ranges=None, norm=None, fig = parse_fig(fig) if log_scale: - z = dsd.fields['Zh']['data'] - rr = np.log10(dsd.fields['rain_rate']['data']) - xlab = r'Reflectivity (dBZ)' - ylab = r'log$_{10}$(Rainfall Rate (mm h$^{-1}$))' + z = dsd.fields["Zh"]["data"] + rr = np.log10(dsd.fields["rain_rate"]["data"]) + xlab = r"Reflectivity (dBZ)" + ylab = r"log$_{10}$(Rainfall Rate (mm h$^{-1}$))" else: - z = np.power(10, (dsd.fields['Zh']['data'] / 10.0)) - rr = dsd.fields['rain_rate']['data'] - xlab = r'Reflectivity (mm$^{6}$ m$^{-3}$)' - ylab = r'Rainfall Rate (mm h$^{-1}$)' - - fig, ax = plot_hist2d(z, rr, bins=bins, ranges=ranges, norm=norm, - xlims=xlims, ylims=ylims, title=title, - colorbar=colorbar, - clabel=clabel, - ax=ax, fig=fig, **kwargs) + z = np.power(10, (dsd.fields["Zh"]["data"] / 10.0)) + rr = dsd.fields["rain_rate"]["data"] + xlab = r"Reflectivity (mm$^{6}$ m$^{-3}$)" + ylab = r"Rainfall Rate (mm h$^{-1}$)" + + fig, ax = plot_hist2d( + z, + rr, + bins=bins, + ranges=ranges, + norm=norm, + xlims=xlims, + ylims=ylims, + title=title, + colorbar=colorbar, + clabel=clabel, + ax=ax, + fig=fig, + **kwargs + ) ax.set_xlabel(xlab) ax.set_ylabel(ylab) return fig, ax -def scatter(xvar, yvar, col='k', msize=20, edgecolors='none', title=None, - ax=None, fig=None, **kwargs): +def scatter( + xvar, + yvar, + col="k", + msize=20, + edgecolors="none", + title=None, + ax=None, + fig=None, + **kwargs +): """ Create a scatterplot of two variables. @@ -235,9 +309,21 @@ def scatter(xvar, yvar, col='k', msize=20, edgecolors='none', title=None, return fig, ax -def plot_hist2d(xvar, yvar, bins=(80, 60), ranges=None, norm=None, - xlims=None, ylims=None, title=None, colorbar=True, - clabel='Normalized Counts', ax=None, fig=None, **kwargs): +def plot_hist2d( + xvar, + yvar, + bins=(80, 60), + ranges=None, + norm=None, + xlims=None, + ylims=None, + title=None, + colorbar=True, + clabel="Normalized Counts", + ax=None, + fig=None, + **kwargs +): """ 2-D histogram plot. @@ -265,7 +351,8 @@ def plot_hist2d(xvar, yvar, bins=(80, 60), ranges=None, norm=None, ylims = (np.nanmin(yvar), np.nanmax(yvar)) hist2d, xedges, yedges = get_masked_hist2d( - xvar, yvar, bins=bins, ranges=(ylims, xlims), norm=norm) + xvar, yvar, bins=bins, ranges=(ylims, xlims), norm=norm + ) # Replace any zero values with missing data for nice plots hist2d = np.ma.masked_equal(hist2d, 0) @@ -289,9 +376,17 @@ def plot_hist2d(xvar, yvar, bins=(80, 60), ranges=None, norm=None, return fig, ax -def plot_ts(dsd, varname, date_format='%H:%M', tz=None, - x_min_tick_format='minute', title=None, ax=None, fig=None, - **kwargs): +def plot_ts( + dsd, + varname, + date_format="%H:%M", + tz=None, + x_min_tick_format="minute", + title=None, + ax=None, + fig=None, + **kwargs +): """ Time series plot of variable.
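plot_ts takes the name of any computed field plus tick controls; per the dispatch in the next hunk, x_min_tick_format accepts "second", "minute", "hour", or "day". A hypothetical call, assuming dsd came from one of the pydsd readers and calculate_RR() has already populated the rain_rate field:

    import matplotlib.pyplot as plt
    from pydsd.plot import plot

    # dsd is assumed to be an existing DropSizeDistribution object.
    fig, ax = plot.plot_ts(dsd, "rain_rate", date_format="%H:%M",
                           x_min_tick_format="hour", title="Rain rate")
    plt.show()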
@@ -317,22 +412,23 @@ def plot_ts(dsd, varname, date_format='%H:%M', tz=None, x_fmt = DateFormatter(date_format, tz=tz) - ax.plot_date(dsd.time['data'], dsd.fields[varname]['data'], **kwargs) + ax.plot_date(dsd.time["data"], dsd.fields[varname]["data"], **kwargs) ax.xaxis.set_major_formatter(x_fmt) - if x_min_tick_format == 'second': + if x_min_tick_format == "second": ax.xaxis.set_minor_locator(SecondLocator()) - elif x_min_tick_format == 'minute': + elif x_min_tick_format == "minute": ax.xaxis.set_minor_locator(MinuteLocator()) - elif x_min_tick_format == 'hour': + elif x_min_tick_format == "hour": ax.xaxis.set_minor_locator(HourLocator()) - elif x_min_tick_format == 'day': + elif x_min_tick_format == "day": ax.xaxis.set_minor_locator(DayLocator()) if title is not None: ax.set_title(title) return fig, ax + # def plotHov(dsd, xvar, datavar, log_scale=False, # date_format='%H:%M', tz=None, # clevs=7, vmin=None, vmax=None, @@ -495,7 +591,8 @@ def get_masked_hist2d(xvar, yvar, bins=(25, 25), ranges=None, norm=False): ranges = ([qx.min(), qx.max()], [qy.min(), qy.max()]) hist2d, xedges, yedges = np.histogram2d( - qx, qy, bins=bins, range=ranges, normed=norm) + qx, qy, bins=bins, range=ranges, normed=norm + ) return hist2d, xedges, yedges @@ -549,7 +646,7 @@ def set_ylabel(label, pad=None, fontsize=None, ax=None): def turn_ticks_out(ax=None): """Convenience function to turn ticks outward.""" ax = parse_ax(ax) - ax.tick_params(which='both', direction='out') + ax.tick_params(which="both", direction="out") def parse_ax(ax): diff --git a/pydsd/tests/testARM_JWD_Reader.py b/pydsd/tests/testARM_JWD_Reader.py index 04babd7..d632aa4 100644 --- a/pydsd/tests/testARM_JWD_Reader.py +++ b/pydsd/tests/testARM_JWD_Reader.py @@ -5,35 +5,51 @@ class TestArmJwdReader(unittest.TestCase): - 'Test module for the ARM_JWD_Reader' + "Test module for the ARM_JWD_Reader" def setUp(self): - filename = 'testdata/sgpdisdrometerC1.b1.20110427.000000_test_jwd_b1.cdf' + filename = "testdata/sgpdisdrometerC1.b1.20110427.000000_test_jwd_b1.cdf" self.dsd = ARM_JWD_Reader.read_arm_jwd_b1(filename) def test_can_read_sample_file(self): - self.assertIsNotNone(self.dsd, 'File did not read in correctly, returned None') + self.assertIsNotNone(self.dsd, "File did not read in correctly, returned None") def test_dsd_nd_exists(self): - self.assertIsNotNone(self.dsd.fields['Nd'], 'DSD Object has no Nd field') + self.assertIsNotNone(self.dsd.fields["Nd"], "DSD Object has no Nd field") def test_dsd_nd_is_dict(self): - self.assertIsInstance(self.dsd.fields['Nd'], dict, 'Nd was not a dictionary.') + self.assertIsInstance(self.dsd.fields["Nd"], dict, "Nd was not a dictionary.") def test_RR_works(self): self.dsd.calculate_RR() - self.assertIsNotNone(self.dsd.fields['rain_rate'], 'Rain Rate is not in fields after calculate_RR()') - self.assertEqual(len(self.dsd.fields['rain_rate']['data']), 2, 'Wrong number of time samples in rain rate') + self.assertIsNotNone( + self.dsd.fields["rain_rate"], + "Rain Rate is not in fields after calculate_RR()", + ) + self.assertEqual( + len(self.dsd.fields["rain_rate"]["data"]), + 2, + "Wrong number of time samples in rain rate", + ) def test_can_run_calc_dsd_params(self): self.dsd.calculate_dsd_parameterization() - self.assertIsNotNone(self.dsd.fields['D0'], 'The Field D0 did not exist after dsd_parameterization check') - self.assertEqual(len(self.dsd.fields['D0']['data']), 2, 'Wrong number of samples in D0') + self.assertIsNotNone( + self.dsd.fields["D0"], + "The Field D0 did not exist after 
dsd_parameterization check", + ) + self.assertEqual( + len(self.dsd.fields["D0"]["data"]), 2, "Wrong number of samples in D0" + ) def test_time_same_length_as_Nd(self): - self.assertEqual(len(self.dsd.time['data']), self.dsd.fields['Nd']['data'].shape[0], 'Different number of samples for time and Nd') + self.assertEqual( + len(self.dsd.time["data"]), + self.dsd.fields["Nd"]["data"].shape[0], + "Different number of samples for time and Nd", + ) def test_time_is_in_epoch(self): - self.assertEqual(self.dsd.time['data'][0],1303859040+3360) # Basetime + first start time - - + self.assertEqual( + self.dsd.time["data"][0], 1303859040 + 3360 + ) # Basetime + first start time diff --git a/pydsd/tests/testConfiguration.py b/pydsd/tests/testConfiguration.py index 7854047..97ddecd 100644 --- a/pydsd/tests/testConfiguration.py +++ b/pydsd/tests/testConfiguration.py @@ -6,12 +6,13 @@ from .. import DropSizeDistribution + class testConfiguration(unittest.TestCase): - ''' Unit tests for Configuration Class''' + """ Unit tests for Configuration Class""" def setUp(self): self.config = configuration.Configuration() def test_config_loads_and_has_keys(self): self.assertIsNotNone(self.config) - self.assertTrue(len(self.config.metadata.keys()) > 0 ) + self.assertTrue(len(self.config.metadata.keys()) > 0) diff --git a/pydsd/tests/testNasa2DVDReader_iphex.py b/pydsd/tests/testNasa2DVDReader_iphex.py index a97aee8..f979603 100644 --- a/pydsd/tests/testNasa2DVDReader_iphex.py +++ b/pydsd/tests/testNasa2DVDReader_iphex.py @@ -3,33 +3,48 @@ from ..aux_readers import NASA_2DVD_reader + class TestNasa2DvdReaderIphexSubcase(unittest.TestCase): - 'Test module for the NASA_2DVD_reader class in pydsd.aux_io.NASA_2DVD_reader for ifloods file' + "Test module for the NASA_2DVD_reader class in pydsd.aux_io.NASA_2DVD_reader for the iphex file" def setUp(self): - filename = 'testdata/nasa_gv_iphex_2dvd_test.txt' - self.dsd = NASA_2DVD_reader.read_2dvd_dsd_nasa_gv(filename ) + filename = "testdata/nasa_gv_iphex_2dvd_test.txt" + self.dsd = NASA_2DVD_reader.read_2dvd_dsd_nasa_gv(filename) def test_can_read_sample_file(self): - self.assertIsNotNone(self.dsd, 'File did not read in correctly, returned None') + self.assertIsNotNone(self.dsd, "File did not read in correctly, returned None") def test_dsd_nd_exists(self): - self.assertIsNotNone(self.dsd.fields['Nd'], 'DSD Object has no Nd field') + self.assertIsNotNone(self.dsd.fields["Nd"], "DSD Object has no Nd field") def test_dsd_nd_is_dict(self): - self.assertIsInstance(self.dsd.fields['Nd'], dict, 'Nd was not a dictionary.') + self.assertIsInstance(self.dsd.fields["Nd"], dict, "Nd was not a dictionary.") def test_RR_works(self): self.dsd.calculate_RR() - self.assertIsNotNone(self.dsd.fields['rain_rate'], 'Rain Rate is not in fields after calculate_RR()') - self.assertEqual(len(self.dsd.fields['rain_rate']['data']), 10, 'Wrong number of time samples in rain rate') + self.assertIsNotNone( + self.dsd.fields["rain_rate"], + "Rain Rate is not in fields after calculate_RR()", + ) + self.assertEqual( + len(self.dsd.fields["rain_rate"]["data"]), + 10, + "Wrong number of time samples in rain rate", + ) def test_can_run_calc_dsd_params(self): self.dsd.calculate_dsd_parameterization() - self.assertIsNotNone(self.dsd.fields['D0'], 'The Field D0 did not exist after dsd_parameterization check') - self.assertEqual(len(self.dsd.fields['D0']['data']), 10, 'Wrong number of samples in D0') + self.assertIsNotNone( + self.dsd.fields["D0"], + "The Field D0 did not exist after dsd_parameterization check",
) + self.assertEqual( + len(self.dsd.fields["D0"]["data"]), 10, "Wrong number of samples in D0" + ) def test_time_same_length_as_Nd(self): - self.assertEqual(len(self.dsd.time['data']), self.dsd.fields['Nd']['data'].shape[0], 'Different number of samples for time and Nd') - - + self.assertEqual( + len(self.dsd.time["data"]), + self.dsd.fields["Nd"]["data"].shape[0], + "Different number of samples for time and Nd", + ) diff --git a/pydsd/tests/testNasa2DVDReader_mc3e.py b/pydsd/tests/testNasa2DVDReader_mc3e.py index c8620b6..8608820 100644 --- a/pydsd/tests/testNasa2DVDReader_mc3e.py +++ b/pydsd/tests/testNasa2DVDReader_mc3e.py @@ -3,33 +3,48 @@ from ..aux_readers import NASA_2DVD_reader + class TestNasa2DvdReaderMc3eSubcase(unittest.TestCase): - 'Test module for the NASA_2DVD_reader class in pydsd.aux_io.NASA_2DVD_reader for mc3e files' + "Test module for the NASA_2DVD_reader class in pydsd.aux_io.NASA_2DVD_reader for mc3e files" def setUp(self): - filename = 'testdata/nasa_gv_mc3e_2dvd_test.txt' - self.dsd = NASA_2DVD_reader.read_2dvd_dsd_nasa_gv(filename ) + filename = "testdata/nasa_gv_mc3e_2dvd_test.txt" + self.dsd = NASA_2DVD_reader.read_2dvd_dsd_nasa_gv(filename) def test_can_read_sample_file(self): - self.assertIsNotNone(self.dsd, 'File did not read in correctly, returned None') + self.assertIsNotNone(self.dsd, "File did not read in correctly, returned None") def test_dsd_nd_exists(self): - self.assertIsNotNone(self.dsd.fields['Nd'], 'DSD Object has no Nd field') + self.assertIsNotNone(self.dsd.fields["Nd"], "DSD Object has no Nd field") def test_dsd_nd_is_dict(self): - self.assertIsInstance(self.dsd.fields['Nd'], dict, 'Nd was not a dictionary.') + self.assertIsInstance(self.dsd.fields["Nd"], dict, "Nd was not a dictionary.") def test_RR_works(self): self.dsd.calculate_RR() - self.assertIsNotNone(self.dsd.fields['rain_rate'], 'Rain Rate is not in fields after calculate_RR()') - self.assertEqual(len(self.dsd.fields['rain_rate']['data']), 5, 'Wrong number of time samples in rain rate') + self.assertIsNotNone( + self.dsd.fields["rain_rate"], + "Rain Rate is not in fields after calculate_RR()", + ) + self.assertEqual( + len(self.dsd.fields["rain_rate"]["data"]), + 5, + "Wrong number of time samples in rain rate", + ) def test_can_run_calc_dsd_params(self): self.dsd.calculate_dsd_parameterization() - self.assertIsNotNone(self.dsd.fields['D0'], 'The Field D0 did not exist after dsd_parameterization check') - self.assertEqual(len(self.dsd.fields['D0']['data']), 5, 'Wrong number of samples in D0') + self.assertIsNotNone( + self.dsd.fields["D0"], + "The Field D0 did not exist after dsd_parameterization check", + ) + self.assertEqual( + len(self.dsd.fields["D0"]["data"]), 5, "Wrong number of samples in D0" + ) def test_time_same_length_as_Nd(self): - self.assertEqual(len(self.dsd.time['data']), self.dsd.fields['Nd']['data'].shape[0], 'Different number of samples for time and Nd') - - + self.assertEqual( + len(self.dsd.time["data"]), + self.dsd.fields["Nd"]["data"].shape[0], + "Different number of samples for time and Nd", + ) diff --git a/pydsd/tests/test_DropSizeDistribution.py b/pydsd/tests/test_DropSizeDistribution.py index 71e5b67..8e94909 100644 --- a/pydsd/tests/test_DropSizeDistribution.py +++ b/pydsd/tests/test_DropSizeDistribution.py @@ -5,9 +5,9 @@ from .. 
import DropSizeDistribution + class testDropSizeDistribution(unittest.TestCase): - ''' Unit tests for DropSizeDistribution Class''' + """ Unit tests for DropSizeDistribution Class""" def setUp(self): pass - diff --git a/pydsd/tests/test_Image2DReader.py b/pydsd/tests/test_Image2DReader.py index 1c4eb98..a1125d8 100644 --- a/pydsd/tests/test_Image2DReader.py +++ b/pydsd/tests/test_Image2DReader.py @@ -9,13 +9,13 @@ class TestUCSCReader(TestCase): """ def setUp(self): - self.dsd = read_ucsc_netcdf('testdata/noaa_p3_pip_test.20170101.nc') + self.dsd = read_ucsc_netcdf("testdata/noaa_p3_pip_test.20170101.nc") def test_read_did_not_return_none(self): self.assertIsNotNone(self.dsd, "UCSC Reader returned a none object") def test_time_has_units_string(self): - self.assertTrue('units' in self.dsd.time.keys()) + self.assertTrue("units" in self.dsd.time.keys()) class TestNOAAAOMLReader(TestCase): """ def setUp(self): - self.dsd = read_noaa_aoml_netcdf('testdata/aoml_pip_test.nc') + self.dsd = read_noaa_aoml_netcdf("testdata/aoml_pip_test.nc") def test_read_did_not_return_none(self): self.assertIsNotNone(self.dsd, "AOML Pip Reader Returned a None Object") def test_dsd_has_3_entries(self): - self.assertTrue(len(self.dsd.Nd['data']) == 3) + self.assertTrue(len(self.dsd.Nd["data"]) == 3) diff --git a/pydsd/tests/test_ParsivelNasaGVReader.py b/pydsd/tests/test_ParsivelNasaGVReader.py index f995ccf..2bfcbc3 100644 --- a/pydsd/tests/test_ParsivelNasaGVReader.py +++ b/pydsd/tests/test_ParsivelNasaGVReader.py @@ -11,38 +11,42 @@ class TestParsivelNASAGVReader(TestCase): def setUp(self): - self.dsd = read_parsivel_nasa_gv('testdata/apu_nasa_mc3e_dsd.txt') + self.dsd = read_parsivel_nasa_gv("testdata/apu_nasa_mc3e_dsd.txt") def test_read_file_does_not_return_none(self): - ''' Test file apu_nasa_mc3e_dsd.txt reads in and returns non None object. - ''' - self.assertIsNotNone(self.dsd, 'reader returned a None object') + """ Test file apu_nasa_mc3e_dsd.txt reads in and returns a non-None object. + """ + self.assertIsNotNone(self.dsd, "reader returned a None object") def test_number_entries(self): """ Number of entries in apu_nasa_mc3e_dsd.txt file is 3 and consistent. """ - self.assertTrue(len(self.dsd.Nd['data'])) == 3 + self.assertTrue(len(self.dsd.Nd["data"]) == 3) def test_has_10_time_entries(self): - self.assertTrue(len(self.dsd.time['data']) == 3, "Test MC3E File for ParsivelNASAGVReader did not have 3 time entries") + self.assertTrue( + len(self.dsd.time["data"]) == 3, + "Test MC3E File for ParsivelNASAGVReader did not have 3 time entries", + ) class TestParsivelNASAGVReader_ifloods(TestCase): def setUp(self): - self.dsd = read_parsivel_nasa_gv('testdata/nasa_gv_ifloods_apu_test.txt') + self.dsd = read_parsivel_nasa_gv("testdata/nasa_gv_ifloods_apu_test.txt") def test_read_file_does_not_return_none(self): - ''' Test file apu_nasa_mc3e_dsd.txt reads in and returns non None object. - ''' - self.assertIsNotNone(self.dsd, 'reader returned a None object') + """ Test file nasa_gv_ifloods_apu_test.txt reads in and returns a non-None object. + """ + self.assertIsNotNone(self.dsd, "reader returned a None object") def test_number_entries(self): """ Number of entries in nasa_gv_ifloods_apu_test.txt file is 10 and consistent.
""" - self.assertTrue(len(self.dsd.Nd['data'])) == 10 + self.assertTrue(len(self.dsd.Nd["data"])) == 10 def test_has_10_time_entries(self): - self.assertTrue(len(self.dsd.time['data']) == 10, "Test ifloods File for ParsivelNASAGVReader did not have 10 time entries") - - + self.assertTrue( + len(self.dsd.time["data"]) == 10, + "Test ifloods File for ParsivelNASAGVReader did not have 10 time entries", + ) diff --git a/pydsd/tests/test_ParsivelReader.py b/pydsd/tests/test_ParsivelReader.py index ca35690..467cc4a 100644 --- a/pydsd/tests/test_ParsivelReader.py +++ b/pydsd/tests/test_ParsivelReader.py @@ -9,31 +9,46 @@ class TestParsivelReader(unittest.TestCase): """Test module for the ParsivelReader class in pydsd.io.ParsivelReader """ def setUp(self): - filename = 'testdata/parsivel_telegraph_testfile.mis' + filename = "testdata/parsivel_telegraph_testfile.mis" self.dsd = ParsivelReader.read_parsivel(filename) def test_can_read_sample_file(self): - self.assertIsNotNone(self.dsd, 'File did not read in correctly, returned None') + self.assertIsNotNone(self.dsd, "File did not read in correctly, returned None") def test_dsd_nd_exists(self): - self.assertIsNotNone(self.dsd.fields['Nd'], 'DSD Object has no Nd field') + self.assertIsNotNone(self.dsd.fields["Nd"], "DSD Object has no Nd field") def test_dsd_nd_is_dict(self): - self.assertIsInstance(self.dsd.fields['Nd'], dict, 'Nd was not a dictionary.') + self.assertIsInstance(self.dsd.fields["Nd"], dict, "Nd was not a dictionary.") def test_RR_works_on_Parsivel(self): self.dsd.calculate_RR() - self.assertIsNotNone(self.dsd.fields['rain_rate'], 'Rain Rate is not in fields after calculate_RR()') - self.assertEqual(len(self.dsd.fields['rain_rate']['data']), 6, 'Wrong number of time samples in rain rate') + self.assertIsNotNone( + self.dsd.fields["rain_rate"], + "Rain Rate is not in fields after calculate_RR()", + ) + self.assertEqual( + len(self.dsd.fields["rain_rate"]["data"]), + 6, + "Wrong number of time samples in rain rate", + ) def test_can_run_calc_dsd_params(self): self.dsd.calculate_dsd_parameterization() - self.assertIsNotNone(self.dsd.fields['D0'], 'The Field D0 did not exist after dsd_parameterization check') - self.assertEqual(len(self.dsd.fields['D0']['data']), 6, 'Wrong number of samples in D0') + self.assertIsNotNone( + self.dsd.fields["D0"], + "The Field D0 did not exist after dsd_parameterization check", + ) + self.assertEqual( + len(self.dsd.fields["D0"]["data"]), 6, "Wrong number of samples in D0" + ) def test_time_same_length_as_Nd(self): - self.assertEqual(len(self.dsd.time['data']), self.dsd.fields['Nd']['data'].shape[0], - 'Different number of samples for time and Nd') + self.assertEqual( + len(self.dsd.time["data"]), + self.dsd.fields["Nd"]["data"].shape[0], + "Different number of samples for time and Nd", + ) def test_time_reads_as_epoch_time_correctly(self): """Test whether the two time fields read in and merge correctly""" @@ -42,6 +57,6 @@ def test_time_reads_as_epoch_time_correctly(self): time_array = np.array(base_time) + np.array(time_deltas) epoch = datetime.datetime.utcfromtimestamp(0) - time_secs = [(timestamp-epoch).total_seconds() for timestamp in time_array] - self.assertEqual(time_secs[0], self.dsd.time['data'][0]) + time_secs = [(timestamp - epoch).total_seconds() for timestamp in time_array] + self.assertEqual(time_secs[0], self.dsd.time["data"][0]) # self.assertItemsEqual(time_secs, self.dsd.time['data']) # Might bring this back with six diff --git a/pydsd/tests/test_dielectric.py 
b/pydsd/tests/test_dielectric.py index 205181a..c76efed 100644 --- a/pydsd/tests/test_dielectric.py +++ b/pydsd/tests/test_dielectric.py @@ -8,9 +8,10 @@ class TestDielectricMethods(unittest.TestCase): def test_dielectric_is_roughly_093_at_S_water(self): perm = dielectric.get_refractivity(2.9e9, 0) - km = (perm**2-1)/(perm**2+2) - d_c = np.abs(km)**2 - self.assertTrue(d_c-0.93 < 0.01) + km = (perm ** 2 - 1) / (perm ** 2 + 2) + d_c = np.abs(km) ** 2 + self.assertTrue(abs(d_c - 0.93) < 0.01) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/pydsd/tests/test_expfit.py b/pydsd/tests/test_expfit.py index 4ab5346..dbbe674 100644 --- a/pydsd/tests/test_expfit.py +++ b/pydsd/tests/test_expfit.py @@ -4,25 +4,25 @@ class Test_expfit(TestCase): - ''' Tests for the expfit module. ''' + """ Tests for the expfit module. """ def test_expfit_returns_correct_relationship(self): - ''' + """ Test whether or not expfit can model a simple one variable exponential relationship. - ''' + """ a = 2 b = 3 x = [1, 2, 3] y = a * np.power(x, b) fit = expfit.expfit(x, y)[0] - self.assertAlmostEqual(fit[0], a, 7, - "Fit of Scale Parameter Failed for expfit") - self.assertAlmostEqual(fit[1], b, 7, - "Fit of Exponent Parameter Failed for expfit") + self.assertAlmostEqual(fit[0], a, 7, "Fit of Scale Parameter Failed for expfit") + self.assertAlmostEqual( + fit[1], b, 7, "Fit of Exponent Parameter Failed for expfit" + ) def test_expfit_handles_nan(self): - ''' Test whether expfit correctly handles not a number in input array.''' + """ Test whether expfit correctly handles not a number in input array.""" a = 2 b = 3 @@ -30,16 +30,16 @@ def test_expfit_handles_nan(self): y = a * np.power(x, b) fit = expfit.expfit(x, y)[0] self.assertAlmostEqual( - fit[0], a, 7, - "Fit of Scale Parameter Failed for expfit with nan data") + fit[0], a, 7, "Fit of Scale Parameter Failed for expfit with nan data" + ) self.assertAlmostEqual( - fit[1], b, 7, - "Fit of Exponent Parameter Failed for expfit with nan data") + fit[1], b, 7, "Fit of Exponent Parameter Failed for expfit with nan data" + ) def test_expfit2_returns_correct_relationship(self): - ''' + """ Test whether or not expfit2 can model a simple two variable exponential relationship. - ''' + """ a = 1.5 b = 2.5 @@ -48,19 +48,19 @@ def test_expfit2_returns_correct_relationship(self): x2 = 2 * np.array([1, 3, 5, 7, 9]) y = a * np.power(x1, b) * np.power(x2, c) fit = expfit.expfit2([x1, x2], y)[0] - self.assertAlmostEqual(fit[0], a, 7, - "Fit of Scale Parameter Failed for expfit") + self.assertAlmostEqual(fit[0], a, 7, "Fit of Scale Parameter Failed for expfit") self.assertAlmostEqual( - fit[1], b, 7, "Fit of First Exponent Parameter Failed for expfit2") + fit[1], b, 7, "Fit of First Exponent Parameter Failed for expfit2" + ) self.assertAlmostEqual( - fit[2], c, 7, - "Fit of Second Exponent Parameter Failed for expfit2") + fit[2], c, 7, "Fit of Second Exponent Parameter Failed for expfit2" + ) def test_expfit2_handles_nan(self): - ''' + """ Test whether or not expfit2 can model a simple two variable exponential relationship in the presence of nans.
- ''' + """ a = 1.5 b = 2.5 c = 3.5 @@ -68,10 +68,10 @@ def test_expfit2_handles_nan(self): x2 = 2 * np.array([1, 3, 5, 7, np.nan, 9, 11, 12, 1]) y = a * np.power(x1, b) * np.power(x2, c) fit = expfit.expfit2([x1, x2], y)[0] - self.assertAlmostEqual(fit[0], a, 7, - "Fit of Scale Parameter Failed for expfit") + self.assertAlmostEqual(fit[0], a, 7, "Fit of Scale Parameter Failed for expfit") self.assertAlmostEqual( - fit[1], b, 7, "Fit of First Exponent Parameter Failed for expfit2") + fit[1], b, 7, "Fit of First Exponent Parameter Failed for expfit2" + ) self.assertAlmostEqual( - fit[2], c, 7, - "Fit of Second Exponent Parameter Failed for expfit2") + fit[2], c, 7, "Fit of Second Exponent Parameter Failed for expfit2" + ) diff --git a/pydsd/tests/test_plot.py b/pydsd/tests/test_plot.py index f408c9c..eb26321 100644 --- a/pydsd/tests/test_plot.py +++ b/pydsd/tests/test_plot.py @@ -7,10 +7,10 @@ class TestPlot(unittest.TestCase): - 'Test module for the plot scripts' + "Test module for the plot scripts" def setUp(self): - filename = 'testdata/sgpdisdrometerC1.b1.20110427.000000_test_jwd_b1.cdf' + filename = "testdata/sgpdisdrometerC1.b1.20110427.000000_test_jwd_b1.cdf" self.dsd = ARM_JWD_Reader.read_arm_jwd_b1(filename) def test_plot_dsd(self): @@ -18,7 +18,7 @@ def test_plot_dsd(self): plt.close() def test_plot_NwD0(self): - self.dsd.calculate_dsd_parameterization() # Bad form to rely on this, but not a better way right now. + self.dsd.calculate_dsd_parameterization() # Bad form to rely on this, but not a better way right now. fig, ax = plot.plot_NwD0(self.dsd) plt.close() @@ -33,19 +33,21 @@ def test_plot_ZR_hist2d(self): plt.close() def test_scatter(self): - x = self.dsd.fields['rain_rate']['data'] #Visually not the best example, but avoids call to dsd parameterization - y = self.dsd.fields['rain_rate']['data'] + x = self.dsd.fields["rain_rate"][ + "data" + ] # Visually not the best example, but avoids call to dsd parameterization + y = self.dsd.fields["rain_rate"]["data"] fig, ax = plot.scatter(x, y) plt.close() def test_plot_hist2d(self): - x = self.dsd.fields['rain_rate']['data'] - y = self.dsd.fields['rain_rate']['data'] + x = self.dsd.fields["rain_rate"]["data"] + y = self.dsd.fields["rain_rate"]["data"] fig, ax = plot.plot_hist2d(x, y) plt.close() def test_plot_ts(self): - fig, ax = plot.plot_ts(self.dsd, 'Nd') + fig, ax = plot.plot_ts(self.dsd, "Nd") plt.close() # def test_plotHov(self): @@ -65,4 +67,3 @@ def test_methods(self): fig, ax = plot.plot_dsd(self.dsd) plot.set_ax_limits(xlim=(0, 100), ylim=(0, 100), ax=ax) plt.close() - diff --git a/pydsd/tests/test_ua98.py b/pydsd/tests/test_ua98.py index 4a1ca38..aac8b19 100644 --- a/pydsd/tests/test_ua98.py +++ b/pydsd/tests/test_ua98.py @@ -4,6 +4,7 @@ import pydsd.fit.ua98 as ua98 from ..fit import ua98 + class TestDSDFits(unittest.TestCase): """ Test the various dsd fit functions ported from pyparticle probe. 
This needs some more extensive testing for correctness.""" @@ -16,19 +17,22 @@ def test_ua98_eta_ratio_gives_correct_value(self): expected_eta = 1 eta = ua98.eta_ratio(M2, M4, M6) - self.assertTrue(np.isclose(eta,expected_eta), - "eta_ratio in ua98.returned an incorrect value for case (2,2,2):1") + self.assertTrue( + np.isclose(eta, expected_eta), + "eta_ratio in ua98 returned an incorrect value for case (2,2,2):1", + ) def test_ua98_shape_gives_correct_value(self): M2 = 1.0 M4 = 2.0 M6 = 3.0 - - expected_shape = -18.4462219947 + expected_shape = -18.4462219947 shape = ua98.shape(M2, M4, M6) - self.assertTrue(np.isclose(shape,expected_shape), - "shape in ua98.returned an incorrect value for case (1,2,3)") + self.assertTrue( + np.isclose(shape, expected_shape), + "shape in ua98 returned an incorrect value for case (1,2,3)", + ) def test_ua98_slope_gives_correct_value(self): M2 = 1.0 @@ -37,8 +41,10 @@ def test_ua98_slope_gives_correct_value(self): expected_slope = 4.582575694 slope = ua98.slope(M2, M4, mu) - self.assertTrue(np.isclose(slope,expected_slope, rtol=1e-5, atol=1e-5), - "slope in ua98.returned an incorrect value for case (1,2,3)") + self.assertTrue( + np.isclose(slope, expected_slope, rtol=1e-5, atol=1e-5), + "slope in ua98 returned an incorrect value for case (1,2,3)", + ) def test_ua98_intercept_gives_correct_value(self): M6 = 1.0 @@ -47,8 +53,10 @@ def test_ua98_intercept_gives_correct_value(self): expected_intercept = 0.00282186 intercept = ua98.intercept(M6, mu, Lambda) - self.assertTrue(np.isclose(intercept, expected_intercept, rtol=1e-5, atol=1e-5), - "intercept in ua98.returned an incorrect value for case (1,3,2)") + self.assertTrue( + np.isclose(intercept, expected_intercept, rtol=1e-5, atol=1e-5), + "intercept in ua98 returned an incorrect value for case (1,3,2)", + ) def test_ua98_d0_gives_correct_value(self): mu = 3.0 @@ -56,8 +64,10 @@ def test_ua98_d0_gives_correct_value(self): expected_d0 = 3.335 d0 = ua98.mom_d0(mu, Lambda) - self.assertTrue(np.isclose(d0, expected_d0 ), - "mom_d0 in ua98.returned an incorrect value for case (3,2)") + self.assertTrue( + np.isclose(d0, expected_d0), + "mom_d0 in ua98 returned an incorrect value for case (3,2)", + ) def test_ua98_zr_a_gives_correct_value(self): mu = 3.0 @@ -65,27 +75,33 @@ def test_ua98_zr_a_gives_correct_value(self): expected_a = 80323.3609808 a = ua98.zr_a(mu, N0) - self.assertTrue(np.isclose(a, expected_a ), - "zr_a in ua98.returned an incorrect value for case (3,1e4)") + self.assertTrue( + np.isclose(a, expected_a), + "zr_a in ua98 returned an incorrect value for case (3,1e4)", + ) def test_ua98_zr_b_gives_correct_value(self): mu = 3.0 expected_b = 1.303780964797914 b = ua98.zr_b(mu) - self.assertTrue(np.isclose(b, expected_b ), - "zr_b in ua98.returned an incorrect value for case (3)") + self.assertTrue( + np.isclose(b, expected_b), + "zr_b in ua98 returned an incorrect value for case (3)", + ) def test_ua98_norm_intercept_gives_correct_value(self): LWC = 1000 Dm = 1.5 - expected_intercept =0.016096262886528476 + expected_intercept = 0.016096262886528476 intercept = ua98.norm_intercept(LWC, Dm) - self.assertTrue(np.isclose(intercept, expected_intercept), - "norm_intercept in ua98.returned an incorrect value for case (1000,1.5)") + self.assertTrue( + np.isclose(intercept, expected_intercept), + "norm_intercept in ua98 returned an incorrect value for case (1000,1.5)", + ) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/pydsd/utility/configuration.py
b/pydsd/utility/configuration.py index f90a179..6b609a1 100644 --- a/pydsd/utility/configuration.py +++ b/pydsd/utility/configuration.py @@ -3,30 +3,24 @@ import os - class Configuration(object): - ''' Class to store PyDisdrometer configuration options + """ Class to store PyDisdrometer configuration options Attributes: ----------- - ''' + """ config_dir = os.path.dirname(os.path.abspath(__file__)) - metadata_config_file = os.path.join(config_dir, 'metadata.json') + metadata_config_file = os.path.join(config_dir, "metadata.json") def __init__(self): self.metadata = self.load_metadata_config() def load_metadata_config(self): - ''' Load the metadata configuration file and return the dictionary''' + """ Load the metadata configuration file and return the dictionary""" return json.load(open(self.metadata_config_file)) - def fill_in_metadata(self, field, data): metadata = self.metadata[field].copy() - metadata['data'] = copy(data) + metadata["data"] = copy(data) return metadata - - - - diff --git a/pydsd/utility/dielectric.py b/pydsd/utility/dielectric.py index ee1909e..49e2dd2 100644 --- a/pydsd/utility/dielectric.py +++ b/pydsd/utility/dielectric.py @@ -27,8 +27,8 @@ def get_refractivity(freq, temp): Complex refractivity index. """ - es = s[0] + s[1] * temp + s[2] * temp**2 + s[3] * temp**3 - ep = es - (2 * np.pi * freq)**2 * (A_i(0, temp, freq) + A_i(1, temp, freq)) + es = s[0] + s[1] * temp + s[2] * temp ** 2 + s[3] * temp ** 3 + ep = es - (2 * np.pi * freq) ** 2 * (A_i(0, temp, freq) + A_i(1, temp, freq)) epp = 2 * np.pi * freq * (B_i(0, temp, freq) + B_i(1, temp, freq)) return np.sqrt(ep + 1j * epp) @@ -39,9 +39,9 @@ def A_i(i, temp, freq): """ delta = a[i] * np.exp(-1 * b[i] * temp) - tau = c[i] * np.exp(d[i]/(temp + tc)) + tau = c[i] * np.exp(d[i] / (temp + tc)) - return (tau**2 * delta)/(1 + (2 * np.pi * freq * tau)**2) + return (tau ** 2 * delta) / (1 + (2 * np.pi * freq * tau) ** 2) def B_i(i, temp, freq): """ delta = a[i] * np.exp(-1 * b[i] * temp) - tau = c[i] * np.exp(d[i]/(temp + tc)) + tau = c[i] * np.exp(d[i] / (temp + tc)) - return (tau * delta)/(1 + (2 * np.pi * freq * tau)**2) + return (tau * delta) / (1 + (2 * np.pi * freq * tau) ** 2) diff --git a/pydsd/utility/expfit.py b/pydsd/utility/expfit.py index 990cebe..dc0259a 100644 --- a/pydsd/utility/expfit.py +++ b/pydsd/utility/expfit.py @@ -3,7 +3,7 @@ def expfit(x, y): - ''' + """ expfit calculates an exponential power law fit based upon Levenberg-Marquardt minimization. Fits are of the form y = ax**b. Parameters: @@ -24,7 +24,7 @@ ------ There are some stability issues if bad data is passed into it. - ''' + """ x_array = np.array(x) y_array = np.array(y) @@ -40,7 +40,7 @@ def expfit2(x, y): - ''' + """ expfit2 calculates an exponential power law fit based upon Levenberg-Marquardt minimization. Fits are of the form y = a(x[0]**b)(x[1]**c). Parameters: @@ -60,7 +60,7 @@ Notes: ------ There are some stability issues if bad data is passed into it.
- ''' + """ x1_array = np.array(x[0]) x2_array = np.array(x[1]) @@ -70,8 +70,9 @@ def expfit2(x, y): x2_finite_index = np.isfinite(x2_array) y_finite_index = np.isfinite(y_array) - mask = np.logical_and(x2_finite_index, np.logical_and(x1_finite_index, y_finite_index)) - + mask = np.logical_and( + x2_finite_index, np.logical_and(x1_finite_index, y_finite_index) + ) expfunc = lambda x, a, b, c: a * np.power(x[0], b) * np.power(x[1], c) popt, pcov = curve_fit(expfunc, [x1_array[mask], x2_array[mask]], y_array[mask]) diff --git a/pydsd/utility/ts_utility.py b/pydsd/utility/ts_utility.py index 602e013..0b89199 100644 --- a/pydsd/utility/ts_utility.py +++ b/pydsd/utility/ts_utility.py @@ -1,5 +1,6 @@ import numpy as np + def rolling_window(a, window): shape = a.shape[:-1] + (a.shape[-1] - window + 1, window) strides = a.strides + (a.strides[-1],)