diff --git a/validphys2/src/validphys/commondata.py b/validphys2/src/validphys/commondata.py
index 538e5ff6b8..1f2f47ada5 100644
--- a/validphys2/src/validphys/commondata.py
+++ b/validphys2/src/validphys/commondata.py
@@ -6,9 +6,12 @@
 :py:mod:`validphys.coredata`
 """
+
+import functools
+
 from reportengine import collect
 from validphys.commondataparser import load_commondata
-import functools
+
 
 @functools.lru_cache
 def loaded_commondata_with_cuts(commondata, cuts):
@@ -37,5 +40,5 @@ def loaded_commondata_with_cuts(commondata, cuts):
     )
 
 groups_dataset_inputs_loaded_cd_with_cuts_byprocess = collect(
-    "loaded_commondata_with_cuts", ("group_dataset_inputs_by_process", "data")
-    )
\ No newline at end of file
+    "loaded_commondata_with_cuts", ("group_dataset_inputs_by_process", "data")
+)
diff --git a/validphys2/src/validphys/config.py b/validphys2/src/validphys/config.py
index b2677b4153..ed7101c4f8 100644
--- a/validphys2/src/validphys/config.py
+++ b/validphys2/src/validphys/config.py
@@ -1166,8 +1166,7 @@ def produce_loaded_user_covmat_path(
         l = self.loader
         fileloc = l.check_vp_output_file(user_covmat_path)
         return fileloc
-
-
+
     @configparser.explicit_node
     def produce_covmat_custom(self, use_ht_uncertainties: bool = False):
         if use_ht_uncertainties:
@@ -1193,8 +1192,8 @@ def produce_combine_custom(self, use_ht_uncertainties: bool = False):
     @configparser.explicit_node
     def produce_nnfit_theory_covmat(
         self,
-        #use_thcovmat_in_sampling: bool,
-        #use_thcovmat_in_fitting: bool,
+        # use_thcovmat_in_sampling: bool,
+        # use_thcovmat_in_fitting: bool,
         inclusive_use_scalevar_uncertainties,
         use_ht_uncertainties: bool = False,
         use_user_uncertainties: bool = False,
@@ -1240,15 +1239,13 @@ def res(*args, **kwargs):
         # Set this to get the same filename regardless of the action.
         res.__name__ = "theory_covmat"
         return res
-
-
+
     @configparser.explicit_node
     def produce_combine_by_type_custom(self, use_ht_uncertainties: bool = False):
         if use_ht_uncertainties:
             return validphys.theorycovariance.construction.combine_by_type_ht
         return validphys.theorycovariance.construction.combine_by_type
 
-
     def produce_fitthcovmat(
         self, use_thcovmat_if_present: bool = False, fit: (str, type(None)) = None
     ):
@@ -1799,9 +1796,9 @@ def produce_filter_data(self, fakedata: bool = False, theorycovmatconfig=None):
         if not fakedata:
             return validphys.filters.filter_real_data
         else:
-            #if theorycovmatconfig is not None and theorycovmatconfig.get(
+            # if theorycovmatconfig is not None and theorycovmatconfig.get(
             #     "use_thcovmat_in_sampling"
-            #):
+            # ):
             #     # NOTE: By the time we run theory covmat closure tests,
             #     # hopefully the generation of pseudodata will be done in python.
             #     raise ConfigError(
diff --git a/validphys2/src/validphys/dataplots.py b/validphys2/src/validphys/dataplots.py
index e2242aac4c..27eb15e505 100644
--- a/validphys2/src/validphys/dataplots.py
+++ b/validphys2/src/validphys/dataplots.py
@@ -2,8 +2,6 @@
 Plots of relations between data PDFs and fits.
""" -from __future__ import generator_stop - from collections import defaultdict from collections.abc import Sequence import itertools @@ -28,7 +26,7 @@ from validphys.coredata import KIN_NAMES from validphys.plotoptions.core import get_info, kitable, transform_result from validphys.results import chi2_stat_labels, chi2_stats -from validphys.sumrules import POL_LIMS, partial_polarized_sum_rules +from validphys.sumrules import POL_LIMS from validphys.utils import sane_groupby_iter, scale_from_grid, split_ranges log = logging.getLogger(__name__) @@ -301,9 +299,7 @@ def _plot_fancy_impl( min_vals = [] max_vals = [] fig, ax = plotutils.subplots() - ax.set_title( - "{} {}".format(info.dataset_label, info.group_label(samefig_vals, info.figure_by)) - ) + ax.set_title(f"{info.dataset_label} {info.group_label(samefig_vals, info.figure_by)}") lineby = sane_groupby_iter(fig_data, info.line_by) @@ -1484,7 +1480,7 @@ def next_options(): # if group is None then make sure that shows on legend. key = str(group) elif marker_by == "kinematics": - key = None + key = None else: raise ValueError('Unknown marker_by value') @@ -1516,8 +1512,10 @@ def next_options(): # This is to get the label key coords = [], [] if marker_by == "kinematics": - ht_magnitude = np.concatenate( cvdict[key]) / (coords[1] * (1 - coords[0]) ) - out = ax.scatter(*coords, marker='.', c=ht_magnitude, cmap="viridis", norm=mcolors.LogNorm()) + ht_magnitude = np.concatenate(cvdict[key]) / (coords[1] * (1 - coords[0])) + out = ax.scatter( + *coords, marker='.', c=ht_magnitude, cmap="viridis", norm=mcolors.LogNorm() + ) clb = fig.colorbar(out) clb.ax.set_title(r'$F_\mathrm{exp}\frac{1}{Q^2(1-x)}$') ax.plot(*coords, label=key, markeredgewidth=1, markeredgecolor=None, **key_options[key]) diff --git a/validphys2/src/validphys/results.py b/validphys2/src/validphys/results.py index 8854659351..3c0ec33945 100644 --- a/validphys2/src/validphys/results.py +++ b/validphys2/src/validphys/results.py @@ -276,22 +276,29 @@ def group_kin_table_no_table(groups_data, groups_index): """Generate a table containing the kinematics and the process_type.""" result_records = [] for group_data in groups_data: - group_cd = group_data.load_commondata() - cd = np.concatenate( - [group_cd[i].commondata_table[['kin1','kin2','kin3','process']] for i in range(len(group_cd))], - axis=0 - ) - for index, dataset in enumerate(cd): - try: - process_name = dataset[3].name - except AttributeError: - process_name = dataset[3] - result_records.append( - dict([("kin_1", dataset[0]), - ("kin_2", dataset[1]), - ("kin_3", dataset[2]), - ("process_type", process_name)]) + group_cd = group_data.load_commondata() + cd = np.concatenate( + [ + group_cd[i].commondata_table[['kin1', 'kin2', 'kin3', 'process']] + for i in range(len(group_cd)) + ], + axis=0, ) + for index, dataset in enumerate(cd): + try: + process_name = dataset[3].name + except AttributeError: + process_name = dataset[3] + result_records.append( + dict( + [ + ("kin_1", dataset[0]), + ("kin_2", dataset[1]), + ("kin_3", dataset[2]), + ("process_type", process_name), + ] + ) + ) if not result_records: log.warning("Empty records for group results") diff --git a/validphys2/src/validphys/theorycovariance/construction.py b/validphys2/src/validphys/theorycovariance/construction.py index a9192f9618..d32def2865 100644 --- a/validphys2/src/validphys/theorycovariance/construction.py +++ b/validphys2/src/validphys/theorycovariance/construction.py @@ -5,25 +5,21 @@ from collections import defaultdict, namedtuple import logging -import 
operator import numpy as np import pandas as pd -import scipy.linalg as la -import scipy.interpolate as scint from reportengine import collect from reportengine.table import table from validphys.checks import check_using_theory_covmat -from validphys.results import results, results_central -from validphys.core import PDF from validphys.process_options import _Process +from validphys.results import results, results_central +import validphys.theorycovariance.higher_twist_functions as ht_func from validphys.theorycovariance.theorycovarianceutils import ( check_correct_theory_combination, check_fit_dataset_order_matches_grouped, process_lookup, ) -import validphys.theorycovariance.higher_twist_functions as ht_func log = logging.getLogger(__name__) @@ -120,154 +116,161 @@ def combine_by_type_ht(each_dataset_results, groups_dataset_inputs_loaded_cd_wit return process_info -def thcov_shifts_ht(ht_parameters, - ht_included_proc, - ht_excluded_exp, - groups_data_by_process, - pdf): - """ - Same as `thcov_HT` but implementing theory covariance method for each node of the spline. - Note that 'groups_data_by_process' contains the same info as 'combine_by_type_ht'. At some - point we should use only one of them. - """ - groups_data = groups_data_by_process - start_proc_by_exp = defaultdict(list) - ndata_by_exp = defaultdict(list) - deltas = defaultdict(list) - running_index_tot = 0 - - HT = {} - HT_func = {} - - for par in ht_parameters: - if len(par['list']) != len(par['nodes']): - raise ValueError(f"The length of nodes does not match that of the list in {par['ht']}. Check the runcard.\n \ - {len(par['list'])} vs. {len(par['nodes'])}") - - HT[par['ht']] = { - "list": par['list'], - "nodes": par['nodes'] - } - - HT_func[par['ht']] = None - - # Initialise shifts according to 5pt - for idx_node, _ in enumerate(par['nodes']): - deltas[par['ht'] + f"({idx_node})"] = np.array([]) - - for idx_proc, group_proc in enumerate(groups_data): - for idx_exp, exp_set in enumerate(group_proc.datasets): - # For covmat construction - exp_ndata = exp_set.load_commondata().ndata - start_proc_by_exp[exp_set.name] = running_index_tot - running_index_tot += exp_ndata - ndata_by_exp[exp_set.name] = exp_ndata - - for ht in HT_func.keys(): - HT_func[ht] = ht_func.null_func(exp_ndata) - - if group_proc.name in ht_included_proc and exp_set.name not in ht_excluded_exp: - cd_table = exp_set.load_commondata().commondata_table - process_type = cd_table['process'].iloc[0] - - if isinstance(process_type, _Process): - process_type = process_type.name - - x = cd_table['kin1'].to_numpy() - q2 = cd_table['kin2'].to_numpy() - y = cd_table['kin3'].to_numpy() - - if x.size != exp_ndata: - raise ValueError("Problem with the number of data.") - - # NMC_NC_NOTFIXED_DW_EM-F2 - if process_type == "DIS_F2R": - HT_func['H2p'], HT_func['H2d'] = ht_func.DIS_F2R_ht(exp_set, pdf, HT["H2p"], HT["H2d"], x, q2) - - elif process_type == "DIS_F2P": - HT_func['H2p'] = ht_func.DIS_F2_ht(HT["H2p"], x, q2) +def thcov_shifts_ht(ht_parameters, ht_included_proc, ht_excluded_exp, groups_data_by_process, pdf): + """ + Same as `thcov_HT` but implementing theory covariance method for each node of the spline. + Note that 'groups_data_by_process' contains the same info as 'combine_by_type_ht'. At some + point we should use only one of them. 
+ """ + groups_data = groups_data_by_process + start_proc_by_exp = defaultdict(list) + ndata_by_exp = defaultdict(list) + deltas = defaultdict(list) + running_index_tot = 0 + + HT = {} + HT_func = {} + + for par in ht_parameters: + if len(par['list']) != len(par['nodes']): + raise ValueError( + f"The length of nodes does not match that of the list in {par['ht']}. Check the runcard.\n \ + {len(par['list'])} vs. {len(par['nodes'])}" + ) - elif process_type == "DIS_F2D": - HT_func['H2d'] = ht_func.DIS_F2_ht(HT["H2d"], x, q2) + HT[par['ht']] = {"list": par['list'], "nodes": par['nodes']} - # EMC - elif process_type == "DIS_F2C": - HT_func['H2p'], HT_func['H2d'] = ht_func.DIS_F2C_ht(HT['H2p'], HT['H2d'], x, q2) + HT_func[par['ht']] = None - # HERA NC - elif process_type in ["DIS_NCE", "DIS_NCP", "DIS_NCP_CH", "DIS_NCE_BT"]: - HT_func['H2p'], HT_func["HLp"] = ht_func.DIS_NC_ht(HT['H2p'], HT['HLp'], x, q2, y) + # Initialise shifts according to 5pt + for idx_node, _ in enumerate(par['nodes']): + deltas[par['ht'] + f"({idx_node})"] = np.array([]) - #CHORUS - elif process_type in ["DIS_SNU", "DIS_SNB"]: - # Lead target:: - A = 208.0 - Z = 82 - if process_type == "DIS_SNU": + for idx_proc, group_proc in enumerate(groups_data): + for idx_exp, exp_set in enumerate(group_proc.datasets): + # For covmat construction + exp_ndata = exp_set.load_commondata().ndata + start_proc_by_exp[exp_set.name] = running_index_tot + running_index_tot += exp_ndata + ndata_by_exp[exp_set.name] = exp_ndata + + for ht in HT_func.keys(): + HT_func[ht] = ht_func.null_func(exp_ndata) + + if group_proc.name in ht_included_proc and exp_set.name not in ht_excluded_exp: + cd_table = exp_set.load_commondata().commondata_table + process_type = cd_table['process'].iloc[0] + + if isinstance(process_type, _Process): + process_type = process_type.name + + x = cd_table['kin1'].to_numpy() + q2 = cd_table['kin2'].to_numpy() + y = cd_table['kin3'].to_numpy() + + if x.size != exp_ndata: + raise ValueError("Problem with the number of data.") + + # NMC_NC_NOTFIXED_DW_EM-F2 + if process_type == "DIS_F2R": + HT_func['H2p'], HT_func['H2d'] = ht_func.DIS_F2R_ht( + exp_set, pdf, HT["H2p"], HT["H2d"], x, q2 + ) + + elif process_type == "DIS_F2P": + HT_func['H2p'] = ht_func.DIS_F2_ht(HT["H2p"], x, q2) + + elif process_type == "DIS_F2D": + HT_func['H2d'] = ht_func.DIS_F2_ht(HT["H2d"], x, q2) + + # EMC + elif process_type == "DIS_F2C": + HT_func['H2p'], HT_func['H2d'] = ht_func.DIS_F2C_ht(HT['H2p'], HT['H2d'], x, q2) + + # HERA NC + elif process_type in ["DIS_NCE", "DIS_NCP", "DIS_NCP_CH", "DIS_NCE_BT"]: + HT_func['H2p'], HT_func["HLp"] = ht_func.DIS_NC_ht( + HT['H2p'], HT['HLp'], x, q2, y + ) + + # CHORUS + elif process_type in ["DIS_SNU", "DIS_SNB"]: + # Lead target:: + A = 208.0 + Z = 82 + if process_type == "DIS_SNU": l = 0 - elif process_type == "DIS_SNB": + elif process_type == "DIS_SNB": l = 1 - DIS_NU = ht_func.DIS_SNU(HT, (A,Z), (x,q2,y), Mh=0.938, Mw=80.398, lepton=l) - HT_func['H2p'] = DIS_NU.PC_2_p - HT_func['H2d'] = DIS_NU.PC_2_d - HT_func["HLp"] = DIS_NU.PC_L_p - HT_func["HLd"] = DIS_NU.PC_L_d - HT_func["H3p"] = DIS_NU.PC_3_p - HT_func["H3d"] = DIS_NU.PC_3_d - - #NuTeV - elif process_type in ["DIS_DM_NU", "DIS_DM_NB"]: - # Iron target - Z = 23.403 - A = 49.618 - if process_type == "DIS_SNU": + DIS_NU = ht_func.DIS_SNU(HT, (A, Z), (x, q2, y), Mh=0.938, Mw=80.398, lepton=l) + HT_func['H2p'] = DIS_NU.PC_2_p + HT_func['H2d'] = DIS_NU.PC_2_d + HT_func["HLp"] = DIS_NU.PC_L_p + HT_func["HLd"] = DIS_NU.PC_L_d + HT_func["H3p"] = DIS_NU.PC_3_p + 
HT_func["H3d"] = DIS_NU.PC_3_d + + # NuTeV + elif process_type in ["DIS_DM_NU", "DIS_DM_NB"]: + # Iron target + Z = 23.403 + A = 49.618 + if process_type == "DIS_SNU": l = 0 - elif process_type == "DIS_SNB": + elif process_type == "DIS_SNB": l = 1 - DIS_NuTeV = ht_func.DIS_NUTEV(HT, (A,Z), (x,q2,y), Mh=0.938, Mw=80.398, lepton=l) - HT_func['H2p'] = DIS_NuTeV.PC_2_p - HT_func['H2d'] = DIS_NuTeV.PC_2_d - HT_func["HLp"] = DIS_NuTeV.PC_L_p - HT_func["HLd"] = DIS_NuTeV.PC_L_d - HT_func["H3p"] = DIS_NuTeV.PC_3_p - HT_func["H3d"] = DIS_NuTeV.PC_3_d - - #HERA_CC - elif process_type in ["DIS_CCE", "DIS_CCP"]: - if process_type == "DIS_CCE": + DIS_NuTeV = ht_func.DIS_NUTEV( + HT, (A, Z), (x, q2, y), Mh=0.938, Mw=80.398, lepton=l + ) + HT_func['H2p'] = DIS_NuTeV.PC_2_p + HT_func['H2d'] = DIS_NuTeV.PC_2_d + HT_func["HLp"] = DIS_NuTeV.PC_L_p + HT_func["HLd"] = DIS_NuTeV.PC_L_d + HT_func["H3p"] = DIS_NuTeV.PC_3_p + HT_func["H3d"] = DIS_NuTeV.PC_3_d + + # HERA_CC + elif process_type in ["DIS_CCE", "DIS_CCP"]: + if process_type == "DIS_CCE": l = 0 - elif process_type == "DIS_CCP": + elif process_type == "DIS_CCP": l = 1 - DIS_CC_HERA = ht_func.DIS_HERA_CC(HT, (x,q2,y), Mh=0.938, Mw=80.398, lepton=l) - HT_func['H2p'] = DIS_CC_HERA.PC_2_p - HT_func['H2d'] = DIS_CC_HERA.PC_2_d - HT_func["HLp"] = DIS_CC_HERA.PC_L_p - HT_func["HLd"] = DIS_CC_HERA.PC_L_d - HT_func["H3p"] = DIS_CC_HERA.PC_3_p - HT_func["H3d"] = DIS_CC_HERA.PC_3_d - else: - raise Exception(f"The process type `{process_type}` in `{exp_set.name} has not been implemented.") - - for ht in HT.keys(): + DIS_CC_HERA = ht_func.DIS_HERA_CC(HT, (x, q2, y), Mh=0.938, Mw=80.398, lepton=l) + HT_func['H2p'] = DIS_CC_HERA.PC_2_p + HT_func['H2d'] = DIS_CC_HERA.PC_2_d + HT_func["HLp"] = DIS_CC_HERA.PC_L_p + HT_func["HLd"] = DIS_CC_HERA.PC_L_d + HT_func["H3p"] = DIS_CC_HERA.PC_3_p + HT_func["H3d"] = DIS_CC_HERA.PC_3_d + else: + raise Exception( + f"The process type `{process_type}` in `{exp_set.name} has not been implemented." 
+                    )
+
+            for ht in HT.keys():
                 for idx_node in range(len(HT[ht]['nodes'])):
-                    shifted_list = ht_func.beta_tilde_5pt(HT[ht]['list'], idx_node)
-                    deltas[ht + f"({idx_node})"] = np.append(deltas[ht + f"({idx_node})"], HT_func[ht](shifted_list))
+                    shifted_list = ht_func.beta_tilde_5pt(HT[ht]['list'], idx_node)
+                    deltas[ht + f"({idx_node})"] = np.append(
+                        deltas[ht + f"({idx_node})"], HT_func[ht](shifted_list)
+                    )
 
-    # Construct the covariance matrix
-    covmats = defaultdict(list)
-    for exp_name_1, exp_idx_1 in start_proc_by_exp.items():
-        for exp_name_2, exp_idx_2 in start_proc_by_exp.items():
-            s = np.zeros(shape=(ndata_by_exp[exp_name_1], ndata_by_exp[exp_name_2]))
-            for shifts in deltas.keys():
-                s += np.outer(deltas[shifts][exp_idx_1: exp_idx_1 + ndata_by_exp[exp_name_1]],
-                              deltas[shifts][exp_idx_2: exp_idx_2 + ndata_by_exp[exp_name_2]])
+    # Construct the covariance matrix
+    covmats = defaultdict(list)
+    for exp_name_1, exp_idx_1 in start_proc_by_exp.items():
+        for exp_name_2, exp_idx_2 in start_proc_by_exp.items():
+            s = np.zeros(shape=(ndata_by_exp[exp_name_1], ndata_by_exp[exp_name_2]))
+            for shifts in deltas.keys():
+                s += np.outer(
+                    deltas[shifts][exp_idx_1 : exp_idx_1 + ndata_by_exp[exp_name_1]],
+                    deltas[shifts][exp_idx_2 : exp_idx_2 + ndata_by_exp[exp_name_2]],
+                )
-            start_locs = (exp_idx_1, exp_idx_2)
-            covmats[start_locs] = s
+
+            start_locs = (exp_idx_1, exp_idx_2)
+            covmats[start_locs] = s
 
-    return covmats, deltas
+    return covmats, deltas
 
 
 def thcov_ht(thcov_shifts_ht, table_ht_deltas):
@@ -785,4 +788,4 @@ def experimentplustheory_corrmat_custom(procs_covmat, theory_covmat_custom):
 
 each_dataset_results = collect(results, ("group_dataset_inputs_by_process", "data"))
 
-groups_data_by_process = collect("data", ("group_dataset_inputs_by_process",))
\ No newline at end of file
+groups_data_by_process = collect("data", ("group_dataset_inputs_by_process",))
diff --git a/validphys2/src/validphys/theorycovariance/higher_twist_functions.py b/validphys2/src/validphys/theorycovariance/higher_twist_functions.py
index 0e16b62689..bfdc4bbf2f 100644
--- a/validphys2/src/validphys/theorycovariance/higher_twist_functions.py
+++ b/validphys2/src/validphys/theorycovariance/higher_twist_functions.py
@@ -1,192 +1,164 @@
+import operator
+
 import numpy as np
-import pandas as pd
-import scipy.linalg as la
 import scipy.interpolate as scint
-from validphys.convolution import central_fk_predictions
-import operator
+
+from validphys.convolution import central_fk_predictions
 
 GEV_CM2_CONV = 3.893793e10
-GF = 1.1663787e-05 # Fermi's constant [GeV^-2]
+GF = 1.1663787e-05  # Fermi's constant [GeV^-2]
+
 
 def beta_tilde_5pt(delta_h, idx):
-  shifted_list = [0 for _ in range(len(delta_h))]
-  shifted_list[idx] = delta_h[idx]
-  return shifted_list
+    shifted_list = [0 for _ in range(len(delta_h))]
+    shifted_list[idx] = delta_h[idx]
+    return shifted_list
+
 
-def ht_parametrisation(
-  delta_h: list,
-  nodes: list,
-  x: list,
-  Q2: list,
-  ):
-  H = scint.CubicSpline(nodes, delta_h)
-  H = np.vectorize(H)
+def ht_parametrisation(delta_h: list, nodes: list, x: list, Q2: list):
+    H = scint.CubicSpline(nodes, delta_h)
+    H = np.vectorize(H)
 
-  PC = H(x) / Q2
-  return PC
+    PC = H(x) / Q2
+    return PC
 
 
 def null_func(size):
     """Auxiliary function used to return arrays of zeros for those
     datasets that don't require higher twist."""
+
    def zeros(list):
        return np.zeros(size)
+
    return zeros
 
 
-def DIS_F2R_ht(experiment,
-               pdf,
-               H2p_dict,
-               H2d_dict,
-               x,
-               q2):
-  cuts = experiment.cuts
-  fkspec_F2D, fkspec_F2P = experiment.fkspecs
-  fk_F2D = fkspec_F2D.load_with_cuts(cuts)
-  fk_F2P = fkspec_F2P.load_with_cuts(cuts)
-  F2D = central_fk_predictions(fk_F2D, pdf)
-  F2P = central_fk_predictions(fk_F2P, pdf)
-
-  F2D = np.concatenate(F2D.values)
-  F2P = np.concatenate(F2P.values)
-  F2_ratio = operator.truediv(F2D, F2P)
-
-
-  def PC_2_p(list):
-    PC_p = ht_parametrisation(list, H2p_dict["nodes"], x, q2)
-    result = np.array(operator.truediv(F2D, np.sum([F2P, PC_p],axis=0)) - F2_ratio)
-    return result
-
-  def PC_2_d(list):
-    PC_d = ht_parametrisation(list, H2d_dict["nodes"], x, q2)
-    result = np.array(operator.truediv(np.sum([F2D, PC_d],axis=0), F2P) - F2_ratio)
-    return result
-
-  return PC_2_p, PC_2_d
+def DIS_F2R_ht(experiment, pdf, H2p_dict, H2d_dict, x, q2):
+    cuts = experiment.cuts
+    fkspec_F2D, fkspec_F2P = experiment.fkspecs
+    fk_F2D = fkspec_F2D.load_with_cuts(cuts)
+    fk_F2P = fkspec_F2P.load_with_cuts(cuts)
+    F2D = central_fk_predictions(fk_F2D, pdf)
+    F2P = central_fk_predictions(fk_F2P, pdf)
+
+    F2D = np.concatenate(F2D.values)
+    F2P = np.concatenate(F2P.values)
+    F2_ratio = operator.truediv(F2D, F2P)
+
+    def PC_2_p(list):
+        PC_p = ht_parametrisation(list, H2p_dict["nodes"], x, q2)
+        result = np.array(operator.truediv(F2D, np.sum([F2P, PC_p], axis=0)) - F2_ratio)
+        return result
+
+    def PC_2_d(list):
+        PC_d = ht_parametrisation(list, H2d_dict["nodes"], x, q2)
+        result = np.array(operator.truediv(np.sum([F2D, PC_d], axis=0), F2P) - F2_ratio)
+        return result
+
+    return PC_2_p, PC_2_d
 
 
 def DIS_F2_ht(H2_dict, x, q2):
-  def PC_2(list):
-    result = ht_parametrisation(list, H2_dict['nodes'], x, q2)
-    return result
-
-  return PC_2
-
-
-def DIS_F2C_ht(H2p_dict,
-               H2d_dict,
-               x,
-               q2):
-  # Iron target
-  Z = 23.403
-  A = 49.618
-
-  def PC_2_p(list):
-    result = ht_parametrisation(list, H2p_dict['nodes'], x, q2)
-    return result
-
-  def PC_2_d(list):
-    result = 2 * (Z - A) / A * ht_parametrisation(list, H2d_dict['nodes'], x, q2)
-    return result
-
-  return PC_2_p, PC_2_d
-
-
-def DIS_NC_ht(H2_dict,
-              HL_dict,
-              x,
-              q2,
-              y):
-  yp = 1 + np.power(1 - y, 2)
-  yL = np.power(y, 2)
-  N_L = - yL / yp
-
-  def PC_2(list):
-    return ht_parametrisation(list, H2_dict['nodes'], x, q2)
-
-  def PC_L(list):
-    return N_L * ht_parametrisation(list, HL_dict['nodes'], x, q2)
-
-  return PC_2, PC_L
+    def PC_2(list):
+        result = ht_parametrisation(list, H2_dict['nodes'], x, q2)
+        return result
+
+    return PC_2
+
+
+def DIS_F2C_ht(H2p_dict, H2d_dict, x, q2):
+    # Iron target
+    Z = 23.403
+    A = 49.618
+
+    def PC_2_p(list):
+        result = ht_parametrisation(list, H2p_dict['nodes'], x, q2)
+        return result
+
+    def PC_2_d(list):
+        result = 2 * (Z - A) / A * ht_parametrisation(list, H2d_dict['nodes'], x, q2)
+        return result
+
+    return PC_2_p, PC_2_d
+
+
+def DIS_NC_ht(H2_dict, HL_dict, x, q2, y):
+    yp = 1 + np.power(1 - y, 2)
+    yL = np.power(y, 2)
+    N_L = -yL / yp
+
+    def PC_2(list):
+        return ht_parametrisation(list, H2_dict['nodes'], x, q2)
+
+    def PC_L(list):
+        return N_L * ht_parametrisation(list, HL_dict['nodes'], x, q2)
+
+    return PC_2, PC_L
 
 
 class DIS_SNU:
-  def __init__(self,
-               HT_dict,
-               target_tuple, #(A,Z)
-               kin_tuple,
-               Mh,
-               Mw,
-               lepton):
-
-    # Lead target
-    A = target_tuple[0]
-    Z = target_tuple[1]
-    x = kin_tuple[0]
-    q2 = kin_tuple[1]
-    y = kin_tuple[2]
-    self.nuclear_target = 2 * (Z - A) / A
-    self.Mh = Mh #0.938 GeV
-    self.Mw2 = np.power(Mw, 2) # GeV^2
-    self.yp = 1 + np.power(1 - y, 2) - 2 * np.power( x * y * Mh, 2) / q2
-    self.yL = np.power(y, 2)
-    self.ym = 1 - np.power(1 - y, 2)
-    self.N = GEV_CM2_CONV * (GF ** 2) * Mh / ( 2 * np.pi * np.power( 1 + q2 / self.Mw2, 2) ) * self.yp
-    self.H2p_dict = HT_dict['H2p']
-    self.H2d_dict = HT_dict['H2d']
-    self.HLp_dict = HT_dict['HLp']
-    self.HLd_dict = HT_dict['HLd']
-    self.H3p_dict = HT_dict['H3p']
-    self.H3d_dict = HT_dict['H3d']
-
-    self.x = x
-    self.q2 = q2
-    self.l = lepton
-
-  def PC_2_p(self, list):
-    norm = self.N
-    return norm * ht_parametrisation(list, self.H2p_dict['nodes'], self.x, self.q2)
-
-  def PC_2_d(self, list):
-    norm = self.N * self.nuclear_target
-    return norm * ht_parametrisation(list, self.H2d_dict['nodes'], self.x, self.q2)
-
-  def PC_L_p(self, list):
-    norm = - self.N * self.yL / self.yp
-    return norm * ht_parametrisation(list, self.HLp_dict['nodes'], self.x, self.q2)
-
-  def PC_L_d(self, list):
-    norm = - self.N * self.yL / self.yp * self.nuclear_target
-    return norm * ht_parametrisation(list, self.HLd_dict['nodes'], self.x, self.q2)
-
-  def PC_3_p(self, list):
-    norm = self.N * np.power(-1, self.l) * self.ym / self.yp * self.x
-    return norm * ht_parametrisation(list, self.H3p_dict['nodes'], self.x, self.q2)
-
-  def PC_3_d(self, list):
-    norm = self.N * np.power(-1, self.l) * self.ym / self.yp * self.x * self.nuclear_target
-    return norm * ht_parametrisation(list, self.H3d_dict['nodes'], self.x, self.q2)
-
+    def __init__(self, HT_dict, target_tuple, kin_tuple, Mh, Mw, lepton):  # (A,Z)
+
+        # Lead target
+        A = target_tuple[0]
+        Z = target_tuple[1]
+        x = kin_tuple[0]
+        q2 = kin_tuple[1]
+        y = kin_tuple[2]
+        self.nuclear_target = 2 * (Z - A) / A
+        self.Mh = Mh  # 0.938 GeV
+        self.Mw2 = np.power(Mw, 2)  # GeV^2
+        self.yp = 1 + np.power(1 - y, 2) - 2 * np.power(x * y * Mh, 2) / q2
+        self.yL = np.power(y, 2)
+        self.ym = 1 - np.power(1 - y, 2)
+        self.N = (
+            GEV_CM2_CONV * (GF**2) * Mh / (2 * np.pi * np.power(1 + q2 / self.Mw2, 2)) * self.yp
+        )
+        self.H2p_dict = HT_dict['H2p']
+        self.H2d_dict = HT_dict['H2d']
+        self.HLp_dict = HT_dict['HLp']
+        self.HLd_dict = HT_dict['HLd']
+        self.H3p_dict = HT_dict['H3p']
+        self.H3d_dict = HT_dict['H3d']
+
+        self.x = x
+        self.q2 = q2
+        self.l = lepton
+
+    def PC_2_p(self, list):
+        norm = self.N
+        return norm * ht_parametrisation(list, self.H2p_dict['nodes'], self.x, self.q2)
+
+    def PC_2_d(self, list):
+        norm = self.N * self.nuclear_target
+        return norm * ht_parametrisation(list, self.H2d_dict['nodes'], self.x, self.q2)
+
+    def PC_L_p(self, list):
+        norm = -self.N * self.yL / self.yp
+        return norm * ht_parametrisation(list, self.HLp_dict['nodes'], self.x, self.q2)
+
+    def PC_L_d(self, list):
+        norm = -self.N * self.yL / self.yp * self.nuclear_target
+        return norm * ht_parametrisation(list, self.HLd_dict['nodes'], self.x, self.q2)
+
+    def PC_3_p(self, list):
+        norm = self.N * np.power(-1, self.l) * self.ym / self.yp * self.x
+        return norm * ht_parametrisation(list, self.H3p_dict['nodes'], self.x, self.q2)
+
+    def PC_3_d(self, list):
+        norm = self.N * np.power(-1, self.l) * self.ym / self.yp * self.x * self.nuclear_target
+        return norm * ht_parametrisation(list, self.H3d_dict['nodes'], self.x, self.q2)
+
 
 class DIS_NUTEV(DIS_SNU):
-  def __init__(self, HT_dict,
-               target_tuple, #(A,Z)
-               kin_tuple,
-               Mh,
-               Mw,
-               lepton):
-    super().__init__(HT_dict, target_tuple, kin_tuple, Mh, Mw, lepton)
-    self.N = 50 * self.yp / np.power( 1 + self.q2 / self.Mw2, 2)
+    def __init__(self, HT_dict, target_tuple, kin_tuple, Mh, Mw, lepton):  # (A,Z)
+        super().__init__(HT_dict, target_tuple, kin_tuple, Mh, Mw, lepton)
+        self.N = 50 * self.yp / np.power(1 + self.q2 / self.Mw2, 2)
 
 
 class DIS_HERA_CC(DIS_SNU):
-  def __init__(self, HT_dict,
-               kin_tuple,
-               Mh,
-               Mw,
-               lepton):
-    super().__init__(HT_dict, (1,1), kin_tuple, Mh, Mw, lepton)
-    y = kin_tuple[2]
-    self.yp = 1 + np.power(1 - y, 2)
-    N = 1 / 4 * self.yp
\ No newline at end of file
+    def __init__(self, HT_dict, kin_tuple, Mh, Mw, lepton):
+        super().__init__(HT_dict, (1, 1), kin_tuple, Mh, Mw, lepton)
+        y = kin_tuple[2]
+        self.yp = 1 + np.power(1 - y, 2)
+        self.N = 1 / 4 * self.yp
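
For orientation, the core construction in `thcov_shifts_ht` above is: every spline node k of a higher-twist function contributes one shift vector delta_k (built from `beta_tilde_5pt` and `ht_parametrisation`), and the theory covariance matrix is the sum of rank-one outer products, S = sum_k delta_k delta_k^T. The following is a minimal, self-contained sketch of that logic, not part of the patch; the node positions, prior shifts and kinematics are invented illustrative values, not taken from any runcard.

import numpy as np
import scipy.interpolate as scint


def ht_parametrisation(delta_h, nodes, x, q2):
    # Cubic-spline parametrisation of the power correction H(x)/Q^2,
    # mirroring higher_twist_functions.ht_parametrisation.
    return scint.CubicSpline(nodes, delta_h)(x) / q2


def beta_tilde_5pt(delta_h, idx):
    # Keep only the shift belonging to node `idx`, zero everywhere else,
    # mirroring higher_twist_functions.beta_tilde_5pt.
    shifted = [0] * len(delta_h)
    shifted[idx] = delta_h[idx]
    return shifted


# Illustrative values (hypothetical, not from the patch):
nodes = [1e-3, 1e-2, 1e-1, 0.5, 0.9]   # x positions of the spline nodes
shifts = [0.1, 0.2, 0.1, 0.05, 0.02]   # prior shift assigned to each node
x = np.array([0.01, 0.1, 0.3, 0.6])    # kinematics of a toy dataset
q2 = np.array([4.0, 10.0, 20.0, 50.0])

# One rank-one contribution per node: S = sum_k delta_k delta_k^T
S = np.zeros((x.size, x.size))
for k in range(len(nodes)):
    delta_k = ht_parametrisation(beta_tilde_5pt(shifts, k), nodes, x, q2)
    S += np.outer(delta_k, delta_k)

By construction S is symmetric and positive semi-definite, which is what allows it to be added to the experimental covariance matrix downstream.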