Commit

make the code compatible with pytorch
scarlehoff committed Aug 16, 2024
1 parent e14496c commit 4448ba4
Showing 5 changed files with 25 additions and 28 deletions.
10 changes: 5 additions & 5 deletions extra_tests/regression_fits/central.yml
@@ -12,9 +12,9 @@ description: n3fit regression test
# ewk: apply ewk k-factors
# sys: systematics treatment (see systypes)
dataset_inputs:
- - {dataset: NMC_NC_NOTFIXED_P_EM-SIGMARED, frac: 0.55, variant: legacy}
- - {dataset: SLAC_NC_NOTFIXED_P_EM-F2, frac: 0.75, variant: legacy}
- - {dataset: CMS_Z0J_8TEV_PT-Y, frac: 0.75, cfac: [QCD], variant: legacy_10}
+ - {dataset: NMC_NC_NOTFIXED_P_EM-SIGMARED, frac: 0.5, variant: legacy}
+ - {dataset: SLAC_NC_NOTFIXED_P_EM-F2, frac: 0.5, variant: legacy}
+ - {dataset: CMS_Z0J_8TEV_PT-Y, frac: 0.5, cfac: [QCD], variant: legacy_10}
- {dataset: ATLAS_TTBAR_8TEV_TOT_X-SEC, frac: 1.0, cfac: [QCD], variant: legacy}

############################################################
@@ -32,7 +32,8 @@ genrep: False # on = generate MC replicas, False = use real data
trvlseed: 3
nnseed: 2
mcseed: 1
- sum_rules: vsr
+
+ load: "weights.weights.h5"

parameters: # This defines the parameter dictionary that is passed to the Model Trainer
nodes_per_layer: [15, 10, 8]
@@ -77,4 +78,3 @@ integrability:
############################################################
debug: true
double_precision: false
- parallel_models: true
17 changes: 10 additions & 7 deletions n3fit/src/n3fit/backends/keras_backend/MetaModel.py
@@ -8,6 +8,8 @@
from pathlib import Path
import re

+ from keras import backend as K
+ from keras import ops as Kops
from keras import optimizers as Kopt
from keras.models import Model
import numpy as np
@@ -16,8 +18,10 @@
import n3fit.backends.keras_backend.operations as op

# We need a function to transform tensors to numpy/python primitives
- # which is not part of the official TF interface and can change with the version
- _to_numpy_or_python_type = lambda ret: {k: i.numpy() for k, i in ret.items()}
+ if K.backend() == "torch":
+     _to_numpy_or_python_type = lambda ret: {k: i.detach().numpy() for k, i in ret.items()}
+ else:
+     _to_numpy_or_python_type = lambda ret: {k: i.numpy() for k, i in ret.items()}

# Starting with TF 2.16, a memory leak in TF https://github.com/tensorflow/tensorflow/issues/64170
# makes jit compilation unusable in GPU.
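
Why the torch branch above needs the extra detach(): torch tensors that are attached to the autograd graph refuse direct conversion to numpy. A minimal sketch (assumes PyTorch is installed; illustrative, not part of the commit):

    import torch

    t = 2.0 * torch.ones(3, requires_grad=True)
    # t.numpy() raises "RuntimeError: Can't call numpy() on Tensor that requires grad"
    print(t.detach().numpy())  # detach() drops the autograd graph first: [2. 2. 2.]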
@@ -115,7 +119,6 @@ def __init__(self, input_tensors, output_tensors, scaler=None, input_values=None
self.compute_losses_function = None
self._scaler = scaler

- @tf.autograph.experimental.do_not_convert
def _parse_input(self, extra_input=None):
"""Returns the input data the model was compiled with.
Introduces the extra_input in the places assigned to the placeholders.
@@ -167,8 +170,8 @@ def perform_fit(self, x=None, y=None, epochs=1, **kwargs):
steps_per_epoch = self._determine_steps_per_epoch(epochs)

for k, v in x_params.items():
- x_params[k] = tf.repeat(v, steps_per_epoch, axis=0)
- y = [tf.repeat(yi, steps_per_epoch, axis=0) for yi in y]
+ x_params[k] = Kops.repeat(v, steps_per_epoch, axis=0)
+ y = [Kops.repeat(yi, steps_per_epoch, axis=0) for yi in y]

history = super().fit(
x=x_params, y=y, epochs=epochs // steps_per_epoch, batch_size=1, **kwargs
@@ -222,13 +225,13 @@ def compute_losses(self):
inputs[k] = v[:1]

# Compile an evaluation function
- @tf.function
+
def losses_fun():
predictions = self(inputs)
# If we only have one dataset the output changes
if len(out_names) == 2:
predictions = [predictions]
- total_loss = tf.reduce_sum(predictions, axis=0)
+ total_loss = Kops.sum(predictions, axis=0)
ret = [total_loss] + predictions
return dict(zip(out_names, ret))
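
Both replacements in this file follow the same pattern: keras.ops mirrors the numpy API and dispatches to whichever backend Keras is running on (TensorFlow, JAX or PyTorch), so it can stand in for backend-specific calls such as tf.repeat and tf.reduce_sum. A small sketch (illustrative, not commit code):

    import numpy as np
    from keras import ops as Kops

    x = np.array([[1.0, 2.0]])
    repeated = Kops.repeat(x, 3, axis=0)  # shape (3, 2): three copies of the row
    total = Kops.sum(repeated, axis=0)    # shape (2,): column-wise sum -> [3., 6.]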

1 change: 0 additions & 1 deletion n3fit/src/n3fit/backends/keras_backend/callbacks.py
@@ -171,7 +171,6 @@ def on_train_begin(self, logs=None):
layer = self.model.get_layer(layer_name)
self.updateable_weights.append(layer.weights)

- @tf.function
def _update_weights(self):
"""Update all the weight with the corresponding multipliers
Wrapped with tf.function to compensate the for loops as both weights variables
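
The @tf.function removals in this file and in operations.py below share one rationale: tf.function traces TensorFlow graphs, so it cannot wrap code that operates on torch tensors. A sketch of the backend switch (assumes Keras 3 with PyTorch available; the environment variable must be set before keras is first imported; illustrative, not commit code):

    import os
    os.environ["KERAS_BACKEND"] = "torch"  # must precede the first keras import

    import keras
    from keras import ops as Kops

    def flatten(x):
        # plain Python plus keras.ops stays backend-agnostic
        return Kops.reshape(x, (-1,))

    print(keras.backend.backend())           # "torch"
    print(flatten(Kops.ones((2, 3))).shape)  # (6,) as a torch.Size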
23 changes: 9 additions & 14 deletions n3fit/src/n3fit/backends/keras_backend/operations.py
@@ -6,7 +6,7 @@
This includes an implementation of the NNPDF operations on fktable in the keras
language (with the mapping ``c_to_py_fun``) into Keras ``Lambda`` layers.
- Tensor operations are compiled through the @tf.function decorator for optimization
+ Tensor operations are compiled through the decorator for optimization
The rest of the operations in this module are divided into four categories:
numpy to tensor:
@@ -92,7 +92,6 @@ def c_to_py_fun(op_name, name="dataset"):
except KeyError as e:
raise ValueError(f"Operation {op_name} not recognised") from e

- @tf.function
def operate_on_tensors(tensor_list):
return operation(*tensor_list)

@@ -196,7 +195,8 @@ def tmp(x):

# Generation operations
# generate tensors of given shape/content
- @tf.function
+
+
def tensor_ones_like(*args, **kwargs):
"""
Generates a tensor of ones of the same shape as the input tensor
@@ -207,19 +207,18 @@ def tensor_ones_like(*args, **kwargs):

# Property operations
# modify properties of the tensor like the shape or elements it has
- @tf.function
+
+
def reshape(x, shape):
"""reshape tensor x"""
return Kops.reshape(x, shape)


- @tf.function
def flatten(x):
"""Flatten tensor x"""
return reshape(x, (-1,))


- @tf.function
def transpose(tensor, **kwargs):
"""
Transpose a layer,
@@ -263,23 +262,20 @@ def tensor_product(*args, **kwargs):
return Kops.tensordot(*args, **kwargs)


- @tf.function
def pow(tensor, power):
"""
Computes the power of the tensor
"""
return Kops.power(tensor, power)


- @tf.function(reduce_retracing=True)
def op_log(o_tensor, **kwargs):
"""
Computes the logarithm of the input
"""
return Kops.log(o_tensor)


- @tf.function
def sum(*args, **kwargs):
"""
Computes the sum of the elements of the tensor
@@ -310,24 +306,23 @@
Moves the axis of the tensor from source to destination, as in numpy.swapaxes.
see full `docs <https://numpy.org/doc/stable/reference/generated/numpy.swapaxes.html>`_
"""
- indices = list(range(tensor.shape.rank))
+ rank = len(tensor.shape)
+ indices = list(range(rank))
if source < 0:
- source += tensor.shape.rank
+ source += rank
if destination < 0:
- destination += tensor.shape.rank
+ destination += rank

indices[source], indices[destination] = indices[destination], indices[source]

return Kops.transpose(tensor, indices)


- @tf.function
def elu(x, alpha=1.0, **kwargs):
new_layer = ELU(alpha=alpha, **kwargs)
return new_layer(x)


- @tf.function
def backend_function(fun_name, *args, **kwargs):
"""
Wrapper to call non-explicitly implemented backend functions by name: (``fun_name``)
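
The swapaxes rewrite above trades TensorFlow's TensorShape-only tensor.shape.rank for len(tensor.shape), which is valid across backends. A quick check (assumes numpy and PyTorch are installed; illustrative only):

    import numpy as np
    import torch

    for t in (np.zeros((2, 3, 4)), torch.zeros(2, 3, 4)):
        print(len(t.shape))  # 3 for both; t.shape.rank only exists on tf.TensorShape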
2 changes: 1 addition & 1 deletion n3fit/src/n3fit/layers/observable.py
@@ -208,7 +208,7 @@ def compute_float_mask(bool_mask):
"""
# Create a tensor with the shape (**bool_mask.shape, num_active_flavours)
masked_to_full = []
- for idx in np.argwhere(bool_mask):
+ for idx in np.argwhere(np.array(bool_mask)):
temp_matrix = np.zeros(bool_mask.shape)
temp_matrix[tuple(idx)] = 1
masked_to_full.append(temp_matrix)
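
A sketch of the pattern behind this one-line fix (assumption: bool_mask may now arrive as a backend tensor, e.g. a torch.Tensor, so it is normalised to a numpy array before np.argwhere):

    import numpy as np
    import torch

    bool_mask = torch.tensor([True, False, True])
    for idx in np.argwhere(np.array(bool_mask)):
        print(idx)  # [0] then [2]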
