Update to super() for py3 style.
PiperOrigin-RevId: 464429203
saberkun authored and tensorflower-gardener committed Aug 1, 2022
1 parent 0e19256 commit 0ff8db0
Showing 15 changed files with 42 additions and 42 deletions.
4 changes: 2 additions & 2 deletions official/nlp/modeling/layers/block_diag_feedforward.py
@@ -59,7 +59,7 @@ def __init__(
kernel_constraint: Optional[tf.keras.constraints.Constraint] = None,
bias_constraint: Optional[tf.keras.constraints.Constraint] = None,
**kwargs): # pylint: disable=g-doc-args
super(BlockDiagFeedforward, self).__init__(**kwargs)
super().__init__(**kwargs)
self._intermediate_size = intermediate_size
self._intermediate_activation = intermediate_activation
self._dropout = dropout
@@ -156,7 +156,7 @@ def get_config(self):
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint)
}
base_config = super(BlockDiagFeedforward, self).get_config()
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))

def call(self, inputs):
2 changes: 1 addition & 1 deletion official/nlp/modeling/layers/gaussian_process.py
@@ -116,7 +116,7 @@ def __init__(self,
name: (string) Layer name.
**gp_output_kwargs: Additional keyword arguments to dense output layer.
"""
super(RandomFeatureGaussianProcess, self).__init__(name=name, dtype=dtype)
super().__init__(name=name, dtype=dtype)
self.units = units
self.num_inducing = num_inducing

4 changes: 2 additions & 2 deletions official/nlp/modeling/layers/masked_lm.py
@@ -47,7 +47,7 @@ def __init__(self,
output='logits',
name=None,
**kwargs):
super(MaskedLM, self).__init__(name=name, **kwargs)
super().__init__(name=name, **kwargs)
self.embedding_table = embedding_table
self.activation = activation
self.initializer = tf.keras.initializers.get(initializer)
@@ -73,7 +73,7 @@ def build(self, input_shape):
initializer='zeros',
trainable=True)

super(MaskedLM, self).build(input_shape)
super().build(input_shape)

def call(self, sequence_data, masked_positions):
masked_lm_input = self._gather_indexes(sequence_data, masked_positions)
4 changes: 2 additions & 2 deletions official/nlp/modeling/layers/masked_softmax.py
@@ -53,7 +53,7 @@ def __init__(self,
self._normalization_axes = (-1,)
else:
self._normalization_axes = normalization_axes
super(MaskedSoftmax, self).__init__(**kwargs)
super().__init__(**kwargs)

def call(self, scores, mask=None):

@@ -81,5 +81,5 @@ def get_config(self):
'mask_expansion_axes': self._mask_expansion_axes,
'normalization_axes': self._normalization_axes
}
base_config = super(MaskedSoftmax, self).get_config()
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
4 changes: 2 additions & 2 deletions official/nlp/modeling/layers/mat_mul_with_margin.py
@@ -36,7 +36,7 @@ def __init__(self,
logit_scale=1.0,
logit_margin=0.0,
**kwargs):
super(MatMulWithMargin, self).__init__(**kwargs)
super().__init__(**kwargs)
self.logit_scale = logit_scale
self.logit_margin = logit_margin

@@ -61,7 +61,7 @@ def get_config(self):
config = {
'logit_scale': self.logit_scale,
'logit_margin': self.logit_margin}
config.update(super(MatMulWithMargin, self).get_config())
config.update(super().get_config())
return config

@classmethod
8 changes: 4 additions & 4 deletions official/nlp/modeling/layers/mobile_bert_layers.py
@@ -26,7 +26,7 @@ class NoNorm(tf.keras.layers.Layer):
"""Apply element-wise linear transformation to the last dimension."""

def __init__(self, name=None):
super(NoNorm, self).__init__(name=name)
super().__init__(name=name)

def build(self, shape):
kernal_size = shape[-1]
@@ -98,7 +98,7 @@ def __init__(self,
dropout_rate: Dropout rate.
**kwargs: keyword arguments.
"""
super(MobileBertEmbedding, self).__init__(**kwargs)
super().__init__(**kwargs)
self.word_vocab_size = word_vocab_size
self.word_embed_size = word_embed_size
self.type_vocab_size = type_vocab_size
@@ -222,7 +222,7 @@ def __init__(self,
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
super(MobileBertTransformer, self).__init__(**kwargs)
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
@@ -459,7 +459,7 @@ def __init__(self,
`predictions`.
**kwargs: keyword arguments.
"""
super(MobileBertMaskedLM, self).__init__(**kwargs)
super().__init__(**kwargs)
self.embedding_table = embedding_table
self.activation = activation
self.initializer = tf.keras.initializers.get(initializer)
6 changes: 3 additions & 3 deletions official/nlp/modeling/layers/multi_channel_attention.py
@@ -49,7 +49,7 @@ def __init__(self,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(VotingAttention, self).__init__(**kwargs)
super().__init__(**kwargs)
self._num_heads = num_heads
self._head_size = head_size
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
@@ -82,7 +82,7 @@ def build(self, unused_input_shapes):
kernel_initializer=tf_utils.clone_initializer(self._kernel_initializer),
bias_initializer=tf_utils.clone_initializer(self._bias_initializer),
**common_kwargs)
super(VotingAttention, self).build(unused_input_shapes)
super().build(unused_input_shapes)

def call(self, encoder_outputs, doc_attention_mask):
num_docs = tf_utils.get_shape_list(encoder_outputs, expected_rank=[4])[1]
@@ -123,7 +123,7 @@ class MultiChannelAttention(tf.keras.layers.MultiHeadAttention):
"""

def _build_attention(self, rank):
super(MultiChannelAttention, self)._build_attention(rank) # pytype: disable=attribute-error # typed-keras
super()._build_attention(rank) # pytype: disable=attribute-error # typed-keras
self._masked_softmax = masked_softmax.MaskedSoftmax(mask_expansion_axes=[2])

def call(self,
6 changes: 3 additions & 3 deletions official/nlp/modeling/layers/on_device_embedding.py
@@ -47,7 +47,7 @@ def __init__(self,
scale_factor=None,
**kwargs):

super(OnDeviceEmbedding, self).__init__(**kwargs)
super().__init__(**kwargs)
self._vocab_size = vocab_size
self._embedding_width = embedding_width
self._initializer = initializer
@@ -62,7 +62,7 @@ def get_config(self):
"use_one_hot": self._use_one_hot,
"scale_factor": self._scale_factor,
}
base_config = super(OnDeviceEmbedding, self).get_config()
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))

def build(self, input_shape):
@@ -72,7 +72,7 @@ def build(self, input_shape):
initializer=self._initializer,
dtype=tf.float32)

super(OnDeviceEmbedding, self).build(input_shape)
super().build(input_shape)

def call(self, inputs):
flat_inputs = tf.reshape(inputs, [-1])
4 changes: 2 additions & 2 deletions official/nlp/modeling/layers/position_embedding.py
@@ -53,7 +53,7 @@ def __init__(self,
seq_axis=1,
**kwargs):

super(PositionEmbedding, self).__init__(**kwargs)
super().__init__(**kwargs)
if max_length is None:
raise ValueError(
"`max_length` must be an Integer, not `None`."
@@ -81,7 +81,7 @@ def build(self, input_shape):
shape=[weight_sequence_length, width],
initializer=self._initializer)

super(PositionEmbedding, self).build(input_shape)
super().build(input_shape)

def call(self, inputs):
input_shape = tf.shape(inputs)
4 changes: 2 additions & 2 deletions official/nlp/modeling/layers/reuse_attention.py
@@ -223,7 +223,7 @@ def __init__(self,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(ReuseMultiHeadAttention, self).__init__(**kwargs)
super().__init__(**kwargs)
self._num_heads = num_heads
self._key_dim = key_dim
self._value_dim = value_dim if value_dim else key_dim
@@ -301,7 +301,7 @@ def get_config(self):
"key_shape": self._key_shape,
"value_shape": self._value_shape,
}
base_config = super(ReuseMultiHeadAttention, self).get_config()
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))

@classmethod
8 changes: 4 additions & 4 deletions official/nlp/modeling/layers/routing.py
@@ -33,7 +33,7 @@ def __init__(self,
self._vocab_size = vocab_size
self._init_importance = init_importance
self._moving_average_beta = moving_average_beta
super(TokenImportanceWithMovingAvg, self).__init__(**kwargs)
super().__init__(**kwargs)

def build(self, input_shape):
self._importance_embedding = self.add_weight(
@@ -51,7 +51,7 @@ def get_config(self):
"moving_average_beta":
self._moving_average_beta,
}
base_config = super(TokenImportanceWithMovingAvg, self).get_config()
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))

def update_token_importance(self, token_ids, importance):
@@ -80,7 +80,7 @@ def __init__(self,
**kwargs):
self._top_k = top_k
self._random_k = random_k
super(SelectTopK, self).__init__(**kwargs)
super().__init__(**kwargs)

def get_config(self):
config = {
@@ -89,7 +89,7 @@ def get_config(self):
"random_k":
self._random_k,
}
base_config = super(SelectTopK, self).get_config()
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))

def call(self, inputs):
8 changes: 4 additions & 4 deletions official/nlp/modeling/layers/spectral_normalization.py
@@ -74,11 +74,11 @@ def __init__(self,
if not isinstance(layer, tf.keras.layers.Layer):
raise ValueError('`layer` must be a `tf.keras.layer.Layer`. '
'Observed `{}`'.format(layer))
super(SpectralNormalization, self).__init__(
super().__init__(
layer, name=wrapper_name, **kwargs)

def build(self, input_shape):
super(SpectralNormalization, self).build(input_shape)
super().build(input_shape)
self.layer.kernel._aggregation = self.aggregation # pylint: disable=protected-access
self._dtype = self.layer.kernel.dtype

@@ -193,7 +193,7 @@ def __init__(self,
raise ValueError(
'layer must be a `tf.keras.layer.Conv2D` instance. You passed: {input}'
.format(input=layer))
super(SpectralNormalizationConv2D, self).__init__(layer, **kwargs)
super().__init__(layer, **kwargs)

def build(self, input_shape):
if not self.layer.built:
@@ -238,7 +238,7 @@ def build(self, input_shape):
dtype=self.dtype,
aggregation=self.aggregation)

super(SpectralNormalizationConv2D, self).build()
super().build()

def call(self, inputs):
u_update_op, v_update_op, w_update_op = self.update_weights()
6 changes: 3 additions & 3 deletions official/nlp/modeling/layers/tn_expand_condense.py
@@ -66,7 +66,7 @@ def __init__(self,
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)

super(TNExpandCondense, self).__init__(**kwargs)
super().__init__(**kwargs)

assert proj_multiplier in [
2, 4, 6, 8, 10, 12
@@ -86,7 +86,7 @@ def build(self, input_shape: List[int]) -> None:
'The last dimension of the inputs to `TNExpandCondense` '
'should be defined. Found `None`.')

super(TNExpandCondense, self).build(input_shape)
super().build(input_shape)

self.proj_size = self.proj_multiplier * input_shape[-1]

@@ -178,5 +178,5 @@ def get_config(self) -> Dict[Any, Any]:
getattr(self, initializer_arg))

# Get base config
base_config = super(TNExpandCondense, self).get_config()
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
6 changes: 3 additions & 3 deletions official/nlp/modeling/layers/tn_transformer_expand_condense.py
@@ -78,7 +78,7 @@ def __init__(self,
intermediate_dropout=0.0,
attention_initializer=None,
**kwargs):
super(TNTransformerExpandCondense, self).__init__(**kwargs)
super().__init__(**kwargs)

self._num_heads = num_attention_heads
self._intermediate_size = intermediate_size
@@ -170,7 +170,7 @@ def build(self, input_shape):
epsilon=self._norm_epsilon,
dtype=tf.float32)

super(TNTransformerExpandCondense, self).build(input_shape)
super().build(input_shape)

def get_config(self):
config = {
@@ -211,7 +211,7 @@ def get_config(self):
"attention_initializer":
tf.keras.initializers.serialize(self._attention_initializer)
}
base_config = super(TNTransformerExpandCondense, self).get_config()
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))

def call(self, inputs):
10 changes: 5 additions & 5 deletions official/nlp/modeling/layers/transformer_xl.py
@@ -103,7 +103,7 @@ def __init__(self,
**kwargs):
"""Initializes TransformerXLBlock layer."""

super(TransformerXLBlock, self).__init__(**kwargs)
super().__init__(**kwargs)
self._vocab_size = vocab_size
self._num_heads = num_attention_heads
self._head_size = head_size
@@ -181,7 +181,7 @@ def build(self, input_shape):
axis=-1,
epsilon=self._norm_epsilon)

super(TransformerXLBlock, self).build(input_shape)
super().build(input_shape)

def get_config(self):
config = {
@@ -210,7 +210,7 @@ def get_config(self):
"inner_dropout":
self._inner_dropout,
}
base_config = super(TransformerXLBlock, self).get_config()
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))

def call(self,
@@ -371,7 +371,7 @@ def __init__(self,
inner_activation="relu",
**kwargs):
"""Initializes TransformerXL."""
super(TransformerXL, self).__init__(**kwargs)
super().__init__(**kwargs)

self._vocab_size = vocab_size
self._initializer = initializer
@@ -461,7 +461,7 @@ def get_config(self):
"inner_activation":
self._inner_activation,
}
base_config = super(TransformerXL, self).get_config()
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))

def call(self,
