
Commit 18252c7
Revert "Fix Llama initialization"
This reverts commit ddb4c02.
regisss committed Feb 14, 2024
1 parent ddb4c02 commit 18252c7
Showing 1 changed file with 2 additions and 2 deletions.
optimum/habana/transformers/models/llama/modeling_llama.py (4 changes: 2 additions & 2 deletions)
@@ -414,7 +414,7 @@ def post_mlp_forward(self, x):

 class GaudiLlamaDecoderLayer(LlamaDecoderLayer):
     def __init__(self, config: LlamaConfig, layer_idx: int):
-        super(LlamaDecoderLayer, self).__init__()
+        super().__init__(config, layer_idx)
         self.hidden_size = config.hidden_size
 
         self.self_attn = GaudiLlamaAttention(config=config, layer_idx=layer_idx)
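
The one-line change above toggles how the Gaudi decoder layer calls its parent constructor. super().__init__(config, layer_idx) runs LlamaDecoderLayer.__init__, which builds the stock attention, MLP and norm submodules that the subclass then overwrites, whereas super(LlamaDecoderLayer, self).__init__() skips LlamaDecoderLayer in the MRO and lands directly on nn.Module.__init__, so the subclass has to construct every submodule itself. A minimal, runnable sketch of the two patterns using stand-in classes (ToyDecoderLayer and friends are hypothetical, not the optimum-habana code):

import torch.nn as nn

# Stand-ins for the chain nn.Module -> LlamaDecoderLayer -> GaudiLlamaDecoderLayer.
class ToyDecoderLayer(nn.Module):
    def __init__(self, hidden_size: int, layer_idx: int):
        super().__init__()
        self.layer_idx = layer_idx
        self.self_attn = nn.Linear(hidden_size, hidden_size)  # stock submodule

class ToyGaudiLayerA(ToyDecoderLayer):
    """Pattern A: run the full parent constructor, then override submodules."""
    def __init__(self, hidden_size: int, layer_idx: int):
        super().__init__(hidden_size, layer_idx)              # builds the stock self_attn...
        self.self_attn = nn.Linear(hidden_size, hidden_size)  # ...which is immediately replaced

class ToyGaudiLayerB(ToyDecoderLayer):
    """Pattern B: skip the parent constructor and call nn.Module.__init__ directly."""
    def __init__(self, hidden_size: int, layer_idx: int):
        super(ToyDecoderLayer, self).__init__()               # jumps past ToyDecoderLayer in the MRO
        self.layer_idx = layer_idx                            # every attribute must be set by hand
        self.self_attn = nn.Linear(hidden_size, hidden_size)

a = ToyGaudiLayerA(8, 0)  # allocates the stock module, then discards it
b = ToyGaudiLayerB(8, 0)  # never allocates the stock module
assert a.layer_idx == b.layer_idx == 0

Pattern A stays in sync with the upstream class at the cost of building modules that are thrown away; Pattern B avoids the wasted allocation but silently drifts if the parent constructor later gains new attributes.
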
@@ -666,7 +666,7 @@ def forward(
                     attn_softmax_bf16,
                     False,
                     use_flash_attention,
-                    flash_attention_recompute,
+                    True,
                 )
             else:
                 layer_outputs = decoder_layer(
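
In the second hunk, the value forwarded through the gradient-checkpointing wrapper switches between the flash_attention_recompute variable and a hard-coded True. Since every argument after the callable is passed positionally, it is easy to lose track of which parameter a bare literal binds to. A minimal sketch with torch.utils.checkpoint and a hypothetical toy layer (not the optimum-habana code), assuming the wrapper behaves like torch's checkpoint function:

import torch
from torch.utils.checkpoint import checkpoint

# Hypothetical stand-in for a decoder layer whose forward takes boolean feature flags.
def toy_decoder_layer(hidden_states, use_flash_attention, flash_attention_recompute):
    # A real layer would pick an attention kernel based on these flags;
    # the toy version just returns a differentiable transformation.
    return hidden_states * 2

x = torch.randn(2, 4, requires_grad=True)

# Everything after the callable is forwarded positionally, mirroring
# self._gradient_checkpointing_func(decoder_layer.__call__, ...); the literal in the
# diff is simply the value bound to the layer's flash_attention_recompute parameter.
out = checkpoint(toy_decoder_layer, x, False, True, use_reentrant=False)
out.sum().backward()

Forwarding a named variable instead of a bare literal keeps call sites like this easier to audit.
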
