Fix gradient checkpointing bug in gptneox (huggingface#21815)
* Fix gradient checkpointing bug in gptneox

* Remove use_cache block
KMFODA authored and gongbaitao committed Feb 27, 2023
1 parent 38f6caa commit 7006a85
Showing 1 changed file with 7 additions and 5 deletions.
12 changes: 7 additions & 5 deletions src/transformers/models/gpt_neox/modeling_gpt_neox.py
@@ -500,6 +500,13 @@ def forward(

         hidden_states = inputs_embeds

+        if self.gradient_checkpointing and self.training:
+            if use_cache:
+                logger.warning(
+                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                )
+                use_cache = False
+
         presents = () if use_cache else None
         all_attentions = () if output_attentions else None
         all_hidden_states = () if output_hidden_states else None
@@ -508,11 +515,6 @@ def forward(
                 all_hidden_states = all_hidden_states + (hidden_states,)

             if self.gradient_checkpointing and self.training:
-                if use_cache:
-                    logger.warning(
-                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
-                    )
-                    use_cache = False

                 def create_custom_forward(module):
                     def custom_forward(*inputs):
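For context, the sketch below shows the ordering this commit establishes: when gradient checkpointing is active during training, use_cache is forced to False before presents is initialized and before the layer loop starts, instead of inside the per-layer checkpointing branch as before. It is a simplified stand-in, not the actual modeling_gpt_neox.py source; the class name GPTNeoXLikeModel, its constructor arguments, and the bare layer call are hypothetical.

# A minimal sketch of the corrected ordering (hypothetical stand-in code,
# not the actual Transformers implementation).
import logging

logger = logging.getLogger(__name__)


class GPTNeoXLikeModel:  # hypothetical name; not the real GPTNeoXModel
    def __init__(self, layers, gradient_checkpointing=True, training=True):
        self.layers = layers
        self.gradient_checkpointing = gradient_checkpointing
        self.training = training

    def forward(self, inputs_embeds, use_cache=True,
                output_attentions=False, output_hidden_states=False):
        hidden_states = inputs_embeds

        # The fix: force `use_cache` off *before* `presents` is built and
        # before the layer loop, not inside the per-layer branch.
        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning(
                    "`use_cache=True` is incompatible with gradient "
                    "checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        presents = () if use_cache else None
        all_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None

        for layer in self.layers:
            # Per-layer work (checkpointed or not) goes here; the
            # use_cache check no longer lives inside this loop.
            hidden_states = layer(hidden_states)

        return hidden_states, presents, all_attentions, all_hidden_states

As the diff shows, the old ordering initialized presents from a stale use_cache=True before the flag was flipped inside the loop; hoisting the check keeps the cache bookkeeping consistent from the start of the forward pass.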
