Raise UserWarning in RewardTrainer if PEFT target_modules="all-linear" #2743

Closed
22 changes: 22 additions & 0 deletions tests/test_reward_trainer.py
@@ -189,6 +189,28 @@ def test_train_lora_pretokenized(self):
                new_param = trainer.model.get_parameter(n)
                self.assertTrue(torch.allclose(param, new_param, atol=1e-12, rtol=1e-12))

    @require_peft
    def test_all_linear_user_warning(self):
        peft_config = LoraConfig(
            task_type=TaskType.SEQ_CLS,
            inference_mode=False,
            r=8,
            lora_alpha=32,
            lora_dropout=0.1,
            target_modules="all-linear",
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            dummy_dataset = load_dataset("trl-internal-testing/zen", "conversational_preference", split="train")
            training_args = RewardConfig(output_dir=tmp_dir, max_steps=3, report_to="none")
            with self.assertWarns(UserWarning, msg="You passed target_modules="):
                _ = RewardTrainer(
                    model=self.model,
                    args=training_args,
                    processing_class=self.tokenizer,
                    train_dataset=dummy_dataset,
                    peft_config=peft_config,
                )

    def test_margin(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            dummy_dataset_dict = {
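A side note on the assertion above, not part of the diff: when assertWarns is used as a context manager, its msg argument is only the failure message displayed if no warning is raised; it does not match against the warning text. Asserting on the message content itself would use assertWarnsRegex instead, e.g.:

    with self.assertWarnsRegex(UserWarning, "You passed target_modules="):
        _ = RewardTrainer(...)  # same construction as in the test above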
14 changes: 14 additions & 0 deletions trl/trainer/reward_trainer.py
@@ -25,6 +25,7 @@
from accelerate import PartialState
from accelerate.utils import gather_object
from datasets import Dataset
from peft.utils import INCLUDE_LINEAR_LAYERS_SHORTHAND
from transformers import (
    BaseImageProcessor,
    DataCollator,
@@ -159,6 +160,19 @@ def __init__(

                    model = prepare_model_for_kbit_training(model, **prepare_model_kwargs)

                # Warn if the user passes "all-linear" for the target_modules
                target_modules = (
                    peft_config.get("target_modules", None)
                    if isinstance(peft_config, dict)
                    else peft_config.target_modules
                )
Comment on lines +164 to +168 (Contributor, Author):
I am handling both cases of peft_config being a dict and being a PeftConfig object.

It is type-hinted as a dict in the RewardTrainer docstring, but I can see a PeftConfig object is passed to RewardTrainer here.
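For illustration, a minimal standalone sketch of the two shapes this lookup handles (illustrative values, not taken from the PR):

    from peft import LoraConfig

    # As a PeftConfig object, target_modules is an attribute:
    peft_config = LoraConfig(target_modules="all-linear")
    print(peft_config.target_modules)  # "all-linear"

    # As a plain dict, the same field is read with .get():
    peft_config = {"target_modules": "all-linear"}
    print(peft_config.get("target_modules", None))  # "all-linear"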

                if target_modules == INCLUDE_LINEAR_LAYERS_SHORTHAND:
                    warnings.warn(
                        f"You passed target_modules='{INCLUDE_LINEAR_LAYERS_SHORTHAND}' in the peft_config. "
                        "This will result in all linear layers except the output layer being adapted. "
                        "This could negatively impact the performance of the reward model as the output layer "
                        "(used for scoring of chosen and rejected completions) will not be adapted or trained.",
                        UserWarning,
                    )
                model = get_peft_model(model, peft_config)

        # Disable dropout in the model
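For readers who hit this warning: a hedged sketch of one way around it is to list the target modules explicitly and mark the scoring head as trainable via modules_to_save. The names q_proj and v_proj are an assumption (LLaMA-style attention projections), and "score" is the usual name of the sequence-classification head in transformers; both should be adjusted to the model at hand.

    from peft import LoraConfig, TaskType

    peft_config = LoraConfig(
        task_type=TaskType.SEQ_CLS,
        r=8,
        lora_alpha=32,
        lora_dropout=0.1,
        target_modules=["q_proj", "v_proj"],  # explicit list, assumed LLaMA-style names
        modules_to_save=["score"],  # keep the scoring head fully trained
    )

Because target_modules is then an explicit list rather than the "all-linear" shorthand, the comparison above is False and the warning is not raised.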