Add mandatory total_step to AdaLoraConfig tests
nemo committed Jan 23, 2025
1 parent 9bf10c2 commit a2aaaa6
Showing 8 changed files with 86 additions and 74 deletions.
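
The change applies the same one-line fix everywhere: `AdaLoraConfig` now treats `total_step` as mandatory, so every test that constructs one passes it explicitly (usually `total_step=1`, or `total_step=200` where a full budgeting schedule is exercised). A minimal sketch of the resulting pattern follows; the `TinyMLP` module and its dimensions are illustrative only and not part of the commit.

```python
# Illustrative sketch of the pattern applied throughout these tests:
# every AdaLoraConfig is now constructed with the mandatory total_step.
import torch.nn as nn

from peft import AdaLoraConfig, get_peft_model


class TinyMLP(nn.Module):
    def __init__(self):
        super().__init__()
        self.lin0 = nn.Linear(10, 10)

    def forward(self, x):
        return self.lin0(x)


config = AdaLoraConfig(
    r=8,
    init_lora_weights=False,
    target_modules=["lin0"],
    total_step=1,  # mandatory: total training steps for the AdaLoRA rank schedule
)
model = get_peft_model(TinyMLP(), config)
```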
4 changes: 4 additions & 0 deletions tests/regression/test_regression.py
@@ -328,6 +328,7 @@ def test_adalora(self):
r=8,
init_lora_weights=False,
target_modules=["lin0"],
total_step=1,
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "adalora_mlp")
@@ -567,6 +568,7 @@ def test_adalora(self):
config = AdaLoraConfig(
r=8,
init_lora_weights=False,
total_step=1,
)
model = get_peft_model(base_model, config)
self.assert_results_equal_or_store(model, "adalora_opt-350m")
@@ -621,6 +623,7 @@ def test_adalora(self):
target_r=4,
tinit=50,
tfinal=100,
total_step=200,
deltaT=5,
beta1=0.3,
beta2=0.3,
@@ -681,6 +684,7 @@ def test_adalora(self):
target_r=4,
tinit=50,
tfinal=100,
total_step=200,
deltaT=5,
beta1=0.3,
beta2=0.3,
4 changes: 2 additions & 2 deletions tests/test_common_gpu.py
@@ -291,7 +291,7 @@ def test_adalora_bnb_quantization_from_pretrained_safetensors(self, quantization
kwargs["quantization_config"] = BitsAndBytesConfig(load_in_8bit=True)

model = AutoModelForCausalLM.from_pretrained(model_id, **kwargs)
config = AdaLoraConfig(task_type=TaskType.CAUSAL_LM)
config = AdaLoraConfig(task_type=TaskType.CAUSAL_LM, total_step=1)
peft_model = get_peft_model(model, config)
peft_model = prepare_model_for_kbit_training(peft_model)
peft_model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(0))
@@ -1624,7 +1624,7 @@ def test_lora_dora_add_new_adapter_does_not_change_device(self, mlp):
def test_adalora_add_new_adapter_does_not_change_device(self, mlp):
# same as first test, but using AdaLORA
# AdaLora does not like multiple trainable adapters, hence inference_mode=True
config = AdaLoraConfig(target_modules=["lin0"], inference_mode=True)
config = AdaLoraConfig(target_modules=["lin0"], inference_mode=True, total_step=1)
model = get_peft_model(mlp, config)
model = model.to(self.device)
model.lin0.lora_A.cpu()
124 changes: 65 additions & 59 deletions tests/test_config.py
@@ -48,30 +48,31 @@

PEFT_MODELS_TO_TEST = [("lewtun/tiny-random-OPTForCausalLM-delta", "v1")]

# Config classes and their mandatory parameters
ALL_CONFIG_CLASSES = (
AdaLoraConfig,
AdaptionPromptConfig,
BOFTConfig,
FourierFTConfig,
HRAConfig,
IA3Config,
LNTuningConfig,
LoHaConfig,
LoKrConfig,
LoraConfig,
MultitaskPromptTuningConfig,
PolyConfig,
PrefixTuningConfig,
PromptEncoderConfig,
PromptTuningConfig,
VeraConfig,
VBLoRAConfig,
(AdaLoraConfig, {"total_step": 1}),
(AdaptionPromptConfig, {}),
(BOFTConfig, {}),
(FourierFTConfig, {}),
(HRAConfig, {}),
(IA3Config, {}),
(LNTuningConfig, {}),
(LoHaConfig, {}),
(LoKrConfig, {}),
(LoraConfig, {}),
(MultitaskPromptTuningConfig, {}),
(PolyConfig, {}),
(PrefixTuningConfig, {}),
(PromptEncoderConfig, {}),
(PromptTuningConfig, {}),
(VeraConfig, {}),
(VBLoRAConfig, {}),
)


class TestPeftConfig:
@pytest.mark.parametrize("config_class", ALL_CONFIG_CLASSES)
def test_methods(self, config_class):
@pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES)
def test_methods(self, config_class, mandatory_kwargs):
r"""
Test if all configs have the expected methods. Here we test
- to_dict
@@ -80,22 +81,22 @@ def test_methods(self, config_class):
- from_json_file
"""
# test if all configs have the expected methods
config = config_class()
config = config_class(**mandatory_kwargs)
assert hasattr(config, "to_dict")
assert hasattr(config, "save_pretrained")
assert hasattr(config, "from_pretrained")
assert hasattr(config, "from_json_file")

@pytest.mark.parametrize("config_class", ALL_CONFIG_CLASSES)
@pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES)
@pytest.mark.parametrize("valid_task_type", list(TaskType) + [None])
def test_valid_task_type(self, config_class, valid_task_type):
def test_valid_task_type(self, config_class, mandatory_kwargs, valid_task_type):
r"""
Test if all configs work correctly for all valid task types
"""
config_class(task_type=valid_task_type)
config_class(task_type=valid_task_type, **mandatory_kwargs)

@pytest.mark.parametrize("config_class", ALL_CONFIG_CLASSES)
def test_invalid_task_type(self, config_class):
@pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES)
def test_invalid_task_type(self, config_class, mandatory_kwargs):
r"""
Test if all configs correctly raise the defined error message for invalid task types.
"""
@@ -104,7 +105,7 @@ def test_invalid_task_type(self, config_class):
ValueError,
match=f"Invalid task type: '{invalid_task_type}'. Must be one of the following task types: {', '.join(TaskType)}.",
):
config_class(task_type=invalid_task_type)
config_class(task_type=invalid_task_type, **mandatory_kwargs)

def test_from_peft_type(self):
r"""
@@ -115,11 +116,16 @@ def test_from_peft_type(self):

for peft_type in PeftType:
expected_cls = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type]
config = PeftConfig.from_peft_type(peft_type=peft_type)
mandatory_config_kwargs = {}

if expected_cls == AdaLoraConfig:
mandatory_config_kwargs = {'total_step': 1}

config = PeftConfig.from_peft_type(peft_type=peft_type, **mandatory_config_kwargs)
assert type(config) is expected_cls

@pytest.mark.parametrize("config_class", ALL_CONFIG_CLASSES)
def test_from_pretrained(self, config_class):
@pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES)
def test_from_pretrained(self, config_class, mandatory_kwargs):
r"""
Test if the config is correctly loaded using:
- from_pretrained
@@ -128,22 +134,22 @@ def test_from_pretrained(self, config_class):
# Test we can load config from delta
config_class.from_pretrained(model_name, revision=revision)

@pytest.mark.parametrize("config_class", ALL_CONFIG_CLASSES)
def test_save_pretrained(self, config_class):
@pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES)
def test_save_pretrained(self, config_class, mandatory_kwargs):
r"""
Test if the config is correctly saved and loaded using
- save_pretrained
"""
config = config_class()
config = config_class(**mandatory_kwargs)
with tempfile.TemporaryDirectory() as tmp_dirname:
config.save_pretrained(tmp_dirname)

config_from_pretrained = config_class.from_pretrained(tmp_dirname)
assert config.to_dict() == config_from_pretrained.to_dict()

@pytest.mark.parametrize("config_class", ALL_CONFIG_CLASSES)
def test_from_json_file(self, config_class):
config = config_class()
@pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES)
def test_from_json_file(self, config_class, mandatory_kwargs):
config = config_class(**mandatory_kwargs)
with tempfile.TemporaryDirectory() as tmp_dirname:
config.save_pretrained(tmp_dirname)

@@ -159,17 +165,17 @@ def test_from_json_file(self, config_class):
config_from_json = config_class.from_json_file(config_path)
assert config.to_dict() == config_from_json

@pytest.mark.parametrize("config_class", ALL_CONFIG_CLASSES)
def test_to_dict(self, config_class):
@pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES)
def test_to_dict(self, config_class, mandatory_kwargs):
r"""
Test if the config can be correctly converted to a dict using:
- to_dict
"""
config = config_class()
config = config_class(**mandatory_kwargs)
assert isinstance(config.to_dict(), dict)

@pytest.mark.parametrize("config_class", ALL_CONFIG_CLASSES)
def test_from_pretrained_cache_dir(self, config_class):
@pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES)
def test_from_pretrained_cache_dir(self, config_class, mandatory_kwargs):
r"""
Test if the config is correctly loaded with extra kwargs
"""
@@ -186,8 +192,8 @@ def test_from_pretrained_cache_dir_remote(self):
PeftConfig.from_pretrained("ybelkada/test-st-lora", cache_dir=tmp_dirname)
assert "models--ybelkada--test-st-lora" in os.listdir(tmp_dirname)

@pytest.mark.parametrize("config_class", ALL_CONFIG_CLASSES)
def test_save_pretrained_with_runtime_config(self, config_class):
@pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES)
def test_save_pretrained_with_runtime_config(self, config_class, mandatory_kwargs):
r"""
Test if the config correctly removes runtime config when saving
"""
@@ -201,10 +207,10 @@ def test_save_pretrained_with_runtime_config(self, config_class):
cfg = config_class.from_pretrained(tmp_dirname)
assert not cfg.runtime_config.ephemeral_gpu_offload

@pytest.mark.parametrize("config_class", ALL_CONFIG_CLASSES)
def test_set_attributes(self, config_class):
@pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES)
def test_set_attributes(self, config_class, mandatory_kwargs):
# manually set attributes and check if they are correctly written
config = config_class(peft_type="test")
config = config_class(peft_type="test", **mandatory_kwargs)

# save pretrained
with tempfile.TemporaryDirectory() as tmp_dirname:
@@ -213,24 +219,24 @@ def test_set_attributes(self, config_class):
config_from_pretrained = config_class.from_pretrained(tmp_dirname)
assert config.to_dict() == config_from_pretrained.to_dict()

@pytest.mark.parametrize("config_class", ALL_CONFIG_CLASSES)
def test_config_copy(self, config_class):
@pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES)
def test_config_copy(self, config_class, mandatory_kwargs):
# see https://github.com/huggingface/peft/issues/424
config = config_class()
config = config_class(**mandatory_kwargs)
copied = copy.copy(config)
assert config.to_dict() == copied.to_dict()

@pytest.mark.parametrize("config_class", ALL_CONFIG_CLASSES)
def test_config_deepcopy(self, config_class):
@pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES)
def test_config_deepcopy(self, config_class, mandatory_kwargs):
# see https://github.com/huggingface/peft/issues/424
config = config_class()
config = config_class(**mandatory_kwargs)
copied = copy.deepcopy(config)
assert config.to_dict() == copied.to_dict()

@pytest.mark.parametrize("config_class", ALL_CONFIG_CLASSES)
def test_config_pickle_roundtrip(self, config_class):
@pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES)
def test_config_pickle_roundtrip(self, config_class, mandatory_kwargs):
# see https://github.com/huggingface/peft/issues/424
config = config_class()
config = config_class(**mandatory_kwargs)
copied = pickle.loads(pickle.dumps(config))
assert config.to_dict() == copied.to_dict()

@@ -371,14 +377,14 @@ def test_adalora_config_timing_bounds_error(self, timing_kwargs):

assert "The supplied schedule values don't allow for a budgeting phase" in str(e)

@pytest.mark.parametrize("config_class", ALL_CONFIG_CLASSES)
def test_from_pretrained_forward_compatible(self, config_class, tmp_path, recwarn):
@pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES)
def test_from_pretrained_forward_compatible(self, config_class, mandatory_kwargs, tmp_path, recwarn):
"""
Make it possible to load configs that contain unknown keys by ignoring them.
The idea is to make PEFT configs forward-compatible with future versions of the library.
"""
config = config_class()
config = config_class(**mandatory_kwargs)
config.save_pretrained(tmp_path)
# add a spurious key to the config
with open(tmp_path / "adapter_config.json") as f:
@@ -398,8 +404,8 @@ def test_from_pretrained_forward_compatible(self, config_class, tmp_path, recwar
assert config.to_dict() == config_from_pretrained.to_dict()
assert isinstance(config_from_pretrained, config_class)

@pytest.mark.parametrize("config_class", ALL_CONFIG_CLASSES)
def test_from_pretrained_sanity_check(self, config_class, tmp_path):
@pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES)
def test_from_pretrained_sanity_check(self, config_class, mandatory_kwargs, tmp_path):
"""Following up on the previous test about forward compatibility, we *don't* want any random json to be accepted as
a PEFT config. There should be a minimum set of required keys.
"""
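
The bulk of the tests/test_config.py churn above is a parametrization refactor: ALL_CONFIG_CLASSES becomes a tuple of `(config_class, mandatory_kwargs)` pairs so that every shared test can instantiate configs that have required arguments. A standalone sketch of that pattern, with an illustrative `DummyConfig` standing in for `AdaLoraConfig`:

```python
# Standalone illustration of the (config_class, mandatory_kwargs) pattern;
# DummyConfig is a stand-in and not part of PEFT.
import pytest


class DummyConfig:
    def __init__(self, total_step=None):
        if total_step is None:
            raise ValueError("total_step is mandatory")
        self.total_step = total_step


CONFIG_CASES = ((DummyConfig, {"total_step": 1}),)


@pytest.mark.parametrize("config_class, mandatory_kwargs", CONFIG_CASES)
def test_can_instantiate(config_class, mandatory_kwargs):
    config = config_class(**mandatory_kwargs)
    assert config.total_step == 1
```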
16 changes: 8 additions & 8 deletions tests/test_custom_models.py
@@ -538,15 +538,15 @@
"AdaLora Same",
"adalora",
AdaLoraConfig,
{"target_modules": ["lin0"], "init_lora_weights": False, "inference_mode": True},
{"target_modules": ["lin0"], "init_lora_weights": False, "inference_mode": True},
{"target_modules": ["lin0"], "init_lora_weights": False, "inference_mode": True, "total_step": 1},
{"target_modules": ["lin0"], "init_lora_weights": False, "inference_mode": True, "total_step": 1},
),
(
"AdaLora Different",
"adalora",
AdaLoraConfig,
{"target_modules": ["lin0"], "init_lora_weights": False, "inference_mode": True},
{"target_modules": ["lin1"], "init_lora_weights": False, "inference_mode": True},
{"target_modules": ["lin0"], "init_lora_weights": False, "inference_mode": True, "total_step": 1},
{"target_modules": ["lin1"], "init_lora_weights": False, "inference_mode": True, "total_step": 1},
),
(
"FourierFT Same",
@@ -2405,10 +2405,10 @@ def test_requires_grad_ia3_same_targets(self):

def test_requires_grad_adalora_different_targets(self):
# test two different AdaLora adapters that target different modules
config0 = AdaLoraConfig(target_modules=["lin0"])
config0 = AdaLoraConfig(target_modules=["lin0"], total_step=1)
peft_model = get_peft_model(MLP(), config0)

config1 = AdaLoraConfig(target_modules=["lin1"], inference_mode=True)
config1 = AdaLoraConfig(target_modules=["lin1"], total_step=1, inference_mode=True)
peft_model.add_adapter("adapter1", config1)

# active adapter is still "default"
@@ -2451,10 +2451,10 @@ def test_requires_grad_adalora_different_targets(self):

def test_requires_grad_adalora_same_targets(self):
# same as previous test, except that AdaLora adapters target the same layer
config0 = AdaLoraConfig(target_modules=["lin0"])
config0 = AdaLoraConfig(target_modules=["lin0"], total_step=1)
peft_model = get_peft_model(MLP(), config0)

config1 = AdaLoraConfig(target_modules=["lin0"], inference_mode=True)
config1 = AdaLoraConfig(target_modules=["lin0"], total_step=1, inference_mode=True)
peft_model.add_adapter("adapter1", config1)

# active adapter is still "default"
1 change: 1 addition & 0 deletions tests/test_decoder_models.py
@@ -453,6 +453,7 @@ def test_generate_adalora_no_dropout(self):
"target_modules": None,
"task_type": "CAUSAL_LM",
"lora_dropout": 0.0,
"total_step": 1,
}
self._test_generate(model_id, AdaLoraConfig, config_kwargs)

8 changes: 4 additions & 4 deletions tests/test_initialization.py
@@ -1352,16 +1352,16 @@ class TestAdaLoraInitialization:
torch_device = infer_device()

def test_adalora_target_modules_set(self):
config = AdaLoraConfig(target_modules=["linear", "embed", "conv2d"])
config = AdaLoraConfig(target_modules=["linear", "embed", "conv2d"], total_step=1)
assert config.target_modules == {"linear", "embed", "conv2d"}

def test_adalora_use_dora_raises(self):
with pytest.raises(ValueError, match="ADALORA does not support DoRA"):
AdaLoraConfig(use_dora=True)
AdaLoraConfig(use_dora=True, total_step=1)

def test_adalora_loftq_config_raises(self):
with pytest.raises(ValueError, match="ADALORA does not support LOFTQ"):
AdaLoraConfig(init_lora_weights="loftq", loftq_config={"loftq": "config"})
AdaLoraConfig(init_lora_weights="loftq", loftq_config={"loftq": "config"}, total_step=1)

def get_model(self):
class MyModule(nn.Module):
@@ -1385,7 +1385,7 @@ def test_adalora_default_init_identity(self, data):

model = self.get_model()
output_before = model(data)
config = AdaLoraConfig(target_modules=["linear"])
config = AdaLoraConfig(target_modules=["linear"], total_step=1)
model = get_peft_model(model, config)
output_after = model(data)
assert torch.allclose(output_before, output_after)
2 changes: 1 addition & 1 deletion tests/test_torch_compile.py
@@ -61,7 +61,7 @@

# Mapping: name of the setting -> (Peft config instance, torch.compile kwargs)
SETTINGS = {
"adalora": (AdaLoraConfig(task_type=TaskType.CAUSAL_LM), {}),
"adalora": (AdaLoraConfig(task_type=TaskType.CAUSAL_LM, total_step=1), {}),
"boft": (BOFTConfig(task_type=TaskType.CAUSAL_LM), {}),
"dora": (LoraConfig(task_type=TaskType.CAUSAL_LM, use_dora=True), {}),
"ia3": (IA3Config(task_type=TaskType.CAUSAL_LM), {}),
1 change: 1 addition & 0 deletions tests/testing_common.py
@@ -92,6 +92,7 @@
# AdaLoRA
{
"target_modules": None,
"total_step": 1,
},
# BOFT
{
