
Commit

Integrate CLIP into refactored test structure
TimoImhof committed Jan 16, 2025
1 parent f1b1136 commit 88f6230
Showing 6 changed files with 114 additions and 143 deletions.
@@ -1,68 +1,7 @@
import random

from transformers import CLIPConfig, CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionConfig

from .generator import *


class CLIPTextAdapterTestBase(TextAdapterTestBase):
    model_class = CLIPTextModel
    config_class = CLIPTextConfig
    config = make_config(
        CLIPTextConfig,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
    )
    tokenizer_name = "openai/clip-vit-base-patch32"


@require_torch
class CLIPTextAdapterTest(
    BottleneckAdapterTestMixin,
    CompacterTestMixin,
    IA3TestMixin,
    LoRATestMixin,
    PrefixTuningTestMixin,
    ReftTestMixin,
    UniPELTTestMixin,
    AdapterFusionModelTestMixin,
    CompabilityTestMixin,
    CLIPTextAdapterTestBase,
    unittest.TestCase,
):
    pass


class CLIPTextWithProjectionAdapterTestBase(TextAdapterTestBase):
    model_class = CLIPTextModelWithProjection
    config_class = CLIPTextConfig
    config = make_config(
        CLIPTextConfig,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
    )
    tokenizer_name = "openai/clip-vit-base-patch32"


@require_torch
class CLIPTextWithProjectionAdapterTest(
    BottleneckAdapterTestMixin,
    CompacterTestMixin,
    IA3TestMixin,
    LoRATestMixin,
    PrefixTuningTestMixin,
    ReftTestMixin,
    UniPELTTestMixin,
    AdapterFusionModelTestMixin,
    CompabilityTestMixin,
    CLIPTextWithProjectionAdapterTestBase,
    unittest.TestCase,
):
    pass
from tests.test_methods.generator import *
from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig


class CLIPAdapterTestBase(TextAdapterTestBase):
@@ -122,24 +61,19 @@ def get_input_samples(self, vocab_size=5000, config=None, dtype=torch.float, **k
    def add_head(self, *args, **kwargs):
        pass


@require_torch
class CLIPAdapterTest(
    BottleneckAdapterTestMixin,
    CompacterTestMixin,
    IA3TestMixin,
    LoRATestMixin,
    PrefixTuningTestMixin,
    ReftTestMixin,
    UniPELTTestMixin,
    AdapterFusionModelTestMixin,
    CompabilityTestMixin,
    CLIPAdapterTestBase,
    unittest.TestCase,
):
    def test_adapter_fusion_save_with_head(self):
        # This test is not applicable to CLIP
        self.skipTest("Not applicable to CLIP.")

    def test_load_adapter_setup(self):
        self.skipTest("Not applicable to CLIP.")


method_tests = generate_method_tests(
    model_test_base=CLIPAdapterTestBase,
    excluded_tests=["Embeddings", "Heads", "Composition", "ClassConversion", "PromptTuning", "ConfigUnion"],
)


for test_class_name, test_class in method_tests.items():
    globals()[test_class_name] = test_class
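
The pattern above replaces the hand-written per-method test classes deleted earlier in this file: generate_method_tests builds one test class per adapter method, and the loop injects them into the module namespace so test discovery can find them. Its actual implementation lives in tests/test_methods/generator.py and is not part of this diff; the sketch below only illustrates the assumed mechanism, and the registry, helper name, and class-naming scheme are hypothetical.

# Hypothetical sketch of the generator pattern -- not the code in tests/test_methods/generator.py.
import unittest

def sketch_generate_method_tests(model_test_base, excluded_tests=(), mixin_registry=None):
    """Create one unittest.TestCase per adapter method by mixing a method test mixin into the base."""
    # Assumed mapping, e.g. {"LoRA": LoRATestMixin, "IA3": IA3TestMixin, ...}; the real registry may differ.
    mixin_registry = mixin_registry or {}
    generated = {}
    for method_name, mixin in mixin_registry.items():
        if method_name in excluded_tests:
            continue  # e.g. "Heads" or "PromptTuning" are excluded for the CLIP bases above
        class_name = f"{method_name}{model_test_base.__name__}"
        generated[class_name] = type(class_name, (mixin, model_test_base, unittest.TestCase), {})
    return generated

Assigning each generated class into globals(), as every new CLIP test module below also does, turns them into top-level names, which is what unittest and pytest collection rely on.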
25 changes: 25 additions & 0 deletions tests/test_methods/test_on_clip/test_textmodel.py
@@ -0,0 +1,25 @@
from tests.test_methods.generator import *
from transformers import CLIPTextConfig, CLIPTextModel


class CLIPTextAdapterTestBase(TextAdapterTestBase):
    model_class = CLIPTextModel
    config_class = CLIPTextConfig
    config = make_config(
        CLIPTextConfig,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
    )
    tokenizer_name = "openai/clip-vit-base-patch32"


method_tests = generate_method_tests(
    model_test_base=CLIPTextAdapterTestBase,
    excluded_tests=["Embeddings", "Heads", "Composition", "ClassConversion", "PromptTuning", "ConfigUnion"],
)


for test_class_name, test_class in method_tests.items():
    globals()[test_class_name] = test_class
25 changes: 25 additions & 0 deletions tests/test_methods/test_on_clip/test_textwithprojectionmodel.py
@@ -0,0 +1,25 @@
from tests.test_methods.generator import *
from transformers import CLIPTextConfig, CLIPTextModelWithProjection


class CLIPTextWithProjectionAdapterTestBase(TextAdapterTestBase):
    model_class = CLIPTextModelWithProjection
    config_class = CLIPTextConfig
    config = make_config(
        CLIPTextConfig,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
    )
    tokenizer_name = "openai/clip-vit-base-patch32"


method_tests = generate_method_tests(
    model_test_base=CLIPTextWithProjectionAdapterTestBase,
    excluded_tests=["Embeddings", "Heads", "Composition", "ClassConversion", "PromptTuning", "ConfigUnion"],
)


for test_class_name, test_class in method_tests.items():
    globals()[test_class_name] = test_class
26 changes: 26 additions & 0 deletions tests/test_methods/test_on_clip/test_visionmodel.py
@@ -0,0 +1,26 @@
from tests.test_methods.generator import *
from transformers import CLIPVisionConfig, CLIPVisionModel


class CLIPVisionAdapterTestBase(VisionAdapterTestBase):
    model_class = CLIPVisionModel
    config_class = CLIPVisionConfig
    config = make_config(
        CLIPVisionConfig,
        image_size=224,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
    )
    feature_extractor_name = "openai/clip-vit-base-patch32"


method_tests = generate_method_tests(
    model_test_base=CLIPVisionAdapterTestBase,
    excluded_tests=["Embeddings", "Heads", "Composition", "ClassConversion", "PromptTuning", "ConfigUnion"],
)


for test_class_name, test_class in method_tests.items():
    globals()[test_class_name] = test_class
26 changes: 26 additions & 0 deletions tests/test_methods/test_on_clip/test_visionwithprojectionmodel.py
@@ -0,0 +1,26 @@
from tests.test_methods.generator import *
from transformers import CLIPVisionConfig, CLIPVisionModelWithProjection


class CLIPVisionWithProjectionAdapterTestBase(VisionAdapterTestBase):
    model_class = CLIPVisionModelWithProjection
    config_class = CLIPVisionConfig
    config = make_config(
        CLIPVisionConfig,
        image_size=224,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
    )
    feature_extractor_name = "openai/clip-vit-base-patch32"


method_tests = generate_method_tests(
    model_test_base=CLIPVisionWithProjectionAdapterTestBase,
    excluded_tests=["Embeddings", "Heads", "Composition", "ClassConversion", "PromptTuning", "ConfigUnion"],
)


for test_class_name, test_class in method_tests.items():
    globals()[test_class_name] = test_class
65 changes: 0 additions & 65 deletions tests/test_methods/test_on_clip_vision.py

This file was deleted.
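
With the old monolithic test files removed, the CLIP adapter tests now live entirely under tests/test_methods/test_on_clip/ and are generated at import time. An illustrative way to collect and run just that package with the standard-library runner (paths assume execution from the repository root and an importable test package):

# Illustrative only: discover and run the generated CLIP adapter test classes.
import unittest

if __name__ == "__main__":
    suite = unittest.defaultTestLoader.discover("tests/test_methods/test_on_clip", pattern="test_*.py")
    unittest.TextTestRunner(verbosity=2).run(suite)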
