From 1dbcf4e11f352adb5e70b0f0d94207fd2ce7f4a7 Mon Sep 17 00:00:00 2001
From: xrsrke
Date: Tue, 28 Nov 2023 13:41:25 +0700
Subject: [PATCH] [Refactor] Use small bloom model in model partitioning's test

---
 tests/nn/pipeline_parallel/test_partitioner.py | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/tests/nn/pipeline_parallel/test_partitioner.py b/tests/nn/pipeline_parallel/test_partitioner.py
index fafc38c..19dc0e4 100644
--- a/tests/nn/pipeline_parallel/test_partitioner.py
+++ b/tests/nn/pipeline_parallel/test_partitioner.py
@@ -1,10 +1,11 @@
 import pytest
 import torch
 from transformers import (
-    AutoModelForCausalLM,
     AutoTokenizer,
     BloomConfig,
     BloomForCausalLM,
+    GPT2Config,
+    GPT2LMHeadModel,
 )

 from pipegoose.nn.pipeline_parallel.partitioner import UniformPartitioner
@@ -12,13 +13,9 @@


 def get_gpt2_and_tokenizer():
-    return AutoModelForCausalLM.from_pretrained("gpt2"), AutoTokenizer.from_pretrained("gpt2")
-
-
-def get_bloom_560m_and_tokenizer():
-    return AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m"), AutoTokenizer.from_pretrained(
-        "bigscience/bloom-560m"
-    )
+    model = GPT2LMHeadModel(config=GPT2Config(n_layer=6))
+    tokenizer = AutoTokenizer.from_pretrained("gpt2")
+    return model, tokenizer


 def get_bloom_and_tokenizer_with_6_layers():
@@ -87,7 +84,6 @@ def run_model_partitioner(
     [
         get_gpt2_and_tokenizer,
         get_bloom_and_tokenizer_with_6_layers,
-        get_bloom_560m_and_tokenizer,
     ],
 )
 def test_naive_partitioning(pipeline_parallel_size, model_retrieval_func):
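
Reviewer note (not part of the patch): the new get_gpt2_and_tokenizer builds a randomly initialized 6-layer GPT-2 from a config instead of downloading pretrained weights, which keeps the partitioner test fast and offline. The retained get_bloom_and_tokenizer_with_6_layers helper is outside this diff's context; given that the BloomConfig and BloomForCausalLM imports are kept, it presumably does the same for Bloom. A minimal sketch of that assumed shape, not the actual implementation:

# Sketch only: plausible body for the helper kept by this patch, mirroring
# the new GPT-2 helper. The real code is not shown in the diff, so the
# config values below are assumptions.
from transformers import AutoTokenizer, BloomConfig, BloomForCausalLM


def get_bloom_and_tokenizer_with_6_layers():
    # Randomly initialized 6-layer Bloom model instead of the pretrained
    # 560M-parameter checkpoint; only the tokenizer is fetched from the Hub.
    model = BloomForCausalLM(config=BloomConfig(n_layer=6))
    tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
    return model, tokenizer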