diff --git a/llama_stack/providers/tests/ci_test_config.yaml b/llama_stack/providers/tests/ci_test_config.yaml
new file mode 100644
index 0000000000..c4c6b23199
--- /dev/null
+++ b/llama_stack/providers/tests/ci_test_config.yaml
@@ -0,0 +1,24 @@
+tests:
+- path: inference/test_vision_inference.py
+  functions:
+  - test_vision_chat_completion_streaming
+  - test_vision_chat_completion_non_streaming
+
+- path: inference/test_text_inference.py
+  functions:
+  - test_structured_output
+  - test_chat_completion_streaming
+  - test_chat_completion_non_streaming
+  - test_chat_completion_with_tool_calling
+  - test_chat_completion_with_tool_calling_streaming
+
+inference_fixtures:
+  - ollama
+  - fireworks
+  - together
+  - tgi
+  - vllm_remote
+
+test_models:
+  text: meta-llama/Llama-3.1-8B-Instruct
+  vision: meta-llama/Llama-3.2-11B-Vision-Instruct
diff --git a/llama_stack/providers/tests/conftest.py b/llama_stack/providers/tests/conftest.py
index e319db1c6d..dbdbad033b 100644
--- a/llama_stack/providers/tests/conftest.py
+++ b/llama_stack/providers/tests/conftest.py
@@ -9,6 +9,8 @@
 from typing import Any, Dict, List, Optional
 
 import pytest
+
+import yaml
 from dotenv import load_dotenv
 from pydantic import BaseModel
 from termcolor import colored
@@ -69,6 +71,11 @@ def pytest_addoption(parser):
             "Example: --providers inference=ollama,safety=meta-reference"
         ),
     )
+    parser.addoption(
+        "--config",
+        action="store",
+        help="Set test config file (supported format: YAML), e.g. --config=ci_test_config.yaml",
+    )
     """Add custom command line options"""
     parser.addoption(
         "--env", action="append", help="Set environment variables, e.g. --env KEY=value"
@@ -172,6 +179,45 @@ def pytest_itemcollected(item):
         item.name = f"{item.name}[{marks}]"
 
 
+def pytest_collection_modifyitems(session, config, items):
+    """Deselect collected tests that are not listed in the --config YAML file."""
+    if config.getoption("--config") is None:
+        return
+    file_name = config.getoption("--config")
+    config_file_path = Path(__file__).parent / file_name
+    if not config_file_path.exists():
+        raise ValueError(
+            f"Test config {file_name} was specified but not found. Please make sure it exists in the llama_stack/providers/tests directory."
+        )
+
+    # Map each test file path to the set of test function names that should run.
+    required_tests = dict()
+    inference_providers = set()
+    with open(config_file_path, "r") as config_file:
+        test_config = yaml.safe_load(config_file)
+        for test in test_config["tests"]:
+            required_tests[Path(__file__).parent / test["path"]] = set(
+                test["functions"]
+            )
+        inference_providers = set(test_config["inference_fixtures"])
+
+    new_items, deselected_items = [], []
+    for item in items:
+        if item.fspath in required_tests:
+            func_name = getattr(item, "originalname", item.name)
+            if func_name in required_tests[item.fspath]:
+                # Non-parametrized items carry no callspec; treat them as unmatched.
+                callspec = getattr(item, "callspec", None)
+                inference = callspec.params.get("inference_stack") if callspec else None
+                if inference in inference_providers:
+                    new_items.append(item)
+                    continue
+        deselected_items.append(item)
+
+    items[:] = new_items
+    config.hook.pytest_deselected(items=deselected_items)
+
+
 pytest_plugins = [
     "llama_stack.providers.tests.inference.fixtures",
     "llama_stack.providers.tests.safety.fixtures",
diff --git a/llama_stack/providers/tests/tools/conftest.py b/llama_stack/providers/tests/tools/conftest.py
index 11aad5ab66..525abe8ab5 100644
--- a/llama_stack/providers/tests/tools/conftest.py
+++ b/llama_stack/providers/tests/tools/conftest.py
@@ -34,21 +34,6 @@ def pytest_configure(config):
         )
 
 
-def pytest_addoption(parser):
-    parser.addoption(
-        "--inference-model",
-        action="store",
-        default="meta-llama/Llama-3.2-3B-Instruct",
-        help="Specify the inference model to use for testing",
-    )
-    parser.addoption(
-        "--safety-shield",
-        action="store",
-        default="meta-llama/Llama-Guard-3-1B",
-        help="Specify the safety shield to use for testing",
-    )
-
-
 def pytest_generate_tests(metafunc):
     if "tools_stack" in metafunc.fixturenames:
         available_fixtures = {