
feat: dedicated llm type #3

Merged · 1 commit · Jul 15, 2024
langchain_llamacpp_chat_model/llama_chat_model.py (7 additions, 0 deletions)

@@ -8,6 +8,7 @@

 class LlamaChatModel(BaseChatOpenAI):
     model_name: str = "unknown"
+    llama: Llama = None

     def __init__(
         self,
@@ -19,3 +20,9 @@ def __init__(
             client=LLamaOpenAIClientProxy(llama=llama),
             async_client=LLamaOpenAIClientAsyncProxy(llama=llama),
         )
+        self.llama = llama
+
+    @property
+    def _llm_type(self) -> str:
+        """Return type of chat model."""
+        return self.llama.model_path
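A minimal usage sketch of what the new property returns (the model path below is hypothetical; constructing Llama requires a real GGUF file on disk). With this change, _llm_type identifies the chat model by the llama.cpp model file backing it, rather than the default inherited from BaseChatOpenAI:

from llama_cpp import Llama
from langchain_llamacpp_chat_model import LlamaChatModel

# Hypothetical local model file; any path accepted by llama-cpp-python works.
llama = Llama(model_path="/models/example.Q4_K_M.gguf")
chat_model = LlamaChatModel(llama=llama)

print(chat_model._llm_type)  # prints the backing model_path: "/models/example.Q4_K_M.gguf"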
tests/test_functional/models_configuration.py (2 additions, 2 deletions)

@@ -45,8 +45,8 @@ def _create_models_settings():
     return models


-def create_llama(request) -> Llama:
-    local_path = _model_local_path(request.param)
+def create_llama(params) -> Llama:
+    local_path = _model_local_path(params)

     return Llama(
         model_path=local_path,
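This signature change is what lets create_llama be called outside a parametrized pytest fixture; a short sketch under that assumption (the config keys beyond "repo_id" are not shown in this diff):

from tests.test_functional.models_configuration import create_llama, models_to_test

config = models_to_test[0]    # a model settings dict; "repo_id" is one known key
llama = create_llama(config)  # before this PR: create_llama(request) inside a fixture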
tests/test_functional/test_ainvoke.py (1 addition, 1 deletion)

@@ -19,7 +19,7 @@ class TestAInvoke:
         params=models_to_test, ids=[config["repo_id"] for config in models_to_test]
     )
     def llama(self, request) -> Llama:
-        return create_llama(request)
+        return create_llama(request.param)

     @pytest.fixture
     def instance(self, llama):
tests/test_functional/test_astream.py (1 addition, 1 deletion)

@@ -12,7 +12,7 @@ class TestAStream:
         params=models_to_test, ids=[config["repo_id"] for config in models_to_test]
     )
     def llama(self, request) -> Llama:
-        return create_llama(request)
+        return create_llama(request.param)

     @pytest.fixture
     def instance(self, llama):
tests/test_functional/test_invoke.py (1 addition, 1 deletion)

@@ -20,7 +20,7 @@ class TestInvoke:
         params=models_to_test, ids=[config["repo_id"] for config in models_to_test]
     )
     def llama(self, request) -> Llama:
-        return create_llama(request)
+        return create_llama(request.param)

     @pytest.fixture
     def instance(self, llama):
tests/test_functional/test_llm_type.py (new file, 20 additions)

@@ -0,0 +1,20 @@
+from llama_cpp import Llama
+import pytest
+from langchain_llamacpp_chat_model import LlamaChatModel
+from tests.test_functional.models_configuration import create_llama, models_to_test
+
+
+class TestInvoke:
+
+    @pytest.fixture()
+    def llama(self) -> Llama:
+
+        return create_llama(models_to_test[0])
+
+    @pytest.fixture
+    def instance(self, llama):
+        return LlamaChatModel(llama=llama)
+
+    def test_llm_type(self, instance: LlamaChatModel):
+        result = instance._llm_type
+        assert models_to_test[0]["repo_id"] in result
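To exercise just the new test, a standard pytest invocation works (assuming the functional-test model weights are available or can be downloaded locally):

pytest tests/test_functional/test_llm_type.py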
tests/test_functional/test_stream.py (1 addition, 1 deletion)

@@ -12,7 +12,7 @@ class TestStream:
         params=models_to_test, ids=[config["repo_id"] for config in models_to_test]
     )
     def llama(self, request) -> Llama:
-        return create_llama(request)
+        return create_llama(request.param)

     @pytest.fixture
     def instance(self, llama):