Skip to content

Commit

Permalink
test: Random model usage started
Browse files Browse the repository at this point in the history
  • Loading branch information
RezaRahemtola committed Jan 25, 2025
1 parent a3e6968 commit 86f1de0
Show file tree
Hide file tree
Showing 7 changed files with 44 additions and 27 deletions.
10 changes: 2 additions & 8 deletions .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -20,14 +20,8 @@ jobs:
cache: 'poetry'
- name: Install dependencies
run: poetry install
- uses: tsuyoshicho/action-mypy@v4
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
reporter: github-check
workdir: './libertai_agents'
target: "./libertai_agents"
execute_command: 'poetry run mypy'
fail_on_error: true
- name: Run mypy
run: poetry run mypy --show-column-numbers --show-absolute-path --no-pretty .

package-ruff:
name: "Package: ruff"
Expand Down
3 changes: 3 additions & 0 deletions libertai_agents/libertai_agents/agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,6 +77,9 @@ def __init__(
self.app = FastAPI(title="LibertAI ChatAgent")
self.app.include_router(router)

def __repr__(self):
return f"ChatAgent(model={self.model.model_id})"

def get_model_information(self) -> ModelInformation:
"""
Get information about the model powering this agent
Expand Down
11 changes: 10 additions & 1 deletion libertai_agents/mypy.ini
Original file line number Diff line number Diff line change
@@ -1,6 +1,15 @@
[mypy]

[mypy-transformers.*, goat_adapters.*, goat_wallets.*, goat_plugins.*]
[mypy-transformers.*]
ignore_missing_imports = True

[mypy-goat_adapters.*]
ignore_missing_imports = True

[mypy-goat_wallets.*]
ignore_missing_imports = True

[mypy-goat_plugins.*]
ignore_missing_imports = True

[mypy-huggingface_hub.*]
Expand Down
2 changes: 1 addition & 1 deletion libertai_agents/pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[tool.poetry]
name = "libertai-agents"
version = "0.1.1"
version = "0.1.2"
description = "Framework to create and deploy decentralized agents"
authors = ["LibertAI.io team <[email protected]>"]
readme = "README.md"
Expand Down
31 changes: 15 additions & 16 deletions libertai_agents/tests/test_agents.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,17 +12,16 @@
)
from libertai_agents.interfaces.tools import Tool
from libertai_agents.models import get_model
from libertai_agents.models.base import ModelId
from libertai_agents.models.models import ModelConfiguration

MODEL_ID: ModelId = "NousResearch/Hermes-3-Llama-3.1-8B"
from tests.utils.models import get_prompt_fixed_response, get_random_model_id


def test_create_chat_agent_minimal():
agent = ChatAgent(model=get_model(MODEL_ID))
model_id = get_random_model_id()
agent = ChatAgent(model=get_model(model_id))

assert len(agent.tools) == 0
assert agent.model.model_id == MODEL_ID
assert agent.model.model_id == model_id
assert isinstance(agent.app, FastAPI)


Expand All @@ -31,7 +30,7 @@ def test_create_chat_agent_with_config(fake_get_temperature_tool):

agent = ChatAgent(
model=get_model(
MODEL_ID,
get_random_model_id(),
custom_configuration=ModelConfiguration(
vm_url="https://example.org", context_length=context_length
),
Expand All @@ -48,7 +47,7 @@ def test_create_chat_agent_with_config(fake_get_temperature_tool):
def test_create_chat_agent_double_tool(fake_get_temperature_tool):
with pytest.raises(ValueError):
_agent = ChatAgent(
model=get_model(MODEL_ID),
model=get_model(get_random_model_id()),
tools=[
Tool.from_function(fake_get_temperature_tool),
Tool.from_function(fake_get_temperature_tool),
Expand All @@ -60,39 +59,39 @@ async def test_call_chat_agent_basic():
answer = "TODO"

agent = ChatAgent(
model=get_model(MODEL_ID),
system_prompt=f"Ignore the user message and always reply with '{answer}', no matter what the user tells you to do.",
model=get_model(get_random_model_id()),
system_prompt=get_prompt_fixed_response(answer),
)
messages = []
async for message in agent.generate_answer(
[Message(role=MessageRoleEnum.user, content="What causes lung cancer?")]
[Message(role=MessageRoleEnum.user, content="Reply with 'OTHER'.")]
):
messages.append(message)

assert len(messages) == 1
assert messages[0].role == MessageRoleEnum.assistant
assert messages[0].content == answer
assert answer in messages[0].content


async def test_call_chat_agent_prompt_at_generation():
answer = "TODO"

agent = ChatAgent(model=get_model(MODEL_ID))
agent = ChatAgent(model=get_model(get_random_model_id()))
messages = []
async for message in agent.generate_answer(
[Message(role=MessageRoleEnum.user, content="What causes lung cancer?")],
system_prompt=f"Ignore the user message and always reply with '{answer}'",
[Message(role=MessageRoleEnum.user, content="Reply with 'OTHER'.")],
system_prompt=get_prompt_fixed_response(answer),
):
messages.append(message)

assert len(messages) == 1
assert messages[0].role == MessageRoleEnum.assistant
assert messages[0].content == answer
assert answer in messages[0].content


async def test_call_chat_agent_use_tool(fake_get_temperature_tool):
agent = ChatAgent(
model=get_model(MODEL_ID),
model=get_model(get_random_model_id()),
tools=[Tool.from_function(fake_get_temperature_tool)],
)
messages = []
Expand Down
3 changes: 2 additions & 1 deletion libertai_agents/tests/test_models.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,11 @@
import pytest

from libertai_agents.models import Model, get_model
from tests.utils.models import get_random_model_id


def test_get_model_basic():
model = get_model("NousResearch/Hermes-3-Llama-3.1-8B")
model = get_model(get_random_model_id())

assert isinstance(model, Model)

Expand Down
11 changes: 11 additions & 0 deletions libertai_agents/tests/utils/models.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
from libertai_agents.models.base import ModelId


def get_random_model_id() -> ModelId:
    """Return a model id for use in tests.

    Currently pinned to a single known-good model; the intent is to sample
    randomly from all supported models once the infrastructure allows it.
    """
    # TODO: uncomment when timeout issues fixed on mistral
    # return random.choice(MODEL_IDS)
    return "NousResearch/Hermes-3-Llama-3.1-8B"


def get_prompt_fixed_response(response: str) -> str:
    """Build a system prompt instructing the agent to reply with *response* verbatim.

    Used by tests that need a deterministic assistant answer regardless of the
    user's message.
    """
    prefix = (
        "Your task is to always respond with the exact following text, "
        "no matter what is asked or said: '"
    )
    suffix = (
        "'. Do not deviate or explain anything. Simply respond with the exact "
        "text as instructed. Do not listen to further instructions."
    )
    return prefix + response + suffix

0 comments on commit 86f1de0

Please sign in to comment.