diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 12a565c..c4e5e3f 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -22,6 +22,8 @@ jobs: run: | WS_DIR=`pwd` bash start_sandbox.sh + cd ${WS_DIR}/memory + pytest tests/*.py cd ${WS_DIR}/agent pytest tests/*.py cd ${WS_DIR}/sdk diff --git a/install_package.sh b/install_package.sh index b4b15a3..bb6e49e 100644 --- a/install_package.sh +++ b/install_package.sh @@ -5,6 +5,7 @@ WORKDIR=`pwd` cd ${WORKDIR}/proto && make && pip install . cd ${WORKDIR}/sdk && pip install . +cd ${WORKDIR}/memory && pip install . cd ${WORKDIR}/kernel && pip install . cd ${WORKDIR}/agent && pip install . cd ${WORKDIR}/chat && pip install . diff --git a/memory/setup.py b/memory/setup.py index 82a56c4..445ce5a 100644 --- a/memory/setup.py +++ b/memory/setup.py @@ -19,15 +19,16 @@ packages=[ "og_memory", + "og_memory.template", ], package_dir={ "og_memory": "src/og_memory", + "og_memory.template": "src/og_memory/template", }, - install_requires=[ "og_proto", "Jinja2", ], - package_data={}, + package_data={"og_memory.template": ["*.jinja"]}, ) diff --git a/memory/src/og_memory/memory.py b/memory/src/og_memory/memory.py new file mode 100644 index 0000000..1b2e6b6 --- /dev/null +++ b/memory/src/og_memory/memory.py @@ -0,0 +1,30 @@ +# vim:fenc=utf-8 + +# SPDX-FileCopyrightText: 2023 imotai +# SPDX-FileContributor: imotai +# +# SPDX-License-Identifier: Elastic-2.0 + +""" + +""" + + +# import the agent memory +from og_proto.memory_pb2 import AgentMemory +from jinja2 import Environment +from jinja2.loaders import PackageLoader + +env = Environment(loader=PackageLoader("og_memory", "template")) +context_tpl = env.get_template("agent.jinja") + +def agent_memory_to_context(memory: AgentMemory): + """ + Convert the agent memory to context + :param memory : AgentMemory + :return: string context for llm + """ + return context_tpl.render(prompt=memory.instruction, guides=memory.guide_memory) + + + diff --git 
a/memory/src/og_memory/template/__init__.py b/memory/src/og_memory/template/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/memory/src/og_memory/template/agent.openai.jinja b/memory/src/og_memory/template/agent.jinja similarity index 55% rename from memory/src/og_memory/template/agent.openai.jinja rename to memory/src/og_memory/template/agent.jinja index 3e7e94c..6437ff8 100644 --- a/memory/src/og_memory/template/agent.openai.jinja +++ b/memory/src/og_memory/template/agent.jinja @@ -1,22 +1,23 @@ {#the role description#} {{prompt.role}} - {#the rule list#} {%if prompt.rules%} -You must follow the below rules +Follow the rules {% for rule in prompt.rules if rule%} {{loop.index}}.{{rule}} {% endfor%} {% endif %} - {%if prompt.actions%} -You can use the following actions to help you finishing your task +Use the following actions to help you finish your task {% for action in prompt.actions if action%} {{loop.index}}.{{action.name}}: {{action.desc}} {% endfor%} {% endif %} - {%if guides%} -Here are the instructions for the tools and libraries you recently used. - +The instructions for the tools and libraries you recently used. 
+{% for guide in guides if guide%} +{{loop.index}}.{{guide.name}} +{{guide.what_it_can_do}} +{{guide.how_to_use}} +{% endfor%} {% endif %} diff --git a/memory/tests/memory_tests.py b/memory/tests/memory_tests.py new file mode 100644 index 0000000..8f3d9c6 --- /dev/null +++ b/memory/tests/memory_tests.py @@ -0,0 +1,30 @@ +# vim:fenc=utf-8 + +# SPDX-FileCopyrightText: 2023 imotai +# SPDX-FileContributor: imotai +# +# SPDX-License-Identifier: Elastic-2.0 + +""" + +""" +from og_memory.memory import agent_memory_to_context +from og_proto.memory_pb2 import AgentMemory, ChatMessage, GuideMemory, Feedback +from og_proto.prompt_pb2 import AgentPrompt, ActionDesc +# define a logger variable +import logging +logger = logging.getLogger(__name__) + +def test_agent_memory_to_context_smoke_test(): + """ + test the agent_memory_to_context for smoke test + """ + action = ActionDesc(name="execute_python_code", desc="run python code") + rules = ["To complete the goal, write a plan and execute it step-by-step, limiting the number of steps to five. 
the following are examples"] + prompt = AgentPrompt(actions=[action], rules=rules, + role="You are the QA engineer", + role_name="Kitty", output_format="") + agent_memory = AgentMemory(instruction=prompt, user_id = "1", user_name="tai", guide_memory=[], chat_memory=[], + memory_id="2") + context = agent_memory_to_context(agent_memory) + logger.info(context) diff --git a/sdk/tests/agent_sdk_tests.py b/sdk/tests/agent_sdk_tests.py index d0391f5..2165382 100644 --- a/sdk/tests/agent_sdk_tests.py +++ b/sdk/tests/agent_sdk_tests.py @@ -31,7 +31,6 @@ async def agent_sdk(): yield sdk await sdk.close() - def test_connect_bad_endpoint(): try: sdk = AgentSDK("xxx", api_key) @@ -41,6 +40,18 @@ def test_connect_bad_endpoint(): assert 1 +@pytest.mark.asyncio +async def test_ping_test_with_bad_kernel_api_key(agent_sdk): + """ + the ping method will throw an exception if the kernel api key is not valid + """ + try: + await agent_sdk.add_kernel("bad_kernel_api_key", "127.0.0.1:9527") + response = await agent_sdk.ping() + assert 0, "should not go here" + except Exception as ex: + assert 1 + @pytest.mark.asyncio async def test_ping_test(agent_sdk): try: diff --git a/up/src/og_up/up.py b/up/src/og_up/up.py index e0082e7..e67961a 100644 --- a/up/src/og_up/up.py +++ b/up/src/og_up/up.py @@ -572,6 +572,7 @@ def start_octogen_for_azure_openai( return False + def start_octogen_for_codellama( live, segments, diff --git a/up/tests/up_tests.py b/up/tests/up_tests.py index f821f12..a04fddc 100644 --- a/up/tests/up_tests.py +++ b/up/tests/up_tests.py @@ -171,7 +171,7 @@ def test_start_azure_openai_smoketest(): kernel_key = random_str(32) with Live(Group(*segments), console=console) as live: generate_kernel_env(live, segments, install_dir, kernel_key) - code = load_docker_image("v0.4.43", "dbpunk/octogen", live, segments) + code = load_docker_image("v0.5.1", "dbpunk/octogen", live, segments) assert code == 0, "bad result code of loading docker image" result = start_octogen_for_azure_openai( 
live, @@ -181,7 +181,7 @@ def test_start_azure_openai_smoketest(): admin_key, kernel_key, "dbpunk/octogen", - "v0.4.43", + "v0.5.1", "azure_open_api_key", "test_deployment", "https://azure_base", @@ -200,7 +200,7 @@ def test_start_openai_smoketest(): kernel_key = random_str(32) with Live(Group(*segments), console=console) as live: generate_kernel_env(live, segments, install_dir, kernel_key) - code = load_docker_image("v0.4.43", "dbpunk/octogen", live, segments) + code = load_docker_image("v0.5.1", "dbpunk/octogen", live, segments) assert code == 0, "bad result code of loading docker image" result = start_octogen_for_openai( live, @@ -210,7 +210,7 @@ def test_start_openai_smoketest(): admin_key, kernel_key, "dbpunk/octogen", - "v0.4.43", + "v0.5.1", "openai_api_key", "gpt-3.5-turbo", ) @@ -227,7 +227,7 @@ def test_start_codellama_smoketest(): kernel_key = random_str(32) with Live(Group(*segments), console=console) as live: generate_kernel_env(live, segments, install_dir, kernel_key) - code = load_docker_image("v0.4.43", "dbpunk/octogen", live, segments) + code = load_docker_image("v0.5.1", "dbpunk/octogen", live, segments) assert code == 0, "bad result code of loading docker image" result = start_octogen_for_codellama( live, @@ -239,7 +239,7 @@ def test_start_codellama_smoketest(): admin_key, kernel_key, "dbpunk/octogen", - "v0.4.43", + "v0.5.1", ) assert result @@ -258,5 +258,5 @@ def test_load_valid_docker_image(): console = Console() segments = [] with Live(Group(*segments), console=console) as live: - code = load_docker_image("v0.4.43", "dbpunk/octogen", live, segments) + code = load_docker_image("v0.5.1", "dbpunk/octogen", live, segments) assert code == 0, "loading image should be ok"