Implement chat history demo
klntsky committed Nov 17, 2024
1 parent 320629c commit 6317133
Showing 5 changed files with 72 additions and 10 deletions.
3 changes: 2 additions & 1 deletion README.md
@@ -58,6 +58,7 @@ This is an early work-in-progress. Follow [me on twitter](https://x.com/klntsky)
- [x] `[# comments]`
- [ ] `[:status some-status]` - to show during prompt evaluation
- [ ] `[:call ffi-function :param1=foo :param2=bar]`
- [ ] syntax for ignoring `$` output
- [ ] Implement an evaluator
- [x] meta-prompting
- [x] conditionals
@@ -70,7 +71,7 @@ This is an early work-in-progress. Follow [me on twitter](https://x.com/klntsky)
- [ ] Runtime system
- [x] Support variable definition at runtime
- [x] dynamic model switching (via `MODEL` variable - [example](./examples/model-change.metaprompt))
- [ ] Multiple chat instances and ability to switch between them, to distribute data between chat contexts. E.g. `[chat1$ the object is the moon][chat2$ the object is the sun][chat1$ what is the object?]`
- [x] Multiple chat instances and ability to switch between them, to distribute data between chat contexts. E.g. `[chat1$ the object is the moon][chat1$ what is the object?]` [(example)](./examples/chat-history.metaprompt)
- [ ] message role system (system, user)
- [ ] exceptions
- [ ] throwing exceptions
19 changes: 19 additions & 0 deletions examples/chat-history.metaprompt
@@ -0,0 +1,19 @@
[:_=[chat1$ the $OBJECT is a car. remember this]]
[:_=[chat2$ the $OBJECT is an apple. remember this]]
[:_=[chat1$ the $ACTION is "ride". remember this]]
[:_=[chat2$ the $ACTION is "eat". remember this]]
[:question=
Combine the $ACTION and the $OBJECT
into a single phrase. Give me just the phrase,
no other output
]
chat1: [chat1$ [:question]]
chat2: [chat2$ [:question]]

Chat history for chat1:

[:chat1]

Chat history for chat2:

[:chat2]
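
The example reads roughly as follows: each `[chatN$ ...]` block sends a prompt to a named chat context and records the exchange in that context's history, so the two final questions are answered against different facts, and `[:chat1]` / `[:chat2]` expand to the recorded transcripts. A minimal Python sketch of the idea (names and stubbed replies are illustrative, not the project's API):

```python
# Two chat contexts accumulate different facts, so the same question
# gets different answers. Replies are stubbed, not model output.
chats = {"chat1": [], "chat2": []}

def tell(chat_id, user_text, assistant_reply):
    # In the real evaluator the assistant reply is streamed from the model.
    chats[chat_id].append({"role": "user", "content": user_text})
    chats[chat_id].append({"role": "assistant", "content": assistant_reply})

tell("chat1", "the $OBJECT is a car. remember this", "Noted: the object is a car.")
tell("chat2", "the $OBJECT is an apple. remember this", "Noted: the object is an apple.")

# A later [chat1$ what is the object?] call passes chats["chat1"] as history,
# so it is answered with "a car"; the same question on chat2 yields "an apple".
```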
51 changes: 46 additions & 5 deletions python/src/eval.py
@@ -11,8 +11,18 @@
"""


def serialize_chat_history(chat_history):
res = ""
for chat_item in chat_history:
role = chat_item["role"]
content = chat_item["content"]
res += f"[{role}]: {content}\n"
return res


async def eval_ast(ast, config, runtime):
env = Env(**config.parameters)
chats = dict()
default_model = config.providers.get_default_model()
if default_model is not None:
env.set("MODEL", default_model.strip())
@@ -47,9 +57,11 @@ def get_current_model_provider():
raise ValueError(f"Model not available: {model_name}")
return provider

async def stream_invoke(prompt: str) -> AsyncGenerator[str, None]:
async def stream_invoke(
prompt: str, history=[]
) -> AsyncGenerator[str, None]:
provider = get_current_model_provider()
async for chunk in provider.ainvoke(prompt, "user"):
async for chunk in provider.ainvoke(prompt, "user", history):
yield chunk

async def invoke(self, prompt: str) -> str:
@@ -59,7 +71,7 @@ async def invoke(self, prompt: str) -> str:
return res

async def _eval_ast(ast):
nonlocal env, runtime
nonlocal env, runtime, chats
if isinstance(ast, list):
# TODO: is this case needed?
async for chunk in _eval_exprs(ast):
@@ -91,28 +103,57 @@ async def _eval_ast(ast):
evaluated_parameters[parameter] = await _collect_exprs(
parameters[parameter]
)
# save parent state in a closure
old_env = env
old_chats = chats
# prepare new state
if "MODEL" not in evaluated_parameters:
evaluated_parameters["MODEL"] = get_model()
env = Env(evaluated_parameters)
chats = {}
# recurse
async for chunk in _eval_ast(loaded_ast):
yield chunk
# restore parent state
env = old_env
chats = old_chats
elif ast["type"] == "assign":
var_name = ast["name"]
value = (await _collect_exprs(ast["exprs"])).strip()
if var_name == "STATUS":
runtime.set_status(value)
env.set(var_name, value)
elif ast["type"] == "meta":
# Load chat history
chat_id = ast["chat"] if "chat" in ast else None
if chat_id is not None:
if chat_id not in chats:
chats[chat_id] = []
# evaluate the prompt
chunks = []
for expr in ast["exprs"]:
async for chunk in _eval_ast(expr):
chunks.append(chunk)
prompt = "".join(chunks)
async for chunk in stream_invoke(prompt):
# collect the assistant response
assistant_response = ""
async for chunk in stream_invoke(
prompt, chats[chat_id] if chat_id in chats else []
):
assistant_response += chunk
yield chunk
# update chat history
if chat_id is not None:
chats[chat_id].append(
{
"role": "user", # TODO: use current role
"content": prompt,
}
)
chats[chat_id].append(
{"role": "assistant", "content": assistant_response}
)
env.set(chat_id, serialize_chat_history(chats[chat_id]))
elif ast["type"] == "exprs":
for expr in ast["exprs"]:
async for chunk in _eval_ast(expr):
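
In `eval.py`, each `meta` node that names a chat now sends the accumulated history along with the evaluated prompt, collects the streamed assistant response, appends both turns to that chat's history, and exposes the transcript under the chat's name via `serialize_chat_history`. A small usage sketch of that serializer (the messages are illustrative):

```python
def serialize_chat_history(chat_history):
    # Same formatting as the function added above: one "[role]: content" line per turn.
    res = ""
    for chat_item in chat_history:
        res += f"[{chat_item['role']}]: {chat_item['content']}\n"
    return res

history = [
    {"role": "user", "content": "the $OBJECT is a car. remember this"},
    {"role": "assistant", "content": "Understood."},
]
print(serialize_chat_history(history), end="")
# [user]: the $OBJECT is a car. remember this
# [assistant]: Understood.
```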
3 changes: 2 additions & 1 deletion python/src/providers/interactive.py
@@ -26,7 +26,8 @@ def __init__(self, api_key: str = None):
async def ainvoke(
self,
prompt: str,
role: str = "user"
role: str = "user",
history = [] # TODO: make interactive provider respect history?
) -> AsyncGenerator[str, None]:
"""Asynchronously invoke the OpenAI API and yield results in chunks.
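
The interactive provider accepts the new `history` parameter only to keep the call signature uniform; per the TODO it does not use it yet. One plausible way to respect it — hypothetical, not part of this commit — would be to show prior turns to the human operator before prompting:

```python
def print_history(history):
    # Hypothetical helper: display prior turns to the human acting as the model.
    for item in history:
        print(f"[{item['role']}]: {item['content']}")
```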
6 changes: 3 additions & 3 deletions python/src/providers/openai.py
@@ -3,7 +3,7 @@

import openai
import os
from typing import AsyncGenerator
from typing import AsyncGenerator, List


class OpenAIProvider(ProviderConfig):
@@ -44,7 +44,7 @@ def __init__(self, api_key: str = None, model: str = "gpt-4"):
"API key is required for OpenAI API. Specify OPENAI_API_KEY environment variable or provide an api_key argument"
)

async def ainvoke(self, prompt: str, role: str) -> AsyncGenerator[str, None]:
async def ainvoke(self, prompt: str, role: str, history: List[dict] = []) -> AsyncGenerator[str, None]:  # history items: {"role": str, "content": str}
"""Asynchronously invoke the OpenAI API and yield results in chunks.
Args:
@@ -58,7 +58,7 @@ async def ainvoke(self, prompt: str, role: str) -> AsyncGenerator[str, None]:
# TODO: use system message role for IF_PROMPT
stream = await client.chat.completions.create(
model="gpt-4",
messages=[{"role": role, "content": prompt}],
messages=history + [{"role": role, "content": prompt}],
stream=True,
)

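
With this change the OpenAI provider prepends the chat history to the request, so the model sees prior turns before the new prompt. A sketch of the resulting `messages` payload (contents illustrative):

```python
history = [
    {"role": "user", "content": "the $OBJECT is a car. remember this"},
    {"role": "assistant", "content": "Understood."},
]
prompt = "what is the object?"
messages = history + [{"role": "user", "content": prompt}]
# messages now holds the two prior turns followed by the new user prompt,
# which is what gets passed to client.chat.completions.create(...).
```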
