Test GenAI course code #293

Merged · merged 7 commits · Jan 20, 2025
7 changes: 5 additions & 2 deletions .gitignore
@@ -3,6 +3,7 @@
node_modules
.env
.env.*
+!.env.example
.DS_Store
server.py
**/.DS_Store
@@ -12,5 +13,7 @@ courses.zip
html.zip
build/html/*
tmp
-__pycache__
-.venv
+*.pyc
+__pycache__/
+.pytest_cache/
+.venv
@@ -0,0 +1,3 @@
CREATE FULLTEXT INDEX plotFulltext IF NOT EXISTS
FOR (m:Movie)
ON EACH [m.plot]
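For reference, once this index is online it can be searched with the `db.index.fulltext.queryNodes` procedure. A minimal sketch, not part of this diff, assuming the same LangChain `Neo4jGraph` connection the course code uses:

# Sketch: search the plotFulltext index created above
import os
from langchain_community.graphs import Neo4jGraph

graph = Neo4jGraph(
    url=os.getenv("NEO4J_URI"),
    username=os.getenv("NEO4J_USERNAME"),
    password=os.getenv("NEO4J_PASSWORD"),
)

# Returns the best-matching movies and their relevance scores
result = graph.query("""
CALL db.index.fulltext.queryNodes('plotFulltext', 'toys come to life')
YIELD node, score
RETURN node.title AS title, score
LIMIT 5
""")
print(result)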
@@ -0,0 +1,12 @@
LOAD CSV WITH HEADERS FROM "https://data.neo4j.com/rec-embed/movie-poster-embeddings-1k.csv" AS row
MATCH (m:Movie {movieId: row.movieId})
WITH row, m
CALL db.create.setNodeVectorProperty(m, 'posterEmbedding', apoc.convert.fromJsonList(row.posterEmbedding));

CREATE VECTOR INDEX moviePosters IF NOT EXISTS
FOR (m:Movie)
ON m.posterEmbedding
OPTIONS {indexConfig: {
`vector.dimensions`: 512,
`vector.similarity_function`: 'cosine'
}};
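Once the embeddings are loaded, the `moviePosters` index supports similarity search via `db.index.vector.queryNodes`. A sketch, not part of this diff, reusing the `graph` connection from the previous example; `query_embedding` is a hypothetical 512-dimension vector (for example, a CLIP embedding):

# Sketch: find the 5 movies whose poster embeddings are closest
# to the (assumed) query_embedding vector
result = graph.query("""
CALL db.index.vector.queryNodes('moviePosters', 5, $embedding)
YIELD node, score
RETURN node.title AS title, score
""", params={"embedding": query_embedding})
print(result)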
@@ -133,7 +133,7 @@
.Write Message Helper Function
[source,python]
----
-include::{repository-raw}/{branch}/utils.py[tag=write_message]
+include::{repository-raw}/{branch}/solutions/utils.py[tag=write_message]
----

The function accepts two positional arguments - the `role` of the author, either `human` or `assistant`, and the message.
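For illustration (a sketch, not part of this diff), a call to the helper described above might look like:

[source,python]
----
# Hypothetical usage of the write_message() helper
write_message("assistant", "Hi, I'm the GraphAcademy Chatbot!")
write_message("human", "What is Neo4j?")
----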
5 changes: 5 additions & 0 deletions asciidoc/courses/llm-fundamentals/.env.example
@@ -0,0 +1,5 @@
OPENAI_API_KEY="sk-..."
NEO4J_URI="bolt://"
NEO4J_USERNAME="neo4j"
NEO4J_PASSWORD=""
LANGSMITH_API_KEY="lsv2_..."
56 changes: 56 additions & 0 deletions asciidoc/courses/llm-fundamentals/conftest.py
@@ -0,0 +1,56 @@
import builtins
import importlib
import io
import sys

import pytest
from pytest import MonkeyPatch

from dotenv import load_dotenv

@pytest.fixture(autouse=True)
def load_env_vars():
    load_dotenv()

class TestHelpers:
    @staticmethod
    def run_module(monkeypatch: MonkeyPatch, module_name: str, input_values: list[str] = []) -> str:
        """
        Runs a module (.py file) to completion.
        stdin input_values can be provided as a list of strings.
        stdout output is returned as a string.
        """

        def mocked_input(prompt: str = "", return_values: list[str] = input_values):
            if len(return_values) == 0:
                raise Exception("Test error - Ran out of input values")
            return return_values.pop(0)

        mocked_stdout = io.StringIO()

        with monkeypatch.context() as m:
            m.setattr(builtins, "input", mocked_input)
            m.setattr(sys, "stdout", mocked_stdout)

            sys.modules.pop(module_name, None)
            importlib.import_module(name=module_name, package="files")

        return mocked_stdout.getvalue().strip()

    @staticmethod
    def run_cypher_file(graph, file_path):
        with open(file_path, "r") as file:
            cyphers = file.read()
        result = []
        for cypher in cyphers.split(";"):
            if cypher.strip() != "":
                result.append(TestHelpers.run_cypher(graph, cypher))
        return result

    @staticmethod
    def run_cypher(graph, cypher):
        return graph.query(cypher)

@pytest.fixture()
def test_helpers():
    return TestHelpers
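A test consuming the Cypher helpers might look like the following sketch; the `graph` fixture and the file name are hypothetical, and neither is defined in this diff:

# Sketch only - assumes a `graph` fixture providing a Neo4j connection
def test_create_indexes(test_helpers, graph):
    results = test_helpers.run_cypher_file(graph, "create-indexes.cypher")
    assert len(results) > 0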
@@ -1,3 +1,5 @@
+import os

from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor, create_react_agent
from langchain.tools import Tool
@@ -11,12 +13,14 @@
SESSION_ID = str(uuid4())
print(f"Session ID: {SESSION_ID}")

-llm = ChatOpenAI(openai_api_key="sk-...")
+llm = ChatOpenAI(
+    openai_api_key=os.getenv("OPENAI_API_KEY")
+)

graph = Neo4jGraph(
url="bolt://localhost:7687",
username="neo4j",
password="pleaseletmein"
url=os.getenv("NEO4J_URI"),
username=os.getenv("NEO4J_USERNAME"),
password=os.getenv("NEO4J_PASSWORD")
)

prompt = ChatPromptTemplate.from_messages(
@@ -53,9 +57,8 @@ def get_memory(session_id):
    history_messages_key="chat_history",
)

-while True:
-    q = input("> ")
+while (q := input("> ")) != "exit":

    response = cypher_agent.invoke(
        {
            "input": q
@@ -0,0 +1,8 @@
def test_example_application(test_helpers, monkeypatch):

    output = test_helpers.run_module(monkeypatch, "example_applcation", ["hello", "exit"])

    # Test that a response was received from the agent.
    # The output looks like: Session ID: #####\n[response from LLM]\n
    assert len(output.split("\n")) == 2

@@ -1,6 +1,7 @@
+import os
from langchain_openai import OpenAI

llm = OpenAI(openai_api_key="sk-...")
llm = OpenAI(openai_api_key=os.getenv("OPENAI_API_KEY"))

response = llm.invoke("What is Neo4j?")

Expand Down
@@ -0,0 +1,16 @@
import os
from langchain_openai import OpenAI

# tag::config[]
llm = OpenAI(
    openai_api_key=os.getenv("OPENAI_API_KEY"),
    model="gpt-3.5-turbo-instruct",
    temperature=0
)
# end::config[]

response = llm.invoke("What is Neo4j?")

print(response)


@@ -1,16 +1,23 @@
+import os
from langchain_openai import OpenAI
-from langchain.prompts import PromptTemplate
+# tag::import[]
+from langchain.prompts import PromptTemplate
+# end::import[]

llm = OpenAI(openai_api_key="sk-...")
llm = OpenAI(openai_api_key=os.getenv("OPENAI_API_KEY"))

+# tag::template[]
template = PromptTemplate(template="""
You are a cockney fruit and vegetable seller.
Your role is to assist your customer with their fruit and vegetable needs.
Respond using cockney rhyming slang.

Tell me about the following fruit: {fruit}
""", input_variables=["fruit"])
+# end::template[]

+# tag::invoke[]
response = llm.invoke(template.format(fruit="apple"))

-print(response)
+print(response)
+# end::invoke[]
@@ -0,0 +1,17 @@
def test_llm(test_helpers, monkeypatch):

    output = test_helpers.run_module(monkeypatch, "llm")

    assert output > ""

def test_llm_prompt(test_helpers, monkeypatch):

    output = test_helpers.run_module(monkeypatch, "llm_prompt")

    assert output > ""

def test_llm_config(test_helpers, monkeypatch):

    output = test_helpers.run_module(monkeypatch, "llm_config")

    assert output > ""
@@ -60,18 +60,12 @@ Create a new Python program and copy this code into a new Python file.

[source,python]
----
-from langchain_openai import OpenAI
-
-llm = OpenAI(openai_api_key="sk-...")
-
-response = llm.invoke("What is Neo4j?")
-
-print(response)
+include::code/llm.py[tag=**]
----

[IMPORTANT]
.OpenAI API Key
-Remember to include your OpenAI API key in the `openai_api_key` parameter.
+Create an environment variable called `OPENAI_API_KEY` and set it to your OpenAI API key.
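One way to supply the variable in development, as a sketch (not part of this diff; assumes the `python-dotenv` package and a local `.env` file like the `.env.example` added above):

[source,python]
----
import os
from dotenv import load_dotenv

# Reads OPENAI_API_KEY (and any other settings) from a local .env file
load_dotenv()

print(os.getenv("OPENAI_API_KEY") is not None)  # True if the key is set
----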

Review the program and note the following:

@@ -109,24 +103,16 @@ Modify your program to use the prompt template:

[source,python]
----
-from langchain.prompts import PromptTemplate
+include::code/llm_prompt.py[tag=import]

-template = PromptTemplate(template="""
-You are a cockney fruit and vegetable seller.
-Your role is to assist your customer with their fruit and vegetable needs.
-Respond using cockney rhyming slang.
-
-Tell me about the following fruit: {fruit}
-""", input_variables=["fruit"])
+include::code/llm_prompt.py[tag=template]
----

Call the LLM, passing the formatted prompt template as the input:

[source,python]
----
-response = llm.invoke(template.format(fruit="apple"))
-
-print(response)
+include::code/llm_prompt.py[tag=invoke]
----

You use the `format` method to pass the parameters to the prompt, e.g. `fruit="apple"`. The input variables will be validated when the prompt is formatted, and a `KeyError` will be raised if any variables are missing from the input.
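For instance (a sketch, not part of this diff), formatting the template above without its expected variable raises the error:

[source,python]
----
# Raises KeyError: 'fruit' - the template declares `fruit` as an input variable
template.format()
----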
@@ -158,11 +144,7 @@ When you create the LLM, you can configure it with parameters such as the `temperature`

[source,python]
----
-llm = OpenAI(
-    openai_api_key="sk-...",
-    model="gpt-3.5-turbo-instruct",
-    temperature=0
-)
+include::code/llm_config.py[tag=config]
----

When selecting a model, it is worth considering the quality of the output and the cost per token. There are several link:https://platform.openai.com/docs/models/overview[OpenAI models^] available, each with different characteristics.
@@ -1,7 +1,10 @@
+import os
from langchain_openai import OpenAI
from langchain.prompts import PromptTemplate

llm = OpenAI(openai_api_key="sk-...")
llm = OpenAI(
openai_api_key=os.getenv("OPENAI_API_KEY")
)

template = PromptTemplate.from_template("""
You are a cockney fruit and vegetable seller.
@@ -1,11 +1,13 @@
+import os
from langchain_openai import OpenAI
from langchain.prompts import PromptTemplate
+# tag::import[]
from langchain.schema import StrOutputParser
+# end::import[]

llm = OpenAI(
openai_api_key="sk-...")
openai_api_key=os.getenv("OPENAI_API_KEY")
)

template = PromptTemplate.from_template("""
You are a cockney fruit and vegetable seller.
@@ -1,11 +1,13 @@
+import os
from langchain_openai import OpenAI
from langchain.prompts import PromptTemplate
+# tag::import[]
from langchain.output_parsers.json import SimpleJsonOutputParser
+# end::import[]

llm = OpenAI(
openai_api_key="sk-...")
openai_api_key=os.getenv("OPENAI_API_KEY")
)

template = PromptTemplate.from_template("""
You are a cockney fruit and vegetable seller.
@@ -0,0 +1,11 @@
def test_llm_chain(test_helpers, monkeypatch):

    assert test_helpers.run_module(monkeypatch, "llm_chain") > ""

def test_llm_chain_output(test_helpers, monkeypatch):

    assert test_helpers.run_module(monkeypatch, "llm_chain_output") > ""

def test_llm_chain_output_json(test_helpers, monkeypatch):

    assert test_helpers.run_module(monkeypatch, "llm_chain_output_json") > ""
@@ -1,8 +1,11 @@
+import os
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser

-chat_llm = ChatOpenAI(openai_api_key="sk-...")
+chat_llm = ChatOpenAI(
+    openai_api_key=os.getenv("OPENAI_API_KEY")
+)

prompt = ChatPromptTemplate.from_messages(
    [
@@ -1,8 +1,11 @@
+import os
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser

-chat_llm = ChatOpenAI(openai_api_key="sk-...")
+chat_llm = ChatOpenAI(
+    openai_api_key=os.getenv("OPENAI_API_KEY")
+)

prompt = ChatPromptTemplate.from_messages(
    [
@@ -1,11 +1,12 @@
# tag::import[]
+import os
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, SystemMessage
# end::import[]

# tag::llm[]
chat_llm = ChatOpenAI(
openai_api_key="sk-..."
openai_api_key=os.getenv("OPENAI_API_KEY")
)
# end::llm[]

@@ -0,0 +1,11 @@
def test_chat_model(test_helpers, monkeypatch):

    assert test_helpers.run_module(monkeypatch, "chat-model") > ""

def test_chat_model_chain(test_helpers, monkeypatch):

    assert test_helpers.run_module(monkeypatch, "chat-model-chain") > ""

def test_chat_model_context(test_helpers, monkeypatch):

    assert test_helpers.run_module(monkeypatch, "chat-model-context") > ""