diff --git a/README.md b/README.md
index 1b723da45..f1e092da3 100644
--- a/README.md
+++ b/README.md
@@ -46,6 +46,7 @@ Each layer has a certain strength of communication inbuilt
 ✅ `GitHub actions` configured\
 ✅ `Vale.sh` configured at PR level\
 ✅ `Pre-commit hooks` configured for code linting/formatting\
+✅ [LangChain](https://python.langchain.com/) basics & workflows\
 ✅ Environment management via [pixi](https://prefix.dev/)\
 ✅ Reading data from online sources using [intake](https://github.com/intake/intake)\
 ✅ Sample pipeline built using [Dagster](https://github.com/dagster-io/dagster)\
@@ -54,7 +55,6 @@
 ✅ Web UI build on [Flask](https://flask.palletsprojects.com/en/3.0.x/) \
 ✅ Web UI re-done and expanded with [FastHTML](https://docs.fastht.ml/)\
 ✅ Leverage AI models to analyse data [GitHub AI models Beta](https://docs.github.com/en/github-models/prototyping-with-ai-models)
-✨ LangChain integration
 
 ### ☕️ Quickly getting started with DataJourney
 
diff --git a/analytics_framework/langchain/hello_world_lc.py b/analytics_framework/langchain/hello_world_lc.py
new file mode 100644
index 000000000..dae16fe52
--- /dev/null
+++ b/analytics_framework/langchain/hello_world_lc.py
@@ -0,0 +1,42 @@
+import os
+
+from langchain_core.prompts import PromptTemplate
+from langchain_openai import ChatOpenAI
+
+# NOTE: Using GitHub Models here (swap in your preferred token/endpoint)
+
+token = os.environ["GITHUB_TOKEN"]
+endpoint = "https://models.inference.ai.azure.com"
+model_name = "gpt-4o-mini"
+
+# Step 1: Configure the model client. gpt-4o-mini is a chat model, so
+# ChatOpenAI is used rather than the legacy completion-style OpenAI class.
+llm = ChatOpenAI(
+    base_url=endpoint,
+    api_key=token,
+    model=model_name,
+    temperature=0.7,
+)
+
+# Step 2: Define a Prompt Template
+prompt = PromptTemplate(
+    input_variables=["question"],
+    template="You are a helpful assistant. Answer the following question in a clear and concise way: {question}",
+)
+
+
+# Step 3: Create a Function to Get AI Responses
+def get_answer(question):
+    # Format the prompt with the input question
+    formatted_prompt = prompt.format(question=question)
+
+    # Use the LLM to generate a response; invoke() returns an AIMessage
+    response = llm.invoke(formatted_prompt)
+    return response.content
+
+
+# Example Usage
+if __name__ == "__main__":
+    question = "What is LangChain and why is it useful?"
+    answer = get_answer(question)
+    print("AI Speaks:", answer)
diff --git a/pixi.toml b/pixi.toml
index 46b390496..cbc28910d 100644
--- a/pixi.toml
+++ b/pixi.toml
@@ -55,5 +55,6 @@ DJ_panel_app = { cmd = "python stock_price_twilio_app.py", depends-on = "DJ_pack
 DJ_flask_app = { cmd = "python app.py", depends-on = "DJ_package", cwd = "analytics_framework/intake/web_ui_flask" }
 DJ_fasthtml_app = { cmd = "python app.py", depends-on = "DJ_package", cwd = "analytics_framework/intake/web_ui_fasthtml" }
 DJ_mito_app = { cmd = "jupyter notebook mito_exp.ipynb", depends-on = "DJ_package", cwd = "usage_guide"}
-DJ_llm_analysis_gpt_4o = {cmd = "python analyse_my_data__gpt_4o_mini.py", cwd= "analytics_framework/ai_modeling"}
-DJ_advance_llm_analysis = {cmd = "python advance_analysis_coral_bleeching.py", cwd= "analytics_framework/ai_modeling"}
+DJ_llm_analysis_gpt_4o = {cmd = "python analyse_my_data__gpt_4o_mini.py", cwd = "analytics_framework/ai_modeling"}
+DJ_advance_llm_analysis = {cmd = "python advance_analysis_coral_bleeching.py", cwd = "analytics_framework/ai_modeling"}
+DJ_hello_world_langchain = {cmd = "python hello_world_lc.py", cwd = "analytics_framework/langchain"}
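For anyone adapting `hello_world_lc.py`, newer LangChain releases favor composing the prompt, model, and an output parser with the LCEL `|` operator rather than formatting and calling the model by hand. Below is a minimal sketch of the same hello-world flow in that style (an alternative, not part of the diff; it assumes `langchain-core` and `langchain-openai` are installed and `GITHUB_TOKEN` is exported, as in the file above):

```python
import os

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI

# Same GitHub Models client as hello_world_lc.py (endpoint and model
# taken from the diff above; GITHUB_TOKEN must be set in the environment).
llm = ChatOpenAI(
    base_url="https://models.inference.ai.azure.com",
    api_key=os.environ["GITHUB_TOKEN"],
    model="gpt-4o-mini",
    temperature=0.7,
)

prompt = PromptTemplate.from_template(
    "You are a helpful assistant. Answer the following question "
    "in a clear and concise way: {question}"
)

# prompt | llm | parser builds a Runnable chain: invoke() feeds the dict
# into the prompt, the formatted prompt into the model, and parses the
# resulting AIMessage down to a plain string.
chain = prompt | llm | StrOutputParser()

if __name__ == "__main__":
    print(chain.invoke({"question": "What is LangChain and why is it useful?"}))
```

Either style runs under the same `DJ_hello_world_langchain` pixi task; the chain form just makes it easier to swap the model or add steps later.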