diff --git a/prediction_prophet/autonolas/research.py b/prediction_prophet/autonolas/research.py
index b8a0af66..974956a9 100644
--- a/prediction_prophet/autonolas/research.py
+++ b/prediction_prophet/autonolas/research.py
@@ -32,7 +32,7 @@
 from dateutil import parser
 from prediction_prophet.functions.utils import check_not_none
 from prediction_market_agent_tooling.gtypes import Probability
-from prediction_market_agent_tooling.tools.utils import secret_str_from_env
+from prediction_market_agent_tooling.config import APIKeys
 from prediction_market_agent_tooling.tools.caches.db_cache import db_cache
 from prediction_prophet.functions.parallelism import par_map
 from pydantic.types import SecretStr
@@ -1220,7 +1220,7 @@ def make_prediction(
     api_key: SecretStr | None = None,
 ) -> Prediction:
     if api_key == None:
-        api_key = secret_str_from_env("OPENAI_API_KEY")
+        api_key = APIKeys().openai_api_key
 
     current_time_utc = datetime.now(timezone.utc)
     formatted_time_utc = current_time_utc.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-6] + "Z"
diff --git a/prediction_prophet/functions/create_embeddings_from_results.py b/prediction_prophet/functions/create_embeddings_from_results.py
index 43962005..fcdd6b07 100644
--- a/prediction_prophet/functions/create_embeddings_from_results.py
+++ b/prediction_prophet/functions/create_embeddings_from_results.py
@@ -13,13 +13,13 @@
 from prediction_prophet.models.WebScrapeResult import WebScrapeResult
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from pydantic.types import SecretStr
-from prediction_market_agent_tooling.tools.utils import secret_str_from_env
+from prediction_market_agent_tooling.config import APIKeys
 from prediction_market_agent_tooling.gtypes import secretstr_to_v1_secretstr
 
 
 def create_embeddings_from_results(results: list[WebScrapeResult], text_splitter: RecursiveCharacterTextSplitter, api_key: SecretStr | None = None) -> Chroma:
     if api_key == None:
-        api_key = secret_str_from_env("OPENAI_API_KEY")
+        api_key = APIKeys().openai_api_key
 
     collection = Chroma(embedding_function=OpenAIEmbeddings(api_key=secretstr_to_v1_secretstr(api_key)))
     texts = []
diff --git a/prediction_prophet/functions/debate_prediction.py b/prediction_prophet/functions/debate_prediction.py
index 43345314..b2eb5599 100644
--- a/prediction_prophet/functions/debate_prediction.py
+++ b/prediction_prophet/functions/debate_prediction.py
@@ -9,7 +9,7 @@
 from langchain.schema.output_parser import StrOutputParser
 from langchain_openai import ChatOpenAI
 from langchain.prompts import ChatPromptTemplate
-from prediction_market_agent_tooling.tools.utils import secret_str_from_env
+from prediction_market_agent_tooling.config import APIKeys
 from prediction_market_agent_tooling.gtypes import secretstr_to_v1_secretstr
 
 
@@ -85,7 +85,7 @@
 
 def make_debated_prediction(prompt: str, additional_information: str, api_key: SecretStr | None = None) -> Prediction:
     if api_key == None:
-        api_key = secret_str_from_env("OPENAI_API_KEY")
+        api_key = APIKeys().openai_api_key
 
     formatted_time_utc = datetime.datetime.now(datetime.timezone.utc).isoformat(timespec='seconds') + "Z"
diff --git a/prediction_prophet/functions/generate_subqueries.py b/prediction_prophet/functions/generate_subqueries.py
index d93c345a..956eefa5 100644
--- a/prediction_prophet/functions/generate_subqueries.py
+++ b/prediction_prophet/functions/generate_subqueries.py
@@ -3,7 +3,7 @@
 from langchain.output_parsers import CommaSeparatedListOutputParser
 from langchain.prompts import ChatPromptTemplate
 from pydantic.types import SecretStr
-from prediction_market_agent_tooling.tools.utils import secret_str_from_env
+from prediction_market_agent_tooling.config import APIKeys
 from prediction_market_agent_tooling.gtypes import secretstr_to_v1_secretstr
 from prediction_market_agent_tooling.tools.langfuse_ import get_langfuse_langchain_config, observe
 
@@ -22,7 +22,7 @@ def generate_subqueries(query: str, limit: int, model: str, temperature: float,
         return [query]
 
     if api_key == None:
-        api_key = secret_str_from_env("OPENAI_API_KEY")
+        api_key = APIKeys().openai_api_key
 
     subquery_generation_prompt = ChatPromptTemplate.from_template(template=subquery_generation_template)
diff --git a/prediction_prophet/functions/prepare_report.py b/prediction_prophet/functions/prepare_report.py
index 06037b01..fceec3b9 100644
--- a/prediction_prophet/functions/prepare_report.py
+++ b/prediction_prophet/functions/prepare_report.py
@@ -5,7 +5,7 @@
 from langchain.prompts import ChatPromptTemplate
 from langchain.schema.output_parser import StrOutputParser
 from prediction_prophet.functions.utils import trim_to_n_tokens
-from prediction_market_agent_tooling.tools.utils import secret_str_from_env
+from prediction_market_agent_tooling.config import APIKeys
 from pydantic.types import SecretStr
 from prediction_market_agent_tooling.gtypes import secretstr_to_v1_secretstr
 from prediction_market_agent_tooling.tools.langfuse_ import get_langfuse_langchain_config, observe
@@ -13,7 +13,7 @@
 @observe()
 def prepare_summary(goal: str, content: str, model: str, api_key: SecretStr | None = None, trim_content_to_tokens: t.Optional[int] = None) -> str:
     if api_key == None:
-        api_key = secret_str_from_env("OPENAI_API_KEY")
+        api_key = APIKeys().openai_api_key
 
     prompt_template = """Write comprehensive summary of the following web content, that provides relevant information to answer the question: '{goal}'.
     But cut the fluff and keep it up to the point.
@@ -43,7 +43,7 @@ def prepare_summary(goal: str, content: str, model: str, api_key: SecretStr | No
 @observe()
 def prepare_report(goal: str, scraped: list[str], model: str, temperature: float, api_key: SecretStr | None = None) -> str:
     if api_key == None:
-        api_key = secret_str_from_env("OPENAI_API_KEY")
+        api_key = APIKeys().openai_api_key
 
     evaluation_prompt_template = """
     You are a professional researcher. Your goal is to provide a relevant information report
diff --git a/prediction_prophet/functions/rerank_subqueries.py b/prediction_prophet/functions/rerank_subqueries.py
index b240d3ca..57487c67 100644
--- a/prediction_prophet/functions/rerank_subqueries.py
+++ b/prediction_prophet/functions/rerank_subqueries.py
@@ -3,7 +3,7 @@
 from langchain.prompts import ChatPromptTemplate
 from langchain.schema.output_parser import StrOutputParser
 from pydantic.types import SecretStr
-from prediction_market_agent_tooling.tools.utils import secret_str_from_env
+from prediction_market_agent_tooling.config import APIKeys
 from prediction_market_agent_tooling.gtypes import secretstr_to_v1_secretstr
 from prediction_market_agent_tooling.tools.langfuse_ import get_langfuse_langchain_config, observe
 
@@ -20,7 +20,7 @@
 @observe()
 def rerank_subqueries(queries: list[str], goal: str, model: str, temperature: float, api_key: SecretStr | None = None) -> list[str]:
     if api_key == None:
-        api_key = secret_str_from_env("OPENAI_API_KEY")
+        api_key = APIKeys().openai_api_key
 
     rerank_results_prompt = ChatPromptTemplate.from_template(template=rerank_queries_template)
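Every hunk above applies the same change: instead of reading the key directly with secret_str_from_env("OPENAI_API_KEY"), the code falls back to the APIKeys configuration object from prediction_market_agent_tooling.config. The following is a minimal sketch of that fallback, assuming only the import and the openai_api_key attribute shown in the diff; the helper name resolve_openai_api_key is illustrative and not part of the patch.

from pydantic.types import SecretStr
from prediction_market_agent_tooling.config import APIKeys


def resolve_openai_api_key(api_key: SecretStr | None = None) -> SecretStr:
    # If no key is passed explicitly, fall back to the APIKeys settings object,
    # which is expected to pick OPENAI_API_KEY up from the environment.
    if api_key is None:
        api_key = APIKeys().openai_api_key
    return api_key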