Skip to content

Commit

Permalink
Migrate openai from 0.28.1 to 1.30.1
Browse files Browse the repository at this point in the history
  • Loading branch information
iwamot authored and seratch committed May 17, 2024
1 parent 7b789e0 commit 61d0da3
Show file tree
Hide file tree
Showing 5 changed files with 81 additions and 52 deletions.
12 changes: 6 additions & 6 deletions app/bolt_listeners.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
import time
from typing import Optional

-from openai.error import Timeout
+from openai import APITimeoutError
from slack_bolt import App, Ack, BoltContext, BoltResponse
from slack_bolt.request.payload_utils import is_event
from slack_sdk.errors import SlackApiError
Expand Down Expand Up @@ -180,7 +180,7 @@ def respond_to_app_mention(
translate_markdown=TRANSLATE_MARKDOWN,
)

-    except Timeout:
+    except (APITimeoutError, TimeoutError):
if wip_reply is not None:
text = (
(
Expand Down Expand Up @@ -422,7 +422,7 @@ def respond_to_new_message(
translate_markdown=TRANSLATE_MARKDOWN,
)

-    except Timeout:
+    except (APITimeoutError, TimeoutError):
if wip_reply is not None:
text = (
(
Expand Down Expand Up @@ -705,7 +705,7 @@ def prepare_and_share_thread_summary(
thread_ts=private_metadata.get("thread_ts"),
text=f"{here_is_summary}\n\n{summary}",
)
-    except Timeout:
+    except (APITimeoutError, TimeoutError):
client.views_update(
view_id=payload["id"],
view={
Expand Down Expand Up @@ -950,7 +950,7 @@ def display_proofreading_result(
view_id=payload["id"],
view=modal_view,
)
-    except Timeout:
+    except (APITimeoutError, TimeoutError):
client.views_update(
view_id=payload["id"],
view={
Expand Down Expand Up @@ -1134,7 +1134,7 @@ def display_chat_from_scratch_result(
],
},
)
-    except Timeout:
+    except (APITimeoutError, TimeoutError):
client.views_update(
view_id=payload["id"],
view={
Expand Down
23 changes: 15 additions & 8 deletions app/i18n.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from typing import Optional

-import openai
+from openai import OpenAI, AzureOpenAI
from slack_bolt import BoltContext

from .openai_constants import GPT_3_5_TURBO_0613_MODEL
Expand Down Expand Up @@ -43,8 +43,19 @@ def translate(*, openai_api_key: Optional[str], context: BoltContext, text: str)
cached_result = _translation_result_cache.get(f"{lang}:{text}")
if cached_result is not None:
return cached_result
-    response = openai.ChatCompletion.create(
-        api_key=openai_api_key,
+    if context.get("OPENAI_API_TYPE") == "azure":
+        client = AzureOpenAI(
+            api_key=openai_api_key,
+            api_version=context.get("OPENAI_API_VERSION"),
+            azure_endpoint=context.get("OPENAI_API_BASE"),
+            azure_deployment=context.get("OPENAI_DEPLOYMENT_ID"),
+        )
+    else:
+        client = OpenAI(
+            api_key=openai_api_key,
+            base_url=context.get("OPENAI_API_BASE"),
+        )
+    response = client.chat.completions.create(
model=GPT_3_5_TURBO_0613_MODEL,
messages=[
{
Expand Down Expand Up @@ -73,11 +84,7 @@ def translate(*, openai_api_key: Optional[str], context: BoltContext, text: str)
frequency_penalty=0,
logit_bias={},
user="system",
-        api_base=context.get("OPENAI_API_BASE"),
-        api_type=context.get("OPENAI_API_TYPE"),
-        api_version=context.get("OPENAI_API_VERSION"),
-        deployment_id=context.get("OPENAI_DEPLOYMENT_ID"),
    )
-    translated_text = response["choices"][0]["message"].get("content")
+    translated_text = response.model_dump()["choices"][0]["message"].get("content")
_translation_result_cache[f"{lang}:{text}"] = translated_text
return translated_text
85 changes: 53 additions & 32 deletions app/openai_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,12 @@
import time
import re
import json
-from typing import List, Dict, Any, Generator, Tuple, Optional, Union
+from typing import List, Dict, Any, Tuple, Optional, Union
from importlib import import_module

-import openai
-from openai.error import Timeout
-from openai.openai_object import OpenAIObject
+from openai import OpenAI, AzureOpenAI
+from openai.types import Completion
+from openai._streaming import Stream
import tiktoken

from slack_bolt import BoltContext
Expand Down Expand Up @@ -105,9 +105,20 @@ def make_synchronous_openai_call(
openai_api_version: str,
openai_deployment_id: str,
timeout_seconds: int,
-) -> OpenAIObject:
-    return openai.ChatCompletion.create(
-        api_key=openai_api_key,
+) -> Completion:
+    if openai_api_type == "azure":
+        client = AzureOpenAI(
+            api_key=openai_api_key,
+            api_version=openai_api_version,
+            azure_endpoint=openai_api_base,
+            azure_deployment=openai_deployment_id,
+        )
+    else:
+        client = OpenAI(
+            api_key=openai_api_key,
+            base_url=openai_api_base,
+        )
+    return client.chat.completions.create(
model=model,
messages=messages,
top_p=1,
Expand All @@ -119,10 +130,6 @@ def make_synchronous_openai_call(
logit_bias={},
user=user,
stream=False,
-        api_type=openai_api_type,
-        api_base=openai_api_base,
-        api_version=openai_api_version,
-        deployment_id=openai_deployment_id,
request_timeout=timeout_seconds,
)

Expand All @@ -139,12 +146,23 @@ def start_receiving_openai_response(
openai_api_version: str,
openai_deployment_id: str,
function_call_module_name: Optional[str],
-) -> Generator[OpenAIObject, Any, None]:
+) -> Stream[Completion]:
    kwargs = {}
    if function_call_module_name is not None:
        kwargs["functions"] = import_module(function_call_module_name).functions
-    return openai.ChatCompletion.create(
-        api_key=openai_api_key,
+    if openai_api_type == "azure":
+        client = AzureOpenAI(
+            api_key=openai_api_key,
+            api_version=openai_api_version,
+            azure_endpoint=openai_api_base,
+            azure_deployment=openai_deployment_id,
+        )
+    else:
+        client = OpenAI(
+            api_key=openai_api_key,
+            base_url=openai_api_base,
+        )
+    return client.chat.completions.create(
model=model,
messages=messages,
top_p=1,
Expand All @@ -156,10 +174,6 @@ def start_receiving_openai_response(
logit_bias={},
user=user,
stream=True,
-        api_type=openai_api_type,
-        api_base=openai_api_base,
-        api_version=openai_api_version,
-        deployment_id=openai_deployment_id,
**kwargs,
)

Expand All @@ -171,7 +185,7 @@ def consume_openai_stream_to_write_reply(
context: BoltContext,
user_id: str,
messages: List[Dict[str, Union[str, Dict[str, str]]]],
-    stream: Generator[OpenAIObject, Any, None],
+    stream: Stream[Completion],
timeout_seconds: int,
translate_markdown: bool,
):
Expand All @@ -189,11 +203,11 @@ def consume_openai_stream_to_write_reply(
for chunk in stream:
spent_seconds = time.time() - start_time
if timeout_seconds < spent_seconds:
-                raise Timeout()
+                raise TimeoutError()
# Some versions of the Azure OpenAI API return an empty choices array in the first chunk
if context.get("OPENAI_API_TYPE") == "azure" and not chunk.choices:
continue
-            item = chunk.choices[0]
+            item = chunk.choices[0].model_dump()
if item.get("finish_reason") is not None:
break
delta = item.get("delta")
Expand Down Expand Up @@ -225,7 +239,7 @@ def update_message():
# Ignore function call suggestions after content has been received
if assistant_reply["content"] == "":
for k in function_call.keys():
-                        function_call[k] += delta["function_call"].get(k, "")
+                        function_call[k] += delta["function_call"].get(k) or ""
assistant_reply["function_call"] = function_call

for t in threads:
Expand Down Expand Up @@ -483,18 +497,25 @@ def calculate_tokens_necessary_for_function_call(context: BoltContext) -> int:
return _prompt_tokens_used_by_function_call_cache

def _calculate_prompt_tokens(functions) -> int:
-        return openai.ChatCompletion.create(
-            api_key=context.get("OPENAI_API_KEY"),
+        if context.get("OPENAI_API_TYPE") == "azure":
+            client = AzureOpenAI(
+                api_key=context.get("OPENAI_API_KEY"),
+                api_version=context.get("OPENAI_API_VERSION"),
+                azure_endpoint=context.get("OPENAI_API_BASE"),
+                azure_deployment=context.get("OPENAI_DEPLOYMENT_ID"),
+            )
+        else:
+            client = OpenAI(
+                api_key=context.get("OPENAI_API_KEY"),
+                base_url=context.get("OPENAI_API_BASE"),
+            )
+        return client.chat.completions.create(
model=context.get("OPENAI_MODEL"),
messages=[{"role": "user", "content": "hello"}],
max_tokens=1024,
user="system",
-            api_type=context.get("OPENAI_API_TYPE"),
-            api_base=context.get("OPENAI_API_BASE"),
-            api_version=context.get("OPENAI_API_VERSION"),
-            deployment_id=context.get("OPENAI_DEPLOYMENT_ID"),
            **({"functions": functions} if functions is not None else {}),
-        )["usage"]["prompt_tokens"]
+        ).model_dump()["usage"]["prompt_tokens"]

# TODO: If there is a better way to calculate this, replace the logic with it
module = import_module(function_call_module_name)
Expand Down Expand Up @@ -545,7 +566,7 @@ def generate_slack_thread_summary(
)
spent_time = time.time() - start_time
logger.debug(f"Making a summary took {spent_time} seconds")
-    return openai_response["choices"][0]["message"]["content"]
+    return openai_response.model_dump()["choices"][0]["message"]["content"]


def generate_proofreading_result(
Expand Down Expand Up @@ -597,7 +618,7 @@ def generate_proofreading_result(
)
spent_time = time.time() - start_time
logger.debug(f"Proofreading took {spent_time} seconds")
-    return openai_response["choices"][0]["message"]["content"]
+    return openai_response.model_dump()["choices"][0]["message"]["content"]


def generate_chatgpt_response(
Expand Down Expand Up @@ -635,4 +656,4 @@ def generate_chatgpt_response(
)
spent_time = time.time() - start_time
logger.debug(f"Proofreading took {spent_time} seconds")
-    return openai_response["choices"][0]["message"]["content"]
+    return openai_response.model_dump()["choices"][0]["message"]["content"]
10 changes: 6 additions & 4 deletions main_prod.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
import json
import logging
import os
-import openai
+from openai import OpenAI

from slack_sdk.web import WebClient
from slack_sdk.errors import SlackApiError
Expand Down Expand Up @@ -279,10 +279,11 @@ def validate_api_key_registration(ack: Ack, view: dict, context: BoltContext):
model = inputs["model"]["input"]["selected_option"]["value"]
try:
# Verify if the API key is valid
-        openai.Model.retrieve(api_key=api_key, id="gpt-3.5-turbo")
+        client = OpenAI(api_key=api_key)
+        client.models.retrieve(model="gpt-3.5-turbo")
try:
# Verify if the given model works with the API key
-            openai.Model.retrieve(api_key=api_key, id=model)
+            client.models.retrieve(model=model)
except Exception:
text = "This model is not yet available for this API key"
if already_set_api_key is not None:
Expand Down Expand Up @@ -315,7 +316,8 @@ def save_api_key_registration(
api_key = inputs["api_key"]["input"]["value"]
model = inputs["model"]["input"]["selected_option"]["value"]
try:
-        openai.Model.retrieve(api_key=api_key, id=model)
+        client = OpenAI(api_key=api_key)
+        client.models.retrieve(model=model)
s3_client.put_object(
Bucket=openai_bucket_name,
Key=context.team_id,
Expand Down
3 changes: 1 addition & 2 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
slack-bolt>=1.18.1,<2
slack-sdk>=3.27.1,<4
-# TODO: migrate to 1.x https://github.com/seratch/ChatGPT-in-Slack/issues/80
-openai==0.28.1
+openai>=1.30.1,<2
tiktoken>=0.6,<0.7
# https://github.com/Yelp/elastalert/issues/2306
urllib3<2

0 comments on commit 61d0da3

Please sign in to comment.