Skip to content

Commit

Permalink
convert all system role messages to user role for reasoning models (microsoft#241)
Browse files Browse the repository at this point in the history

While the original system message was being converted, this change
brings support for converting _all_ system role messages to user role
for reasoning models, including those created by extensions.
  • Loading branch information
bkrabach authored Nov 13, 2024
1 parent f4c3399 commit 446586e
Showing 1 changed file with 24 additions and 11 deletions.
35 changes: 24 additions & 11 deletions assistants/explorer-assistant/assistant/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -215,19 +215,13 @@ async def respond_to_conversation(
# add the guardrails prompt to the system message content
system_message_content += f"\n\n{config.guardrails_prompt}"

# reasoning models do not support system messages, so set the role to "user" for the system message
completion_messages: list[ChatCompletionMessageParam] = []
if config.request_config.is_reasoning_model:
# if the model is a reasoning model, add the system message as a user message
completion_messages.append({
"role": "user",
"content": system_message_content,
})
else:
completion_messages.append({
# initialize the completion messages with the system message
completion_messages: list[ChatCompletionMessageParam] = [
{
"role": "system",
"content": system_message_content,
})
}
]

# generate the attachment messages from the attachment agent
attachment_messages = await attachments_extension.get_completion_messages_for_attachments(
Expand Down Expand Up @@ -284,6 +278,25 @@ async def respond_to_conversation(
# generate a response from the AI model
async with openai_client.create_client(config.service_config) as client:
try:
if config.request_config.is_reasoning_model:
# convert all messages that use system role to user role as reasoning models do not support system role
completion_messages = [
{
"role": "user",
"content": message["content"],
}
if message["role"] == "system"
else message
for message in completion_messages
]

# for reasoning models, use max_completion_tokens instead of max_tokens
completion = await client.chat.completions.create(
messages=completion_messages,
model=config.request_config.openai_model,
max_completion_tokens=config.request_config.response_tokens,
)

if config.extensions_config.artifacts.enabled:
response = await artifacts_extension.get_openai_completion_response(
client,
Expand Down

0 comments on commit 446586e

Please sign in to comment.