Skip to content

Commit

Permalink
Updated config to allow different models to be used with MagenticOne (M1)
Browse files Browse the repository at this point in the history
  • Loading branch information
afourney committed Feb 3, 2025
1 parent d15af5b commit db6964b
Show file tree
Hide file tree
Showing 2 changed files with 20 additions and 7 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,11 @@ async def main() -> None:
# Load model configuration and create the model client.
with open("config.yaml", "r") as f:
config = yaml.safe_load(f)
model_client = ChatCompletionClient.load_component(config["model_config"])

orchestrator_client = ChatCompletionClient.load_component(config["orchestrator_client"])
coder_client = ChatCompletionClient.load_component(config["coder_client"])
web_surfer_client = ChatCompletionClient.load_component(config["web_surfer_client"])
file_surfer_client = ChatCompletionClient.load_component(config["file_surfer_client"])

# Read the prompt
prompt = ""
Expand All @@ -30,27 +34,27 @@ async def main() -> None:
# Set up the team
coder = MagenticOneCoderAgent(
"Assistant",
model_client = model_client,
model_client = coder_client,
)

executor = CodeExecutorAgent("ComputerTerminal", code_executor=LocalCommandLineCodeExecutor())

file_surfer = FileSurfer(
name="FileSurfer",
model_client = model_client,
model_client = file_surfer_client,
)

web_surfer = MultimodalWebSurfer(
name="WebSurfer",
model_client = model_client,
model_client = web_surfer_client,
downloads_folder=os.getcwd(),
debug_dir="logs",
to_save_screenshots=True,
)

team = MagenticOneGroupChat(
[coder, executor, file_surfer, web_surfer],
model_client=model_client,
model_client=orchestrator_client,
max_turns=20,
final_answer_prompt= f""",
We have completed the following task:
Expand Down
13 changes: 11 additions & 2 deletions python/packages/agbench/benchmarks/GAIA/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
###############################
# Open AI model configuration #
###############################
model_config:
model_config: &client
provider: autogen_ext.models.openai.OpenAIChatCompletionClient
config:
model: gpt-4o
Expand All @@ -16,7 +16,7 @@ model_config:
##############################
# Ollama model configuration #
##############################
#model_config:
#model_config: &client
# provider: autogen_ext.models.openai.OpenAIChatCompletionClient
# config:
# model: deepseek-r1:7b
Expand All @@ -27,3 +27,12 @@ model_config:
# json_output: false
# vision: false
# family: r1
#

#######################
# Used by MagenticOne #
#######################
orchestrator_client: *client
coder_client: *client
web_surfer_client: *client
file_surfer_client: *client

0 comments on commit db6964b

Please sign in to comment.