Commit

Revert "Merge pull request #790 from Notnaton/better-config"
This reverts commit 5ca3acf, reversing
changes made to 70ae798.
KillianLucas committed Dec 2, 2023
1 parent 5ca3acf commit 043fbc2
Showing 3 changed files with 95 additions and 201 deletions.
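
In short, this revert removes the profile-aware `load_config`/`update_attributes` pair introduced by #790 and restores the earlier `extend_config`, which merges every top-level key of the parsed YAML config directly into the interpreter instance's `__dict__`. Below is a minimal, self-contained sketch of that restored flat-merge behavior; the `SimpleConfigInterpreter` class, inline YAML, and file path are hypothetical stand-ins (the real code resolves the path and parsing through a `get_config` util).

# A minimal sketch of the restored flat-merge config behavior.
# `SimpleConfigInterpreter` and the sample YAML are hypothetical stand-ins.
import yaml


class SimpleConfigInterpreter:
    def __init__(self):
        # Defaults, mirroring the kind of attributes set in __init__.
        self.model = ""
        self.temperature = None

    def extend_config(self, config_path):
        # Every top-level YAML key becomes (or overwrites) an attribute.
        with open(config_path) as f:
            config = yaml.safe_load(f)
        self.__dict__.update(config)


# Usage: a flat config file maps 1:1 onto instance attributes.
with open("config.yaml", "w") as f:
    f.write('model: "gpt-4"\ntemperature: 0\n')

interp = SimpleConfigInterpreter()
interp.extend_config("config.yaml")
print(interp.model, interp.temperature)  # -> gpt-4 0

Note the design difference visible in the diff below: the flat merge is schema-free (any YAML key becomes an attribute), whereas the reverted profile loader checked keys against existing attributes with `hasattr` before setting them.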
36 changes: 7 additions & 29 deletions interpreter/core/core.py
@@ -4,7 +4,6 @@
"""

import json
import yaml
import os
from datetime import datetime

@@ -35,7 +34,6 @@ def __init__(self):
        self.max_output = 2000
        self.safe_mode = "off"
        self.disable_procedures = False
        self.launch_message = ""

        # Conversation history
        self.conversation_history = True
@@ -62,37 +60,17 @@ def __init__(self):
        self.languages = [i.name.lower() for i in self.computer.languages]

        # Load config defaults
        self.load_config()
        self.extend_config(self.config_file)

        # Expose class so people can make new instances
        self.Interpreter = Interpreter

    def load_config(self, profile=None):
        with open(self.config_file, 'r') as file:
            config = yaml.safe_load(file)

        # Use default_profile if no specific profile is provided
        if profile is None:
            profile = config.get('default_profile', None)

        # Load profile-specific configuration
        if profile in config:
            # Load default values first
            self.update_attributes(config['base'])
            self.update_attributes(config[profile])
        else:
            print(f"Profile '{profile}' not found in the configuration file.")
            raise

    def update_attributes(self, config):
        for key, value in config.items():
            if hasattr(self, key):
                setattr(self, key, value)
            elif key.startswith('self.'):
                # Handle attributes prefixed with 'self.'
                actual_key = key.split('self.', 1)[1]
                if hasattr(self, actual_key):
                    setattr(self, actual_key, value)

    def extend_config(self, config_path):
        if self.debug_mode:
            print(f"Extending configuration from `{config_path}`")

        config = get_config(config_path)
        self.__dict__.update(config)

    def chat(self, message=None, display=True, stream=False):
        if stream:
147 changes: 14 additions & 133 deletions interpreter/terminal_interface/config.yaml
@@ -1,133 +1,14 @@
# Choose which profile to run when "interpreter" is invoked.
# Use `interpreter --profile <profile>` to override.
# `interpreter --fast` / `--local` switch the profile too.
default_profile: "gpt-4"

vision:
  model: "openai/gpt-4-vision-preview"
  max_tokens: 4_000
  context_window: 128_000
  vision: True
  function_calling_llm: False
  self.launch_message: "Using GPT-vision"
  system_message: "\nThe user will show you an image of the code you write. You can view images directly. Be sure to actually write a markdown code block for almost every user request! Almost EVERY message should include a markdown code block. Do not end your message prematurely!\n\nFor HTML: This will be run STATELESSLY. You may NEVER write '<!-- previous code here... --!>' or `<!-- header will go here -->` or anything like that. It is CRITICAL TO NEVER WRITE PLACEHOLDERS. Placeholders will BREAK it. You must write the FULL HTML CODE EVERY TIME. Therefore you cannot write HTML piecemeal—write all the HTML, CSS, and possibly Javascript **in one step, in one code block**. The user will help you review it visually.\nIf the user submits a filepath, you will also see the image. The filepath and user image will both be in the user's message."

gpt-4:
  model: "openai/gpt-4-1106-preview"
  max_tokens: 4_000
  context_window: 128_000
  function_calling_llm: True
  self.launch_message: "Using GPT-4"

# GPT-3 (also selected by --fast)
gpt-3:
  model: "openai/gpt-3.5-turbo-1106"
  max_tokens: 4_000
  context_window: 16_000
  function_calling_llm: True
  self.launch_message: "Using GPT-3"

local:
  model: "openai/local"
  api_base: "http://localhost:1234/v1"
  max_tokens: 1024
  context_window: 3000
  api_key: "0"
  launch_message: "
    > Open Interpreter's local mode is powered by **`LM Studio`**.
    You will need to run **LM Studio** in the background.
    1. Download **LM Studio** from [https://lmstudio.ai/](https://lmstudio.ai/) then start it.\n
    2. Select a language model then click **Download**.\n
    3. Click the **<->** button on the left (below the chat button).\n
    4. Select your model at the top, then click **Start Server**.\n
    Once the server is running, you can begin your conversation below.
    > **Warning:** This feature is highly experimental.
    > Don't expect `gpt-3.5` / `gpt-4` level quality, speed, or reliability yet!"

# Do not remove.
base:
  # Settings
  self.local: False
  self.auto_run: False
  self.debug_mode: False
  self.max_output: 2000
  self.safe_mode: "off"
  self.disable_procedures: False
  self.launch_message: ""

  # Conversation history
  self.conversation_history: True
  self.conversation_filename: None
  self.conversation_history_path: "conversations"

  # LLM settings
  self.model: ""
  self.temperature: None
  self.system_message: ""
  self.context_window: None
  self.max_tokens: None
  self.api_base: None
  self.api_key: None
  self.max_budget: None
  self._llm: None
  self.function_calling_llm: None
  self.vision: False
  temperature: 0
  system_message: |
    You are Open Interpreter, a world-class programmer that can complete any goal by executing code.
    First, write a plan. **Always recap the plan between each code block** (you have extreme short-term memory loss, so you need to recap the plan between each message block to retain it).
    When you execute code, it will be executed **on the user's machine**. The user has given you **full and complete permission** to execute any code necessary to complete the task.
    If you want to send data between programming languages, save the data to a txt or json.
    You can access the internet. Run **any code** to achieve the goal, and if at first you don't succeed, try again and again.
    You can install new packages.
    When a user refers to a filename, they're likely referring to an existing file in the directory you're currently executing code in.
    Write messages to the user in Markdown.
    In general, try to **make plans** with as few steps as possible. As for actually executing code to carry out that plan, for *stateful* languages (like python, javascript, shell, but NOT for html which starts from 0 every time) **it's critical not to try to do everything in one code block.** You should try something, print information about it, then continue from there in tiny, informed steps. You will never get it on the first try, and attempting it in one go will often lead to errors you can't see.
    You are capable of **any** task.
#all:
# # Settings
# local: False
# auto_run: False
# debug_mode: False
# max_output: 2000
# safe_mode: "off"
# disable_procedures: False
# display_markdown_message: "" #Not implemented
#
# # Conversation history
# conversation_history: True
# conversation_filename: None
# conversation_history_path: ""
#
# # LLM settings
# model: ""
# temperature: None
# system_message: ""
# context_window: None
# max_tokens: None
# api_base: None
# api_key: None
# api_version: None #Required by Azure
# max_budget: None
# _llm: None
# function_calling_llm: None
# vision: False # LLM supports vision
#
# system_message: |
# You are Open Interpreter, a world-class programmer that can complete any goal by executing code.
# First, write a plan. **Always recap the plan between each code block** (you have extreme short-term memory loss, so you need to recap the plan between each message block to retain it).
# When you execute code, it will be executed **on the user's machine**. The user has given you **full and complete permission** to execute any code necessary to complete the task.
# If you want to send data between programming languages, save the data to a txt or json.
# You can access the internet. Run **any code** to achieve the goal, and if at first you don't succeed, try again and again.
# You can install new packages.
# When a user refers to a filename, they're likely referring to an existing file in the directory you're currently executing code in.
# Write messages to the user in Markdown.
# In general, try to **make plans** with as few steps as possible. As for actually executing code to carry out that plan, for *stateful* languages (like python, javascript, shell, but NOT for html which starts from 0 every time) **it's critical not to try to do everything in one code block.** You should try something, print information about it, then continue from there in tiny, informed steps. You will never get it on the first try, and attempting it in one go will often lead to errors you can't see.
# You are capable of **any** task.
system_message: |
  You are Open Interpreter, a world-class programmer that can complete any goal by executing code.
  First, write a plan. **Always recap the plan between each code block** (you have extreme short-term memory loss, so you need to recap the plan between each message block to retain it).
  When you execute code, it will be executed **on the user's machine**. The user has given you **full and complete permission** to execute any code necessary to complete the task.
  If you want to send data between programming languages, save the data to a txt or json.
  You can access the internet. Run **any code** to achieve the goal, and if at first you don't succeed, try again and again.
  You can install new packages.
  When a user refers to a filename, they're likely referring to an existing file in the directory you're currently executing code in.
  Write messages to the user in Markdown.
  In general, try to **make plans** with as few steps as possible. As for actually executing code to carry out that plan, for *stateful* languages (like python, javascript, shell, but NOT for html which starts from 0 every time) **it's critical not to try to do everything in one code block.** You should try something, print information about it, then continue from there in tiny, informed steps. You will never get it on the first try, and attempting it in one go will often lead to errors you can't see.
  You are capable of **any** task.
local: false
model: "gpt-4"
temperature: 0
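
A note on the reverted YAML above: keys such as `self.launch_message` in the `base` and profile sections were literal strings, and the removed `update_attributes` helper stripped the `self.` prefix before calling `setattr` on the interpreter. Here is a toy, self-contained sketch of that key handling; the `Demo` class and sample values are hypothetical:

# Toy demonstration of the reverted `self.`-prefixed key handling.
# `Demo` and the sample dict are hypothetical; the real logic lived in
# Interpreter.update_attributes (removed by this commit).
class Demo:
    def __init__(self):
        self.launch_message = ""
        self.max_output = 2000


def update_attributes(obj, config):
    for key, value in config.items():
        if hasattr(obj, key):
            setattr(obj, key, value)
        elif key.startswith("self."):
            # Strip the literal "self." prefix used in the YAML keys.
            actual_key = key.split("self.", 1)[1]
            if hasattr(obj, actual_key):
                setattr(obj, actual_key, value)


d = Demo()
update_attributes(d, {"self.launch_message": "Using GPT-4", "max_output": 4000})
print(d.launch_message, d.max_output)  # -> Using GPT-4 4000

Running it prints `Using GPT-4 4000`: both the prefixed and unprefixed keys land on the same attributes.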
113 changes: 74 additions & 39 deletions interpreter/terminal_interface/start_terminal_interface.py
@@ -110,12 +110,6 @@
"help_text": "experimentally use vision for supported languages (HTML)",
"type": bool,
},
{
"name": "profile",
"nickname": "p",
"help_text": "select config profile to load (run `interpreter --config` for more information)",
"type": str,
},
]


@@ -180,12 +174,6 @@ def start_terminal_interface(interpreter):

    args = parser.parse_args()

    if args.version:
        version = pkg_resources.get_distribution("open-interpreter").version
        update_name = "New Computer"  # Change this with each major update
        print(f'Open Interpreter {version} "{update_name}"')
        return

    # This should be pushed into an open_config.py util
    # If --config is used, open the config.yaml file in the Open Interpreter folder of the user's config dir
    if args.config:
@@ -209,16 +197,38 @@ def start_terminal_interface(interpreter):
                # Fallback to using 'open' on macOS if 'xdg-open' is not available
                subprocess.call(["open", config_file])
        return

    # Good defaults for common models, in case folks haven't set anything
    if interpreter.model == "gpt-4-1106-preview" and "context_window" not in args and "max_tokens" not in args and "function_calling_llm" not in args:
        interpreter.context_window = 128000
        interpreter.max_tokens = 4096
        interpreter.function_calling_llm = True
    if interpreter.model == "gpt-3.5-turbo-1106" and "context_window" not in args and "max_tokens" not in args and "function_calling_llm" not in args:
        interpreter.context_window = 16000
        interpreter.max_tokens = 4096
        interpreter.function_calling_llm = True

    if args.local:
        # Default local (LM studio) attributes
        interpreter.system_message = "You are an AI."
        interpreter.model = (
            "openai/" + interpreter.model
        )  # This tells LiteLLM it's an OpenAI compatible server
        interpreter.api_base = "http://localhost:1234/v1"
        interpreter.max_tokens = 1000
        interpreter.context_window = 3000
        interpreter.api_key = "0"

        display_markdown_message(
            """
> Open Interpreter's local mode is powered by **`LM Studio`**.
You will need to run **LM Studio** in the background.
1. Download **LM Studio** from [https://lmstudio.ai/](https://lmstudio.ai/) then start it.
2. Select a language model then click **Download**.
3. Click the **<->** button on the left (below the chat button).
4. Select your model at the top, then click **Start Server**.
Once the server is running, you can begin your conversation below.
> **Warning:** This feature is highly experimental.
> Don't expect `gpt-3.5` / `gpt-4` level quality, speed, or reliability yet!
"""
        )

    # Set attributes on interpreter
    for attr_name, attr_value in vars(args).items():
@@ -228,7 +238,7 @@ def start_terminal_interface(interpreter):
            if attr_name == "config_file":
                user_config = get_config_path(attr_value)
                interpreter.config_file = user_config
                interpreter.load_config()
                interpreter.extend_config(config_path=user_config)
            else:
                setattr(interpreter, attr_name, attr_value)

@@ -238,34 +248,59 @@
    ):
        setattr(interpreter, "auto_run", False)

    # If --conversations is used, run conversation_navigator
    if args.conversations:
        conversation_navigator(interpreter)
        return

    if args.version:
        version = pkg_resources.get_distribution("open-interpreter").version
        update_name = "New Computer"  # Change this with each major update
        print(f'Open Interpreter {version} "{update_name}"')
        return

    if args.fast:
        interpreter.load_config("gpt-3")
        interpreter.model = "gpt-3.5-turbo"

    if args.vision:
        interpreter.load_config("vision")

    if args.local:
        interpreter.load_config("local")
        interpreter.vision = True
        interpreter.model = "gpt-4-vision-preview"
        interpreter.system_message += "\nThe user will show you an image of the code you write. You can view images directly. Be sure to actually write a markdown code block for almost every user request! Almost EVERY message should include a markdown code block. Do not end your message prematurely!\n\nFor HTML: This will be run STATELESSLY. You may NEVER write '<!-- previous code here... --!>' or `<!-- header will go here -->` or anything like that. It is CRITICAL TO NEVER WRITE PLACEHOLDERS. Placeholders will BREAK it. You must write the FULL HTML CODE EVERY TIME. Therefore you cannot write HTML piecemeal—write all the HTML, CSS, and possibly Javascript **in one step, in one code block**. The user will help you review it visually.\nIf the user submits a filepath, you will also see the image. The filepath and user image will both be in the user's message."
        interpreter.function_calling_llm = False
        interpreter.context_window = 110000
        interpreter.max_tokens = 4096

    if args.profile:
        interpreter.load_config(args.profile)
        display_markdown_message("> `Vision` enabled **(experimental)**\n")

    # Check for update
    try:
        if check_for_update():
            display_markdown_message(
                "> **A new version of Open Interpreter is available.**\n>Please run: `pip install --upgrade open-interpreter`\n\n---"
            )
        if not interpreter.local:
            # This should actually be pushed into the utility
            if check_for_update():
                display_markdown_message(
                    "> **A new version of Open Interpreter is available.**\n>Please run: `pip install --upgrade open-interpreter`\n\n---"
                )
    except:
        # Doesn't matter
        pass

    # If --conversations is used, run conversation_navigator
    if args.conversations:
        conversation_navigator(interpreter)
        return

    # At some point in the future these model name redirects won't be necessary anymore, but legacy names will remain for a while
    if interpreter.model == "gpt-4" or interpreter.model == "gpt-4-32k":
        interpreter.model = "gpt-4-1106-preview"

    if interpreter.model == "gpt-3.5-turbo" or interpreter.model == "gpt-3.5-turbo-16k":
        interpreter.model = "gpt-3.5-turbo-1106"

    if not interpreter.local and interpreter.model == "gpt-4-1106-preview":
        interpreter.context_window = 128000
        interpreter.max_tokens = 4096
        interpreter.function_calling_llm = True

    if not interpreter.local and interpreter.model == "gpt-3.5-turbo-1106":
        interpreter.context_window = 16000
        interpreter.max_tokens = 4096
        interpreter.function_calling_llm = True

    validate_llm_settings(interpreter)
    print(display_markdown_message(interpreter.launch_message))

    interpreter.chat()
