27 changes: 26 additions & 1 deletion docs/guides/profiles.mdx
@@ -21,7 +21,7 @@ If you want to make your own profile, start with the [Template Profile](https://

To apply a Profile to an Open Interpreter session, you can run `interpreter --profile <name>`

-# Example Profile
+# Example Python Profile

```Python
from interpreter import interpreter
@@ -38,6 +38,31 @@
interpreter.auto_run = True
interpreter.loop = True
```

# Example YAML Profile

<Info> Make sure the YAML profile's `version` is set to 0.2.5 </Info>

```YAML
llm:
model: "gpt-4-o"
temperature: 0
# api_key: ... # Your API key, if the API requires it
# api_base: ... # The URL where an OpenAI-compatible server is running to handle LLM API requests

# Computer Settings
computer:
import_computer_api: True # Gives OI a helpful Computer API designed for code interpreting language models

# Custom Instructions
custom_instructions: "" # This will be appended to the system message

# General Configuration
auto_run: False # If True, code will run without asking for confirmation
offline: False # If True, will disable some online features like checking for updates

version: 0.2.5 # Configuration file version (do not modify)
```

<Tip>
There are many settings that can be configured. [See them all
here](/settings/all-settings)
30 changes: 24 additions & 6 deletions docs/mint.json
@@ -31,7 +31,10 @@
"navigation": [
{
"group": "Getting Started",
"pages": ["getting-started/introduction", "getting-started/setup"]
"pages": [
"getting-started/introduction",
"getting-started/setup"
]
},
{
"group": "Guides",
@@ -47,7 +50,9 @@
},
{
"group": "Settings",
"pages": ["settings/all-settings"]
"pages": [
"settings/all-settings"
]
},
{
"group": "Language Models",
@@ -105,11 +110,16 @@
},
{
"group": "Protocols",
"pages": ["protocols/lmc-messages"]
"pages": [
"protocols/lmc-messages"
]
},
{
"group": "Integrations",
"pages": ["integrations/e2b", "integrations/docker"]
"pages": [
"integrations/e2b",
"integrations/docker"
]
},
{
"group": "Safety",
@@ -120,9 +130,17 @@
"safety/best-practices"
]
},
+    {
+      "group": "Troubleshooting",
+      "pages": [
+        "troubleshooting/faq"
+      ]
+    },
{
"group": "Telemetry",
"pages": ["telemetry/telemetry"]
"pages": [
"telemetry/telemetry"
]
}
],
"feedback": {
@@ -133,4 +151,4 @@
"youtube": "https://www.youtube.com/@OpenInterpreter",
"linkedin": "https://www.linkedin.com/company/openinterpreter"
}
-}
+}
16 changes: 16 additions & 0 deletions docs/troubleshooting/faq.mdx
@@ -0,0 +1,16 @@
---
title: "FAQ"
description: "Frequently Asked Questions"
---

<Accordion title="Does Open Interpreter ensure that my data doesn't leave my computer?">
As long as you're using a local language model, your messages and personal
info never leave your computer. If you use a cloud model, your messages and
custom instructions are sent to that model's provider. We also have a basic
telemetry
[function](https://github.com/OpenInterpreter/open-interpreter/blob/main/interpreter/core/core.py#L167)
(adapted from ChromaDB's telemetry) that anonymously tracks usage. It only
records that a message was sent and includes no PII. OI errors are also
reported, including the exception string. Detailed docs on all of this are
[here](/telemetry/telemetry), and you can opt out by running with `--local`,
`--offline`, or `--disable_telemetry`.
</Accordion>
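For programmatic sessions, here is a minimal sketch of the same opt-out from Python. It assumes the `disable_telemetry` attribute on the `interpreter` object, which the `core.py` linked above defines:

```Python
from interpreter import interpreter

# Assumption: setting this attribute is the programmatic equivalent of the
# --disable_telemetry CLI flag; no usage events are reported for this session.
interpreter.disable_telemetry = True

interpreter.chat("Hello!")
```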
20 changes: 20 additions & 0 deletions examples/Dockerfile
@@ -0,0 +1,20 @@
# This is a Dockerfile for using an isolated instance of Open Interpreter

# Start with Python 3.11
FROM python:3.11

# Replace <your_openai_api_key> with your own key
ENV OPENAI_API_KEY=<your_openai_api_key>

# Install Open Interpreter
RUN pip install open-interpreter


# To run the container

# docker build -t openinterpreter .
# docker run -d -it --name interpreter-instance openinterpreter interpreter
# docker attach interpreter-instance

# To mount a volume
# docker run -d -it -v /path/on/your/host:/path/in/the/container --name interpreter-instance openinterpreter interpreter
4 changes: 4 additions & 0 deletions examples/interactive_quickstart.py
@@ -0,0 +1,4 @@
# This is all you need to get started
from interpreter import interpreter

interpreter.chat()
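For a scripted, non-interactive variant, a short sketch, assuming the documented behavior that `interpreter.chat(message)` runs a single exchange and returns the conversation as a list of message dicts:

```Python
from interpreter import interpreter

# Run one exchange instead of opening an interactive terminal chat.
messages = interpreter.chat("What operating system am I running?")

# `messages` holds the conversation history as message dicts.
for message in messages:
    print(message["role"], message.get("type"), message.get("content"))
```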
119 changes: 119 additions & 0 deletions examples/local_server.ipynb
@@ -0,0 +1,119 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Build a local Open Interpreter server for a custom front end"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from flask import Flask, request, jsonify\n",
"from interpreter import interpreter\n",
"import json"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"app = Flask(__name__)\n",
"\n",
"# Configure Open Interpreter\n",
"\n",
"## Local Model\n",
"# interpreter.offline = True\n",
"# interpreter.llm.model = \"ollama/llama3.1\"\n",
"# interpreter.llm.api_base = \"http://localhost:11434\"\n",
"# interpreter.llm.context_window = 4000\n",
"# interpreter.llm.max_tokens = 3000\n",
"# interpreter.auto_run = True\n",
"# interpreter.verbose = True\n",
"\n",
"## Hosted Model\n",
"interpreter.llm.model = \"gpt-4o\"\n",
"interpreter.llm.context_window = 10000\n",
"interpreter.llm.max_tokens = 4096\n",
"interpreter.auto_run = True\n",
"\n",
"# Create an endpoint\n",
"@app.route('/chat', methods=['POST'])\n",
"def chat():\n",
" # Expected payload: {\"prompt\": \"User's message or question\"}\n",
" data = request.json\n",
" prompt = data.get('prompt')\n",
" \n",
" if not prompt:\n",
" return jsonify({\"error\": \"No prompt provided\"}), 400\n",
"\n",
" full_response = \"\"\n",
" try:\n",
" for chunk in interpreter.chat(prompt, stream=True, display=False):\n",
" if isinstance(chunk, dict):\n",
" if chunk.get(\"type\") == \"message\":\n",
" full_response += chunk.get(\"content\", \"\")\n",
" elif isinstance(chunk, str):\n",
" # Attempt to parse the string as JSON\n",
" try:\n",
" json_chunk = json.loads(chunk)\n",
" full_response += json_chunk.get(\"response\", \"\")\n",
" except json.JSONDecodeError:\n",
" # If it's not valid JSON, just add the string\n",
" full_response += chunk\n",
" except Exception as e:\n",
" return jsonify({\"error\": str(e)}), 500\n",
"\n",
" return jsonify({\"response\": full_response.strip()})\n",
"\n",
"if __name__ == '__main__':\n",
" app.run(host='0.0.0.0', port=5001)\n",
"\n",
"print(\"Open Interpreter server is running on http://0.0.0.0:5001\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Make a request to the server"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"curl -X POST http://localhost:5001/chat \\\n",
" -H \"Content-Type: application/json\" \\\n",
" -d '{\"prompt\": \"Hello, how are you?\"}'"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
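A Python equivalent of the curl request above, useful for testing the endpoint from another script; it assumes the server is running locally on port 5001 as configured in the notebook:

```Python
import requests

# Mirror the curl call: POST a JSON prompt to the /chat endpoint.
response = requests.post(
    "http://localhost:5001/chat",
    json={"prompt": "Hello, how are you?"},
    timeout=120,  # model calls and code execution can take a while
)
response.raise_for_status()
print(response.json()["response"])
```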
2 changes: 1 addition & 1 deletion interpreter/core/llm/llm.py
@@ -51,7 +51,7 @@ def __init__(self, interpreter):
self.completions = fixed_litellm_completions

# Settings
self.model = "gpt-4-turbo"
self.model = "gpt-4o"
self.temperature = 0

self.supports_vision = None # Will try to auto-detect
@@ -0,0 +1,26 @@
"""
This is an Open Interpreter profile. It configures Open Interpreter to run Anthropic's `Claude 3 Sonnet` using Bedrock.
"""

"""
Required pip package:
pip install boto3>=1.28.57

Required environment variables:
os.environ["AWS_ACCESS_KEY_ID"] = "" # Access key
os.environ["AWS_SECRET_ACCESS_KEY"] = "" # Secret access key
os.environ["AWS_REGION_NAME"] = "" # us-east-1, us-east-2, us-west-1, us-west-2

More information can be found here: https://docs.litellm.ai/docs/providers/bedrock
"""

from interpreter import interpreter

interpreter.llm.model = "bedrock/anthropic.claude-3-sonnet-20240229-v1:0"

interpreter.computer.import_computer_api = True

interpreter.llm.supports_functions = True
interpreter.llm.supports_vision = True
interpreter.llm.context_window = 100000
interpreter.llm.max_tokens = 4096
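As a sketch of the setup the docstring describes, the required AWS credentials can be exported in-process before this profile loads; the values below are placeholders, not real defaults:

```Python
import os

# Placeholders only; substitute credentials for your own AWS account.
os.environ["AWS_ACCESS_KEY_ID"] = "<your-access-key-id>"
os.environ["AWS_SECRET_ACCESS_KEY"] = "<your-secret-access-key>"
os.environ["AWS_REGION_NAME"] = "us-east-1"  # any Bedrock-enabled region
```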
4 changes: 2 additions & 2 deletions interpreter/terminal_interface/profiles/defaults/default.yaml
@@ -4,7 +4,7 @@

# LLM Settings
llm:
model: "gpt-4-turbo"
model: "gpt-4o"
temperature: 0
# api_key: ... # Your API key, if the API requires it
# api_base: ... # The URL where an OpenAI-compatible server is running to handle LLM API requests
@@ -26,7 +26,7 @@ computer:

# To use a separate model for the `wtf` command:
# wtf:
# model: "gpt-3.5-turbo"
# model: "gpt-4o-mini"

# Documentation
# All options: https://docs.openinterpreter.com/settings
4 changes: 2 additions & 2 deletions interpreter/terminal_interface/profiles/defaults/fast.yaml
@@ -3,7 +3,7 @@
# Remove the "#" before the settings below to use them.

llm:
model: "gpt-3.5-turbo"
model: "gpt-4o-mini"
temperature: 0
# api_key: ... # Your API key, if the API requires it
# api_base: ... # The URL where an OpenAI-compatible server is running to handle LLM API requests
@@ -23,4 +23,4 @@ custom_instructions: "The user has set you to FAST mode. **No talk, just code.**

# All options: https://docs.openinterpreter.com/settings

-version: 0.2.1 # Configuration file version (do not modify)
+version: 0.2.5 # Configuration file version (do not modify)
@@ -4,7 +4,7 @@

# LLM Settings
llm:
model: "gpt-4-turbo"
model: "gpt-4o"
temperature: 0
# api_key: ... # Your API key, if the API requires it
# api_base: ... # The URL where an OpenAI-compatible server is running to handle LLM API requests
@@ -17,5 +17,5 @@ llm:

# All options: https://docs.openinterpreter.com/usage/terminal/settings

-version: 0.2.1 # Configuration file version (do not modify)
+version: 0.2.5 # Configuration file version (do not modify)

8 changes: 4 additions & 4 deletions interpreter/terminal_interface/profiles/profiles.py
@@ -170,11 +170,11 @@ def apply_profile(interpreter, profile, profile_path):

try:
if profile["llm"]["model"] == "gpt-4":
text = text.replace("gpt-4", "gpt-4-turbo")
profile["llm"]["model"] = "gpt-4-turbo"
text = text.replace("gpt-4", "gpt-4o")
profile["llm"]["model"] = "gpt-4o"
elif profile["llm"]["model"] == "gpt-4-turbo-preview":
text = text.replace("gpt-4-turbo-preview", "gpt-4-turbo")
profile["llm"]["model"] = "gpt-4-turbo"
text = text.replace("gpt-4-turbo-preview", "gpt-4o")
profile["llm"]["model"] = "gpt-4o"
except:
raise
pass # fine
2 changes: 1 addition & 1 deletion interpreter/terminal_interface/start_terminal_interface.py
@@ -197,7 +197,7 @@ def start_terminal_interface(interpreter):
{
"name": "fast",
"nickname": "f",
"help_text": "runs `interpreter --model gpt-3.5-turbo` and asks OI to be extremely concise (shortcut for `interpreter --profile fast`)",
"help_text": "runs `interpreter --model gpt-4o-mini` and asks OI to be extremely concise (shortcut for `interpreter --profile fast`)",
"type": bool,
},
{
3 changes: 2 additions & 1 deletion interpreter/terminal_interface/validate_llm_settings.py
@@ -37,6 +37,7 @@ def validate_llm_settings(interpreter):
"gpt-4",
"gpt-3.5-turbo",
"gpt-4o",
"gpt-4o-mini",
"gpt-4-turbo",
]:
if (
@@ -50,7 +51,7 @@
"""---
> OpenAI API key not found

-To use `gpt-4-turbo` (recommended) please provide an OpenAI API key.
+To use `gpt-4o` (recommended) please provide an OpenAI API key.

To use another language model, run `interpreter --local` or consult the documentation at [docs.openinterpreter.com](https://docs.openinterpreter.com/language-model-setup/).
