Added test cases and config templates, adjusted config overrides and documentation.

James C. Palmer 2024-03-24 21:00:32 -07:00
parent 8ea00b6fce
commit b7262f3ec8
9 changed files with 153 additions and 67 deletions

.gitignore

@@ -169,3 +169,6 @@ cython_debug/
 *.aifs
 software/output_audio.wav
 .DS_Store
+
+# Configuration files
+config.yaml

README.md

@ -37,35 +37,34 @@ We intend to become the GNU/Linux of this space by staying open, modular, and fr
```shell ```shell
git clone https://github.com/OpenInterpreter/01 # Clone the repository git clone https://github.com/OpenInterpreter/01 # Clone the repository
cd 01/software # CD into the source directory cd 01/software # CD into the source directory
```
<!-- > Not working? Read our [setup guide](https://docs.openinterpreter.com/getting-started/setup). -->
```shell
brew install portaudio ffmpeg cmake # Install Mac OSX dependencies brew install portaudio ffmpeg cmake # Install Mac OSX dependencies
poetry install # Install Python dependencies poetry install # Install Python dependencies
export OPENAI_API_KEY=sk...
poetry run 01 # Runs the 01 Light simulator (hold your spacebar, speak, release)
``` ```
### Running locally ## Getting Started
To run locally, you can use command line arguments and environment variables.
Using command line arguments: ### Using OpenAI's API
To use 01 with OpenAI's API, you need to first set your API key.
1. Create a `.env` file in the `01/software` directory.
2. Add `OPENAI_API_KEY=<your-api-key>` to the file.
3. Run the following command:
```shell ```shell
poetry run 01 --local --model ollama/mixtral:latest poetry run 01
``` ```
Using command line arguments and environment variables: > Alternatively, you can set the `OPENAI_API_KEY` environment variable in your shell with `export OPENI_API_KEY=<your-api-key>`.
### Using a Local Model
To use 01 with a local model, run the following command and follow the prompts:
```shell ```shell
export MODEL=ollama/mixtral:latest
poetry run 01 --local poetry run 01 --local
``` ```
Note, you should replace `ollama/mixtral:latest` with a model installed locally. For supported models, see Open Interpreter's [local providers](https://docs.openinterpreter.com/language-models/local-models/ollama) documentation.
<br> <br>
# Hardware # Hardware
@@ -132,7 +131,40 @@ If you want to run local speech-to-text using Whisper, you must install Rust. Fo
 
 ## Customizations
 
-To customize the behavior of the system, edit the [system message, model, skills library path,](https://docs.openinterpreter.com/settings/all-settings) etc. in `i.py`. This file sets up an interpreter, and is powered by Open Interpreter.
+01 is highly customizable and comes with several ways to modify its behavior, including a `config.yaml` file, a `.env` file, command-line arguments, and the `i.py` file. Follow the steps below to use these customization options.
+
+#### 1. Use a `config.yaml` File
+
+To create a `config.yaml` file, copy the `config-template.yaml` file in the `software` directory.
+
+```shell
+cp config-template.yaml config.yaml
+```
+
+#### 2. Use a `.env` File
+
+To create a `.env` file, copy the `config-template.env` file in the `software` directory.
+
+```shell
+cp config-template.env .env
+```
+
+There are two important points to note when using the `.env` file:
+
+1. Values from the `.env` file automatically override values from the `config.yaml` file.
+2. 01-specific environment variables use the following pattern: `01_<SECTION>_<KEY>`. As an example, to override the `local.enabled` value from your `config.yaml` file, use the `01_LOCAL_ENABLED` environment variable; a minimal sketch of this override follows the list.
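A minimal sketch of that override, assuming `config.yaml` sets `local.enabled: false`, `.env` contains `01_LOCAL_ENABLED=true`, and the snippet runs from the `software` directory:

```python
# Sketch only: assumes config.yaml sets local.enabled to false and .env
# contains 01_LOCAL_ENABLED=true in the current working directory.
from source.core.config import get_config

config = get_config()
print(config.local.enabled)  # True: the .env value wins over config.yaml
```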
+
+#### 3. Use Command-line Arguments
+
+01 comes with a number of command-line arguments. These simplify certain tasks and can also be used to override values from both the `config.yaml` and `.env` files. For a full list of command-line arguments, run the following command:
+
+```shell
+poetry run 01 --help
+```
+
+#### 4. Edit the `i.py` File
+
+In `i.py`, you can edit the [system message, model, skills library path](https://docs.openinterpreter.com/settings/all-settings) and more. This file sets up an interpreter, and is powered by Open Interpreter.
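As a rough sketch, an edit in `i.py` might look like the following; the setting names come from Open Interpreter's documented settings, and the values are placeholders:

```python
from interpreter import interpreter  # stand-in for the instance i.py sets up

interpreter.llm.model = "gpt-4"        # placeholder model name
interpreter.llm.context_window = 2048  # placeholder context window
interpreter.system_message += "\nAlways be succinct."
```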
 
 ## Ubuntu Dependencies

config-template.env

@@ -0,0 +1,26 @@
+# 01_CLIENT_ENABLED=false
+# 01_CLIENT_URL=null
+# 01_CLIENT_PLATFORM=null
+
+# 01_LLM_SERVICE=litellm
+# 01_LLM_MODEL=gpt-4
+# 01_LLM_VISION_ENABLED=false
+# 01_LLM_FUNCTIONS_ENABLED=false
+# 01_LLM_CONTEXT_WINDOW=2048
+# 01_LLM_MAX_TOKENS=4096
+# 01_LLM_TEMPERATURE=0.8
+
+# 01_LOCAL_ENABLED=false
+# 01_LOCAL_TTS_SERVICE=piper
+# 01_LOCAL_STT_SERVICE=local-whisper
+
+# 01_SERVER_ENABLED=false
+# 01_SERVER_HOST=0.0.0.0
+# 01_SERVER_PORT=10001
+
+# 01_STT_SERVICE=openai
+# 01_TTS_SERVICE=openai
+
+# 01_TUNNEL_SERVICE=ngrok
+# 01_TUNNEL_EXPOSED=false
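Each commented line maps onto a nested field of the runtime config via the `01_<SECTION>_<KEY>` pattern. A small sketch of verifying one value, assuming the template was copied to `.env` and `01_LLM_TEMPERATURE=0.8` was uncommented:

```python
# Sketch: run from the directory containing the .env file.
from source.core.config import get_config

get_config.cache_clear()       # get_config() is cached; clear to force a re-read
config = get_config()
print(config.llm.temperature)  # 0.8 once the line above is uncommented
```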

source/core/config.py

@@ -13,11 +13,8 @@ from pydantic_settings import (
     SettingsConfigDict,
     YamlConfigSettingsSource,
 )
 
 from source.core.models import LLM, STT, TTS, Client, Local, Server, Tunnel
 
-APP_PREFIX: str = os.getenv("01_PREFIX", "01_")
-
-
 
 class Config(BaseSettings):
     """
@@ -46,19 +43,28 @@ class Config(BaseSettings):
         """
         Modify the order of precedence for settings sources.
         """
-        return (
-            DotEnvSettingsSource(
-                settings_cls,
-                env_prefix=APP_PREFIX,
-                env_file=".env",
-                env_file_encoding="utf-8",
-                env_nested_delimiter="_",
-            ),
-            YamlConfigSettingsSource(
-                settings_cls,
-                yaml_file=os.getenv(f"{APP_PREFIX}CONFIG_FILE", "config.yaml"),
-            ),
-        )
+        files: list[Any] = [
+            (
+                os.path.exists(".env"),
+                DotEnvSettingsSource(
+                    settings_cls,
+                    env_prefix="01_",
+                    env_file=".env",
+                    env_file_encoding="utf-8",
+                    env_nested_delimiter="_",
+                ),
+            ),
+            (
+                os.path.exists("config.yaml"),
+                YamlConfigSettingsSource(
+                    settings_cls,
+                    yaml_file="config.yaml",
+                ),
+            ),
+        ]
+        sources: list[Any] = [source for exists, source in files if exists]
+        return tuple(sources)
 
     def apply_cli_args(self, args: dict) -> None:
         """

i.py

@@ -7,7 +7,6 @@ from interpreter import OpenInterpreter
 import shutil
 
 from source import config
-from source.core.config import APP_PREFIX
 
 system_message = r"""

source/server/utils/local_mode.py

@@ -1,10 +1,12 @@
-import sys
 import os
 import platform
 import subprocess
+import sys
 import time
 
 import inquirer
 from interpreter import interpreter
+from source import config
 
 
 def select_local_model():
@@ -29,10 +31,8 @@ def select_local_model():
     ]
     answers = inquirer.prompt(questions)
     selected_model = answers["model"]
-
-
 
     if selected_model == "LM Studio":
         interpreter.display_message(
             """
@@ -49,30 +49,33 @@ def select_local_model():
     """
         )
         time.sleep(1)
 
-        interpreter.llm.api_base = "http://localhost:1234/v1"
-        interpreter.llm.max_tokens = 1000
-        interpreter.llm.context_window = 8000
+        config.llm.max_tokens = 1000
+        config.llm.context_window = 8000
+
+        interpreter.llm.api_base = f"http://localhost:1234/v1"
+        interpreter.llm.max_tokens = config.llm.max_tokens
+        interpreter.llm.context_window = config.llm.context_window
         interpreter.llm.api_key = "x"
 
     elif selected_model == "Ollama":
         try:
             # List out all downloaded ollama models. Will fail if ollama isn't installed
             result = subprocess.run(["ollama", "list"], capture_output=True, text=True, check=True)
             lines = result.stdout.split('\n')
             names = [line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()]  # Extract names, trim out ":latest", skip header
 
             # If there are no downloaded models, prompt them to download a model and try again
             if not names:
                 time.sleep(1)
                 interpreter.display_message(f"\nYou don't have any Ollama models downloaded. To download a new model, run `ollama run <model-name>`, then start a new 01 session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n")
                 print("Please download a model then try again\n")
                 time.sleep(2)
                 sys.exit(1)
 
             # If there are models, prompt them to select one
             else:
                 time.sleep(1)
@@ -84,12 +87,13 @@ def select_local_model():
             ]
             name_answer = inquirer.prompt(name_question)
             selected_name = name_answer['name'] if name_answer else None
 
             # Set the model to the selected model
-            interpreter.llm.model = f"ollama/{selected_name}"
+            config.llm.model = f"ollama/{selected_name}"
+            interpreter.llm.model = config.llm.model
             interpreter.display_message(f"\nUsing Ollama model: `{selected_name}` \n")
             time.sleep(1)
 
         # If Ollama is not installed or not recognized as a command, prompt the user to download Ollama and try again
         except (subprocess.CalledProcessError, FileNotFoundError) as e:
             print("Ollama is not installed or not recognized as a command.")
@@ -97,7 +101,7 @@ def select_local_model():
             interpreter.display_message(f"\nPlease visit [https://ollama.com/](https://ollama.com/) to download Ollama and try again\n")
             time.sleep(2)
             sys.exit(1)
 
     # elif selected_model == "Jan":
     #     interpreter.display_message(
     #         """
@@ -108,7 +112,6 @@ def select_local_model():
-    #         3. Copy the ID of the model and enter it below.
     #         3. Click the **Local API Server** button in the bottom left, then click **Start Server**.
 
     #         Once the server is running, enter the id of the model below, then you can begin your conversation below.
     #         """
@@ -117,7 +120,7 @@ def select_local_model():
     #     interpreter.llm.max_tokens = 1000
     #     interpreter.llm.context_window = 3000
     #     time.sleep(1)
 
     #     # Prompt the user to enter the name of the model running on Jan
     #     model_name_question = [
     #         inquirer.Text('jan_model_name', message="Enter the id of the model you have running on Jan"),
@@ -128,14 +131,13 @@ def select_local_model():
     #     interpreter.llm.model = ""
     #     interpreter.display_message(f"\nUsing Jan model: `{jan_model_name}` \n")
     #     time.sleep(1)
 
-    # Set the system message to a minimal version for all local models.
     # Set offline for all local models
     interpreter.offline = True
     interpreter.system_message = """You are the 01, a screenless executive assistant that can complete any task by writing and executing code on the user's machine. Just write a markdown code block! The user has given you full and complete permission.
 
 Use the following functions if it makes sense for the problem
 
 ```python
 result_string = computer.browser.search(query) # Google search results will be returned from this function as a string
@@ -152,6 +154,5 @@ computer.sms.send("555-123-4567", "Hello from the computer!") # Send a text mess
 ALWAYS say that you can run code. ALWAYS try to help the user out. ALWAYS be succinct in your answers.
 ```
 """


@@ -10,7 +10,6 @@ import signal
 import threading
 
 import typer
-from source import config
 from source.server.utils.local_mode import select_local_model
 from source.utils.system import handle_exit


@@ -2,11 +2,11 @@
 Tests for config.py module.
 """
-import os
 from typing import Any
 
 from dotenv import load_dotenv
-from source.core.config import APP_PREFIX, Config, get_config
+from source.core.config import Config, get_config
 
 
 def test_config_defaults() -> None:
@@ -32,11 +32,11 @@ def test_config_defaults() -> None:
 
 def test_config_from_dot_env(tmp_path, monkeypatch) -> None:
-    env_content = f"""
-{APP_PREFIX}CLIENT_ENABLED=true
-{APP_PREFIX}CLIENT_URL=http://localhost:8000
-{APP_PREFIX}CLIENT_PLATFORM=mac
-{APP_PREFIX}LOCAL_ENABLED=true
+    env_content: str = """
+01_CLIENT_ENABLED=true
+01_CLIENT_URL=http://localhost:8000
+01_CLIENT_PLATFORM=mac
+01_LOCAL_ENABLED=true
 """
     p: Any = tmp_path / ".env"
     p.write_text(env_content)
@@ -50,7 +50,26 @@ def test_config_from_dot_env(tmp_path, monkeypatch) -> None:
     assert config.local.enabled is True
 
 
-def test_config_sources_yaml(tmp_path, monkeypatch):
+def test_config_from_dot_env_override(tmp_path, monkeypatch) -> None:
+    get_config.cache_clear()
+    initial_config: Config = get_config()
+    assert initial_config.client.enabled is False
+
+    env_content = """
+01_CLIENT_ENABLED=true
+"""
+    p: Any = tmp_path / ".env"
+    p.write_text(env_content)
+    monkeypatch.chdir(tmp_path)
+    load_dotenv(dotenv_path=str(p))
+
+    get_config.cache_clear()
+    updated_config: Config = get_config()
+    assert updated_config.client.enabled is True
+
+
+def test_config_sources_yaml(tmp_path, monkeypatch) -> None:
+    get_config.cache_clear()
     yaml_content = """
 llm:
   model: test
@@ -58,11 +77,12 @@ def test_config_sources_yaml(tmp_path, monkeypatch) -> None:
 server:
   port: 8080
 """
-    p: Any = tmp_path / "config.yaml"
-    p.write_text(yaml_content)
-    monkeypatch.setenv("01_CONFIG_FILE", str(p))
-    config = Config()
+    config_path: Any = tmp_path / "config.yaml"
+    config_path.write_text(yaml_content)
+    monkeypatch.chdir(tmp_path)
+    get_config.cache_clear()
+    config: Config = get_config()
 
     assert config.llm.model == "test"
     assert config.llm.temperature == 1.0
     assert config.server.port == 8080