From b7262f3ec82a2e9ba9356e7306c596d2cfab9b02 Mon Sep 17 00:00:00 2001
From: "James C. Palmer"
Date: Sun, 24 Mar 2024 21:00:32 -0700
Subject: [PATCH] Added test cases and config templates, adjusted config
 overrides and documentation.

---
 .gitignore                                 |  3 +
 README.md                                  | 64 ++++++++++++++-----
 software/config-template.env               | 26 ++++++++
 .../{config.yaml => config-template.yaml}  |  0
 software/source/core/config.py             | 34 ++++++----
 software/source/server/i.py                |  1 -
 software/source/server/utils/local_mode.py | 47 +++++++-------
 software/start.py                          |  1 -
 software/tests/test_config.py              | 44 +++++++++----
 9 files changed, 153 insertions(+), 67 deletions(-)
 create mode 100644 software/config-template.env
 rename software/{config.yaml => config-template.yaml} (100%)

diff --git a/.gitignore b/.gitignore
index aeaed36..dfa4c10 100644
--- a/.gitignore
+++ b/.gitignore
@@ -169,3 +169,6 @@ cython_debug/
 _.aifs
 software/output_audio.wav
 .DS_Store
+
+# Configuration files
+config.yaml
diff --git a/README.md b/README.md
index 92c6529..128d0d9 100644
--- a/README.md
+++ b/README.md
@@ -37,35 +37,34 @@ We intend to become the GNU/Linux of this space by staying open, modular, and fr
 
 ```shell
 git clone https://github.com/OpenInterpreter/01 # Clone the repository
 cd 01/software # CD into the source directory
-```
-
-
-
-```shell
 brew install portaudio ffmpeg cmake # Install Mac OSX dependencies
 poetry install # Install Python dependencies
-export OPENAI_API_KEY=sk...
-poetry run 01 # Runs the 01 Light simulator (hold your spacebar, speak, release)
 ```
 
-### Running locally
+## Getting Started
 
-To run locally, you can use command line arguments and environment variables.
+### Using OpenAI's API
 
-Using command line arguments:
+To use 01 with OpenAI's API, you first need to set your API key.
+
+1. Create a `.env` file in the `01/software` directory.
+2. Add `OPENAI_API_KEY=<your-api-key>` to the file.
+3. Run the following command:
 
 ```shell
-poetry run 01 --local --model ollama/mixtral:latest
+poetry run 01
 ```
 
-Using command line arguments and environment variables:
+> Alternatively, you can set the `OPENAI_API_KEY` environment variable in your shell with `export OPENAI_API_KEY=<your-api-key>`.
+
+### Using a Local Model
+
+To use 01 with a local model, run the following command and follow the prompts:
 
 ```shell
-export MODEL=ollama/mixtral:latest
 poetry run 01 --local
 ```
 
-Note, you should replace `ollama/mixtral:latest` with a model installed locally. For supported models, see Open Interpreter's [local providers](https://docs.openinterpreter.com/language-models/local-models/ollama) documentation.
-
 <br>
 
 # Hardware
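For reference, the `.env` file created in the Getting Started steps above needs only a single line; a minimal sketch, where `<your-api-key>` is a placeholder for a real key:

```shell
# 01/software/.env
OPENAI_API_KEY=<your-api-key>
```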
@@ -132,7 +131,40 @@ If you want to run local speech-to-text using Whisper, you must install Rust. Fo
 
 ## Customizations
 
-To customize the behavior of the system, edit the [system message, model, skills library path,](https://docs.openinterpreter.com/settings/all-settings) etc. in `i.py`. This file sets up an interpreter, and is powered by Open Interpreter.
+01 is highly customizable and comes with several ways to modify its behavior, including a `config.yaml` file, a `.env` file, command-line arguments, and the `i.py` file. Follow the steps below to use these customization options.
+
+#### 1. Use a `config.yaml` File
+
+To create a `config.yaml` file, copy the `config-template.yaml` file in the `software` directory.
+
+```shell
+cp config-template.yaml config.yaml
+```
+
+#### 2. Use a `.env` File
+
+To create a `.env` file, copy the `config-template.env` file in the `software` directory.
+
+```shell
+cp config-template.env .env
+```
+
+There are two important points to note when using the `.env` file:
+
+1. Values from the `.env` file automatically override values from the `config.yaml` file.
+2. 01-specific environment variables use the following pattern: `01_<SECTION>_<KEY>`. As an example, to override the `local.enabled` value from your `config.yaml` file, use the `01_LOCAL_ENABLED` environment variable.
+
+#### 3. Use Command-line Arguments
+
+01 comes with a number of command-line arguments. These simplify certain tasks and can also be used to override values from both the `config.yaml` and `.env` files. For a full list of command-line arguments, run the following command:
+
+```shell
+poetry run 01 --help
+```
+
+#### 4. Edit the `i.py` File
+
+In `i.py`, you can edit the [system message, model, skills library path](https://docs.openinterpreter.com/settings/all-settings) and more. This file sets up an interpreter, and is powered by Open Interpreter.
 
 ## Ubuntu Dependencies
 
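To make the override rule in point 1 concrete, here is an illustrative pairing (values invented for the example): if `config.yaml` disables local mode but `.env` sets the matching `01_`-prefixed variable, the `.env` value wins whenever both files are present.

```yaml
# config.yaml (illustrative)
local:
  enabled: false
```

```shell
# .env (illustrative) -- overrides local.enabled from config.yaml
01_LOCAL_ENABLED=true
```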
diff --git a/software/config-template.env b/software/config-template.env
new file mode 100644
index 0000000..29e6ae8
--- /dev/null
+++ b/software/config-template.env
@@ -0,0 +1,26 @@
+# 01_CLIENT_ENABLED=false
+# 01_CLIENT_URL=null
+# 01_CLIENT_PLATFORM=null
+
+# 01_LLM_SERVICE=litellm
+# 01_LLM_MODEL=gpt-4
+# 01_LLM_VISION_ENABLED=false
+# 01_LLM_FUNCTIONS_ENABLED=false
+# 01_LLM_CONTEXT_WINDOW=2048
+# 01_LLM_MAX_TOKENS=4096
+# 01_LLM_TEMPERATURE=0.8
+
+# 01_LOCAL_ENABLED=false
+# 01_LOCAL_TTS_SERVICE=piper
+# 01_LOCAL_STT_SERVICE=local-whisper
+
+# 01_SERVER_ENABLED=false
+# 01_SERVER_HOST=0.0.0.0
+# 01_SERVER_PORT=10001
+
+# 01_STT_SERVICE=openai
+
+# 01_TTS_SERVICE=openai
+
+# 01_TUNNEL_SERVICE=ngrok
+# 01_TUNNEL_EXPOSED=false
diff --git a/software/config.yaml b/software/config-template.yaml
similarity index 100%
rename from software/config.yaml
rename to software/config-template.yaml
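Because `config.yaml` is now gitignored and only the templates are tracked, each user creates their own copy. A sketch of what a filled-in `config.yaml` might look like, with section and key names mirroring `config-template.env` above (the values are illustrative; `config-template.yaml` remains the authoritative schema):

```yaml
# config.yaml -- illustrative sketch; copy config-template.yaml for real defaults
llm:
  service: litellm
  model: gpt-4
  temperature: 0.8
server:
  host: 0.0.0.0
  port: 10001
local:
  enabled: false
```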
""" - return ( - DotEnvSettingsSource( - settings_cls, - env_prefix=APP_PREFIX, - env_file=".env", - env_file_encoding="utf-8", - env_nested_delimiter="_", + files: list[Any] = [ + ( + os.path.exists(".env"), + DotEnvSettingsSource( + settings_cls, + env_prefix="01_", + env_file=".env", + env_file_encoding="utf-8", + env_nested_delimiter="_", + ), ), - YamlConfigSettingsSource( - settings_cls, - yaml_file=os.getenv(f"{APP_PREFIX}CONFIG_FILE", "config.yaml"), + ( + os.path.exists("config.yaml"), + YamlConfigSettingsSource( + settings_cls, + yaml_file="config.yaml", + ), ), - ) + ] + + sources: list[Any] = [source for exists, source in files if exists] + return tuple(sources) def apply_cli_args(self, args: dict) -> None: """ diff --git a/software/source/server/i.py b/software/source/server/i.py index 323fae9..39301c7 100644 --- a/software/source/server/i.py +++ b/software/source/server/i.py @@ -7,7 +7,6 @@ from interpreter import OpenInterpreter import shutil from source import config -from source.core.config import APP_PREFIX system_message = r""" diff --git a/software/source/server/utils/local_mode.py b/software/source/server/utils/local_mode.py index 6d7113c..1681fe4 100644 --- a/software/source/server/utils/local_mode.py +++ b/software/source/server/utils/local_mode.py @@ -1,10 +1,12 @@ -import sys import os import platform import subprocess +import sys import time + import inquirer from interpreter import interpreter +from source import config def select_local_model(): @@ -29,10 +31,8 @@ def select_local_model(): ] answers = inquirer.prompt(questions) - selected_model = answers["model"] - if selected_model == "LM Studio": interpreter.display_message( """ @@ -49,30 +49,33 @@ def select_local_model(): """ ) time.sleep(1) - - interpreter.llm.api_base = "http://localhost:1234/v1" - interpreter.llm.max_tokens = 1000 - interpreter.llm.context_window = 8000 + + config.llm.max_tokens = 1000 + config.llm.context_window = 8000 + + interpreter.llm.api_base = f"http://localhost:1234/v1" + interpreter.llm.max_tokens = config.llm.max_tokens + interpreter.llm.context_window = config.llm.context_window interpreter.llm.api_key = "x" elif selected_model == "Ollama": try: - + # List out all downloaded ollama models. Will fail if ollama isn't installed result = subprocess.run(["ollama", "list"], capture_output=True, text=True, check=True) lines = result.stdout.split('\n') names = [line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()] # Extract names, trim out ":latest", skip header - + # If there are no downloaded models, prompt them to download a model and try again if not names: time.sleep(1) - + interpreter.display_message(f"\nYou don't have any Ollama models downloaded. To download a new model, run `ollama run `, then start a new 01 session. 
diff --git a/software/source/server/i.py b/software/source/server/i.py
index 323fae9..39301c7 100644
--- a/software/source/server/i.py
+++ b/software/source/server/i.py
@@ -7,7 +7,6 @@ from interpreter import OpenInterpreter
 import shutil
 
 from source import config
-from source.core.config import APP_PREFIX
 
 
 system_message = r"""
diff --git a/software/source/server/utils/local_mode.py b/software/source/server/utils/local_mode.py
index 6d7113c..1681fe4 100644
--- a/software/source/server/utils/local_mode.py
+++ b/software/source/server/utils/local_mode.py
@@ -1,10 +1,12 @@
-import sys
 import os
 import platform
 import subprocess
+import sys
 import time
+
 import inquirer
 from interpreter import interpreter
+from source import config
 
 
 def select_local_model():
@@ -29,10 +31,8 @@ def select_local_model():
     ]
 
     answers = inquirer.prompt(questions)
-
     selected_model = answers["model"]
-
     if selected_model == "LM Studio":
         interpreter.display_message(
             """
@@ -49,30 +49,33 @@ def select_local_model():
     """
         )
        time.sleep(1)
-
-        interpreter.llm.api_base = "http://localhost:1234/v1"
-        interpreter.llm.max_tokens = 1000
-        interpreter.llm.context_window = 8000
+
+        config.llm.max_tokens = 1000
+        config.llm.context_window = 8000
+
+        interpreter.llm.api_base = "http://localhost:1234/v1"
+        interpreter.llm.max_tokens = config.llm.max_tokens
+        interpreter.llm.context_window = config.llm.context_window
         interpreter.llm.api_key = "x"
 
     elif selected_model == "Ollama":
         try:
-
+
            # List out all downloaded ollama models. Will fail if ollama isn't installed
            result = subprocess.run(["ollama", "list"], capture_output=True, text=True, check=True)
            lines = result.stdout.split('\n')
            names = [line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()]  # Extract names, trim out ":latest", skip header
-
+
            # If there are no downloaded models, prompt them to download a model and try again
            if not names:
                time.sleep(1)
-
+
                interpreter.display_message(f"\nYou don't have any Ollama models downloaded. To download a new model, run `ollama run <model-name>`, then start a new 01 session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n")
-
+
                print("Please download a model then try again\n")
                time.sleep(2)
                sys.exit(1)
-
+
            # If there are models, prompt them to select one
            else:
                time.sleep(1)
@@ -84,12 +87,13 @@
                ]
                name_answer = inquirer.prompt(name_question)
                selected_name = name_answer['name'] if name_answer else None
-
+
                # Set the model to the selected model
-                interpreter.llm.model = f"ollama/{selected_name}"
+                config.llm.model = f"ollama/{selected_name}"
+                interpreter.llm.model = config.llm.model
                interpreter.display_message(f"\nUsing Ollama model: `{selected_name}` \n")
                time.sleep(1)
-
+
            # If Ollama is not installed or not recognized as a command, prompt the user to download Ollama and try again
        except (subprocess.CalledProcessError, FileNotFoundError) as e:
            print("Ollama is not installed or not recognized as a command.")
@@ -97,7 +101,7 @@
            interpreter.display_message(f"\nPlease visit [https://ollama.com/](https://ollama.com/) to download Ollama and try again\n")
            time.sleep(2)
            sys.exit(1)
-
+
    # elif selected_model == "Jan":
    #     interpreter.display_message(
    #         """
@@ -108,7 +112,6 @@
    #     3. Copy the ID of the model and enter it below.
    #     3. Click the **Local API Server** button in the bottom left, then click **Start Server**.
 
-
    #     Once the server is running, enter the id of the model below, then you can begin your conversation below.
 
    #     """
@@ -117,7 +120,7 @@
    #     interpreter.llm.max_tokens = 1000
    #     interpreter.llm.context_window = 3000
    #     time.sleep(1)
-
+
    #     # Prompt the user to enter the name of the model running on Jan
    #     model_name_question = [
    #         inquirer.Text('jan_model_name', message="Enter the id of the model you have running on Jan"),
@@ -128,14 +131,13 @@
    #     interpreter.llm.model = ""
    #     interpreter.display_message(f"\nUsing Jan model: `{jan_model_name}` \n")
    #     time.sleep(1)
-
 
    # Set the system message to a minimal version for all local models.
    # Set offline for all local models
    interpreter.offline = True
 
-    interpreter.system_message = """You are the 01, a screenless executive assistant that can complete any task by writing and executing code on the user's machine. Just write a markdown code block! The user has given you full and complete permission.
-
+    interpreter.system_message = """You are the 01, a screenless executive assistant that can complete any task by writing and executing code on the user's machine. Just write a markdown code block! The user has given you full and complete permission.
+
    Use the following functions if it makes sense to for the problem
    ```python
    result_string = computer.browser.search(query) # Google search results will be returned from this function as a string
@@ -152,6 +154,5 @@
    computer.sms.send("555-123-4567", "Hello from the computer!") # Send a text message
    ALWAYS say that you can run code. ALWAYS try to help the user out. ALWAYS be succinct in your answers.
    ```
-
+
    """
-
diff --git a/software/start.py b/software/start.py
index 01175bb..1a5369c 100644
--- a/software/start.py
+++ b/software/start.py
@@ -10,7 +10,6 @@ import signal
 import threading
 
 import typer
-
 from source import config
 from source.server.utils.local_mode import select_local_model
 from source.utils.system import handle_exit
diff --git a/software/tests/test_config.py b/software/tests/test_config.py
index 2f6ff2d..d6ca028 100644
--- a/software/tests/test_config.py
+++ b/software/tests/test_config.py
@@ -2,11 +2,11 @@
 Tests for config.py module.
 """
 
+import os
 from typing import Any
 
 from dotenv import load_dotenv
-
-from source.core.config import APP_PREFIX, Config, get_config
+from source.core.config import Config, get_config
 
 
 def test_config_defaults() -> None:
@@ -32,11 +32,11 @@ def test_config_defaults() -> None:
 
 
 def test_config_from_dot_env(tmp_path, monkeypatch) -> None:
-    env_content = f"""
-    {APP_PREFIX}CLIENT_ENABLED=true
-    {APP_PREFIX}CLIENT_URL=http://localhost:8000
-    {APP_PREFIX}CLIENT_PLATFORM=mac
-    {APP_PREFIX}LOCAL_ENABLED=true
+    env_content: str = """
+    01_CLIENT_ENABLED=true
+    01_CLIENT_URL=http://localhost:8000
+    01_CLIENT_PLATFORM=mac
+    01_LOCAL_ENABLED=true
     """
     p: Any = tmp_path / ".env"
     p.write_text(env_content)
@@ -50,7 +50,26 @@ def test_config_from_dot_env(tmp_path, monkeypatch) -> None:
     assert config.local.enabled is True
 
 
-def test_config_sources_yaml(tmp_path, monkeypatch):
+def test_config_from_dot_env_override(tmp_path, monkeypatch) -> None:
+    get_config.cache_clear()
+    initial_config: Config = get_config()
+    assert initial_config.client.enabled is False
+
+    env_content: str = """
+    01_CLIENT_ENABLED=true
+    """
+    p: Any = tmp_path / ".env"
+    p.write_text(env_content)
+    monkeypatch.chdir(tmp_path)
+    load_dotenv(dotenv_path=str(p))
+
+    get_config.cache_clear()
+    updated_config: Config = get_config()
+    assert updated_config.client.enabled is True
+
+
+def test_config_sources_yaml(tmp_path, monkeypatch) -> None:
+    get_config.cache_clear()
     yaml_content = """
     llm:
         model: test
@@ -58,11 +77,12 @@ def test_config_sources_yaml(tmp_path, monkeypatch):
     server:
         port: 8080
     """
-    p: Any = tmp_path / "config.yaml"
-    p.write_text(yaml_content)
-    monkeypatch.setenv("01_CONFIG_FILE", str(p))
+    config_path: Any = tmp_path / "config.yaml"
+    config_path.write_text(yaml_content)
+    monkeypatch.chdir(tmp_path)
 
-    config = Config()
+    get_config.cache_clear()
+    config: Config = get_config()
     assert config.llm.model == "test"
     assert config.llm.temperature == 1.0
     assert config.server.port == 8080
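The `get_config.cache_clear()` calls above imply that `get_config` is memoized; a minimal sketch of such an accessor, assuming `functools.lru_cache` (the actual definition lives in `source/core/config.py` and is only partially visible in this patch):

```python
from functools import lru_cache


@lru_cache(maxsize=1)
def get_config() -> "Config":
    # Config is the BaseSettings subclass defined earlier in config.py.
    # lru_cache memoizes the instance and also gives the function a
    # cache_clear() method, which the tests call to force a rebuild after
    # switching directories or mutating the environment.
    return Config()
```

Since the settings sources are now resolved relative to the current working directory, the tests switch into `tmp_path` with `monkeypatch.chdir` and then clear the cache so the next `get_config()` call picks up only the files that test wrote. Assuming pytest is installed as a dev dependency, `poetry run pytest tests/test_config.py` from the `software` directory should exercise all four tests.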