From 216ecd6c9a3c2bae68a610e333bdd9ac96d573ad Mon Sep 17 00:00:00 2001
From: "James C. Palmer"
Date: Fri, 22 Mar 2024 15:57:46 -0700
Subject: [PATCH] Fix `UnboundLocalError` and make model configurable

This commit addresses two issues:

1. Fix the `UnboundLocalError` caused by importing the `os` module both
   at the top level of `software/source/server/i.py` and within its
   `configure_interpreter` function (a minimal repro of the failure
   mode is sketched below).
2. Make `interpreter.llm.model` configurable in
   `software/source/server/server.py` via the `MODEL` environment
   variable or the `--model` command line argument. Defaults to gpt-4.

This allows users to run 01 locally.
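
For context, the failure mode in issue 1 is easiest to see in
isolation: a function-local `import os` makes `os` a local name for the
entire function body, so any earlier reference to `os` inside that
function raises `UnboundLocalError`. A minimal repro sketch (the names
here are hypothetical, not the actual `i.py` code):

```python
import os  # module-level import


def configure():
    # Raises UnboundLocalError: the `import os` further down binds `os`
    # as a local variable for the whole function body, so this earlier
    # reference no longer resolves to the module-level import.
    data_dir = os.path.join("some", "dir")

    import os  # redundant local import; deleting it fixes the error

    return data_dir


configure()
```

Removing the redundant local import, as this patch does, restores the
module-level binding.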

Fixes #116
---
 README.md                        | 20 +++++++++++++++++++-
 software/source/server/i.py      |  2 --
 software/source/server/server.py |  1 +
 3 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 3768847..24d6425 100644
--- a/README.md
+++ b/README.md
@@ -44,10 +44,28 @@ cd 01/software # CD into the source directory
 ```shell
 brew install portaudio ffmpeg cmake # Install Mac OSX dependencies
 poetry install # Install Python dependencies
-export OPENAI_API_KEY=sk... # OR run `poetry run 01 --local` to run everything locally
+export OPENAI_API_KEY=sk...
 poetry run 01 # Runs the 01 Light simulator (hold your spacebar, speak, release)
 ```
 
+### Running locally
+To run locally, you can use command line arguments or environment variables.
+
+Using command line arguments:
+
+```shell
+poetry run 01 --local --model ollama/mixtral:latest
+```
+
+Using an environment variable:
+
+```shell
+export MODEL=ollama/mixtral:latest
+poetry run 01 --local
+```
+
+Note: you should replace `ollama/mixtral:latest` with a model installed locally. For supported models, see Open Interpreter's [local providers](https://docs.openinterpreter.com/language-models/local-models/lm-studio) documentation.
+
 # Hardware
diff --git a/software/source/server/i.py b/software/source/server/i.py
index ada9e96..e14d6f9 100644
--- a/software/source/server/i.py
+++ b/software/source/server/i.py
@@ -295,8 +295,6 @@ def configure_interpreter(interpreter: OpenInterpreter):
     # if chunk.get("format") != "active_line":
     #     print(chunk.get("content"))
 
-    import os
-
     from platformdirs import user_data_dir
 
diff --git a/software/source/server/server.py b/software/source/server/server.py
index 5f9f222..352d41e 100644
--- a/software/source/server/server.py
+++ b/software/source/server/server.py
@@ -415,6 +415,7 @@ async def main(server_host, server_port, llm_service, model, llm_supports_vision
         globals()[service] = getattr(service_instance, service)
 
     interpreter.llm.completions = llm
+    interpreter.llm.model = os.getenv('MODEL', model)
 
     # Start listening
     asyncio.create_task(listener())
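
For completeness, a standalone sketch of the precedence the `server.py`
hunk introduces: the `MODEL` environment variable overrides the
`--model` value, which in turn defaults to gpt-4. The helper name below
is illustrative, not the actual server code:

```python
import os


def resolve_model(cli_model: str = "gpt-4") -> str:
    # The MODEL environment variable takes precedence; otherwise fall
    # back to the value passed via --model (default "gpt-4").
    return os.getenv("MODEL", cli_model)


print(resolve_model())                         # "gpt-4" when MODEL is unset
print(resolve_model("ollama/mixtral:latest"))  # the CLI value when MODEL is unset
```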