diff --git a/README.md b/README.md index 3768847..24d6425 100644 --- a/README.md +++ b/README.md @@ -44,10 +44,28 @@ cd 01/software # CD into the source directory ```shell brew install portaudio ffmpeg cmake # Install Mac OSX dependencies poetry install # Install Python dependencies -export OPENAI_API_KEY=sk... # OR run `poetry run 01 --local` to run everything locally +export OPENAI_API_KEY=sk... poetry run 01 # Runs the 01 Light simulator (hold your spacebar, speak, release) ``` +### Running locally +To run locally, you can use command line arguments and environment variables. + +Using command line arguments: + +```shell +poetry run 01 --local --model ollama/mixtral:latest +``` + +Using command line arguments and environment variables: + +```shell +export MODEL=ollama/mixtral:latest +poetry run 01 --local +``` + +Note: you should replace `ollama/mixtral:latest` with a model installed locally. For supported models, see Open Interpreter's [local providers](https://docs.openinterpreter.com/language-models/local-models/lm-studio) documentation. +
# Hardware diff --git a/software/source/server/i.py b/software/source/server/i.py index ada9e96..e14d6f9 100644 --- a/software/source/server/i.py +++ b/software/source/server/i.py @@ -295,8 +295,6 @@ def configure_interpreter(interpreter: OpenInterpreter): # if chunk.get("format") != "active_line": # print(chunk.get("content")) - import os - from platformdirs import user_data_dir diff --git a/software/source/server/server.py b/software/source/server/server.py index 5f9f222..352d41e 100644 --- a/software/source/server/server.py +++ b/software/source/server/server.py @@ -415,6 +415,7 @@ async def main(server_host, server_port, llm_service, model, llm_supports_vision globals()[service] = getattr(service_instance, service) interpreter.llm.completions = llm + interpreter.llm.model = os.getenv('MODEL', model) # Start listening asyncio.create_task(listener())