Merge branch 'issue-116' into config

This commit is contained in:
James C. Palmer 2024-03-23 14:57:15 -07:00
commit 2bf6dbc8c1
3 changed files with 20 additions and 3 deletions

View File

@ -44,10 +44,28 @@ cd 01/software # CD into the source directory
```shell
brew install portaudio ffmpeg cmake # Install Mac OSX dependencies
poetry install # Install Python dependencies
export OPENAI_API_KEY=sk...
poetry run 01 # Runs the 01 Light simulator (hold your spacebar, speak, release)
```
### Running locally
To run locally, you can use command line arguments and environment variables.
Using command line arguments:
```shell
poetry run 01 --local --model ollama/mixtral:latest
```
Using command line arguments and environment variables:
```shell
export MODEL=ollama/mixtral:latest
poetry run 01 --local
```
Note: you should replace `ollama/mixtral:latest` with a model installed locally. For supported models, see Open Interpreter's [local providers](https://docs.openinterpreter.com/language-models/local-models/ollama) documentation.
<br>
# Hardware

View File

@ -295,8 +295,6 @@ def configure_interpreter(interpreter: OpenInterpreter):
# if chunk.get("format") != "active_line":
# print(chunk.get("content"))
import os
from platformdirs import user_data_dir

View File

@ -415,6 +415,7 @@ async def main(server_host, server_port, llm_service, model, llm_supports_vision
globals()[service] = getattr(service_instance, service)
interpreter.llm.completions = llm
interpreter.llm.model = os.getenv('MODEL', model)
# Start listening
asyncio.create_task(listener())