Merge branch 'issue-116' into config
commit 2bf6dbc8c1

README.md
@@ -44,10 +44,28 @@ cd 01/software # CD into the source directory
 ```shell
 brew install portaudio ffmpeg cmake # Install Mac OSX dependencies
 poetry install # Install Python dependencies
-export OPENAI_API_KEY=sk... # OR run `poetry run 01 --local` to run everything locally
+export OPENAI_API_KEY=sk...
 poetry run 01 # Runs the 01 Light simulator (hold your spacebar, speak, release)
 ```
+
+### Running locally
+
+To run locally, you can use command line arguments and environment variables.
+
+Using command line arguments:
+
+```shell
+poetry run 01 --local --model ollama/mixtral:latest
+```
+
+Using command line arguments and environment variables:
+
+```shell
+export MODEL=ollama/mixtral:latest
+poetry run 01 --local
+```
+
+Note, you should replace `ollama/mixtral:latest` with a model installed locally. For supported models, see Open Interpreter's [local providers](https://docs.openinterpreter.com/language-models/local-models/ollama) documentation.
+
 <br>
 
 # Hardware
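The note above asks for a model that is already installed locally. One way to see what a local Ollama install has available is to query its HTTP API; the sketch below is not part of the commit and assumes a default Ollama server listening on localhost:11434.

```python
import json
import urllib.request

# Ask the local Ollama server for its installed models.
# /api/tags returns a JSON object with a "models" list.
with urllib.request.urlopen("http://localhost:11434/api/tags") as resp:
    models = json.load(resp)["models"]

for m in models:
    # Each entry's "name" (e.g. "mixtral:latest") is what goes after
    # the "ollama/" prefix, e.g. --model ollama/mixtral:latest.
    print(m["name"])
```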
@@ -295,8 +295,6 @@ def configure_interpreter(interpreter: OpenInterpreter):
     # if chunk.get("format") != "active_line":
     #     print(chunk.get("content"))
 
-import os
-
 from platformdirs import user_data_dir
 
 
|
@ -415,6 +415,7 @@ async def main(server_host, server_port, llm_service, model, llm_supports_vision
|
||||||
globals()[service] = getattr(service_instance, service)
|
globals()[service] = getattr(service_instance, service)
|
||||||
|
|
||||||
interpreter.llm.completions = llm
|
interpreter.llm.completions = llm
|
||||||
|
interpreter.llm.model = os.getenv('MODEL', model)
|
||||||
|
|
||||||
# Start listening
|
# Start listening
|
||||||
asyncio.create_task(listener())
|
asyncio.create_task(listener())
|
||||||
|
|
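The one-line change above is what wires the README's environment-variable style to the CLI: `os.getenv('MODEL', model)` returns the `MODEL` environment variable when it is set and otherwise falls back to the `--model` argument, so an exported `MODEL` takes precedence. A minimal standalone sketch of that precedence follows (`resolve_model` is a hypothetical name, not part of the commit):

```python
import os

def resolve_model(cli_model: str) -> str:
    # Same pattern as the diff: the MODEL env var wins when set;
    # otherwise fall back to the value passed on the command line.
    return os.getenv("MODEL", cli_model)

os.environ.pop("MODEL", None)
assert resolve_model("ollama/mixtral:latest") == "ollama/mixtral:latest"

os.environ["MODEL"] = "ollama/llama3:latest"
assert resolve_model("ollama/mixtral:latest") == "ollama/llama3:latest"
print("env var takes precedence over the CLI value")
```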