move llm config to profiles directory
commit 4850b4a9f3 (parent 34bd6eabc9)
@@ -364,6 +364,10 @@ class Device:
         if type(chunk) == str:
             chunk = json.loads(chunk)
 
+        if chunk.get("type") == "config":
+            self.tts_service = chunk.get("tts_service")
+            continue
+
         if self.tts_service == "elevenlabs":
             message = chunk
         else:
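The Device loop now treats a {"type": "config"} frame as connection metadata rather than a TTS payload. A minimal client-side sketch of the same handshake follows; the URL, port, and surrounding loop are illustrative assumptions, not the repo's actual Device code:

import asyncio
import json

import websockets  # assumes the `websockets` package is installed


async def listen(ws_url="ws://localhost:10001/"):  # URL/port are illustrative
    async with websockets.connect(ws_url) as websocket:
        tts_service = None
        async for chunk in websocket:
            if isinstance(chunk, str):
                chunk = json.loads(chunk)
            # One-time announcement of the server's TTS provider.
            if isinstance(chunk, dict) and chunk.get("type") == "config":
                tts_service = chunk.get("tts_service")
                continue
            # ElevenLabs streams raw audio bytes; other providers send
            # structured frames, so downstream handling branches on this.
            print(tts_service, type(chunk))


asyncio.run(listen())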
@@ -194,7 +194,7 @@ class AsyncInterpreter:
         self.tts.feed(text_iterator)
 
-        self.tts.play_async(on_audio_chunk=self.on_tts_chunk, muted=False)
+        self.tts.play_async(on_audio_chunk=self.on_tts_chunk, muted=True)
 
         while True:
             await asyncio.sleep(0.1)
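Flipping muted=False to muted=True stops the server machine from playing synthesized audio on its own output; audio now reaches the user only through the on_audio_chunk callback. A minimal sketch of that pattern with RealtimeTTS, which this class appears to build on; the engine choice and callback body are assumptions:

from RealtimeTTS import TextToAudioStream, SystemEngine


def on_tts_chunk(chunk: bytes) -> None:
    # The real server would enqueue this chunk for the websocket;
    # here we just report its size.
    print(f"audio chunk: {len(chunk)} bytes")


stream = TextToAudioStream(SystemEngine())
stream.feed(iter(["Hello from a profile-configured voice."]))
# muted=True suppresses local playback; chunks still reach the callback.
stream.play_async(on_audio_chunk=on_tts_chunk, muted=True)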
@@ -1,10 +1,9 @@
-# make this obvious
-from .profiles.default import interpreter as base_interpreter
-# from .profiles.fast import interpreter as base_interpreter
+# TODO: import from the profiles directory the interpreter directory
+from .profiles.fast import interpreter as base_interpreter
 # from .profiles.local import interpreter as base_interpreter
-# TODO: remove files i.py, llm.py, conftest?, services
+# from .profiles.default import interpreter as base_interpreter
 
 import asyncio
 import traceback
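With this change the active profile is selected by a plain import, so switching providers is a one-line edit in the server file (module names and their pairings taken from the diff):

# Exactly one profile import should be active; each module configures
# the shared `interpreter` object and exports it.
from .profiles.fast import interpreter as base_interpreter       # Groq + ElevenLabs
# from .profiles.local import interpreter as base_interpreter    # local models + Coqui
# from .profiles.default import interpreter as base_interpreter  # GPT-4 + OpenAI TTS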
@@ -12,8 +11,6 @@ import json
 from fastapi import FastAPI, WebSocket
 from fastapi.responses import PlainTextResponse
 from uvicorn import Config, Server
-
-# from interpreter import interpreter as base_interpreter
 from .async_interpreter import AsyncInterpreter
 from fastapi.middleware.cors import CORSMiddleware
 from typing import List, Dict, Any
@@ -24,8 +21,8 @@ os.environ["STT_RUNNER"] = "server"
 os.environ["TTS_RUNNER"] = "server"
 
 
-async def main(server_host, server_port, tts_service):
-    base_interpreter.tts = tts_service
+async def main(server_host, server_port):
+    # interpreter.tts set in the profiles directory!!!!
     interpreter = AsyncInterpreter(base_interpreter)
 
     app = FastAPI()
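Because the profile now owns the TTS choice, the entry point no longer threads tts_service through. An illustrative call site under that assumption (host and port are placeholders, not values from the repo):

import asyncio

# Only host/port remain; the TTS provider comes from the imported profile.
asyncio.run(main("0.0.0.0", 10001))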
@@ -52,6 +49,12 @@ async def main(server_host, server_port, tts_service)
     @app.websocket("/")
     async def websocket_endpoint(websocket: WebSocket):
         await websocket.accept()
+
+        # Send the tts_service value to the client
+        await websocket.send_text(
+            json.dumps({"type": "config", "tts_service": interpreter.interpreter.tts})
+        )
+
         try:
 
             async def receive_input():
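interpreter.interpreter.tts reads as it does because AsyncInterpreter wraps the base interpreter (see the constructor call above); the outer object is presumably the wrapper, the inner one the profile-configured instance. A self-contained sketch of the same first-frame handshake, with everything besides the message shape being an illustrative stand-in:

import json

from fastapi import FastAPI, WebSocket, WebSocketDisconnect

app = FastAPI()
TTS_SERVICE = "elevenlabs"  # stand-in for interpreter.interpreter.tts


@app.websocket("/")
async def websocket_endpoint(websocket: WebSocket):
    await websocket.accept()
    # The config frame goes out before any audio/text chunks so the client
    # can branch on tts_service from the first real payload onward.
    await websocket.send_text(
        json.dumps({"type": "config", "tts_service": TTS_SERVICE})
    )
    try:
        while True:
            data = await websocket.receive_text()
            await websocket.send_text(data)  # echo; stands in for real handling
    except WebSocketDisconnect:
        pass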
@@ -3,6 +3,9 @@ from interpreter import interpreter
 # This is an Open Interpreter compatible profile.
 # Visit https://01.openinterpreter.com/profile for all options.
 
+# 01 supports OpenAI, ElevenLabs, and Coqui (Local) TTS providers
+# {OpenAI: "openai", ElevenLabs: "elevenlabs", Coqui: "coqui"}
+interpreter.tts = "openai"
 
 # Connect your 01 to a language model
 interpreter.llm.model = "gpt-4-turbo"
@@ -10,9 +13,6 @@ interpreter.llm.context_window = 100000
 interpreter.llm.max_tokens = 4096
 # interpreter.llm.api_key = "<your_openai_api_key_here>"
 
-# Give your 01 a voice
-interpreter.tts = "openai"
-
 # Tell your 01 where to find and save skills
 interpreter.computer.skills.path = "./skills"
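Taken together, the two default-profile hunks just move the TTS setting up next to the other provider settings. Reconstructed from the context lines above, the resulting file looks roughly like:

from interpreter import interpreter

# This is an Open Interpreter compatible profile.
# Visit https://01.openinterpreter.com/profile for all options.

# 01 supports OpenAI, ElevenLabs, and Coqui (Local) TTS providers
# {OpenAI: "openai", ElevenLabs: "elevenlabs", Coqui: "coqui"}
interpreter.tts = "openai"

# Connect your 01 to a language model
interpreter.llm.model = "gpt-4-turbo"
interpreter.llm.context_window = 100000
interpreter.llm.max_tokens = 4096
# interpreter.llm.api_key = "<your_openai_api_key_here>"

# Tell your 01 where to find and save skills
interpreter.computer.skills.path = "./skills"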
@@ -1,31 +1,42 @@
-async def main(server_host, server_port, tts_service, asynchronous):
-
-    if asynchronous:
-        base_interpreter.system_message = (
-            "You are a helpful assistant that can answer questions and help with tasks."
-        )
-        base_interpreter.computer.import_computer_api = False
-        base_interpreter.llm.model = "groq/llama3-8b-8192"
-        base_interpreter.llm.api_key = os.environ["GROQ_API_KEY"]
-        base_interpreter.llm.supports_functions = False
-        base_interpreter.auto_run = True
-        base_interpreter.tts = tts_service
-        interpreter = AsyncInterpreter(base_interpreter)
-    else:
-        configured_interpreter = configure_interpreter(base_interpreter)
-        configured_interpreter.llm.supports_functions = True
-        configured_interpreter.tts = tts_service
-        interpreter = AsyncInterpreter(configured_interpreter)
+from interpreter import interpreter
+
+# This is an Open Interpreter compatible profile.
+# Visit https://01.openinterpreter.com/profile for all options.
+
+# 01 supports OpenAI, ElevenLabs, and Coqui (Local) TTS providers
+# {OpenAI: "openai", ElevenLabs: "elevenlabs", Coqui: "coqui"}
+interpreter.tts = "elevenlabs"
+
+# 01 Language Model Config.
+interpreter.llm_service = "litellm"
+interpreter.llm.model = "groq/llama3-8b-8192"
+interpreter.llm.supports_vision = False
+interpreter.llm.supports_functions = False
+interpreter.llm.context_window = 2048
+interpreter.llm.max_tokens = 4096
+interpreter.llm.temperature = 0.8
+
+# interpreter.llm.api_key = os.environ["GROQ_API_KEY"]
+
+interpreter.computer.import_computer_api = False
+
+interpreter.auto_run = True
+interpreter.system_message = (
+    "You are a helpful assistant that can answer questions and help with tasks."
+)
+
+# TODO: include other options in comments in the profiles for tts
+# direct people to the profiles directory to make changes to the interpreter profile
+# this should be made explicit on the docs
+
+"""
+llm_service: str = "litellm",
+model: str = "gpt-4",
+llm_supports_vision: bool = False,
+llm_supports_functions: bool = False,
+context_window: int = 2048,
+max_tokens: int = 4096,
+temperature: float = 0.8,
+tts_service: str = "elevenlabs",
+stt_service: str = "openai",
+"""
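The trailing triple-quoted block is inert at runtime; it reads as a reference copy of the keyword arguments the removed main() used to accept. The commented-out api_key line implies the Groq key is expected in the environment; a hedged sketch of wiring it explicitly (litellm also reads GROQ_API_KEY on its own, so this is likely optional):

import os

from interpreter import interpreter

# Assumes GROQ_API_KEY was exported before starting the server.
interpreter.llm.api_key = os.environ["GROQ_API_KEY"]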
@@ -1,5 +1,9 @@
 from interpreter import interpreter
 
+# 01 supports OpenAI, ElevenLabs, and Coqui (Local) TTS providers
+# {OpenAI: "openai", ElevenLabs: "elevenlabs", Coqui: "coqui"}
+interpreter.tts = "coqui"
+
 # Local setup
 interpreter.local_setup()
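The local profile gets the same provider header, pinned to Coqui; if local_setup() behaves as its name suggests, it then handles choosing and configuring a local model interactively, keeping the profile itself minimal:

# Abridged result: Coqui TTS plus Open Interpreter's local setup flow.
from interpreter import interpreter

interpreter.tts = "coqui"
interpreter.local_setup()  # interactive local-model selection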