Structure

killian 2024-01-27 10:46:15 -08:00
parent 7512783b5c
commit bd9cb4e8b7
6 changed files with 3031 additions and 6 deletions

.DS_Store (binary file, not shown)

@@ -9,6 +9,7 @@ Also needs to be saving conversations, and checking the queue.
 from typing import Generator
 import uvicorn
 from fastapi import FastAPI, Request, Response
+from starlette.exceptions import DisconnectedClientError
 def main(interpreter):
@@ -18,9 +19,15 @@ def main(interpreter):
     async def i_endpoint(request: Request) -> Response:
         async def event_stream() -> Generator[str, None, None]:
             data = await request.json()
-            for response in interpreter.chat(message=data["message"], stream=True):
-                yield response
+            # TODO: Save conversation to /conversations
+            try:
+                for response in interpreter.chat(message=data["message"], stream=True):
+                    yield response
+                    # TODO: Check queue. Do we need to break (I guess we need a while loop around this..?)
+                    # and handle the new message from the queue? Then delete the message from the queue.
+            except DisconnectedClientError:
+                print("Client disconnected")
+            # TODO: Save conversation to /conversations
         return Response(event_stream(), media_type="text/event-stream")
     uvicorn.run(app, host="0.0.0.0", port=8000)
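
For reference, a minimal self-contained sketch of the disconnect-handling pattern this hunk introduces, with two hedged adjustments: Starlette's documented dropped-client exception is starlette.requests.ClientDisconnect (the DisconnectedClientError import above may need revisiting), and a generator body is normally served with StreamingResponse rather than plain Response. The chat function here is a hypothetical stand-in for interpreter.chat(message=..., stream=True):

# Minimal sketch, not the commit's code: a streaming endpoint that
# handles a client dropping the connection mid-stream.
from typing import AsyncGenerator, Generator

import uvicorn
from fastapi import FastAPI, Request
from fastapi.responses import StreamingResponse
from starlette.requests import ClientDisconnect  # Starlette's dropped-client exception

app = FastAPI()

def chat(message: str) -> Generator[str, None, None]:
    yield f"echo: {message}"  # placeholder for the interpreter's token stream

@app.post("/")
async def i_endpoint(request: Request) -> StreamingResponse:
    async def event_stream() -> AsyncGenerator[str, None]:
        try:
            # request.json() raises ClientDisconnect if the client is already gone
            data = await request.json()
            for response in chat(data["message"]):
                yield response
        except ClientDisconnect:
            print("Client disconnected")
    return StreamingResponse(event_stream(), media_type="text/event-stream")

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)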

@@ -4,7 +4,7 @@ Responsible for configuring an interpreter, then using main.py to serve it at "/"
 from .main import main
 from interpreter import interpreter
+import os
 ### SYSTEM MESSAGE
@@ -45,12 +45,16 @@ interpreter.system_message = system_message
 ### LLM SETTINGS
+# Local settings
 interpreter.llm.model = "local"
+interpreter.llm.temperature = 0
 interpreter.llm.api_base = "https://localhost:8080/v1"  # Llamafile default
 interpreter.llm.max_tokens = 1000
 interpreter.llm.context_window = 3000
+# Hosted settings
+interpreter.llm.api_key = os.getenv('OPENAI_API_KEY')
+interpreter.llm.model = "gpt-3.5-turbo"
 ### MISC SETTINGS
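
Note that as committed, the hosted block runs after the local block, so interpreter.llm.model ends up as "gpt-3.5-turbo" and the local values survive only where not overridden. A minimal sketch of making that choice explicit, assuming a hypothetical USE_HOSTED_LLM environment variable that is not part of the commit:

import os
# Assumes `interpreter` is already imported as in the file above.
# USE_HOSTED_LLM is a hypothetical toggle; the commit itself simply
# lets the hosted assignments win.
if os.getenv("USE_HOSTED_LLM", "true") == "true":
    # Hosted settings
    interpreter.llm.api_key = os.getenv("OPENAI_API_KEY")
    interpreter.llm.model = "gpt-3.5-turbo"
else:
    # Local settings (Llamafile default endpoint)
    interpreter.llm.model = "local"
    interpreter.llm.temperature = 0
    interpreter.llm.api_base = "https://localhost:8080/v1"
    interpreter.llm.max_tokens = 1000
    interpreter.llm.context_window = 3000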

OS/01/core/poetry.lock (generated, 3012 additions; diff suppressed because it is too large)

@@ -8,6 +8,7 @@ readme = "README.md"
 [tool.poetry.dependencies]
 python = "^3.11"
+open-interpreter = "^0.2.0"
 [build-system]
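
The ^0.2.0 constraint pairs with the regenerated poetry.lock above (the 3012-line generated file): running `poetry add open-interpreter` updates both files in one step, or `poetry lock` refreshes the lock file after a manual pyproject.toml edit.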

@@ -1,6 +1,7 @@
 ### START THE LANGUAGE MODEL
-python llm/start.py
+# Disabled as we're starting with hosted models
+# python llm/start.py
 ### START THE INTERPRETER