Merge branch 'main' into fix/precommit-tests
commit 0eca547fc0
@ -16,11 +16,8 @@ We want to help you build. [Apply for 1-on-1 support.](https://0ggfznkwh4j.typef

<br>

---

-⚠️ **WARNING:** This experimental project is under rapid development and lacks basic safeguards. Until a stable `1.0` release, **ONLY** run this repository on devices without sensitive information or access to paid services. ⚠️
-
----
+> [!IMPORTANT]
+> This experimental project is under rapid development and lacks basic safeguards. Until a stable `1.0` release, only run this repository on devices without sensitive information or access to paid services.

<br>

@ -1,8 +1,8 @@
# Roadmap

Our goal is to power a billion devices with the 01OS over the next 10 years. The Cambrian explosion of AI devices.

We can do that with your help. Help extend the 01OS to run on new hardware, to connect with new peripherals like GPS and cameras, and add new locally running language models to unlock use-cases for this technology that no one has even imagined yet.

In the coming months, we're going to release:

@ -10,4 +10,3 @@ In the coming months, we're going to release:
- [ ] An open-source language model for computer control
- [ ] A react-native app for your phone
- [ ] A hand-held device that runs fully offline.

TASKS.md

@ -36,7 +36,7 @@
- [ ] Sends to describe API
- [ ] prints and returns description
- [ ] Llamafile for phi-2 + moondream
- [ ] test on rPi + Jetson (+android mini phone?)

**OS**

@ -66,7 +66,7 @@

**Hardware**

- [ ] (Hardware and software) Get the 01OS working on the **Jetson** or Pi. Pick one to move forward with.
- [ ] Connect the Seeed Sense (ESP32 with Wifi, Bluetooth and a mic) to a small DAC + amplifier + speaker.
- [ ] Connect the Seeed Sense to a battery.
- [ ] Configure the ESP32 to be a wireless mic + speaker for the Jetson or Pi.

@ -16,11 +16,8 @@

<br>

---

-⚠️ **WARNING:** This experimental project is under rapid development and lacks basic safeguards. Until the stable `1.0` release, **only** run this repository on devices without sensitive information or access to paid services. ⚠️
-
----
+> [!IMPORTANT]
+> This experimental project is under rapid development and lacks basic safeguards. Until the stable `1.0` release, only run this repository on devices without sensitive information or access to paid services.

<br>

@ -39,7 +36,7 @@ git clone https://github.com/OpenInterpreter/01 # Clone the repository
cd 01/software # CD into the source directory
```

<!-- > Not working? Read our [setup guide](https://docs.openinterpreter.com/getting-started/setup). -->

```shell
brew install portaudio ffmpeg cmake # Install Mac OSX dependencies

@ -48,6 +45,8 @@ export OPENAI_API_KEY=sk... # OR run `poetry run 01 --local` to run everything locally
poetry run 01 # Runs the 01 Light simulator (hold your spacebar, speak, release)
```

+<!-- > For Windows installation, read our [dedicated setup guide](https://docs.openinterpreter.com/getting-started/setup#windows). -->

<br>

# Hardware

@ -78,7 +77,9 @@ poetry run 01 # Runs the 01 Light simulator (hold your spacebar, speak, release)

## LMC Messages

-To communicate with different components of this system, we introduce the [LMC Messages](https://docs.openinterpreter.com/protocols/lmc-messages) format, which extends OpenAI's messages format to include a "computer" role.
+To communicate with different components of this system, we introduce the [LMC Messages](https://docs.openinterpreter.com/protocols/lmc-messages) format, which extends OpenAI's messages format to include a "computer" role:

https://github.com/OpenInterpreter/01/assets/63927363/8621b075-e052-46ba-8d2e-d64b9f2a5da9
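
For illustration, a minimal pair of LMC-style messages might look like the following Python sketch. The `role`/`type`/`format`/`content` fields follow the LMC docs linked above; the concrete values are made-up examples.

```python
# Hedged sketch of LMC messages: OpenAI's {role, content} shape plus a
# "computer" role and type/format fields. Values are illustrative only.
user_message = {
    "role": "user",
    "type": "message",
    "content": "What operating system am I running?",
}

computer_message = {
    "role": "computer",  # the extra role LMC adds to OpenAI's format
    "type": "console",
    "format": "output",
    "content": "Darwin 23.1.0",
}

for msg in (user_message, computer_message):
    print(msg["role"], "->", msg["content"])
```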

## Dynamic System Messages

@ -39,7 +39,7 @@ git clone https://github.com/OpenInterpreter/01 # Clone the repository
cd 01/software # CD into the source directory
```

<!-- > Not working? Read our [setup guide](https://docs.openinterpreter.com/getting-started/setup). -->

```shell
brew install portaudio ffmpeg cmake # Install Mac OSX dependencies

@ -48,26 +48,28 @@ export OPENAI_API_KEY=sk... # OR run `poetry run 01 --local` to run everything locally
poetry run 01 # Runs the 01 Light simulator (hold your spacebar, speak, release)
```

<!-- > For a Windows installation, read the [dedicated guide](https://docs.openinterpreter.com/getting-started/setup#windows). -->

<br>

# Hardware

- The **01 Light** is an ESP32-based voice interface. Build instructions are [here](https://github.com/OpenInterpreter/01/tree/main/hardware/light), and a list of what to buy is [here](https://github.com/OpenInterpreter/01/blob/main/hardware/light/BOM.md).
- It works in tandem with the **01 Server** ([setup guide below](https://github.com/OpenInterpreter/01/blob/main/README.md#01-server)) running on your computer.
- **Mac OSX** and **Ubuntu** are supported by running `poetry run 01` (**Windows** is supported experimentally). This uses your spacebar to simulate the 01 Light.
- (coming soon) The **01 Heavy** is a standalone device that runs everything locally.

**We need your help supporting and building more hardware.** The 01 should be able to run on any device with input (microphone, keyboard, etc.), output (speakers, screens, motors, etc.), and an internet connection (or enough compute to run everything locally). [Contribution Guide →](https://github.com/OpenInterpreter/01/blob/main/CONTRIBUTING.md)

<br>

# How does it work?

The 01 exposes a speech-to-speech websocket at `localhost:10001`.

If you stream raw audio bytes to `/` in [Streaming LMC format](https://docs.openinterpreter.com/guides/streaming-response), you will receive its response in the same format.
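
As a rough illustration of that loop, the sketch below streams a WAV file's bytes to the websocket and prints whatever comes back. It is a minimal sketch, assuming a server already running on `localhost:10001`, the `websockets` package installed, and a hypothetical `recording.wav` input; the start/end message fields mirror what the client in this repository sends, but should be checked against the Streaming LMC docs linked above.

```python
# Minimal sketch: stream raw audio bytes to the 01 websocket and print
# the responses. Assumptions: `pip install websockets`, a local server
# on localhost:10001, and a file named recording.wav (hypothetical).
import asyncio
import json

import websockets


async def main():
    async with websockets.connect("ws://localhost:10001/") as ws:
        # Frame the audio with LMC start/end messages.
        await ws.send(json.dumps(
            {"role": "user", "type": "audio", "format": "bytes.wav", "start": True}
        ))
        with open("recording.wav", "rb") as f:
            while chunk := f.read(1024):
                await ws.send(chunk)  # raw audio bytes
        await ws.send(json.dumps(
            {"role": "user", "type": "audio", "format": "bytes.wav", "end": True}
        ))
        # The response arrives in the same format: LMC JSON and audio bytes.
        async for message in ws:
            if isinstance(message, bytes):
                print(f"<{len(message)} audio bytes>")
            else:
                print(message)


asyncio.run(main())
```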

Inspired in part by [Andrej Karpathy's LLM OS](https://twitter.com/karpathy/status/1723140519554105733), we run a [code-interpreting language model](https://github.com/OpenInterpreter/open-interpreter) and call it when certain events occur in [your computer's kernel](https://github.com/OpenInterpreter/01/blob/main/software/source/server/utils/kernel.py).

The 01 wraps this in a voice interface:

@ -75,29 +77,29 @@ The 01 wraps this in a voice interface:

<img width="100%" alt="LMC" src="https://github.com/OpenInterpreter/01/assets/63927363/52417006-a2ca-4379-b309-ffee3509f5d4"><br><br>

# Protocols

## LMC Messages

To communicate with different components of this system, we introduce the [LMC Messages](https://docs.openinterpreter.com/protocols/lmc-messages) format, which extends OpenAI's messages format to include a new "computer" role:

https://github.com/OpenInterpreter/01/assets/63927363/8621b075-e052-46ba-8d2e-d64b9f2a5da9

## Dynamic System Messages

Dynamic System Messages let you execute code inside the LLM's system message, just before it is shown to the AI.

```python
# Edit the following settings in i.py
interpreter.system_message = r" The time is {{time.time()}}. "  # Anything in double brackets will be executed as Python
interpreter.chat("What time is it?")  # It will know, without making any tool or API calls
```

# Guides

## 01 Server

To run the server on your computer and connect it to your 01 Light, run the following commands:

```shell
brew install ngrok/ngrok/ngrok

@ -107,7 +109,7 @@ poetry run 01 --server --expose

The last command will print a server URL. You can enter this into your 01 Light's captive WiFi portal to connect it to your 01 server.

## Local Mode

```
poetry run 01 --local

@ -117,9 +119,9 @@ If you want to run speech-to-text locally using Whisper, you must install Rust

## Customization

To customize the behavior of the system, edit the [`system message`, `model`, `skills library path`,](https://docs.openinterpreter.com/settings/all-settings) etc. in `i.py`. This file configures an interpreter powered by Open Interpreter.
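
For instance, a hypothetical `i.py` tweak might look like the sketch below. The attribute names follow Open Interpreter's documented settings; the specific values are examples, not recommendations.

```python
# Hypothetical customization sketch for i.py, which configures the
# Open Interpreter instance that powers the 01.
from interpreter import interpreter

interpreter.llm.model = "gpt-4"                # which model to call
interpreter.llm.context_window = 2048          # matches the server default
interpreter.custom_instructions = "Prefer metric units."  # appended to the system message
```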

## Ubuntu Dependencies

```bash
sudo apt-get install portaudio19-dev ffmpeg cmake

@ -135,7 +137,7 @@ Please see our [contributing guidelines](CONTRIBUTING.md) for more details

# Roadmap

Visit [our roadmap](/ROADMAP.md) to see the future of the 01.

<br>

@ -34,9 +34,9 @@ poetry run 01 --client

### Flags

- `--client`
  Run client.

- `--client-type TEXT`
  Specify the client type.
  Default: `auto`.

@ -37,12 +37,16 @@ On Windows you will need to install the following:

- [Git for Windows](https://git-scm.com/download/win).
- [virtualenv](https://virtualenv.pypa.io/en/latest/installation.html) or [MiniConda](https://docs.anaconda.com/free/miniconda/miniconda-install/) to manage virtual environments.
- [Chocolatey](https://chocolatey.org/install#individual) to install the required packages.
- [Microsoft C++ Build Tools](https://visualstudio.microsoft.com/visual-cpp-build-tools):
  - Choose [**Download Build Tools**](https://visualstudio.microsoft.com/visual-cpp-build-tools/).
  - Run the downloaded file **vs_BuildTools.exe**.
  - In the installer, select **Workloads** > **Desktop & Mobile** > **Desktop Development with C++**.

With these installed, you can run the following commands in a **PowerShell terminal as an administrator**:

```powershell
# Install the required packages
-choco install -y ffmpeg cmake
+choco install -y ffmpeg
```

## Install 01

@ -44,73 +44,73 @@ For more information, please read about <a href="/services/speech-to-text">speech-to-text</a>

## CLI Flags

- `--server`
  Run server.

- `--server-host TEXT`
  Specify the server host where the server will deploy.
  Default: `0.0.0.0`.

- `--server-port INTEGER`
  Specify the server port where the server will deploy.
  Default: `10001`.

- `--tunnel-service TEXT`
  Specify the tunnel service.
  Default: `ngrok`.

- `--expose`
  Expose server to internet.

- `--server-url TEXT`
  Specify the server URL that the client should expect.
  Defaults to server-host and server-port.
  Default: `None`.

- `--llm-service TEXT`
  Specify the LLM service.
  Default: `litellm`.

- `--model TEXT`
  Specify the model.
  Default: `gpt-4`.

- `--llm-supports-vision`
  Specify if the LLM service supports vision.

- `--llm-supports-functions`
  Specify if the LLM service supports functions.

- `--context-window INTEGER`
  Specify the context window size.
  Default: `2048`.

- `--max-tokens INTEGER`
  Specify the maximum number of tokens.
  Default: `4096`.

- `--temperature FLOAT`
  Specify the temperature for generation.
  Default: `0.8`.

- `--tts-service TEXT`
  Specify the TTS service.
  Default: `openai`.

- `--stt-service TEXT`
  Specify the STT service.
  Default: `openai`.

- `--local`
  Use recommended local services for LLM, STT, and TTS.

- `--install-completion [bash|zsh|fish|powershell|pwsh]`
  Install completion for the specified shell.
  Default: `None`.

- `--show-completion [bash|zsh|fish|powershell|pwsh]`
  Show completion for the specified shell, to copy it or customize the installation.
  Default: `None`.

- `--help`
  Show this message and exit.
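
For example, here is a sketch of launching the server from Python with a few of these flags. The flag names come from the list above, while the use of `subprocess` and the chosen values are illustrative assumptions.

```python
# Illustrative only: launch the 01 server with a few documented flags.
# The values are example choices, not recommendations.
import subprocess

subprocess.run([
    "poetry", "run", "01",
    "--server",
    "--server-host", "0.0.0.0",
    "--server-port", "10001",
    "--model", "gpt-4",
    "--temperature", "0.8",
])
```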

@ -29,4 +29,4 @@

.body {
    font-weight: normal;
}

@ -22,13 +22,13 @@ Please install first [PlatformIO](http://platformio.org/) open source ecosystem

```bash
cd software/source/clients/esp32/src/client/
```

And build and upload the firmware with a simple command:

```bash
pio run --target upload
```

## Wifi

@ -19,4 +19,4 @@

d. Now the Jetson should have connectivity!

@ -10,4 +10,4 @@
# For later

- [ ] We could have `/i` which other interpreters hit. That behaves more like the OpenAI POST endpoint, with stream=True by default (I think this is important so users can see the exchange happening in real time, streaming `event/stream` or whatever). You could imagine some kind of handshake: another interpreter hits my interpreter's /i → the sender is unrecognized → a computer message is sent to /, prompting the AI to ask the user to have the sending interpreter send a specific code → the user tells the sending interpreter to use that specific code → the sender is recognized and added to a friends list (`computer.inetwork.friends()`) → now they can hit each other's /i endpoints freely with `computer.inetwork.friend(id).message("hey")`. A sketch of this handshake follows this list.
- [ ] (OS team: this will require coordination with the OI core team, so let's talk about it / I'll explain at the next meetup.) When transferring skills that require OS control, the sender can replace those skills with that command, with one input "natural language query" (?) preceded by the skill function name or something like that. Basically, if you ask it to do something you set up as a skill, it actually asks your computer to do it. If you ask your computer to do it directly, it's more direct.
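
To make the first idea concrete, here is a rough, entirely hypothetical sketch of that `/i` handshake. None of these names exist yet (`computer.inetwork` is taken from the note above), so treat this as pseudocode for the flow rather than a real API.

```python
# Hypothetical sketch of the /i handshake described above.
# Nothing here is a real API; names mirror the note's wording.
import secrets

friends: dict[str, str] = {}        # sender_id -> display name
pending_codes: dict[str, str] = {}  # sender_id -> one-time code


def receive_on_i(sender_id: str, payload: str, code: str | None = None) -> str:
    if sender_id in friends:
        return f"accepted: {payload!r} from {friends[sender_id]}"
    if code is not None and pending_codes.get(sender_id) == code:
        friends[sender_id] = sender_id  # recognized: add to the friends list
        return "recognized: sender added to friends list"
    # Unrecognized sender: a computer message would go to /, prompting the
    # AI to ask the user to share this one-time code with the sender.
    pending_codes[sender_id] = secrets.token_hex(4)
    return "unrecognized: ask the user for the code, then retry with it"


print(receive_on_i("other-interpreter", "hey"))
code = pending_codes["other-interpreter"]
print(receive_on_i("other-interpreter", "hey", code))
print(receive_on_i("other-interpreter", "hey again"))
```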

@ -1,4 +1,3 @@
_archive
__pycache__
.idea

(File diff suppressed because it is too large)

@ -33,6 +33,7 @@ dateparser = "^1.2.0"
pytimeparse = "^1.1.8"
python-crontab = "^3.0.0"
inquirer = "^3.2.4"
pyqrcode = "^1.2.1"

[build-system]
requires = ["poetry-core"]

@ -53,4 +54,4 @@ target-version = ['py311']
[tool.isort]
profile = "black"
multi_line_output = 3
include_trailing_comma = true

@ -1,23 +1,18 @@
from dotenv import load_dotenv

load_dotenv()  # take environment variables from .env.

import os
import asyncio
import threading
-import os
import pyaudio
from starlette.websockets import WebSocket
from queue import Queue
from pynput import keyboard
import json
import traceback
import websockets
import queue
import pydub
import ast
from pydub import AudioSegment
from pydub.playback import play
import io
import time
import wave
import tempfile

@ -25,7 +20,10 @@ from datetime import datetime
import cv2
import base64
import platform
-from interpreter import interpreter  # Just for code execution. Maybe we should let people do from interpreter.computer import run?
+from interpreter import (
+    interpreter,
+)  # Just for code execution. Maybe we should let people do from interpreter.computer import run?

# In the future, I guess kernel watching code should be elsewhere? Somewhere server / client agnostic?
from ..server.utils.kernel import put_kernel_messages_into_queue
from ..server.utils.get_system_info import get_system_info

@ -33,6 +31,7 @@ from ..server.utils.process_utils import kill_process_tree

from ..server.utils.logs import setup_logging
from ..server.utils.logs import logger

setup_logging()

os.environ["STT_RUNNER"] = "server"

@ -51,11 +50,11 @@ RECORDING = False  # Flag to control recording state
SPACEBAR_PRESSED = False  # Flag to track spacebar press state

# Camera configuration
-CAMERA_ENABLED = os.getenv('CAMERA_ENABLED', False)
+CAMERA_ENABLED = os.getenv("CAMERA_ENABLED", False)
if type(CAMERA_ENABLED) == str:
-    CAMERA_ENABLED = (CAMERA_ENABLED.lower() == "true")
-CAMERA_DEVICE_INDEX = int(os.getenv('CAMERA_DEVICE_INDEX', 0))
-CAMERA_WARMUP_SECONDS = float(os.getenv('CAMERA_WARMUP_SECONDS', 0))
+    CAMERA_ENABLED = CAMERA_ENABLED.lower() == "true"
+CAMERA_DEVICE_INDEX = int(os.getenv("CAMERA_DEVICE_INDEX", 0))
+CAMERA_WARMUP_SECONDS = float(os.getenv("CAMERA_WARMUP_SECONDS", 0))

# Specify OS
current_platform = get_system_info()

@ -66,6 +65,7 @@ p = pyaudio.PyAudio()

send_queue = queue.Queue()


class Device:
    def __init__(self):
        self.pressed_keys = set()

@ -89,23 +89,28 @@ class Device:

        if ret:
            temp_dir = tempfile.gettempdir()
-            image_path = os.path.join(temp_dir, f"01_photo_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.png")
+            image_path = os.path.join(
+                temp_dir, f"01_photo_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.png"
+            )
            self.captured_images.append(image_path)
            cv2.imwrite(image_path, frame)
            logger.info(f"Camera image captured to {image_path}")
-            logger.info(f"You now have {len(self.captured_images)} images which will be sent along with your next audio message.")
+            logger.info(
+                f"You now have {len(self.captured_images)} images which will be sent along with your next audio message."
+            )
        else:
-            logger.error(f"Error: Couldn't capture an image from camera ({camera_index})")
+            logger.error(
+                f"Error: Couldn't capture an image from camera ({camera_index})"
+            )

        cap.release()

        return image_path

    def encode_image_to_base64(self, image_path):
        """Encodes an image file to a base64 string."""
        with open(image_path, "rb") as image_file:
-            return base64.b64encode(image_file.read()).decode('utf-8')
+            return base64.b64encode(image_file.read()).decode("utf-8")

    def add_image_to_send_queue(self, image_path):
        """Encodes an image and adds an LMC message to the send queue with the image data."""

@ -114,7 +119,7 @@ class Device:
            "role": "user",
            "type": "image",
            "format": "base64.png",
-            "content": base64_image
+            "content": base64_image,
        }
        send_queue.put(image_message)
        # Delete the image file from the file system after sending it

@ -126,7 +131,6 @@ class Device:
            self.add_image_to_send_queue(image_path)
        self.captured_images.clear()  # Clear the list after sending

    async def play_audiosegments(self):
        """Plays them sequentially."""
        while True:

@ -141,27 +145,35 @@ class Device:
        except:
            logger.info(traceback.format_exc())

    def record_audio(self):
-        if os.getenv('STT_RUNNER') == "server":
+        if os.getenv("STT_RUNNER") == "server":
            # STT will happen on the server. we're sending audio.
-            send_queue.put({"role": "user", "type": "audio", "format": "bytes.wav", "start": True})
-        elif os.getenv('STT_RUNNER') == "client":
+            send_queue.put(
+                {"role": "user", "type": "audio", "format": "bytes.wav", "start": True}
+            )
+        elif os.getenv("STT_RUNNER") == "client":
            # STT will happen here, on the client. we're sending text.
            send_queue.put({"role": "user", "type": "message", "start": True})
        else:
            raise Exception("STT_RUNNER must be set to either 'client' or 'server'.")

        """Record audio from the microphone and add it to the queue."""
-        stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK)
+        stream = p.open(
+            format=FORMAT,
+            channels=CHANNELS,
+            rate=RATE,
+            input=True,
+            frames_per_buffer=CHUNK,
+        )
        print("Recording started...")
        global RECORDING

        # Create a temporary WAV file to store the audio data
        temp_dir = tempfile.gettempdir()
-        wav_path = os.path.join(temp_dir, f"audio_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav")
-        wav_file = wave.open(wav_path, 'wb')
+        wav_path = os.path.join(
+            temp_dir, f"audio_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav"
+        )
+        wav_file = wave.open(wav_path, "wb")
        wav_file.setnchannels(CHANNELS)
        wav_file.setsampwidth(p.get_sample_size(FORMAT))
        wav_file.setframerate(RATE)

@ -178,17 +190,30 @@ class Device:
        duration = wav_file.getnframes() / RATE
        if duration < 0.3:
            # Just pressed it. Send stop message
-            if os.getenv('STT_RUNNER') == "client":
+            if os.getenv("STT_RUNNER") == "client":
                send_queue.put({"role": "user", "type": "message", "content": "stop"})
                send_queue.put({"role": "user", "type": "message", "end": True})
            else:
-                send_queue.put({"role": "user", "type": "audio", "format": "bytes.wav", "content": ""})
-                send_queue.put({"role": "user", "type": "audio", "format": "bytes.wav", "end": True})
+                send_queue.put(
+                    {
+                        "role": "user",
+                        "type": "audio",
+                        "format": "bytes.wav",
+                        "content": "",
+                    }
+                )
+                send_queue.put(
+                    {
+                        "role": "user",
+                        "type": "audio",
+                        "format": "bytes.wav",
+                        "end": True,
+                    }
+                )
        else:
            self.queue_all_captured_images()

-            if os.getenv('STT_RUNNER') == "client":
+            if os.getenv("STT_RUNNER") == "client":
                # THIS DOES NOT WORK. We moved to this very cool stt_service, llm_service
                # way of doing things. stt_wav is not a thing anymore. Needs work to work

@ -199,12 +224,19 @@ class Device:
                send_queue.put({"role": "user", "type": "message", "end": True})
            else:
                # Stream audio
-                with open(wav_path, 'rb') as audio_file:
+                with open(wav_path, "rb") as audio_file:
                    byte_data = audio_file.read(CHUNK)
                    while byte_data:
                        send_queue.put(byte_data)
                        byte_data = audio_file.read(CHUNK)
-                send_queue.put({"role": "user", "type": "audio", "format": "bytes.wav", "end": True})
+                send_queue.put(
+                    {
+                        "role": "user",
+                        "type": "audio",
+                        "format": "bytes.wav",
+                        "end": True,
+                    }
+                )

        if os.path.exists(wav_path):
            os.remove(wav_path)

@ -227,24 +259,27 @@ class Device:

        if keyboard.Key.space in self.pressed_keys:
            self.toggle_recording(True)
-        elif {keyboard.Key.ctrl, keyboard.KeyCode.from_char('c')} <= self.pressed_keys:
+        elif {keyboard.Key.ctrl, keyboard.KeyCode.from_char("c")} <= self.pressed_keys:
            logger.info("Ctrl+C pressed. Exiting...")
            kill_process_tree()
            os._exit(0)

    def on_release(self, key):
        """Detect spacebar release and 'c' key press for camera, and handle key release."""
-        self.pressed_keys.discard(key)  # Remove the released key from the key press tracking set
+        self.pressed_keys.discard(
+            key
+        )  # Remove the released key from the key press tracking set

        if key == keyboard.Key.space:
            self.toggle_recording(False)
-        elif CAMERA_ENABLED and key == keyboard.KeyCode.from_char('c'):
+        elif CAMERA_ENABLED and key == keyboard.KeyCode.from_char("c"):
            self.fetch_image_from_camera()

    async def message_sender(self, websocket):
        while True:
-            message = await asyncio.get_event_loop().run_in_executor(None, send_queue.get)
+            message = await asyncio.get_event_loop().run_in_executor(
+                None, send_queue.get
+            )
            if isinstance(message, bytes):
                await websocket.send(message)
            else:

@ -257,7 +292,9 @@ class Device:

    async def exec_ws_communication(websocket):
        if CAMERA_ENABLED:
-            print("\nHold the spacebar to start recording. Press 'c' to capture an image from the camera. Press CTRL-C to exit.")
+            print(
+                "\nHold the spacebar to start recording. Press 'c' to capture an image from the camera. Press CTRL-C to exit."
+            )
        else:
            print("\nHold the spacebar to start recording. Press CTRL-C to exit.")

@ -280,7 +317,6 @@ class Device:
                # At this point, we have our message

                if message["type"] == "audio" and message["format"].startswith("bytes"):
                    # Convert bytes to audio file
                    audio_bytes = message["content"]

@ -294,13 +330,13 @@ class Device:
                        # 16,000 Hz frame rate
                        frame_rate=16000,
                        # mono sound
-                        channels=1
+                        channels=1,
                    )

                    self.audiosegments.append(audio)

                # Run the code if that's the client's job
-                if os.getenv('CODE_RUNNER') == "client":
+                if os.getenv("CODE_RUNNER") == "client":
                    if message["type"] == "code" and "end" in message:
                        language = message["format"]
                        code = message["content"]

@ -308,7 +344,7 @@ class Device:
                        send_queue.put(result)

    if is_win10():
-        logger.info('Windows 10 detected')
+        logger.info("Windows 10 detected")
        # Workaround for Windows 10 not latching to the websocket server.
        # See https://github.com/OpenInterpreter/01/issues/197
        try:

@ -329,42 +365,46 @@ class Device:
            await asyncio.sleep(2)

    async def start_async(self):
        # Configuration for WebSocket
        WS_URL = f"ws://{self.server_url}"
        # Start the WebSocket communication
        asyncio.create_task(self.websocket_communication(WS_URL))

        # Start watching the kernel if it's your job to do that
-        if os.getenv('CODE_RUNNER') == "client":
+        if os.getenv("CODE_RUNNER") == "client":
            asyncio.create_task(put_kernel_messages_into_queue(send_queue))

        asyncio.create_task(self.play_audiosegments())

        # If Raspberry Pi, add the button listener, otherwise use the spacebar
        if current_platform.startswith("raspberry-pi"):
            logger.info("Raspberry Pi detected, using button on GPIO pin 15")
            # Use GPIO pin 15
            pindef = ["gpiochip4", "15"]  # gpiofind PIN15
            print("PINDEF", pindef)

            # HACK: needs passwordless sudo
-            process = await asyncio.create_subprocess_exec("sudo", "gpiomon", "-brf", *pindef, stdout=asyncio.subprocess.PIPE)
+            process = await asyncio.create_subprocess_exec(
+                "sudo", "gpiomon", "-brf", *pindef, stdout=asyncio.subprocess.PIPE
+            )
            while True:
                line = await process.stdout.readline()
                if line:
                    line = line.decode().strip()
                    if "FALLING" in line:
                        self.toggle_recording(False)
                    elif "RISING" in line:
                        self.toggle_recording(True)
                else:
                    break
        else:
            # Keyboard listener for spacebar press/release
-            listener = keyboard.Listener(on_press=self.on_press, on_release=self.on_release)
+            listener = keyboard.Listener(
+                on_press=self.on_press, on_release=self.on_release
+            )
            listener.start()

    def start(self):
-        if os.getenv('TEACH_MODE') != "True":
+        if os.getenv("TEACH_MODE") != "True":
            asyncio.run(self.start_async())
        p.terminate()

@ -19,11 +19,10 @@ Please install first [PlatformIO](http://platformio.org/) open source ecosystem

```bash
cd client/
```

And build and upload the firmware with a simple command:

```bash
pio run --target upload
```

@ -11,6 +11,9 @@
#include <WiFiMulti.h>
#include <WiFiClientSecure.h>
#include <WebSocketsClient.h>
+#include <Preferences.h>
+
+Preferences preferences;

String server_domain = "";
int server_port = 10001;

@ -37,6 +40,7 @@ const int kNetworkTimeout = 30 * 1000;
// Number of milliseconds to wait if no data is available before trying again
const int kNetworkDelay = 1000;

String generateHTMLWithSSIDs()
{
    String html = "<!DOCTYPE html><html><head><title>WiFi Setup</title>"

@ -74,11 +78,11 @@ const char post_connected_html[] PROGMEM = R"=====(
<head>
    <title>01OS Setup</title>
    <style>

    * {
        box-sizing: border-box;
    }

    body {
        background-color: #fff;
        margin: 0;

@ -118,15 +122,15 @@ const char post_connected_html[] PROGMEM = R"=====(
    input[type="submit"]:hover {
        background-color: #333;
    }

    #error_message {
        color: red;
        font-weight: bold;
        text-align: center;
        width: 100%;
        margin-top: 20px;
        max-width: 300px;
    }
    </style>

@ -140,7 +144,7 @@ const char post_connected_html[] PROGMEM = R"=====(
    <input type="text" id="server_address" name="server_address"><br><br>
    </div>

    <input type="submit" value="Connect"/>
    <p id="error_message"></p>

@ -215,25 +219,25 @@ void startSoftAccessPoint(const char *ssid, const char *password, const IPAddress &localIP)
    vTaskDelay(100 / portTICK_PERIOD_MS); // Add a small delay
}

-void connectToWifi(String ssid, String password)
-{
+void connectToWifi(String ssid, String password) {
    WiFi.begin(ssid.c_str(), password.c_str());

    // Wait for connection to establish
    int attempts = 0;
-    while (WiFi.status() != WL_CONNECTED && attempts < 20)
-    {
+    while (WiFi.status() != WL_CONNECTED && attempts < 20) {
        delay(1000);
        Serial.print(".");
        attempts++;
    }

-    if (WiFi.status() == WL_CONNECTED)
-    {
+    if (WiFi.status() == WL_CONNECTED) {
        Serial.println("Connected to Wi-Fi");
-    }
-    else
-    {
+
+        // Store credentials on successful connection
+        preferences.begin("wifi", false); // Open Preferences with my-app namespace. RW-mode is false by default.
+        preferences.putString("ssid", ssid); // Put your SSID.
+        preferences.putString("password", password); // Put your PASSWORD.
+        preferences.end(); // Close the Preferences.
+    } else {
        Serial.println("Failed to connect to Wi-Fi. Check credentials.");
    }
}

@ -266,7 +270,7 @@ bool connectTo01OS(String server_address)
        portStr = server_address.substring(colonIndex + 1);
    } else {
        domain = server_address;
        portStr = "";
    }

    WiFiClient c;

@ -277,7 +281,7 @@ bool connectTo01OS(String server_address)
        port = portStr.toInt();
    }

    HttpClient http(c, domain.c_str(), port);
    Serial.println("Connecting to 01OS at " + domain + ":" + port + "/ping");

    if (domain.indexOf("ngrok") != -1) {

@ -302,6 +306,9 @@ bool connectTo01OS(String server_address)
        server_domain = domain;
        server_port = port;
        connectionSuccess = true;
+        preferences.begin("network", false); // Use a different namespace for network settings
+        preferences.putString("server_url", server_address); // Store the server URL
+        preferences.end(); // Close the Preferences
    }

    err = http.skipResponseHeaders();

@ -356,6 +363,7 @@ bool connectTo01OS(String server_address)
        Serial.print("Connection failed: ");
        Serial.println(err);
    }

    return connectionSuccess;
}

@ -428,7 +436,7 @@ void setUpWebserver(AsyncWebServer &server, const IPAddress &localIP)
    {
        String ssid;
        String password;

        // Check if SSID parameter exists and assign it
        if(request->hasParam("ssid", true)) {
            ssid = request->getParam("ssid", true)->value();

@ -438,7 +446,7 @@ void setUpWebserver(AsyncWebServer &server, const IPAddress &localIP)
            Serial.println("OTHER SSID SELECTED: " + ssid);
        }
    }

    // Check if Password parameter exists and assign it
    if(request->hasParam("password", true)) {
        password = request->getParam("password", true)->value();

@ -447,7 +455,10 @@ void setUpWebserver(AsyncWebServer &server, const IPAddress &localIP)
    // Serial.println(password);

    // Attempt to connect to the Wi-Fi network with these credentials
-    connectToWifi(ssid, password);
+    if(request->hasParam("password", true) && request->hasParam("ssid", true)) {
+        connectToWifi(ssid, password);
+    }

    // Redirect user or send a response back
    if (WiFi.status() == WL_CONNECTED) {

@ -455,7 +466,7 @@ void setUpWebserver(AsyncWebServer &server, const IPAddress &localIP)
    AsyncWebServerResponse *response = request->beginResponse(200, "text/html", htmlContent);
    response->addHeader("Cache-Control", "public,max-age=31536000"); // save this file to cache for 1 year (unless you refresh)
    request->send(response);
    Serial.println("Served Post connection HTML Page");
    } else {
        request->send(200, "text/plain", "Failed to connect to " + ssid);
    } });

@ -463,7 +474,7 @@ void setUpWebserver(AsyncWebServer &server, const IPAddress &localIP)
    server.on("/submit_01os", HTTP_POST, [](AsyncWebServerRequest *request)
    {
        String server_address;

        // Check if the server_address parameter exists and assign it
        if(request->hasParam("server_address", true)) {
            server_address = request->getParam("server_address", true)->value();

@ -479,7 +490,7 @@ void setUpWebserver(AsyncWebServer &server, const IPAddress &localIP)
    {
        AsyncWebServerResponse *response = request->beginResponse(200, "text/html", successHtml);
        response->addHeader("Cache-Control", "no-cache, no-store, must-revalidate"); // Prevent caching of this page
        request->send(response);
        Serial.println(" ");
        Serial.println("Connected to 01 websocket!");
        Serial.println(" ");

@ -491,7 +502,7 @@ void setUpWebserver(AsyncWebServer &server, const IPAddress &localIP)
    String htmlContent = String(post_connected_html); // Load your HTML template
    // Inject the error message
    htmlContent.replace("<p id=\"error_message\"></p>", "<p id=\"error_message\" style=\"color: red;\">Error connecting, please try again.</p>");

    AsyncWebServerResponse *response = request->beginResponse(200, "text/html", htmlContent);
    response->addHeader("Cache-Control", "no-cache, no-store, must-revalidate"); // Prevent caching of this page
    request->send(response);

@ -499,7 +510,54 @@ void setUpWebserver(AsyncWebServer &server, const IPAddress &localIP)
    }
    });
}

+void tryReconnectWiFi() {
+    Serial.println("Checking for stored WiFi credentials...");
+    preferences.begin("wifi", true); // Open Preferences with my-app namespace in ReadOnly mode
+    String ssid = preferences.getString("ssid", ""); // Get stored SSID, if any
+    String password = preferences.getString("password", ""); // Get stored password, if any
+    preferences.end(); // Close the Preferences
+
+    if (ssid != "") { // Check if we have stored credentials
+        Serial.println("Trying to connect to WiFi with stored credentials.");
+        WiFi.begin(ssid.c_str(), password.c_str());
+
+        int attempts = 0;
+        while (WiFi.status() != WL_CONNECTED && attempts < 20) {
+            delay(500);
+            Serial.print(".");
+            attempts++;
+        }
+
+        if (WiFi.status() == WL_CONNECTED) {
+            Serial.println("Connected to Wi-Fi using stored credentials.");
+            tryReconnectToServer();
+            return;
+        } else {
+            Serial.println("Failed to connect to Wi-Fi. Starting captive portal.");
+        }
+    } else {
+        Serial.println("No stored WiFi credentials. Starting captive portal.");
+    }
+}
+
+void tryReconnectToServer() {
+    preferences.begin("network", true); // Open Preferences with the "network" namespace in ReadOnly mode
+    String serverURL = preferences.getString("server_url", ""); // Get stored server URL, if any
+    preferences.end(); // Close the Preferences
+
+    if (!serverURL.isEmpty()) {
+        Serial.println("Trying to reconnect to server with stored URL: " + serverURL);
+        // Attempt to connect to the server using the stored URL
+        if (connectTo01OS(serverURL)) {
+            Serial.println("Reconnected to server using stored URL.");
+        } else {
+            Serial.println("Failed to reconnect to server. Proceeding with normal startup.");
+            // Proceed with your normal startup routine, possibly involving user input to get a new URL
+        }
+    } else {
+        Serial.println("No stored server URL. Proceeding with normal startup.");
+    }
+}
+
// ----------------------- END OF WIFI CAPTIVE PORTAL -------------------

// ----------------------- START OF PLAYBACK -------------------

@ -564,7 +622,7 @@ void InitI2SSpeakerOrMic(int mode)
#if ESP_IDF_VERSION > ESP_IDF_VERSION_VAL(4, 1, 0)
        .communication_format =
            I2S_COMM_FORMAT_STAND_I2S, // Set the format of the communication.
#else
        .communication_format = I2S_COMM_FORMAT_I2S,
#endif
        .intr_alloc_flags = ESP_INTR_FLAG_LEVEL1,

@ -711,42 +769,51 @@ void audio_recording_task(void *arg) {
// ----------------------- END OF PLAYBACK -------------------

bool hasSetupWebsocket = false;

+bool isServerURLStored() {
+    preferences.begin("network", true); // Open Preferences with the "network" namespace in ReadOnly mode
+    String serverURL = preferences.getString("server_url", ""); // Get stored server URL, if any
+    preferences.end(); // Close the Preferences
+    return !serverURL.isEmpty();
+}
+
-void setup()
-{
-    // Set the transmit buffer size for the Serial object and start it with a baud rate of 115200.
-    Serial.setTxBufferSize(1024);
-    Serial.begin(115200);
+void setup() {
+    Serial.begin(115200); // Initialize serial communication at 115200 baud rate.
+    Serial.setTxBufferSize(1024); // Set the transmit buffer size for the Serial object.

    // Wait for the Serial object to become available.
    while (!Serial)
        ;

-    WiFi.mode(WIFI_AP_STA);
-
-    // Print a welcome message to the Serial port.
-    Serial.println("\n\nCaptive Test, V0.5.0 compiled " __DATE__ " " __TIME__ " by CD_FER"); //__DATE__ is provided by the platformio ide
-    Serial.printf("%s-%d\n\r", ESP.getChipModel(), ESP.getChipRevision());
+    WiFi.mode(WIFI_AP_STA); // Set WiFi mode to both AP and STA.

    // delay(100); // Short delay to ensure mode change takes effect
-    // WiFi.softAPConfig(localIP, gatewayIP, subnetMask);
-    // WiFi.softAP(ssid, password);
-    startSoftAccessPoint(ssid, password, localIP, gatewayIP);
-
-    setUpDNSServer(dnsServer, localIP);
-
-    WiFi.scanNetworks(true);
-
-    setUpWebserver(server, localIP);
-    server.begin();
-
-    Serial.print("\n");
-    Serial.print("Startup Time:"); // should be somewhere between 270-350 for Generic ESP32 (D0WDQ6 chip, can have a higher startup time on first boot)
+
+    // Attempt to reconnect to WiFi using stored credentials.
+    tryReconnectWiFi();
+
+    // Print a welcome message to the Serial port.
+    Serial.println("\n\nCaptive Test, V0.5.0 compiled " __DATE__ " " __TIME__ " by CD_FER");
+    Serial.printf("%s-%d\n\r", ESP.getChipModel(), ESP.getChipRevision());
+
+    // If WiFi reconnect fails (or WiFi is connected but the server URL isn't stored), start the captive portal.
+    if (WiFi.status() != WL_CONNECTED) {
+        startSoftAccessPoint(ssid, password, localIP, gatewayIP);
+        setUpDNSServer(dnsServer, localIP);
+        WiFi.scanNetworks(true); // Start scanning for networks in preparation for the captive portal.
+        setUpWebserver(server, localIP); // Set up the web server for the captive portal.
+    }
+
+    server.begin(); // Begin the web server.
+
+    Serial.print("\nStartup Time:");
    Serial.println(millis());
    Serial.print("\n");

-    M5.begin(true, false, true);
-    M5.dis.drawpix(0, CRGB(255, 0, 50));
+    M5.begin(true, false, true); // Initialize M5Stack Atom board.
+    M5.dis.drawpix(0, CRGB(255, 0, 50)); // Set the display color.

    /* Create task for I2S */
-    xTaskCreate(audio_recording_task, "AUDIO", 4096, NULL, 4, NULL);
+    xTaskCreate(audio_recording_task, "AUDIO", 4096, NULL, 4, NULL); // Create a task for audio recording.
}

@ -756,7 +823,7 @@ void loop()
    if ((millis() - last_dns_ms) > DNS_INTERVAL) {
        last_dns_ms = millis(); // seems to help with stability; if you are doing other things in the loop this may not be needed
        dnsServer.processNextRequest(); // I call this at least every 10ms in my other projects (can be higher but I haven't tested it for stability)
    }

    // Check WiFi connection status
    if (WiFi.status() == WL_CONNECTED && !hasSetupWebsocket)

@ -10,7 +10,7 @@ platform = espressif32
framework = arduino
monitor_speed = 115200
upload_speed = 1500000
monitor_filters =
    esp32_exception_decoder
    time
build_flags =

@ -23,7 +23,7 @@ board = esp32dev

[env:m5echo]
extends = esp32common
lib_deps =
    m5stack/M5Atom @ ^0.1.2
    links2004/WebSockets @ ^2.4.1
    ;esphome/ESPAsyncWebServer-esphome @ ^3.1.0

@ -0,0 +1,13 @@
# iOS/Android Client

[WORK IN PROGRESS]

This repository contains the source code for the 01 iOS/Android app. It is a work in progress; we will continue to improve this application until it works properly.

Feel free to improve it and make a pull request!

If you want to run it on your own, you will need Expo.

1. Install dependencies: `npm install`
2. Run the app: `npx expo start`
3. Open the app in your simulator, or on your device with the Expo app, by scanning the QR code

@ -0,0 +1,22 @@
import * as React from "react";
import { NavigationContainer } from "@react-navigation/native";
import { createNativeStackNavigator } from "@react-navigation/native-stack";
import HomeScreen from "./src/screens/HomeScreen";
import CameraScreen from "./src/screens/Camera";
import Main from "./src/screens/Main";

const Stack = createNativeStackNavigator();

function App() {
  return (
    <NavigationContainer>
      <Stack.Navigator initialRouteName="Home">
        <Stack.Screen name="Home" component={HomeScreen} />
        <Stack.Screen name="Camera" component={CameraScreen} />
        <Stack.Screen name="Main" component={Main} />
      </Stack.Navigator>
    </NavigationContainer>
  );
}

export default App;

@ -0,0 +1,38 @@
{
  "expo": {
    "name": "01iOS",
    "slug": "01iOS",
    "version": "1.0.0",
    "orientation": "portrait",
    "icon": "./assets/icon.png",
    "userInterfaceStyle": "light",
    "splash": {
      "image": "./assets/splash.png",
      "resizeMode": "contain",
      "backgroundColor": "#ffffff"
    },
    "assetBundlePatterns": ["**/*"],
    "plugins": [
      [
        "expo-camera",
        {
          "cameraPermission": "Allow $(PRODUCT_NAME) to access your camera",
          "microphonePermission": "Allow $(PRODUCT_NAME) to access your microphone",
          "recordAudioAndroid": true
        }
      ]
    ],
    "ios": {
      "supportsTablet": true
    },
    "android": {
      "adaptiveIcon": {
        "foregroundImage": "./assets/adaptive-icon.png",
        "backgroundColor": "#ffffff"
      }
    },
    "web": {
      "favicon": "./assets/favicon.png"
    }
  }
}
Binary file not shown (new, 17 KiB)
Binary file not shown (new, 1.4 KiB)
Binary file not shown (new, 22 KiB)
Binary file not shown (new, 46 KiB)

@ -0,0 +1,6 @@
module.exports = function (api) {
  api.cache(true);
  return {
    presets: ['babel-preset-expo'],
  };
};

(File diff suppressed because it is too large)

@ -0,0 +1,38 @@
{
  "name": "01ios",
  "version": "1.0.0",
  "main": "node_modules/expo/AppEntry.js",
  "scripts": {
    "start": "expo start",
    "android": "expo start --android",
    "ios": "expo start --ios",
    "web": "expo start --web",
    "ts:check": "tsc"
  },
  "dependencies": {
    "@react-navigation/native": "^6.1.14",
    "@react-navigation/native-stack": "^6.9.22",
    "expo": "~50.0.8",
    "expo-camera": "~14.0.5",
    "expo-status-bar": "~1.11.1",
    "react": "18.2.0",
    "react-native": "0.73.4",
    "react-native-safe-area-context": "4.8.2",
    "react-native-screens": "~3.29.0",
    "expo-barcode-scanner": "~12.9.3",
    "expo-av": "~13.10.5"
  },
  "devDependencies": {
    "@babel/core": "^7.20.0",
    "@types/react": "~18.2.45",
    "typescript": "^5.1.3"
  },
  "ios": {
    "infoPlist": {
      "NSAppTransportSecurity": {
        "NSAllowsArbitraryLoads": true
      }
    }
  },
  "private": true
}
@ -0,0 +1,102 @@
import React, { useState } from "react";
import { StyleSheet, Text, TouchableOpacity, View } from "react-native";
import { Camera } from "expo-camera";
import { useNavigation } from "@react-navigation/native";
import { BarCodeScanner } from "expo-barcode-scanner";

export default function CameraScreen() {
  const [permission, requestPermission] = Camera.useCameraPermissions();

  const [scanned, setScanned] = useState(false);
  const navigation = useNavigation();

  if (!permission) {
    // Component is waiting for permission
    return <View />;
  }

  if (!permission.granted) {
    // No permission granted, request permission
    return (
      <View style={styles.container}>
        <Text>No access to camera</Text>
        <TouchableOpacity onPress={requestPermission} style={styles.button}>
          <Text style={styles.text}>Grant Camera Access</Text>
        </TouchableOpacity>
      </View>
    );
  }

  // function toggleCameraFacing() {
  //   setFacing((current) => (current === "back" ? "front" : "back"));
  // }

  const handleBarCodeScanned = ({
    type,
    data,
  }: {
    type: string;
    data: string;
  }) => {
    setScanned(true);
    console.log(
      `Bar code with type ${type} and data ${data} has been scanned!`
    );
    alert(`Scanned URL: ${data}`);
    navigation.navigate("Main", { scannedData: data });
  };

  return (
    <View style={styles.container}>
      <Camera
        style={styles.camera}
        facing={"back"}
        onBarCodeScanned={scanned ? undefined : handleBarCodeScanned}
        barCodeScannerSettings={{
          barCodeTypes: [BarCodeScanner.Constants.BarCodeType.qr],
        }}
      >
        <View style={styles.buttonContainer}>
          {/* <TouchableOpacity style={styles.button} onPress={toggleCameraFacing}>
            <Text style={styles.text}>Flip Camera</Text>
          </TouchableOpacity> */}
          {scanned && (
            <TouchableOpacity
              onPress={() => setScanned(false)}
              style={styles.button}
            >
              <Text style={styles.text}>Scan Again</Text>
            </TouchableOpacity>
          )}
        </View>
      </Camera>
    </View>
  );
}

const styles = StyleSheet.create({
  container: {
    flex: 1,
    flexDirection: "column",
    justifyContent: "flex-end",
  },
  camera: {
    flex: 1,
  },
  buttonContainer: {
    backgroundColor: "transparent",
    flexDirection: "row",
    margin: 20,
  },
  button: {
    flex: 0.1,
    alignSelf: "flex-end",
    alignItems: "center",
    backgroundColor: "#000",
    borderRadius: 10,
    padding: 15,
  },
  text: {
    fontSize: 18,
    color: "white",
  },
});

@ -0,0 +1,47 @@
import React from "react";
import { View, Text, TouchableOpacity, StyleSheet } from "react-native";
import { useNavigation } from "@react-navigation/native";

const HomeScreen = () => {
  const navigation = useNavigation();

  return (
    <View style={styles.container}>
      <View style={styles.circle} />
      <TouchableOpacity
        style={styles.button}
        onPress={() => navigation.navigate("Camera")}
      >
        <Text style={styles.buttonText}>Scan Code</Text>
      </TouchableOpacity>
    </View>
  );
};

const styles = StyleSheet.create({
  container: {
    flex: 1,
    justifyContent: "center",
    alignItems: "center",
    backgroundColor: "#fff",
  },
  circle: {
    width: 100,
    height: 100,
    borderRadius: 50,
    backgroundColor: "black",
    marginBottom: 20,
  },
  button: {
    backgroundColor: "black",
    paddingHorizontal: 20,
    paddingVertical: 10,
    borderRadius: 5,
  },
  buttonText: {
    color: "white",
    fontSize: 16,
  },
});

export default HomeScreen;

@@ -0,0 +1,171 @@
import React, { useState, useEffect } from "react";
import { View, Text, TouchableOpacity, StyleSheet } from "react-native";
import { Audio } from "expo-av";

interface MainProps {
  route: {
    params: {
      scannedData: string;
    };
  };
}

const Main: React.FC<MainProps> = ({ route }) => {
  const { scannedData } = route.params;

  const [connectionStatus, setConnectionStatus] =
    useState<string>("Connecting...");
  const [ws, setWs] = useState<WebSocket | null>(null);
  const [recording, setRecording] = useState<Audio.Recording | null>(null);
  const [audioQueue, setAudioQueue] = useState<string[]>([]);

  useEffect(() => {
    const playNextAudio = async () => {
      if (audioQueue.length > 0) {
        const uri = audioQueue.shift();
        const { sound } = await Audio.Sound.createAsync(
          { uri: uri! },
          { shouldPlay: true }
        );
        sound.setOnPlaybackStatusUpdate(async (status) => {
          if (status.didJustFinish && !status.isLooping) {
            await sound.unloadAsync();
            playNextAudio();
          }
        });
      }
    };

    let websocket: WebSocket;
    try {
      console.log("Connecting to WebSocket at " + scannedData);
      websocket = new WebSocket(scannedData);

      websocket.onopen = () => {
        setConnectionStatus(`Connected to ${scannedData}`);
        console.log("WebSocket connected");
      };
      websocket.onmessage = async (e) => {
        console.log("Received message: ", e.data);
        setAudioQueue((prevQueue) => [...prevQueue, e.data]);
        if (audioQueue.length === 1) {
          playNextAudio();
        }
      };

      websocket.onerror = (error) => {
        setConnectionStatus("Error connecting to WebSocket.");
        console.error("WebSocket error: ", error);
      };

      websocket.onclose = () => {
        setConnectionStatus("Disconnected.");
        console.log("WebSocket disconnected");
      };

      setWs(websocket);
    } catch (error) {
      console.log(error);
      setConnectionStatus("Error creating WebSocket.");
    }

    return () => {
      if (websocket) {
        websocket.close();
      }
    };
  }, [scannedData, audioQueue]);

  const startRecording = async () => {
    if (recording) {
      console.log("A recording is already in progress.");
      return;
    }

    try {
      console.log("Requesting permissions..");
      await Audio.requestPermissionsAsync();
      await Audio.setAudioModeAsync({
        allowsRecordingIOS: true,
        playsInSilentModeIOS: true,
      });
      console.log("Starting recording..");
      const { recording: newRecording } = await Audio.Recording.createAsync(
        Audio.RECORDING_OPTIONS_PRESET_HIGH_QUALITY
      );
      setRecording(newRecording);
      console.log("Recording started");
    } catch (err) {
      console.error("Failed to start recording", err);
    }
  };

  const stopRecording = async () => {
    console.log("Stopping recording..");
    setRecording(null);
    if (recording) {
      await recording.stopAndUnloadAsync();
      const uri = recording.getURI();
      console.log("Recording stopped and stored at", uri);
      if (ws && uri) {
        ws.send(uri);
      }
    }
  };

  return (
    <View style={styles.container}>
      <Text
        style={[
          styles.statusText,
          { color: connectionStatus.startsWith("Connected") ? "green" : "red" },
        ]}
      >
        {connectionStatus}
      </Text>
      <TouchableOpacity
        style={styles.button}
        onPressIn={startRecording}
        onPressOut={stopRecording}
      >
        <View style={styles.circle}>
          <Text style={styles.buttonText}>Record</Text>
        </View>
      </TouchableOpacity>
    </View>
  );
};

const styles = StyleSheet.create({
  container: {
    flex: 1,
    justifyContent: "center",
    alignItems: "center",
    backgroundColor: "#fff",
  },
  circle: {
    width: 100,
    height: 100,
    borderRadius: 50,
    backgroundColor: "black",
    justifyContent: "center",
    alignItems: "center",
  },
  button: {
    width: 100,
    height: 100,
    borderRadius: 50,
    justifyContent: "center",
    alignItems: "center",
  },
  buttonText: {
    color: "white",
    fontSize: 16,
  },
  statusText: {
    marginBottom: 20,
    fontSize: 16,
  },
});

export default Main;
@@ -0,0 +1,6 @@
{
  "extends": "expo/tsconfig.base",
  "compilerOptions": {
    "strict": true
  }
}
@@ -2,9 +2,11 @@ from ..base_device import Device

device = Device()


def main(server_url):
    device.server_url = server_url
    device.start()


if __name__ == "__main__":
    main()
@@ -2,9 +2,11 @@ from ..base_device import Device

device = Device()


def main(server_url):
    device.server_url = server_url
    device.start()


if __name__ == "__main__":
    main()
@@ -2,8 +2,10 @@ from ..base_device import Device

device = Device()


def main():
    device.start()


if __name__ == "__main__":
    main()
@@ -2,9 +2,11 @@ from ..base_device import Device

device = Device()


def main(server_url):
    device.server_url = server_url
    device.start()


if __name__ == "__main__":
    main()
@@ -1,8 +1,5 @@
-import os
-import sys
import pytest
from source.server.i import configure_interpreter
from unittest.mock import Mock
from interpreter import OpenInterpreter
from fastapi.testclient import TestClient
from .server import app

@@ -16,4 +13,4 @@ def client():
@pytest.fixture
def mock_interpreter():
    interpreter = configure_interpreter(OpenInterpreter())
    return interpreter
@@ -1,11 +1,11 @@
from dotenv import load_dotenv
-import os

load_dotenv()  # take environment variables from .env.

+import os
import glob
import time
import json
from pathlib import Path
from interpreter import OpenInterpreter
import shutil
@@ -47,7 +47,7 @@ The user's current task list (it might be empty) is: {{ tasks }}
When the user completes the current task, you should remove it from the list and read the next item by running `tasks = tasks[1:]\ntasks[0]`. Then, tell the user what the next task is.
When the user tells you about a set of tasks, you should intelligently order tasks, batch similar tasks, and break down large tasks into smaller tasks (for this, you should consult the user and get their permission to break it down). Your goal is to manage the task list as intelligently as possible, to make the user as efficient and non-overwhelmed as possible. They will require a lot of encouragement, support, and kindness. Don't say too much about what's ahead of them— just try to focus them on each step at a time.

After starting a task, you should check in with the user around the estimated completion time to see if the task is completed.
To do this, schedule a reminder based on estimated completion time using the function `schedule(message="Your message here.", start="8am")`, WHICH HAS ALREADY BEEN IMPORTED. YOU DON'T NEED TO IMPORT THE `schedule` FUNCTION. IT IS AVAILABLE. You'll receive the message at the time you scheduled it. If the user says to monitor something, simply schedule it with an interval of a duration that makes sense for the problem by specifying an interval, like this: `schedule(message="Your message here.", interval="5m")`
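
To ground the scheduling contract described in the prompt above, here are a couple of illustrative calls (the helper itself is the cron-backed `schedule` function that appears later in this diff; the messages and times are made up):

```python
# One-shot reminder at a specific time.
schedule(message="Check in: is the first task finished?", start="8am")

# Recurring check, for "monitor this" style requests.
schedule(message="Poll the build status.", interval="5m")
```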
@@ -182,7 +182,6 @@ Try multiple methods before saying the task is impossible. **You can do it!**


def configure_interpreter(interpreter: OpenInterpreter):
    ### SYSTEM MESSAGE
    interpreter.system_message = system_message

@@ -205,7 +204,6 @@ def configure_interpreter(interpreter: OpenInterpreter):
        "Please provide more information.",
    ]

    # Check if required packages are installed

    # THERE IS AN INCONSISTENCY HERE.

@@ -259,7 +257,6 @@ def configure_interpreter(interpreter: OpenInterpreter):
        time.sleep(2)
        print("Attempting to start OS control anyway...\n\n")

    # Should we explore other options for ^ these kinds of tags?
    # Like:

@@ -295,12 +292,8 @@ def configure_interpreter(interpreter: OpenInterpreter):
    # if chunk.get("format") != "active_line":
    #     print(chunk.get("content"))

-    import os
-
-    from platformdirs import user_data_dir

    # Directory paths
    repo_skills_dir = os.path.join(os.path.dirname(__file__), "skills")
    user_data_skills_dir = os.path.join(user_data_dir("01"), "skills")
@@ -314,22 +307,21 @@ def configure_interpreter(interpreter: OpenInterpreter):
        src_file = os.path.join(repo_skills_dir, filename)
        dst_file = os.path.join(user_data_skills_dir, filename)
        shutil.copy2(src_file, dst_file)

    interpreter.computer.debug = True
    interpreter.computer.skills.path = user_data_skills_dir

    # Import skills
    interpreter.computer.save_skills = False

    for file in glob.glob(os.path.join(interpreter.computer.skills.path, "*.py")):
        code_to_run = ""
        with open(file, "r") as f:
            code_to_run += f.read() + "\n"

        interpreter.computer.run("python", code_to_run)

    interpreter.computer.save_skills = True

    # Initialize user's task list
    interpreter.computer.run(

@@ -354,17 +346,21 @@ def configure_interpreter(interpreter: OpenInterpreter):
    ### MISC SETTINGS

    interpreter.auto_run = True
-    interpreter.computer.languages = [l for l in interpreter.computer.languages if l.name.lower() in ["applescript", "shell", "zsh", "bash", "python"]]
+    interpreter.computer.languages = [
+        l
+        for l in interpreter.computer.languages
+        if l.name.lower() in ["applescript", "shell", "zsh", "bash", "python"]
+    ]
    interpreter.force_task_completion = True
    # interpreter.offline = True
-    interpreter.id = 206 # Used to identify itself to other interpreters. This should be changed programmatically so it's unique.
+    interpreter.id = 206  # Used to identify itself to other interpreters. This should be changed programmatically so it's unique.

    ### RESET conversations/user.json
-    app_dir = user_data_dir('01')
-    conversations_dir = os.path.join(app_dir, 'conversations')
+    app_dir = user_data_dir("01")
+    conversations_dir = os.path.join(app_dir, "conversations")
    os.makedirs(conversations_dir, exist_ok=True)
-    user_json_path = os.path.join(conversations_dir, 'user.json')
-    with open(user_json_path, 'w') as file:
+    user_json_path = os.path.join(conversations_dir, "user.json")
+    with open(user_json_path, "w") as file:
        json.dump([], file)

    return interpreter
@@ -1,4 +1,5 @@
from dotenv import load_dotenv

load_dotenv()  # take environment variables from .env.

import os

@@ -8,7 +9,7 @@ from pathlib import Path
### LLM SETUP

# Define the path to a llamafile
-llamafile_path = Path(__file__).parent / 'model.llamafile'
+llamafile_path = Path(__file__).parent / "model.llamafile"

# Check if the new llamafile exists, if not download it
if not os.path.exists(llamafile_path):

@@ -25,4 +26,4 @@ if not os.path.exists(llamafile_path):
    subprocess.run(["chmod", "+x", llamafile_path], check=True)

# Run the new llamafile
subprocess.run([str(llamafile_path)], check=True)
@@ -1,9 +1,9 @@
from dotenv import load_dotenv

load_dotenv()  # take environment variables from .env.

import traceback
from platformdirs import user_data_dir
import ast
import json
import queue
import os

@@ -13,9 +13,7 @@ import re
from fastapi import FastAPI, Request
from fastapi.responses import PlainTextResponse
from starlette.websockets import WebSocket, WebSocketDisconnect
from pathlib import Path
import asyncio
import urllib.parse
from .utils.kernel import put_kernel_messages_into_queue
from .i import configure_interpreter
from interpreter import interpreter

@@ -44,28 +42,31 @@ accumulator = Accumulator()

app = FastAPI()

-app_dir = user_data_dir('01')
-conversation_history_path = os.path.join(app_dir, 'conversations', 'user.json')
+app_dir = user_data_dir("01")
+conversation_history_path = os.path.join(app_dir, "conversations", "user.json")

-SERVER_LOCAL_PORT = int(os.getenv('SERVER_LOCAL_PORT', 10001))
+SERVER_LOCAL_PORT = int(os.getenv("SERVER_LOCAL_PORT", 10001))


# This is so we only say() full sentences
def is_full_sentence(text):
-    return text.endswith(('.', '!', '?'))
+    return text.endswith((".", "!", "?"))


def split_into_sentences(text):
-    return re.split(r'(?<=[.!?])\s+', text)
+    return re.split(r"(?<=[.!?])\s+", text)


# Queues
-from_computer = queue.Queue() # Just for computer messages from the device. Sync queue because interpreter.run is synchronous
-from_user = asyncio.Queue() # Just for user messages from the device.
-to_device = asyncio.Queue() # For messages we send.
+from_computer = (
+    queue.Queue()
+)  # Just for computer messages from the device. Sync queue because interpreter.run is synchronous
+from_user = asyncio.Queue()  # Just for user messages from the device.
+to_device = asyncio.Queue()  # For messages we send.

# Switch code executor to device if that's set

-if os.getenv('CODE_RUNNER') == "device":
+if os.getenv("CODE_RUNNER") == "device":
    # (This should probably just loop through all languages and apply these changes instead)

    class Python:
|
|||
"""Generator that yields a dictionary in LMC Format."""
|
||||
|
||||
# Prepare the data
|
||||
message = {"role": "assistant", "type": "code", "format": "python", "content": code}
|
||||
message = {
|
||||
"role": "assistant",
|
||||
"type": "code",
|
||||
"format": "python",
|
||||
"content": code,
|
||||
}
|
||||
|
||||
# Unless it was just sent to the device, send it wrapped in flags
|
||||
if not (interpreter.messages and interpreter.messages[-1] == message):
|
||||
to_device.put({"role": "assistant", "type": "code", "format": "python", "start": True})
|
||||
to_device.put(
|
||||
{
|
||||
"role": "assistant",
|
||||
"type": "code",
|
||||
"format": "python",
|
||||
"start": True,
|
||||
}
|
||||
)
|
||||
to_device.put(message)
|
||||
to_device.put({"role": "assistant", "type": "code", "format": "python", "end": True})
|
||||
|
||||
to_device.put(
|
||||
{
|
||||
"role": "assistant",
|
||||
"type": "code",
|
||||
"format": "python",
|
||||
"end": True,
|
||||
}
|
||||
)
|
||||
|
||||
# Stream the response
|
||||
logger.info("Waiting for the device to respond...")
|
||||
while True:
|
||||
|
@ -109,10 +129,12 @@ if os.getenv('CODE_RUNNER') == "device":
|
|||
# Configure interpreter
|
||||
interpreter = configure_interpreter(interpreter)
|
||||
|
||||
|
||||
@app.get("/ping")
|
||||
async def ping():
|
||||
return PlainTextResponse("pong")
|
||||
|
||||
|
||||
@app.websocket("/")
|
||||
async def websocket_endpoint(websocket: WebSocket):
|
||||
await websocket.accept()
|
||||
|
@@ -145,19 +167,21 @@ async def receive_messages(websocket: WebSocket):
        except Exception as e:
            print(str(e))
            return
-        if 'text' in data:
+        if "text" in data:
            try:
-                data = json.loads(data['text'])
+                data = json.loads(data["text"])
                if data["role"] == "computer":
-                    from_computer.put(data) # To be handled by interpreter.computer.run
+                    from_computer.put(
+                        data
+                    )  # To be handled by interpreter.computer.run
                elif data["role"] == "user":
                    await from_user.put(data)
                else:
-                    raise("Unknown role:", data)
+                    raise ("Unknown role:", data)
            except json.JSONDecodeError:
                pass  # data is not JSON, leave it as is
-        elif 'bytes' in data:
-            data = data['bytes'] # binary data
+        elif "bytes" in data:
+            data = data["bytes"]  # binary data
            await from_user.put(data)
    except WebSocketDisconnect as e:
        if e.code == 1000:

@@ -165,13 +189,13 @@ async def receive_messages(websocket: WebSocket):
            return
        else:
            raise


async def send_messages(websocket: WebSocket):
    while True:
        message = await to_device.get()
-        #print(f"Sending to the device: {type(message)} {str(message)[:100]}")
+        # print(f"Sending to the device: {type(message)} {str(message)[:100]}")

        try:
            if isinstance(message, dict):
                await websocket.send_json(message)
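
For context on the routing above: text frames are parsed as JSON and dispatched by `"role"`, while binary frames go straight onto the user queue. A minimal client sketch of that protocol, assuming the third-party `websockets` package and a server on localhost:10001 (the message content is illustrative):

```python
import asyncio
import json

import websockets  # third-party package, assumed installed


async def send_user_message():
    # The 01 server exposes its websocket at the root path.
    async with websockets.connect("ws://localhost:10001/") as ws:
        # Text frames are routed by "role"; this one lands on from_user.
        await ws.send(
            json.dumps({"role": "user", "type": "message", "content": "Hi!", "end": True})
        )
        # Binary frames (e.g. raw audio) would be sent with ws.send(some_bytes).
        reply = await ws.recv()
        print(reply)


asyncio.run(send_user_message())
```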
@@ -184,8 +208,8 @@ async def send_messages(websocket: WebSocket):
            await to_device.put(message)
            raise


async def listener():
    while True:
        try:
            while True:

@@ -197,8 +221,6 @@ async def listener():
                    break
                await asyncio.sleep(1)

            message = accumulator.accumulate(chunk)
            if message == None:
                # Will be None until we have a full message ready
@@ -209,8 +231,11 @@ async def listener():
            # At this point, we have our message

            if message["type"] == "audio" and message["format"].startswith("bytes"):
-                if "content" not in message or message["content"] == None or message["content"] == "": # If it was nothing / silence / empty
+                if (
+                    "content" not in message
+                    or message["content"] == None
+                    or message["content"] == ""
+                ):  # If it was nothing / silence / empty
                    continue

                # Convert bytes to audio file

@@ -222,6 +247,7 @@ async def listener():
                if False:
                    os.system(f"open {audio_file_path}")
                    import time

                    time.sleep(15)

                text = stt(audio_file_path)
@@ -239,21 +265,21 @@ async def listener():
                    continue

            # Load, append, and save conversation history
-            with open(conversation_history_path, 'r') as file:
+            with open(conversation_history_path, "r") as file:
                messages = json.load(file)
            messages.append(message)
-            with open(conversation_history_path, 'w') as file:
+            with open(conversation_history_path, "w") as file:
                json.dump(messages, file, indent=4)

            accumulated_text = ""

-            if any([m["type"] == "image" for m in messages]) and interpreter.llm.model.startswith("gpt-"):
+            if any(
+                [m["type"] == "image" for m in messages]
+            ) and interpreter.llm.model.startswith("gpt-"):
                interpreter.llm.model = "gpt-4-vision-preview"
                interpreter.llm.supports_vision = True

            for chunk in interpreter.chat(messages, stream=True, display=True):
                if any([m["type"] == "image" for m in interpreter.messages]):
                    interpreter.llm.model = "gpt-4-vision-preview"
@@ -263,18 +289,24 @@ async def listener():
                await to_device.put(chunk)
                # Yield to the event loop, so you actually send it out
                await asyncio.sleep(0.01)

-                if os.getenv('TTS_RUNNER') == "server":
+                if os.getenv("TTS_RUNNER") == "server":
                    # Speak full sentences out loud
-                    if chunk["role"] == "assistant" and "content" in chunk and chunk["type"] == "message":
+                    if (
+                        chunk["role"] == "assistant"
+                        and "content" in chunk
+                        and chunk["type"] == "message"
+                    ):
                        accumulated_text += chunk["content"]
                        sentences = split_into_sentences(accumulated_text)

                        # If we're going to speak, say we're going to stop sending text.
                        # This should be fixed probably, we should be able to do both in parallel, or only one.
                        if any(is_full_sentence(sentence) for sentence in sentences):
-                            await to_device.put({"role": "assistant", "type": "message", "end": True})
+                            await to_device.put(
+                                {"role": "assistant", "type": "message", "end": True}
+                            )

                            if is_full_sentence(sentences[-1]):
                                for sentence in sentences:
                                    await stream_tts_to_device(sentence)
@@ -287,32 +319,36 @@ async def listener():
                        # If we're going to speak, say we're going to stop sending text.
                        # This should be fixed probably, we should be able to do both in parallel, or only one.
                        if any(is_full_sentence(sentence) for sentence in sentences):
-                            await to_device.put({"role": "assistant", "type": "message", "start": True})
+                            await to_device.put(
+                                {"role": "assistant", "type": "message", "start": True}
+                            )

                # If we have a new message, save our progress and go back to the top
                if not from_user.empty():
                    # Check if it's just an end flag. We ignore those.
                    temp_message = await from_user.get()

-                    if type(temp_message) is dict and temp_message.get("role") == "user" and temp_message.get("end"):
+                    if (
+                        type(temp_message) is dict
+                        and temp_message.get("role") == "user"
+                        and temp_message.get("end")
+                    ):
                        # Yup. False alarm.
                        continue
                    else:
                        # Whoops! Put that back
                        await from_user.put(temp_message)

-                    with open(conversation_history_path, 'w') as file:
+                    with open(conversation_history_path, "w") as file:
                        json.dump(interpreter.messages, file, indent=4)

                    # TODO: is triggering seemingly randomly
-                    #logger.info("New user message recieved. Breaking.")
-                    #break
+                    # logger.info("New user message recieved. Breaking.")
+                    # break

                # Also check if there's any new computer messages
                if not from_computer.empty():
-                    with open(conversation_history_path, 'w') as file:
+                    with open(conversation_history_path, "w") as file:
                        json.dump(interpreter.messages, file, indent=4)

                    logger.info("New computer message recieved. Breaking.")
@@ -320,6 +356,7 @@ async def listener():
        except:
            traceback.print_exc()


async def stream_tts_to_device(sentence):
    force_task_completion_responses = [
        "the task is done",

@@ -332,8 +369,8 @@ async def stream_tts_to_device(sentence):
    for chunk in stream_tts(sentence):
        await to_device.put(chunk)


def stream_tts(sentence):
    audio_file = tts(sentence)

    with open(audio_file, "rb") as f:
@@ -346,85 +383,106 @@ def stream_tts(sentence):
    # Stream the audio
    yield {"role": "assistant", "type": "audio", "format": file_type, "start": True}
    for i in range(0, len(audio_bytes), chunk_size):
-        chunk = audio_bytes[i:i+chunk_size]
+        chunk = audio_bytes[i : i + chunk_size]
        yield chunk
    yield {"role": "assistant", "type": "audio", "format": file_type, "end": True}


from uvicorn import Config, Server
import os
import platform
from importlib import import_module

# these will be overwritten
-HOST = ''
+HOST = ""
PORT = 0


@app.on_event("startup")
async def startup_event():
    server_url = f"{HOST}:{PORT}"
    print("")
-    print_markdown(f"\n*Ready.*\n")
+    print_markdown("\n*Ready.*\n")
    print("")


@app.on_event("shutdown")
async def shutdown_event():
    print_markdown("*Server is shutting down*")


-async def main(server_host, server_port, llm_service, model, llm_supports_vision, llm_supports_functions, context_window, max_tokens, temperature, tts_service, stt_service):
+async def main(
+    server_host,
+    server_port,
+    llm_service,
+    model,
+    llm_supports_vision,
+    llm_supports_functions,
+    context_window,
+    max_tokens,
+    temperature,
+    tts_service,
+    stt_service,
+):
    global HOST
    global PORT
    PORT = server_port
    HOST = server_host

    # Setup services
-    application_directory = user_data_dir('01')
-    services_directory = os.path.join(application_directory, 'services')
+    application_directory = user_data_dir("01")
+    services_directory = os.path.join(application_directory, "services")

-    service_dict = {'llm': llm_service, 'tts': tts_service, 'stt': stt_service}
+    service_dict = {"llm": llm_service, "tts": tts_service, "stt": stt_service}

    # Create a temp file with the session number
-    session_file_path = os.path.join(user_data_dir('01'), '01-session.txt')
-    with open(session_file_path, 'w') as session_file:
+    session_file_path = os.path.join(user_data_dir("01"), "01-session.txt")
+    with open(session_file_path, "w") as session_file:
        session_id = int(datetime.datetime.now().timestamp() * 1000)
        session_file.write(str(session_id))

    for service in service_dict:
-        service_directory = os.path.join(services_directory, service, service_dict[service])
+        service_directory = os.path.join(
+            services_directory, service, service_dict[service]
+        )

        # This is the folder they can mess around in
        config = {"service_directory": service_directory}

        if service == "llm":
-            config.update({
+            config.update(
+                {
                    "interpreter": interpreter,
                    "model": model,
                    "llm_supports_vision": llm_supports_vision,
                    "llm_supports_functions": llm_supports_functions,
                    "context_window": context_window,
                    "max_tokens": max_tokens,
-                    "temperature": temperature
-            })
+                    "temperature": temperature,
+                }
+            )

-        module = import_module(f'.server.services.{service}.{service_dict[service]}.{service}', package='source')
+        module = import_module(
+            f".server.services.{service}.{service_dict[service]}.{service}",
+            package="source",
+        )

        ServiceClass = getattr(module, service.capitalize())
        service_instance = ServiceClass(config)
        globals()[service] = getattr(service_instance, service)

    interpreter.llm.completions = llm

    # Start listening
    asyncio.create_task(listener())

    # Start watching the kernel if it's your job to do that
    if True:  # in the future, code can run on device. for now, just server.
        asyncio.create_task(put_kernel_messages_into_queue(from_computer))

-    config = Config(app, host=server_host, port=int(server_port), lifespan='on')
+    config = Config(app, host=server_host, port=int(server_port), lifespan="on")
    server = Server(config)
    await server.serve()

# Run the FastAPI app
if __name__ == "__main__":
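
To make the start/chunk/end framing that `stream_tts` yields above more concrete, here is a hedged consumer sketch (the helper name is hypothetical; it only assumes chunks arrive in the order yielded):

```python
def collect_audio(stream):
    """Reassemble one audio message from a stream_tts-style generator."""
    audio = bytearray()
    file_type = None
    for item in stream:
        if isinstance(item, dict) and item.get("start"):
            file_type = item["format"]  # e.g. the format flag sent before the bytes
        elif isinstance(item, dict) and item.get("end"):
            return file_type, bytes(audio)
        else:
            audio.extend(item)  # a raw bytes chunk between the two flags
    return file_type, bytes(audio)
```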
@@ -1,6 +1,5 @@
class Llm:
    def __init__(self, config):
        # Litellm is used by OI by default, so we just modify OI
        interpreter = config["interpreter"]

@@ -10,6 +9,3 @@ class Llm:
            setattr(interpreter, key.replace("-", "_"), value)

        self.llm = interpreter.llm.completions
@@ -3,29 +3,54 @@ import subprocess
import requests
import json


class Llm:
    def __init__(self, config):
        self.install(config["service_directory"])

    def install(self, service_directory):
        LLM_FOLDER_PATH = service_directory
-        self.llm_directory = os.path.join(LLM_FOLDER_PATH, 'llm')
-        if not os.path.isdir(self.llm_directory): # Check if the LLM directory exists
+        self.llm_directory = os.path.join(LLM_FOLDER_PATH, "llm")
+        if not os.path.isdir(self.llm_directory):  # Check if the LLM directory exists
            os.makedirs(LLM_FOLDER_PATH, exist_ok=True)

            # Install WasmEdge
-            subprocess.run(['curl', '-sSf', 'https://raw.githubusercontent.com/WasmEdge/WasmEdge/master/utils/install.sh', '|', 'bash', '-s', '--', '--plugin', 'wasi_nn-ggml'])
+            subprocess.run(
+                [
+                    "curl",
+                    "-sSf",
+                    "https://raw.githubusercontent.com/WasmEdge/WasmEdge/master/utils/install.sh",
+                    "|",
+                    "bash",
+                    "-s",
+                    "--",
+                    "--plugin",
+                    "wasi_nn-ggml",
+                ]
+            )

            # Download the Qwen1.5-0.5B-Chat model GGUF file
            MODEL_URL = "https://huggingface.co/second-state/Qwen1.5-0.5B-Chat-GGUF/resolve/main/Qwen1.5-0.5B-Chat-Q5_K_M.gguf"
-            subprocess.run(['curl', '-LO', MODEL_URL], cwd=self.llm_directory)
+            subprocess.run(["curl", "-LO", MODEL_URL], cwd=self.llm_directory)

            # Download the llama-api-server.wasm app
            APP_URL = "https://github.com/LlamaEdge/LlamaEdge/releases/latest/download/llama-api-server.wasm"
-            subprocess.run(['curl', '-LO', APP_URL], cwd=self.llm_directory)
+            subprocess.run(["curl", "-LO", APP_URL], cwd=self.llm_directory)

            # Run the API server
-            subprocess.run(['wasmedge', '--dir', '.:.', '--nn-preload', 'default:GGML:AUTO:Qwen1.5-0.5B-Chat-Q5_K_M.gguf', 'llama-api-server.wasm', '-p', 'llama-2-chat'], cwd=self.llm_directory)
+            subprocess.run(
+                [
+                    "wasmedge",
+                    "--dir",
+                    ".:.",
+                    "--nn-preload",
+                    "default:GGML:AUTO:Qwen1.5-0.5B-Chat-Q5_K_M.gguf",
+                    "llama-api-server.wasm",
+                    "-p",
+                    "llama-2-chat",
+                ],
+                cwd=self.llm_directory,
+            )

            print("LLM setup completed.")
        else:

@@ -33,17 +58,11 @@ class Llm:
    def llm(self, messages):
        url = "http://localhost:8080/v1/chat/completions"
-        headers = {
-            'accept': 'application/json',
-            'Content-Type': 'application/json'
-        }
-        data = {
-            "messages": messages,
-            "model": "llama-2-chat"
-        }
-        with requests.post(url, headers=headers, data=json.dumps(data), stream=True) as response:
+        headers = {"accept": "application/json", "Content-Type": "application/json"}
+        data = {"messages": messages, "model": "llama-2-chat"}
+        with requests.post(
+            url, headers=headers, data=json.dumps(data), stream=True
+        ) as response:
            for line in response.iter_lines():
                if line:
                    yield json.loads(line)
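
A small sketch of how this streaming `llm` generator would be consumed (assumes the LlamaEdge server above is already listening on localhost:8080; the directory and prompt are illustrative, and, as the method is written, each non-empty streamed line is parsed as JSON):

```python
llm = Llm({"service_directory": "/tmp/01-llm"})  # illustrative path; first run triggers install()

for event in llm.llm([{"role": "user", "content": "Say hi in five words."}]):
    print(event)  # one decoded JSON object per streamed line
```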
@@ -10,9 +10,6 @@ import shutil
import ffmpeg
import subprocess

-import os
-import subprocess
import platform
import urllib.request

@@ -26,7 +23,6 @@ class Stt:


def install(service_dir):
    ### INSTALL

    WHISPER_RUST_PATH = os.path.join(service_dir, "whisper-rust")

@@ -41,29 +37,38 @@ def install(service_dir):
    os.chdir(WHISPER_RUST_PATH)

    # Check if whisper-rust executable exists before attempting to build
-    if not os.path.isfile(os.path.join(WHISPER_RUST_PATH, "target/release/whisper-rust")):
+    if not os.path.isfile(
+        os.path.join(WHISPER_RUST_PATH, "target/release/whisper-rust")
+    ):
        # Check if Rust is installed. Needed to build whisper executable

        rustc_path = shutil.which("rustc")

        if rustc_path is None:
-            print("Rust is not installed or is not in system PATH. Please install Rust before proceeding.")
+            print(
+                "Rust is not installed or is not in system PATH. Please install Rust before proceeding."
+            )
            exit(1)

        # Build Whisper Rust executable if not found
-        subprocess.run(['cargo', 'build', '--release'], check=True)
+        subprocess.run(["cargo", "build", "--release"], check=True)
    else:
        print("Whisper Rust executable already exists. Skipping build.")

    WHISPER_MODEL_PATH = os.path.join(service_dir, "model")

-    WHISPER_MODEL_NAME = os.getenv('WHISPER_MODEL_NAME', 'ggml-tiny.en.bin')
-    WHISPER_MODEL_URL = os.getenv('WHISPER_MODEL_URL', 'https://huggingface.co/ggerganov/whisper.cpp/resolve/main/')
+    WHISPER_MODEL_NAME = os.getenv("WHISPER_MODEL_NAME", "ggml-tiny.en.bin")
+    WHISPER_MODEL_URL = os.getenv(
+        "WHISPER_MODEL_URL",
+        "https://huggingface.co/ggerganov/whisper.cpp/resolve/main/",
+    )

    if not os.path.isfile(os.path.join(WHISPER_MODEL_PATH, WHISPER_MODEL_NAME)):
        os.makedirs(WHISPER_MODEL_PATH, exist_ok=True)
-        urllib.request.urlretrieve(f"{WHISPER_MODEL_URL}{WHISPER_MODEL_NAME}",
-                                   os.path.join(WHISPER_MODEL_PATH, WHISPER_MODEL_NAME))
+        urllib.request.urlretrieve(
+            f"{WHISPER_MODEL_URL}{WHISPER_MODEL_NAME}",
+            os.path.join(WHISPER_MODEL_PATH, WHISPER_MODEL_NAME),
+        )
    else:
        print("Whisper model already exists. Skipping download.")
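
Once `install()` has built the binary and fetched the model, the whisper-rust CLI can be driven directly. A hedged sketch (all paths are illustrative; the flags match the Rust argument parser shown later in this diff):

```python
import subprocess

result = subprocess.run(
    [
        "/path/to/service/whisper-rust/target/release/whisper-rust",  # built by cargo above
        "--model-path",
        "/path/to/service/model/ggml-tiny.en.bin",  # downloaded by install()
        "--file-path",
        "/tmp/input.wav",
    ],
    capture_output=True,
    text=True,
)
print(result.stdout)  # the binary prints the transcription to stdout
```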
@@ -85,25 +90,31 @@ def export_audio_to_wav_ffmpeg(audio: bytearray, mime_type: str) -> str:
    # Create a temporary file with the appropriate extension
    input_ext = convert_mime_type_to_format(mime_type)
-    input_path = os.path.join(temp_dir, f"input_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.{input_ext}")
-    with open(input_path, 'wb') as f:
+    input_path = os.path.join(
+        temp_dir, f"input_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.{input_ext}"
+    )
+    with open(input_path, "wb") as f:
        f.write(audio)

    # Check if the input file exists
    assert os.path.exists(input_path), f"Input file does not exist: {input_path}"

    # Export to wav
-    output_path = os.path.join(temp_dir, f"output_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav")
+    output_path = os.path.join(
+        temp_dir, f"output_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav"
+    )
    print(mime_type, input_path, output_path)
    if mime_type == "audio/raw":
        ffmpeg.input(
            input_path,
-            f='s16le',
-            ar='16000',
+            f="s16le",
+            ar="16000",
            ac=1,
-        ).output(output_path, loglevel='panic').run()
+        ).output(output_path, loglevel="panic").run()
    else:
-        ffmpeg.input(input_path).output(output_path, acodec='pcm_s16le', ac=1, ar='16k', loglevel='panic').run()
+        ffmpeg.input(input_path).output(
+            output_path, acodec="pcm_s16le", ac=1, ar="16k", loglevel="panic"
+        ).run()

    try:
        yield output_path

@@ -113,28 +124,40 @@ def export_audio_to_wav_ffmpeg(audio: bytearray, mime_type: str) -> str:


def run_command(command):
-    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+    result = subprocess.run(
+        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
+    )
    return result.stdout, result.stderr


def get_transcription_file(service_directory, wav_file_path: str):
-    local_path = os.path.join(service_directory, 'model')
-    whisper_rust_path = os.path.join(service_directory, 'whisper-rust', 'target', 'release')
-    model_name = os.getenv('WHISPER_MODEL_NAME', 'ggml-tiny.en.bin')
+    local_path = os.path.join(service_directory, "model")
+    whisper_rust_path = os.path.join(
+        service_directory, "whisper-rust", "target", "release"
+    )
+    model_name = os.getenv("WHISPER_MODEL_NAME", "ggml-tiny.en.bin")

-    output, _ = run_command([
-        os.path.join(whisper_rust_path, 'whisper-rust'),
-        '--model-path', os.path.join(local_path, model_name),
-        '--file-path', wav_file_path
-    ])
+    output, _ = run_command(
+        [
+            os.path.join(whisper_rust_path, "whisper-rust"),
+            "--model-path",
+            os.path.join(local_path, model_name),
+            "--file-path",
+            wav_file_path,
+        ]
+    )

    return output


def stt_wav(service_directory, wav_file_path: str):
    temp_dir = tempfile.gettempdir()
-    output_path = os.path.join(temp_dir, f"output_stt_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav")
-    ffmpeg.input(wav_file_path).output(output_path, acodec='pcm_s16le', ac=1, ar='16k').run()
+    output_path = os.path.join(
+        temp_dir, f"output_stt_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav"
+    )
+    ffmpeg.input(wav_file_path).output(
+        output_path, acodec="pcm_s16le", ac=1, ar="16k"
+    ).run()
    try:
        transcript = get_transcription_file(service_directory, output_path)
    finally:
@@ -7,4 +7,4 @@ target/
**/*.rs.bk

# MSVC Windows builds of rustc generate these, which store debugging information
*.pdb

@@ -11,4 +11,4 @@ clap = { version = "4.4.18", features = ["derive"] }
cpal = "0.15.2"
hound = "3.5.1"
whisper-rs = "0.10.0"
whisper-rs-sys = "0.8.0"

@@ -10,7 +10,7 @@ struct Args {
    /// This is the model for Whisper STT
    #[arg(short, long, value_parser, required = true)]
    model_path: PathBuf,

    /// This is the wav audio file that will be converted from speech to text
    #[arg(short, long, value_parser, required = true)]
    file_path: Option<PathBuf>,

@@ -31,4 +31,4 @@ fn main() {
        Ok(transcription) => print!("{}", transcription),
        Err(e) => panic!("Error: {}", e),
    }
}

@@ -61,4 +61,4 @@ pub fn transcribe(model_path: &PathBuf, file_path: &PathBuf) -> Result<String, String> {
    }

    Ok(transcription)
}
@@ -6,7 +6,6 @@ class Stt:
        return stt(audio_file_path)


from datetime import datetime
import os
import contextlib

@@ -19,6 +18,7 @@ from openai import OpenAI

client = OpenAI()


def convert_mime_type_to_format(mime_type: str) -> str:
    if mime_type == "audio/x-wav" or mime_type == "audio/wav":
        return "wav"

@@ -29,30 +29,37 @@ def convert_mime_type_to_format(mime_type: str) -> str:
    return mime_type


@contextlib.contextmanager
def export_audio_to_wav_ffmpeg(audio: bytearray, mime_type: str) -> str:
    temp_dir = tempfile.gettempdir()

    # Create a temporary file with the appropriate extension
    input_ext = convert_mime_type_to_format(mime_type)
-    input_path = os.path.join(temp_dir, f"input_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.{input_ext}")
-    with open(input_path, 'wb') as f:
+    input_path = os.path.join(
+        temp_dir, f"input_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.{input_ext}"
+    )
+    with open(input_path, "wb") as f:
        f.write(audio)

    # Check if the input file exists
    assert os.path.exists(input_path), f"Input file does not exist: {input_path}"

    # Export to wav
-    output_path = os.path.join(temp_dir, f"output_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav")
+    output_path = os.path.join(
+        temp_dir, f"output_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav"
+    )
    if mime_type == "audio/raw":
        ffmpeg.input(
            input_path,
-            f='s16le',
-            ar='16000',
+            f="s16le",
+            ar="16000",
            ac=1,
-        ).output(output_path, loglevel='panic').run()
+        ).output(output_path, loglevel="panic").run()
    else:
-        ffmpeg.input(input_path).output(output_path, acodec='pcm_s16le', ac=1, ar='16k', loglevel='panic').run()
+        ffmpeg.input(input_path).output(
+            output_path, acodec="pcm_s16le", ac=1, ar="16k", loglevel="panic"
+        ).run()

    try:
        yield output_path
@@ -60,39 +67,49 @@ def export_audio_to_wav_ffmpeg(audio: bytearray, mime_type: str) -> str:
        os.remove(input_path)
        os.remove(output_path)


def run_command(command):
-    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+    result = subprocess.run(
+        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
+    )
    return result.stdout, result.stderr


def get_transcription_file(wav_file_path: str):
-    local_path = os.path.join(os.path.dirname(__file__), 'local_service')
-    whisper_rust_path = os.path.join(os.path.dirname(__file__), 'whisper-rust', 'target', 'release')
-    model_name = os.getenv('WHISPER_MODEL_NAME', 'ggml-tiny.en.bin')
-
-    output, error = run_command([
-        os.path.join(whisper_rust_path, 'whisper-rust'),
-        '--model-path', os.path.join(local_path, model_name),
-        '--file-path', wav_file_path
-    ])
+    local_path = os.path.join(os.path.dirname(__file__), "local_service")
+    whisper_rust_path = os.path.join(
+        os.path.dirname(__file__), "whisper-rust", "target", "release"
+    )
+    model_name = os.getenv("WHISPER_MODEL_NAME", "ggml-tiny.en.bin")
+
+    output, error = run_command(
+        [
+            os.path.join(whisper_rust_path, "whisper-rust"),
+            "--model-path",
+            os.path.join(local_path, model_name),
+            "--file-path",
+            wav_file_path,
+        ]
+    )

    return output


def get_transcription_bytes(audio_bytes: bytearray, mime_type):
    with export_audio_to_wav_ffmpeg(audio_bytes, mime_type) as wav_file_path:
        return get_transcription_file(wav_file_path)


def stt_bytes(audio_bytes: bytearray, mime_type="audio/wav"):
    with export_audio_to_wav_ffmpeg(audio_bytes, mime_type) as wav_file_path:
        return stt_wav(wav_file_path)


def stt_wav(wav_file_path: str):
    audio_file = open(wav_file_path, "rb")
    try:
        transcript = client.audio.transcriptions.create(
-            model="whisper-1",
-            file=audio_file,
-            response_format="text"
+            model="whisper-1", file=audio_file, response_format="text"
        )
    except openai.BadRequestError as e:
        print(f"openai.BadRequestError: {e}")

@@ -100,10 +117,13 @@ def stt_wav(wav_file_path: str):
    return transcript


def stt(input_data, mime_type="audio/wav"):
    if isinstance(input_data, str):
        return stt_wav(input_data)
    elif isinstance(input_data, bytearray):
        return stt_bytes(input_data, mime_type)
    else:
-        raise ValueError("Input data should be either a path to a wav file (str) or audio bytes (bytearray)")
+        raise ValueError(
+            "Input data should be either a path to a wav file (str) or audio bytes (bytearray)"
+        )
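
A quick sketch of calling this `stt` entry point both ways (the file path is illustrative, and the OpenAI-backed route requires `OPENAI_API_KEY` to be set):

```python
# From a wav file on disk:
print(stt("/tmp/recording.wav"))  # illustrative path

# From raw audio bytes (here read back from the same file):
with open("/tmp/recording.wav", "rb") as f:
    print(stt(bytearray(f.read()), mime_type="audio/wav"))
```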
@@ -2,41 +2,43 @@ import ffmpeg
import tempfile
from openai import OpenAI
import os
import subprocess
import tempfile

from source.server.utils.logs import logger
from source.server.utils.logs import setup_logging

setup_logging()

# If this TTS service is used, the OPENAI_API_KEY environment variable must be set
-if not os.getenv('OPENAI_API_KEY'):
+if not os.getenv("OPENAI_API_KEY"):
    logger.error("")
-    logger.error(f"OpenAI API key not found. Please set the OPENAI_API_KEY environment variable, or run 01 with the --local option.")
+    logger.error(
+        "OpenAI API key not found. Please set the OPENAI_API_KEY environment variable, or run 01 with the --local option."
+    )
    logger.error("Aborting...")
    logger.error("")
    os._exit(1)

client = OpenAI()


class Tts:
    def __init__(self, config):
        pass

    def tts(self, text):
        response = client.audio.speech.create(
            model="tts-1",
-            voice=os.getenv('OPENAI_VOICE_NAME', 'alloy'),
+            voice=os.getenv("OPENAI_VOICE_NAME", "alloy"),
            input=text,
-            response_format="opus"
+            response_format="opus",
        )
        with tempfile.NamedTemporaryFile(suffix=".opus", delete=False) as temp_file:
            response.stream_to_file(temp_file.name)

            # TODO: hack to format audio correctly for device
            outfile = tempfile.gettempdir() + "/" + "raw.dat"
-            ffmpeg.input(temp_file.name).output(outfile, f="s16le", ar="16000", ac="1", loglevel='panic').run()
+            ffmpeg.input(temp_file.name).output(
+                outfile, f="s16le", ar="16000", ac="1", loglevel="panic"
+            ).run()

            return outfile
@@ -13,26 +13,40 @@ class Tts:
        self.install(config["service_directory"])

    def tts(self, text):
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_file:
            output_file = temp_file.name
            piper_dir = self.piper_directory
-            subprocess.run([
-                os.path.join(piper_dir, 'piper'),
-                '--model', os.path.join(piper_dir, os.getenv('PIPER_VOICE_NAME', 'en_US-lessac-medium.onnx')),
-                '--output_file', output_file
-            ], input=text, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+            subprocess.run(
+                [
+                    os.path.join(piper_dir, "piper"),
+                    "--model",
+                    os.path.join(
+                        piper_dir,
+                        os.getenv("PIPER_VOICE_NAME", "en_US-lessac-medium.onnx"),
+                    ),
+                    "--output_file",
+                    output_file,
+                ],
+                input=text,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True,
+            )

            # TODO: hack to format audio correctly for device
            outfile = tempfile.gettempdir() + "/" + "raw.dat"
-            ffmpeg.input(temp_file.name).output(outfile, f="s16le", ar="16000", ac="1", loglevel='panic').run()
+            ffmpeg.input(temp_file.name).output(
+                outfile, f="s16le", ar="16000", ac="1", loglevel="panic"
+            ).run()

            return outfile

    def install(self, service_directory):
        PIPER_FOLDER_PATH = service_directory
-        self.piper_directory = os.path.join(PIPER_FOLDER_PATH, 'piper')
-        if not os.path.isdir(self.piper_directory): # Check if the Piper directory exists
+        self.piper_directory = os.path.join(PIPER_FOLDER_PATH, "piper")
+        if not os.path.isdir(
+            self.piper_directory
+        ):  # Check if the Piper directory exists
            os.makedirs(PIPER_FOLDER_PATH, exist_ok=True)

            # Determine OS and architecture

@@ -60,52 +74,92 @@ class Tts:
            asset_url = f"{PIPER_URL}{PIPER_ASSETNAME}"

            if OS == "windows":
                asset_url = asset_url.replace(".tar.gz", ".zip")

            # Download and extract Piper
-            urllib.request.urlretrieve(asset_url, os.path.join(PIPER_FOLDER_PATH, PIPER_ASSETNAME))
+            urllib.request.urlretrieve(
+                asset_url, os.path.join(PIPER_FOLDER_PATH, PIPER_ASSETNAME)
+            )

            # Extract the downloaded file
            if OS == "windows":
                import zipfile
-                with zipfile.ZipFile(os.path.join(PIPER_FOLDER_PATH, PIPER_ASSETNAME), 'r') as zip_ref:
+
+                with zipfile.ZipFile(
+                    os.path.join(PIPER_FOLDER_PATH, PIPER_ASSETNAME), "r"
+                ) as zip_ref:
                    zip_ref.extractall(path=PIPER_FOLDER_PATH)
            else:
-                with tarfile.open(os.path.join(PIPER_FOLDER_PATH, PIPER_ASSETNAME), 'r:gz') as tar:
+                with tarfile.open(
+                    os.path.join(PIPER_FOLDER_PATH, PIPER_ASSETNAME), "r:gz"
+                ) as tar:
                    tar.extractall(path=PIPER_FOLDER_PATH)

-            PIPER_VOICE_URL = os.getenv('PIPER_VOICE_URL',
-                                        'https://huggingface.co/rhasspy/piper-voices/resolve/main/en/en_US/lessac/medium/')
-            PIPER_VOICE_NAME = os.getenv('PIPER_VOICE_NAME', 'en_US-lessac-medium.onnx')
+            PIPER_VOICE_URL = os.getenv(
+                "PIPER_VOICE_URL",
+                "https://huggingface.co/rhasspy/piper-voices/resolve/main/en/en_US/lessac/medium/",
+            )
+            PIPER_VOICE_NAME = os.getenv("PIPER_VOICE_NAME", "en_US-lessac-medium.onnx")

            # Download voice model and its json file
-            urllib.request.urlretrieve(f"{PIPER_VOICE_URL}{PIPER_VOICE_NAME}",
-                                       os.path.join(self.piper_directory, PIPER_VOICE_NAME))
-            urllib.request.urlretrieve(f"{PIPER_VOICE_URL}{PIPER_VOICE_NAME}.json",
-                                       os.path.join(self.piper_directory, f"{PIPER_VOICE_NAME}.json"))
+            urllib.request.urlretrieve(
+                f"{PIPER_VOICE_URL}{PIPER_VOICE_NAME}",
+                os.path.join(self.piper_directory, PIPER_VOICE_NAME),
+            )
+            urllib.request.urlretrieve(
+                f"{PIPER_VOICE_URL}{PIPER_VOICE_NAME}.json",
+                os.path.join(self.piper_directory, f"{PIPER_VOICE_NAME}.json"),
+            )

            # Additional setup for macOS
            if OS == "macos":
                if ARCH == "x64":
-                    subprocess.run(['softwareupdate', '--install-rosetta', '--agree-to-license'])
+                    subprocess.run(
+                        ["softwareupdate", "--install-rosetta", "--agree-to-license"]
+                    )

                PIPER_PHONEMIZE_ASSETNAME = f"piper-phonemize_{OS}_{ARCH}.tar.gz"
                PIPER_PHONEMIZE_URL = "https://github.com/rhasspy/piper-phonemize/releases/latest/download/"
-                urllib.request.urlretrieve(f"{PIPER_PHONEMIZE_URL}{PIPER_PHONEMIZE_ASSETNAME}",
-                                           os.path.join(self.piper_directory, PIPER_PHONEMIZE_ASSETNAME))
+                urllib.request.urlretrieve(
+                    f"{PIPER_PHONEMIZE_URL}{PIPER_PHONEMIZE_ASSETNAME}",
+                    os.path.join(self.piper_directory, PIPER_PHONEMIZE_ASSETNAME),
+                )

-                with tarfile.open(os.path.join(self.piper_directory, PIPER_PHONEMIZE_ASSETNAME), 'r:gz') as tar:
+                with tarfile.open(
+                    os.path.join(self.piper_directory, PIPER_PHONEMIZE_ASSETNAME),
+                    "r:gz",
+                ) as tar:
                    tar.extractall(path=self.piper_directory)

                PIPER_DIR = self.piper_directory
-                subprocess.run(['install_name_tool', '-change', '@rpath/libespeak-ng.1.dylib',
-                                f"{PIPER_DIR}/piper-phonemize/lib/libespeak-ng.1.dylib", f"{PIPER_DIR}/piper"])
-                subprocess.run(['install_name_tool', '-change', '@rpath/libonnxruntime.1.14.1.dylib',
-                                f"{PIPER_DIR}/piper-phonemize/lib/libonnxruntime.1.14.1.dylib", f"{PIPER_DIR}/piper"])
-                subprocess.run(['install_name_tool', '-change', '@rpath/libpiper_phonemize.1.dylib',
-                                f"{PIPER_DIR}/piper-phonemize/lib/libpiper_phonemize.1.dylib", f"{PIPER_DIR}/piper"])
+                subprocess.run(
+                    [
+                        "install_name_tool",
+                        "-change",
+                        "@rpath/libespeak-ng.1.dylib",
+                        f"{PIPER_DIR}/piper-phonemize/lib/libespeak-ng.1.dylib",
+                        f"{PIPER_DIR}/piper",
+                    ]
+                )
+                subprocess.run(
+                    [
+                        "install_name_tool",
+                        "-change",
+                        "@rpath/libonnxruntime.1.14.1.dylib",
+                        f"{PIPER_DIR}/piper-phonemize/lib/libonnxruntime.1.14.1.dylib",
+                        f"{PIPER_DIR}/piper",
+                    ]
+                )
+                subprocess.run(
+                    [
+                        "install_name_tool",
+                        "-change",
+                        "@rpath/libpiper_phonemize.1.dylib",
+                        f"{PIPER_DIR}/piper-phonemize/lib/libpiper_phonemize.1.dylib",
+                        f"{PIPER_DIR}/piper",
+                    ]
+                )

            print("Piper setup completed.")
        else:
            print("Piper already set up. Skipping download.")
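
A minimal sketch of exercising this Piper service end to end (the directory is illustrative; the first call triggers the download and setup steps above):

```python
tts = Tts({"service_directory": "/tmp/01-tts"})  # illustrative path
raw_path = tts.tts("Hello from the zero one.")
print(raw_path)  # 16 kHz mono s16le raw audio, ready to stream to the device
```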
@ -3,34 +3,33 @@ from datetime import datetime
|
|||
from pytimeparse import parse
|
||||
from crontab import CronTab
|
||||
from uuid import uuid4
|
||||
from datetime import datetime
|
||||
from platformdirs import user_data_dir
|
||||
|
||||
|
||||
def schedule(message="", start=None, interval=None) -> None:
|
||||
"""
|
||||
Schedules a task at a particular time, or at a particular interval
|
||||
"""
|
||||
if start and interval:
|
||||
raise ValueError("Cannot specify both start time and interval.")
|
||||
|
||||
|
||||
if not start and not interval:
|
||||
raise ValueError("Either start time or interval must be specified.")
|
||||
|
||||
|
||||
# Read the temp file to see what the current session is
|
||||
session_file_path = os.path.join(user_data_dir('01'), '01-session.txt')
|
||||
|
||||
with open(session_file_path, 'r') as session_file:
|
||||
session_file_path = os.path.join(user_data_dir("01"), "01-session.txt")
|
||||
|
||||
with open(session_file_path, "r") as session_file:
|
||||
file_session_value = session_file.read().strip()
|
||||
|
||||
|
||||
prefixed_message = "AUTOMATED MESSAGE FROM SCHEDULER: " + message
|
||||
|
||||
|
||||
# Escape the message and the json, cron is funky with quotes
|
||||
escaped_question = prefixed_message.replace('"', '\\"')
|
||||
json_data = f"{{\\\"text\\\": \\\"{escaped_question}\\\"}}"
|
||||
json_data = f'{{\\"text\\": \\"{escaped_question}\\"}}'
|
||||
|
||||
command = f"""bash -c 'if [ "$(cat "{session_file_path}")" == "{file_session_value}" ]; then /usr/bin/curl -X POST -H "Content-Type: application/json" -d "{json_data}" http://localhost:10001/; fi' """
|
||||
|
||||
command = f'''bash -c 'if [ "$(cat "{session_file_path}")" == "{file_session_value}" ]; then /usr/bin/curl -X POST -H "Content-Type: application/json" -d "{json_data}" http://localhost:10001/; fi' '''
|
||||
|
||||
cron = CronTab(user=True)
|
||||
job = cron.new(command=command)
|
||||
# Prefix with 01 dev preview so we can delete them all in the future
|
||||
|
@ -61,6 +60,5 @@ def schedule(message="", start=None, interval=None) -> None:
|
|||
days = max(int(seconds / 86400), 1)
|
||||
job.day.every(days)
|
||||
print(f"Task scheduled every {days} day(s)")
|
||||
|
||||
cron.write()
|
||||
|
||||
cron.write()
|
||||
|
|
|
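The scheduler above leans on the python-crontab API: build a job, set its interval, write the crontab back. As a minimal standalone sketch of that API (the echo command and comment text here are stand-ins, not taken from this diff):

```python
from crontab import CronTab

cron = CronTab(user=True)  # load the current user's crontab
job = cron.new(command="echo 'hello from cron'", comment="01 dev preview demo")
job.minute.every(5)        # fire every five minutes
cron.write()               # persist the new entry

# Entries tagged with a known comment prefix can be removed in bulk later:
cron.remove_all(comment="01 dev preview demo")
cron.write()
```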
@@ -36,7 +36,7 @@ Store the user's tasks in a Python list called `tasks`.

The user's current task is: {{ tasks[0] if tasks else "No current tasks." }}

{{
if len(tasks) > 1:
    print("The next task is: ", tasks[1])
}}

@@ -91,7 +91,7 @@ Store the user's tasks in a Python list called `tasks`.

The user's current task is: {{ tasks[0] if tasks else "No current tasks." }}

{{
if len(tasks) > 1:
    print("The next task is: ", tasks[1])
}}
@@ -104,7 +104,7 @@ When the user tells you about a set of tasks, you should intelligently order tas

After starting a task, you should check in with the user around the estimated completion time to see if the task is completed. Use the `schedule(datetime, message)` function, which has already been imported.

To do this, schedule a reminder based on estimated completion time using the function `schedule(datetime_object, "Your message here.")`, WHICH HAS ALREADY BEEN IMPORTED. YOU DON'T NEED TO IMPORT THE `schedule` FUNCTION. IT IS AVAILABLE. You'll recieve the message at `datetime_object`.
To do this, schedule a reminder based on estimated completion time using the function `schedule(datetime_object, "Your message here.")`, WHICH HAS ALREADY BEEN IMPORTED. YOU DON'T NEED TO IMPORT THE `schedule` FUNCTION. IT IS AVAILABLE. You'll receive the message at `datetime_object`.

You guide the user through the list one task at a time, convincing them to move forward, giving a pep talk if need be. Your job is essentially to answer "what should I (the user) be doing right now?" for every moment of the day.
@@ -184,7 +184,7 @@ except:
finally:
    sys.stdout = original_stdout
    sys.stderr = original_stderr

}}

# SKILLS

@@ -237,4 +237,6 @@ For example:

ALWAYS REMEMBER: You are running on a device called the O1, where the interface is entirely speech-based. Make your responses to the user **VERY short.**

""".strip().replace("OI_SKILLS_DIR", os.path.join(os.path.dirname(__file__), "skills"))
""".strip().replace(
    "OI_SKILLS_DIR", os.path.join(os.path.dirname(__file__), "skills")
)
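The `{{ ... }}` blocks in the templates above are executed as Python when the system message is rendered, with stdout captured into the prompt; the hunk above shows the tail of that capture. A rough sketch of the general idea, assuming a simple statement-only renderer (the function name and regex are illustrative, not the project's actual implementation):

```python
import io
import re
import sys

def render_dynamic_prompt(template: str, scope: dict) -> str:
    # Hypothetical renderer: run each {{ ... }} block and splice in its stdout
    def run_block(match):
        code = match.group(1)
        original_stdout = sys.stdout
        sys.stdout = buffer = io.StringIO()
        try:
            exec(code, scope)
        finally:
            sys.stdout = original_stdout
        return buffer.getvalue().strip()

    return re.sub(r"\{\{(.*?)\}\}", run_block, template, flags=re.DOTALL)

# Usage:
print(render_dynamic_prompt("Tasks: {{ print(tasks) }}", {"tasks": ["demo task"]}))
```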
@@ -96,7 +96,7 @@ except:
finally:
    sys.stdout = original_stdout
    sys.stderr = original_stderr

}}

# SKILLS LIBRARY

@@ -131,4 +131,6 @@ print(output)

Remember: You can run Python code outside a function only to run a Python function; all other code must go in a Python function if you first write a Python function. ALL imports must go inside the function.

""".strip().replace("OI_SKILLS_DIR", os.path.abspath(os.path.join(os.path.dirname(__file__), "skills")))
""".strip().replace(
    "OI_SKILLS_DIR", os.path.abspath(os.path.join(os.path.dirname(__file__), "skills"))
)
@@ -1,11 +1,5 @@
# test_main.py
import subprocess
import uuid
import pytest
from source.server.i import configure_interpreter
from unittest.mock import Mock
from fastapi.testclient import TestClient


@pytest.mark.asyncio

@@ -38,4 +32,4 @@ def test_ping(client):
# def test_interpreter_configuration(mock_interpreter):
#     # Test interpreter configuration
#     interpreter = configure_interpreter(mock_interpreter)
#     assert interpreter is not None
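For context on the `client` argument that `test_ping` receives: it is presumably a pytest fixture wrapping FastAPI's `TestClient`. A minimal sketch of that pattern; the app object and `/ping` route here are stand-ins, since the real app import path isn't shown in this diff:

```python
import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient

app = FastAPI()  # stand-in for the project's real app object

@app.get("/ping")
def ping():
    return "pong"

@pytest.fixture
def client():
    # Each test gets a client that calls the app in-process; no server needed
    return TestClient(app)

def test_ping(client):
    response = client.get("/ping")
    assert response.status_code == 200
```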
@@ -1,49 +1,70 @@
import os
import subprocess
import re
import shutil
import pyqrcode
import time
from ..utils.print_markdown import print_markdown


def create_tunnel(tunnel_method='ngrok', server_host='localhost', server_port=10001):
    print_markdown(f"Exposing server to the internet...")

def create_tunnel(
    tunnel_method="ngrok", server_host="localhost", server_port=10001, qr=False
):
    print_markdown("Exposing server to the internet...")

    server_url = ""
    if tunnel_method == "bore":
        try:
            output = subprocess.check_output('command -v bore', shell=True)
            output = subprocess.check_output("command -v bore", shell=True)
        except subprocess.CalledProcessError:
            print("The bore-cli command is not available. Please run 'cargo install bore-cli'.")
            print(
                "The bore-cli command is not available. Please run 'cargo install bore-cli'."
            )
            print("For more information, see https://github.com/ekzhang/bore")
            exit(1)

        time.sleep(6)
        # output = subprocess.check_output(f'bore local {server_port} --to bore.pub', shell=True)
        process = subprocess.Popen(f'bore local {server_port} --to bore.pub', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)

        process = subprocess.Popen(
            f"bore local {server_port} --to bore.pub",
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            universal_newlines=True,
        )

        while True:
            line = process.stdout.readline()
            print(line)
            if not line:
                break
            if "listening at bore.pub:" in line:
                remote_port = re.search('bore.pub:([0-9]*)', line).group(1)
                print_markdown(f"Your server is being hosted at the following URL: bore.pub:{remote_port}")
                remote_port = re.search("bore.pub:([0-9]*)", line).group(1)
                server_url = f"bore.pub:{remote_port}"
                print_markdown(
                    f"Your server is being hosted at the following URL: bore.pub:{remote_port}"
                )
                break

    elif tunnel_method == "localtunnel":
        if subprocess.call('command -v lt', shell=True):
        if subprocess.call("command -v lt", shell=True):
            print("The 'lt' command is not available.")
            print("Please ensure you have Node.js installed, then run 'npm install -g localtunnel'.")
            print("For more information, see https://github.com/localtunnel/localtunnel")
            print(
                "Please ensure you have Node.js installed, then run 'npm install -g localtunnel'."
            )
            print(
                "For more information, see https://github.com/localtunnel/localtunnel"
            )
            exit(1)
        else:
            process = subprocess.Popen(f'npx localtunnel --port {server_port}', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
            process = subprocess.Popen(
                f"npx localtunnel --port {server_port}",
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                universal_newlines=True,
            )

            found_url = False
            url_pattern = re.compile(r'your url is: https://[a-zA-Z0-9.-]+')
            url_pattern = re.compile(r"your url is: https://[a-zA-Z0-9.-]+")

            while True:
                line = process.stdout.readline()

@@ -52,44 +73,65 @@ def create_tunnel(tunnel_method='ngrok', server_host='localhost', server_port=10
                match = url_pattern.search(line)
                if match:
                    found_url = True
                    remote_url = match.group(0).replace('your url is: ', '')

                    print(f"\nYour server is being hosted at the following URL: {remote_url}")
                    remote_url = match.group(0).replace("your url is: ", "")
                    server_url = remote_url
                    print(
                        f"\nYour server is being hosted at the following URL: {remote_url}"
                    )
                    break  # Exit the loop once the URL is found

            if not found_url:
                print("Failed to extract the localtunnel URL. Please check localtunnel's output for details.")
                print(
                    "Failed to extract the localtunnel URL. Please check localtunnel's output for details."
                )

    elif tunnel_method == "ngrok":
        # Check if ngrok is installed
        is_installed = subprocess.check_output('command -v ngrok', shell=True).decode().strip()
        is_installed = (
            subprocess.check_output("command -v ngrok", shell=True).decode().strip()
        )
        if not is_installed:
            print("The ngrok command is not available.")
            print("Please install ngrok using the instructions at https://ngrok.com/docs/getting-started/")
            print(
                "Please install ngrok using the instructions at https://ngrok.com/docs/getting-started/"
            )
            exit(1)

        # If ngrok is installed, start it on the specified port
        # process = subprocess.Popen(f'ngrok http {server_port} --log=stdout', shell=True, stdout=subprocess.PIPE)
        process = subprocess.Popen(f'ngrok http {server_port} --scheme http,https --log=stdout', shell=True, stdout=subprocess.PIPE)
        process = subprocess.Popen(
            f"ngrok http {server_port} --scheme http,https --domain=marten-advanced-dragon.ngrok-free.app --log=stdout",
            shell=True,
            stdout=subprocess.PIPE,
        )

        # Initially, no URL is found
        found_url = False
        # Regular expression to match the ngrok URL
        url_pattern = re.compile(r'https://[a-zA-Z0-9-]+\.ngrok(-free)?\.app')
        url_pattern = re.compile(r"https://[a-zA-Z0-9-]+\.ngrok(-free)?\.app")

        # Read the output line by line
        while True:
            line = process.stdout.readline().decode('utf-8')
            line = process.stdout.readline().decode("utf-8")
            if not line:
                break  # Break out of the loop if no more output
            match = url_pattern.search(line)
            if match:
                found_url = True
                remote_url = match.group(0)

                print(f"\nYour server is being hosted at the following URL: {remote_url}")
                server_url = remote_url
                print(
                    f"\nYour server is being hosted at the following URL: {remote_url}"
                )
                break  # Exit the loop once the URL is found

        if not found_url:
            print("Failed to extract the ngrok tunnel URL. Please check ngrok's output for details.")
            print(
                "Failed to extract the ngrok tunnel URL. Please check ngrok's output for details."
            )

    if server_url and qr:
        text = pyqrcode.create(remote_url)
        print(text.terminal(quiet_zone=1))

    return server_url
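The new `qr` flag at the end of `create_tunnel` prints the tunnel URL as a terminal QR code via pyqrcode, so a phone can scan it instead of typing the address. A minimal standalone sketch of that call (the URL is a placeholder):

```python
import pyqrcode

# Encode a URL and render it with block characters in the terminal
url = "https://example.ngrok-free.app"  # placeholder, not a real tunnel
qr_code = pyqrcode.create(url)
print(qr_code.terminal(quiet_zone=1))
```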
@@ -5,6 +5,7 @@ import tempfile
import ffmpeg
import subprocess


def convert_mime_type_to_format(mime_type: str) -> str:
    if mime_type == "audio/x-wav" or mime_type == "audio/wav":
        return "wav"

@@ -15,39 +16,49 @@ def convert_mime_type_to_format(mime_type: str) -> str:

    return mime_type


@contextlib.contextmanager
def export_audio_to_wav_ffmpeg(audio: bytearray, mime_type: str) -> str:
    temp_dir = tempfile.gettempdir()

    # Create a temporary file with the appropriate extension
    input_ext = convert_mime_type_to_format(mime_type)
    input_path = os.path.join(temp_dir, f"input_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.{input_ext}")
    with open(input_path, 'wb') as f:
    input_path = os.path.join(
        temp_dir, f"input_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.{input_ext}"
    )
    with open(input_path, "wb") as f:
        f.write(audio)

    # Check if the input file exists
    assert os.path.exists(input_path), f"Input file does not exist: {input_path}"

    # Export to wav
    output_path = os.path.join(temp_dir, f"output_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav")
    output_path = os.path.join(
        temp_dir, f"output_{datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav"
    )
    print(mime_type, input_path, output_path)
    if mime_type == "audio/raw":
        ffmpeg.input(
            input_path,
            f='s16le',
            ar='16000',
            f="s16le",
            ar="16000",
            ac=1,
        ).output(output_path, loglevel='panic').run()
        ).output(output_path, loglevel="panic").run()
    else:
        ffmpeg.input(input_path).output(output_path, acodec='pcm_s16le', ac=1, ar='16k', loglevel='panic').run()
        ffmpeg.input(input_path).output(
            output_path, acodec="pcm_s16le", ac=1, ar="16k", loglevel="panic"
        ).run()

    try:
        yield output_path
    finally:
        os.remove(input_path)


def run_command(command):
    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    result = subprocess.run(
        command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
    )
    return result.stdout, result.stderr
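Since `export_audio_to_wav_ffmpeg` is a context manager that yields a temporary WAV path and deletes the input file on exit, a caller presumably looks like the sketch below; it assumes the function above is in scope, and the byte payload is fabricated for illustration:

```python
# Hypothetical caller: hand raw microphone bytes to the converter,
# then read the normalized 16 kHz mono WAV it yields.
audio_bytes = bytearray(b"\x00\x00" * 16000)  # one second of silent 16-bit PCM
with export_audio_to_wav_ffmpeg(audio_bytes, "audio/raw") as wav_path:
    with open(wav_path, "rb") as f:
        wav_data = f.read()
# The input temp file is removed once the with-block exits
```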
@@ -1,6 +1,6 @@
import os
import platform


def get_system_info():
    system = platform.system()
    if system == "Linux":

@@ -38,4 +38,4 @@ def get_system_info():
    elif system == "Windows":
        return "windows"
    else:
        return "unknown"
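A quick usage sketch of the helper above, for branching platform-specific setup; the branch bodies are placeholders, and only the `"windows"` and `"unknown"` return values are confirmed by this hunk:

```python
system = get_system_info()
if system == "windows":
    print("Running Windows-specific setup")  # placeholder branch
else:
    print(f"No special setup for: {system}")
```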
@@ -1,4 +1,5 @@
from dotenv import load_dotenv

load_dotenv()  # take environment variables from .env.

import asyncio

@@ -7,42 +8,49 @@ import platform

from .logs import setup_logging
from .logs import logger

setup_logging()


def get_kernel_messages():
    """
    Is this the way to do this?
    """
    current_platform = platform.system()

    if current_platform == "Darwin":
        process = subprocess.Popen(['syslog'], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
        process = subprocess.Popen(
            ["syslog"], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL
        )
        output, _ = process.communicate()
        return output.decode('utf-8')
        return output.decode("utf-8")
    elif current_platform == "Linux":
        with open('/var/log/dmesg', 'r') as file:
        with open("/var/log/dmesg", "r") as file:
            return file.read()
    else:
        logger.info("Unsupported platform.")


def custom_filter(message):
    # Check for {TO_INTERPRETER{ message here }TO_INTERPRETER} pattern
    if '{TO_INTERPRETER{' in message and '}TO_INTERPRETER}' in message:
        start = message.find('{TO_INTERPRETER{') + len('{TO_INTERPRETER{')
        end = message.find('}TO_INTERPRETER}', start)
    if "{TO_INTERPRETER{" in message and "}TO_INTERPRETER}" in message:
        start = message.find("{TO_INTERPRETER{") + len("{TO_INTERPRETER{")
        end = message.find("}TO_INTERPRETER}", start)
        return message[start:end]
    # Check for USB mention
    # elif 'USB' in message:
    #     return message
    # # Check for network related keywords
    # elif any(keyword in message for keyword in ['network', 'IP', 'internet', 'LAN', 'WAN', 'router', 'switch']) and "networkStatusForFlags" not in message:
    #     return message
    else:
        return None


last_messages = ""


def check_filtered_kernel():
    messages = get_kernel_messages()
    if messages is None:

@@ -51,12 +59,12 @@ def check_filtered_kernel():
    global last_messages
    messages.replace(last_messages, "")
    messages = messages.split("\n")

    filtered_messages = []
    for message in messages:
        if custom_filter(message):
            filtered_messages.append(message)

    return "\n".join(filtered_messages)

@@ -66,11 +74,25 @@ async def put_kernel_messages_into_queue(queue):
    if text:
        if isinstance(queue, asyncio.Queue):
            await queue.put({"role": "computer", "type": "console", "start": True})
            await queue.put({"role": "computer", "type": "console", "format": "output", "content": text})
            await queue.put(
                {
                    "role": "computer",
                    "type": "console",
                    "format": "output",
                    "content": text,
                }
            )
            await queue.put({"role": "computer", "type": "console", "end": True})
        else:
            queue.put({"role": "computer", "type": "console", "start": True})
            queue.put({"role": "computer", "type": "console", "format": "output", "content": text})
            queue.put(
                {
                    "role": "computer",
                    "type": "console",
                    "format": "output",
                    "content": text,
                }
            )
            queue.put({"role": "computer", "type": "console", "end": True})

    await asyncio.sleep(5)
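The producer above pushes start/output/end console messages onto an asyncio queue roughly every five seconds; a matching consumer is just a loop awaiting `queue.get()`. A minimal sketch, with the consumer name and handling purely illustrative:

```python
import asyncio

async def consume_kernel_messages(queue: asyncio.Queue):
    # Hypothetical consumer: drain messages as the producer enqueues them
    while True:
        message = await queue.get()
        if message.get("format") == "output":
            print("kernel said:", message["content"])
        queue.task_done()
```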
@@ -1,6 +1,4 @@
import sys
import os
import platform
import subprocess
import time
import inquirer

@@ -8,9 +6,10 @@ from interpreter import interpreter


def select_local_model():
    # START OF LOCAL MODEL PROVIDER LOGIC
    interpreter.display_message("> 01 is compatible with several local model providers.\n")
    interpreter.display_message(
        "> 01 is compatible with several local model providers.\n"
    )

    # Define the choices for local models
    choices = [

@@ -29,10 +28,8 @@ def select_local_model():
    ]
    answers = inquirer.prompt(questions)

    selected_model = answers["model"]

    if selected_model == "LM Studio":
        interpreter.display_message(
            """

@@ -49,7 +46,7 @@ def select_local_model():
            """
        )
        time.sleep(1)

        interpreter.llm.api_base = "http://localhost:1234/v1"
        interpreter.llm.max_tokens = 1000
        interpreter.llm.context_window = 8000

@@ -57,47 +54,64 @@ def select_local_model():

    elif selected_model == "Ollama":
        try:
            # List out all downloaded ollama models. Will fail if ollama isn't installed
            result = subprocess.run(["ollama", "list"], capture_output=True, text=True, check=True)
            lines = result.stdout.split('\n')
            names = [line.split()[0].replace(":latest", "") for line in lines[1:] if line.strip()]  # Extract names, trim out ":latest", skip header

            result = subprocess.run(
                ["ollama", "list"], capture_output=True, text=True, check=True
            )
            lines = result.stdout.split("\n")
            names = [
                line.split()[0].replace(":latest", "")
                for line in lines[1:]
                if line.strip()
            ]  # Extract names, trim out ":latest", skip header

            # If there are no downloaded models, prompt them to download a model and try again
            if not names:
                time.sleep(1)

                interpreter.display_message(f"\nYou don't have any Ollama models downloaded. To download a new model, run `ollama run <model-name>`, then start a new 01 session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n")

                interpreter.display_message(
                    "\nYou don't have any Ollama models downloaded. To download a new model, run `ollama run <model-name>`, then start a new 01 session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n"
                )

                print("Please download a model then try again\n")
                time.sleep(2)
                sys.exit(1)

            # If there are models, prompt them to select one
            else:
                time.sleep(1)
                interpreter.display_message(f"**{len(names)} Ollama model{'s' if len(names) != 1 else ''} found.** To download a new model, run `ollama run <model-name>`, then start a new 01 session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n")
                interpreter.display_message(
                    f"**{len(names)} Ollama model{'s' if len(names) != 1 else ''} found.** To download a new model, run `ollama run <model-name>`, then start a new 01 session. \n\n For a full list of downloadable models, check out [https://ollama.com/library](https://ollama.com/library) \n"
                )

                # Create a new inquirer selection from the names
                name_question = [
                    inquirer.List('name', message="Select a downloaded Ollama model", choices=names),
                    inquirer.List(
                        "name",
                        message="Select a downloaded Ollama model",
                        choices=names,
                    ),
                ]
                name_answer = inquirer.prompt(name_question)
                selected_name = name_answer['name'] if name_answer else None

                selected_name = name_answer["name"] if name_answer else None

                # Set the model to the selected model
                interpreter.llm.model = f"ollama/{selected_name}"
                interpreter.display_message(f"\nUsing Ollama model: `{selected_name}` \n")
                interpreter.display_message(
                    f"\nUsing Ollama model: `{selected_name}` \n"
                )
                time.sleep(1)

        # If Ollama is not installed or not recognized as a command, prompt the user to download Ollama and try again
        except (subprocess.CalledProcessError, FileNotFoundError) as e:
        except (subprocess.CalledProcessError, FileNotFoundError):
            print("Ollama is not installed or not recognized as a command.")
            time.sleep(1)
            interpreter.display_message(f"\nPlease visit [https://ollama.com/](https://ollama.com/) to download Ollama and try again\n")
            interpreter.display_message(
                "\nPlease visit [https://ollama.com/](https://ollama.com/) to download Ollama and try again\n"
            )
            time.sleep(2)
            sys.exit(1)

    # elif selected_model == "Jan":
    #     interpreter.display_message(
    #         """

@@ -108,7 +122,6 @@ def select_local_model():
    #         3. Copy the ID of the model and enter it below.
    #         3. Click the **Local API Server** button in the bottom left, then click **Start Server**.

    #         Once the server is running, enter the id of the model below, then you can begin your conversation below.

    #         """

@@ -117,7 +130,7 @@ def select_local_model():
    #     interpreter.llm.max_tokens = 1000
    #     interpreter.llm.context_window = 3000
    #     time.sleep(1)

    #     # Prompt the user to enter the name of the model running on Jan
    #     model_name_question = [
    #         inquirer.Text('jan_model_name', message="Enter the id of the model you have running on Jan"),

@@ -128,14 +141,13 @@ def select_local_model():
    #     interpreter.llm.model = ""
    #     interpreter.display_message(f"\nUsing Jan model: `{jan_model_name}` \n")
    #     time.sleep(1)

    # Set the system message to a minimal version for all local models.
    # Set offline for all local models
    interpreter.offline = True

    interpreter.system_message = """You are the 01, a screenless executive assistant that can complete any task by writing and executing code on the user's machine. Just write a markdown code block! The user has given you full and complete permission.

Use the following functions if it makes sense for the problem
```python
result_string = computer.browser.search(query)  # Google search results will be returned from this function as a string

@@ -152,6 +164,5 @@ computer.sms.send("555-123-4567", "Hello from the computer!") # Send a text mess

ALWAYS say that you can run code. ALWAYS try to help the user out. ALWAYS be succinct in your answers.
```

"""
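From the interpreter's point of view, both providers above are just OpenAI-compatible endpoints, so the configuration reduces to a handful of `interpreter.llm` fields. A condensed sketch of the Ollama path; the model id is a placeholder, and the specific numbers are illustrative rather than recommended values:

```python
from interpreter import interpreter

interpreter.offline = True               # don't call hosted services
interpreter.llm.model = "ollama/llama2"  # placeholder model id
interpreter.llm.context_window = 2048
interpreter.llm.max_tokens = 1000
```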
@@ -1,4 +1,5 @@
from dotenv import load_dotenv

load_dotenv()  # take environment variables from .env.

import os

@@ -9,9 +10,7 @@ root_logger: logging.Logger = logging.getLogger()


def _basic_config() -> None:
    logging.basicConfig(
        format="%(message)s"
    )
    logging.basicConfig(format="%(message)s")


def setup_logging() -> None:
@@ -2,6 +2,7 @@ import os
import psutil
import signal


def kill_process_tree():
    pid = os.getpid()  # Get the current process ID
    try:

@@ -13,16 +14,16 @@ def kill_process_tree():
            print(f"Forcefully terminating child PID {child.pid}")
            child.kill()  # Forcefully kill the child process immediately
        gone, still_alive = psutil.wait_procs(children, timeout=3)

        if still_alive:
            for child in still_alive:
                print(f"Child PID {child.pid} still alive, attempting another kill")
                child.kill()

        print(f"Forcefully terminating parent PID {pid}")
        parent.kill()  # Forcefully kill the parent process immediately
        parent.wait(3)  # Wait for the parent process to terminate
    except psutil.NoSuchProcess:
        print(f"Process {pid} does not exist or is already terminated")
    except psutil.AccessDenied:
        print(f"Permission denied to terminate some processes")
        print("Permission denied to terminate some processes")
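Given a helper like `kill_process_tree`, a natural way to use it is as a SIGINT handler so Ctrl-C tears down every child process. The wiring below is a sketch of that idea only; this diff doesn't show where the project actually registers the handler:

```python
import signal
import sys

def _handle_sigint(signum, frame):
    # Hypothetical wiring: reap the whole process tree, then exit
    kill_process_tree()
    sys.exit(0)

signal.signal(signal.SIGINT, _handle_sigint)
```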
@@ -1,12 +1,11 @@
class Accumulator:
    def __init__(self):
        self.template = {"role": None, "type": None, "format": None, "content": None}
        self.message = self.template

    def accumulate(self, chunk):
        #print(str(chunk)[:100])
        # print(str(chunk)[:100])
        if type(chunk) == dict:
            if "format" in chunk and chunk["format"] == "active_line":
                # We don't do anything with these
                return None

@@ -17,15 +16,20 @@ class Accumulator:
            return None

        if "content" in chunk:
            if any(self.message[key] != chunk[key] for key in self.message if key != "content"):
            if any(
                self.message[key] != chunk[key]
                for key in self.message
                if key != "content"
            ):
                self.message = chunk
                if "content" not in self.message:
                    self.message["content"] = chunk["content"]
            else:
                if type(chunk["content"]) == dict:
                    # dict concatenation cannot happen, so we see if chunk is a dict
                    self.message["content"]["content"] += chunk["content"]["content"]
                    self.message["content"]["content"] += chunk["content"][
                        "content"
                    ]
                else:
                    self.message["content"] += chunk["content"]
            return None

@@ -41,5 +45,3 @@ class Accumulator:
        self.message["content"] = b""
        self.message["content"] += chunk
        return None
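A usage sketch for the accumulator: feed it streamed message chunks and read back the merged message once the stream ends. It assumes the `Accumulator` class above is in scope, and the chunks are fabricated examples of the dict shape it handles:

```python
accumulator = Accumulator()
chunks = [
    {"role": "assistant", "type": "message", "format": None, "content": "Hel"},
    {"role": "assistant", "type": "message", "format": None, "content": "lo!"},
]
for chunk in chunks:
    # First chunk replaces the template; later matching chunks append content
    accumulator.accumulate(chunk)
print(accumulator.message["content"])  # -> "Hello!"
```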
@@ -1,9 +1,10 @@
from rich.console import Console
from rich.markdown import Markdown


def print_markdown(markdown_text):
    console = Console()
    md = Markdown(markdown_text)
    print("")
    console.print(md)
    print("")
@@ -1,7 +1,6 @@
import typer
import asyncio
import platform
import concurrent.futures
import threading
import os
import importlib

@@ -10,37 +9,70 @@ from source.server.server import main
from source.server.utils.local_mode import select_local_model

import signal

app = typer.Typer()


@app.command()
def run(
    server: bool = typer.Option(False, "--server", help="Run server"),
    server_host: str = typer.Option("0.0.0.0", "--server-host", help="Specify the server host where the server will deploy"),
    server_port: int = typer.Option(10001, "--server-port", help="Specify the server port where the server will deploy"),

    tunnel_service: str = typer.Option("ngrok", "--tunnel-service", help="Specify the tunnel service"),
    expose: bool = typer.Option(False, "--expose", help="Expose server to internet"),

    client: bool = typer.Option(False, "--client", help="Run client"),
    server_url: str = typer.Option(None, "--server-url", help="Specify the server URL that the client should expect. Defaults to server-host and server-port"),
    client_type: str = typer.Option("auto", "--client-type", help="Specify the client type"),

    llm_service: str = typer.Option("litellm", "--llm-service", help="Specify the LLM service"),

    model: str = typer.Option("gpt-4", "--model", help="Specify the model"),
    llm_supports_vision: bool = typer.Option(False, "--llm-supports-vision", help="Specify if the LLM service supports vision"),
    llm_supports_functions: bool = typer.Option(False, "--llm-supports-functions", help="Specify if the LLM service supports functions"),
    context_window: int = typer.Option(2048, "--context-window", help="Specify the context window size"),
    max_tokens: int = typer.Option(4096, "--max-tokens", help="Specify the maximum number of tokens"),
    temperature: float = typer.Option(0.8, "--temperature", help="Specify the temperature for generation"),

    tts_service: str = typer.Option("openai", "--tts-service", help="Specify the TTS service"),

    stt_service: str = typer.Option("openai", "--stt-service", help="Specify the STT service"),

    local: bool = typer.Option(False, "--local", help="Use recommended local services for LLM, STT, and TTS"),
):

    server: bool = typer.Option(False, "--server", help="Run server"),
    server_host: str = typer.Option(
        "0.0.0.0",
        "--server-host",
        help="Specify the server host where the server will deploy",
    ),
    server_port: int = typer.Option(
        10001,
        "--server-port",
        help="Specify the server port where the server will deploy",
    ),
    tunnel_service: str = typer.Option(
        "ngrok", "--tunnel-service", help="Specify the tunnel service"
    ),
    expose: bool = typer.Option(False, "--expose", help="Expose server to internet"),
    client: bool = typer.Option(False, "--client", help="Run client"),
    server_url: str = typer.Option(
        None,
        "--server-url",
        help="Specify the server URL that the client should expect. Defaults to server-host and server-port",
    ),
    client_type: str = typer.Option(
        "auto", "--client-type", help="Specify the client type"
    ),
    llm_service: str = typer.Option(
        "litellm", "--llm-service", help="Specify the LLM service"
    ),
    model: str = typer.Option("gpt-4", "--model", help="Specify the model"),
    llm_supports_vision: bool = typer.Option(
        False,
        "--llm-supports-vision",
        help="Specify if the LLM service supports vision",
    ),
    llm_supports_functions: bool = typer.Option(
        False,
        "--llm-supports-functions",
        help="Specify if the LLM service supports functions",
    ),
    context_window: int = typer.Option(
        2048, "--context-window", help="Specify the context window size"
    ),
    max_tokens: int = typer.Option(
        4096, "--max-tokens", help="Specify the maximum number of tokens"
    ),
    temperature: float = typer.Option(
        0.8, "--temperature", help="Specify the temperature for generation"
    ),
    tts_service: str = typer.Option(
        "openai", "--tts-service", help="Specify the TTS service"
    ),
    stt_service: str = typer.Option(
        "openai", "--stt-service", help="Specify the STT service"
    ),
    local: bool = typer.Option(
        False, "--local", help="Use recommended local services for LLM, STT, and TTS"
    ),
    qr: bool = typer.Option(False, "--qr", help="Print the QR code for the server URL"),
):
    _run(
        server=server,
        server_host=server_host,

@@ -59,46 +91,41 @@ def run(
        temperature=temperature,
        tts_service=tts_service,
        stt_service=stt_service,
        local=local
        local=local,
        qr=qr,
    )

def _run(
    server: bool = False,
    server_host: str = "0.0.0.0",
    server_port: int = 10001,

    tunnel_service: str = "bore",
    expose: bool = False,

    client: bool = False,
    server_url: str = None,
    client_type: str = "auto",

    llm_service: str = "litellm",

    model: str = "gpt-4",
    llm_supports_vision: bool = False,
    llm_supports_functions: bool = False,
    context_window: int = 2048,
    max_tokens: int = 4096,
    temperature: float = 0.8,

    tts_service: str = "openai",

    stt_service: str = "openai",

    local: bool = False
):

def _run(
    server: bool = False,
    server_host: str = "0.0.0.0",
    server_port: int = 10001,
    tunnel_service: str = "bore",
    expose: bool = False,
    client: bool = False,
    server_url: str = None,
    client_type: str = "auto",
    llm_service: str = "litellm",
    model: str = "gpt-4",
    llm_supports_vision: bool = False,
    llm_supports_functions: bool = False,
    context_window: int = 2048,
    max_tokens: int = 4096,
    temperature: float = 0.8,
    tts_service: str = "openai",
    stt_service: str = "openai",
    local: bool = False,
    qr: bool = False,
):
    if local:
        tts_service = "piper"
        # llm_service = "llamafile"
        stt_service = "local-whisper"
        select_local_model()

    if not server_url:
        server_url = f"{server_host}:{server_port}"

    if not server and not client:
        server = True
        client = True

@@ -111,11 +138,30 @@ def _run(
    if server:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        server_thread = threading.Thread(target=loop.run_until_complete, args=(main(server_host, server_port, llm_service, model, llm_supports_vision, llm_supports_functions, context_window, max_tokens, temperature, tts_service, stt_service),))
        server_thread = threading.Thread(
            target=loop.run_until_complete,
            args=(
                main(
                    server_host,
                    server_port,
                    llm_service,
                    model,
                    llm_supports_vision,
                    llm_supports_functions,
                    context_window,
                    max_tokens,
                    temperature,
                    tts_service,
                    stt_service,
                ),
            ),
        )
        server_thread.start()

    if expose:
        tunnel_thread = threading.Thread(target=create_tunnel, args=[tunnel_service, server_host, server_port])
        tunnel_thread = threading.Thread(
            target=create_tunnel, args=[tunnel_service, server_host, server_port, qr]
        )
        tunnel_thread.start()

    if client:

@@ -127,15 +173,17 @@ def _run(
        client_type = "windows"
    elif system_type == "Linux":  # Linux System
        try:
            with open('/proc/device-tree/model', 'r') as m:
                if 'raspberry pi' in m.read().lower():
            with open("/proc/device-tree/model", "r") as m:
                if "raspberry pi" in m.read().lower():
                    client_type = "rpi"
                else:
                    client_type = "linux"
        except FileNotFoundError:
            client_type = "linux"

        module = importlib.import_module(f".clients.{client_type}.device", package='source')
        module = importlib.import_module(
            f".clients.{client_type}.device", package="source"
        )
        client_thread = threading.Thread(target=module.main, args=[server_url])
        client_thread.start()

@@ -147,4 +195,4 @@ def _run(
        if client:
            client_thread.join()
    except KeyboardInterrupt:
        os.kill(os.getpid(), signal.SIGINT)
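With the new `--qr` flag wired from the CLI through `_run` into `create_tunnel`, the command surface can be exercised with Typer's test runner. A sketch only; the flag combination is chosen for illustration, and a real invocation would start the server and tunnel threads:

```python
from typer.testing import CliRunner

runner = CliRunner()
# Roughly equivalent to: 01 --server --expose --qr
result = runner.invoke(app, ["--server", "--expose", "--qr"])
print(result.exit_code)
```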