queue audio

commit 1e1db0836c
parent 0602348f1c
@@ -170,5 +170,6 @@ _.aifs
 software/output_audio.wav
 .DS_Store
 
+# ignore node modules and .expo files
 node_modules/
 .expo/

@@ -1,7 +1,7 @@
 import React, { useState, useEffect } from "react";
 import { View, Text, TouchableOpacity, StyleSheet } from "react-native";
 import * as FileSystem from 'expo-file-system';
-import { Audio } from "expo-av";
+import { AVPlaybackStatus, AVPlaybackStatusSuccess, Audio } from "expo-av";
 
 interface MainProps {
   route: {

@@ -19,30 +19,14 @@ const Main: React.FC<MainProps> = ({ route }) => {
   const [audioQueue, setAudioQueue] = useState<string[]>([]);
   const [sound, setSound] = useState<Audio.Sound | null>();
   const audioDir = FileSystem.documentDirectory + '01/audio/';
-  const Buffer = require('buffer').Buffer;
-
-  const toBuffer = async (blob: Blob) => {
-    const uri = await toDataURI(blob);
-    const base64 = uri.replace(/^.*,/g, "");
-    return Buffer.from(base64, "base64");
-  };
-
-  const toDataURI = (blob: Blob) =>
-    new Promise((resolve) => {
-      const reader = new FileReader();
-      reader.readAsDataURL(blob);
-      reader.onloadend = () => {
-        const uri = reader.result?.toString();
-        resolve(uri);
-      };
-    });
 
-  const constructTempFilePath = async (buffer: Buffer) => {
+  const constructTempFilePath = async (buffer: string) => {
+    await dirExists();
+
     const tempFilePath = `${audioDir}${Date.now()}.wav`;
     await FileSystem.writeAsStringAsync(
       tempFilePath,
-      buffer.toString(),
+      buffer,
       {
         encoding: FileSystem.EncodingType.Base64,
       }
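
The rewritten constructTempFilePath awaits dirExists(), a helper that is not part of this diff. A minimal sketch of what such a helper could look like, assuming it only has to guarantee that audioDir exists before the write (the name and behavior here are inferred, not taken from this commit):

const dirExists = async () => {
  // Create the audio directory on first use so writeAsStringAsync has a target.
  const dirInfo = await FileSystem.getInfoAsync(audioDir);
  if (!dirInfo.exists) {
    await FileSystem.makeDirectoryAsync(audioDir, { intermediates: true });
  }
};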

@@ -66,6 +50,12 @@ const Main: React.FC<MainProps> = ({ route }) => {
   const playNextAudio = async () => {
     console.log("in playNextAudio audioQueue is", audioQueue.length);
 
+    if (sound != null){
+      console.log('Unloading Sound');
+      await sound.unloadAsync();
+      setSound(null);
+    }
+
     if (audioQueue.length > 0) {
       const uri = audioQueue.shift() as string;
       console.log("load audio from", uri);

@@ -76,6 +66,9 @@ const Main: React.FC<MainProps> = ({ route }) => {
 
         console.log("playing audio from", uri);
         await sound?.playAsync();
+
+        sound.setOnPlaybackStatusUpdate(_onPlayBackStatusUpdate);
+
       } catch (error){
         console.log("Error playing audio", error);
         playNextAudio();

@@ -84,16 +77,17 @@ const Main: React.FC<MainProps> = ({ route }) => {
     }
   };
 
-  useEffect(() => {
-    return sound
-      ? () => {
-          console.log('Unloading Sound');
-          sound.unloadAsync();
-          setSound(null);
-          playNextAudio();
-        }
-      : undefined;
-  }, [sound]);
+  const isAVPlaybackStatusSuccess = (
+    status: AVPlaybackStatus
+  ): status is AVPlaybackStatusSuccess => {
+    return (status as AVPlaybackStatusSuccess).isLoaded !== undefined;
+  };
+
+  const _onPlayBackStatusUpdate = (status: AVPlaybackStatus) => {
+    if (isAVPlaybackStatusSuccess(status) && status.didJustFinish){
+      playNextAudio();
+    }
+  }
 
   useEffect(() => {
     console.log("audioQueue has been updated:", audioQueue.length);
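
The old cleanup useEffect is replaced by an explicit status callback: isAVPlaybackStatusSuccess narrows expo-av's AVPlaybackStatus union so didJustFinish can be read safely, and _onPlayBackStatusUpdate chains playback by calling playNextAudio when a clip ends. For orientation, a condensed sketch of how this callback gets wired up when a queued URI is loaded, assuming Audio.Sound.createAsync is used (the loading code itself sits outside this diff):

const { sound: newSound } = await Audio.Sound.createAsync({ uri });
setSound(newSound);
await newSound.playAsync();
// Advance the queue once the current clip reports didJustFinish.
newSound.setOnPlaybackStatusUpdate(_onPlayBackStatusUpdate);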

@@ -115,15 +109,16 @@ const Main: React.FC<MainProps> = ({ route }) => {
     };
 
     websocket.onmessage = async (e) => {
-      console.log("Received message from WebSocket", e.data);
-
-      const blob = await e.data;
-      const buffer = await toBuffer(blob);
+      const message = JSON.parse(e.data);
+      console.log(message.content);
+
+      const buffer = await message.content;
+
       const filePath = await constructTempFilePath(buffer);
       setAudioQueue((prevQueue) => [...prevQueue, filePath]);
       console.log("audio file written to", filePath);
 
-      if (e.data.format === "bytes.raw" && e.data.end && audioQueue.length > 1) {
+      if (message.format === "bytes.raw" && message.end && audioQueue.length >= 1) {
         console.log("calling playNextAudio");
         playNextAudio();
       }
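
The onmessage handler now parses each frame as JSON instead of treating it as a raw blob, and the base64 content is written straight to a temp file. From the fields read here and the server-side yield below, the expected message shape is roughly the following (an illustrative type, not one defined in this commit; note the client branches on format === "bytes.raw" while the Python side sends "audio/wav"):

interface AudioMessage {
  role: "assistant";
  type: "audio";
  format: string;   // client checks "bytes.raw"; server below sends "audio/wav"
  content: string;  // base64-encoded WAV bytes
  start?: boolean;
  end?: boolean;
}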

@@ -393,8 +393,10 @@ def stream_tts(sentence):
 
     with open(audio_file, "rb") as f:
         audio_bytes = f.read()
-    desktop_path = os.path.join(os.path.expanduser('~'), 'Desktop')
-    desktop_audio_file = os.path.join(desktop_path, os.path.basename(audio_file))
+    desktop_path = os.path.join(os.path.expanduser("~"), "Desktop")
+    desktop_audio_file = os.path.join(
+        desktop_path, f"{datetime.datetime.now()}" + os.path.basename(audio_file)
+    )
     shutil.copy(audio_file, desktop_audio_file)
     print(f"Audio file saved to Desktop: {desktop_audio_file}")
     # storage_client = storage.Client(project="react-native-421323")

@@ -409,15 +411,23 @@ def stream_tts(sentence):
     # f"Audio file {audio_file} uploaded to {datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')}.wav"
     # )
 
-    os.remove(audio_file)
     file_type = "audio/wav"
     # Read the entire WAV file
     with open(audio_file, "rb") as f:
         audio_bytes = f.read()
 
+    os.remove(audio_file)
+
     # Stream the audio as a single message
-    yield {"role": "assistant", "type": "audio", "format": file_type, "content": base64.b64encode(audio_bytes).decode('utf-8'), "start": True, "end": True}
+    yield {
+        "role": "assistant",
+        "type": "audio",
+        "format": file_type,
+        "content": base64.b64encode(audio_bytes).decode("utf-8"),
+        "start": True,
+        "end": True,
+    }
 
 
 from uvicorn import Config, Server
 import os