from fastapi import FastAPI, UploadFile, File, Response, Request
from fastapi.staticfiles import StaticFiles
import ggwave
import scipy.io.wavfile as wav
import numpy as np
import os
from pydantic import BaseModel
from groq import Groq
import io
import wave

app = FastAPI()

# Serve static files
app.mount("/static", StaticFiles(directory="static"), name="static")

# Initialize ggwave instance
instance = ggwave.init()

# Initialize Groq client
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

class TextInput(BaseModel):
    text: str

@app.get("/")
async def serve_homepage():
    """Serve the chat interface HTML."""
    with open("static/index.html", "r") as f:
        return Response(content=f.read(), media_type="text/html")

@app.post("/stt/")
async def speech_to_text(file: UploadFile = File(...)):
    """Convert WAV audio file to text using ggwave."""
    with open("temp.wav", "wb") as audio_file:
        audio_file.write(await file.read())
    
    # Load WAV file
    fs, recorded_waveform = wav.read("temp.wav")
    os.remove("temp.wav")
    
    # ggwave expects float32 samples, so rescale the int16 PCM data before decoding
    waveform_bytes = (recorded_waveform.astype(np.float32) / 32767.0).tobytes()
    decoded_message = ggwave.decode(instance, waveform_bytes)
    
    return {"text": decoded_message.decode("utf-8") if decoded_message else None}

@app.post("/tts/")
def text_to_speech(input_text: TextInput):
    """Convert text to a WAV audio file using ggwave and return as response."""
    encoded_waveform = ggwave.encode(input_text.text, protocolId=1, volume=100)
    
    # Convert byte data into float32 array
    waveform_float32 = np.frombuffer(encoded_waveform, dtype=np.float32)
    
    # Scale the float32 samples (-1.0 to 1.0) into the int16 range
    waveform_int16 = np.int16(waveform_float32 * 32767)
    
    # Save to buffer instead of a file
    buffer = io.BytesIO()
    with wave.open(buffer, "wb") as wf:
        wf.setnchannels(1)                  # Mono audio
        wf.setsampwidth(2)                  # 2 bytes per sample (16-bit PCM)
        wf.setframerate(48000)              # Sample rate
        wf.writeframes(waveform_int16.tobytes())  # Write waveform as bytes
    
    buffer.seek(0)
    return Response(content=buffer.getvalue(), media_type="audio/wav")
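
# Example request (assuming the server runs on localhost:8000); the response body is a WAV file:
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"text": "hello"}' http://localhost:8000/tts/ --output hello.wav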

@app.post("/chat/")
async def chat_with_llm(file: UploadFile = File(...)):
    """Process input WAV, send text to LLM, and return generated response as WAV."""
    with open("input_chat.wav", "wb") as audio_file:
        audio_file.write(await file.read())
    
    # Load WAV file
    fs, recorded_waveform = wav.read("input_chat.wav")
    os.remove("input_chat.wav")
    recorded_waveform = recorded_waveform.astype(np.float32) / 32767.0
    waveform_bytes = recorded_waveform.tobytes()
    user_message = ggwave.decode(instance, waveform_bytes)
    print("user_message" + user_message.decode("utf-8") )
    # Send to LLM
    chat_completion = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": "You are a helpful assistant. Always answer in one sentence.",
            },
            {"role": "user", "content": user_message.decode("utf-8")},
        ],
        model="llama-3.3-70b-versatile",
    )
    llm_response = chat_completion.choices[0].message.content
    print(llm_response)
    # Convert the LLM response back into a ggwave-encoded WAV
    encoded_waveform = ggwave.encode(llm_response, protocolId=1, volume=100)
    
    # Convert byte data into float32 array
    waveform_float32 = np.frombuffer(encoded_waveform, dtype=np.float32)
    
    # Scale the float32 samples (-1.0 to 1.0) into the int16 range
    waveform_int16 = np.int16(waveform_float32 * 32767)
    
    # Save to buffer instead of a file
    buffer = io.BytesIO()
    with wave.open(buffer, "wb") as wf:
        wf.setnchannels(1)                  # Mono audio
        wf.setsampwidth(2)                  # 2 bytes per sample (16-bit PCM)
        wf.setframerate(48000)              # Sample rate
        wf.writeframes(waveform_int16.tobytes())  # Write waveform as bytes
    
    buffer.seek(0)
    
    # Header values must be str, not bytes, so decode the user message here
    return Response(content=buffer.getvalue(), media_type="audio/wav", headers={
        "X-User-Message": user_message.decode("utf-8"),
        "X-LLM-Response": llm_response
    })
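
# Example request (hypothetical file name, assuming the server runs on localhost:8000);
# the decoded user message and the LLM reply are echoed in the response headers:
#   curl -X POST -F "file=@question.wav" http://localhost:8000/chat/ -D - --output reply.wav

# Minimal local entry point; a sketch that assumes uvicorn is installed
# (it is not imported or required elsewhere in this file).
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)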