SalexAI committed on
Commit
341331a
·
verified ·
1 Parent(s): 60778ba

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +57 -75
app.py CHANGED
@@ -1,77 +1,59 @@
1
- import os
2
- import httpx
3
- import numpy as np
4
- from queue import Queue, Empty
5
-
6
- from fastrtc import Stream, StreamHandler, get_stt_model, get_tts_model
7
- from openai import OpenAI
8
-
9
# Initialize OpenAI client and on-device models.
# NOTE(review): the variable is named sambanova_client but base_url points at
# DeepInfra — confirm which provider is actually intended. The API key is read
# from a generically-named "key" environment variable.
sambanova_client = OpenAI(
    api_key=os.getenv("key"),
    base_url="https://api.deepinfra.com/v1"
)
stt_model = get_stt_model()  # on-device speech-to-text model
tts_model = get_tts_model()  # on-device text-to-speech model
16
-
17
class EchoHandler(StreamHandler):
    """Voice chat pipeline: STT -> LLM chat completion -> TTS.

    Incoming audio frames are transcribed, sent to the chat model, and the
    synthesized reply audio is queued for `emit` to hand back to the stream.
    """

    def __init__(self):
        super().__init__()  # uses default sample rates/layouts
        # Outbound audio buffer: (sample_rate, audio chunk) tuples filled by
        # receive() and drained by emit().
        self.queue: Queue[tuple[int, np.ndarray]] = Queue()

    def start_up(self) -> None:
        """Hook called when the stream starts."""
        # Optional: warm up models or state here
        pass

    def receive(self, frame: tuple[int, np.ndarray]) -> None:
        """Process one inbound audio frame end-to-end (STT -> LLM -> TTS)."""
        # frame is (sample_rate, numpy array)
        sample_rate, audio_array = frame

        # 1) Transcribe speech → text
        # NOTE(review): the whole (rate, array) tuple is passed, and the
        # unpacked audio_array above is unused — confirm stt() expects a tuple.
        text = stt_model.stt(frame)

        # 2) Chat completion
        response = sambanova_client.chat.completions.create(
            model="mistralai/Mistral-Small-24B-Instruct-2501",
            messages=[{"role": "user", "content": text}],
            max_tokens=200,
        )
        reply = response.choices[0].message.content

        # 3) Generate TTS chunks and enqueue them
        for tts_chunk in tts_model.stream_tts_sync(reply):
            # each tts_chunk is a numpy array of shape (1, N)
            # The input frame's sample_rate is reused for output; presumably
            # the TTS chunks match it — verify against the TTS model.
            self.queue.put((sample_rate, tts_chunk))

    def emit(self):
        """Return the next queued (sample_rate, chunk) tuple, or None if empty."""
        try:
            return self.queue.get_nowait()
        except Empty:
            return None  # no audio to send right now

    def copy(self) -> "EchoHandler":
        """Return a fresh handler instance (per-connection state is not shared)."""
        return EchoHandler()

    def shutdown(self) -> None:
        """Hook called when the stream shuts down."""
        # Optional cleanup
        pass
58
 
59
def get_cloudflare_turn_credentials(
    turn_key_id=None,
    turn_key_api_token=None,
    hf_token=None,
    ttl=600,
    client: httpx.AsyncClient | None = None,
):
    """Return an RTC configuration with ICE servers.

    Placeholder implementation: every parameter is currently ignored and a
    public Google STUN server is returned instead of real Cloudflare TURN
    credentials. Swap in the actual TURN-credential fetch here.
    """
    stun_server = {"urls": ["stun:stun.l.google.com:19302"]}
    return {"iceServers": [stun_server]}
68
-
69
# Wire up the stream with the new handler
stream = Stream(
    handler=EchoHandler(),
    modality="audio",
    mode="send-receive",
    # Passed as a callable, not called — presumably fastrtc invokes it to
    # fetch the ICE-server config per connection; confirm against fastrtc docs.
    rtc_configuration=get_cloudflare_turn_credentials
)

# Start the fastrtc phone-style interface.
stream.fastphone()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from fastapi import FastAPI, Request
3
+ import uvicorn
4
+
5
# Initialize FastAPI and Gradio
app = FastAPI()  # ASGI application; the Gradio UI is mounted onto it in __main__
gr_interface = None  # set to the mounted Gradio app when run as a script
STORAGE_PATH = "storage.txt"  # flat file holding the most recently stored text
9
+
10
# Initialize storage
def init_storage(path=None):
    """Create the storage file if it does not exist, keeping existing content.

    Args:
        path: File to create. Defaults to the module-level STORAGE_PATH,
            so existing callers (``init_storage()``) are unaffected.
    """
    target = STORAGE_PATH if path is None else path
    try:
        # Mode "x" fails when the file already exists, so stored text is
        # never truncated by a restart.
        with open(target, "x") as f:
            f.write("")
    except FileExistsError:
        pass
17
 
18
+ init_storage()
19
+
20
# Function to read stored data
def read_data(path=None):
    """Return the full contents of the storage file as a string.

    Args:
        path: File to read. Defaults to the module-level STORAGE_PATH,
            preserving the existing ``read_data()`` call signature.
    """
    target = STORAGE_PATH if path is None else path
    with open(target, "r") as f:
        return f.read()
24
+
25
# Function to write data
def write_data(text, path=None):
    """Overwrite the storage file with *text*.

    Args:
        text: String to persist (replaces any previous contents).
        path: File to write. Defaults to the module-level STORAGE_PATH,
            preserving the existing ``write_data(text)`` call signature.
    """
    target = STORAGE_PATH if path is None else path
    with open(target, "w") as f:
        f.write(text)
29
+
30
# Gradio UI function
def show_latest_data():
    """Fetch the currently stored text for display in the Gradio textbox."""
    latest = read_data()
    return latest
33
+
34
# Set up the Gradio Blocks interface
with gr.Blocks() as gr_app:
    gr.Markdown("## Latest Sent Text")
    # value=read_data() is evaluated once at import time; the Refresh button
    # below re-reads the file on demand.
    output = gr.Textbox(label="Stored Text", value=read_data(), interactive=False)
    refresh_btn = gr.Button("Refresh")

    refresh_btn.click(fn=show_latest_data, inputs=[], outputs=output)
41
+
42
# GET /store?text=... — persist the given text to the storage file.
@app.get("/store")
async def store_text(request: Request):
    """Store the ``text`` query parameter and echo it back.

    Returns:
        ``{"status": "success", "stored": <text>}`` when the parameter is
        present, else ``{"status": "error", ...}``.
    """
    text = request.query_params.get("text")
    # Distinguish "parameter absent" (None) from "parameter empty" (""): the
    # original `if text:` returned "No text parameter provided." even when an
    # explicit empty ?text= was supplied, which the message contradicts.
    if text is None:
        return {"status": "error", "message": "No text parameter provided."}
    write_data(text)
    return {"status": "success", "stored": text}
51
+
52
@app.get("/")
async def root():
    """Root endpoint returning a small JSON status payload.

    The previous implementation returned the Gradio Blocks object itself,
    which FastAPI cannot serialize into an HTTP response. When run via
    __main__ the Gradio UI is mounted at "/" anyway, so this route only
    serves as a fallback/health check.
    """
    return {"status": "ok", "ui": "/"}
55
+
56
# Launch Gradio when run as __main__
if __name__ == "__main__":
    # NOTE(review): the Gradio UI is mounted at "/", the same path as the
    # @app.get("/") route registered above — earlier-registered routes take
    # precedence in FastAPI, so confirm the intended path (e.g. "/ui").
    gr_interface = gr.mount_gradio_app(app, gr_app, path="/")
    # Serve on all interfaces on port 7860 (the conventional HF Spaces port).
    uvicorn.run(app, host="0.0.0.0", port=7860)