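"""FastAPI server for streaming speech transcription and translation.

Clients connect to the /ws websocket endpoint with ``from`` and ``to`` language
query parameters, stream binary audio frames (decoded by pcm_bytes_to_np_array),
and send the literal bytes b"END_OF_AUDIO" to signal the end of the stream.
The bundled web client is served as static files under /app.
"""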
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from transcribe.serve import WhisperTranscriptionService
from uuid import uuid1
from logging import getLogger
import numpy as np
from transcribe.processing import ProcessingPipes
from contextlib import asynccontextmanager
from multiprocessing import freeze_support
from typing import Union
from fastapi.staticfiles import StaticFiles
from fastapi.responses import RedirectResponse
import os
from transcribe.utils import pcm_bytes_to_np_array
from config import BASE_DIR
logger = getLogger(__name__)


async def get_audio_from_websocket(websocket: WebSocket) -> Union[np.ndarray, bool]:
    """
    Receives an audio buffer from the websocket and converts it to a numpy array.

    Args:
        websocket: The websocket to receive audio from.

    Returns:
        A numpy array containing the audio, or False when the client sends the
        END_OF_AUDIO marker.
    """
    frame_data = await websocket.receive_bytes()
    if frame_data == b"END_OF_AUDIO":
        return False
    return pcm_bytes_to_np_array(frame_data)


@asynccontextmanager
async def lifespan(app: FastAPI):
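    # Create the processing pipeline once at startup and wait until it is
    # ready before the application starts serving requests.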
    global pipe
    pipe = ProcessingPipes()
    pipe.wait_ready()
    logger.info("Pipeline is ready.")
    yield


FRONTEND_DIR = os.path.join(BASE_DIR, "web")


app = FastAPI(lifespan=lifespan)
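# Serve the bundled web client as static files under /app.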
app.mount("/app", StaticFiles(directory=FRONTEND_DIR, html=True), name="web")
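# Global processing pipeline; initialised by the lifespan handler at startup.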
pipe = None

@app.get("/")
async def root():
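    """Redirect the bare root URL to the static web client."""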
    return RedirectResponse(url="/app/")

@app.websocket("/ws")
async def translate(websocket: WebSocket):
    query_parameters_dict = websocket.query_params
    from_lang, to_lang = query_parameters_dict.get('from'), query_parameters_dict.get('to')

    client = WhisperTranscriptionService(
        websocket,
        pipe,
        language=from_lang,
        dst_lang=to_lang,
        client_uid=f"{uuid1()}",
    )

    if from_lang and to_lang and client:
        logger.info(f"Source lange: {from_lang}  -> Dst lange: {to_lang}")
        await websocket.accept()
    try:
        while True:
            frame_data = await get_audio_from_websocket(websocket)
            client.add_frames(frame_data)
    except WebSocketDisconnect:
        return

if __name__ == '__main__':
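    # freeze_support() is required when the application is packaged as a frozen
    # Windows executable and multiprocessing spawns child processes.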
    freeze_support()
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=9191)