from contextlib import asynccontextmanager
from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, FileResponse
from fastapi.staticfiles import StaticFiles
import asyncio
import logging
import os
import traceback
import argparse
import uvicorn

from core import WhisperLiveKit
from audio_processor import AudioProcessor

logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logging.getLogger().setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

kit = None


@asynccontextmanager
async def lifespan(app: FastAPI):
    global kit
    kit = WhisperLiveKit()
    yield


app = FastAPI(lifespan=lifespan)
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Allows all origins
    allow_credentials=True,
    allow_methods=["*"],  # Allows all methods
    allow_headers=["*"],  # Allows all headers
)

# Mount static files
app.mount("/static", StaticFiles(directory="static"), name="static")


@app.get("/")
async def read_root():
    return FileResponse("static/index.html")


@app.get("/health")
async def health_check():
    return JSONResponse({"status": "healthy"})


async def handle_websocket_results(websocket, results_generator):
    """Consumes results from the audio processor and sends them via WebSocket."""
    try:
        async for response in results_generator:
            try:
                logger.debug(f"Sending response: {response}")
                if isinstance(response, dict):
                    # Ensure the response has a consistent format
                    if 'buffer_transcription' in response:
                        await websocket.send_json({
                            'buffer_transcription': response['buffer_transcription']
                        })
                    elif 'full_transcription' in response:
                        await websocket.send_json({
                            'full_transcription': response['full_transcription']
                        })
                    else:
                        await websocket.send_json(response)
                else:
                    # If response is not a dict, wrap it in a text field
                    await websocket.send_json({"text": str(response)})
            except Exception as e:
                logger.error(f"Error sending message: {e}")
                logger.error(f"Traceback: {traceback.format_exc()}")
                raise
    except Exception as e:
        logger.warning(f"Error in WebSocket results handler: {e}")
        logger.warning(f"Traceback: {traceback.format_exc()}")


@app.websocket("/asr")
async def websocket_endpoint(websocket: WebSocket):
    logger.info("New WebSocket connection request")
    audio_processor = None
    websocket_task = None
    try:
        await websocket.accept()
        logger.info("WebSocket connection accepted")

        audio_processor = AudioProcessor()
        results_generator = await audio_processor.create_tasks()
        websocket_task = asyncio.create_task(handle_websocket_results(websocket, results_generator))

        while True:
            try:
                message = await websocket.receive_bytes()
                logger.debug(f"Received audio chunk of size: {len(message)}")
                await audio_processor.process_audio(message)
            except WebSocketDisconnect:
                logger.warning("WebSocket disconnected.")
                break
            except Exception as e:
                logger.error(f"Error processing audio chunk: {e}")
                logger.error(f"Traceback: {traceback.format_exc()}")
                break
    except WebSocketDisconnect:
        logger.warning("WebSocket disconnected during setup.")
    except Exception as e:
        logger.error(f"Error in WebSocket endpoint: {e}")
        logger.error(f"Traceback: {traceback.format_exc()}")
    finally:
        if websocket_task:
            websocket_task.cancel()
            try:
                await websocket_task
            except asyncio.CancelledError:
                pass
        if audio_processor:
            await audio_processor.cleanup()
        logger.info("WebSocket endpoint cleaned up.")


def parse_args():
    parser = argparse.ArgumentParser(description="Whisper FastAPI Online Server")
parser.add_argument( "--host", type=str, default="localhost", help="The host address to bind the server to.", ) parser.add_argument( "--port", type=int, default=8000, help="The port number to bind the server to." ) parser.add_argument( "--warmup-file", type=str, default=None, dest="warmup_file", help=""" The path to a speech audio wav file to warm up Whisper so that the very first chunk processing is fast. If not set, uses https://github.com/ggerganov/whisper.cpp/raw/master/samples/jfk.wav. If False, no warmup is performed. """, ) parser.add_argument( "--confidence-validation", action="store_true", help="Accelerates validation of tokens using confidence scores. Transcription will be faster but punctuation might be less accurate.", ) parser.add_argument( "--diarization", action="store_true", default=False, help="Enable speaker diarization.", ) parser.add_argument( "--no-transcription", action="store_true", help="Disable transcription to only see live diarization results.", ) parser.add_argument( "--min-chunk-size", type=float, default=0.5, help="Minimum audio chunk size in seconds. It waits up to this time to do processing. If the processing takes shorter time, it waits, otherwise it processes the whole segment that was received by this time.", ) parser.add_argument( "--model", type=str, default="tiny", help="Name size of the Whisper model to use (default: tiny). Suggested values: tiny.en,tiny,base.en,base,small.en,small,medium.en,medium,large-v1,large-v2,large-v3,large,large-v3-turbo. The model is automatically downloaded from the model hub if not present in model cache dir.", ) parser.add_argument( "--model_cache_dir", type=str, default=None, help="Overriding the default model cache dir where models downloaded from the hub are saved", ) parser.add_argument( "--model_dir", type=str, default=None, help="Dir where Whisper model.bin and other files are saved. This option overrides --model and --model_cache_dir parameter.", ) parser.add_argument( "--lan", "--language", type=str, default="en", help="Source language code, e.g. en,de,cs, or 'auto' for language detection.", ) parser.add_argument( "--task", type=str, default="transcribe", choices=["transcribe", "translate"], help="Transcribe or translate.", ) parser.add_argument( "--backend", type=str, default="faster-whisper", choices=["faster-whisper", "whisper_timestamped", "mlx-whisper", "openai-api"], help="Load only this backend for Whisper processing.", ) parser.add_argument( "--vac", action="store_true", default=False, help="Use VAC = voice activity controller. Recommended. Requires torch.", ) parser.add_argument( "--vac-chunk-size", type=float, default=0.04, help="VAC sample size in seconds." ) parser.add_argument( "--no-vad", action="store_true", default=False, help="Disable VAD = voice activity detection. 
Not recommended.", ) parser.add_argument( "--buffer_trimming", type=str, default="sentence", choices=["sentence", "segment"], help="Buffer trimming strategy.", ) parser.add_argument( "--buffer_trimming_sec", type=float, default=1.0, help="Buffer trimming length in seconds.", ) parser.add_argument( "-l", "--log-level", type=str, default="INFO", choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], help="Set the logging level.", ) args = parser.parse_args() args.transcription = not args.no_transcription args.vad = not args.no_vad delattr(args, 'no_transcription') delattr(args, 'no_vad') return args def main(): args = parse_args() # Initialize WhisperLiveKit with parsed arguments kit = WhisperLiveKit(args=args) # Start the server uvicorn.run( "main:app", host=args.host, port=args.port, log_level=args.log_level.lower() ) if __name__ == "__main__": main()