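# FastAPI app for real-time deepfake audio detection: audio arrives either as streamed
# WebM chunks over a WebSocket or as an uploaded file, and a pre-trained classifier
# labels each clip as 'fake' or 'real'.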
import os
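# librosa relies on numba, whose JIT cache must live somewhere writable; /tmp is used here,
# presumably because the default cache location is read-only in the hosting environment.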
os.environ['NUMBA_CACHE_DIR'] = '/tmp/'
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, Request, UploadFile, File
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, HTMLResponse
from fastapi.staticfiles import StaticFiles
import numpy as np
import librosa
import soundfile as sf
import joblib
import uvicorn
import logging
import io
from pydub import AudioSegment
from typing import List
from collections import deque
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
app = FastAPI()
app.mount("/static", StaticFiles(directory="static"), name="static")
@app.get("/", response_class=HTMLResponse)
async def get(request: Request):
logger.info("Serving the index page")
with open("templates/index.html") as f:
html_content = f.read()
return HTMLResponse(content=html_content, status_code=200)
@app.get("/health")
def health_check():
return {"status": "ok"}
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
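# Cache the first 1 KiB of a reference WebM file. Streamed chunks (e.g. from the browser's
# MediaRecorder) lack the container header after the first chunk, so these bytes are
# prepended before decoding in process_audio_data.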
with open("header.webm", 'rb') as source_file:
header_data = source_file.read(1024)
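# Global detection state: a start/stop flag, the pre-trained classifier (an XGBoost model,
# judging by the filename), and a deque used as a sliding window over recent chunk predictions.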
is_detecting = False
model = joblib.load('models/xgb_test.pkl')
q = deque()
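# Tracks open WebSocket connections so results can be pushed to every connected client.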
class ConnectionManager:
    def __init__(self):
        self.active_connections: List[WebSocket] = []

    async def connect(self, websocket: WebSocket):
        await websocket.accept()
        self.active_connections.append(websocket)

    def disconnect(self, websocket: WebSocket):
        self.active_connections.remove(websocket)

    async def send_message(self, websocket: WebSocket, message: str):
        await websocket.send_text(message)
manager = ConnectionManager()
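# Mean-pooled spectral features for one clip: 13 MFCCs, 12 chroma bins, 7 spectral-contrast
# bands and 1 spectral centroid (librosa defaults), i.e. a 33-dimensional feature vector.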
def extract_features(audio, sr=16000):
    mfccs = librosa.feature.mfcc(y=audio, sr=sr, n_mfcc=13)
    mfccs = np.mean(mfccs, axis=1)
    chroma = librosa.feature.chroma_stft(y=audio, sr=sr)
    chroma = np.mean(chroma, axis=1)
    contrast = librosa.feature.spectral_contrast(y=audio, sr=sr)
    contrast = np.mean(contrast, axis=1)
    centroid = librosa.feature.spectral_centroid(y=audio, sr=sr)
    centroid = np.mean(centroid, axis=1)
    combined_features = np.hstack([mfccs, chroma, contrast, centroid])
    return combined_features
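# Handle one streamed chunk: prepend the cached WebM header, decode to WAV via pydub/ffmpeg,
# extract features, classify, and broadcast "global-fake" once enough recent chunks look fake.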
async def process_audio_data(audio_data):
    try:
        full_audio_data = header_data + audio_data
        audio_segment = AudioSegment.from_file(io.BytesIO(full_audio_data), format="webm")
        wav_io = io.BytesIO()
        audio_segment.export(wav_io, format="wav")
        wav_io.seek(0)
        audio, sr = sf.read(wav_io, dtype='float32')
    except Exception as e:
        logger.error(f"Failed to read audio data: {e}")
        return
    if audio.ndim > 1:  # If audio has more than one channel, average them
        audio = np.mean(audio, axis=1)
    features = extract_features(audio)
    features = features.reshape(1, -1)
    prediction = model.predict(features)
    is_fake = prediction[0]
    result = 'fake' if is_fake else 'real'
    q.append(is_fake)
    if len(q) > 2:
        if sum(q) >= 2:  # at least two of the last three chunks were flagged as fake
            for connection in manager.active_connections:
                await manager.send_message(connection, "global-fake")
            q.clear()
        else:
            q.popleft()
    return result
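# Start/stop toggles driven by the client UI; the flag only records whether detection is active.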
@app.post("/start_detection")
async def start_detection():
global is_detecting
if not is_detecting:
is_detecting = True
return JSONResponse(content={'status': 'detection_started'})
@app.post("/stop_detection")
async def stop_detection():
global is_detecting
is_detecting = False
return JSONResponse(content={'status': 'detection_stopped'})
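# One-shot classification of an uploaded file; the container format is inferred from the file extension.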
@app.post("/upload_audio/")
async def upload_audio(file: UploadFile = File(...)):
try:
audio_data = await file.read()
audio_segment = AudioSegment.from_file(io.BytesIO(audio_data), format=file.filename.split('.')[-1])
wav_io = io.BytesIO()
audio_segment.export(wav_io, format="wav")
wav_io.seek(0)
audio, sr = sf.read(wav_io, dtype='float32')
if audio.ndim > 1:
audio = np.mean(audio, axis=1)
features = extract_features(audio)
features = features.reshape(1, -1)
prediction = model.predict(features)
is_fake = prediction[0]
result = 'fake' if is_fake else 'real'
return JSONResponse(content={'status': 'success', 'result': result})
except Exception as e:
logger.error(f"Failed to process audio file: {e}")
return JSONResponse(content={'status': 'error', 'message': str(e)}, status_code=500)
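# Streaming endpoint: each binary WebSocket message is one audio chunk, answered with 'fake' or 'real'.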
@app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    await manager.connect(websocket)
    try:
        while True:
            data = await websocket.receive_bytes()
            result = await process_audio_data(data)
            if result:
                await manager.send_message(websocket, result)
    except WebSocketDisconnect:
        manager.disconnect(websocket)
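# Run with uvicorn when executed directly; port 7860 is the convention for Hugging Face Spaces.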
if __name__ == '__main__':
    uvicorn.run(app, host="0.0.0.0", port=7860)