import os
import torch
import librosa
import numpy as np
import tempfile
from fastapi import FastAPI, UploadFile, File, HTTPException
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
from librosa.sequence import dtw
from contextlib import asynccontextmanager
os.environ["NUMBA_CACHE_DIR"] = "/tmp" # Ensure Numba caching works in container environments
# --- Core Class Definition ---
class QuranRecitationComparer:
def __init__(self, model_name="jonatasgrosman/wav2vec2-large-xlsr-53-arabic", auth_token=None):
"""
Initialize the Quran recitation comparer with a specific Wav2Vec2 model.
"""
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if auth_token:
self.processor = Wav2Vec2Processor.from_pretrained(model_name, token=auth_token)
self.model = Wav2Vec2ForCTC.from_pretrained(model_name, token=auth_token)
else:
self.processor = Wav2Vec2Processor.from_pretrained(model_name)
self.model = Wav2Vec2ForCTC.from_pretrained(model_name)
self.model = self.model.to(self.device)
self.model.eval()
self.embedding_cache = {}
def load_audio(self, file_path, target_sr=16000, trim_silence=True, normalize=True):
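        """
        Load an audio file, resample it to target_sr, and optionally
        peak-normalize it and trim leading/trailing silence.
        """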
if not os.path.exists(file_path):
raise FileNotFoundError(f"Audio file not found: {file_path}")
y, sr = librosa.load(file_path, sr=target_sr)
if normalize:
y = librosa.util.normalize(y)
if trim_silence:
y, _ = librosa.effects.trim(y, top_db=30)
return y
def get_deep_embedding(self, audio, sr=16000):
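        """
        Extract a frame-level embedding sequence of shape (n_frames, hidden_size)
        from the last hidden layer of the Wav2Vec2 model.
        """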
input_values = self.processor(
audio,
sampling_rate=sr,
return_tensors="pt"
).input_values.to(self.device)
with torch.no_grad():
outputs = self.model(input_values, output_hidden_states=True)
hidden_states = outputs.hidden_states[-1]
embedding_seq = hidden_states.squeeze(0).cpu().numpy()
return embedding_seq
def compute_dtw_distance(self, features1, features2):
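        """
        Compute a length-normalized DTW distance between two feature matrices
        of shape (n_features, n_frames), as expected by librosa.sequence.dtw.
        D[-1, -1] is the total alignment cost; dividing by the warping-path
        length normalizes for differing recording durations.
        """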
D, wp = dtw(X=features1, Y=features2, metric='euclidean')
distance = D[-1, -1]
normalized_distance = distance / len(wp)
return normalized_distance
def interpret_similarity(self, norm_distance):
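        """
        Map a normalized DTW distance to a verbal interpretation and a 0-100
        similarity score. The distance thresholds are heuristic cutoffs.
        """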
if norm_distance == 0:
result = "The recitations are identical based on the deep embeddings."
score = 100
elif norm_distance < 1:
result = "The recitations are extremely similar."
score = 95
elif norm_distance < 5:
result = "The recitations are very similar with minor differences."
score = 80
elif norm_distance < 10:
result = "The recitations show moderate similarity."
score = 60
elif norm_distance < 20:
result = "The recitations show some noticeable differences."
score = 40
else:
result = "The recitations are quite different."
score = max(0, 100 - norm_distance)
return result, score
def get_embedding_for_file(self, file_path):
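        """
        Return the embedding for a file, computing and caching it on first
        use (keyed by file path).
        """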
if file_path in self.embedding_cache:
return self.embedding_cache[file_path]
audio = self.load_audio(file_path)
embedding = self.get_deep_embedding(audio)
self.embedding_cache[file_path] = embedding
return embedding
def predict(self, file_path1, file_path2):
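        """
        Compare two audio files and return (similarity_score, interpretation).
        Embeddings are transposed to (features, frames) because librosa's dtw
        aligns sequences along the last axis.
        """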
embedding1 = self.get_embedding_for_file(file_path1)
embedding2 = self.get_embedding_for_file(file_path2)
norm_distance = self.compute_dtw_distance(embedding1.T, embedding2.T)
interpretation, similarity_score = self.interpret_similarity(norm_distance)
print(f"Similarity Score: {similarity_score:.1f}/100")
print(f"Interpretation: {interpretation}")
return similarity_score, interpretation
def clear_cache(self):
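        """Reset the in-memory embedding cache."""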
self.embedding_cache = {}
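
# A minimal direct-usage sketch of the class (hypothetical file paths; the
# FastAPI endpoints below are the intended entry point):
#
#     comparer = QuranRecitationComparer()
#     score, interpretation = comparer.predict("recitation_a.wav", "recitation_b.wav")
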
# --- Lifespan Event Handler ---
comparer = None  # Populated at startup by the lifespan handler below
@asynccontextmanager
async def lifespan(app: FastAPI):
    global comparer
# Use environment variables or a secure configuration in production
auth_token = os.environ.get("HF_TOKEN")
comparer = QuranRecitationComparer(
model_name="jonatasgrosman/wav2vec2-large-xlsr-53-arabic",
auth_token=auth_token
)
print("Model initialized and ready for predictions!")
yield
print("Application shutdown: Cleanup if necessary.")
app = FastAPI(
title="Quran Recitation Comparer API",
description="Compares two Quran recitations using a deep wav2vec2 model.",
version="1.0",
lifespan=lifespan
)
# --- API Endpoints ---
@app.get("/", summary="Health Check")
async def root():
return {"message": "Quran Recitation Comparer API is up and running."}
@app.post("/predict", summary="Compare Two Audio Files", response_model=dict)
async def predict(file1: UploadFile = File(...), file2: UploadFile = File(...)):
tmp1_path = None
tmp2_path = None
try:
        suffix1 = os.path.splitext(file1.filename or "")[1] or ".wav"  # Fall back to .wav if no extension
with tempfile.NamedTemporaryFile(delete=False, suffix=suffix1) as tmp1:
content1 = await file1.read()
tmp1.write(content1)
tmp1_path = tmp1.name
        suffix2 = os.path.splitext(file2.filename or "")[1] or ".wav"  # Fall back to .wav if no extension
with tempfile.NamedTemporaryFile(delete=False, suffix=suffix2) as tmp2:
content2 = await file2.read()
tmp2.write(content2)
tmp2_path = tmp2.name
similarity_score, interpretation = comparer.predict(tmp1_path, tmp2_path)
return {"similarity_score": similarity_score, "interpretation": interpretation}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
finally:
if tmp1_path and os.path.exists(tmp1_path):
os.remove(tmp1_path)
if tmp2_path and os.path.exists(tmp2_path):
os.remove(tmp2_path)
@app.post("/clear_cache", summary="Clear Embedding Cache", response_model=dict)
async def clear_cache():
comparer.clear_cache()
return {"message": "Cache cleared."}
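
# To exercise the API locally, one option (assuming uvicorn is installed and
# this file is saved as app.py; port 7860 is the Hugging Face Spaces convention):
#
#     uvicorn app:app --host 0.0.0.0 --port 7860
#
# Then, with two sample recordings on disk (hypothetical file names):
#
#     curl -X POST -F "file1=@recitation_a.wav" -F "file2=@recitation_b.wav" \
#          http://localhost:7860/predict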