import gradio as gr
import torch
import torchaudio
from torchaudio.transforms import Resample
from transformers import AutoFeatureExtractor, AutoModelForAudioXVector

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Shared CSS for the result cards; add rules inside the <style> block as needed.
STYLE = """
<style>
</style>
"""

# Result cards rendered via gr.HTML; the markup is kept minimal.
OUTPUT_OK = (
    STYLE
    + """
    <div class="result">
        <p>The speakers are</p>
        <h1>{:.1f}%</h1>
        <p>similar</p>
        <h2>Welcome, human!</h2>
        <small>(You must get at least 80% to be considered the same person)</small>
    </div>
""" ) OUTPUT_FAIL = ( STYLE + """

        <p>The speakers are</p>
        <h1>{:.1f}%</h1>
        <p>similar</p>
        <h2>You shall not pass!</h2>
        <small>(You must get at least 80% to be considered the same person)</small>
    </div>
""" ) THRESHOLD = 0.80 model_name = "microsoft/wavlm-base-plus-sv" feature_extractor = AutoFeatureExtractor.from_pretrained(model_name) model = AutoModelForAudioXVector.from_pretrained(model_name).to(device) cosine_sim = torch.nn.CosineSimilarity(dim=-1) def preprocess_audio(file_path, target_sr=16000): wav, sr = torchaudio.load(file_path) if sr != target_sr: wav = Resample(orig_freq=sr, new_freq=target_sr)(wav) return wav def similarity_fn(path1, path2): if not (path1 and path2): return 'ERROR: Please record audio for *both* speakers!' wav1 = preprocess_audio(path1) wav2 = preprocess_audio(path2) input1 = feature_extractor(wav1.squeeze(0), return_tensors="pt", sampling_rate=16000).input_values.to(device) input2 = feature_extractor(wav2.squeeze(0), return_tensors="pt", sampling_rate=16000).input_values.to(device) with torch.no_grad(): emb1 = model(input1).embeddings emb2 = model(input2).embeddings emb1 = torch.nn.functional.normalize(emb1, dim=-1).cpu() emb2 = torch.nn.functional.normalize(emb2, dim=-1).cpu() similarity = cosine_sim(emb1, emb2).numpy()[0] if similarity >= THRESHOLD: output = OUTPUT_OK.format(similarity * 100) else: output = OUTPUT_FAIL.format(similarity * 100) return output with gr.Blocks() as demo: gr.Markdown("# Voice Authentication with WavLM + X-Vectors") gr.Markdown( "This demo compares two speech samples to determine if they are from the same speaker. " "Try it with your own voice!" ) with gr.Row(): input1 = gr.Audio(sources=["microphone", "upload"], type="filepath", label="Speaker #1") input2 = gr.Audio(sources=["microphone", "upload"], type="filepath", label="Speaker #2") output = gr.HTML(label="Result") btn = gr.Button("Compare Speakers") btn.click(similarity_fn, inputs=[input1, input2], outputs=output) gr.Examples( examples=[ ["samples/denzel_washington.mp3", "samples/denzel_washington.mp3"], ["samples/heath_ledger_2.mp3", "samples/heath_ledger_3.mp3"], ["samples/heath_ledger_3.mp3", "samples/denzel_washington.mp3"], ["samples/denzel_washington.mp3", "samples/heath_ledger_2.mp3"], ], inputs=[input1, input2], ) gr.Markdown( "

" "🎙️ Learn more about WavLM | " "📚 WavLM paper | " "📚 X-Vector paper" "

" ) demo.launch()