Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -1,27 +1,53 @@
 import gradio as gr
-from transformers import
+from transformers import AutoFeatureExtractor, AutoModelForAudioClassification
+import numpy as np
 
-#
-
-
+# Path to the local directory where the model files are stored within the Space
+local_model_path = "./"
+
+# Initialize the feature extractor and model from the local files
+extractor = AutoFeatureExtractor.from_pretrained(local_model_path)
+model = AutoModelForAudioClassification.from_pretrained(local_model_path)
 
 def predict_voice(audio_file):
-
-
-
-
+    """
+    Predicts whether a voice is real or spoofed from an audio file.
+
+    Args:
+        audio_file: The input audio file to be classified.
+
+    Returns:
+        A string with the prediction and confidence level.
+    """
+    # Convert the input audio file to model's expected format.
+    inputs = extractor(audio_file, return_tensors="pt")
+
+    # Generate predictions from the model.
+    outputs = model(**inputs)
+
+    # Extract logits and compute the class with the highest score.
+    logits = outputs.logits
+    predicted_index = np.argmax(logits.detach().numpy())
+
+    # Translate index to label
+    label = model.config.id2label[predicted_index]
+
+    # Calculate the confidence of the prediction.
+    confidence = np.max(np.softmax(logits.detach().numpy(), axis=1)) * 100
+
+    # Prepare the output string.
+    result = f"The voice is classified as '{label}' with a confidence of {confidence:.2f}%."
+    return result
 
-#
+# Setting up the Gradio interface
 iface = gr.Interface(
-    fn=predict_voice,
-    inputs=gr.Audio(source="upload", type="file", label="Upload Audio File"),
-    outputs=
+    fn=predict_voice, # Function to call
+    inputs=gr.inputs.Audio(source="upload", type="file", label="Upload Audio File"), # Audio input
+    outputs="text", # Text output
     title="Voice Authenticity Detection",
-    description="
-    allow_flagging="never",
+    description="Detects whether a voice is real or AI-generated. Upload an audio file to see the results.",
 theme="huggingface"
 )
 
-#
+# Run the Gradio interface
 iface.launch()
-
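Note: the Space's "Runtime error" status is consistent with a few issues in the updated app.py as committed. NumPy has no np.softmax function, the feature extractor expects a raw waveform array plus a sampling rate rather than the Gradio file object it receives here, and the gr.inputs namespace was removed in recent Gradio releases. The sketch below is one minimal way to address those points, not the author's code: it assumes librosa is added to the Space's requirements.txt for decoding and resampling, keeps the same local checkpoint in ./, and uses torch.softmax in place of np.softmax. The 16 kHz fallback and the helper name predict_voice's audio_path parameter are illustrative choices, not taken from the commit.

import gradio as gr
import librosa  # assumption: librosa is listed in the Space's requirements.txt
import torch
from transformers import AutoFeatureExtractor, AutoModelForAudioClassification

# Load the checkpoint that ships with the Space (same layout as the commit above).
local_model_path = "./"
extractor = AutoFeatureExtractor.from_pretrained(local_model_path)
model = AutoModelForAudioClassification.from_pretrained(local_model_path)

# Fall back to 16 kHz if the extractor config does not declare a sampling rate.
target_sr = getattr(extractor, "sampling_rate", 16000)


def predict_voice(audio_path):
    """Classify the uploaded audio file and report the label with its confidence."""
    # Gradio passes a file path when the Audio component uses type="filepath";
    # librosa decodes it and resamples to the rate the extractor expects.
    waveform, sample_rate = librosa.load(audio_path, sr=target_sr)

    # Turn the raw waveform into model inputs.
    inputs = extractor(waveform, sampling_rate=sample_rate, return_tensors="pt")

    # Run inference without tracking gradients.
    with torch.no_grad():
        logits = model(**inputs).logits

    # torch.softmax replaces the nonexistent np.softmax from the commit.
    probabilities = torch.softmax(logits, dim=-1)[0]
    predicted_index = int(probabilities.argmax())
    label = model.config.id2label[predicted_index]
    confidence = float(probabilities[predicted_index]) * 100

    return f"The voice is classified as '{label}' with a confidence of {confidence:.2f}%."


iface = gr.Interface(
    fn=predict_voice,
    inputs=gr.Audio(type="filepath", label="Upload Audio File"),  # filepath works across recent Gradio versions
    outputs="text",
    title="Voice Authenticity Detection",
    description="Detects whether a voice is real or AI-generated. Upload an audio file to see the results.",
)

iface.launch()

With this approach the Space would also need librosa alongside gradio, transformers, and torch in requirements.txt; exact version pins depend on the Space's base image.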