# Hugging Face Space: Audio Deepfake Detection
# (non-code page header "Spaces: / Running / Running" from the scraped
#  Space listing removed so the file parses as Python)
import gradio as gr
import torch
from transformers import pipeline

# Initialize the audio-classification pipeline once at import time so the
# model is loaded a single time and shared across all requests.
# MelodyMachine/Deepfake-audio-detection-V2 is a binary fake/real classifier.
pipe = pipeline("audio-classification", model="MelodyMachine/Deepfake-audio-detection-V2")
def detect_deepfake(audio_file):
    """
    Classify an uploaded audio file as deepfake or real.

    Args:
        audio_file: Path to the audio file on disk (the Gradio Audio
            component is configured with type="filepath"), or None when
            nothing has been uploaded yet.

    Returns:
        A markdown-formatted string: a headline verdict with confidence,
        followed by per-label scores — or a help/error message.
    """
    # Guard before the try so a missing upload is a plain prompt,
    # never reported as a processing error.
    if audio_file is None:
        return "Please upload an audio file"
    try:
        # The pipeline returns a list of {'label': ..., 'score': ...}
        # dicts, one per class.
        result = pipe(audio_file)

        predictions = {item['label']: item['score'] for item in result}
        confidence_text = "".join(
            f"{label}: {score:.4f} ({score * 100:.2f}%)\n"
            for label, score in predictions.items()
        )

        # The highest-scoring label determines the headline verdict.
        top_prediction = max(predictions, key=predictions.get)
        confidence = predictions[top_prediction]

        # 'fake' is a substring of 'deepfake', so one test covers both
        # label spellings. (Original emoji were mojibake; repaired here.)
        if 'fake' in top_prediction.lower():
            main_result = f"⚠️ **DEEPFAKE DETECTED** (Confidence: {confidence * 100:.1f}%)"
        else:
            main_result = f"✅ **REAL AUDIO** (Confidence: {confidence * 100:.1f}%)"

        detailed_results = f"**Detailed Results:**\n{confidence_text}"
        return f"{main_result}\n\n{detailed_results}"
    except Exception as e:
        # Surface the failure in the UI instead of crashing the app.
        return f"Error processing audio: {str(e)}"
# Create the Gradio interface. (Emoji in the labels below were mojibake in
# the scraped source and have been restored to their intended characters.)
with gr.Blocks(title="Audio Deepfake Detection", theme=gr.themes.Soft()) as app:
    gr.Markdown(
        """
        # 🎵 Audio Deepfake Detection
        Upload an audio file to detect if it's artificially generated (deepfake) or real.
        **Supported formats:** WAV, MP3, FLAC, M4A
        """
    )
    with gr.Row():
        with gr.Column():
            # type="filepath" hands detect_deepfake a path string, which is
            # what the transformers pipeline accepts directly.
            audio_input = gr.Audio(
                label="Upload Audio File",
                type="filepath",
                sources=["upload"]
            )
            detect_btn = gr.Button(
                "🔍 Analyze Audio",
                variant="primary",
                size="lg"
            )
        with gr.Column():
            output_text = gr.Textbox(
                label="Detection Results",
                lines=8,
                max_lines=10,
                interactive=False
            )
    # Run the detector when the button is clicked ...
    detect_btn.click(
        fn=detect_deepfake,
        inputs=audio_input,
        outputs=output_text
    )
    # ... and also automatically as soon as a file is uploaded/changed.
    audio_input.change(
        fn=detect_deepfake,
        inputs=audio_input,
        outputs=output_text
    )
    gr.Markdown(
        """
        ---
        **Note:** This model analyzes audio characteristics to detect artificial generation.
        Results are probabilities, not definitive proof.
        """
    )
if __name__ == "__main__":
    # Bind to all interfaces on the standard HF Spaces port; no public
    # share link since Spaces provides the external URL.
    app.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False
    )