Spaces:
Running
Running
File size: 3,129 Bytes
dd8355e 9f0b456 dd8355e 9f0b456 dd8355e 9f0b456 b9ec101 9f0b456 a9277e0 9f0b456 a9277e0 dd8355e 9f0b456 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 |
import gradio as gr
import torch
from transformers import pipeline
# Initialize the pipeline
# Load the pretrained audio-classification model once at import time so every
# request reuses the same pipeline object (first run downloads the weights).
# NOTE(review): model loading happens at module import — slow cold start is expected.
pipe = pipeline("audio-classification", model="MelodyMachine/Deepfake-audio-detection-V2")
def detect_deepfake(audio_file):
    """
    Classify an uploaded audio file as deepfake or real.

    Parameters
    ----------
    audio_file : str | None
        Filesystem path to the audio clip (Gradio ``type="filepath"``),
        or ``None`` when nothing has been uploaded.

    Returns
    -------
    str
        A Markdown-formatted verdict plus per-label confidence scores,
        or a human-readable error message. Never raises to the caller.
    """
    # Guard clause outside the try: a missing upload is not an error condition.
    if audio_file is None:
        return "Please upload an audio file"
    try:
        # Pipeline returns a list of {'label': str, 'score': float} dicts.
        result = pipe(audio_file)
        if not result:
            # Without this guard, max() below would raise ValueError and
            # surface as a cryptic generic error message.
            return "Error processing audio: model returned no predictions"

        # Per-label confidence breakdown (join instead of quadratic +=).
        confidence_text = "".join(
            f"{item['label']}: {item['score']:.4f} ({item['score'] * 100:.2f}%)\n"
            for item in result
        )

        # Highest-scoring label decides the verdict.
        top = max(result, key=lambda item: item['score'])
        confidence = top['score']

        # 'fake' also matches 'deepfake' as a substring, so one test suffices.
        if 'fake' in top['label'].lower():
            main_result = f"⚠️ **DEEPFAKE DETECTED** (Confidence: {confidence*100:.1f}%)"
        else:
            main_result = f"✅ **REAL AUDIO** (Confidence: {confidence*100:.1f}%)"

        detailed_results = f"**Detailed Results:**\n{confidence_text}"
        return f"{main_result}\n\n{detailed_results}"
    except Exception as e:
        # Broad catch is deliberate: surface any failure (unreadable file,
        # codec error, model fault) to the UI instead of crashing the app.
        return f"Error processing audio: {str(e)}"
# Create the Gradio interface
# Two-column layout: audio upload + button on the left, results textbox on
# the right. `app` is launched in the __main__ guard below.
with gr.Blocks(title="Audio Deepfake Detection", theme=gr.themes.Soft()) as app:
    gr.Markdown(
        """
        # 🎵 Audio Deepfake Detection
        Upload an audio file to detect if it's artificially generated (deepfake) or real.
        **Supported formats:** WAV, MP3, FLAC, M4A
        """
    )
    with gr.Row():
        with gr.Column():
            # type="filepath" means detect_deepfake receives a path string,
            # not raw audio data.
            audio_input = gr.Audio(
                label="Upload Audio File",
                type="filepath",
                sources=["upload"]
            )
            detect_btn = gr.Button(
                "🔍 Analyze Audio",
                variant="primary",
                size="lg"
            )
        with gr.Column():
            # Read-only output; Markdown verdict is rendered as plain text here.
            output_text = gr.Textbox(
                label="Detection Results",
                lines=8,
                max_lines=10,
                interactive=False
            )
    # Set up the event handler
    detect_btn.click(
        fn=detect_deepfake,
        inputs=audio_input,
        outputs=output_text
    )
    # Also trigger on audio upload
    # NOTE(review): with both triggers wired, pressing the button after an
    # upload re-runs the same analysis — harmless duplication, but confirm
    # this double-trigger is intended.
    audio_input.change(
        fn=detect_deepfake,
        inputs=audio_input,
        outputs=output_text
    )
    gr.Markdown(
        """
        ---
        **Note:** This model analyzes audio characteristics to detect artificial generation.
        Results are probabilities, not definitive proof.
        """
    )
if __name__ == "__main__":
    # Bind to all interfaces on the conventional Gradio/Spaces port 7860;
    # share=False disables the public gradio.live tunnel.
    app.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False
    )