reagvis committed on
Commit
dd8355e
·
verified ·
1 Parent(s): 67dabb1

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -0
app.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ import torchaudio
4
+ from transformers import AutoProcessor, AutoModelForAudioClassification
5
+
# Pretrained audio-deepfake classifier published on the Hugging Face Hub.
MODEL_ID = "MelodyMachine/Deepfake-audio-detection-V2"

# Processor handles feature extraction; model produces classification logits.
processor = AutoProcessor.from_pretrained(MODEL_ID)
model = AutoModelForAudioClassification.from_pretrained(MODEL_ID)
def detect_deepfake_audio(audio_path: str) -> str:
    """Classify an audio file as genuine or deepfake.

    Args:
        audio_path: Path to an audio file readable by torchaudio
            (WAV, MP3, FLAC, ...).

    Returns:
        A Markdown-formatted string naming the predicted label and the
        model's softmax confidence for that label.
    """
    # Load audio; torchaudio returns a (channels, samples) float tensor.
    waveform, sample_rate = torchaudio.load(audio_path)

    # Downmix multi-channel audio to mono.
    if waveform.shape[0] > 1:
        waveform = torch.mean(waveform, dim=0, keepdim=True)

    # Resample to the rate the feature extractor expects (typically 16 kHz);
    # Wav2Vec2-style processors reject or mis-handle mismatched rates, so
    # without this step any 44.1/48 kHz upload fails.
    target_rate = getattr(processor, "sampling_rate", sample_rate)
    if sample_rate != target_rate:
        waveform = torchaudio.functional.resample(waveform, sample_rate, target_rate)
        sample_rate = target_rate

    # The feature extractor expects a 1-D array per example, not a
    # (1, samples) tensor — squeeze the channel dimension before batching.
    inputs = processor(
        waveform.squeeze(0).numpy(),
        sampling_rate=sample_rate,
        return_tensors="pt",
    )

    # Inference only — no gradients needed.
    with torch.no_grad():
        outputs = model(**inputs)

    # Convert logits to probabilities and select the top class.
    probs = torch.softmax(outputs.logits, dim=-1)[0]
    idx = torch.argmax(probs).item()
    label = model.config.id2label[idx]
    confidence = probs[idx].item()

    return f"The audio is classified as **{label}** with confidence **{confidence:.2f}**"
30
+
31
+ # Build Gradio interface
32
+ with gr.Blocks() as demo:
33
+ gr.Markdown("# Audio Deepfake Detection App")
34
+ gr.Markdown("### Upload or record an audio clip to detect deepfake content.")
35
+ audio_in = gr.Audio(source="upload", type="filepath", label="Upload Audio")
36
+ txt_out = gr.Textbox(label="Result")
37
+ gr.Button("Detect").click(fn=detect_deepfake_audio, inputs=audio_in, outputs=txt_out)
38
+
39
+ if __name__ == "__main__":
40
+ demo.launch()