Kabatubare committed
Commit 805e4a6 · verified · 1 Parent(s): dfabd2f

Update app.py

Files changed (1):
  app.py +24 -21
app.py CHANGED
@@ -7,7 +7,6 @@ import logging
 
 logging.basicConfig(level=logging.INFO)
 
-# Placeholder for loading your AST-compatible model
 model_path = "./"
 model = AutoModelForAudioClassification.from_pretrained(model_path)
 
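Context for the load above: with model_path = "./", from_pretrained reads config.json and the weight file from the Space's root directory, next to app.py. A minimal standalone sketch of the same load; calling model.eval() is an assumption on my part, not in this commit, and simply switches the model to inference mode:

    from transformers import AutoModelForAudioClassification

    # Load config.json and weights from the current directory, as app.py does.
    model = AutoModelForAudioClassification.from_pretrained("./")
    model.eval()  # inference mode: disables dropout (not part of the commit)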
 
@@ -18,37 +17,41 @@ def preprocess_audio(audio_path, sr=22050):
 
 def extract_patches(S_DB, patch_size=16, patch_overlap=6):
     stride = patch_size - patch_overlap
-    num_patches_x = (S_DB.shape[1] - patch_size) // stride + 1
-    num_patches_y = (S_DB.shape[0] - patch_size) // stride + 1
-    patches = []
-
-    for i in range(num_patches_y):
-        for j in range(num_patches_x):
-            start_i = i * stride
-            start_j = j * stride
-            patch = S_DB[start_i:start_i+patch_size, start_j:start_j+patch_size]
-            patches.append(patch.reshape(-1))
+    num_patches_time = (S_DB.shape[1] - patch_overlap) // stride
+    num_patches_freq = (S_DB.shape[0] - patch_overlap) // stride
 
-    return np.array(patches)
+    patches = []
+    for i in range(0, num_patches_freq * stride, stride):
+        for j in range(0, num_patches_time * stride, stride):
+            patch = S_DB[i:i+patch_size, j:j+patch_size]
+            if patch.shape == (patch_size, patch_size):
+                patches.append(patch.reshape(-1))
+    return np.stack(patches) if patches else np.empty((0, patch_size*patch_size))
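To sanity-check the new grid arithmetic outside the Space, here is a standalone sketch; the function body is copied from the + lines above, and the 128x101 input shape is an arbitrary example:

    import numpy as np

    def extract_patches(S_DB, patch_size=16, patch_overlap=6):
        # Same body as the committed version above.
        stride = patch_size - patch_overlap
        num_patches_time = (S_DB.shape[1] - patch_overlap) // stride
        num_patches_freq = (S_DB.shape[0] - patch_overlap) // stride
        patches = []
        for i in range(0, num_patches_freq * stride, stride):
            for j in range(0, num_patches_time * stride, stride):
                patch = S_DB[i:i+patch_size, j:j+patch_size]
                if patch.shape == (patch_size, patch_size):
                    patches.append(patch.reshape(-1))
        return np.stack(patches) if patches else np.empty((0, patch_size * patch_size))

    # stride = 16 - 6 = 10, so a 128x101 spectrogram yields
    # (128 - 6) // 10 = 12 frequency steps and (101 - 6) // 10 = 9 time steps.
    S_DB = np.random.randn(128, 101)
    print(extract_patches(S_DB).shape)  # (108, 256): 12 * 9 patches of 16*16 values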
 
 def extract_features(audio, sr):
     S = librosa.feature.melspectrogram(y=audio, sr=sr, n_mels=128, hop_length=512, n_fft=2048)
     S_DB = librosa.power_to_db(S, ref=np.max)
-
     patches = extract_patches(S_DB)
-    patch_embeddings = torch.tensor(patches).float()
-
-    # Assuming the model includes a patch embedding layer internally
-    return patch_embeddings.unsqueeze(0)  # Add batch dimension
+
+    # Assuming each patch is flattened to a vector of size 256 (16*16) and then projected to 768 dimensions.
+    # Here we simulate this projection with a dummy tensor; in practice it should be done by a learned linear layer.
+    patches_tensor = torch.tensor(patches).float()
+    # Simulate linear projection (e.g., via a fully connected layer) to match the embedding size
+    if patches_tensor.nelement() == 0:  # Handle case of no patches
+        patch_embeddings_tensor = torch.empty(0, 768)
+    else:
+        patch_embeddings_tensor = patches_tensor  # This is a placeholder; replace with an actual projection
+
+    return patch_embeddings_tensor.unsqueeze(0)  # Add batch dimension for compatibility with the model
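The + comments above say the 256-dimensional patch vectors should really be projected to 768 dimensions by a learned linear layer. A hypothetical sketch of that projection; nn.Linear(256, 768) here is an untrained stand-in and is not part of this commit:

    import torch
    import torch.nn as nn

    # Hypothetical projection from flattened 16x16 patches (256 values) to the
    # 768-dimensional embeddings the comments assume. In a real model this layer
    # would be trained jointly with the classifier, not created at inference time.
    patch_projection = nn.Linear(256, 768)

    def project_patches(patches_tensor: torch.Tensor) -> torch.Tensor:
        """Map a (num_patches, 256) tensor to (num_patches, 768)."""
        if patches_tensor.nelement() == 0:
            return torch.empty(0, 768)
        return patch_projection(patches_tensor)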
 
 def predict_voice(audio_file_path):
     try:
         audio, sr = preprocess_audio(audio_file_path)
         features = extract_features(audio, sr)
 
-        # Flatten the patches to match the model's expected input shape
-        # Adjust this based on your AST model's input requirements
-        features = features.view(1, -1, 768)  # Reshape assuming the model expects (batch_size, seq_len, embedding_dim)
+        # Adjust the features size to match the model input, if necessary
+        # Example: reshape or pad the features tensor
+        # features = adjust_features_shape(features, expected_shape)
 
         with torch.no_grad():
             outputs = model(features)
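adjust_features_shape exists only as a commented-out example above; one hypothetical implementation, padding or truncating the sequence dimension of a (batch, seq_len, dim) tensor (the name, signature, and zero-padding behavior are all assumptions, not part of the commit):

    import torch
    import torch.nn.functional as F

    def adjust_features_shape(features: torch.Tensor, expected_shape: tuple) -> torch.Tensor:
        # Pad with zeros or truncate along the sequence dimension so that
        # features.shape[1] matches expected_shape[1].
        expected_len = expected_shape[1]
        seq_len = features.shape[1]
        if seq_len < expected_len:
            # F.pad pads dims from last to first: (0, 0) leaves the embedding
            # dim alone, (0, n) appends n zero rows along the sequence dim.
            return F.pad(features, (0, 0, 0, expected_len - seq_len))
        return features[:, :expected_len, :]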
@@ -70,7 +73,7 @@ iface = gr.Interface(
     inputs=gr.Audio(label="Upload Audio File", type="filepath"),
     outputs=gr.Text(label="Prediction"),
     title="Voice Authenticity Detection",
-    description="Detects whether a voice is real or AI-generated using an advanced AST model. Upload an audio file to see the results."
+    description="This system uses advanced audio processing to detect whether a voice is real or AI-generated. Upload an audio file to see the results."
 )
 
 iface.launch()
 
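For a quick local check of the updated pipeline before relying on the UI, the handler can be called directly in the same environment as app.py; sample.wav below stands in for any local audio file:

    # Smoke test: bypass Gradio and print the prediction for one file.
    print(predict_voice("sample.wav"))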