admin committed on
Commit 8948197 · 1 Parent(s): bf97162
Files changed (1)
  1. app.py +40 -53
app.py CHANGED
@@ -29,63 +29,48 @@ def zero_padding(y: np.ndarray, end: int):
 
 
 def audio2mel(audio_path: str, seg_len=20):
-    os.makedirs(TEMP_DIR, exist_ok=True)
-    try:
-        y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
-        y = zero_padding(y, seg_len * sr)
-        mel_spec = librosa.feature.melspectrogram(y=y, sr=sr)
-        log_mel_spec = librosa.power_to_db(mel_spec, ref=np.max)
-        librosa.display.specshow(log_mel_spec)
-        plt.axis("off")
-        plt.savefig(
-            f"{TEMP_DIR}/output.jpg",
-            bbox_inches="tight",
-            pad_inches=0.0,
-        )
-        plt.close()
-
-    except Exception as e:
-        print(f"Error converting {audio_path} : {e}")
+    y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
+    y = zero_padding(y, seg_len * sr)
+    mel_spec = librosa.feature.melspectrogram(y=y, sr=sr)
+    log_mel_spec = librosa.power_to_db(mel_spec, ref=np.max)
+    librosa.display.specshow(log_mel_spec)
+    plt.axis("off")
+    plt.savefig(
+        f"{TEMP_DIR}/output.jpg",
+        bbox_inches="tight",
+        pad_inches=0.0,
+    )
+    plt.close()
 
 
 def audio2cqt(audio_path: str, seg_len=20):
-    os.makedirs(TEMP_DIR, exist_ok=True)
-    try:
-        y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
-        y = zero_padding(y, seg_len * sr)
-        cqt_spec = librosa.cqt(y=y, sr=sr)
-        log_cqt_spec = librosa.power_to_db(np.abs(cqt_spec) ** 2, ref=np.max)
-        librosa.display.specshow(log_cqt_spec)
-        plt.axis("off")
-        plt.savefig(
-            f"{TEMP_DIR}/output.jpg",
-            bbox_inches="tight",
-            pad_inches=0.0,
-        )
-        plt.close()
-
-    except Exception as e:
-        print(f"Error converting {audio_path} : {e}")
+    y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
+    y = zero_padding(y, seg_len * sr)
+    cqt_spec = librosa.cqt(y=y, sr=sr)
+    log_cqt_spec = librosa.power_to_db(np.abs(cqt_spec) ** 2, ref=np.max)
+    librosa.display.specshow(log_cqt_spec)
+    plt.axis("off")
+    plt.savefig(
+        f"{TEMP_DIR}/output.jpg",
+        bbox_inches="tight",
+        pad_inches=0.0,
+    )
+    plt.close()
 
 
 def audio2chroma(audio_path: str, seg_len=20):
-    os.makedirs(TEMP_DIR, exist_ok=True)
-    try:
-        y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
-        y = zero_padding(y, seg_len * sr)
-        chroma_spec = librosa.feature.chroma_stft(y=y, sr=sr)
-        log_chroma_spec = librosa.power_to_db(np.abs(chroma_spec) ** 2, ref=np.max)
-        librosa.display.specshow(log_chroma_spec)
-        plt.axis("off")
-        plt.savefig(
-            f"{TEMP_DIR}/output.jpg",
-            bbox_inches="tight",
-            pad_inches=0.0,
-        )
-        plt.close()
-
-    except Exception as e:
-        print(f"Error converting {audio_path} : {e}")
+    y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
+    y = zero_padding(y, seg_len * sr)
+    chroma_spec = librosa.feature.chroma_stft(y=y, sr=sr)
+    log_chroma_spec = librosa.power_to_db(np.abs(chroma_spec) ** 2, ref=np.max)
+    librosa.display.specshow(log_chroma_spec)
+    plt.axis("off")
+    plt.savefig(
+        f"{TEMP_DIR}/output.jpg",
+        bbox_inches="tight",
+        pad_inches=0.0,
+    )
+    plt.close()
 
 
 def infer(wav_path: str, log_name: str, folder_path=TEMP_DIR):
@@ -95,13 +80,15 @@ def infer(wav_path: str, log_name: str, folder_path=TEMP_DIR):
     if not wav_path:
         return None, "Please input an audio!"
 
+    spec = log_name.split("_")[-3]
+    os.makedirs(folder_path, exist_ok=True)
     try:
         model = EvalNet(log_name, len(CLASSES)).model
+        eval("audio2%s" % spec)(wav_path)
+
     except Exception as e:
         return None, f"{e}"
 
-    spec = log_name.split("_")[-3]
-    eval("audio2%s" % spec)(wav_path)
     input = embed_img(f"{folder_path}/output.jpg")
     output: torch.Tensor = model(input)
     pred_id = torch.max(output.data, 1)[1]
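After this change the three converters raise on failure instead of printing, so errors surface through the try/except in infer. They still end with an identical render-and-save tail; if a later commit wanted to deduplicate it, that tail could move into one helper. A minimal sketch, not part of this commit, with _save_spec as a hypothetical name:

import librosa.display
import matplotlib.pyplot as plt
import numpy as np


def _save_spec(spec: np.ndarray, out_path: str) -> None:
    # Hypothetical helper mirroring the shared tail of the three converters:
    # render the (log-)spectrogram without axes, save it with no padding.
    librosa.display.specshow(spec)
    plt.axis("off")
    plt.savefig(out_path, bbox_inches="tight", pad_inches=0.0)
    plt.close()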
 
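In the new infer, eval("audio2%s" % spec) resolves the converter from a segment of log_name, so a malformed name is evaluated as a Python expression rather than rejected. A dictionary dispatch is a common safer equivalent; a sketch under the assumption that the three converters keep the names above:

# Hypothetical replacement for the eval() dispatch inside infer().
CONVERTERS = {"mel": audio2mel, "cqt": audio2cqt, "chroma": audio2chroma}

spec = log_name.split("_")[-3]
converter = CONVERTERS.get(spec)
if converter is None:
    return None, f"Unsupported spectrogram type: {spec}"  # assumed error shape
converter(wav_path)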