admin committed
Commit 36a6259 · 1 Parent(s): 5b8d4bb
Files changed (3)
  1. app.py +17 -18
  2. requirements.txt +3 -3
  3. utils.py +4 -10
app.py CHANGED
@@ -8,22 +8,23 @@ import numpy as np
 import gradio as gr
 import librosa.display
 import matplotlib.pyplot as plt
-from utils import get_modelist, find_files, embed_img, TEMP_DIR
+from utils import get_modelist, find_files, embed_img
 from collections import Counter
 from model import EvalNet


 TRANSLATE = {
-    "vibrato": "颤音",
-    "upward_portamento": "上滑音",
-    "downward_portamento": "下滑音",
-    "returning_portamento": "回滑音",
-    "glissando": "刮奏, 花指",
-    "tremolo": "摇指",
-    "harmonics": "泛音",
-    "plucks": "勾, 打, 抹, 托, ...",
+    "vibrato": "chan yin",
+    "upward_portamento": "shang hua yin",
+    "downward_portamento": "xia hua yin",
+    "returning_portamento": "hui hua yin",
+    "glissando": "gua zou, hua zhi",
+    "tremolo": "yao zhi",
+    "harmonics": "fan yin",
+    "plucks": "gou, da, mo, tuo, ...",
 }
 CLASSES = list(TRANSLATE.keys())
+TEMP_DIR = "./__pycache__/tmp"
 SAMPLE_RATE = 44100


@@ -146,7 +147,7 @@ def infer(wav_path: str, log_name: str, folder_path=TEMP_DIR):
         shutil.rmtree(folder_path)

     if not wav_path:
-        return None, "请输入音频 Please input an audio!"
+        return None, "Please input an audio!"

     try:
         model = EvalNet(log_name, len(TRANSLATE)).model
@@ -182,27 +183,25 @@ if __name__ == "__main__":
     gr.Interface(
         fn=infer,
         inputs=[
-            gr.Audio(label="上传录音 Upload a recording", type="filepath"),
-            gr.Dropdown(
-                choices=models, label="选择模型 Select a model", value=models[0]
-            ),
+            gr.Audio(label="Upload a recording", type="filepath"),
+            gr.Dropdown(choices=models, label="Select a model", value=models[0]),
         ],
         outputs=[
-            gr.Textbox(label="音频文件名 Audio filename", show_copy_button=True),
+            gr.Textbox(label="Audio filename", show_copy_button=True),
             gr.Textbox(
-                label="古筝演奏技法识别 Guzheng playing tech recognition",
+                label="Guzheng playing tech recognition",
                 show_copy_button=True,
             ),
         ],
         examples=examples,
         cache_examples=False,
         flagging_mode="never",
-        title="建议录音时长保持在 3s 左右<br>It is recommended to keep the recording length around 3s.",
+        title="It is recommended to keep the recording length around 3s.",
     )

     gr.Markdown(
         """
-# 引用 Cite
+# Cite
 ```bibtex
 @dataset{zhaorui_liu_2021_5676893,
     author = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Baoqiang Han},
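For context: this commit moves TEMP_DIR out of utils.py and defines it directly in app.py as `./__pycache__/tmp`, and infer() clears that folder before each run via shutil.rmtree. The snippet below is a minimal sketch of that cleanup pattern only; the helper name clear_temp and the os.path.exists guard are assumptions, not code taken from the repository.

```python
import os
import shutil

# Constant introduced in app.py by this commit.
TEMP_DIR = "./__pycache__/tmp"


def clear_temp(folder_path: str = TEMP_DIR) -> None:
    """Sketch of the cleanup infer() performs before a new run.

    The exists-check and this helper's name are assumptions,
    not part of the diff above.
    """
    if os.path.exists(folder_path):
        shutil.rmtree(folder_path)
```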
requirements.txt CHANGED
@@ -1,6 +1,6 @@
-librosa
 torch
+pillow
+librosa
 matplotlib
 torchvision
-pillow
-modelscope==1.15
+modelscope[framework]==1.18
 
utils.py CHANGED
@@ -5,10 +5,9 @@ from modelscope import snapshot_download
 from PIL import Image

 MODEL_DIR = snapshot_download(
-    f"ccmusic-database/GZ_IsoTech",
-    cache_dir=f"{os.getcwd()}/__pycache__",
+    "ccmusic-database/GZ_IsoTech",
+    cache_dir="./__pycache__",
 )
-TEMP_DIR = f"{os.getcwd()}/flagged"


 def toCUDA(x):
@@ -34,22 +33,17 @@ def get_modelist(model_dir=MODEL_DIR):
     try:
         entries = os.listdir(model_dir)
     except OSError as e:
-        print(f"无法访问 {model_dir}: {e}")
+        print(f"Cannot access {model_dir}: {e}")
         return

-    # 遍历所有条目
     output = []
     for entry in entries:
-        # 获取完整路径
         full_path = os.path.join(model_dir, entry)
-        # 跳过'.git'文件夹
         if entry == ".git" or entry == "examples":
-            print(f"跳过 .git examples 文件夹: {full_path}")
+            print(f"Skip .git or examples dir: {full_path}")
             continue

-        # 检查条目是文件还是目录
         if os.path.isdir(full_path):
-            # 打印目录路径
             output.append(os.path.basename(full_path))

     return output
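Usage note (assumed, not part of this commit): app.py builds its Gradio model dropdown from get_modelist(), which lists the checkpoint folder names under the downloaded MODEL_DIR while skipping .git and examples. A minimal sketch, assuming the snapshot has already been fetched by snapshot_download() at import time; the folder names in the comment are illustrative only.

```python
from utils import get_modelist

models = get_modelist()   # folder names under MODEL_DIR, e.g. ["vit", "squeezenet"] (illustrative)
if models:
    default = models[0]   # app.py passes models[0] as the Dropdown's initial value
    print(f"{len(models)} model folders found, default: {default}")
```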