admin committed on
Commit
0f76a19
·
1 Parent(s): 5e2bd34
Files changed (3) hide show
  1. app.py +12 -20
  2. requirements.txt +3 -3
  3. utils.py +4 -10
app.py CHANGED
@@ -8,18 +8,12 @@ import numpy as np
8
  import gradio as gr
9
  import librosa.display
10
  import matplotlib.pyplot as plt
11
- from utils import get_modelist, find_audio_files, embed_img, TEMP_DIR
12
  from model import EvalNet
13
 
14
 
15
- TRANSLATE = {
16
- "Gong": "",
17
- "Shang": "商",
18
- "Jue": "角",
19
- "Zhi": "徵",
20
- "Yu": "羽",
21
- }
22
- CLASSES = list(TRANSLATE.keys())
23
  SAMPLE_RATE = 44100
24
 
25
 
@@ -99,10 +93,10 @@ def infer(wav_path: str, log_name: str, folder_path=TEMP_DIR):
99
  shutil.rmtree(folder_path)
100
 
101
  if not wav_path:
102
- return None, "请输入音频 Please input an audio!"
103
 
104
  try:
105
- model = EvalNet(log_name, len(TRANSLATE)).model
106
  except Exception as e:
107
  return None, f"{e}"
108
 
@@ -113,7 +107,7 @@ def infer(wav_path: str, log_name: str, folder_path=TEMP_DIR):
113
  pred_id = torch.max(output.data, 1)[1]
114
  return (
115
  os.path.basename(wav_path),
116
- f"{TRANSLATE[CLASSES[pred_id]]} ({CLASSES[pred_id].capitalize()})",
117
  )
118
 
119
 
@@ -130,27 +124,25 @@ if __name__ == "__main__":
130
  gr.Interface(
131
  fn=infer,
132
  inputs=[
133
- gr.Audio(label="上传录音 Upload a recording", type="filepath"),
134
- gr.Dropdown(
135
- choices=models, label="选择模型 Select a model", value=models[0]
136
- ),
137
  ],
138
  outputs=[
139
- gr.Textbox(label="音频文件名 Audio filename", show_copy_button=True),
140
  gr.Textbox(
141
- label="中国五声调式识别 Chinese pentatonic mode recognition",
142
  show_copy_button=True,
143
  ),
144
  ],
145
  examples=examples,
146
  cache_examples=False,
147
  flagging_mode="never",
148
- title="建议录音时长保持在 20s 左右<br>It is recommended to keep the recording length around 20s.",
149
  )
150
 
151
  gr.Markdown(
152
  """
153
- # 引用 Cite
154
  ```bibtex
155
  @dataset{zhaorui_liu_2021_5676893,
156
  author = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Baoqiang Han},
 
8
  import gradio as gr
9
  import librosa.display
10
  import matplotlib.pyplot as plt
11
+ from utils import get_modelist, find_audio_files, embed_img
12
  from model import EvalNet
13
 
14
 
15
+ CLASSES = ["Gong", "Shang", "Jue", "Zhi", "Yu"]
16
+ TEMP_DIR = "./__pycache__/tmp"
 
 
 
 
 
 
17
  SAMPLE_RATE = 44100
18
 
19
 
 
93
  shutil.rmtree(folder_path)
94
 
95
  if not wav_path:
96
+ return None, "Please input an audio!"
97
 
98
  try:
99
+ model = EvalNet(log_name, len(CLASSES)).model
100
  except Exception as e:
101
  return None, f"{e}"
102
 
 
107
  pred_id = torch.max(output.data, 1)[1]
108
  return (
109
  os.path.basename(wav_path),
110
+ CLASSES[pred_id].capitalize(),
111
  )
112
 
113
 
 
124
  gr.Interface(
125
  fn=infer,
126
  inputs=[
127
+ gr.Audio(label="Upload a recording", type="filepath"),
128
+ gr.Dropdown(choices=models, label="Select a model", value=models[0]),
 
 
129
  ],
130
  outputs=[
131
+ gr.Textbox(label="Audio filename", show_copy_button=True),
132
  gr.Textbox(
133
+ label="Chinese pentatonic mode recognition",
134
  show_copy_button=True,
135
  ),
136
  ],
137
  examples=examples,
138
  cache_examples=False,
139
  flagging_mode="never",
140
+ title="It is recommended to keep the recording length around 20s.",
141
  )
142
 
143
  gr.Markdown(
144
  """
145
+ # Cite
146
  ```bibtex
147
  @dataset{zhaorui_liu_2021_5676893,
148
  author = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Baoqiang Han},
requirements.txt CHANGED
@@ -1,6 +1,6 @@
1
- librosa
2
  torch
 
 
3
  matplotlib
4
  torchvision
5
- pillow
6
- modelscope==1.15
 
 
1
  torch
2
+ pillow
3
+ librosa
4
  matplotlib
5
  torchvision
6
+ modelscope[framework]==1.18
 
utils.py CHANGED
@@ -5,10 +5,9 @@ from modelscope import snapshot_download
5
  from PIL import Image
6
 
7
  MODEL_DIR = snapshot_download(
8
- f"ccmusic-database/CNPM",
9
- cache_dir=f"{os.getcwd()}/__pycache__",
10
  )
11
- TEMP_DIR = f"{os.getcwd()}/flagged"
12
 
13
 
14
  def toCUDA(x):
@@ -34,22 +33,17 @@ def get_modelist(model_dir=MODEL_DIR):
34
  try:
35
  entries = os.listdir(model_dir)
36
  except OSError as e:
37
- print(f"无法访问 {model_dir}: {e}")
38
  return
39
 
40
- # 遍历所有条目
41
  output = []
42
  for entry in entries:
43
- # 获取完整路径
44
  full_path = os.path.join(model_dir, entry)
45
- # 跳过'.git'文件夹
46
  if entry == ".git" or entry == "examples":
47
- print(f"跳过 .git examples 文件夹: {full_path}")
48
  continue
49
 
50
- # 检查条目是文件还是目录
51
  if os.path.isdir(full_path):
52
- # 打印目录路径
53
  output.append(os.path.basename(full_path))
54
 
55
  return output
 
5
  from PIL import Image
6
 
7
  MODEL_DIR = snapshot_download(
8
+ "ccmusic-database/CNPM",
9
+ cache_dir="./__pycache__",
10
  )
 
11
 
12
 
13
  def toCUDA(x):
 
33
  try:
34
  entries = os.listdir(model_dir)
35
  except OSError as e:
36
+ print(f"Cannot access {model_dir}: {e}")
37
  return
38
 
 
39
  output = []
40
  for entry in entries:
 
41
  full_path = os.path.join(model_dir, entry)
 
42
  if entry == ".git" or entry == "examples":
43
+ print(f"Skip .git or examples dir: {full_path}")
44
  continue
45
 
 
46
  if os.path.isdir(full_path):
 
47
  output.append(os.path.basename(full_path))
48
 
49
  return output