Katock committed
Commit 794e885 · 1 Parent(s): 5a899c5

Update app.py

Files changed (1)
  1. app.py +9 -4
app.py CHANGED
@@ -11,6 +11,7 @@ import asyncio
 import argparse
 import edge_tts
 import gradio.processing_utils as gr_processing_utils
+
 logging.getLogger('numba').setLevel(logging.WARNING)
 logging.getLogger('markdown_it').setLevel(logging.WARNING)
 logging.getLogger('urllib3').setLevel(logging.WARNING)
@@ -20,6 +21,7 @@ limitation = os.getenv("SYSTEM") == "spaces" # limit audio length in huggingface spaces
 
 audio_postprocess_ori = gr.Audio.postprocess
 
+
 def audio_postprocess(self, y):
     data = audio_postprocess_ori(self, y)
     if data is None:
@@ -28,6 +30,8 @@ def audio_postprocess(self, y):
 
 
 gr.Audio.postprocess = audio_postprocess
+
+
 def create_vc_fn(model, sid):
     def vc_fn(input_audio, vc_transform, auto_f0):
         if input_audio is None:
@@ -45,9 +49,10 @@ def create_vc_fn(model, sid):
         soundfile.write(raw_path, audio, 16000, format="wav")
         raw_path.seek(0)
         out_audio, out_sr = model.infer(sid, vc_transform, raw_path,
-                                        auto_predict_f0=auto_f0,
-                                        )
+                                        auto_predict_f0=auto_f0,
+                                        )
         return "Success", (44100, out_audio.cpu().numpy())
+
     return vc_fn
 
 
@@ -80,11 +85,11 @@ if __name__ == '__main__':
             gr.Markdown(
                 '<div align="center">'
                 f'<img style="width:auto;height:300px;" src="file/{cover}">' if cover else ""
-                '</div>'
+                '</div>'
             )
             with gr.Row():
                 with gr.Column():
-                    vc_input = gr.Audio(label="输入干声"+' (小于 20 秒)' if limitation else '')
+                    vc_input = gr.Audio(label="输入干声" + ' (小于 20 秒)' if limitation else '')
                     vc_transform = gr.Number(label="音高调整(支持正负半音,12为一个八度)", value=0)
                     auto_f0 = gr.Checkbox(label="自动音高预测(说话模式)", value=False)
                     vc_submit = gr.Button("生成", variant="primary")
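
A note on the relabelled gr.Audio line (label "输入干声", i.e. "input dry vocals"): in Python the conditional expression binds more loosely than string concatenation, so as written the whole label collapses to an empty string whenever limitation is False. Below is a minimal, self-contained sketch of the two parsings; the parenthesised variant is a hypothetical alternative for illustration only, not part of this commit.

    limitation = False  # e.g. SYSTEM != "spaces", i.e. not running on Hugging Face Spaces

    # As written in the commit, the ternary governs the entire expression:
    label_as_written = "输入干声" + ' (小于 20 秒)' if limitation else ''
    print(repr(label_as_written))          # '' -- the base label disappears off Spaces

    # Hypothetical alternative (not in the commit): parenthesise the suffix so only
    # the "(小于 20 秒)" length hint is conditional and the base label always shows:
    label_suffix_conditional = "输入干声" + (' (小于 20 秒)' if limitation else '')
    print(repr(label_suffix_conditional))  # '输入干声'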