rcell committed
Commit 55e8e4e
1 Parent(s): 80a72bf
Files changed (1)
  1. app.py +81 -0
app.py ADDED
@@ -0,0 +1,81 @@
+ import gradio as gr
+ import os
+ # Build the monotonic_align Cython extension in place before the model code needs it.
+ os.system('cd monotonic_align && python setup.py build_ext --inplace && cd ..')
+
+ import logging
+
+ # Keep numba (pulled in via librosa) from flooding the log with INFO messages.
+ numba_logger = logging.getLogger('numba')
+ numba_logger.setLevel(logging.WARNING)
+
+ import librosa
+ import numpy as np
+ import torch
+
+ import commons
+ import utils
+ from models import SynthesizerTrn
+ from text.symbols import symbols
+ from text import text_to_sequence
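+ # Voice-conversion pipeline: HuBERT soft units are extracted from the input
+ # recording and fed to a VITS-style synthesizer (SynthesizerTrn) that renders
+ # them in the target voice.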
+
+
+ # TTS text front end from VITS; not used by the voice-conversion path below.
+ def get_text(text, hps):
+     text_norm = text_to_sequence(text, hps.data.text_cleaners)
+     if hps.data.add_blank:
+         text_norm = commons.intersperse(text_norm, 0)
+     text_norm = torch.LongTensor(text_norm)
+     print(text_norm.shape)
+     return text_norm
+
+
+ hps = utils.get_hparams_from_file("configs/ljs_base.json")
+
+ net_g = SynthesizerTrn(
+     len(symbols),
+     hps.data.filter_length // 2 + 1,                # linear-spectrogram bins (n_fft // 2 + 1)
+     hps.train.segment_size // hps.data.hop_length,  # training segment length in frames
+     **hps.model)
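+ # With the stock VITS ljs_base values (filter_length 1024, hop_length 256,
+ # segment_size 8192) those arguments come out to 513 bins and 32 frames,
+ # assuming the bundled config matches upstream.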
+
+ # HuBERT-Soft content encoder from bshall/hubert, fetched via torch.hub.
+ hubert = torch.hub.load("bshall/hubert:main", "hubert_soft")
+
+ _ = utils.load_checkpoint("G_88000.pth", net_g, None)
+ net_g.eval()  # inference only: disable dropout
+
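+ # hubert.units expects 16 kHz mono audio shaped (batch, 1, samples) and returns
+ # (batch, frames, 256) soft units at roughly 50 frames per second (per the
+ # bshall/hubert documentation).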
+ def vc_fn(input_audio):
+     if input_audio is None:
+         return "You need to upload an audio", None
+     sampling_rate, audio = input_audio
+     duration = audio.shape[0] / sampling_rate
+     if duration > 30:
+         return "Error: Audio is too long", None
+     # Normalize integer PCM from gr.Audio to float32 in [-1, 1].
+     audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32)
+     if len(audio.shape) > 1:
+         audio = librosa.to_mono(audio.transpose(1, 0))
+     if sampling_rate != 16000:
+         audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
+     source = torch.FloatTensor(audio).unsqueeze(0).unsqueeze(0)
+     print(source.shape)
+     with torch.inference_mode():
+         units = hubert.units(source)
+
+     # Feed the unit sequence to the synthesizer in place of a phoneme sequence;
+     # low noise scales keep the conversion close to deterministic.
+     stn_tst = torch.FloatTensor(units.squeeze(0))
+     with torch.no_grad():
+         x_tst = stn_tst.unsqueeze(0)
+         x_tst_lengths = torch.LongTensor([stn_tst.size(0)])
+         audio = net_g.infer(x_tst, x_tst_lengths, noise_scale=0.1,
+                             noise_scale_w=0.1, length_scale=1)[0][0, 0].data.float().numpy()
+
+     return "Success", (hps.data.sampling_rate, audio)
+
+
+ # Minimal Gradio UI: one tab with an audio input, a convert button, and the
+ # status message plus converted audio as outputs.
+ app = gr.Blocks()
+ with app:
+     with gr.Tabs():
+         with gr.TabItem("Basic"):
+             vc_input3 = gr.Audio(label="Input Audio (30s limitation)")
+             vc_submit = gr.Button("Convert", variant="primary")
+             vc_output1 = gr.Textbox(label="Output Message")
+             vc_output2 = gr.Audio(label="Output Audio")
+             vc_submit.click(vc_fn, [vc_input3], [vc_output1, vc_output2])
+
+ app.launch()
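
A minimal local smoke test for the app above, assuming monotonic_align is built, configs/ljs_base.json and G_88000.pth are in place, and the torch.hub download succeeds; the tuple below mirrors what gr.Audio passes to vc_fn:

    import numpy as np
    # Hypothetical check: one second of int16 silence at 16 kHz.
    msg, out = vc_fn((16000, np.zeros(16000, dtype=np.int16)))
    print(msg)  # expected: "Success"
    if out is not None:
        sr, wav = out
        print(sr, wav.shape)  # output sampling rate and converted waveform shape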