akshansh36 committed on
Commit 715e7b5 · verified · 1 Parent(s): 17aaac0

Update app.py

Files changed (1)
  1. app.py +18 -67
app.py CHANGED
@@ -1,76 +1,27 @@
  import gradio as gr
- import torch
  import numpy as np
- import datetime
  import time
- from infer_rvc_python import BaseLoader
-
- # Initialize the model
- now = datetime.datetime.now()
- timestamp = now.strftime("%Y-%m-%d_%H-%M-%S")
- random_tag = "USER_" + str(timestamp)
-
- converter = BaseLoader(only_cpu=True, hubert_path='./hubert_base.pt', rmvpe_path='./rmvpe.pt')
- converter.apply_conf(
-     tag=random_tag,
-     file_model="./model.pth",
-     pitch_algo="rmvpe+",
-     pitch_lvl=0,
-     file_index="./model.index",
-     index_influence=0.80,
-     respiration_median_filtering=3,
-     envelope_ratio=0.25,
-     consonant_breath_protection=0.5,
-     resample_sr=0,
- )
-
- chunk_sec = 0.1
- sr = 16000
- chunk_len = int(sr * chunk_sec)
- L = 16
- first_output_latency = 0
-
- # Function to process and play audio chunks
- def process_audio_chunk(audio, output_state):
-     global first_output_latency
-
-     if output_state is None:
-         output_state = np.zeros(0, dtype=np.float32)
-
-     if audio is None:
-         return None, output_state
-
-     # Convert input audio to tensor
-     audio_data = torch.tensor(audio[1], dtype=torch.float32)
-
-     if len(audio_data) < chunk_len:
-         return None, output_state
-
-     input_chunk = torch.cat([torch.zeros(L * 2, dtype=torch.float32), audio_data])
-
-     with torch.inference_mode():
-         data = (input_chunk.numpy().astype(np.int16), sr)
-         result_array, _ = converter.generate_from_cache(
-             audio_data=data,
-             tag=random_tag,
-         )
-
-     if first_output_latency == 0:
-         first_output_latency = time.time()  # Track when the first output happens
-
-     # Convert the result array to numpy and append to the ongoing output
-     output_chunk = torch.tensor(result_array, dtype=torch.float32).squeeze(0).numpy()
-     output_state = np.concatenate((output_state, output_chunk))
-
-     # Return the updated output state for continuous playback
-     return (sr, output_state), output_state
-
- # Gradio interface setup
- with gr.Blocks() as demo:
-     audio_input = gr.Audio(sources="microphone", streaming=True, type="numpy", label="Input Audio")
-     audio_output = gr.Audio(label="Output Audio")
-     output_state = gr.State()
-
-     audio_input.stream(process_audio_chunk, inputs=[audio_input, output_state], outputs=[audio_output, output_state])
-
- demo.launch()
+
+ def add_to_stream(audio, instream):
+     time.sleep(1)
+     if audio is None:
+         return gr.update(), instream
+     if instream is None:
+         ret = audio
+     else:
+         ret = (audio[0], np.concatenate((instream[1], audio[1])))
+     return ret, ret
+
+
+ with gr.Blocks() as demo:
+     inp = gr.Audio(source="microphone")
+     out = gr.Audio()
+     stream = gr.State()
+     clear = gr.Button("Clear")
+
+     inp.stream(add_to_stream, [inp, stream], [out, stream])
+     clear.click(lambda: [None, None, None], None, [inp, out, stream])
+
+
+ if __name__ == "__main__":
+     demo.launch()
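
To see what the new callback does in isolation, here is a minimal sketch (not part of the commit) that feeds two synthetic chunks through add_to_stream and prints the accumulated stream state. It assumes app.py is importable in an environment whose Gradio version still accepts the source= keyword used above; the sample rate and chunk contents are dummy values made up for the example.

    import numpy as np
    from app import add_to_stream  # assumes app.py is on the import path

    sr = 16000  # dummy sample rate, chosen only for illustration
    chunk1 = (sr, np.zeros(sr // 10, dtype=np.int16))  # 0.1 s of silence
    chunk2 = (sr, np.ones(sr // 10, dtype=np.int16))   # 0.1 s of a constant signal

    out, state = add_to_stream(chunk1, None)    # first chunk becomes the stream state
    out, state = add_to_stream(chunk2, state)   # later chunks are concatenated onto it

    print(state[0], state[1].shape)  # 16000 (3200,) -> 0.2 s of audio accumulated

Each call returns the full accumulated audio twice: once for the output player and once for the gr.State value that is passed back in on the next stream event.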