akshansh36 committed
Commit 17aaac0 · verified · 1 Parent(s): b0c644f

Update app.py

Files changed (1):
  1. app.py +18 -26
app.py CHANGED
@@ -4,14 +4,13 @@ import numpy as np
 import datetime
 import time
 from infer_rvc_python import BaseLoader
-import torchaudio
 
 # Initialize the model
 now = datetime.datetime.now()
 timestamp = now.strftime("%Y-%m-%d_%H-%M-%S")
 random_tag = "USER_" + str(timestamp)
 
-converter = BaseLoader(only_cpu=False, hubert_path='./hubert_base.pt', rmvpe_path='./rmvpe.pt')
+converter = BaseLoader(only_cpu=True, hubert_path='./hubert_base.pt', rmvpe_path='./rmvpe.pt')
 converter.apply_conf(
     tag=random_tag,
     file_model="./model.pth",
@@ -29,56 +28,49 @@ chunk_sec = 0.1
 sr = 16000
 chunk_len = int(sr * chunk_sec)
 L = 16
-stop_recording = False
 first_output_latency = 0
 
-# Initialize global audio buffer
-audio_buffer = torch.zeros(0, dtype=torch.float32)
-
-# Function to process audio chunks
-def process_audio_chunk(audio, buffer_state):
+# Function to process and play audio chunks
+def process_audio_chunk(audio, output_state):
     global first_output_latency
 
-    if buffer_state is None:
-        buffer_state = torch.zeros(0, dtype=torch.float32)
+    if output_state is None:
+        output_state = np.zeros(0, dtype=np.float32)
 
     if audio is None:
-        return None, buffer_state
+        return None, output_state
 
     # Convert input audio to tensor
     audio_data = torch.tensor(audio[1], dtype=torch.float32)
-    buffer_state = torch.cat((buffer_state, audio_data))
-
-    if len(buffer_state) < chunk_len:
-        return None, buffer_state
 
-    # Process the chunk
-    previous_chunk = buffer_state[:chunk_len]
-    buffer_state = buffer_state[chunk_len:]
+    if len(audio_data) < chunk_len:
+        return None, output_state
 
-    input_chunk = torch.cat([torch.zeros(L * 2, dtype=torch.float32), previous_chunk])
+    input_chunk = torch.cat([torch.zeros(L * 2, dtype=torch.float32), audio_data])
 
     with torch.inference_mode():
        data = (input_chunk.numpy().astype(np.int16), sr)
-        result_array, sample_rate = converter.generate_from_cache(
+        result_array, _ = converter.generate_from_cache(
            audio_data=data,
            tag=random_tag,
        )
 
     if first_output_latency == 0:
-        first_output_latency = time.time()
+        first_output_latency = time.time()  # Track when the first output happens
 
-    output = torch.tensor(result_array, dtype=torch.float32)
-    output = output.squeeze(0).numpy()
+    # Convert the result array to numpy and append to the ongoing output
+    output_chunk = torch.tensor(result_array, dtype=torch.float32).squeeze(0).numpy()
+    output_state = np.concatenate((output_state, output_chunk))
 
-    return (audio[0], output), buffer_state
+    # Return the updated output state for continuous playback
+    return (sr, output_state), output_state
 
 # Gradio interface setup
 with gr.Blocks() as demo:
     audio_input = gr.Audio(sources="microphone", streaming=True, type="numpy", label="Input Audio")
     audio_output = gr.Audio(label="Output Audio")
-    buffer_state = gr.State()
+    output_state = gr.State()
 
-    audio_input.stream(process_audio_chunk, inputs=[audio_input, buffer_state], outputs=[audio_output, buffer_state])
+    audio_input.stream(process_audio_chunk, inputs=[audio_input, output_state], outputs=[audio_output, output_state])
 
 demo.launch()
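
For reference, the pattern the new version adopts is a stateful Gradio stream: each microphone chunk is converted, appended to a buffer held in gr.State, and the whole buffer is re-emitted as (sample_rate, samples) so playback grows continuously. The sketch below is not part of the commit; it is a minimal, self-contained version of that pattern with the RVC model replaced by a hypothetical pass-through convert() stub, while the other names mirror app.py.

import gradio as gr
import numpy as np

sr = 16000
chunk_len = int(sr * 0.1)  # 100 ms chunks, matching chunk_sec in app.py

def convert(chunk):
    # Hypothetical stand-in for converter.generate_from_cache: identity pass-through.
    return chunk

def process_audio_chunk(audio, output_state):
    # output_state holds everything emitted so far; gr.State starts as None.
    if output_state is None:
        output_state = np.zeros(0, dtype=np.float32)
    if audio is None:
        return None, output_state
    chunk = audio[1].astype(np.float32)
    if len(chunk) < chunk_len:
        return None, output_state  # wait for a full chunk before converting
    # Append the converted chunk and replay the whole accumulated buffer.
    output_state = np.concatenate((output_state, convert(chunk)))
    return (sr, output_state), output_state

with gr.Blocks() as demo:
    audio_input = gr.Audio(sources="microphone", streaming=True, type="numpy", label="Input Audio")
    audio_output = gr.Audio(label="Output Audio")
    output_state = gr.State()
    audio_input.stream(process_audio_chunk, inputs=[audio_input, output_state],
                       outputs=[audio_output, output_state])

demo.launch()

Returning the full accumulated buffer on every tick (rather than only the newest chunk) is what makes the output component keep playing from the start; the trade-off is that each update re-sends a growing array.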