Staticaliza committed
Commit 662b7d6 · verified · 1 Parent(s): 8ede8ee

Update app.py

Files changed (1):
app.py +5 -13
app.py CHANGED
@@ -11,8 +11,6 @@ if DEVICE == "auto":
     DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
 print(f"[SYSTEM] | Using {DEVICE} type compute device.")
 
-torch.set_num_threads(4)
-
 # Variables
 CHAR_LIMIT = 2000
 DEFAULT_INPUT = ""
@@ -80,18 +78,12 @@ def trim_silence(audio, threshold=0.001):
 def generate(text=DEFAULT_INPUT, voice=DEFAULT_VOICE, speed=1):
     text = text.strip()[:CHAR_LIMIT] + "."
     pipeline = PIPELINES[voice[0]]
-    pack = VOICE_PACKS[voice]
+    pack = pipeline.load_voice(voice)
     for _, ps, _ in pipeline(text, voice, speed):
-        index = min(len(ps) - 1, len(pack) - 1)
-        ref_s = pack[index]
-        try:
-            audio = MODEL(ps, ref_s, speed)
-        except Exception as e:
-            print("Error during model inference:", e)
-            return None
-        audio_np = audio.numpy()
-        trimmed_audio = trim_silence(audio_np)
-        return (24000, trimmed_audio)
+        ref_s = pack[len(ps) - 1]
+        audio = MODEL(ps, ref_s, speed)
+        return (24000, trim_silence(audio.numpy()))
+    return None
 
 def cloud():
     print("[CLOUD] | Space maintained.")