ASG Models committed on
Commit
a359a1a
·
verified ·
1 Parent(s): bf2199e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -9
app.py CHANGED
@@ -10,7 +10,7 @@ token=os.environ.get("key_")
10
  genai.configure(api_key=api_key)
11
  tokenizer = AutoTokenizer.from_pretrained("asg2024/vits-ar-sa-huba",token=token)
12
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
13
- model=VitsModel.from_pretrained("asg2024/vits-ar-sa-huba",token=token).to(device)
14
 
15
 
16
  generation_config = {
@@ -91,7 +91,7 @@ def modelspeech(text):
91
  with torch.no_grad():
92
  inputs = tokenizer(text, return_tensors="pt")#.cuda()
93
 
94
- wav = model(input_ids=inputs["input_ids"]).waveform.cpu().numpy().reshape(-1)
95
  # display(Audio(wav, rate=model.config.sampling_rate))
96
  return model.config.sampling_rate,wav#remove_noise_nr(wav)
97
 
@@ -105,7 +105,7 @@ def clean_text(text):
105
 
106
  def text_to_speech(text):
107
 
108
- response = dash2(text)
109
  pad_text=''
110
  k=0
111
  for chunk in response:
@@ -127,12 +127,7 @@ def dash(text):
127
  for chunk in response:
128
  yield chunk.text
129
 
130
- def dash2(text):
131
-
132
- response=get_answer_ai(text)
133
- for chunk in response:
134
- yield chunk.text
135
- # return textai
136
 
137
 
138
  # demo = gr.Interface(fn=dash, inputs=["text"], outputs=['text'])
 
10
  genai.configure(api_key=api_key)
11
  tokenizer = AutoTokenizer.from_pretrained("asg2024/vits-ar-sa-huba",token=token)
12
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
13
+ model_vits=VitsModel.from_pretrained("asg2024/vits-ar-sa-huba",token=token).to(device)
14
 
15
 
16
  generation_config = {
 
91
  with torch.no_grad():
92
  inputs = tokenizer(text, return_tensors="pt")#.cuda()
93
 
94
+ wav = model_vits(input_ids=inputs["input_ids"]).waveform.cpu().numpy().reshape(-1)
95
  # display(Audio(wav, rate=model.config.sampling_rate))
96
  return model.config.sampling_rate,wav#remove_noise_nr(wav)
97
 
 
105
 
106
  def text_to_speech(text):
107
 
108
+ response = dash(text)
109
  pad_text=''
110
  k=0
111
  for chunk in response:
 
127
  for chunk in response:
128
  yield chunk.text
129
 
130
+
 
 
 
 
 
131
 
132
 
133
  # demo = gr.Interface(fn=dash, inputs=["text"], outputs=['text'])