Update app.py
app.py CHANGED
@@ -8,12 +8,22 @@ tokenizer = AutoTokenizer.from_pretrained("wasmdashai/vits-ar-sa-huba",token=tok
 #device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model_vits=VitsModel.from_pretrained("wasmdashai/vits-ar-sa-huba",token=token)#.to(device)
 
+def modelspeech(texts):
+
+
+
+    inputs = tokenizer(texts, return_tensors="pt")#.cuda()
 
+    wav = model_vits(input_ids=inputs["input_ids"]).waveform#.detach()
+    # display(Audio(wav, rate=model.config.sampling_rate))
+    return model_vits.config.sampling_rate,wav#remove_noise_nr(wav)
-
+def greet(id):
     global GK
+    b=int(id)
     while True:
         GK+=1
-
+        texts=['السلام عليكم']*b
+        out=modelspeech(texts)
         yield f"namber is {GK}"
 
 demo = gr.Interface(fn=greet, inputs="text", outputs="text")
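For reference, here is a minimal sketch of what the updated app.py amounts to once the imports it relies on (gradio, torch, transformers) are filled in. Two things are assumptions rather than part of the commit: the access token is read from an HF_TOKEN environment variable (the diff only shows a pre-existing `token` variable), and `greet` is simplified from the commit's infinite generator loop, which repeatedly synthesizes a fixed Arabic phrase and yields a counter, into a single-shot text-to-speech function that returns audio.

import os

import gradio as gr
import torch
from transformers import AutoTokenizer, VitsModel

# Access token for the model repo; assumed here to come from the HF_TOKEN
# environment variable (the commit only shows an existing `token` variable).
token = os.environ.get("HF_TOKEN")

MODEL_ID = "wasmdashai/vits-ar-sa-huba"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, token=token)
model_vits = VitsModel.from_pretrained(MODEL_ID, token=token)


def modelspeech(text):
    # Tokenize the input and run VITS inference to get a waveform tensor.
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        wav = model_vits(input_ids=inputs["input_ids"]).waveform
    # Gradio's audio component expects (sample_rate, numpy array).
    return model_vits.config.sampling_rate, wav[0].cpu().numpy()


def greet(text):
    # Single-shot variant of the commit's generator loop: synthesize once and
    # return the audio instead of yielding a counter forever.
    return modelspeech(text)


demo = gr.Interface(fn=greet, inputs="text", outputs="audio")

if __name__ == "__main__":
    demo.launch()

The commented-out `.to(device)` / `.cuda()` calls in the diff show where GPU placement would go on a GPU-backed Space; the sketch keeps everything on CPU.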