Update app.py
app.py CHANGED
@@ -123,11 +123,12 @@ zero = torch.Tensor([0]).cuda()
 print(zero.device) # <-- 'cpu' 🤔
 import torch
 @spaces.GPU
-def modelspeech(text,name_model):
+def modelspeech(text,name_model,speaking_rate):


     inputs = tokenizer(text, return_tensors="pt")
     model=get_model(name_model)
+    model.speaking_rate=speaking_rate
     with torch.no_grad():
         wav=list(_inference_forward_stream(model,input_ids=inputs.input_ids.cuda(),attention_mask=inputs.attention_mask.cuda(),speaker_embeddings= None,is_streaming=False))[0]
     # with torch.no_grad():
@@ -154,6 +155,6 @@ model_choices = gr.Dropdown(
     label="اختر النموذج",
     value="wasmdashai/vits-ar-sa-huba-v2",
 )
-demo = gr.Interface(fn=modelspeech, inputs=["text",model_choices], outputs=["audio","audio"])
+demo = gr.Interface(fn=modelspeech, inputs=["text",model_choices,gr.Slider(0, 1, step=0.01)], outputs=["audio","audio"])
 demo.queue()
 demo.launch()
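In short, the commit threads a speaking_rate control through the app: modelspeech() gains a third parameter, its value is copied onto the model before inference (transformers VITS models read speaking_rate to scale the predicted durations, so lower values should yield slower speech), and a gr.Slider(0, 1, step=0.01) is appended to the Interface inputs so Gradio passes the slider value as that third argument. Below is a minimal, self-contained sketch of the same wiring, not the Space's real code: the tts() stub, its string output, the slider label, and the choices list are placeholders.

import gradio as gr

def tts(text, name_model, speaking_rate):
    # Placeholder for modelspeech(): the real function tokenizes `text`,
    # loads the chosen VITS checkpoint, sets model.speaking_rate, and
    # returns synthesized audio instead of this string.
    return f"{name_model} (rate={speaking_rate}): {text}"

model_choices = gr.Dropdown(
    choices=["wasmdashai/vits-ar-sa-huba-v2"],  # assumed choices list
    label="اختر النموذج",  # "Choose the model"
    value="wasmdashai/vits-ar-sa-huba-v2",
)

demo = gr.Interface(
    fn=tts,
    # Gradio passes the slider value as the third positional argument.
    inputs=["text", model_choices, gr.Slider(0, 1, step=0.01, label="speaking_rate")],
    outputs="text",  # the real app uses ["audio", "audio"]
)

demo.queue()
demo.launch()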