wasmdashai committed
Commit 3ead256 · verified · 1 Parent(s): f5dcb77

Update app.py

Files changed (1): app.py +3 -1
app.py CHANGED
@@ -122,8 +122,9 @@ def get_model(name_model):
 zero = torch.Tensor([0]).cuda()
 print(zero.device) # <-- 'cpu' 🤔
 import torch
+TXT="""السلام عليكم ورحمة الله وبركاتة يا هلا وسهلا ومراحب بالغالي اخباركم طيبين ان شاء الله ارحبوا على العين والراس """
 @spaces.GPU
-def modelspeech(text,name_model,speaking_rate):
+def modelspeech(text=TXT,name_model,speaking_rate):
 
 
     inputs = tokenizer(text, return_tensors="pt")
@@ -148,6 +149,7 @@ model_choices = gr.Dropdown(
     label="اختر النموذج",
     value="wasmdashai/vits-ar-sa-huba-v2",
 )
+
 demo = gr.Interface(fn=modelspeech, inputs=["text",model_choices,gr.Slider(0, 1, step=0.1,value=0.8)], outputs=["audio","audio"])
 demo.queue()
 demo.launch()
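
Note: the new signature introduced by this commit, def modelspeech(text=TXT,name_model,speaking_rate), places a parameter with a default value before parameters without one, which Python rejects at import time with "SyntaxError: non-default argument follows default argument". Below is a minimal sketch of a valid ordering, assuming defaults for name_model and speaking_rate that mirror the Dropdown value and Slider default already shown in the diff; TXT is the Arabic greeting added above, used as default sample text. This is illustrative only, not the committed fix.

import spaces  # Hugging Face Spaces GPU decorator, as used in app.py

TXT = """السلام عليكم ورحمة الله وبركاتة يا هلا وسهلا ومراحب بالغالي اخباركم طيبين ان شاء الله ارحبوا على العين والراس """

@spaces.GPU
def modelspeech(text=TXT,
                name_model="wasmdashai/vits-ar-sa-huba-v2",  # assumed default, mirrors the Dropdown value
                speaking_rate=0.8):  # assumed default, mirrors the Slider value
    # The original body continues here, starting with:
    # inputs = tokenizer(text, return_tensors="pt")
    ...

Another option would be to leave the signature unchanged and supply the sample text as the textbox default instead, e.g. gr.Textbox(value=TXT) in the gr.Interface inputs list.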