import os

import gradio as gr
import torch
from transformers import AutoTokenizer

from VitsModelSplit.vits_model2 import VitsModel, get_state_grad_loss

# Global pass counter, incremented by greet() on every synthesis loop.
GK = 0

# The access token for the model repo is read from the environment.
token = os.environ.get("key_")
tokenizer = AutoTokenizer.from_pretrained("wasmdashai/vits-ar-sa-huba", token=token)
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_vits = VitsModel.from_pretrained("wasmdashai/vits-ar-sa-huba", token=token)  # .to(device)

def modelspeech(texts):
    """Synthesize a batch of texts; return (sampling_rate, waveform)."""
    inputs = tokenizer(texts, return_tensors="pt")  # .cuda()
    wav = model_vits(input_ids=inputs["input_ids"]).waveform  # .detach()
    # display(Audio(wav, rate=model_vits.config.sampling_rate))
    return model_vits.config.sampling_rate, wav  # remove_noise_nr(wav)

def greet(text, id):
    """Repeatedly synthesize `text` in a batch of `id` copies,
    streaming a running pass counter back to the client."""
    global GK
    b = int(id)
    while True:  # generator loop; streams until the client disconnects
        GK += 1
        texts = [text] * b
        modelspeech(texts)  # synthesized audio is discarded; only the counter is shown
        yield f"number is {GK}"

demo = gr.Interface(fn=greet, inputs=["text", "text"], outputs="text")
demo.launch()
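
# A minimal sketch (not part of the original app; `tts` is a hypothetical
# helper): modelspeech() already returns the (sampling_rate, waveform) pair
# Gradio expects for audio output, so the synthesized speech could be exposed
# directly once the waveform is converted to a NumPy array, e.g.:
#
#   def tts(text):
#       rate, wav = modelspeech([text])
#       return rate, wav.detach().numpy()[0]
#
#   gr.Interface(fn=tts, inputs="text", outputs="audio").launch()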