# RunTasking / app.py
# (Hugging Face Space page header retained as a comment: uploaded by
#  wasmdashai — "Update app.py", commit 9441ab4 verified, 993 Bytes.
#  The original raw/history/blame lines were page chrome, not code.)
import gradio as gr
# Global request/iteration counter, incremented by greet() below.
GK=0
from transformers import AutoTokenizer
import torch
import os
# Project-local split VITS implementation (not the stock transformers VitsModel).
from VitsModelSplit.vits_model2 import VitsModel,get_state_grad_loss
# Hugging Face access token for the gated "wasmdashai/vits-ar-sa-huba" repo;
# None if the "key_" environment variable is unset.
token=os.environ.get("key_")
tokenizer = AutoTokenizer.from_pretrained("wasmdashai/vits-ar-sa-huba",token=token)
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# CPU-only by design here — the .to(device) move is deliberately commented out.
model_vits=VitsModel.from_pretrained("wasmdashai/vits-ar-sa-huba",token=token)#.to(device)
def modelspeech(texts):
    """Synthesize speech for a batch of texts with the module-level VITS model.

    Args:
        texts: list of strings to synthesize (tokenized as one padded batch).

    Returns:
        Tuple ``(sampling_rate, waveform)`` where ``waveform`` is the raw
        model output tensor (not detached; CPU-only in this deployment).
    """
    token_batch = tokenizer(texts, return_tensors="pt")
    waveform = model_vits(input_ids=token_batch["input_ids"]).waveform
    return model_vits.config.sampling_rate, waveform
def greet(text,id):
    """Stream status messages while repeatedly synthesizing *text*.

    Args:
        text: prompt text to synthesize on every iteration.
        id: batch size as a string (Gradio text input), converted with int();
            the name shadows the builtin but is kept for interface stability.

    Yields:
        Status strings reporting the global iteration counter ``GK``.
    """
    global GK
    batch_size = int(id)
    # NOTE(review): this loop never terminates on its own — it streams updates
    # until the Gradio client disconnects. Confirm that is the intended
    # load-testing behavior rather than a missing exit condition.
    while True:
        GK += 1
        # Synthesis result is intentionally discarded; the call exists only to
        # exercise the model (previously bound to an unused `out` variable).
        modelspeech([text] * batch_size)
        # Fixed typo in the yielded status string ("namber" -> "number").
        yield f"number is {GK}"
# Wire the streaming generator to a simple UI: two text inputs
# (prompt, batch-size string) -> one streamed text output.
demo = gr.Interface(fn=greet, inputs=["text","text"], outputs="text")
# Start the Gradio server (blocking call; entry point of the Space).
demo.launch()