Update app.py
app.py CHANGED
@@ -4,6 +4,7 @@ from transformers import AutoTokenizer,VitsModel
 
 import google.generativeai as genai
 import torch
+import torchaudio
 
 api_key =os.environ.get("id_gmkey")
 token=os.environ.get("key_")
@@ -195,11 +196,13 @@ def get_answer_ai(text,session_ai):
     session_ai=create_chat_session()
     response = session_ai.send_message(text,stream=True)
     return response,session_ai
+import torchaudio
 
 def modelspeech(text):
-    audio_bytes = query({
-
-
+    audio_bytes = query({"inputs":text })
+    wav, sr = torchaudio.load(audio_bytes)
+    
+    return sr,wav.squeeze().cpu().numpy()
     with torch.no_grad():
         inputs = tokenizer(text, return_tensors="pt")#.cuda()
 
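The new modelspeech() body returns the remotely generated audio before the existing local with torch.no_grad() path, but the query() helper it calls is not defined in these hunks. A minimal sketch of what such a helper could look like follows, assuming the Hugging Face Inference API and the facebook/mms-tts-ara endpoint (both assumptions, not shown in this commit); wrapping the response bytes in io.BytesIO gives torchaudio.load() the file-like object it accepts in place of a path.

# Hypothetical sketch only: query() is not part of this diff.
# The model URL is an assumption; "key_" is the token env var already read in app.py.
import io
import os

import requests

API_URL = "https://api-inference.huggingface.co/models/facebook/mms-tts-ara"  # assumed TTS endpoint
headers = {"Authorization": f"Bearer {os.environ.get('key_')}"}

def query(payload):
    # POST the {"inputs": text} payload and return the audio as a file-like object
    # so torchaudio.load() can read it directly.
    response = requests.post(API_URL, headers=headers, json=payload)
    response.raise_for_status()
    return io.BytesIO(response.content)

If query() instead returns raw bytes, the call torchaudio.load(audio_bytes) in modelspeech() would need the same io.BytesIO wrapping before loading.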