s3nh committed on
Commit
a226e05
·
verified ·
1 Parent(s): e4943cc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -8
app.py CHANGED
@@ -21,13 +21,23 @@ import pandas as pd
21
  import whisper
22
 
23
 
 
 
 
 
 
 
24
  @spaces.GPU
25
- whisper_model = whisper.load_model("medium").to("cuda")
26
- tts_model = VitsModel.from_pretrained("facebook/mms-tts-pol")
27
- tts_model.to("cuda")
28
- print("TTS Loaded!")
 
 
 
 
 
29
 
30
- tokenizer_tss = AutoTokenizer.from_pretrained("facebook/mms-tts-pol")
31
 
32
  def save_to_txt(text_to_save):
33
  with open('prompt.txt', 'w', encoding='utf-8') as f:
@@ -42,14 +52,14 @@ def read_txt():
42
  ##### Chat z LLAMA ####
43
  ##### Chat z LLAMA ####
44
  ##### Chat z LLAMA ####
45
-
46
  def _load_model_tokenizer():
47
  model_id = 'tangger/Qwen-7B-Chat'
48
  tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
49
  model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto",trust_remote_code=True, fp16=True).eval()
50
  return model, tokenizer
51
-
52
-
53
  model, tokenizer = _load_model_tokenizer()
54
  def postprocess(self, y):
55
  if y is None:
 
21
  import whisper
22
 
23
 
24
+ # whisper_model = whisper.load_model("medium").to("cuda")
25
+ # tts_model = VitsModel.from_pretrained("facebook/mms-tts-pol")
26
+ # tts_model.to("cuda")
27
+ # print("TTS Loaded!")
28
+
29
+
30
  @spaces.GPU
31
+ def load_whisper():
32
+ return whisper.load_model("medium").to("cuda")
33
+
34
+ @spaces.GPU()
35
+ def load_tts():
36
+ tts_model = VitsModel.from_pretrained("facebook/mms-tts-pol")
37
+ tts_model.to("cuda")
38
+ tokenizer_tss = AutoTokenizer.from_pretrained("facebook/mms-tts-pol")
39
+ return tts_model, tokenizer_tss
40
 
 
41
 
42
  def save_to_txt(text_to_save):
43
  with open('prompt.txt', 'w', encoding='utf-8') as f:
 
52
  ##### Chat z LLAMA ####
53
  ##### Chat z LLAMA ####
54
  ##### Chat z LLAMA ####
55
+ @spaces.GPU
56
  def _load_model_tokenizer():
57
  model_id = 'tangger/Qwen-7B-Chat'
58
  tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
59
  model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto",trust_remote_code=True, fp16=True).eval()
60
  return model, tokenizer
61
+ whisper_model = load_whisper()
62
+ tts_model, tokenizer_tss = load_tts()
63
  model, tokenizer = _load_model_tokenizer()
64
  def postprocess(self, y):
65
  if y is None: