Spaces:
Sleeping
Sleeping
yellowcandle
committed on
Commit
•
d3b8a9b
1
Parent(s):
3d7bd2f
try to fix runtime error on HF
Browse files
app.py
CHANGED
@@ -16,7 +16,7 @@ def transcribe_audio(audio):
|
|
16 |
if transcribe_model is None:
|
17 |
return "Please load the transcription model first."
|
18 |
|
19 |
-
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
20 |
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
21 |
|
22 |
processor = AutoProcessor.from_pretrained(transcribe_model)
|
@@ -44,28 +44,24 @@ def proofread(text):
|
|
44 |
if proofread_model is None:
|
45 |
return "Please load the proofreading model first."
|
46 |
|
47 |
-
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
48 |
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
49 |
|
50 |
messages = [
|
51 |
{"role": "system", "content": "用繁體中文語體文整理這段文字,在最後加上整段文字的重點。"},
|
52 |
{"role": "user", "content": text},
|
53 |
]
|
54 |
-
pipe = pipeline("text-generation", model=proofread_model)
|
55 |
-
llm_output = pipe(messages)
|
56 |
|
57 |
-
|
58 |
-
|
59 |
-
|
60 |
-
|
61 |
-
assistant_content = next(item['content'] for item in generated_text if item['role'] == 'assistant')
|
62 |
-
|
63 |
-
proofread_text = assistant_content
|
64 |
return proofread_text
|
65 |
|
|
|
66 |
def load_models(transcribe_model_id, proofread_model_id):
|
67 |
global transcribe_model, proofread_model
|
68 |
-
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
69 |
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
70 |
|
71 |
transcribe_model = AutoModelForSpeechSeq2Seq.from_pretrained(
|
|
|
16 |
if transcribe_model is None:
|
17 |
return "Please load the transcription model first."
|
18 |
|
19 |
+
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
20 |
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
21 |
|
22 |
processor = AutoProcessor.from_pretrained(transcribe_model)
|
|
|
44 |
if proofread_model is None:
|
45 |
return "Please load the proofreading model first."
|
46 |
|
47 |
+
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
48 |
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
49 |
|
50 |
messages = [
|
51 |
{"role": "system", "content": "用繁體中文語體文整理這段文字,在最後加上整段文字的重點。"},
|
52 |
{"role": "user", "content": text},
|
53 |
]
|
|
|
|
|
54 |
|
55 |
+
inputs = proofread_model.tokenizer(messages, return_tensors="tf", padding=True)
|
56 |
+
outputs = proofread_model.generate(**inputs)
|
57 |
+
proofread_text = proofread_model.tokenizer.decode(outputs[0], skip_special_tokens=True)
|
58 |
+
|
|
|
|
|
|
|
59 |
return proofread_text
|
60 |
|
61 |
+
@spaces.GPU(duration=120)
|
62 |
def load_models(transcribe_model_id, proofread_model_id):
|
63 |
global transcribe_model, proofread_model
|
64 |
+
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
65 |
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
|
66 |
|
67 |
transcribe_model = AutoModelForSpeechSeq2Seq.from_pretrained(
|