MingLi committed
Commit 710fddf · 1 Parent(s): 17debbb

Add dynamic model loading based on GPU availability

Files changed (1)
  1. app.py +5 -2
app.py CHANGED
@@ -12,7 +12,10 @@ app = FastAPI()
 
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 
-model_id = "openai/whisper-base.en"
+if torch.cuda.is_available():
+    model_id = "openai/whisper-small.en"
+else:
+    model_id = "openai/whisper-tiny.en"
 
 pipe = pipeline(
     "automatic-speech-recognition",
@@ -91,7 +94,7 @@ with gr.Blocks() as blocks:
     t = gr.Textbox(label="结果")
 
     b.click(handel, inputs=f, outputs=t)
-
+
 blocks.queue(max_size=3)
 
 app = gr.mount_gradio_app(app, blocks, path="/")
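For reference, a minimal sketch of how the model selection and pipeline construction fit together after this commit. The pipeline(...) arguments beyond the task name are truncated in the diff above, so passing model=model_id and device=device is an assumption, not the verbatim app.py code.

import torch
from transformers import pipeline

device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Larger Whisper checkpoint when a GPU is present, smaller one on CPU.
if torch.cuda.is_available():
    model_id = "openai/whisper-small.en"
else:
    model_id = "openai/whisper-tiny.en"

pipe = pipeline(
    "automatic-speech-recognition",
    model=model_id,   # assumed: the selected checkpoint is passed to the pipeline
    device=device,    # assumed: the device string computed above is reused here
)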