Commit 763a8af by AnyaSchen
Parent(s): 30744bc

fix: which model is used?

Files changed (3):
  1. core.py +2 -0
  2. main.py +2 -0
  3. whisper_streaming_custom/backends.py +1 -0
core.py CHANGED
@@ -38,6 +38,8 @@ class WhisperLiveKit:
         )

         self.args = args
+
+        print(args)

         self.asr = None
         self.tokenizer = None
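For context, a minimal sketch of what this hunk amounts to. The constructor signature and surrounding code are assumptions for illustration, not the actual WhisperLiveKit source; only the lines visible in the hunk above are from the repo.

    class WhisperLiveKit:
        def __init__(self, args):
            self.args = args
            # Echo the parsed arguments at startup so the server log shows
            # which model/settings are actually in use (the point of this commit).
            print(args)

            self.asr = None        # ASR backend attached later
            self.tokenizer = None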
main.py CHANGED
@@ -167,4 +167,6 @@ if __name__ == "__main__":
     parser.add_argument("--task", type=str, default="transcribe", help="Task to perform")
     args = parser.parse_args()

+    print(args)
+
     uvicorn.run(app, host=args.host, port=args.port)
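For reference, a self-contained sketch of what the added print(args) emits: the repr of the parsed argparse.Namespace. Only --task comes from the diff; the empty argv is used here so the example runs standalone, whereas the real script parses sys.argv.

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--task", type=str, default="transcribe", help="Task to perform")
    args = parser.parse_args([])  # empty argv, for the example only
    print(args)                   # prints: Namespace(task='transcribe')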
whisper_streaming_custom/backends.py CHANGED
@@ -151,6 +151,7 @@ class FasterWhisperASR(ASRBase):
             compute_type=compute_type,
             download_root=cache_dir,
         )
+
         return model

     def transcribe(self, audio: np.ndarray, init_prompt: str = "") -> list:
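The touched code path is the model load in FasterWhisperASR. A hedged sketch of that load, assuming the faster_whisper package's WhisperModel; the model size, defaults, and variable names other than compute_type and download_root/cache_dir are assumptions for illustration.

    from faster_whisper import WhisperModel

    def load_model(model_size="large-v3", compute_type="default", cache_dir=None):
        # Placeholder defaults; only the compute_type and download_root/cache_dir
        # keywords come from the diff above.
        model = WhisperModel(
            model_size,
            compute_type=compute_type,
            download_root=cache_dir,
        )

        return model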