asahi417 committed
Commit 7ec3fd2 (verified) · 1 Parent(s): 1482df5

Update benchmark.sh

Files changed (1)
  1. benchmark.sh +4 -5
benchmark.sh CHANGED
@@ -8,16 +8,15 @@ ffmpeg -i kotoba-whisper-eval/audio/manzai3.mp3 -ar 16000 -ac 1 -c:a pcm_s16le k
 # cache the model
 python -c 'from faster_whisper import WhisperModel; model = WhisperModel("kotoba-tech/kotoba-whisper-v1.0-faster")'
 SECONDS=0
-python -c 'from faster_whisper import WhisperModel; model = WhisperModel("kotoba-tech/kotoba-whisper-v1.0-faster"); segments=model.transcribe("kotoba-whisper-eval/audio/long_interview_1.wav", language="ja", chunk_length=15, condition_on_previous_text=False); for segment in segments:print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))'
-
+python -c 'from faster_whisper import WhisperModel; model = WhisperModel("kotoba-tech/kotoba-whisper-v1.0-faster"); print(["[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text) for segment in model.transcribe("kotoba-whisper-eval/audio/long_interview_1.wav", language="ja", chunk_length=15, condition_on_previous_text=False)[0]])'
 TIME_INTERVIEW=$SECONDS
 SECONDS=0
-python -c 'from faster_whisper import WhisperModel; model = WhisperModel("kotoba-tech/kotoba-whisper-v1.0-faster"); model.transcribe("kotoba-whisper-eval/audio/manzai1.wav", language="ja", chunk_length=15, condition_on_previous_text=False)"'
+python -c 'from faster_whisper import WhisperModel; model = WhisperModel("kotoba-tech/kotoba-whisper-v1.0-faster"); print(["[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text) for segment in model.transcribe("kotoba-whisper-eval/audio/manzai1.wav", language="ja", chunk_length=15, condition_on_previous_text=False)[0]])'
 TIME_MANZAI1=$SECONDS
 SECONDS=0
-python -c 'from faster_whisper import WhisperModel; model = WhisperModel("kotoba-tech/kotoba-whisper-v1.0-faster"); model.transcribe("kotoba-whisper-eval/audio/manzai2.wav", language="ja", chunk_length=15, condition_on_previous_text=False)"'
+python -c 'from faster_whisper import WhisperModel; model = WhisperModel("kotoba-tech/kotoba-whisper-v1.0-faster"); print(["[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text) for segment in model.transcribe("kotoba-whisper-eval/audio/manzai2.wav", language="ja", chunk_length=15, condition_on_previous_text=False)[0]])'
 TIME_MANZAI2=$SECONDS
 SECONDS=0
-python -c 'from faster_whisper import WhisperModel; model = WhisperModel("kotoba-tech/kotoba-whisper-v1.0-faster"); model.transcribe("kotoba-whisper-eval/audio/manzai3.wav", language="ja", chunk_length=15, condition_on_previous_text=False)"'
+python -c 'from faster_whisper import WhisperModel; model = WhisperModel("kotoba-tech/kotoba-whisper-v1.0-faster"); print(["[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text) for segment in model.transcribe("kotoba-whisper-eval/audio/manzai3.wav", language="ja", chunk_length=15, condition_on_previous_text=False)[0]])'
 TIME_MANZAI3=$SECONDS
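
The removed one-liners were not valid python -c invocations: a for statement cannot follow other ;-separated statements on a single line, and the three manzai calls ended with a stray double quote inside the single-quoted shell string. In addition, faster-whisper's transcribe() returns a lazy generator of segments together with metadata, so the segments must actually be consumed for the transcription (and hence the timing via SECONDS) to run. The added one-liners do both: they consume the generator with a list comprehension and print the formatted segments. As a minimal multi-line sketch of what each new one-liner does (model name, audio path, and transcribe arguments taken from the diff; a faster-whisper version that accepts chunk_length is assumed):

    from faster_whisper import WhisperModel

    model = WhisperModel("kotoba-tech/kotoba-whisper-v1.0-faster")

    # transcribe() returns (segments, info); `segments` is a lazy generator,
    # so decoding only happens as the generator is consumed.
    segments, info = model.transcribe(
        "kotoba-whisper-eval/audio/long_interview_1.wav",
        language="ja",
        chunk_length=15,
        condition_on_previous_text=False,
    )
    for segment in segments:
        print("[%.2fs -> %.2fs] %s" % (segment.start, segment.end, segment.text))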