WajeehAzeemX committed (verified)
Commit 7f3077c · 1 Parent(s): fecc0bf

Update app.py

Files changed (1): app.py (+2, -4)
app.py CHANGED
@@ -14,9 +14,7 @@ model = WhisperForConditionalGeneration.from_pretrained(
 import torch
 
 processor = WhisperProcessor.from_pretrained('WajeehAzeemX/whisper-smal-ar-testing-kale-5000')
-model.config.forced_decoder_ids = None
-forced_decoder_ids = processor.get_decoder_prompt_ids(language="Arabic", task="transcribe")
-model.generation_config.cache_implementation = "static"
+
 from transformers import GenerationConfig, WhisperForConditionalGeneration
 generation_config = GenerationConfig.from_pretrained("openai/whisper-small") # if you are using a multilingual model
 model.generation_config = generation_config
@@ -45,7 +43,7 @@ async def transcribe_audio(request: Request):
     input_features = processor(audio_array, sampling_rate=sampling_rate, return_tensors="pt").input_features
 
     # Generate token ids
-    predicted_ids = model.generate(input_features, forced_decoder_ids=forced_decoder_ids, return_timestamps=True)
+    predicted_ids = model.generate(input_features, return_timestamps=True)
 
     # Decode token ids to text
     transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
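For context, a minimal sketch of the transcription path as it stands after this commit: language/task selection is no longer forced via forced_decoder_ids and is left to the generation config loaded from openai/whisper-small. The model checkpoint name and the dummy audio below are assumptions for illustration; the actual app.py reads the audio from the incoming request.

import numpy as np
from transformers import GenerationConfig, WhisperForConditionalGeneration, WhisperProcessor

# Same processor checkpoint as in the diff; the model checkpoint is assumed to match.
processor = WhisperProcessor.from_pretrained("WajeehAzeemX/whisper-smal-ar-testing-kale-5000")
model = WhisperForConditionalGeneration.from_pretrained("WajeehAzeemX/whisper-smal-ar-testing-kale-5000")

# As in app.py after this commit: use the multilingual generation config from the
# base model instead of setting forced_decoder_ids by hand.
model.generation_config = GenerationConfig.from_pretrained("openai/whisper-small")

# Illustrative input: one second of 16 kHz silence stands in for the request audio.
sampling_rate = 16000
audio_array = np.zeros(sampling_rate, dtype=np.float32)

input_features = processor(audio_array, sampling_rate=sampling_rate, return_tensors="pt").input_features

# Generate token ids without forced_decoder_ids, matching the updated call
predicted_ids = model.generate(input_features, return_timestamps=True)

# Decode token ids to text
transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
print(transcription)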