EwoutLagendijk committed on
Commit fc110d0
1 Parent(s): 54eca9b

Update app.py

Files changed (1)
app.py +24 -24
app.py CHANGED
@@ -1,5 +1,5 @@
 import torch
-from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor
+from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
 import gradio as gr
 import librosa
 
@@ -9,26 +9,29 @@ BATCH_SIZE = 8
 device = 0 if torch.cuda.is_available() else "cpu"
 
 # Load model and processor
-model = AutoModelForSpeechSeq2Seq.from_pretrained(MODEL_NAME)
-processor = AutoProcessor.from_pretrained(MODEL_NAME)
+model_name = "EwoutLagendijk/whisper-small-indonesian"
+
+model = AutoModelForSpeechSeq2Seq.from_pretrained(model_name)
+processor = AutoProcessor.from_pretrained(model_name)
 
 # Update the generation config for transcription
 model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="id", task="transcribe")
-model.config.no_repeat_ngram_size = 3
 
-def transcribe_speech_with_timestamps(filepath):
+# Initialize the translation pipeline (using a model like `Helsinki-NLP/opus-mt-id-en` for Indonesian to English)
+translation_pipeline = pipeline("translation", model="Helsinki-NLP/opus-mt-id-en")
+
+def transcribe_speech(filepath):
     # Load the audio
     audio, sampling_rate = librosa.load(filepath, sr=16000)
 
     # Define chunk size (e.g., 30 seconds)
-    chunk_duration = 30  # in seconds
+    chunk_duration = 5  # in seconds
     chunk_samples = chunk_duration * sampling_rate
 
     # Process audio in chunks
     transcription = []
     for i in range(0, len(audio), chunk_samples):
         chunk = audio[i:i + chunk_samples]
-        chunk_start_time = i / sampling_rate  # Calculate chunk start time in seconds
 
         # Convert the chunk into input features
         inputs = processor(audio=chunk, sampling_rate=16000, return_tensors="pt").input_features
@@ -36,25 +39,22 @@ def transcribe_speech_with_timestamps(filepath):
         # Generate transcription for the chunk
         generated_ids = model.generate(
             inputs,
-            max_new_tokens=444,
-            return_dict_in_generate=True,
-            output_scores=False,
-            output_attentions=False,
-            output_hidden_states=False,
+            max_new_tokens=444,  # Max allowed by Whisper
             forced_decoder_ids=processor.get_decoder_prompt_ids(language="id", task="transcribe")
         )
 
-        # Decode the tokens into text and timestamps
-        token_transcriptions = processor.batch_decode(generated_ids["sequences"], skip_special_tokens=False)[0]
-        decoded_with_timestamps = processor.decode_with_timestamps(generated_ids["sequences"][0])
-
-        # Parse timestamps and transcription
-        for segment in decoded_with_timestamps:
-            start_time = chunk_start_time + segment['start']
-            end_time = chunk_start_time + segment['end']
-            text = segment['text']
-            transcription.append(f"[{start_time:.2f}s - {end_time:.2f}s]: {text}")
-
+        # Decode and append the transcription
+        chunk_transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+        # Translate the transcription to English (or another language of choice)
+        chunk_translation = translation_pipeline(chunk_transcription)[0]['translation_text']
+
+        # Append both transcription and translation
+        transcription.append(f"Chunk {i//chunk_samples + 1}:\n")
+        transcription.append(f"Transcription: {chunk_transcription}\n")
+        transcription.append(f"Translation: {chunk_translation}\n\n")
+
+    # Combine all chunk transcriptions and translations into a single string
     return "\n".join(transcription)
 
 demo = gr.Blocks()
@@ -72,6 +72,6 @@ file_transcribe = gr.Interface(
 )
 
 with demo:
-    gr.TabbedInterface([mic_transcribe, file_transcribe], ["Transcribe Microphone", "Transcribe Audio File"])
+    gr.TabbedInterface([mic_transcribe, file_transcribe], ["Transcribe and translate Microphone", "Transcribe and translate Audio File"])
 
 demo.launch(debug=True)
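
For reference, the mic_transcribe and file_transcribe interfaces passed to gr.TabbedInterface are defined in the unchanged part of app.py and do not appear in these hunks. Below is a minimal sketch of how the updated transcribe_speech function could be wired into them, assuming the Gradio 4.x Audio/Interface API; the component arguments and labels are illustrative, not taken from this commit.

# Hypothetical wiring for the two interfaces referenced above; the real
# definitions live outside the changed hunks and may differ.
mic_transcribe = gr.Interface(
    fn=transcribe_speech,  # updated function from this commit
    inputs=gr.Audio(sources=["microphone"], type="filepath"),  # record audio, pass a file path to the function
    outputs=gr.Textbox(label="Transcription and translation"),
)

file_transcribe = gr.Interface(
    fn=transcribe_speech,
    inputs=gr.Audio(sources=["upload"], type="filepath"),  # upload an audio file instead of recording
    outputs=gr.Textbox(label="Transcription and translation"),
)

# The function can also be called directly, outside Gradio:
# print(transcribe_speech("sample.wav"))  # "sample.wav" is a placeholder path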