Vrushali committed on
Commit 295f1a3 · 1 Parent(s): 8f35ce4

Add audio text module and whisper functions

Files changed (1)
  1. src/module/audio_text.py +49 -0
src/module/audio_text.py ADDED
@@ -0,0 +1,49 @@
+ # from whisper_jax import FlaxWhisperPipline
+ # import jax.numpy as jnp
+ import whisper
+ print(whisper.__file__)  # debug: confirm which whisper installation is being used
+ from openai import OpenAI
+ from config import OPENAI_API_KEY
+ import os
+
+ # set the key in the environment before instantiating the client so OpenAI() can read it
+ os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
+ client = OpenAI()
+
+
+ def whisper_pipeline_tpu(audio):
+     # requires whisper-jax to be installed and the whisper_jax / jax.numpy imports above to be uncommented
+     pipeline = FlaxWhisperPipline("openai/whisper-large-v3", dtype=jnp.bfloat16, batch_size=16)
+     text = pipeline(audio)
+     return text
+
+
+
+ def whisper_pipeline(audio_path):
+     model = whisper.load_model("medium")
+     # load audio and pad/trim it to fit 30 seconds
+     audio = whisper.load_audio(audio_path)
+     audio = whisper.pad_or_trim(audio)
+     # make log-Mel spectrogram and move to the same device as the model
+     mel = whisper.log_mel_spectrogram(audio).to(model.device)
+     # detect the spoken language
+     _, probs = model.detect_language(mel)
+     print(f"Detected language: {max(probs, key=probs.get)}")
+     # decode the audio
+     options = whisper.DecodingOptions()
+     result = whisper.decode(model, mel, options)
+     # print the recognized text
+     print(result.text)
+     return result.text
+
+
+
+
+
+ def whisper_openai(audio_path):
+     # context manager ensures the audio file handle is closed after the request
+     with open(audio_path, "rb") as audio_file:
+         transcript = client.audio.transcriptions.create(
+             model="whisper-1",
+             file=audio_file
+         )
+     return transcript
+
+ # whisper_pipeline() requires an audio path; call it as e.g. whisper_pipeline("sample.wav")
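
A minimal usage sketch for the functions added in this commit, assuming the module is importable as src.module.audio_text and that "sample.wav" is a placeholder audio path; the OpenAI key is still expected to come from config.py as in the diff above:

from src.module.audio_text import whisper_pipeline, whisper_openai

# transcribe locally with the open-source Whisper "medium" model (weights download on first run)
local_text = whisper_pipeline("sample.wav")

# transcribe via the hosted OpenAI API (needs a valid OPENAI_API_KEY in config.py)
api_transcript = whisper_openai("sample.wav")
print(api_transcript.text)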