import streamlit as st
from st_audiorec import st_audiorec

import librosa

from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
from datasets import load_dataset  # only needed for the commented-out sample dataset below
import torch


def main():
    print("Run init model")
    pipe = init_model()
    # x = st.slider('Select a value')
    # st.write(x, 'squared is', x * x)

    print("Render UI")
    wav_audio_data = st_audiorec()

    if wav_audio_data is not None:
        st.audio(wav_audio_data, format='audio/wav')

    print("Load data: audio1")
    # dataset = load_dataset("distil-whisper/librispeech_long", "clean", split="validation")
    # sample = dataset[0]["audio"]
    
    audio_file_path = "data/audio1.wav"
    # Load at 16 kHz: Whisper's feature extractor expects 16 kHz input, while
    # librosa would otherwise resample to its 22.05 kHz default.
    audio_data, sample_rate = librosa.load(audio_file_path, sr=16000)
    st.write('Sample:', transcribe(audio_data, pipe))
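
    # Sketch (an addition, not in the original code): the recorded clip can be
    # transcribed too. st_audiorec returns raw WAV bytes, which the
    # transformers ASR pipeline decodes via ffmpeg (ffmpeg must be installed).
    if wav_audio_data is not None:
        st.write('Recording:', transcribe(wav_audio_data, pipe))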


def init_model():
    # Run on the GPU in float16 when available; otherwise fall back to CPU in float32.
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

    model_id = "openai/whisper-large-v3"

    model = AutoModelForSpeechSeq2Seq.from_pretrained(
        model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
    )
    model.to(device)

    processor = AutoProcessor.from_pretrained(model_id)

    # Long-form decoding: split the input into 30 s chunks and batch them.
    pipe = pipeline(
        "automatic-speech-recognition",
        model=model,
        tokenizer=processor.tokenizer,
        feature_extractor=processor.feature_extractor,
        max_new_tokens=128,
        chunk_length_s=30,
        batch_size=16,
        return_timestamps=True,
        torch_dtype=torch_dtype,
        device=device,
    )
    print(f'Init model successful: {model_id}')
    return pipe
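
# Sketch (an assumption, not in the original code): Streamlit re-runs the whole
# script on every widget interaction, so init_model() reloads the multi-GB
# checkpoint each time. Streamlit's st.cache_resource decorator keeps the
# pipeline alive across reruns:
#
#     @st.cache_resource
#     def init_model():
#         ...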

def transcribe(audio_sample, pipe) -> str:
    # `audio_sample` may be a 16 kHz numpy array or raw WAV bytes; the
    # transformers ASR pipeline accepts both. Display is left to the caller.
    result = pipe(audio_sample)
    print(result)
    return result["text"]

if __name__ == "__main__":
    main()
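
# To run locally (assuming this file is saved as app.py and streamlit,
# streamlit-audiorec, librosa, transformers, and torch are installed):
#
#     streamlit run app.py
#
# The first run downloads the openai/whisper-large-v3 weights from the
# Hugging Face Hub.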