import gradio as gr 
import numpy as np
import os, time, librosa, torch
from pyannote.audio import Pipeline
from transformers import pipeline
from utils import second_to_timecode, download_from_youtube
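# `utils` is not shown in this file; from how it is used below it is expected to provide:
#   second_to_timecode(seconds)  -> an "HH:MM:SS,mmm" timecode string for the SRT cues
#   download_from_youtube(url)   -> a local path to the downloaded audio file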

MODEL_NAME = 'bayartsogt/whisper-large-v2-mn-13'
lang = 'mn'

chunk_length_s = 9 # sec, chunk size fed to the ASR pipeline
vad_activation_min_duration = 9 # sec, speaker turns longer than this are split with VAD
device = 0 if torch.cuda.is_available() else "cpu"
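# Whisper expects 16 kHz mono audio, so all inputs are resampled to this rate.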
SAMPLE_RATE = 16_000

######## LOAD MODELS FROM HUB ########
dia_model = Pipeline.from_pretrained("pyannote/speaker-diarization", use_auth_token=os.environ['TOKEN'])
vad_model = Pipeline.from_pretrained("pyannote/voice-activity-detection", use_auth_token=os.environ['TOKEN'])
pipe = pipeline(task="automatic-speech-recognition", model=MODEL_NAME, chunk_length_s=chunk_length_s, device=device)
pipe.model.config.forced_decoder_ids = pipe.tokenizer.get_decoder_prompt_ids(language=lang, task="transcribe")
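# Forcing the decoder prompt pins generation to Mongolian transcription instead of letting Whisper auto-detect the language.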

print("----------> Loaded models <-----------")

def generator(youtube_link, microphone, file_upload, num_speakers, max_duration, history):

    if int(youtube_link != '') + int(microphone is not None) + int(file_upload is not None) != 1:
        raise Exception(f"Exactly one audio source must be provided, got youtube_link={youtube_link}, microphone={microphone}, file_upload={file_upload}")

    history = history or ""
    
    if microphone:
        path = microphone
    elif file_upload:
        path = file_upload
    elif youtube_link:
        path = download_from_youtube(youtube_link)
    
    waveform, sampling_rate = librosa.load(path, sr=SAMPLE_RATE, mono=True, duration=max_duration)

    print(waveform.shape, sampling_rate)
    waveform_tensor = torch.unsqueeze(torch.tensor(waveform), 0).to(device)

    dia_result = dia_model({
        "waveform": waveform_tensor,
        "sample_rate": sampling_rate,
    }, num_speakers=num_speakers)
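    # dia_result is a pyannote Annotation of speaker turns; iterate them in chronological order.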

    counter = 1
    
    for speech_turn, track, speaker in dia_result.itertracks(yield_label=True):
        print(f"{speech_turn.start:4.1f} {speech_turn.end:4.1f} {speaker}")
        _start = int(sampling_rate * speech_turn.start)
        _end = int(sampling_rate * speech_turn.end)
        data = waveform[_start: _end]

        if speech_turn.end - speech_turn.start > vad_activation_min_duration:
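            # Long turns are split with voice-activity detection so each ASR call gets a shorter, speech-only segment.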
            print(f'audio duration {speech_turn.end - speech_turn.start} sec ----> activating VAD')
            vad_output = vad_model({
                'waveform': waveform_tensor[:, _start:_end],
                'sample_rate': sampling_rate})
            for vad_turn in vad_output.get_timeline().support():
                vad_start = _start + int(sampling_rate * vad_turn.start)
                vad_end = _start + int(sampling_rate * vad_turn.end)
                prediction = pipe(waveform[vad_start: vad_end])['text']
                history +=  f"{counter}\n" + \
                            f"{second_to_timecode(speech_turn.start + vad_turn.start)} --> {second_to_timecode(speech_turn.start + vad_turn.end)}\n" + \
                            f"{prediction}\n\n"
                            # f">> {speaker}: {prediction}\n\n"
                yield history, history, None
                counter += 1

        else:
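            # Short turns are transcribed directly in a single pass.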
            prediction = pipe(data)['text']
            history +=  f"{counter}\n" + \
                        f"{second_to_timecode(speech_turn.start)} --> {second_to_timecode(speech_turn.end)}\n" + \
                        f"{prediction}\n\n"
                        # f">> {speaker}: {prediction}\n\n"
            counter += 1
            yield history, history, None
    
    # https://support.google.com/youtube/answer/2734698?hl=en#zippy=%2Cbasic-file-formats%2Csubrip-srt-example%2Csubviewer-sbv-example
    file_name = 'transcript.srt'
    with open(file_name, 'w') as fp:
        fp.write(history)
    
    yield history, history, file_name

demo = gr.Interface(
    generator, 
    inputs=[
        gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL", optional=True),
        gr.inputs.Audio(source="microphone", type="filepath", optional=True),
        gr.inputs.Audio(source="upload", type="filepath", optional=True),
        gr.Number(value=1, label="Number of Speakers"),
        gr.Number(value=120, label="Maximum Duration (Seconds)"),
        'state',
    ],
    outputs=['text', 'state', 'file'],
    layout="horizontal",
    theme="huggingface",
    title="Transcribe Mongolian Whisper 🇲🇳",
    description=(
        "Transcribe Youtube Video / Microphone / Uploaded File in Mongolian Whisper Model." + \
        " | You can upload SubRip file (`.srt`) [to your youtube video](https://support.google.com/youtube/answer/2734698?hl=en#zippy=%2Cbasic-file-formats)." + \
        " | Please REFRESH 🔄 the page after you transcribed!" + \
        " | 🐦 [@_tsogoo_](https://twitter.com/_tsogoo_)" + \
        " | 🤗 [@bayartsogt](https://huggingface.co/bayartsogt)" + \
        ""
    ),
    allow_flagging="never",
)

# define queue - required for generators
demo.queue()

demo.launch()