"""Gradio demo for noise-robust English automatic speech recognition.

Compares a teacher-student distilled Wav2Vec2 model against noisy- and
clean-finetuned teachers, plus a dynamically quantised variant, behind a
single transcription UI.
"""


import os
import time

import gradio as gr
import torch
from transformers import pipeline, Wav2Vec2ForCTC, Wav2Vec2Processor


# Hugging Face access token, read from the Space's environment, used to
# download the model checkpoints below.
auth_token = os.environ.get('TOKEN')


# Checkpoints: a teacher-student distilled model (M1) and two fine-tuned
# teachers, one trained with babble noise (M2) and one on clean data (M3).
M1 = "rohitp1/dgx1_w2v2_base_teacher_student_distillation_mozilla_epochs_100_batch_16_concatenate_datasets"
M2 = "rohitp1/finetune_teacher_babble_noise_mozilla_200_epochs"
M3 = "rohitp1/finetune_teacher_clean_mozilla_200_epochs"

model1 = Wav2Vec2ForCTC.from_pretrained(M1, use_auth_token=auth_token)
processor1 = Wav2Vec2Processor.from_pretrained(M1, use_auth_token=auth_token)


model2 = Wav2Vec2ForCTC.from_pretrained(M2, use_auth_token=auth_token)
processor2 = Wav2Vec2Processor.from_pretrained(M2, use_auth_token=auth_token)


model3 = Wav2Vec2ForCTC.from_pretrained(M3, use_auth_token=auth_token)
processor3 = Wav2Vec2Processor.from_pretrained(M3, use_auth_token=auth_token)
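
# Note: from_pretrained downloads and caches each checkpoint on first use,
# so the first startup of the Space can take a while.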



# Dynamically quantise the distilled model for faster CPU inference.
quantized_model1 = torch.quantization.quantize_dynamic(
    model1, {torch.nn.Linear}, dtype=torch.qint8
)
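
# quantize_dynamic rewrites the listed module types (here torch.nn.Linear)
# to store int8 weights and quantise activations on the fly: a smaller,
# faster model at a usually small accuracy cost.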


# One ASR pipeline per model. pipeline() takes the tokenizer and feature
# extractor separately rather than a combined Wav2Vec2Processor.
p1 = pipeline('automatic-speech-recognition', model=model1, tokenizer=processor1.tokenizer, feature_extractor=processor1.feature_extractor)
p2 = pipeline('automatic-speech-recognition', model=model2, tokenizer=processor2.tokenizer, feature_extractor=processor2.feature_extractor)
p3 = pipeline('automatic-speech-recognition', model=model3, tokenizer=processor3.tokenizer, feature_extractor=processor3.feature_extractor)
p1_quant = pipeline('automatic-speech-recognition', model=quantized_model1, tokenizer=processor1.tokenizer, feature_extractor=processor1.feature_extractor)
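
# The pipelines run on CPU by default (device=-1); passing device=0 to
# pipeline() would move a model onto the first GPU, though the dynamically
# quantised model is CPU-only.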

def transcribe(mic_input, upl_input, model_type):
    """Transcribe microphone or uploaded audio with the selected model."""
    # Prefer the microphone recording; fall back to the uploaded file.
    audio = mic_input if mic_input else upl_input
    st_time = time.time()
    if model_type == 'NoisyFinetuned':
        text = p2(audio)["text"]
    elif model_type == 'CleanFinetuned':
        text = p3(audio)["text"]
    elif model_type == 'DistilledAndQuantised':
        text = p1_quant(audio)['text']
    else:  # 'RobustDistillation'
        text = p1(audio)["text"]
    end_time = time.time()
    # Report seconds, to match the "Time Taken (in sec)" label in the UI.
    time_taken = round(end_time - st_time, 4)
    return text, time_taken
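
# Local sanity check (assumes a 16 kHz mono wav exists at audio/sample3.wav):
#   text, secs = transcribe(None, "audio/sample3.wav", "CleanFinetuned")
#   print(text, secs)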




def clear_inputs_and_outputs():
    """Reset both audio inputs, the model dropdown and both outputs."""
    return [None, None, "CleanFinetuned", None, None]

# Entry point: build and launch the demo UI.
if __name__ == "__main__":
    demo = gr.Blocks()

    with demo:
        gr.Markdown(
            """
            <center><h1>Noise-Robust English Automatic Speech Recognition on the LibriSpeech Dataset</h1></center>
            This Space demos an English ASR model built with Hugging Face.<br>
            Record your voice or upload a wav file, and the model will transcribe the speech in the audio.<br><br>
            """
        )
        with gr.Row():
            # Inputs
            with gr.Column():
                mic_input = gr.Audio(source="microphone", type="filepath", label="Record your own voice")
                upl_input = gr.Audio(
                    source="upload", type="filepath", label="Upload a wav file"
                )
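
                # type="filepath" hands the callbacks a path on disk, which
                # the ASR pipelines can read directly.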

                with gr.Row():
                    model_type = gr.Dropdown(
                        ["RobustDistillation", "NoisyFinetuned", "CleanFinetuned", "DistilledAndQuantised"],
                        value="CleanFinetuned",
                        label="Model Type",
                    )

                with gr.Row():
                    clr_btn = gr.Button(value="Clear", variant="secondary")
                    prd_btn = gr.Button(value="Predict")


            # Outputs
            with gr.Column():
                lbl_output = gr.Textbox(label="Transcription")

                with gr.Row():
                    time_output = gr.Label(label="Time Taken (in sec)")


        with gr.Row():
            gr.Examples(
                [os.path.join(os.path.dirname(__file__), "audio/sample3.wav")],
                inputs=upl_input,
                outputs=[lbl_output, time_output],
                fn=transcribe,
            )
        # Credits
        with gr.Row():
            gr.Markdown(
                """
                <h4>Credits</h4>
                Author: Rohit Prasad <br>
                Check out the model <a href="https://huggingface.co/rohitp1/subh_whisper_small_distil_att_loss_mozilla_epochs_50_batch_8">here</a>
                """
            )
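
        # Wire the buttons: "Clear" resets every component; "Predict" runs
        # the selected pipeline on whichever audio input is populated.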

        clr_btn.click(
            fn=clear_inputs_and_outputs,
            inputs=[],
            outputs=[mic_input, upl_input, model_type, lbl_output, time_output],
        )
        prd_btn.click(
            fn=transcribe,
            inputs=[mic_input, upl_input, model_type],
            outputs=[lbl_output, time_output],
        )

    demo.launch(debug=True)
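
# debug=True surfaces tracebacks in the logs; calling demo.queue() before
# launch() can help if long transcriptions start to hit request timeouts.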