File size: 5,010 Bytes
d83d45e
 
 
 
 
 
 
 
d68492b
 
 
 
 
 
 
 
 
 
 
 
 
 
51ef34d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
d68492b
 
 
 
 
4dabb93
d68492b
 
 
51ef34d
d68492b
 
51ef34d
d68492b
51ef34d
d68492b
 
51ef34d
d68492b
51ef34d
d68492b
51ef34d
 
 
 
 
 
d68492b
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
import os
import subprocess
import sys

# Install Whisper from GitHub at startup (typical Hugging Face Spaces
# pattern where requirements can't pin a git dependency).
# Use the running interpreter via subprocess instead of os.system so the
# install targets the correct environment and a failed install raises
# immediately instead of being silently ignored.
subprocess.check_call(
    [sys.executable, "-m", "pip", "install",
     "git+https://github.com/openai/whisper.git"]
)

import pytube
import whisper
import gradio as gr

# Load the large Whisper model once at import time; it is shared by all
# transcription handlers below. This download/load can take a while.
model = whisper.load_model('large')

def speech_youtube(x):
    """Transcribe the audio track of a YouTube video.

    x: URL of the YouTube video (string pasted by the user).
    Returns the transcription text produced by Whisper.
    """
    video = pytube.YouTube(x)
    # Grab the audio-only stream and download it to the working directory;
    # transcribe() receives the downloaded file's path.
    audio_stream = video.streams.get_audio_only()
    result = model.transcribe(audio_stream.download())
    return result['text']

def speech_file(x):
    """Transcribe an uploaded audio file.

    x: filesystem path to the audio file (Gradio `type="filepath"`).
    Returns the transcription text produced by Whisper.
    """
    result = model.transcribe(x)
    return result['text']

def speech_record(x):
    """Transcribe audio recorded from the microphone.

    x: filesystem path to the recorded clip (Gradio `type="filepath"`).
    Returns the transcription text produced by Whisper.
    """
    result = model.transcribe(x)
    return result['text']

# Custom CSS injected into the Gradio app (passed to gr.Blocks(css=...)).
# Styles buttons, range inputs, footer, spinner animation, and the share
# button; includes dark-mode overrides. This is a runtime string — any
# change here alters the rendered UI.
css = """
        .gradio-container {
            font-family: 'IBM Plex Sans', sans-serif;
        }
        .gr-button {
            color: white;
            border-color: black;
            background: black;
        }
        input[type='range'] {
            accent-color: black;
        }
        .dark input[type='range'] {
            accent-color: #dfdfdf;
        }
        .container {
            max-width: 730px;
            margin: auto;
            padding-top: 1.5rem;
        }
     
        .details:hover {
            text-decoration: underline;
        }
        .gr-button {
            white-space: nowrap;
        }
        .gr-button:focus {
            border-color: rgb(147 197 253 / var(--tw-border-opacity));
            outline: none;
            box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
            --tw-border-opacity: 1;
            --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
            --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
            --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
            --tw-ring-opacity: .5;
        }
        .footer {
            margin-bottom: 45px;
            margin-top: 35px;
            text-align: center;
            border-bottom: 1px solid #e5e5e5;
        }
        .footer>p {
            font-size: .8rem;
            display: inline-block;
            padding: 0 10px;
            transform: translateY(10px);
            background: white;
        }
        .dark .footer {
            border-color: #303030;
        }
        .dark .footer>p {
            background: #0b0f19;
        }
        .prompt h4{
            margin: 1.25em 0 .25em 0;
            font-weight: bold;
            font-size: 115%;
        }
        .animate-spin {
            animation: spin 1s linear infinite;
        }
        @keyframes spin {
            from {
                transform: rotate(0deg);
            }
            to {
                transform: rotate(360deg);
            }
        }
        #share-btn-container {
            display: flex; margin-top: 1.5rem !important; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
        }
        #share-btn {
            all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
        }
        #share-btn * {
            all: unset;
        }
"""

# Build the Gradio UI: three tabs (YouTube link, uploaded file, microphone
# recording), each wired to its own transcription handler defined above.
with gr.Blocks(css = css) as demo:
    gr.Markdown(
    """
    # Speech to Text Transcriptions!
    This demo uses the OpenAI whisper model which is trained on a large dataset of diverse audio that can perform multilingual speech recognition. The computation time is dependent on the length of the audio.
    """)
    with gr.Tab("YouTube"):
        # Text input for a YouTube URL; transcription shown below it.
        audio_input = gr.Textbox(label="YouTube Link", placeholder="paste the youtube link here")
        text_output = gr.Textbox(label="Transcription", show_label=False)
        youtube_button = gr.Button("Transcribe")
    with gr.Tab("Audio File"):
        # NOTE(review): Row().style() and the `source=` kwarg below were
        # removed in Gradio 4.x — confirm the Space pins a 3.x gradio.
        with gr.Row().style(equal_height=True):
            audio_input2 = gr.Audio(label="Audio File", type="filepath")
            text_output2 = gr.Textbox(label="Transcription", show_label=False)
        file_button = gr.Button("Transcribe")
    with gr.Tab("Record"):
        with gr.Row().style(equal_height=True):
            # Microphone capture; `type="filepath"` hands the handler a
            # temp-file path rather than raw audio data.
            audio_input3 = gr.Audio(label="Input Audio", source="microphone", type="filepath")
            text_output3 = gr.Textbox(label="Transcription", show_label=False)
        rec_button = gr.Button("Transcribe")
    gr.HTML('''
        <div class="footer">
                    <p>Model by <a href="https://github.com/openai/whisper" style="text-decoration: underline;" target="_blank">OpenAI</a> - Gradio Demo by 👩🏽‍🦱 <a href="https://www.linkedin.com/in/oayodeji/" style="text-decoration: underline;" target="_blank">Wvle</a>
                    </p>
        </div>
        ''')

    # Wire each Transcribe button to its handler.
    youtube_button.click(speech_youtube, inputs=audio_input, outputs=text_output)
    file_button.click(speech_file, inputs=audio_input2, outputs=text_output2)
    rec_button.click(speech_record, inputs=audio_input3, outputs=text_output3)

# Start the app server (blocks until shut down).
demo.launch()