wvle committed
Commit 51ef34d · 1 Parent(s): d83d45e

Update app.py

Files changed (1)
  1. app.py +94 -7
app.py CHANGED
@@ -20,28 +20,115 @@ def speech_record(x):
     text = model.transcribe(x)
     return text['text']
 
-css = ".gradio-container {background-color: blue}"
+css = """
+.gradio-container {
+    font-family: 'IBM Plex Sans', sans-serif;
+}
+.gr-button {
+    color: white;
+    border-color: black;
+    background: black;
+}
+input[type='range'] {
+    accent-color: black;
+}
+.dark input[type='range'] {
+    accent-color: #dfdfdf;
+}
+.container {
+    max-width: 730px;
+    margin: auto;
+    padding-top: 1.5rem;
+}
+
+.details:hover {
+    text-decoration: underline;
+}
+.gr-button {
+    white-space: nowrap;
+}
+.gr-button:focus {
+    border-color: rgb(147 197 253 / var(--tw-border-opacity));
+    outline: none;
+    box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
+    --tw-border-opacity: 1;
+    --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
+    --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
+    --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
+    --tw-ring-opacity: .5;
+}
+.footer {
+    margin-bottom: 45px;
+    margin-top: 35px;
+    text-align: center;
+    border-bottom: 1px solid #e5e5e5;
+}
+.footer>p {
+    font-size: .8rem;
+    display: inline-block;
+    padding: 0 10px;
+    transform: translateY(10px);
+    background: white;
+}
+.dark .footer {
+    border-color: #303030;
+}
+.dark .footer>p {
+    background: #0b0f19;
+}
+.prompt h4 {
+    margin: 1.25em 0 .25em 0;
+    font-weight: bold;
+    font-size: 115%;
+}
+.animate-spin {
+    animation: spin 1s linear infinite;
+}
+@keyframes spin {
+    from {
+        transform: rotate(0deg);
+    }
+    to {
+        transform: rotate(360deg);
+    }
+}
+#share-btn-container {
+    display: flex; margin-top: 1.5rem !important; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
+}
+#share-btn {
+    all: initial; color: #ffffff; font-weight: 600; cursor: pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
+}
+#share-btn * {
+    all: unset;
+}
+"""
 
 with gr.Blocks(css = css) as demo:
     gr.Markdown(
         """
         # Speech to Text Transcriptions!
-        Start typing below to see the output.
+        This demo uses OpenAI whisper model which is trained on a large dataset of diverse audio that can perform multilingual speech recognition.
         """)
     with gr.Tab("YouTube"):
         audio_input = gr.Textbox(label="YouTube Link", placeholder="paste the youtube link here")
-        text_output = gr.Textbox(label="Transcription")
+        text_output = gr.Textbox(label="Transcription", show_label=False)
         youtube_button = gr.Button("Transcribe")
     with gr.Tab("Audio File"):
-        with gr.Row():
+        with gr.Row().style(equal_height=True):
             audio_input2 = gr.Audio(label="Audio File", type="filepath")
-            text_output2 = gr.Textbox(label="Transcription")
+            text_output2 = gr.Textbox(label="Transcription", show_label=False)
         file_button = gr.Button("Transcribe")
     with gr.Tab("Record"):
-        with gr.Row():
+        with gr.Row().style(equal_height=True):
             audio_input3 = gr.Audio(label="Input Audio", source="microphone", type="filepath")
-            text_output3 = gr.Textbox(label="Transcription")
+            text_output3 = gr.Textbox(label="Transcription", show_label=False)
         rec_button = gr.Button("Transcribe")
+    gr.HTML('''
+        <div class="footer">
+            <p>Model by <a href="https://github.com/openai/whisper" style="text-decoration: underline;" target="_blank">OpenAI</a> - Gradio Demo by 👩🏽‍🦱 <a href="https://www.linkedin.com/in/oayodeji/" style="text-decoration: underline;" target="_blank">Wvle</a>
+            </p>
+        </div>
+    ''')
 
     youtube_button.click(speech_youtube, inputs=audio_input, outputs=text_output)
     file_button.click(speech_file, inputs=audio_input2, outputs=text_output2)
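
The hunk above covers only part of app.py. For orientation, here is a minimal sketch of how the pieces referenced in the diff (the Whisper model and the speech_youtube / speech_file / speech_record helpers wired to the buttons) might fit together. The model size, the pytube-based YouTube download, and the bodies of speech_youtube and speech_file are assumptions for illustration, not taken from this commit; only speech_record's body appears in the hunk itself.

# Minimal sketch of the surrounding app.py (assumptions noted above).
import gradio as gr
import whisper
from pytube import YouTube  # assumed dependency for the YouTube tab

model = whisper.load_model("base")  # model size is an assumption


def speech_youtube(url):
    # Hypothetical body: download the video's audio stream, then transcribe it.
    stream = YouTube(url).streams.filter(only_audio=True).first()
    audio_path = stream.download(filename="yt_audio.mp4")
    result = model.transcribe(audio_path)
    return result['text']


def speech_file(x):
    # x is a filepath produced by gr.Audio(type="filepath").
    result = model.transcribe(x)
    return result['text']


def speech_record(x):
    # Body as shown in the diff's hunk context.
    text = model.transcribe(x)
    return text['text']

The gr.Blocks section from the diff then builds the three tabs on top of these helpers, and an app like this would typically end with demo.launch().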