aleksfinn23 committed
Commit 70a1cf7 · verified · 1 Parent(s): 2362603

Update app.py

Files changed (1): app.py (+10 -14)
app.py CHANGED
@@ -27,7 +27,7 @@ pipe = pipeline(
 @spaces.GPU
 def transcribe(inputs, task):
     if inputs is None:
-        raise gr.Error("No audio file submitted! Please upload or record an audio file before submitting your request.")
+        raise gr.Error("Нет аудиофайла! Пожалуйста, загрузите аудиофайл перед запросом.")
 
     text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
     return text
@@ -99,11 +99,9 @@ mf_transcribe = gr.Interface(
         gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
     ],
     outputs="text",
-    title="Whisper Large V3 Turbo: Transcribe Audio",
+    title="Транскрибация аудио с помощью модели Whisper 3 от OpenAI",
     description=(
-        "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the"
-        f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
-        " of arbitrary length."
+        "Текстовая транскрибация записи с вашего микрофона нажатием одной кнопки! Попробуйте сейчас!"
     ),
     allow_flagging="never",
 )
@@ -115,11 +113,10 @@ file_transcribe = gr.Interface(
         gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
     ],
     outputs="text",
-    title="Whisper Large V3: Transcribe Audio",
+    title="Транскрибация аудио с помощью модели Whisper 3 от OpenAI",
     description=(
-        "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the"
-        f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
-        " of arbitrary length."
+        "Текстовая транскрибация аудиофайла нажатием одной кнопки! Попробуйте сейчас!"
+
     ),
     allow_flagging="never",
 )
@@ -131,17 +128,16 @@ yt_transcribe = gr.Interface(
         gr.Radio(["transcribe", "translate"], label="Task", value="transcribe")
     ],
     outputs=["html", "text"],
-    title="Whisper Large V3: Transcribe YouTube",
+    title="Транскрибация YouTube с помощью модели Whisper 3 от OpenAI",
     description=(
-        "Transcribe long-form YouTube videos with the click of a button! Demo uses the checkpoint"
-        f" [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe video files of"
-        " arbitrary length."
+        "Текстовая транскрибация видео с YouTube нажатием одной кнопки! Попробуйте сейчас!"
+
     ),
     allow_flagging="never",
 )
 
 with demo:
-    gr.TabbedInterface([mf_transcribe, file_transcribe, yt_transcribe], ["Microphone", "Audio file", "YouTube"])
+    gr.TabbedInterface([mf_transcribe, file_transcribe, yt_transcribe], ["Микрофон", "Аудиофайл", "YouTube"])
 
 demo.queue().launch(ssr_mode=False)
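Note: the diff only changes user-facing strings; the code it references (pipe, MODEL_NAME, BATCH_SIZE, demo) is not shown in the hunks. The minimal, single-tab sketch below shows how such an app.py typically fits together after this commit. The concrete MODEL_NAME and BATCH_SIZE values, the chunk_length_s setting, and the gr.Audio arguments are assumptions for illustration, not the Space's actual code.

# Sketch only: single-tab version of the app. MODEL_NAME, BATCH_SIZE, chunk_length_s
# and the gr.Audio arguments are assumed values, not taken from the diff.
import gradio as gr
import spaces  # ZeroGPU decorator available on Hugging Face Spaces
from transformers import pipeline

MODEL_NAME = "openai/whisper-large-v3-turbo"  # assumed from the old title; not shown in the diff
BATCH_SIZE = 8                                # assumed default

# Speech-recognition pipeline; chunking lets it handle long recordings
pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
)

@spaces.GPU
def transcribe(inputs, task):
    if inputs is None:
        # Russian error message introduced by this commit
        raise gr.Error("Нет аудиофайла! Пожалуйста, загрузите аудиофайл перед запросом.")
    text = pipe(
        inputs,
        batch_size=BATCH_SIZE,
        generate_kwargs={"task": task},
        return_timestamps=True,
    )["text"]
    return text

mf_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(sources=["microphone"], type="filepath"),
        gr.Radio(["transcribe", "translate"], label="Task", value="transcribe"),
    ],
    outputs="text",
    title="Транскрибация аудио с помощью модели Whisper 3 от OpenAI",
    description="Текстовая транскрибация записи с вашего микрофона нажатием одной кнопки! Попробуйте сейчас!",
    allow_flagging="never",
)

demo = gr.Blocks()
with demo:
    gr.TabbedInterface([mf_transcribe], ["Микрофон"])

demo.queue().launch(ssr_mode=False)

The file and YouTube tabs in the real Space presumably follow the same gr.Interface pattern; this commit only swaps their English titles, descriptions, tab labels, and the missing-audio error message for Russian equivalents.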