|
import os
import tempfile

import gradio as gr
import moviepy.editor as mp
import numpy as np
from transformers import pipeline
|
|
|
# Speech-to-text pipeline: English-only Whisper "base" model (loaded once at startup).
transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-base.en")

# Machine-translation pipeline: English -> French (Helsinki-NLP OPUS model).
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr")
|
|
|
def transcribe(video_file):
    """Extract the audio track from a video file and transcribe it to text.

    Parameters
    ----------
    video_file : str
        Path to the uploaded video file (as delivered by the Gradio File
        component).

    Returns
    -------
    str
        The transcription produced by the Whisper pipeline.
    """
    # Use a unique temp path instead of a fixed "temp_audio.wav" so that
    # concurrent requests cannot clobber each other's intermediate file.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as tmp:
        audio_path = tmp.name

    # write_audiofile returns None, so do NOT keep its return value;
    # close the clip afterwards to release file handles held by moviepy.
    clip = mp.AudioFileClip(video_file)
    try:
        clip.write_audiofile(audio_path)
    finally:
        clip.close()

    try:
        result = transcriber(audio_path)
    finally:
        # Always clean up the intermediate audio file, even if
        # transcription fails.
        os.remove(audio_path)

    return result['text']
|
|
|
def translate(text):
    """Translate English text to French.

    Parameters
    ----------
    text : str
        English source text.

    Returns
    -------
    str
        The French translation from the first (and only) pipeline result.
    """
    outputs = translator(text)
    first_result = outputs[0]
    return first_result['translation_text']
|
|
|
# Assemble the two-tab Gradio UI: one tab for video transcription,
# one for English-to-French text translation.
with gr.Blocks() as demo:
    gr.Markdown("# Curify Studio Demo")

    with gr.Tab("Transcription"):
        uploaded_video = gr.File(label="Upload Video File")
        transcription_box = gr.Textbox(label="Transcription Output", lines=10)
        run_transcription = gr.Button("Transcribe")

        run_transcription.click(
            fn=transcribe,
            inputs=uploaded_video,
            outputs=transcription_box,
        )

    with gr.Tab("Translation"):
        source_text = gr.Textbox(label="Text to Translate")
        translation_box = gr.Textbox(label="Translation Output", lines=10)
        run_translation = gr.Button("Translate")

        run_translation.click(
            fn=translate,
            inputs=source_text,
            outputs=translation_box,
        )

demo.launch(debug=True)
|
|