# YouTube Video Summarizer (Gradio app)
# Downloads a YouTube video's audio track, transcribes it with wav2vec2,
# and produces a text summary via a transformers summarization pipeline.
import gradio as gr
from pytube import YouTube
import subprocess
from huggingsound import SpeechRecognitionModel
import torch
from transformers import pipeline
def process_video(video_url):
    """Download a YouTube video's audio, transcribe it, and summarize it.

    Args:
        video_url: URL of the YouTube video to process.

    Returns:
        A ``(status, message, summary)`` tuple of three strings, matching the
        three output Textboxes declared on the Gradio interface:
        ``status`` is ``'Success'`` or ``'Error'``, ``message`` holds the
        error description (empty on success), ``summary`` holds the
        summarized transcript (empty on error).

    Never raises: each pipeline stage is wrapped so any failure is reported
    through the returned tuple instead of crashing the UI worker.
    """
    # Stage 1: fetch the audio-only stream and convert it to 16 kHz PCM WAV,
    # the sample rate the wav2vec2 model expects.
    try:
        yt = YouTube(video_url)
        yt.streams.filter(only_audio=True, file_extension='mp4').first().download(filename='ytaudio.mp4')
        # '-y' overwrites a leftover ytaudio.wav from a previous request;
        # without it ffmpeg blocks on a confirmation prompt and times out.
        subprocess.run(
            ['ffmpeg', '-y', '-i', 'ytaudio.mp4', '-acodec', 'pcm_s16le', '-ar', '16000', 'ytaudio.wav'],
            check=True,
        )
    except Exception as e:
        return 'Error', f'Failed to download and convert video: {str(e)}', ''

    # Stage 2: speech-to-text on the WAV file.
    try:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        model = SpeechRecognitionModel("jonatasgrosman/wav2vec2-large-xlsr-53-english", device=device)
        transcription = model.transcribe(['ytaudio.wav'])[0]['transcription']
    except Exception as e:
        return 'Error', f'Failed during speech recognition: {str(e)}', ''

    # Stage 3: summarize the transcript.
    try:
        summarization = pipeline('summarization')
        summarized_text = summarization(transcription, max_length=130, min_length=30, do_sample=False)
        return 'Success', '', summarized_text[0]['summary_text']
    except Exception as e:
        return 'Error', f'Failed during summarization: {str(e)}', ''
def _as_outputs(result):
    """Adapt process_video's result to the (status, message, summary) triple
    the three output Textboxes expect; tolerates both the dict form
    ({'status', 'message', 'data'}) and an already-unpacked 3-tuple."""
    if isinstance(result, dict):
        return result.get('status', ''), result.get('message', ''), result.get('data', '')
    return result


iface = gr.Interface(
    fn=lambda video_url: _as_outputs(process_video(video_url)),
    inputs=gr.inputs.Textbox(lines=2, placeholder="Enter YouTube Video URL Here..."),
    # Three outputs -> fn must yield three values; _as_outputs guarantees that.
    outputs=[
        gr.outputs.Textbox(label="Status"),
        gr.outputs.Textbox(label="Message"),
        gr.outputs.Textbox(label="Summary")
    ],
    title="YouTube Video Summarizer",
    description="This tool extracts audio from a YouTube video, transcribes it, and provides a summary.",
    enable_queue=True  # queue requests: transcription/summarization are long-running
)
iface.launch()