JaganathC committed
Commit e19431c · verified · 1 Parent(s): d851d3c

Delete app.py

Files changed (1)
  1. app.py +0 -130
app.py DELETED
@@ -1,130 +0,0 @@
- import gradio as gr
- import torch
- import yt_dlp
- import os
- import subprocess
- import json
- from threading import Thread
- from transformers import AutoTokenizer, AutoModelForCausalLM
- import spaces
- import time
- import langdetect
- import uuid
-
- HF_TOKEN = os.environ.get("HF_TOKEN")
- print("Starting the program...")
-
- model_path = "Qwen/Qwen2.5-7B-Instruct"
- print(f"Loading model {model_path}...")
- tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
- model = AutoModelForCausalLM.from_pretrained(model_path, torch_dtype=torch.float16, trust_remote_code=True).cuda()
- model = model.eval()
- print("Model successfully loaded.")
-
- def generate_unique_filename(extension):
-     return f"{uuid.uuid4()}{extension}"
-
- def cleanup_files(*files):
-     for file in files:
-         if file and os.path.exists(file):
-             os.remove(file)
-             print(f"Removed file: {file}")
-
- def download_youtube_audio(url):
-     print(f"Downloading audio from YouTube: {url}")
-     output_path = generate_unique_filename(".wav")
-     ydl_opts = {
-         'format': 'bestaudio/best',
-         'postprocessors': [{
-             'key': 'FFmpegExtractAudio',
-             'preferredcodec': 'wav',
-         }],
-         'outtmpl': output_path,
-         'keepvideo': True,
-     }
-     with yt_dlp.YoutubeDL(ydl_opts) as ydl:
-         ydl.download([url])
-
-     if os.path.exists(output_path + ".wav"):
-         os.rename(output_path + ".wav", output_path)
-
-     return output_path
-
-
- def transcribe_audio(file_path):
-     print(f"Starting transcription of file: {file_path}")
-     temp_audio = None
-     if file_path.endswith(('.mp4', '.avi', '.mov', '.flv')):
-         print("Video file detected. Extracting audio using ffmpeg...")
-         temp_audio = generate_unique_filename(".wav")
-         command = ["ffmpeg", "-i", file_path, "-q:a", "0", "-map", "a", temp_audio]
-         subprocess.run(command, check=True)
-         file_path = temp_audio
-
-     output_file = generate_unique_filename(".json")
-     command = [
-         "insanely-fast-whisper",
-         "--file-name", file_path,
-         "--device-id", "0",
-         "--model-name", "openai/whisper-large-v3",
-         "--task", "transcribe",
-         "--timestamp", "chunk",
-         "--transcript-path", output_file
-     ]
-     subprocess.run(command, check=True)
-
-     with open(output_file, "r") as f:
-         transcription = json.load(f)
-
-     result = transcription.get("text", " ".join([chunk["text"] for chunk in transcription.get("chunks", [])]))
-
-     cleanup_files(output_file)
-     if temp_audio:
-         cleanup_files(temp_audio)
-
-     return result
-
- def generate_summary_stream(transcription):
-     detected_language = langdetect.detect(transcription)
-     prompt = f"""Summarize the following video transcription in 150-300 words in {detected_language}:
-     {transcription[:300000]}..."""
-
-     response, history = model.chat(tokenizer, prompt, history=[])
-     return response
-
- def process_youtube(url):
-     if not url:
-         return "Please enter a YouTube URL.", None
-     audio_file = download_youtube_audio(url)
-     transcription = transcribe_audio(audio_file)
-     cleanup_files(audio_file)
-     return transcription, None
-
- def process_uploaded_video(video_path):
-     transcription = transcribe_audio(video_path)
-     return transcription, None
-
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
-     gr.Markdown("""
-     # 🎥 Video Transcription and Smart Summary
-     Upload a video or provide a YouTube link to get a transcription and AI-generated summary.
-     """)
-
-     with gr.Tabs():
-         with gr.TabItem("📤 Video Upload"):
-             video_input = gr.Video()
-             video_button = gr.Button("🚀 Process Video")
-
-         with gr.TabItem("🔗 YouTube Link"):
-             url_input = gr.Textbox(placeholder="https://www.youtube.com/watch?v=...")
-             url_button = gr.Button("🚀 Process URL")
-
-     transcription_output = gr.Textbox(label="📝 Transcription", lines=10, show_copy_button=True)
-     summary_output = gr.Textbox(label="📊 Summary", lines=10, show_copy_button=True)
-     summary_button = gr.Button("📝 Generate Summary")
-
-     video_button.click(process_uploaded_video, inputs=[video_input], outputs=[transcription_output, summary_output])
-     url_button.click(process_youtube, inputs=[url_input], outputs=[transcription_output, summary_output])
-     summary_button.click(generate_summary_stream, inputs=[transcription_output], outputs=[summary_output])
-
- demo.launch()