|
import os

import gradio as gr
import openai
from langchain_community.document_loaders import YoutubeLoader
from youtubesearchpython import VideosSearch
|
|
|
|
|
# Read the OpenAI API key from the environment (needs `import os` at the top
# of the file). NOTE(review): 'O_API_KEY' is an unusual variable name —
# confirm it matches the deployment configuration (commonly OPENAI_API_KEY).
openai.api_key = os.getenv('O_API_KEY')
|
|
|
def search_youtube_videos(keyword, limit=5, order='date'):
    """Search YouTube for *keyword* and return matching video URLs.

    Args:
        keyword: Search query string.
        limit: Maximum number of results to fetch (default 5).
        order: Requested result ordering (default 'date').
            NOTE(review): confirm the installed youtubesearchpython
            release accepts an `order` keyword — it is not present in
            every version's VideosSearch signature.

    Returns:
        A list of video URL strings extracted from the search results.
    """
    search = VideosSearch(keyword, limit=limit, order=order)
    payload = search.result()
    urls = []
    for entry in payload['result']:
        urls.append(entry['link'])
    return urls
|
|
|
def get_transcript(url):
    """Fetch the full transcript text of a YouTube video.

    Args:
        url: Full YouTube video URL.

    Returns:
        The transcript as a single space-joined string.

    Raises:
        Exception: Propagated from the loader when the video has no
            transcript; the caller (process_keyword) catches and reports
            per-URL failures.
    """
    # Bug fix: YoutubeLoader's constructor takes a video id, not nothing,
    # and load() accepts no URL argument. The documented entry point is
    # the from_youtube_url() classmethod; load() returns a list of
    # Document objects whose transcript text lives in .page_content —
    # there is no dict with a 'segments' key.
    loader = YoutubeLoader.from_youtube_url(url)
    documents = loader.load()
    return " ".join(doc.page_content for doc in documents)
|
|
|
def summarize_text(text):
    """Summarize *text* via the OpenAI completion endpoint.

    Args:
        text: The transcript text to summarize.

    Returns:
        The model's summary with surrounding whitespace stripped.

    NOTE(review): openai.Completion and the "text-davinci-003" engine are
    deprecated in newer openai releases — confirm the pinned library
    version still supports this call.
    """
    completion = openai.Completion.create(
        engine="text-davinci-003",
        prompt=f"μμ½: {text}",
        max_tokens=150,
    )
    first_choice = completion.choices[0]
    return first_choice.text.strip()
|
|
|
def process_keyword(keyword):
    """Search videos for *keyword* and return their summaries as one report.

    Each URL is processed independently: a failure on one video is
    reported inline rather than aborting the whole batch.

    Args:
        keyword: Search query forwarded to search_youtube_videos.

    Returns:
        A newline-joined report with one URL+Summary (or URL+Error)
        entry per video.
    """
    report = []
    for url in search_youtube_videos(keyword):
        try:
            summary = summarize_text(get_transcript(url))
        except Exception as exc:
            report.append(f"URL: {url}\nError: {str(exc)}\n")
        else:
            report.append(f"URL: {url}\nSummary: {summary}\n")
    return "\n".join(report)
|
|
|
|
|
# Build the Gradio UI: one keyword textbox in, one report textbox out.
interface = gr.Interface(
    fn=process_keyword,
    inputs=gr.Textbox(label="κ²μ ν€μλ"),
    outputs=gr.Textbox(label="κ²°κ³Ό"),
)

# Guard the server launch so importing this module (e.g. from tests or
# another script) does not start the web app as a side effect.
if __name__ == "__main__":
    interface.launch()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|