import os

import gradio as gr
import openai
from dotenv import load_dotenv, find_dotenv
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser

_ = load_dotenv(find_dotenv())
#openai.api_key = os.environ["OPENAI_API_KEY"]

def invoke(openai_api_key, youtube_url):
    openai.api_key = openai_api_key
    url = youtube_url
    save_dir = "docs/youtube/"
    # Download the YouTube audio and transcribe it with the OpenAI Whisper API
    loader = GenericLoader(
        YoutubeAudioLoader([url], save_dir),
        OpenAIWhisperParser()
    )
    docs = loader.load()
    # Return the concatenated transcription of all audio chunks
    return " ".join(doc.page_content for doc in docs)

description = """Gradio UI using the OpenAI API with the Whisper-1 foundation model."""

gr.close_all()

demo = gr.Interface(fn = invoke,
                    inputs = [gr.Textbox(label = "OpenAI API Key", lines = 1),
                              gr.Textbox(label = "YouTube URL", lines = 1)],
                    outputs = [gr.Textbox(label = "Automatic Speech Recognition", lines = 1)],
                    title = "Generative AI - Audio",
                    description = description)

demo.launch()