import gradio as gr
import shutil, openai, os

from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

from dotenv import load_dotenv, find_dotenv

# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file, if present.
_ = load_dotenv(find_dotenv())
#openai.api_key = os.environ["OPENAI_API_KEY"]

template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer as concise as possible. Always say "🔥 Thanks for using the app, Bernd Straehle." at the end of the answer.
{context}
Question: {question}
Helpful Answer: """

QA_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = template)

CHROMA_DIR = "docs/chroma/"
YOUTUBE_DIR = "docs/youtube/"

MODEL_NAME = "gpt-4"

def invoke(openai_api_key, youtube_url, process_video, prompt):
    # Make the user-supplied key available both to the openai client and to the
    # LangChain components, which read it from the OPENAI_API_KEY environment variable.
    openai.api_key = openai_api_key
    os.environ["OPENAI_API_KEY"] = openai_api_key
    if process_video:
        # Start from a clean slate, then download the video's audio track and
        # transcribe it with Whisper.
        if os.path.isdir(CHROMA_DIR):
            shutil.rmtree(CHROMA_DIR)
        if os.path.isdir(YOUTUBE_DIR):
            shutil.rmtree(YOUTUBE_DIR)
        loader = GenericLoader(YoutubeAudioLoader([youtube_url], YOUTUBE_DIR), OpenAIWhisperParser())
        docs = loader.load()
        # Split the transcript into overlapping chunks and index them in Chroma.
        text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
        splits = text_splitter.split_documents(docs)
        vector_db = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = CHROMA_DIR)
    else:
        # Reuse the embeddings persisted by a previous run.
        vector_db = Chroma(persist_directory = CHROMA_DIR, embedding_function = OpenAIEmbeddings())
    # Retrieval-augmented QA: retrieve relevant chunks from Chroma and let GPT-4 answer.
    llm = ChatOpenAI(model_name = MODEL_NAME, temperature = 0)
    qa_chain = RetrievalQA.from_chain_type(llm,
                                           retriever = vector_db.as_retriever(),
                                           return_source_documents = True,
                                           chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
    result = qa_chain({"query": prompt})
    return result["result"]

description = """Overview: The app demonstrates how to use a Large Language Model (LLM) with Retrieval Augmented Generation (RAG) on external data (YouTube videos in this case, but it could be PDFs, URLs, databases, etc.).\n\n
Instructions: Enter an OpenAI API key, a YouTube URL, and a prompt to perform semantic search, sentiment analysis, summarization, translation, etc. "Process Video" specifies whether or not to perform speech-to-text processing. To ask multiple questions about the same video, set it to "True" on the first run and to "False" on subsequent runs. The example is a 3:12 min. video about GPT-4 and takes about 20 sec. to process.
Try different prompts, for example "what is gpt-4, answer in german" or "write a poem about gpt-4".\n\n
Technology: Gradio UI using the OpenAI API via the AI-first LangChain toolkit with Whisper (speech-to-text) and GPT-4 (LLM) foundation models, as well as the AI-native Chroma embedding database."""

gr.close_all()

demo = gr.Interface(fn = invoke,
                    inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1),
                              gr.Textbox(label = "YouTube URL", value = "https://www.youtube.com/watch?v=--khbXchTeE", lines = 1),
                              gr.Radio([True, False], label = "Process Video", value = True),
                              gr.Textbox(label = "Prompt", value = "what is gpt-4", lines = 1)],
                    outputs = [gr.Textbox(label = "Completion", lines = 1)],
                    title = "Generative AI - LLM & RAG",
                    description = description)

demo.launch()
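
# A minimal sketch (not part of the app) of calling invoke() programmatically instead of
# through the Gradio UI, e.g. to smoke-test the RAG chain. It assumes OPENAI_API_KEY is set
# in the environment and that the example video was already transcribed and indexed into
# CHROMA_DIR on a previous run, so process_video is False and only retrieval + generation run:
#
#   answer = invoke(os.environ["OPENAI_API_KEY"],
#                   "https://www.youtube.com/watch?v=--khbXchTeE",
#                   False,
#                   "summarize the video in one sentence")
#   print(answer)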