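# Gradio app: Retrieval Augmented Generation (RAG) over a YouTube video.
# Pipeline: download audio -> Whisper transcription -> chunking -> Chroma
# vector store -> RetrievalQA with GPT-4.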
import gradio as gr
import openai
import os
import shutil

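# LangChain building blocks: retrieval QA chain, chat model, YouTube audio
# loading, Whisper-based parsing, embeddings, prompt template, text splitting,
# and the Chroma vector store.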
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

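# Read OPENAI_API_KEY (and any other settings) from a local .env file, if present.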
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())

#openai.api_key = os.environ["OPENAI_API_KEY"]

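# Prompt template for the QA chain: answer strictly from the retrieved
# context, keep it concise, and append a fixed sign-off.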
template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer as concise as possible. Always say "\n\nThanks for using the app, Bernd Straehle 🔥" at the end of the answer.
{context}
Question: {question}
Helpful Answer: """

QA_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = template)

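# End-to-end RAG pipeline, run once per request: transcribe the video,
# index the transcript, answer the prompt, then clean up.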
def invoke(openai_api_key, youtube_url, prompt):
    openai.api_key = openai_api_key
    youtube_dir = "docs/youtube/"
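    # Download the video's audio track and transcribe it with OpenAI Whisper.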
    loader = GenericLoader(YoutubeAudioLoader([youtube_url], youtube_dir), OpenAIWhisperParser())
    docs = loader.load()
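    # Split the transcript into overlapping chunks so each fits the embedding model.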
    text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
    splits = text_splitter.split_documents(docs)
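    # Embed the chunks and index them in a temporary Chroma vector store.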
    chroma_dir = "docs/chroma/"
    vectordb = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = chroma_dir)
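    # GPT-4 at temperature 0 for deterministic answers; the retriever injects
    # the most relevant transcript chunks into the prompt.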
    llm = ChatOpenAI(model_name = "gpt-4", temperature = 0)
    qa_chain = RetrievalQA.from_chain_type(llm, retriever = vectordb.as_retriever(), return_source_documents = True, chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
    result = qa_chain({"query": prompt})
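    # Remove the downloaded audio and the vector store so each run starts clean.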
    shutil.rmtree(youtube_dir)
    shutil.rmtree(chroma_dir)
    return result["result"]

description = """The app demonstrates how to use a <strong>Large Language Model</strong> (LLM) with <strong>Retrieval Augmented Generation</strong> (RAG) on external data.
                 Enter an OpenAI API key, a YouTube URL (the external data), and a prompt to search the video, analyse its sentiment, summarise or translate it, and more.\n\n
                 Implementation: <a href='https://www.gradio.app/'>Gradio</a> UI using the <a href='https://platform.openai.com/'>OpenAI</a> API
                 via the AI-first toolkit <a href='https://www.langchain.com/'>LangChain</a> with the foundation models
                 <a href='https://openai.com/research/whisper'>Whisper</a> (speech to text) and <a href='https://openai.com/research/gpt-4'>GPT-4</a> (LLM use cases)."""

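# Build and launch the Gradio UI.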
gr.close_all()
demo = gr.Interface(fn = invoke,
                    inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1),
                              gr.Textbox(label = "YouTube URL", value = "https://www.youtube.com/watch?v=GJm7H9IP5SU", lines = 1),
                              gr.Textbox(label = "Prompt", value = "Translate song into English", lines = 1)],
                    outputs = [gr.Textbox(label = "Completion", lines = 1)],
                    title = "Generative AI - LLM & RAG",
                    description = description)
demo.launch()