import gradio as gr
import shutil, openai, os

from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
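# Note: these are the legacy `langchain` import paths this app was written against;
# newer LangChain releases move most of them into separate packages (for example
# langchain-community), so pin the LangChain version when reproducing this setup.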

# Load environment variables (e.g. OPENAI_API_KEY) from a local .env file, if present.
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())

# The OpenAI API key is supplied via the UI textbox in invoke() rather than the environment:
#openai.api_key = os.environ["OPENAI_API_KEY"]

template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up 
              an answer. Keep the answer as concise as possible. Always say "🔥 Thanks for using the app - Bernd Straehle." at the end of the answer. 
              {context} Question: {question} Helpful Answer: """

QA_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = template)

# Working directories for the persisted Chroma index and the downloaded YouTube audio.
CHROMA_DIR  = "docs/chroma"
YOUTUBE_DIR = "docs/youtube"

MODEL_NAME  = "gpt-4"
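
# End-to-end RAG pipeline driven by the Gradio UI:
#   1. optionally download the YouTube audio, transcribe it with Whisper, split the
#      transcript into overlapping chunks, embed them, and persist them to Chroma
#   2. retrieve the most relevant chunks for the user's prompt
#   3. have GPT-4 answer using the custom prompt template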

def invoke(openai_api_key, youtube_url, process_video, prompt):
    # Use the key entered in the UI for all OpenAI calls (Whisper, embeddings, chat).
    openai.api_key = openai_api_key
    if process_video:
        # Rebuild the vector store from scratch: remove any previous Chroma index and downloaded audio.
        if os.path.isdir(CHROMA_DIR):
            shutil.rmtree(CHROMA_DIR)
        if os.path.isdir(YOUTUBE_DIR):
            shutil.rmtree(YOUTUBE_DIR)
        # Download the YouTube audio and transcribe it with Whisper.
        loader = GenericLoader(YoutubeAudioLoader([youtube_url], YOUTUBE_DIR), OpenAIWhisperParser())
        docs = loader.load()
        # Split the transcript into overlapping chunks and embed them into a persisted Chroma index.
        text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
        splits = text_splitter.split_documents(docs)
        vector_db = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(), persist_directory = CHROMA_DIR)
    else:
        # Reuse the previously persisted Chroma index.
        vector_db = Chroma(persist_directory = CHROMA_DIR, embedding_function = OpenAIEmbeddings())
    llm = ChatOpenAI(model_name = MODEL_NAME, temperature = 0)
    # Retrieve the 3 most relevant chunks and answer with the custom RAG prompt.
    qa_chain = RetrievalQA.from_chain_type(llm, retriever = vector_db.as_retriever(search_kwargs = {"k": 3}), return_source_documents = True, chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
    result = qa_chain({"query": prompt})
    return result["result"]
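
# Example (sketch, not executed by the app): calling invoke() directly without the UI,
# assuming a valid OpenAI API key and that the video has already been processed into
# CHROMA_DIR (so process_video can be False):
#
#   answer = invoke(os.environ["OPENAI_API_KEY"], "https://www.youtube.com/watch?v=--khbXchTeE", False, "what is gpt-4")
#   print(answer)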

description = """<strong>Overview:</strong> The app demonstrates how to use a <strong>Large Language Model</strong> (LLM) with <strong>Retrieval Augmented Generation</strong> 
                 (RAG) on external data (YouTube videos in this case, but it could be PDFs, URLs, databases, or other structured/unstructured private/public 
                 <a href='https://raw.githubusercontent.com/bstraehle/ai-ml-dl/c38b224c196fc984aab6b6cc6bdc666f8f4fbcff/langchain/document-loaders.png'>data sources</a>).\n\n
                 <strong>Instructions:</strong> Enter an OpenAI API key and try the following LLM use cases (semantic search, sentiment analysis, summarization, translation, etc.):
                 <ul style="list-style-type:square;">
                 <li>Set "Process Video" to "False" and submit prompt "what is gpt-4". The LLM <strong>without</strong> RAG does not know the answer.</li>
                 <li>Set "Process Video" to "True" and submit prompt "what is gpt-4". The LLM <strong>with</strong> RAG knows the answer.</li>
                 <li>Set "Process Video" to "False" and experiment with different prompts, for example "what is gpt-4, answer in german" or "write a haiku about gpt-4".</li>
                 </ul>
                 In a production system, processing external data would typically run as a batch job, while prompting happens during user interaction.\n\n
                 <strong>Technology:</strong> <a href='https://www.gradio.app/'>Gradio</a> UI using <a href='https://platform.openai.com/'>OpenAI</a> API 
                 via AI-first <a href='https://www.langchain.com/'>LangChain</a> toolkit with <a href='https://openai.com/research/whisper'>Whisper</a> (speech-to-text) 
                 and <a href='https://openai.com/research/gpt-4'>GPT-4</a> (LLM) foundation models as well as AI-native 
                 <a href='https://www.trychroma.com/'>Chroma</a> embedding database."""

gr.close_all()
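# Build the Gradio UI: the inputs map one-to-one to the invoke() parameters, and
# demo.launch() serves the app locally (pass share=True for a temporary public link).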
demo = gr.Interface(fn=invoke, 
                    inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1),
                              gr.Textbox(label = "YouTube URL", value = "https://www.youtube.com/watch?v=--khbXchTeE", lines = 1),
                              gr.Radio([True, False], label = "Process Video", value = False),
                              gr.Textbox(label = "Prompt", value = "what is gpt-4", lines = 1)],
                    outputs = [gr.Textbox(label = "Completion", lines = 1)],
                    title = "Generative AI - LLM & RAG",
                    description = description)
demo.launch()