import gradio as gr
import openai
import os

from langchain.chains import LLMChain, RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())

# The OpenAI API key is entered through the UI; alternatively, uncomment the
# next line to read it from the environment:
#openai.api_key = os.environ["OPENAI_API_KEY"]

template = """If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer as concise as possible. Always say 
              "🔥 Thanks for using the app - Bernd Straehle." at the end of the answer. """

llm_template = "Answer the question at the end. " + template + "Question: {question} Helpful Answer: "
rag_template = "Use the following pieces of context to answer the question at the end. " + template + "{context} Question: {question} Helpful Answer: "

LLM_CHAIN_PROMPT = PromptTemplate(input_variables = ["question"], 
                                  template = llm_template)
RAG_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], 
                                  template = rag_template)
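
# For illustration (not part of the app flow), formatting the LLM prompt with
# a sample question shows the final string sent to the model:
#
#   LLM_CHAIN_PROMPT.format(question = "What is GPT-4?")
#   # -> 'Answer the question at the end. If you don't know the answer, ...
#   #     Question: What is GPT-4? Helpful Answer: '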

CHROMA_DIR  = "/data/chroma"
YOUTUBE_DIR = "/data/youtube"

YOUTUBE_URL_1 = "https://www.youtube.com/watch?v=--khbXchTeE"
YOUTUBE_URL_2 = "https://www.youtube.com/watch?v=hdhZwyf24mE"
YOUTUBE_URL_3 = "https://www.youtube.com/watch?v=vw-KWfKwvTQ"
YOUTUBE_URL_4 = "https://www.youtube.com/watch?v=kiHpqXNCPj8"
YOUTUBE_URL_5 = "https://www.youtube.com/shorts/3x95mw35dJY"
YOUTUBE_URL_6 = "https://www.youtube.com/shorts/zg-DS23wq0c"
YOUTUBE_URL_7 = "https://www.youtube.com/shorts/cS4fyhKZ8bQ"

PDF_URL = "https://arxiv.org/pdf/2303.08774.pdf"

MODEL_NAME  = "gpt-4"

def invoke(openai_api_key, use_rag, prompt):
    # temperature = 0 keeps the completions (near-)deterministic
    llm = ChatOpenAI(model_name = MODEL_NAME, 
                     openai_api_key = openai_api_key, 
                     temperature = 0)
    if use_rag:
        # Document loading, splitting, and storage
        docs = []
        # The YouTube/Whisper loader below is commented out, so only the GPT-4
        # technical report (PDF) is indexed; uncomment it to transcribe and
        # index the videos as well.
        #loader = GenericLoader(YoutubeAudioLoader([YOUTUBE_URL_1,
        #                                           YOUTUBE_URL_2,
        #                                           YOUTUBE_URL_3,
        #                                           YOUTUBE_URL_4,
        #                                           YOUTUBE_URL_5,
        #                                           YOUTUBE_URL_6,
        #                                           YOUTUBE_URL_7], YOUTUBE_DIR),
        #                       OpenAIWhisperParser())
        #docs.extend(loader.load())
        loader = PyPDFLoader(PDF_URL)
        docs.extend(loader.load())
        # Split the documents into overlapping chunks so that related context
        # is preserved across chunk boundaries
        text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500,
                                                       chunk_overlap = 150)
        splits = text_splitter.split_documents(docs)
        # Embed the chunks and persist them to the Chroma directory
        vector_db = Chroma.from_documents(documents = splits, 
                                          embedding = OpenAIEmbeddings(), 
                                          persist_directory = CHROMA_DIR)
        # Document retrieval: re-open the persisted store for querying
        vector_db = Chroma(embedding_function = OpenAIEmbeddings(),
                           persist_directory = CHROMA_DIR)
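        # For illustration, the persisted store can also be queried directly,
        # bypassing the QA chain (a sketch, not used by the app):
        #   similar_docs = vector_db.similarity_search(prompt, k = 3)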
        # Answer the question from the top-3 most similar chunks
        rag_chain = RetrievalQA.from_chain_type(llm, 
                                                chain_type_kwargs = {"prompt": RAG_CHAIN_PROMPT}, 
                                                retriever = vector_db.as_retriever(search_kwargs = {"k": 3}), 
                                                return_source_documents = True)
        result = rag_chain({"query": prompt})
        result = result["result"]
    else:
        chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT)
        result = chain.run({"question": prompt})
    return result
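
# For illustration, invoke() can be exercised without the UI; the key below is
# a placeholder, not a real credential:
#
#   answer = invoke("sk-<your-key>", True, "What is GPT-4?")
#   print(answer)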

description = """<strong>Overview:</strong> The app demonstrates how to use a Large Language Model (LLM) with Retrieval Augmented Generation (RAG) on external data 
                 (YouTube videos, PDFs, URLs, or other <a href='https://raw.githubusercontent.com/bstraehle/ai-ml-dl/c38b224c196fc984aab6b6cc6bdc666f8f4fbcff/langchain/document-loaders.png'>data sources</a>).\n\n
                 <strong>Instructions:</strong> Enter an OpenAI API key and perform LLM use cases (semantic search, summarization, translation, etc.) on 
                 <strong>YouTube videos about GPT-4</strong>, created after its training cutoff.
                 <ul style="list-style-type:square;">
                 <li>Set "Retrieval Augmented Generation" to "<strong>False</strong>" and submit prompt "What is GPT-4?" The LLM <strong>without</strong> RAG does not know the answer.</li>
                 <li>Set "Retrieval Augmented Generation" to "<strong>True</strong>" and submit prompt "What is GPT-4?" The LLM <strong>with</strong> RAG knows the answer.</li>
                 <li>Experiment with prompts, e.g. "What is GPT-4 in one sentence in German", "List pros and cons of GPT-4", or "Write a Python program to call the GPT-4 API".</li>
                 </ul>\n\n
                 <strong>Technology:</strong> <a href='https://www.gradio.app/'>Gradio</a> UI using <a href='https://platform.openai.com/'>OpenAI</a> API via AI-first 
                 <a href='https://www.langchain.com/'>LangChain</a> toolkit with <a href='https://openai.com/research/whisper'>Whisper</a> (speech-to-text) and 
                 <a href='https://openai.com/research/gpt-4'>GPT-4</a> (LLM) foundation models as well as AI-native <a href='https://www.trychroma.com/'>Chroma</a> 
                 embedding database."""

gr.close_all()
demo = gr.Interface(fn=invoke, 
                    inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1), 
                              gr.Radio([True, False], label="Retrieval Augmented Generation", value = False), 
                              gr.Textbox(label = "Prompt", value = "What is GPT-4?", lines = 1)],
                    outputs = [gr.Textbox(label = "Completion", lines = 1)],
                    title = "Generative AI - LLM & RAG",
                    description = description)
demo.launch()
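
# Note: demo.launch() serves the app locally; Gradio also supports, for
# example, demo.launch(share = True) to create a temporary public link.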