import gradio as gr
import os

from langchain.chains import LLMChain, RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())

template = """Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up 
              an answer. Keep the answer as concise as possible. Always say "🔥 Thanks for using the app - Bernd Straehle." at the end of the answer. 
              {context} Question: {question} Helpful Answer: """

QA_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"], template = template)

# Prompt for the plain LLM path (no RAG): the user prompt is passed to the model unchanged
LLM_CHAIN_PROMPT = PromptTemplate(input_variables = ["question"], template = "{question}")

CHROMA_DIR  = "docs/chroma"
YOUTUBE_DIR = "docs/youtube"

YOUTUBE_URL = "https://www.youtube.com/watch?v=--khbXchTeE"

MODEL_NAME  = "gpt-4"

def invoke(openai_api_key, use_rag, prompt):
    llm = ChatOpenAI(model_name = MODEL_NAME, openai_api_key = openai_api_key, temperature = 0)
    if use_rag:
        # The key entered in the UI is also used for the embedding and Whisper calls
        if os.path.isdir(CHROMA_DIR):
            # Reuse the persisted Chroma index if it has already been built
            vector_db = Chroma(persist_directory = CHROMA_DIR, embedding_function = OpenAIEmbeddings(openai_api_key = openai_api_key))
        else:
            # Download the YouTube audio, transcribe it with Whisper, split the transcript, and index the chunks
            loader = GenericLoader(YoutubeAudioLoader([YOUTUBE_URL], YOUTUBE_DIR), OpenAIWhisperParser(api_key = openai_api_key))
            docs = loader.load()
            text_splitter = RecursiveCharacterTextSplitter(chunk_size = 1500, chunk_overlap = 150)
            splits = text_splitter.split_documents(docs)
            vector_db = Chroma.from_documents(documents = splits, embedding = OpenAIEmbeddings(openai_api_key = openai_api_key), persist_directory = CHROMA_DIR)
        rag_chain = RetrievalQA.from_chain_type(llm, retriever = vector_db.as_retriever(search_kwargs = {"k": 3}), return_source_documents = True, chain_type_kwargs = {"prompt": QA_CHAIN_PROMPT})
        result = rag_chain({"query": prompt})
    else:
        # Without RAG, send the prompt to the LLM directly; output_key = "result" keeps the return below uniform
        llm_chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT, output_key = "result")
        result = llm_chain({"question": prompt})
    return result["result"]
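
# A minimal, commented-out sketch (not executed by the app) of what the retriever above does
# under the hood: embed the query and return the k most similar transcript chunks from the
# persisted Chroma index. Assumes the index in CHROMA_DIR has already been built and a valid
# OpenAI key is available via the environment.
#
#   vector_db = Chroma(persist_directory = CHROMA_DIR, embedding_function = OpenAIEmbeddings())
#   for doc in vector_db.similarity_search("what is gpt-4", k = 3):
#       print(doc.page_content[:100])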

description = """<strong>Overview:</strong> The app demonstrates how to use a Large Language Model (LLM) with Retrieval Augmented Generation (RAG) on external data 
                 (in this case a YouTube video, but it could be PDFs, URLs, or other structured/unstructured private/public 
                 <a href='https://raw.githubusercontent.com/bstraehle/ai-ml-dl/c38b224c196fc984aab6b6cc6bdc666f8f4fbcff/langchain/document-loaders.png'>data sources</a>).\n\n
                 <strong>Instructions:</strong> Enter an OpenAI API key and perform LLM use cases on a <a href='https://www.youtube.com/watch?v=--khbXchTeE'>short video about GPT-4</a> 
                 (semantic search, sentiment analysis, summarization, translation, etc.)
                 <ul style="list-style-type:square;">
                 <li>Set "Use RAG" to "False" and submit prompt "what is gpt-4". The LLM <strong>without</strong> RAG does not know the answer.</li>
                 <li>Set "Use RAG" to "True" and submit prompt "what is gpt-4". The LLM <strong>with</strong> RAG knows the answer.</li>
                 <li>Experiment with different prompts, for example "what is gpt-4, answer in german" or "write a haiku about gpt-4".</li>
                 </ul>
                 In a production system, the external data would be processed in a batch job. One idea would be to run these LLM use cases over the 
                 <a href='https://www.youtube.com/playlist?list=PL2yQDdvlhXf_hIzmfHCdbcXj2hS52oP9r'>AWS re:Invent playlist</a>.\n\n
                 <strong>Technology:</strong> <a href='https://www.gradio.app/'>Gradio</a> UI using <a href='https://platform.openai.com/'>OpenAI</a> API via AI-first 
                 <a href='https://www.langchain.com/'>LangChain</a> toolkit with <a href='https://openai.com/research/whisper'>Whisper</a> (speech-to-text) and 
                 <a href='https://openai.com/research/gpt-4'>GPT-4</a> (LLM) foundation models as well as AI-native <a href='https://www.trychroma.com/'>Chroma</a> 
                 embedding database."""

gr.close_all()
demo = gr.Interface(fn=invoke, 
                    inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1), gr.Radio([True, False], label="Use RAG", value = False), gr.Textbox(label = "Prompt", value = "what is gpt-4", lines = 1)],
                    outputs = [gr.Textbox(label = "Completion", lines = 1)],
                    title = "Generative AI - LLM & RAG",
                    description = description)
demo.queue().launch()
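
# A minimal sketch of how the running app could be called programmatically with the Gradio
# Python client. Assumes gradio_client is installed and the app is served locally on the
# default port; the key, flag, and prompt values below are placeholders.
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860/")
#   completion = client.predict("sk-...", True, "what is gpt-4", api_name = "/predict")
#   print(completion)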