import gradio as gr
import json, openai, os, wandb
from langchain.chains import LLMChain, RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader, WebBaseLoader
from langchain.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import OpenAIWhisperParser
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.vectorstores import MongoDBAtlasVectorSearch
from pymongo import MongoClient
from wandb.sdk.data_types.trace_tree import Trace
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())
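# Credentials: W&B and MongoDB Atlas keys come from the environment (.env); the OpenAI API key is entered in the UI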
WANDB_API_KEY = os.environ["WANDB_API_KEY"]
MONGODB_URI = os.environ["MONGODB_ATLAS_CLUSTER_URI"]
client = MongoClient(MONGODB_URI)
MONGODB_DB_NAME = "langchain_db"
MONGODB_COLLECTION_NAME = "gpt-4"
MONGODB_COLLECTION = client[MONGODB_DB_NAME][MONGODB_COLLECTION_NAME]
MONGODB_INDEX_NAME = "default"
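# Shared settings for document splitting, retrieval, and the chat model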
config = {
"chunk_overlap": 150,
"chunk_size": 1500,
"k": 3,
"model": "gpt-4",
"temperature": 0,
}
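# Base answer-style instructions, embedded into a plain LLM prompt and a RAG prompt (which adds {context})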
template = """If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer as concise as possible. Always say
"🧠 Thanks for using the app - Bernd" at the end of the answer. """
llm_template = "Answer the question at the end. " + template + "Question: {question} Helpful Answer: "
rag_template = "Use the following pieces of context to answer the question at the end. " + template + "{context} Question: {question} Helpful Answer: "
LLM_CHAIN_PROMPT = PromptTemplate(input_variables = ["question"],
                                  template = llm_template)
RAG_CHAIN_PROMPT = PromptTemplate(input_variables = ["context", "question"],
                                  template = rag_template)
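# Local directories for the Chroma store and YouTube audio downloads, plus the GPT-4 source documents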
CHROMA_DIR = "/data/chroma"
YOUTUBE_DIR = "/data/youtube"
PDF_URL = "https://arxiv.org/pdf/2303.08774.pdf"
WEB_URL = "https://openai.com/research/gpt-4"
YOUTUBE_URL_1 = "https://www.youtube.com/watch?v=--khbXchTeE"
YOUTUBE_URL_2 = "https://www.youtube.com/watch?v=hdhZwyf24mE"
YOUTUBE_URL_3 = "https://www.youtube.com/watch?v=vw-KWfKwvTQ"
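# Load the PDF, web page, and YouTube sources (audio transcribed via Whisper) and split them into overlapping chunks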
def document_loading_splitting():
    # Document loading
    docs = []
    # Load PDF
    loader = PyPDFLoader(PDF_URL)
    docs.extend(loader.load())
    # Load Web
    loader = WebBaseLoader(WEB_URL)
    docs.extend(loader.load())
    # Load YouTube
    loader = GenericLoader(YoutubeAudioLoader([YOUTUBE_URL_1,
                                               YOUTUBE_URL_2,
                                               YOUTUBE_URL_3], YOUTUBE_DIR),
                           OpenAIWhisperParser())
    docs.extend(loader.load())
    # Document splitting
    text_splitter = RecursiveCharacterTextSplitter(chunk_overlap = config["chunk_overlap"],
                                                   chunk_size = config["chunk_size"])
    splits = text_splitter.split_documents(docs)
    return splits
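# Embed the splits with OpenAI embeddings and persist them to Chroma (local) or MongoDB Atlas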
def document_storage_chroma(splits):
    Chroma.from_documents(documents = splits,
                          embedding = OpenAIEmbeddings(disallowed_special = ()),
                          persist_directory = CHROMA_DIR)
def document_storage_mongodb(splits):
    MongoDBAtlasVectorSearch.from_documents(documents = splits,
                                            embedding = OpenAIEmbeddings(disallowed_special = ()),
                                            collection = MONGODB_COLLECTION,
                                            index_name = MONGODB_INDEX_NAME)
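# Re-open the vector stores for querying; the llm and prompt arguments are currently unused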
def document_retrieval_chroma(llm, prompt):
    db = Chroma(embedding_function = OpenAIEmbeddings(),
                persist_directory = CHROMA_DIR)
    return db
def document_retrieval_mongodb(llm, prompt):
    db = MongoDBAtlasVectorSearch.from_connection_string(MONGODB_URI,
                                                         MONGODB_DB_NAME + "." + MONGODB_COLLECTION_NAME,
                                                         OpenAIEmbeddings(disallowed_special = ()),
                                                         index_name = MONGODB_INDEX_NAME)
    return db
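# Plain LLM completion, no retrieval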
def llm_chain(llm, prompt):
    llm_chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT)
    completion = llm_chain.run({"question": prompt})
    return completion
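# RetrievalQA chain: retrieve the top-k chunks and return source documents alongside the answer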
def rag_chain(llm, prompt, db):
    rag_chain = RetrievalQA.from_chain_type(llm,
                                            chain_type_kwargs = {"prompt": RAG_CHAIN_PROMPT},
                                            retriever = db.as_retriever(search_kwargs = {"k": config["k"]}),
                                            return_source_documents = True)
    completion = rag_chain({"query": prompt})
    return completion
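# Gradio callback: validate inputs, run the selected chain (Chroma, MongoDB, or no RAG), and log a trace to Weights & Biases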
def invoke(openai_api_key, rag_option, prompt):
    if (openai_api_key == ""):
        raise gr.Error("OpenAI API Key is required.")
    if (rag_option is None):
        raise gr.Error("Retrieval Augmented Generation is required.")
    if (prompt == ""):
        raise gr.Error("Prompt is required.")
    wandb.init(project = "openai-llm-rag")
    completion = ""
    try:
        llm = ChatOpenAI(model_name = config["model"],
                         openai_api_key = openai_api_key,
                         temperature = config["temperature"])
        if (rag_option == "Chroma"):
            #splits = document_loading_splitting()
            #document_storage_chroma(splits)
            db = document_retrieval_chroma(llm, prompt)
            completion = rag_chain(llm, prompt, db)
            completion = completion["result"]
        elif (rag_option == "MongoDB"):
            #splits = document_loading_splitting()
            #document_storage_mongodb(splits)
            db = document_retrieval_mongodb(llm, prompt)
            completion = rag_chain(llm, prompt, db)
            completion = completion["result"]
        else:
            completion = llm_chain(llm, prompt)
    except Exception as e:
        completion = e
        raise gr.Error(e)
    finally:
        trace = Trace(
            name="test",
            kind="chain",
            #status_code=status,
            #status_message=status_message,
            metadata={
                "temperature": config["temperature"],
                #"token_usage": token_usage,
                #"model_name": model_name,
            },
            #start_time_ms=start_time_ms,
            #end_time_ms=end_time_ms,
            #inputs={"system_prompt": system_message, "query": query},
            #outputs={"response": response_text},
        )
        trace.log("test")
        wandb.finish()
    return completion
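# HTML description shown in the Gradio UI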
description = """<strong>Overview:</strong> Context-aware multimodal reasoning application using a <strong>large language model (LLM)</strong> with
<strong>retrieval augmented generation (RAG)</strong>.
See the <a href='https://huggingface.co/spaces/bstraehle/openai-llm-rag/blob/main/openai-llm-rag.png'>architecture diagram</a>.\n\n
<strong>Instructions:</strong> Enter an OpenAI API key and perform text generation use cases on <a href='""" + YOUTUBE_URL_1 + """'>YouTube</a>,
<a href='""" + PDF_URL + """'>PDF</a>, and <a href='""" + WEB_URL + """'>web</a> data published after LLM knowledge cutoff (example: GPT-4 data).
<ul style="list-style-type:square;">
<li>Set "Retrieval Augmented Generation" to "<strong>Off</strong>" and submit prompt "What is GPT-4?" The <strong>LLM without RAG</strong> does not know the answer.</li>
<li>Set "Retrieval Augmented Generation" to "<strong>Chroma</strong>" or "<strong>MongoDB</strong>" and experiment with prompts. The <strong>LLM with RAG</strong> knows the answer:</li>
<ol>
<li>What are GPT-4's media capabilities in 5 emojis and 1 sentence?</li>
<li>List GPT-4's exam scores and benchmark results.</li>
<li>Compare GPT-4 to GPT-3.5 in markdown table format.</li>
<li>Write a Python program that calls the GPT-4 API.</li>
<li>What is the GPT-4 API's cost and rate limit? Answer in English, Arabic, Chinese, Hindi, and Russian in JSON format.</li>
</ol>
</ul>\n\n
<strong>Technology:</strong> <a href='https://www.gradio.app/'>Gradio</a> UI using the <a href='https://openai.com/'>OpenAI</a> API and
AI-native <a href='https://www.trychroma.com/'>Chroma</a> embedding database or
<a href='https://www.mongodb.com/blog/post/introducing-atlas-vector-search-build-intelligent-applications-semantic-search-ai'>MongoDB</a> vector search.
<strong>Speech-to-text</strong> via <a href='https://openai.com/research/whisper'>whisper-1</a> model, <strong>text embedding</strong> via
<a href='https://openai.com/blog/new-and-improved-embedding-model'>text-embedding-ada-002</a> model, and <strong>text generation</strong> via
<a href='""" + WEB_URL + """'>gpt-4</a> model. Implementation via AI-first <a href='https://www.langchain.com/'>LangChain</a> toolkit.
RAG evaluation via <a href='https://wandb.ai/bstraehle'>Weights & Biases</a>."""
gr.close_all()
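# UI: API key, RAG selector, and prompt in; completion out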
demo = gr.Interface(fn=invoke,
                    inputs = [gr.Textbox(label = "OpenAI API Key", value = "sk-", lines = 1),
                              gr.Radio(["Off", "Chroma", "MongoDB"], label="Retrieval Augmented Generation", value = "Off"),
                              gr.Textbox(label = "Prompt", value = "What is GPT-4?", lines = 1)],
                    outputs = [gr.Textbox(label = "Completion", lines = 1)],
                    title = "Generative AI - LLM & RAG",
                    description = description)
demo.launch()