# NOTE(review): removed non-Python extraction artifacts that preceded the code
# (a "File size" line, git blame hashes, and a line-number gutter) — they would
# be syntax errors if this file were executed as-is.
# gradio imports
import gradio as gr
import os
import time
# Imports
import os
import openai
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.document_loaders import TextLoader
from langchain.text_splitter import MarkdownTextSplitter
# from langchain.chat_models import ChatOpenAI
# from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
# from langchain.document_loaders import TextLoader
# from langchain.memory import ConversationBufferMemory
# from langchain.chat_models import ChatOpenAI
from langchain.chains.router import MultiRetrievalQAChain
from langchain.llms import OpenAI
# Page-level CSS: center the main column and cap its width.
css="""
#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
"""
# HTML header rendered at the top of the app.
title = """
<div style="text-align: center;max-width: 700px;">
<h1>Chat about Bulevar's Menu</h1>
</div>
"""
# Example question shown under the chat box to hint at usage.
prompt_hints = """
<div style="text-align: center;max-width: 700px;">
<p style="text-align: left;">What is in the crab tostada?<br />
</div>
"""
# from index import PERSIST_DIRECTORY, CalendarIndex
# On-disk Chroma persist directories for the two vector stores.
REST_PERSIST_DIRECTORY = "chromadb_bul_details"
FOOD_GUIDE_PERSIST_DIRECTORY = "chromadb_food_guide"
# Create embeddings
# # create memory object
# from langchain.memory import ConversationBufferMemory
# memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
def loading_pdf():
    """Status callback: report that the backend is still loading."""
    status = "Loading..."
    return status
def loading_database(open_ai_key):
    """Build the multi-retriever QA chain from the user's OpenAI key.

    Creates two Chroma vector stores — hard-coded restaurant-detail
    sentences and the markdown food guide — persists each on first run,
    and installs the routing chain into the module-level global ``chain``
    consumed by ``infer``.

    Args:
        open_ai_key: OpenAI API key entered in the UI textbox.

    Returns:
        Status string for the UI: "Ready" on success, or a warning when
        no key was supplied.
    """
    # Guard on truthiness: the original `is not None` check let an empty
    # string through, which only failed later inside the OpenAI client.
    if not open_ai_key:
        return "You forgot OpenAI API key"

    os.environ['OPENAI_API_KEY'] = open_ai_key
    openai.api_key = open_ai_key
    embeddings = OpenAIEmbeddings(openai_api_key=open_ai_key)

    # Record directory existence BEFORE Chroma creates them: the original
    # checked `os.path.exists` after `from_texts`/`from_documents` had
    # already made the directory, so persist() was never actually called.
    rest_is_new = not os.path.exists(REST_PERSIST_DIRECTORY)
    food_guide_is_new = not os.path.exists(FOOD_GUIDE_PERSIST_DIRECTORY)

    # Restaurant-detail sentences (hours, location, concept, owners).
    bulevar_restaurant_texts = [
        "Bulevar is open Sunday through Wednesday from 5-9pm, and Thursday through Saturday from 4-10pm. It is open for lunch on Friday from 11-3pm",
        "Bulevar is located in the Arboretum at 360 and Mopac, next to Eddie V's",
        "Bulevar offers tasty Mexican Cuisine with a laid back style to fine-dining.",
        "Bulevar is another restaurant created by Guy and Larry. With the success of their ATX Cocina, Bulevar has created another unique dining experience with high quality dishes."
    ]
    bulevar_details_retriever = Chroma.from_texts(bulevar_restaurant_texts, embeddings, persist_directory=REST_PERSIST_DIRECTORY)
    if rest_is_new:
        save_dir(bulevar_details_retriever)

    # Food-guide store: split the markdown menu into ~1000-char chunks.
    loader = TextLoader('raw_text/food_guide.md')
    documents = loader.load()
    text_splitter = MarkdownTextSplitter(chunk_size=1000, chunk_overlap=0)
    docs = text_splitter.split_documents(documents)
    docs_retriever = Chroma.from_documents(docs, embeddings, persist_directory=FOOD_GUIDE_PERSIST_DIRECTORY)
    if food_guide_is_new:
        save_dir(docs_retriever)

    # Router metadata: the chain picks a retriever by matching the question
    # against these descriptions.
    retriever_infos = [
        {
            "name": "Food Guide",
            "description": "Good for answering questions about the menu",
            "retriever": docs_retriever.as_retriever()
        },
        {
            "name": "Bulevar Restaurant Details",
            "description": "Good for answering questions about Bulevar's hours, and restaurant details such as its mission, history, and owners.",
            "retriever": bulevar_details_retriever.as_retriever()
        }
    ]
    global chain
    chain = MultiRetrievalQAChain.from_retrievers(OpenAI(temperature=0, openai_api_key=open_ai_key), retriever_infos, verbose=True)
    return "Ready"
def save_dir(vectorstore_retriever):
    """Flush the given vector store to its configured persist directory."""
    vectorstore_retriever.persist()
def add_text(history, text):
    """Append the user's message as a (text, None) pair awaiting a reply.

    Returns the extended history and an empty string so the UI clears
    the question textbox.
    """
    return history + [(text, None)], ""
def bot(history):
    """Stream the chain's answer into the last history slot, one char at a time.

    Yields the whole history after each appended character so the UI shows
    a typing effect. Mutates ``history[-1][1]`` in place.
    """
    answer = infer(history[-1][0], history)
    history[-1][1] = ""
    for ch in answer:
        history[-1][1] += ch
        time.sleep(0.05)
        yield history
def infer(question, history):
    """Answer *question* with the global routing chain built by loading_database.

    ``history`` is accepted for interface compatibility with ``bot`` but is
    not consulted — the chain as configured here is stateless. (The original
    body built an always-empty ``chat_history`` list and never used it;
    that dead code and the commented-out debug prints are removed.)

    Raises NameError if called before loading_database has set ``chain``.
    """
    result = chain({"input": question})
    return result["result"]
def update_message(question_component, chat_prompts):
    """Copy the selected prompt's name into the question textbox component."""
    selected = chat_prompts.get_name()
    question_component.value = selected
    return None
# --- Gradio UI wiring (module level: builds and launches the app on import) ---
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.HTML(title)
        with gr.Column():
            with gr.Row():
                # API key entry; type="password" masks the input.
                openai_key = gr.Textbox(label="OpenAI API key", type="password")
                submit_api_key = gr.Button("Submit")
            with gr.Row():
                # Read-only indicator: shows "Ready" or the missing-key warning.
                langchain_status = gr.Textbox(label="Status", placeholder="", interactive=False)
        chatbot = gr.Chatbot([], elem_id="chatbot").style(height=350)
        question = gr.Textbox(label="Question", placeholder="Type your question and hit Enter ")
        submit_btn = gr.Button("Send Message")
        gr.HTML(prompt_hints)
    # Build the retrieval chain when the key is submitted; queue=False so the
    # status update is not serialized behind chat requests.
    submit_api_key.click(loading_database, inputs=[openai_key], outputs=[langchain_status], queue=False)
    # demo.load(loading_database, None, langchain_status)
    # Enter key and Send button do the same thing: record the user turn via
    # add_text, then stream the bot's reply into the chat window.
    question.submit(add_text, [chatbot, question], [chatbot, question]).then(
        bot, chatbot, chatbot
    )
    submit_btn.click(add_text, [chatbot, question], [chatbot, question]).then(
        bot, chatbot, chatbot)
demo.queue(concurrency_count=2, max_size=20).launch()