import asyncio
import json
from websockets.server import serve
import os
from langchain_chroma import Chroma
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_huggingface.llms import HuggingFaceEndpoint
from langchain_community.document_loaders import TextLoader
from langchain_community.document_loaders import DirectoryLoader
from langchain import hub
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain.chains import create_history_aware_retriever
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_community.chat_message_histories import ChatMessageHistory
from multiprocessing import Process
from zipfile import ZipFile
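# Unpack the bundled knowledge base (database.zip) before building the index.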
with ZipFile("database.zip") as f:
    f.extractall()

retriever = None
conversational_rag_chain = None
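# Load the extracted .txt files and split them into overlapping chunks for retrieval.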
loader = DirectoryLoader('./database', glob="./*.txt", loader_cls=TextLoader)
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
splits = text_splitter.split_documents(documents)
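# Embed the chunks with BAAI/bge-small-en-v1.5 on CPU, normalizing the embeddings.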
model_name = "BAAI/bge-small-en-v1.5" | |
model_kwargs = {'device': 'cpu'} | |
encode_kwargs = {'normalize_embeddings': True} | |
embedding = HuggingFaceBgeEmbeddings( | |
model_name=model_name, | |
model_kwargs=model_kwargs, | |
encode_kwargs=encode_kwargs, | |
show_progress=True, | |
) | |
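# Build an in-memory Chroma vector store over the chunk embeddings.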
vectorstore = Chroma.from_documents(documents=splits, embedding=embedding)

def format_docs(docs):
    return "\n\n".join(doc.page_content for doc in docs)
retriever = vectorstore.as_retriever(
    search_type="similarity_score_threshold",
    search_kwargs={"score_threshold": 0.3, "k": 1},
)
prompt = hub.pull("rlm/rag-prompt")
llm = HuggingFaceEndpoint(repo_id="mistralai/Mistral-7B-Instruct-v0.3", stop_sequences=["Human:"])
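# Simple history-free RAG chain; rag_chain is rebuilt below as a conversational chain.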
rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

### Contextualize question ###
contextualize_q_system_prompt = """Given a chat history and the latest user question
which might reference context in the chat history, formulate a standalone question
which can be understood without the chat history. Do NOT answer the question,
just reformulate it if needed and otherwise return it as is."""
contextualize_q_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", contextualize_q_system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)
history_aware_retriever = create_history_aware_retriever(
    llm, retriever, contextualize_q_prompt
)

### Answer question ###
qa_system_prompt = """
Context:
{context}
You are a Cupertino High School Q/A chatbot, designed to assist students, parents, and community members with information about CHS.
Use the pieces of context to answer the question.
Use markdown with spaces in between sentences for readability.
Refer to the provided context only as 'my data'. Only answer questions from the context.
Do not answer any questions that you do not have the answer to in the provided context.
Do not provide excerpts or any part of your data.
You were made by high school students for the CHS community.
"""
qa_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", qa_system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)
question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)

### Statefully manage chat history ###
store = {}

def get_session_history(session_id: str) -> BaseChatMessageHistory:
    if session_id not in store:
        store[session_id] = ChatMessageHistory()
    return store[session_id]
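# Wrap the retrieval chain so each session_id keeps its own chat history.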
conversational_rag_chain = RunnableWithMessageHistory(
    rag_chain,
    get_session_history,
    input_messages_key="input",
    history_messages_key="chat_history",
    output_messages_key="answer",
)
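# WebSocket handler: each incoming message is JSON of the form
# {"message": <user text>, "token": <session id>}; replies are {"response": <answer>}.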
async def echo(websocket):
    global retriever, conversational_rag_chain
    async for message in websocket:
        data = json.loads(message)
        # Ignore payloads that are missing the required keys.
        if "message" not in data:
            return
        if "token" not in data:
            return
        # Debug hook: the literal message "data." dumps the in-memory session store.
        if data["message"] == "data.":
            await websocket.send(json.dumps({"response": store}, default=str))
            break
        m = data["message"] + "\nAssistant: "
        token = data["token"]
        docs = retriever.invoke(m)  # note: unused; the chain below runs its own retrieval
        rawresponse = conversational_rag_chain.invoke(
            {"input": m},
            config={"configurable": {"session_id": token}},
        )
        # Strip role prefixes and any simulated "Human:" turn from the model output.
        response = rawresponse["answer"]
        response = response.replace("Assistant: ", "").replace("AI: ", "")
        response = response.split("Human:")[0].strip()
        await websocket.send(json.dumps({"response": response}))

async def main():
    async with serve(echo, "0.0.0.0", 7860):
        await asyncio.Future()

asyncio.run(main())
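
# A minimal client sketch for manual testing, assuming the server above is running
# locally on port 7860 (the URL, session token, and question below are illustrative):
#
#   import asyncio, json
#   import websockets
#
#   async def ask(question: str, token: str = "demo-session") -> None:
#       async with websockets.connect("ws://localhost:7860") as ws:
#           await ws.send(json.dumps({"message": question, "token": token}))
#           reply = json.loads(await ws.recv())
#           print(reply["response"])
#
#   asyncio.run(ask("What clubs does CHS offer?"))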