print("eeeh")
import asyncio
import json
from websockets.server import serve
import os
from langchain_chroma import Chroma
from langchain_community.embeddings import *
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_huggingface.llms import HuggingFaceEndpoint
from langchain_community.document_loaders import TextLoader
from langchain_community.document_loaders import DirectoryLoader
from langchain import hub
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain.chains import create_history_aware_retriever
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.runnables.history import RunnableWithMessageHistory
from langchain_core.chat_history import BaseChatMessageHistory
from langchain_community.chat_message_histories import ChatMessageHistory
from multiprocessing import Process
print()
print("-------")
print("started")
print("-------")
# Per-session scratch space for the most recently retrieved documents.
userData = {}

async def echo(websocket):
    async for message in websocket:
        data = json.loads(message)
        # Skip malformed payloads instead of closing the whole connection.
        if "message" not in data or "token" not in data:
            continue
        m = data["message"]
        token = data["token"]
        docs = retriever.invoke(m)
        userData.setdefault(token, {})["docs"] = str(docs)
        response = conversational_rag_chain.invoke(
            {"input": m},
            config={"configurable": {"session_id": token}},
        )["answer"]
        await websocket.send(json.dumps({"response": response}))
async def main():
    async with serve(echo, "0.0.0.0", 7860):
        await asyncio.Future()  # run the server until cancelled
def g():
    # echo() looks these up at module level, so publish them as globals.
    global retriever, conversational_rag_chain
    if not os.path.isdir('database'):
        os.system("unzip database.zip")
    loader = DirectoryLoader('./database', glob="./*.txt", loader_cls=TextLoader)
    documents = loader.load()
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    splits = text_splitter.split_documents(documents)
    print()
    print("-------")
    print("TextSplitter, DirectoryLoader")
    print("-------")
    persist_directory = 'db'
    model_name = "BAAI/bge-large-en"
    model_kwargs = {'device': 'cpu'}        # only used by the local alternative below
    encode_kwargs = {'normalize_embeddings': True}
    # Local alternative, kept for reference:
    # embedding = HuggingFaceBgeEmbeddings(
    #     model_name=model_name,
    #     model_kwargs=model_kwargs,
    #     encode_kwargs=encode_kwargs,
    #     show_progress=True,
    # )
    embedding = HuggingFaceInferenceAPIEmbeddings(
        api_key=os.environ["HUGGINGFACE_API_KEY"], model_name=model_name
    )
    print()
    print("-------")
    print("Embeddings")
    print("-------")
    vectorstore = Chroma.from_documents(
        documents=splits, embedding=embedding, persist_directory=persist_directory
    )

    def format_docs(docs):
        return "\n\n".join(doc.page_content for doc in docs)

    retriever = vectorstore.as_retriever()
    prompt = hub.pull("rlm/rag-prompt")
    llm = HuggingFaceEndpoint(repo_id="mistralai/Mixtral-8x7B-Instruct-v0.1")
    # History-free baseline chain; superseded by the history-aware
    # retrieval chain built below.
    rag_chain = (
        {"context": retriever | format_docs, "question": RunnablePassthrough()}
        | prompt
        | llm
        | StrOutputParser()
    )
    print()
    print("-------")
    print("Retriever, Prompt, LLM, Rag_Chain")
    print("-------")
    ### Contextualize question ###
    contextualize_q_system_prompt = """Given a chat history and the latest user question \
which might reference context in the chat history, formulate a standalone question \
which can be understood without the chat history. Do NOT answer the question, \
just reformulate it if needed and otherwise return it as is."""
    contextualize_q_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", contextualize_q_system_prompt),
            MessagesPlaceholder("chat_history"),
            ("human", "{input}"),
        ]
    )
    history_aware_retriever = create_history_aware_retriever(
        llm, retriever, contextualize_q_prompt
    )
    ### Answer question ###
    qa_system_prompt = """You are an assistant for question-answering tasks. \
Use the following pieces of retrieved context to answer the question. \
If you don't know the answer, just say that you don't know. \
Use three sentences maximum and keep the answer concise.

{context}"""
    qa_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", qa_system_prompt),
            MessagesPlaceholder("chat_history"),
            ("human", "{input}"),
        ]
    )
    question_answer_chain = create_stuff_documents_chain(llm, qa_prompt)
    rag_chain = create_retrieval_chain(history_aware_retriever, question_answer_chain)
    ### Statefully manage chat history ###
    store = {}

    def get_session_history(session_id: str) -> BaseChatMessageHistory:
        if session_id not in store:
            store[session_id] = ChatMessageHistory()
        return store[session_id]

    conversational_rag_chain = RunnableWithMessageHistory(
        rag_chain,
        get_session_history,
        input_messages_key="input",
        history_messages_key="chat_history",
        output_messages_key="answer",
    )
def f():
    asyncio.run(main())

# Build the RAG pipeline first, then serve. Launching f and g in two
# separate Processes (as originally written) would leave the server
# process without retriever, conversational_rag_chain and userData,
# which g() creates, so run both steps in this process instead.
g()
f()
"""
websocket
streamlit app ~> backend
{"token": "random", "message": "what is something"} ~> backend
backend ~> {"response": "something is something"}
streamlit app ~> display response
""" |