Update app.py
app.py
CHANGED
@@ -20,11 +20,6 @@ from langchain_core.chat_history import BaseChatMessageHistory
 from langchain_community.chat_message_histories import ChatMessageHistory
 from multiprocessing import Process
 
-print()
-print("-------")
-print("started")
-print("-------")
-
 if not os.path.isdir('database'):
     os.system("unzip database.zip")
 
@@ -42,21 +37,16 @@ print("-------")
 
 persist_directory = 'db'
 
-# embedding = HuggingFaceInferenceAPIEmbeddings(api_key=os.environ["HUGGINGFACE_API_KEY"], model=)
 model_name = "BAAI/bge-large-en"
 model_kwargs = {'device': 'cpu'}
 encode_kwargs = {'normalize_embeddings': True}
-embedding = HuggingFaceBgeEmbeddings(
+"""embedding = HuggingFaceBgeEmbeddings(
     model_name=model_name,
     model_kwargs=model_kwargs,
     encode_kwargs=encode_kwargs,
     show_progress=True,
-)
-
-print()
-print("-------")
-print("Embeddings")
-print("-------")
+)"""
+embedding = HuggingFaceInferenceAPIEmbeddings(api_key=os.environ["HUGGINGFACE_API_KEY"], model=model_name)
 
 async def echo(websocket):
     global retriever, conversational_rag_chain
@@ -93,7 +83,7 @@ def format_docs(docs):
 retriever = vectorstore.as_retriever()
 
 prompt = hub.pull("rlm/rag-prompt")
-llm = HuggingFaceEndpoint(repo_id="mistralai/
+llm = HuggingFaceEndpoint(repo_id="mistralai/Mistral-7B-Instruct-v0.3")
 rag_chain = (
     {"context": retriever | format_docs, "question": RunnablePassthrough()}
     | prompt
@@ -101,11 +91,6 @@ rag_chain = (
     | StrOutputParser()
 )
 
-print()
-print("-------")
-print("Retriever, Prompt, LLM, Rag_Chain")
-print("-------")
-
 ### Contextualize question ###
 contextualize_q_system_prompt = """Given a chat history and the latest user question \
 which might reference context in the chat history, formulate a standalone question \
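
The core of the commit is moving embedding generation off the Space's CPU and onto Hugging Face's hosted Inference API. A minimal sketch of that setup, assuming HUGGINGFACE_API_KEY is configured as a Space secret; note that in langchain_community the class exposes the field as model_name, so the model= keyword used in the diff may be silently ignored.

```python
# Minimal sketch of the hosted-embedding setup the diff switches to,
# independent of the rest of app.py. Assumes HUGGINGFACE_API_KEY is
# configured as a Space secret.
import os

from langchain_community.embeddings import HuggingFaceInferenceAPIEmbeddings

embedding = HuggingFaceInferenceAPIEmbeddings(
    api_key=os.environ["HUGGINGFACE_API_KEY"],
    # The class's field is model_name; a bare model= keyword may be
    # silently ignored, leaving the default sentence-transformers model.
    model_name="BAAI/bge-large-en",
)

# Each embedding call is now an HTTP request to the Inference API rather
# than a local forward pass on the Space's CPU.
query_vector = embedding.embed_query("What is stored in the database?")
```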
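Downstream of the embeddings, the diff completes the previously truncated HuggingFaceEndpoint line and leaves the LCEL chain intact. A sketch of the full wiring under stated assumptions: the embedding object from the sketch above, and a Chroma store under the persist_directory 'db' named in the diff (the vectorstore construction itself is not shown in this commit).

```python
# Sketch of the retrieval chain around the completed HuggingFaceEndpoint
# line. Reuses `embedding` from the sketch above; the Chroma store under
# 'db' matches the persist_directory in the diff but is an assumption.
from langchain import hub
from langchain_community.llms import HuggingFaceEndpoint
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough

vectorstore = Chroma(persist_directory="db", embedding_function=embedding)
retriever = vectorstore.as_retriever()


def format_docs(docs):
    # Collapse the retrieved Documents into one context string for the prompt.
    return "\n\n".join(doc.page_content for doc in docs)


prompt = hub.pull("rlm/rag-prompt")  # needs the langchainhub package
# Authenticates via the HUGGINGFACEHUB_API_TOKEN environment variable.
llm = HuggingFaceEndpoint(repo_id="mistralai/Mistral-7B-Instruct-v0.3")

rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)

answer = rag_chain.invoke("What does the database describe?")
```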
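The last hunk ends at the contextualize_q_system_prompt that drives the history-aware side of the app (the conversational_rag_chain global referenced in echo). A sketch of how such a prompt is typically wired up; nothing below appears in this commit, it is the stock LangChain history-aware retriever pattern, and the tail of the prompt text past the two lines shown in the diff is assumed.

```python
# Typical wiring for a contextualize-question prompt like the one the
# last hunk ends on. Not taken from the diff: this is the standard
# LangChain history-aware retriever pattern, sketched as an assumption.
from langchain.chains import create_history_aware_retriever
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

contextualize_q_system_prompt = (
    "Given a chat history and the latest user question "
    "which might reference context in the chat history, formulate a standalone question "
    # Everything past this point is the stock wording, not shown in the diff.
    "which can be understood without the chat history. Do NOT answer the question, "
    "just reformulate it if needed and otherwise return it as is."
)

contextualize_q_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", contextualize_q_system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "{input}"),
    ]
)

# Rewrites each incoming question against the chat history before it hits
# the retriever, so follow-ups that say "it" or "that" still retrieve well.
# `llm` and `retriever` are the objects from the sketch above.
history_aware_retriever = create_history_aware_retriever(
    llm, retriever, contextualize_q_prompt
)
```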