Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
Update app.py
Browse files
app.py
CHANGED
@@ -16,6 +16,7 @@ from langchain_core.messages import (
|
|
16 |
from langchain_huggingface import ChatHuggingFace
|
17 |
from langchain_core.output_parsers import StrOutputParser
|
18 |
from langchain_huggingface import HuggingFaceEndpoint
|
|
|
19 |
#from qdrant_client import QdrantClient
|
20 |
from dotenv import load_dotenv
|
21 |
import pkg_resources
|
@@ -108,12 +109,34 @@ async def chat(query,history,sources,reports,subtype,year):
|
|
108 |
else:
|
109 |
vectorstore = vectorstores["allreports"]
|
110 |
|
111 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
112 |
context_retrieved_lst = []
|
113 |
question_lst= [query]
|
114 |
for question in question_lst:
|
115 |
retriever = vectorstore.as_retriever(
|
116 |
-
search_type="similarity_score_threshold", search_kwargs={"score_threshold": 0.6, "k": 3})
|
117 |
|
118 |
context_retrieved = retriever.invoke(question)
|
119 |
|
|
|
16 |
from langchain_huggingface import ChatHuggingFace
|
17 |
from langchain_core.output_parsers import StrOutputParser
|
18 |
from langchain_huggingface import HuggingFaceEndpoint
|
19 |
+
from qdrant_client.http import models as rest
|
20 |
#from qdrant_client import QdrantClient
|
21 |
from dotenv import load_dotenv
|
22 |
import pkg_resources
|
|
|
109 |
else:
|
110 |
vectorstore = vectorstores["allreports"]
|
111 |
|
112 |
+
###-------------------------------------Construct Filter------------------------------------
|
113 |
+
if len(reports) == 0:
|
114 |
+
filter=rest.Filter(
|
115 |
+
must=[
|
116 |
+
rest.FieldCondition(
|
117 |
+
key="metadata.subtype",
|
118 |
+
match=rest.MatchValue(value=subtype)
|
119 |
+
),
|
120 |
+
rest.FieldCondition(
|
121 |
+
key="metadata.year",
|
122 |
+
match=rest.MatchAny(any=year)
|
123 |
+
)])
|
124 |
+
else:
|
125 |
+
filter=rest.Filter(
|
126 |
+
must=[
|
127 |
+
rest.FieldCondition(
|
128 |
+
key="metadata.filename",
|
129 |
+
match=rest.MatchAny(any=reports)
|
130 |
+
)])
|
131 |
+
|
132 |
+
|
133 |
+
##------------------------------get context----------------------------------------------------
|
134 |
+
|
135 |
context_retrieved_lst = []
|
136 |
question_lst= [query]
|
137 |
for question in question_lst:
|
138 |
retriever = vectorstore.as_retriever(
|
139 |
+
search_type="similarity_score_threshold", search_kwargs={"score_threshold": 0.6, "k": 3, "filter": filter})
|
140 |
|
141 |
context_retrieved = retriever.invoke(question)
|
142 |
|