Spaces:
Running
on
Zero
Running
on
Zero
jedick
committed on
Commit
·
f027363
1
Parent(s):
82453cf
Hide progress bar for BM25S retrieve call
Browse files
- app.py +5 -4
- mods/bm25s_retriever.py +1 -1
app.py
CHANGED
@@ -58,10 +58,10 @@ def cleanup_graph(request: gr.Request):
|
|
58 |
timestamp = datetime.now().replace(microsecond=0).isoformat()
|
59 |
if request.session_hash in graph_instances["local"]:
|
60 |
del graph_instances["local"][request.session_hash]
|
61 |
-
print(f"{timestamp} -
|
62 |
if request.session_hash in graph_instances["remote"]:
|
63 |
del graph_instances["remote"][request.session_hash]
|
64 |
-
print(f"{timestamp} -
|
65 |
|
66 |
|
67 |
def append_content(chunk_messages, history, thinking_about):
|
@@ -98,8 +98,6 @@ def run_workflow(input, history, compute_mode, thread_id, session_hash):
|
|
98 |
|
99 |
# Get graph instance
|
100 |
graph = graph_instances[compute_mode].get(session_hash)
|
101 |
-
if graph is not None:
|
102 |
-
print(f"Get {compute_mode} graph for session {session_hash}")
|
103 |
|
104 |
if graph is None:
|
105 |
# Notify when we're loading the local model because it takes some time
|
@@ -126,6 +124,9 @@ def run_workflow(input, history, compute_mode, thread_id, session_hash):
|
|
126 |
print(f"{timestamp} - Set {compute_mode} graph for session {session_hash}")
|
127 |
# Notify when model finishes loading
|
128 |
gr.Success(f"{compute_mode}", duration=4, title=f"Model loaded!")
|
|
|
|
|
|
|
129 |
|
130 |
# print(f"Using thread_id: {thread_id}")
|
131 |
|
|
|
58 |
timestamp = datetime.now().replace(microsecond=0).isoformat()
|
59 |
if request.session_hash in graph_instances["local"]:
|
60 |
del graph_instances["local"][request.session_hash]
|
61 |
+
print(f"{timestamp} - Del local graph for session {request.session_hash}")
|
62 |
if request.session_hash in graph_instances["remote"]:
|
63 |
del graph_instances["remote"][request.session_hash]
|
64 |
+
print(f"{timestamp} - Del remote graph for session {request.session_hash}")
|
65 |
|
66 |
|
67 |
def append_content(chunk_messages, history, thinking_about):
|
|
|
98 |
|
99 |
# Get graph instance
|
100 |
graph = graph_instances[compute_mode].get(session_hash)
|
|
|
|
|
101 |
|
102 |
if graph is None:
|
103 |
# Notify when we're loading the local model because it takes some time
|
|
|
124 |
print(f"{timestamp} - Set {compute_mode} graph for session {session_hash}")
|
125 |
# Notify when model finishes loading
|
126 |
gr.Success(f"{compute_mode}", duration=4, title=f"Model loaded!")
|
127 |
+
else:
|
128 |
+
timestamp = datetime.now().replace(microsecond=0).isoformat()
|
129 |
+
print(f"{timestamp} - Get {compute_mode} graph for session {session_hash}")
|
130 |
|
131 |
# print(f"Using thread_id: {thread_id}")
|
132 |
|
mods/bm25s_retriever.py
CHANGED
@@ -166,6 +166,6 @@ class BM25SRetriever(BaseRetriever):
|
|
166 |
return [self.docs[i] for i in return_docs.documents[0]]
|
167 |
else:
|
168 |
return_docs, scores = self.vectorizer.retrieve(
|
169 |
-
processed_query, self.docs, k=self.k
|
170 |
)
|
171 |
return [return_docs[0, i] for i in range(return_docs.shape[1])]
|
|
|
166 |
return [self.docs[i] for i in return_docs.documents[0]]
|
167 |
else:
|
168 |
return_docs, scores = self.vectorizer.retrieve(
|
169 |
+
processed_query, self.docs, k=self.k, show_progress=False
|
170 |
)
|
171 |
return [return_docs[0, i] for i in range(return_docs.shape[1])]
|