Spaces:
Running on T4
search pipeline updated
Browse files
RAG/rag_DocumentSearcher.py
CHANGED
@@ -310,11 +310,11 @@ def query_(awsauth,inputs, session_id,search_types):
 
     #print("re-rank")
 
-    if(st.session_state.input_is_rerank == True and len(total_context)):
-        ques = [{"question":question}]
-        ans = [{"answer":total_context}]
-
-        total_context = re_ranker.re_rank('rag','Cross Encoder',"",ques, ans)
+    # if(st.session_state.input_is_rerank == True and len(total_context)):
+    #     ques = [{"question":question}]
+    #     ans = [{"answer":total_context}]
+
+    #     total_context = re_ranker.re_rank('rag','Cross Encoder',"",ques, ans)
 
     llm_prompt = prompt_template.format(context=total_context[0],question=question)
     output = invoke_models.invoke_llm_model( "\n\nHuman: {input}\n\nAssistant:".format(input=llm_prompt) ,False)

(Note: this commit disables the cross-encoder re-ranking step by commenting it out; the removed-line bodies above are reconstructed from the commented-out added lines — verify against the original commit.)