change ksize in RAG
RAG/rag_DocumentSearcher.py (CHANGED)
@@ -312,15 +312,15 @@ def query_(awsauth,inputs, session_id,search_types):
             is_table_in_result = True
             #table_res = invoke_models.read_from_table(hit["_source"]["table"],question) # use for complex analytical dataframe questions (uses panda at the background))
             df.append({'name':hit["_source"]["table"],'text':hit["_source"]["processed_element"]})
-            context_tables.append(id + " : Reference from a table :" + hit["_source"]["processed_element"])#table_res+"\n\n"+
+            context_tables.append(str(id) + " : Reference from a table :" + hit["_source"]["processed_element"])#table_res+"\n\n"+
 
         else:
             if(hit["_source"]["image"]!="None"):
                 with open(parent_dirname+'/figures/'+st.session_state.input_index+"/"+hit["_source"]["raw_element_type"].split("_")[1].replace(".jpg","")+"-resized.jpg", "rb") as read_img:
                     input_encoded = base64.b64encode(read_img.read()).decode("utf8")
-                context.append(id + " : Reference from a image :" + invoke_models.generate_image_captions_llm(input_encoded,question))
+                context.append(str(id) + " : Reference from a image :" + invoke_models.generate_image_captions_llm(input_encoded,question))
             else:
-                context.append(id + " : Reference from a text chunk :" + hit["_source"]["processed_element"])
+                context.append(str(id) + " : Reference from a text chunk :" + hit["_source"]["processed_element"])
 
         if(hit["_source"]["image"]!="None"):
             images_2.append({'file':hit["_source"]["image"],'caption':hit["_source"]["processed_element"]})
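The only functional change in this hunk is wrapping id in str() before building the context strings. A minimal sketch of why that matters, assuming id holds a numeric hit counter (the loop that assigns id is outside the hunk, so this is an illustration, not the repository's code):

# Sketch: string concatenation with a non-string id raises TypeError.
processed_element = "sample text chunk from a search hit"

id = 3  # assumption: id is an int (e.g., a result counter), not a string

# Old code: int + str is not allowed in Python.
try:
    line = id + " : Reference from a text chunk :" + processed_element
except TypeError as err:
    print("old code fails:", err)

# Patched code: str(id) makes the concatenation safe, and is a no-op
# if id is already a string.
line = str(id) + " : Reference from a text chunk :" + processed_element
print(line)

The same conversion is applied to the table and image branches, so all three context lists are built consistently regardless of the type of id.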