prasadnu committed on
Commit
d8df773
·
1 Parent(s): 4252633

change ksize in RAG

Browse files
RAG/rag_DocumentSearcher.py CHANGED
@@ -307,20 +307,20 @@ def query_(awsauth,inputs, session_id,search_types):
307
  for hit in hits[0:5]:
308
 
309
 
310
- # if(hit["_source"]["raw_element_type"] == 'table'):
311
- # #print("Need to analyse table")
312
- # is_table_in_result = True
313
- # table_res = invoke_models.read_from_table(hit["_source"]["table"],question)
314
- # df.append({'name':hit["_source"]["table"],'text':hit["_source"]["processed_element"]})
315
- # context_tables.append(table_res+"\n\n"+hit["_source"]["processed_element"])
316
 
317
- # else:
318
- if(hit["_source"]["image"]!="None"):
319
- with open(parent_dirname+'/figures/'+st.session_state.input_index+"/"+hit["_source"]["raw_element_type"].split("_")[1].replace(".jpg","")+"-resized.jpg", "rb") as read_img:
320
- input_encoded = base64.b64encode(read_img.read()).decode("utf8")
321
- context.append(invoke_models.generate_image_captions_llm(input_encoded,question))
322
  else:
323
- context.append(hit["_source"]["processed_element"])
 
 
 
 
 
324
 
325
  if(hit["_source"]["image"]!="None"):
326
  images_2.append({'file':hit["_source"]["image"],'caption':hit["_source"]["processed_element"]})
 
307
  for hit in hits[0:5]:
308
 
309
 
310
+ if(hit["_source"]["raw_element_type"] == 'table'):
311
+ #print("Need to analyse table")
312
+ is_table_in_result = True
313
+ #table_res = invoke_models.read_from_table(hit["_source"]["table"],question) # use for complex analytical dataframe questions (uses panda at the background))
314
+ df.append({'name':hit["_source"]["table"],'text':hit["_source"]["processed_element"]})
315
+ context_tables.append(hit["_source"]["processed_element"])#table_res+"\n\n"+
316
 
 
 
 
 
 
317
  else:
318
+ if(hit["_source"]["image"]!="None"):
319
+ with open(parent_dirname+'/figures/'+st.session_state.input_index+"/"+hit["_source"]["raw_element_type"].split("_")[1].replace(".jpg","")+"-resized.jpg", "rb") as read_img:
320
+ input_encoded = base64.b64encode(read_img.read()).decode("utf8")
321
+ context.append(invoke_models.generate_image_captions_llm(input_encoded,question))
322
+ else:
323
+ context.append(hit["_source"]["processed_element"])
324
 
325
  if(hit["_source"]["image"]!="None"):
326
  images_2.append({'file':hit["_source"]["image"],'caption':hit["_source"]["processed_element"]})
pages/Multimodal_Conversational_Search.py CHANGED
@@ -269,9 +269,10 @@ def render_answer(question,answer,index,res_img):
269
  idx = idx+1
270
  if(len(answer["table"] )>0):
271
  #with st.expander("Table:"):
272
- df = pd.read_csv(answer["table"][0]['name'],skipinitialspace = True, on_bad_lines='skip',delimiter='`')
273
- df.fillna(method='pad', inplace=True)
274
- st.table(df)
 
275
  #with st.expander("Raw sources:"):
276
  st.write(answer["source"])
277
  with col_3:
 
269
  idx = idx+1
270
  if(len(answer["table"] )>0):
271
  #with st.expander("Table:"):
272
+ for table in answer["table"]:
273
+ df = pd.read_csv(table['name'],skipinitialspace = True, on_bad_lines='skip',delimiter='`')
274
+ df.fillna(method='pad', inplace=True)
275
+ st.table(df)
276
  #with st.expander("Raw sources:"):
277
  st.write(answer["source"])
278
  with col_3: