prasadnu committed
Commit ddab07b · 1 Parent(s): d8df773

change ksize in RAG

Files changed (1): RAG/rag_DocumentSearcher.py (+5 -5)
RAG/rag_DocumentSearcher.py CHANGED
@@ -304,7 +304,7 @@ def query_(awsauth,inputs, session_id,search_types):
     images_2 = []
     is_table_in_result = False
     df = []
-    for hit in hits[0:5]:
+    for id,hit in enumerate(hits[0:5]):


         if(hit["_source"]["raw_element_type"] == 'table'):
@@ -312,15 +312,15 @@ def query_(awsauth,inputs, session_id,search_types):
             is_table_in_result = True
             #table_res = invoke_models.read_from_table(hit["_source"]["table"],question) # use for complex analytical dataframe questions (uses pandas in the background)
             df.append({'name':hit["_source"]["table"],'text':hit["_source"]["processed_element"]})
-            context_tables.append(hit["_source"]["processed_element"])#table_res+"\n\n"+
+            context_tables.append(str(id) + " : Reference from a table :" + hit["_source"]["processed_element"])#table_res+"\n\n"+

         else:
             if(hit["_source"]["image"]!="None"):
                 with open(parent_dirname+'/figures/'+st.session_state.input_index+"/"+hit["_source"]["raw_element_type"].split("_")[1].replace(".jpg","")+"-resized.jpg", "rb") as read_img:
                     input_encoded = base64.b64encode(read_img.read()).decode("utf8")
-                context.append(invoke_models.generate_image_captions_llm(input_encoded,question))
+                context.append(str(id) + " : Reference from an image :" + invoke_models.generate_image_captions_llm(input_encoded,question))
             else:
-                context.append(hit["_source"]["processed_element"])
+                context.append(str(id) + " : Reference from a text chunk :" + hit["_source"]["processed_element"])

             if(hit["_source"]["image"]!="None"):
                 images_2.append({'file':hit["_source"]["image"],'caption':hit["_source"]["processed_element"]})
@@ -340,7 +340,7 @@ def query_(awsauth,inputs, session_id,search_types):
     total_context = context_tables + context


-    llm_prompt = prompt_template.format(context=total_context[0],question=question)
+    llm_prompt = prompt_template.format(context="\n".join(total_context[0:3]),question=question)
     output = invoke_models.invoke_llm_model( "\n\nHuman: {input}\n\nAssistant:".format(input=llm_prompt) ,False)
     if(len(images_2)==0):
         images_2 = images
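
The net effect of the commit is easier to see in isolation. The following is a minimal, self-contained sketch, not the repository's code: hits, prompt_template, and question are hypothetical stand-ins for the values query_() gets from the OpenSearch response and the module's own template. It reproduces the two new behaviours: every hit is labelled with its enumerate index and source type, and the prompt now receives the top three labelled references joined with newlines instead of only total_context[0], which is presumably the "ksize" the commit title refers to. Since enumerate() yields an int, the index is cast with str() before concatenation.

# Minimal sketch of the new context assembly; all data below is hypothetical.
hits = [
    {"_source": {"raw_element_type": "table", "processed_element": "Q1 revenue by region", "image": "None", "table": "t1.csv"}},
    {"_source": {"raw_element_type": "text", "processed_element": "Revenue grew 12% YoY", "image": "None"}},
    {"_source": {"raw_element_type": "text", "processed_element": "APAC led the growth", "image": "None"}},
    {"_source": {"raw_element_type": "text", "processed_element": "EMEA was flat", "image": "None"}},
]
prompt_template = "Context:\n{context}\n\nQuestion: {question}"  # stand-in template
question = "Which region grew fastest?"

context_tables, context = [], []
for id, hit in enumerate(hits[0:5]):                # enumerate() supplies the reference index
    src = hit["_source"]
    if src["raw_element_type"] == "table":
        context_tables.append(str(id) + " : Reference from a table :" + src["processed_element"])
    else:
        context.append(str(id) + " : Reference from a text chunk :" + src["processed_element"])

total_context = context_tables + context
# Top three labelled references, newline-joined, instead of only total_context[0].
llm_prompt = prompt_template.format(context="\n".join(total_context[0:3]), question=question)
print(llm_prompt)

Running this prints a prompt whose context block lists references 0, 1 and 2, so the slice in total_context[0:3] directly controls how many retrieved chunks the LLM sees.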