prasadnu committed on
Commit
f3c44a2
·
1 Parent(s): ad75ca5

rerank model

Browse files
RAG/rag_DocumentSearcher.py CHANGED
@@ -12,7 +12,7 @@ headers = {"Content-Type": "application/json"}
12
  host = "https://search-opensearchservi-shjckef2t7wo-iyv6rajdgxg6jas25aupuxev6i.us-west-2.es.amazonaws.com/"
13
 
14
  parent_dirname = "/".join((os.path.dirname(__file__)).split("/")[0:-1])
15
- @st.cache_data
16
  def query_(awsauth,inputs, session_id,search_types):
17
 
18
  print("using index: "+st.session_state.input_index)
 
12
  host = "https://search-opensearchservi-shjckef2t7wo-iyv6rajdgxg6jas25aupuxev6i.us-west-2.es.amazonaws.com/"
13
 
14
  parent_dirname = "/".join((os.path.dirname(__file__)).split("/")[0:-1])
15
+
16
  def query_(awsauth,inputs, session_id,search_types):
17
 
18
  print("using index: "+st.session_state.input_index)
utilities/invoke_models.py CHANGED
@@ -30,7 +30,7 @@ bedrock_runtime_client = boto3.client(
30
  # max_length = 16
31
  # num_beams = 4
32
  # gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
33
- @st.cache_data
34
  def invoke_model(input):
35
  response = bedrock_runtime_client.invoke_model(
36
  body=json.dumps({
@@ -43,7 +43,7 @@ def invoke_model(input):
43
 
44
  response_body = json.loads(response.get("body").read())
45
  return response_body.get("embedding")
46
- @st.cache_data
47
  def invoke_model_mm(text,img):
48
  body_ = {
49
  "inputText": text,
@@ -64,7 +64,7 @@ def invoke_model_mm(text,img):
64
  response_body = json.loads(response.get("body").read())
65
  #print(response_body)
66
  return response_body.get("embedding")
67
- @st.cache_data
68
  def invoke_llm_model(input,is_stream):
69
  if(is_stream == False):
70
  response = bedrock_runtime_client.invoke_model(
@@ -145,7 +145,7 @@ def invoke_llm_model(input,is_stream):
145
  # stream = response.get('body')
146
 
147
  # return stream
148
- @st.cache_data
149
  def read_from_table(file,question):
150
  print("started table analysis:")
151
  print("-----------------------")
@@ -181,7 +181,7 @@ def read_from_table(file,question):
181
  )
182
  agent_res = agent.invoke(question)['output']
183
  return agent_res
184
- @st.cache_data
185
  def generate_image_captions_llm(base64_string,question):
186
 
187
  # ant_client = Anthropic()
 
30
  # max_length = 16
31
  # num_beams = 4
32
  # gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
33
+
34
  def invoke_model(input):
35
  response = bedrock_runtime_client.invoke_model(
36
  body=json.dumps({
 
43
 
44
  response_body = json.loads(response.get("body").read())
45
  return response_body.get("embedding")
46
+
47
  def invoke_model_mm(text,img):
48
  body_ = {
49
  "inputText": text,
 
64
  response_body = json.loads(response.get("body").read())
65
  #print(response_body)
66
  return response_body.get("embedding")
67
+
68
  def invoke_llm_model(input,is_stream):
69
  if(is_stream == False):
70
  response = bedrock_runtime_client.invoke_model(
 
145
  # stream = response.get('body')
146
 
147
  # return stream
148
+
149
  def read_from_table(file,question):
150
  print("started table analysis:")
151
  print("-----------------------")
 
181
  )
182
  agent_res = agent.invoke(question)['output']
183
  return agent_res
184
+
185
  def generate_image_captions_llm(base64_string,question):
186
 
187
  # ant_client = Anthropic()