prasadnu committed on
Commit 59c4f4e · 1 Parent(s): 5a7796a

rerank model

RAG/rag_DocumentSearcher.py CHANGED
@@ -12,7 +12,7 @@ headers = {"Content-Type": "application/json"}
 host = "https://search-opensearchservi-shjckef2t7wo-iyv6rajdgxg6jas25aupuxev6i.us-west-2.es.amazonaws.com/"
 
 parent_dirname = "/".join((os.path.dirname(__file__)).split("/")[0:-1])
-@st.cache_resource
+
 def query_(awsauth,inputs, session_id,search_types):
 
     print("using index: "+st.session_state.input_index)
utilities/invoke_models.py CHANGED
@@ -11,7 +11,7 @@ import streamlit as st
 #import torch
 
 region = 'us-east-1'
-@st.cache_resource
+
 def get_bedrock_client():
     return boto3.client(
         'bedrock-runtime',
@@ -34,7 +34,7 @@ bedrock_runtime_client = get_bedrock_client()
 # max_length = 16
 # num_beams = 4
 # gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
-@st.cache_resource
+
 def invoke_model(input):
     response = bedrock_runtime_client.invoke_model(
         body=json.dumps({
@@ -47,7 +47,7 @@ def invoke_model(input):
 
     response_body = json.loads(response.get("body").read())
     return response_body.get("embedding")
-@st.cache_resource
+
 def invoke_model_mm(text,img):
     body_ = {
         "inputText": text,
@@ -68,7 +68,7 @@ def invoke_model_mm(text,img):
     response_body = json.loads(response.get("body").read())
     #print(response_body)
     return response_body.get("embedding")
-@st.cache_resource
+
 def invoke_llm_model(input,is_stream):
     if(is_stream == False):
         response = bedrock_runtime_client.invoke_model(
@@ -149,7 +149,7 @@ def invoke_llm_model(input,is_stream):
     # stream = response.get('body')
 
     # return stream
-@st.cache_resource
+
 def read_from_table(file,question):
     print("started table analysis:")
     print("-----------------------")
@@ -185,7 +185,7 @@ def read_from_table(file,question):
     )
     agent_res = agent.invoke(question)['output']
     return agent_res
-@st.cache_resource
+
 def generate_image_captions_llm(base64_string,question):
 
     # ant_client = Anthropic()
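The same decorator is removed from each Bedrock helper above. One plausible reading (an assumption, not stated in the commit): `st.cache_resource` hands back the same object on a cache hit, and for the streaming path in `invoke_llm_model` a boto3 event stream can only be consumed once, so a memoized response would already be drained on the next rerun. A hypothetical, self-contained sketch of such a streaming call, with the model ID and parameters chosen purely for illustration:

import json
import boto3

# Illustration only; not code from this repository.
client = boto3.client("bedrock-runtime", region_name="us-east-1")

def stream_completion(prompt):
    response = client.invoke_model_with_response_stream(
        modelId="anthropic.claude-3-sonnet-20240229-v1:0",  # assumed model ID
        body=json.dumps({
            "anthropic_version": "bedrock-2023-05-31",
            "max_tokens": 256,
            "messages": [{"role": "user", "content": prompt}],
        }),
    )
    # The event stream is drained as it is read; a cached copy of
    # `response` could not be replayed on a later Streamlit rerun.
    for event in response["body"]:
        chunk = json.loads(event["chunk"]["bytes"])
        if chunk.get("type") == "content_block_delta":
            yield chunk["delta"].get("text", "")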