seanpedrickcase committed on
Commit
843932c
·
1 Parent(s): 03afd76

Removed obligatory bedrock load in from app

Browse files
Files changed (1) hide show
  1. chatfuncs/chatfuncs.py +5 -2
chatfuncs/chatfuncs.py CHANGED
@@ -34,7 +34,7 @@ from langchain.docstore.document import Document
34
 
35
  from chatfuncs.prompts import instruction_prompt_template_alpaca, instruction_prompt_mistral_orca, instruction_prompt_phi3, instruction_prompt_llama3, instruction_prompt_qwen, instruction_prompt_template_orca, instruction_prompt_gemma
36
  from chatfuncs.model_load import temperature, max_new_tokens, sample, repetition_penalty, top_p, top_k, torch_device, CtransGenGenerationConfig, max_tokens
37
- from chatfuncs.config import GEMINI_API_KEY, AWS_DEFAULT_REGION, LARGE_MODEL_NAME, SMALL_MODEL_NAME
38
 
39
  model_object = [] # Define empty list for model functions to run
40
  tokenizer = [] # Define empty list for model functions to run
@@ -45,7 +45,10 @@ class ResponseObject:
45
  self.text = text
46
  self.usage_metadata = usage_metadata
47
 
48
- bedrock_runtime = boto3.client('bedrock-runtime', region_name=AWS_DEFAULT_REGION)
 
 
 
49
 
50
  torch.cuda.empty_cache()
51
 
 
34
 
35
  from chatfuncs.prompts import instruction_prompt_template_alpaca, instruction_prompt_mistral_orca, instruction_prompt_phi3, instruction_prompt_llama3, instruction_prompt_qwen, instruction_prompt_template_orca, instruction_prompt_gemma
36
  from chatfuncs.model_load import temperature, max_new_tokens, sample, repetition_penalty, top_p, top_k, torch_device, CtransGenGenerationConfig, max_tokens
37
+ from chatfuncs.config import GEMINI_API_KEY, AWS_DEFAULT_REGION, LARGE_MODEL_NAME, SMALL_MODEL_NAME, RUN_AWS_FUNCTIONS
38
 
39
  model_object = [] # Define empty list for model functions to run
40
  tokenizer = [] # Define empty list for model functions to run
 
45
  self.text = text
46
  self.usage_metadata = usage_metadata
47
 
48
+ if RUN_AWS_FUNCTIONS=="1":
49
+ bedrock_runtime = boto3.client('bedrock-runtime', region_name=AWS_DEFAULT_REGION)
50
+ else:
51
+ bedrock_runtime = ""
52
 
53
  torch.cuda.empty_cache()
54