acecalisto3 committed
Commit afd66a9 · verified · 1 Parent(s): 43a275b

Update app.py

Files changed (1):
  1. app.py +6 -6
app.py CHANGED

@@ -917,7 +917,7 @@ def respond(
     top_p: float,
 ) -> str:
     """
-    Generates a response using the Mistral model based on the user's message and history.
+    Generates a response using the google/flan-t5-xl model based on the user's message and history.
     Additionally, handles dynamic commands to interact with individual components.
     """
     if chat_pipeline is None:
@@ -1165,15 +1165,15 @@ def display_historical_data(storage_location: str, url: str):
         logging.error(f"Error fetching historical data for {url}: {e}")
         return pd.DataFrame()
 
-# Function to load the Mistral model
+# Function to load the "google/flan-t5-xl" model
 def load_model():
     """
-    Loads the Mistral model and tokenizer once and returns the pipeline.
+    Loads the FlanT5XL model and tokenizer once and returns the pipeline.
     """
-    model_name = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+    model_name = "google/flan-t5-xl"
     try:
         # Load tokenizer with warning suppression
-        tokenizer = AutoTokenizer.from_pretrained("T5Config", clean_up_tokenization_spaces=True)
+        tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-xl", clean_up_tokenization_spaces=True)
 
         model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
         pipe = pipeline(
@@ -1185,7 +1185,7 @@ def load_model():
         logging.info("Model loaded successfully.")
         return pipe
     except Exception as e:
-        logging.error(f"Error loading Mistral model: {e}")
+        logging.error(f"Error loading google/flan-t5-xl model: {e}")
         return None
 
 # Load the model once at the start
 
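Putting the hunks together, the updated load_model() comes out roughly as below. This is a minimal runnable sketch, not the verbatim file: the logging and transformers imports are assumed to live elsewhere in app.py, and the "text2text-generation" task name plus the trailing chat_pipeline assignment are assumptions, since the hunks cut off before the pipeline(...) arguments and just after the "# Load the model once at the start" comment.

import logging
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline

def load_model():
    """
    Loads the google/flan-t5-xl model and tokenizer once and returns the pipeline.
    """
    model_name = "google/flan-t5-xl"
    try:
        # Passing clean_up_tokenization_spaces explicitly silences the
        # tokenizer warning the original "warning suppression" comment refers to.
        tokenizer = AutoTokenizer.from_pretrained(model_name, clean_up_tokenization_spaces=True)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
        pipe = pipeline(
            "text2text-generation",  # assumed task: the hunk ends before these arguments
            model=model,
            tokenizer=tokenizer,
        )
        logging.info("Model loaded successfully.")
        return pipe
    except Exception as e:
        logging.error(f"Error loading google/flan-t5-xl model: {e}")
        return None

# Load the model once at the start (assumed binding: respond() checks chat_pipeline for None).
chat_pipeline = load_model()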