yaleh committed on
Commit 1314823 · 1 Parent(s): e9f41ce

Fixed a bug in the model config handling.

Files changed (1)
  1. app/gradio_meta_prompt.py +19 -18
app/gradio_meta_prompt.py CHANGED
@@ -161,7 +161,7 @@ def on_model_tab_select(event: gr.SelectData):
 
 def get_current_executor_model(simple_model_name: str,
                                advanced_model_name: str,
-                               expert_model_name: str, expert_model_configs: Optional[Dict[str, Any]] = None) -> BaseLanguageModel:
+                               expert_model_name: str, expert_model_config: Optional[Dict[str, Any]] = None) -> BaseLanguageModel:
     """
     Retrieve and return a language model (LLM) based on the currently active model tab.
 
@@ -178,8 +178,8 @@ def get_current_executor_model(simple_model_name: str,
             This should correspond to a key in the 'llms' section of the application's configuration.
         expert_model_name (str): The name of the expert language model.
             This should correspond to a key in the 'llms' section of the application's configuration.
-        expert_model_configs (Optional[Dict[str, Any]]): Optional configurations for the expert model.
-            These configurations will be used to update the executor model configuration if the active
+        expert_model_config (Optional[Dict[str, Any]]): Optional configuration for the expert model.
+            This configuration will be used to update the executor model configuration if the active
             model tab is "Expert". Defaults to None.
 
     Returns:
@@ -197,22 +197,23 @@ def get_current_executor_model(simple_model_name: str,
         "Advanced": advanced_model_name,
         "Expert": expert_model_name
     }
-
+
     try:
         executor_model_name = model_mapping.get(active_model_tab, simple_model_name)
-        executor_model_type = config.llms[executor_model_name].type
-        executor_model_config = config.llms[executor_model_name].model_dump(exclude={'type'})
-
+        executor_model = config.llms[executor_model_name]
+        executor_model_type = executor_model.type
+        executor_model_config = executor_model.model_dump(exclude={'type'})
+
         # Update the configuration with the expert model configurations if provided
-        if active_model_tab == "Expert" and expert_model_configs:
-            executor_model_config.update(expert_model_configs)
-
+        if active_model_tab == "Expert" and expert_model_config:
+            executor_model_config.update(expert_model_config)
+
         return LLMModelFactory().create(executor_model_type, **executor_model_config)
-
+
     except KeyError as e:
         logging.error(f"Configuration key error: {e}")
         raise ValueError(f"Invalid model name or configuration: {e}")
-
+
     except Exception as e:
         logging.error(f"An unexpected error occurred: {e}")
         raise RuntimeError(f"Failed to retrieve the executor model: {e}")
@@ -345,18 +346,18 @@ def initialize_llm(model_name: str, model_config: Optional[Dict[str, Any]] = Non
     checks and validates the type when creating a new language model.
     """
     try:
-        model_config = config.llms[model_name]
-        llm_type = model_config.type
-        config = model_config.model_dump(exclude={'type'})
+        llm_config = config.llms[model_name]
+        model_type = llm_config.type
+        dumped_config = llm_config.model_dump(exclude={'type'})
 
         if model_config:
-            config.update(model_config)
+            dumped_config.update(model_config)
 
-        return LLMModelFactory().create(llm_type, **config)
+        return LLMModelFactory().create(model_type, **dumped_config)
     except KeyError:
         raise KeyError(f"No configuration exists for the model name: {model_name}")
     except NotImplementedError:
-        raise NotImplementedError(f"Unrecognized type configured for the language model: {llm_type}")
+        raise NotImplementedError(f"Unrecognized type configured for the language model: {model_type}")
 
 
 def process_message_with_single_llm(user_message, expected_output, acceptance_criteria, initial_system_message,
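
The pattern this commit settles on in get_current_executor_model is: look up the selected entry in config.llms once, split off its type, dump the remaining fields into a plain dict, and merge any expert-tab overrides into that dict before calling the factory. Below is a minimal sketch of the same pattern, assuming the config entries are Pydantic v2 models; LLMConfig, its fields, and the example values are hypothetical stand-ins, not this app's actual configuration.

    from typing import Any, Dict, Optional
    from pydantic import BaseModel

    class LLMConfig(BaseModel):
        type: str                          # consumed by the factory, excluded from the kwargs
        model_name: str = "gpt-4o-mini"    # hypothetical defaults
        temperature: float = 0.0

    llms = {"expert_llm": LLMConfig(type="ChatOpenAI")}

    def build_model_kwargs(name: str, overrides: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        entry = llms[name]                           # KeyError here means an unknown model name
        kwargs = entry.model_dump(exclude={"type"})  # fresh dict: everything except the factory type
        if overrides:
            kwargs.update(overrides)                 # expert-tab settings win over config defaults
        return kwargs

    print(build_model_kwargs("expert_llm", {"temperature": 0.7}))
    # -> {'model_name': 'gpt-4o-mini', 'temperature': 0.7}

Because model_dump returns a new dict, the expert-tab overrides never mutate the loaded config object itself.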
 
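The initialize_llm change is essentially a rename so the locals stop shadowing both the model_config parameter and the module-level config: because the old body assigned to config, Python would treat config as a local for the whole function (absent a global declaration elsewhere), so the first lookup config.llms[model_name] would raise UnboundLocalError, and overwriting model_config meant caller overrides could never be applied. A rough illustration of that failure mode and the renamed-locals fix, using hypothetical stand-in names and values rather than the app's real config:

    config = {"my-model": {"model_name": "gpt-4o-mini", "temperature": 0.0}}  # stand-in for the loaded config

    def buggy(model_name, model_config=None):
        # Assigning to 'config' two lines below makes it a local for the whole
        # function, so this lookup raises UnboundLocalError before anything runs.
        model_config = config[model_name]      # would also clobber the caller's parameter
        config = dict(model_config)
        if model_config:                       # would always be truthy after the clobber
            config.update(model_config)
        return config

    def fixed(model_name, model_config=None):
        llm_config = config[model_name]        # distinct names leave the global untouched
        dumped_config = dict(llm_config)
        if model_config:                       # now this really tests the caller's overrides
            dumped_config.update(model_config)
        return dumped_config

    try:
        buggy("my-model", {"temperature": 0.9})
    except UnboundLocalError as exc:
        print("buggy:", exc)
    print("fixed:", fixed("my-model", {"temperature": 0.9}))
    # -> fixed: {'model_name': 'gpt-4o-mini', 'temperature': 0.9}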