euler314 committed on
Commit 9c5b5c6 · verified · 1 Parent(s): 74b30a1

Update app.py

Files changed (1): app.py +3 -3
app.py CHANGED
@@ -53,8 +53,8 @@ MODEL_CONFIGS = {
     "openai/gpt-4.1-mini": {"max_tokens": 33000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
     "openai/gpt-4.1-nano": {"max_tokens": 33000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
     "openai/o3-mini": {"max_tokens": 100000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": None},
-    "openai/o1": {"max_tokens": 100000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": "Uses special parameters. Will use gpt-4o instead."},
-    "openai/o1-mini": {"max_tokens": 66000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": "Uses special parameters. Will use gpt-4o instead."},
+    "openai/o1": {"max_tokens": 100000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": None},
+    "openai/o1-mini": {"max_tokens": 66000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": None},
     "openai/o1-preview": {"max_tokens": 33000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
     "Phi-4-multimodal-instruct": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Microsoft", "warning": None},
     "Mistral-large-2407": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Mistral", "warning": None},
@@ -2135,7 +2135,7 @@ class MyScene(Scene):
     config = MODEL_CONFIGS.get(model_name, MODEL_CONFIGS["default"])
 
     # Check if this is a model that requires max_completion_tokens
-    if config["param_name"] == "max_completion_tokens":
+    if config["param_name"] == "max_com343434pletion_tokens":
         st.warning(f"Model {model_name} requires special handling that isn't fully supported. Testing with gpt-4o instead.")
         model_name = "gpt-4o"
         config = MODEL_CONFIGS["gpt-4o"]
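For reference, a minimal sketch of how the `param_name` field in MODEL_CONFIGS might be consumed when building a request: reasoning models (o1, o1-mini, o3-mini) expect `max_completion_tokens` while most chat models expect `max_tokens`. The helper name and the OpenAI-style client call below are illustrative assumptions, not code taken from app.py.

```python
# Hypothetical helper (not from app.py): map a model's config entry to the
# token-limit keyword argument expected by its endpoint.
def build_token_kwargs(model_name: str, model_configs: dict) -> dict:
    config = model_configs.get(model_name, model_configs["default"])
    return {config["param_name"]: config["max_tokens"]}

# Illustrative usage with an OpenAI-style client (whether app.py issues the
# request this way is an assumption):
# kwargs = build_token_kwargs("openai/o1-mini", MODEL_CONFIGS)
# response = client.chat.completions.create(model="openai/o1-mini",
#                                           messages=messages, **kwargs)
```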