Update app.py
app.py CHANGED
@@ -53,8 +53,8 @@ MODEL_CONFIGS = {
     "gpt-4.1-mini": {"max_tokens": 33000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
     "gpt-4.1-nano": {"max_tokens": 33000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
     "o3-mini": {"max_completion_tokens": 100000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": None},
-    "o1": {"
-    "o1-mini": {"
+    "o1": {"max_completion_tokens": 100000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": None},
+    "o1-mini": {"max_completion_tokens": 66000, "param_name": "max_completion_tokens", "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": None},
     "o1-preview": {"max_tokens": 33000, "param_name": "max_tokens", "api_version": None, "category": "OpenAI", "warning": None},
     "Phi-4-multimodal-instruct": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Microsoft", "warning": None},
     "Mistral-large-2407": {"max_tokens": 4000, "param_name": "max_tokens", "api_version": None, "category": "Mistral", "warning": None},
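Each MODEL_CONFIGS entry names its token-limit parameter twice: once as the key that holds the limit, and once under "param_name". That lets a caller build the request keyword dynamically instead of hard-coding max_tokens. A minimal sketch of that lookup (the build_token_kwargs helper is hypothetical, not taken from app.py):

def build_token_kwargs(config: dict) -> dict:
    # The limit lives under the key named by "param_name", so one lookup
    # yields either {"max_tokens": ...} or {"max_completion_tokens": ...}.
    param = config["param_name"]
    return {param: config[param]}

# Example with the new o1 entry from the hunk above:
o1 = {"max_completion_tokens": 100000, "param_name": "max_completion_tokens",
      "api_version": "2024-12-01-preview", "category": "OpenAI", "warning": None}
print(build_token_kwargs(o1))  # {'max_completion_tokens': 100000}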
@@ -365,6 +365,7 @@ Here's the complete Manim code:
             messages=messages,
             model=model_name,
             max_tokens=config["max_tokens"]
+            max_completion_tokens=config["max_completion_tokens"]
         )
     else:
         # Use the existing client with normal max_tokens
@@ -372,6 +373,7 @@ Here's the complete Manim code:
             messages=messages,
             model=model_name,
             max_tokens=config["max_tokens"]
+            max_completion_tokens=config["max_completion_tokens"]
         )

     # Process the response
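As committed, both call sites now pass max_tokens and max_completion_tokens unconditionally, and the added keyword follows max_tokens=config["max_tokens"] with no separating comma, which Python rejects as a syntax error. Since each config entry defines only one of the two keys, config["max_completion_tokens"] would also raise KeyError for entries such as "gpt-4.1-mini". A sketch of a variant that sidesteps both issues by unpacking the per-model keyword (the complete_with_limit helper is hypothetical; the client.complete call shape is assumed from the diff):

def complete_with_limit(client, messages, model_name, config):
    # Pick the single token-limit keyword this model accepts,
    # e.g. {"max_completion_tokens": 100000} for o1.
    token_kwargs = {config["param_name"]: config[config["param_name"]]}
    return client.complete(
        messages=messages,
        model=model_name,
        **token_kwargs,
    )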
@@ -2159,6 +2161,7 @@ class MyScene(Scene):
             messages=[UserMessage("Hello, this is a connection test.")],
             model=model_name,
             max_tokens=50  # Small value for quick testing
+            max_completion_tokens=50
         )

         # Check if response is valid
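The last hunk caps the connection test at 50 tokens. The UserMessage type in that call matches the azure-ai-inference SDK; assuming that is the client library in use (an assumption, as is the endpoint URL and the API_KEY variable below, none of which appear in the diff), a self-contained version of the probe could look like:

import os

from azure.ai.inference import ChatCompletionsClient
from azure.ai.inference.models import UserMessage
from azure.core.credentials import AzureKeyCredential

# Placeholder endpoint and environment variable, not taken from app.py.
client = ChatCompletionsClient(
    endpoint="https://models.inference.ai.azure.com",
    credential=AzureKeyCredential(os.environ["API_KEY"]),
)

response = client.complete(
    messages=[UserMessage("Hello, this is a connection test.")],
    model="gpt-4.1-mini",
    max_tokens=50,  # small value for quick testing, as in the diff
)
print(response.choices[0].message.content)

Any non-error response with a populated choices list is enough to confirm the endpoint and credentials, which is what the "# Check if response is valid" step after the call handles.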