dolphinium
committed on
Commit
·
6f4b6a3
1
Parent(s):
f43f2d3
fix amend: set max_output_tokens to max(1048576) on LLM initialization to fix visualization generation
Browse files- connections.py +1 -1
connections.py
CHANGED
@@ -41,7 +41,7 @@ def initialize_connections():
|
|
41 |
|
42 |
# 3. Initialize the LLM
|
43 |
genai.configure(api_key=config.GEMINI_API_KEY)
|
44 |
-
llm_model = genai.GenerativeModel('gemini-2.5-flash', generation_config=genai.types.GenerationConfig(temperature=0))
|
45 |
print(f"✅ LLM Model '{llm_model.model_name}' initialized.")
|
46 |
|
47 |
print("✅ System Initialized Successfully.")
|
|
|
41 |
|
42 |
# 3. Initialize the LLM
|
43 |
genai.configure(api_key=config.GEMINI_API_KEY)
|
44 |
+
llm_model = genai.GenerativeModel('gemini-2.5-flash', generation_config=genai.types.GenerationConfig(temperature=0, max_output_tokens=1048576))
|
45 |
print(f"✅ LLM Model '{llm_model.model_name}' initialized.")
|
46 |
|
47 |
print("✅ System Initialized Successfully.")
|