dolphinium committed on
Commit
6f4b6a3
·
1 Parent(s): f43f2d3

fix (amend): set max_output_tokens to the maximum (1048576) on LLM initialization to fix visualization generation

Browse files
Files changed (1) hide show
  1. connections.py +1 -1
connections.py CHANGED
@@ -41,7 +41,7 @@ def initialize_connections():
41
 
42
  # 3. Initialize the LLM
43
  genai.configure(api_key=config.GEMINI_API_KEY)
44
- llm_model = genai.GenerativeModel('gemini-2.5-flash', generation_config=genai.types.GenerationConfig(temperature=0))
45
  print(f"✅ LLM Model '{llm_model.model_name}' initialized.")
46
 
47
  print("✅ System Initialized Successfully.")
 
41
 
42
  # 3. Initialize the LLM
43
  genai.configure(api_key=config.GEMINI_API_KEY)
44
+ llm_model = genai.GenerativeModel('gemini-2.5-flash', generation_config=genai.types.GenerationConfig(temperature=0, max_output_tokens=1048576))
45
  print(f"✅ LLM Model '{llm_model.model_name}' initialized.")
46
 
47
  print("✅ System Initialized Successfully.")