Sami Halawa Claude committed on
Commit
efcf330
·
1 Parent(s): 4afb1de

Configure AutoStartup.ai to use Gemini API

Browse files

- Add Gemini 2.0 Flash support to LLMClientManager
- Update default model from o4-mini to gemini-2.0-flash-exp
- Configure Gemini API endpoint and client initialization
- Add environment variable documentation and examples
- Update README with Gemini API setup instructions

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <[email protected]>

Files changed (5) hide show
  1. .env.example +8 -0
  2. README.md +18 -0
  3. app.py +6 -2
  4. generator.py +1 -1
  5. utils.py +12 -1
.env.example ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ # Environment variables for AutoStartup.ai
2
+ OPENAI_MODEL=gemini-2.0-flash-exp
3
+ OPENAI_API_ENDPOINT=https://generativelanguage.googleapis.com/v1beta/openai/
4
+ OPENAI_API_KEY=your_gemini_api_key_here
5
+
6
+ # Alternative models you can use:
7
+ # OPENAI_MODEL=gpt-4o-mini (requires OpenAI API key)
8
+ # OPENAI_MODEL=gemini-1.5-pro (requires Google AI API key)
README.md CHANGED
@@ -37,10 +37,28 @@ Transform market problems into viable startup opportunities using AI-driven anal
37
  ## Installation
38
 
39
  ```bash
 
40
  pip install -r requirements.txt
 
 
 
 
 
 
 
 
 
 
 
41
  python app.py
42
  ```
43
 
 
 
 
 
 
 
44
  ## Technology Stack
45
 
46
  - **Frontend**: Gradio for interactive web interface
 
37
  ## Installation
38
 
39
  ```bash
40
+ # Install dependencies
41
  pip install -r requirements.txt
42
+
43
+ # Set up environment variables (choose one):
44
+ # For Gemini API (recommended):
45
+ export OPENAI_MODEL=gemini-2.0-flash-exp
46
+ export OPENAI_API_ENDPOINT=https://generativelanguage.googleapis.com/v1beta/openai/
47
+ export OPENAI_API_KEY=your_gemini_api_key
48
+
49
+ # For OpenAI API:
50
+ export OPENAI_API_KEY=your_openai_api_key
51
+
52
+ # Run the application
53
  python app.py
54
  ```
55
 
56
+ ## Environment Variables
57
+
58
+ - `OPENAI_API_KEY`: Your Gemini or OpenAI API key
59
+ - `OPENAI_API_ENDPOINT`: API endpoint (for Gemini: https://generativelanguage.googleapis.com/v1beta/openai/)
60
+ - `OPENAI_MODEL`: Model to use (gemini-2.0-flash-exp, gpt-4o-mini, etc.)
61
+
62
  ## Technology Stack
63
 
64
  - **Frontend**: Gradio for interactive web interface
app.py CHANGED
@@ -90,8 +90,12 @@ def generate_and_evaluate(query, ideas_count, random_seed, progress=gr.Progress(
90
  yield f"Done. Processed {len(ranked_ideas)} ideas in {total_time:.2f}s.", results_df, f"${total_cost:.2f}", ranked_ideas, pd.DataFrame()
91
  # return
92
  # --- Compare using generate_and_evaluate2 method ---
93
- client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
94
- ideas = generate_temp_free_idea('ai_scientist/test.json', client, 'gpt-4o-mini', query, ideas_count//4, 4, False)
 
 
 
 
95
  comparison_ranked_ideas, cost = generator.evaluate(ideas, query, progress)
96
  with open('comparison_ranked_ideas.json', 'w', encoding='utf-8') as f:
97
  json.dump(comparison_ranked_ideas, f, indent=4, ensure_ascii=False)
 
90
  yield f"Done. Processed {len(ranked_ideas)} ideas in {total_time:.2f}s.", results_df, f"${total_cost:.2f}", ranked_ideas, pd.DataFrame()
91
  # return
92
  # --- Compare using generate_and_evaluate2 method ---
93
+ # Create Gemini client for comparison
94
+ gemini_client = OpenAI(
95
+ api_key=os.getenv("OPENAI_API_KEY"),
96
+ base_url=os.getenv("OPENAI_API_ENDPOINT", "https://generativelanguage.googleapis.com/v1beta/openai/")
97
+ )
98
+ ideas = generate_temp_free_idea('ai_scientist/test.json', gemini_client, 'gemini-2.0-flash-exp', query, ideas_count//4, 4, False)
99
  comparison_ranked_ideas, cost = generator.evaluate(ideas, query, progress)
100
  with open('comparison_ranked_ideas.json', 'w', encoding='utf-8') as f:
101
  json.dump(comparison_ranked_ideas, f, indent=4, ensure_ascii=False)
generator.py CHANGED
@@ -81,7 +81,7 @@ class IdeaGenerator():
81
  json.dump(summary_list, file, indent=4)
82
 
83
  self.paper_list = summary_list
84
- self.llm.switch_model('o4-mini')
85
  print("Summary cost: ", self.llm.get_cost())
86
 
87
  def get_paper_list(self, num):
 
81
  json.dump(summary_list, file, indent=4)
82
 
83
  self.paper_list = summary_list
84
+ self.llm.switch_model('gemini-2.0-flash-exp')
85
  print("Summary cost: ", self.llm.get_cost())
86
 
87
  def get_paper_list(self, num):
utils.py CHANGED
@@ -57,6 +57,7 @@ class LLMClientManager:
57
 
58
  def __init__(self):
59
  self.openai_client = None
 
60
  self.deepseek_client = None
61
  self.current_client = None
62
  self.current_model = None
@@ -71,6 +72,16 @@ class LLMClientManager:
71
  self.openai_client = OpenAI(api_key=API_KEY)
72
  self.current_client = self.openai_client
73
 
 
 
 
 
 
 
 
 
 
 
74
  elif model in ["deepseek-chat", "deepseek-reasoner", 'Pro/deepseek-ai/DeepSeek-R1', 'deepseek-r1-250120']:
75
  self.current_client = self.deepseek_client
76
  else:
@@ -78,7 +89,7 @@ class LLMClientManager:
78
 
79
  def get_response(self, msg, system_message, response_format=None, temperature=1, print_debug=False):
80
  if self.current_client is None or self.current_model is None:
81
- self.switch_model("o4-mini")
82
 
83
  msg_history = self.msg_history
84
  for _ in range(3):
 
57
 
58
  def __init__(self):
59
  self.openai_client = None
60
+ self.gemini_client = None
61
  self.deepseek_client = None
62
  self.current_client = None
63
  self.current_model = None
 
72
  self.openai_client = OpenAI(api_key=API_KEY)
73
  self.current_client = self.openai_client
74
 
75
+ elif model in ["gemini-2.0-flash-exp", "gemini-1.5-pro", "gemini-1.5-flash"]:
76
+ if self.gemini_client is None:
77
+ API_KEY = os.getenv("OPENAI_API_KEY") # Using same env var for compatibility
78
+ API_ENDPOINT = os.getenv("OPENAI_API_ENDPOINT", "https://generativelanguage.googleapis.com/v1beta/openai/")
79
+ self.gemini_client = OpenAI(
80
+ api_key=API_KEY,
81
+ base_url=API_ENDPOINT
82
+ )
83
+ self.current_client = self.gemini_client
84
+
85
  elif model in ["deepseek-chat", "deepseek-reasoner", 'Pro/deepseek-ai/DeepSeek-R1', 'deepseek-r1-250120']:
86
  self.current_client = self.deepseek_client
87
  else:
 
89
 
90
  def get_response(self, msg, system_message, response_format=None, temperature=1, print_debug=False):
91
  if self.current_client is None or self.current_model is None:
92
+ self.switch_model("gemini-2.0-flash-exp")
93
 
94
  msg_history = self.msg_history
95
  for _ in range(3):