siddhartharya committed
Commit c2780b2 · verified · 1 Parent(s): b62c66e

Update app.py

Files changed (1)
  1. app.py +50 -60
app.py CHANGED
@@ -1,12 +1,13 @@
 import gradio as gr
 import requests
 import os
+from openai import OpenAI
 
-# Load API keys securely from environment variables
-proxycurl_api_key = os.getenv("PROXYCURL_API_KEY")  # Proxycurl API key
-groq_api_key = os.getenv("GROQ_CLOUD_API_KEY")  # Groq Cloud API key
-firecrawl_api_key = os.getenv("FIRECRAWL_API_KEY")  # Firecrawl API key
-serp_api_key = os.getenv("SERP_API_KEY")  # SERP API key for fetching company URL
+# Initialize the NVIDIA LLM client
+client = OpenAI(
+    base_url="https://integrate.api.nvidia.com/v1",
+    api_key=os.getenv("NVIDIA_API_KEY")  # Use your NVIDIA API key
+)
 
 class AutonomousEmailAgent:
     def __init__(self, linkedin_url, company_name, role, word_limit, user_name, email, phone, linkedin):
@@ -52,47 +53,37 @@ class AutonomousEmailAgent:
         """
 
         # Send the reasoning prompt to the LLM
-        url = "https://api.groq.com/openai/v1/chat/completions"
-        headers = {
-            "Authorization": f"Bearer {groq_api_key}",
-            "Content-Type": "application/json",
-        }
-
-        data = {
-            "messages": [{"role": "user", "content": reasoning_prompt}],
-            "model": "llama3-8b-8192"
-        }
-
-        response = requests.post(url, headers=headers, json=data)
-        if response.status_code == 200:
-            reasoning_output = response.json()["choices"][0]["message"]["content"].strip()
-            print("LLM Reasoning Output:", reasoning_output)
-
-            # Now the LLM takes action based on the reflection
-            return self.act_on_llm_instructions(reasoning_output)
-        else:
-            print(f"Error: {response.status_code}, {response.text}")
-            return "Error: Unable to complete reasoning."
+        completion = client.chat.completions.create(
+            model="nvidia/llama-3.1-nemotron-70b-instruct",
+            messages=[{"role": "user", "content": reasoning_prompt}],
+            temperature=0.5,
+            top_p=1,
+            max_tokens=1024,
+            stream=True
+        )
+
+        reasoning_output = ""
+        for chunk in completion:
+            if chunk.choices[0].delta.content is not None:
+                print(chunk.choices[0].delta.content, end="")
+                reasoning_output += chunk.choices[0].delta.content
+
+        return self.act_on_llm_instructions(reasoning_output)
 
     # Function to act on the LLM's structured instructions
     def act_on_llm_instructions(self, reasoning_output):
-        # Convert the output to lowercase and trim whitespace for easier parsing
         instruction = reasoning_output.lower().strip()
 
         if "scrape" in instruction:
-            # Action: Fetch company URL via SERP API before scraping
             self.fetch_company_url()
             if self.company_url:
                 self.fetch_company_info_with_firecrawl(self.company_url)
-            # Reflect again by invoking the LLM to reassess
             return self.autonomous_reasoning()
 
         elif "generate_email" in instruction:
-            # Action: Proceed to generate the email
            return self.generate_email()
 
         elif "fallback" in instruction:
-            # Action: Use fallback logic or default values
             print("Action: Using fallback values for missing data.")
             if not self.company_info:
                 self.company_info = "A leading company in its field."
@@ -101,13 +92,14 @@ class AutonomousEmailAgent:
             return self.generate_email()
 
         else:
-            # If the LLM returns an unrecognized instruction, fall back to using the current available data
             print("Error: Unrecognized instruction from LLM. Proceeding with available data.")
             return self.generate_email()
 
     # Fetch company URL using SERP API
     def fetch_company_url(self):
+        serp_api_key = os.getenv("SERP_API_KEY")  # Fetch the SERP API key from the environment
         print(f"Fetching company URL for {self.company_name} using SERP API...")
+
         serp_url = f"https://serpapi.com/search.json?q={self.company_name}&api_key={serp_api_key}&num=1"
         response = requests.get(serp_url)
 
@@ -120,11 +112,12 @@ class AutonomousEmailAgent:
             print("No URL found for the company via SERP API.")
             self.company_url = None
         else:
-            print(f"Error fetching company URL: {response.status_code}")
-            self.company_url = None
+            print(f"Error fetching company URL: {response.status_code}. Retrying with fallback or alternative...")
+            # Additional logic to retry with alternative API or handle error
 
-    # Action: Fetch LinkedIn data via Proxycurl
+    # Fetch LinkedIn data via Proxycurl
     def fetch_linkedin_data(self):
+        proxycurl_api_key = os.getenv("PROXYCURL_API_KEY")  # Fetch API key from environment
         if not self.linkedin_url:
             print("Action: No LinkedIn URL provided, using default bio.")
             self.bio = "A professional with diverse experience."
@@ -146,9 +139,11 @@ class AutonomousEmailAgent:
             self.skills = ["Adaptable", "Hardworking"]
             self.experiences = ["Worked across various industries"]
 
-    # Action: Fetch company information via Firecrawl API using company URL
+    # Fetch company information via Firecrawl API using company URL
     def fetch_company_info_with_firecrawl(self, company_url):
+        firecrawl_api_key = os.getenv("FIRECRAWL_API_KEY")  # Fetch the Firecrawl API key from the environment
         print(f"Fetching company info for {company_url} using Firecrawl.")
+
         headers = {"Authorization": f"Bearer {firecrawl_api_key}"}
         firecrawl_url = "https://api.firecrawl.dev/v1/scrape"
         data = {
@@ -162,16 +157,13 @@ class AutonomousEmailAgent:
             self.company_info = firecrawl_data.get("description", "No detailed company info available.")
             print(f"Company info fetched: {self.company_info}")
         else:
-            print(f"Error: Unable to fetch company info via Firecrawl. Using default info.")
+            print(f"Error: Unable to fetch company info via Firecrawl. Retrying or using fallback...")
             self.company_info = "A leading company in its field."
 
-    # Final Action: Generate the email using Groq Cloud LLM with "Start with Why" framework
+    # Final Action: Generate the email using NVIDIA LLM with "Start with Why" framework
     def generate_email(self):
-        print("Action: Generating the email with the gathered information.")
+        print("Action: Generating the email using NVIDIA LLM with the gathered information.")
 
-        linkedin_text = f"Please find my LinkedIn profile at {self.linkedin}" if self.linkedin else ""
-
-        # Updated prompt to reflect Simon Sinek's "Start with Why" approach
         prompt = f"""
         Write a professional job application email applying for the {self.role} position at {self.company_name}.
 
@@ -194,33 +186,31 @@ class AutonomousEmailAgent:
         Email: {self.email}
         Phone: {self.phone}
         LinkedIn: {self.linkedin}
-
+
         The email should not exceed {self.word_limit} words.
         """
 
-        url = "https://api.groq.com/openai/v1/chat/completions"
-        headers = {
-            "Authorization": f"Bearer {groq_api_key}",
-            "Content-Type": "application/json",
-        }
+        completion = client.chat.completions.create(
+            model="nvidia/llama-3.1-nemotron-70b-instruct",
+            messages=[{"role": "user", "content": prompt}],
+            temperature=0.5,
+            top_p=1,
+            max_tokens=1024,
+            stream=True
+        )
 
-        data = {
-            "messages": [{"role": "user", "content": prompt}],
-            "model": "llama3-8b-8192"
-        }
+        generated_email = ""
+        for chunk in completion:
+            if chunk.choices[0].delta.content is not None:
+                print(chunk.choices[0].delta.content, end="")
+                generated_email += chunk.choices[0].delta.content
 
-        response = requests.post(url, headers=headers, json=data)
-        if response.status_code == 200:
-            return response.json()["choices"][0]["message"]["content"].strip()
-        else:
-            print(f"Error: {response.status_code}, {response.text}")
-            return "Error generating email. Please check your API key or try again later."
+        return generated_email
 
     # Main loop following ReAct pattern
     def run(self):
         self.fetch_linkedin_data()  # Fetch LinkedIn data
-        # Let LLM autonomously decide and act
-        return self.autonomous_reasoning()
+        return self.autonomous_reasoning()  # Let the LLM autonomously decide and act
 
 # Define the Gradio interface and the main app logic
 def gradio_ui():
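For reference, the new call pattern can be exercised on its own before launching the Gradio app, to confirm that NVIDIA_API_KEY and the model name resolve correctly. This is a minimal sketch, not part of the commit, assuming the openai Python package (v1+) is installed and NVIDIA_API_KEY is set in the environment; the test prompt is illustrative only.

import os
from openai import OpenAI

# Point the OpenAI-compatible client at NVIDIA's endpoint, as app.py now does
client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",
    api_key=os.getenv("NVIDIA_API_KEY"),
)

# Stream a short completion and accumulate it, mirroring autonomous_reasoning() and generate_email()
completion = client.chat.completions.create(
    model="nvidia/llama-3.1-nemotron-70b-instruct",
    messages=[{"role": "user", "content": "Reply with the single word: ready"}],
    temperature=0.5,
    top_p=1,
    max_tokens=16,
    stream=True,
)

output = ""
for chunk in completion:
    if chunk.choices[0].delta.content is not None:
        output += chunk.choices[0].delta.content

print(output)

One design note: the removed Groq branches checked response.status_code explicitly, whereas the streaming calls above surface failures as exceptions from the openai client, so callers that previously relied on the returned error strings may want a try/except around these calls.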