siddhartharya committed
Commit 5d4fc94 · verified · 1 Parent(s): 8b416e2

Update app.py

Files changed (1): app.py +8 -9
app.py CHANGED
@@ -3,10 +3,10 @@ import requests
 import os
 from openai import OpenAI
 
-# Initialize the NVIDIA LLM client
+# Initialize the Groq Cloud LLM client
 client = OpenAI(
-    base_url="https://integrate.api.nvidia.com/v1",
-    api_key=os.getenv("NVIDIA_API_KEY")  # Use your NVIDIA API key
+    base_url="https://api.groq.com/openai/v1",
+    api_key=os.getenv("GROQ_API_KEY")  # Use your Groq Cloud API key
 )
 
 class AutonomousEmailAgent:
@@ -52,9 +52,9 @@ class AutonomousEmailAgent:
         After generating the email, reflect on whether the content aligns with the role and company and whether any improvements are needed. Respond clearly with one of the above options.
         """
 
-        # Send the reasoning prompt to the LLM
+        # Send the reasoning prompt to the Groq Cloud LLM
         completion = client.chat.completions.create(
-            model="nvidia/llama-3.1-nemotron-70b-instruct",
+            model="llama3-8b-8192",  # Adjust model if necessary
             messages=[{"role": "user", "content": reasoning_prompt}],
             temperature=0.5,
             top_p=1,
@@ -113,7 +113,6 @@ class AutonomousEmailAgent:
             self.company_url = None
         else:
            print(f"Error fetching company URL: {response.status_code}. Retrying with fallback or alternative...")
-            # Additional logic to retry with alternative API or handle error
 
     # Fetch LinkedIn data via Proxycurl
     def fetch_linkedin_data(self):
@@ -160,9 +159,9 @@ class AutonomousEmailAgent:
            print(f"Error: Unable to fetch company info via Firecrawl. Retrying or using fallback...")
            self.company_info = "A leading company in its field."
 
-    # Final Action: Generate the email using NVIDIA LLM with "Start with Why" framework
+    # Final Action: Generate the email using Groq Cloud LLM with "Start with Why" framework
     def generate_email(self):
-        print("Action: Generating the email using NVIDIA LLM with the gathered information.")
+        print("Action: Generating the email using Groq Cloud LLM with the gathered information.")
 
         prompt = f"""
         Write a professional job application email applying for the {self.role} position at {self.company_name}.
@@ -191,7 +190,7 @@ class AutonomousEmailAgent:
         """
 
         completion = client.chat.completions.create(
-            model="nvidia/llama-3.1-nemotron-70b-instruct",
+            model="llama3-8b-8192",
             messages=[{"role": "user", "content": prompt}],
             temperature=0.5,
             top_p=1,
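
For reference, a minimal sketch of how the updated client could be exercised after this change, assuming openai>=1.0 is installed and the GROQ_API_KEY environment variable is set. The base URL, model name, and sampling parameters mirror the diff above; the ask_llm helper, the sample prompt, and the main guard are illustrative and are not part of app.py.

import os
from openai import OpenAI

# Groq Cloud exposes an OpenAI-compatible endpoint, so the standard OpenAI
# client works once base_url is overridden (as in the diff above).
client = OpenAI(
    base_url="https://api.groq.com/openai/v1",
    api_key=os.getenv("GROQ_API_KEY"),  # Use your Groq Cloud API key
)

def ask_llm(prompt: str) -> str:
    # Same call shape and parameters as the updated calls in app.py.
    completion = client.chat.completions.create(
        model="llama3-8b-8192",  # Adjust model if necessary
        messages=[{"role": "user", "content": prompt}],
        temperature=0.5,
        top_p=1,
    )
    return completion.choices[0].message.content

if __name__ == "__main__":
    # Illustrative prompt; app.py builds a longer job-application prompt.
    print(ask_llm("Write a one-sentence professional greeting."))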