import gradio as gr
import requests
import os
from bs4 import BeautifulSoup  # For scraping company and role info

# Load API keys securely from environment variables
proxycurl_api_key = os.getenv("PROXYCURL_API_KEY")  # Proxycurl API key
groq_api_key = os.getenv("GROQ_CLOUD_API_KEY")  # Groq Cloud API key
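
# Optional guard: warn early if either key is missing, since every API call
# below depends on them.
if not proxycurl_api_key or not groq_api_key:
    print("Warning: PROXYCURL_API_KEY and/or GROQ_CLOUD_API_KEY is not set; API calls will fail.")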

class EmailAgent:
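    """ReAct-style email agent: reason about what data is missing, act by
    fetching LinkedIn and company data (and scraping the careers page for the
    role), reflect on whether enough was gathered, then generate the email
    via Groq Cloud."""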
    def __init__(self, linkedin_url, company_name, role, word_limit, user_name, email, phone, linkedin):
        self.linkedin_url = linkedin_url
        self.company_name = company_name
        self.role = role
        self.word_limit = word_limit
        self.user_name = user_name
        self.email = email
        self.phone = phone
        self.linkedin = linkedin
        self.bio = None
        self.skills = []
        self.experiences = []
        self.company_info = None
        self.role_description = None

    # Reason: Decide what information is needed and if we need to take additional steps
    def reason_about_data(self):
        print("Reasoning: Deciding what data we need...")
        if not self.linkedin_url:
            print("Warning: LinkedIn URL missing. Proceeding with default bio.")
        if not self.company_name:
            print("Warning: Company name missing. Proceeding with default company info.")
        if not self.role:
            print("Warning: Role missing. We will use general logic for the role.")

    # Action: Fetch LinkedIn data via Proxycurl (acting based on reasoning)
    def fetch_linkedin_data(self):
        if not self.linkedin_url:
            print("Action: No LinkedIn URL provided, using default bio.")
            self.bio = "A professional with diverse experience."
            self.skills = ["Adaptable", "Hardworking"]
            self.experiences = ["Worked across various industries"]
        else:
            print("Action: Fetching LinkedIn data via Proxycurl.")
            headers = {"Authorization": f"Bearer {proxycurl_api_key}"}
            url = f"https://nubela.co/proxycurl/api/v2/linkedin?url={self.linkedin_url}"
            response = requests.get(url, headers=headers, timeout=30)
            if response.status_code == 200:
                data = response.json()
                self.bio = data.get("summary", "No bio available")
                self.skills = data.get("skills", [])
                self.experiences = data.get("experiences", [])
            else:
                print("Error: Unable to fetch LinkedIn profile. Using default bio.")
                self.bio = "A professional with diverse experience."
                self.skills = ["Adaptable", "Hardworking"]
                self.experiences = ["Worked across various industries"]

    # Action: Fetch company information via Proxycurl or use defaults
    def fetch_company_info(self):
        if not self.company_name:
            print("Action: No company name provided, using default company info.")
            self.company_info = "A leading company in its field, offering innovative solutions."
        else:
            print(f"Action: Fetching company info for {self.company_name}.")
            headers = {"Authorization": f"Bearer {proxycurl_api_key}"}
            url = f"https://nubela.co/proxycurl/api/v2/linkedin/company?company_name={self.company_name}"
            response = requests.get(url, headers=headers, timeout=30)
            if response.status_code == 200:
                data = response.json()
                self.company_info = data.get("description", "No detailed company info available.")
            else:
                print(f"Error: Unable to fetch company info for {self.company_name}. Using default info.")
                self.company_info = "A leading company in its field, offering innovative solutions."

    # Action: Scrape the company's website for role-specific information or use defaults
    def scrape_role_from_website(self):
        print(f"Action: Scraping role description from the company's website for {self.role}.")
        if not self.company_name:
            print("Error: No company name or URL provided for scraping.")
            return False
        
        # Try scraping the careers page for role descriptions.
        # Naive domain guess: lowercase the company name and strip spaces so
        # multi-word names still form a plausible URL.
        domain = self.company_name.lower().replace(" ", "")
        try:
            response = requests.get(f"https://{domain}.com/careers", timeout=30)
            if response.status_code == 200:
                soup = BeautifulSoup(response.text, 'html.parser')
                role_descriptions = soup.find_all(string=lambda text: self.role.lower() in text.lower())
                if role_descriptions:
                    self.role_description = str(role_descriptions[0]).strip()
                    print(f"Found role description: {self.role_description}")
                    return True
                else:
                    print(f"No specific role description found on the website for {self.role}.")
                    return False
            else:
                print(f"Error: Unable to reach company's website at {self.company_name}.com.")
                return False
        except Exception as e:
            print(f"Error during scraping: {e}")
            return False

    # Action: Use default logic for role description if no role is available
    def use_default_role_description(self):
        print(f"Action: Using default logic for the role of {self.role}.")
        self.role_description = f"The role of {self.role} at {self.company_name} involves mentoring and leading teams in innovative technology solutions."

    # Reflection: Check if we have enough data to generate the email
    def reflect_on_data(self):
        print("Reflection: Do we have enough data?")
        if not self.bio or not self.skills or not self.company_info:
            print("Warning: Some critical information is missing. Proceeding with default values.")
        return True

    # Final Action: Generate the email using Groq Cloud LLM based on gathered data
    def generate_email(self):
        print("Action: Generating the email with the gathered information.")
        prompt = f"""
        Write a professional email applying for the {self.role} position at {self.company_name}.
        The candidate’s bio is: {self.bio}.
        
        Focus on relevant skills and experiences from LinkedIn, such as {', '.join(self.skills)}, 
        that directly align with the role of {self.role}. 
        Highlight only the skills and experiences that relate to leadership, mentoring, technology, and innovation.
        
        The company info is: {self.company_info}.
        The role involves: {self.role_description}.
        
        End the email with this signature:
        Best regards,
        {self.user_name}
        Email: {self.email}
        Phone: {self.phone}
        LinkedIn: {self.linkedin}
        
        The email should not exceed {self.word_limit} words.
        """
        
        url = "https://api.groq.com/openai/v1/chat/completions"
        headers = {"Authorization": f"Bearer {groq_api_key}", "Content-Type": "application/json"}
        
        data = {
            "messages": [{"role": "user", "content": prompt}],
            "model": "llama3-8b-8192"
        }
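        # Optional sampling parameters (e.g. "temperature", "max_tokens") could
        # be added to this payload; Groq's defaults are used here.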
        
        response = requests.post(url, headers=headers, json=data, timeout=60)
        if response.status_code == 200:
            return response.json()["choices"][0]["message"]["content"].strip()
        else:
            print(f"Error: {response.status_code}, {response.text}")
            return "Error generating email. Please check your API key or try again later."

    # Main loop following ReAct pattern
    def run(self):
        self.reason_about_data()  # Reasoning step
        self.fetch_linkedin_data()  # Fetch LinkedIn data
        self.fetch_company_info()  # Fetch company data
        # Scrape the company's website for role-specific information or use defaults
        if not self.scrape_role_from_website():
            self.use_default_role_description()  
        # Reflect on whether the data is sufficient
        if self.reflect_on_data():
            return self.generate_email()  # Final action: generate email
        else:
            return "Error: Not enough data to generate the email."

# Define the Gradio interface and the main app logic
def gradio_ui():
    # Input fields
    name_input = gr.Textbox(label="Your Name", placeholder="Enter your name")
    company_input = gr.Textbox(label="Company Name or URL", placeholder="Enter the company name or website URL")
    role_input = gr.Textbox(label="Role Applying For", placeholder="Enter the role you are applying for")
    email_input = gr.Textbox(label="Your Email Address", placeholder="Enter your email address")
    phone_input = gr.Textbox(label="Your Phone Number", placeholder="Enter your phone number")
    linkedin_input = gr.Textbox(label="Your LinkedIn URL", placeholder="Enter your LinkedIn profile URL")
    word_limit_slider = gr.Slider(minimum=50, maximum=300, step=10, label="Email Word Limit", value=150)  # Word limit slider
    
    # Output field
    email_output = gr.Textbox(label="Generated Email", placeholder="Your generated email will appear here", lines=10)

    # Function to create and run the email agent
    def create_email(name, company_name, role, email, phone, linkedin_url, word_limit):
        agent = EmailAgent(linkedin_url, company_name, role, word_limit, name, email, phone, linkedin_url)
        return agent.run()

    # Gradio interface
    demo = gr.Interface(
        fn=create_email,
        inputs=[name_input, company_input, role_input, email_input, phone_input, linkedin_input, word_limit_slider],
        outputs=[email_output],
        title="Email Writing AI Agent with ReAct",
        description="Generate a professional email for a job application using LinkedIn data, company info, and role description.",
        allow_flagging="never"
    )
    
    # Launch the Gradio app
    demo.launch()

# Start the Gradio app when running the script
if __name__ == "__main__":
    gradio_ui()