Kuberwastaken committed
Commit 83f04c7 · 1 Parent(s): 0c0f923

ACTUALLY switched to Resume Roasting

Files changed (4)
  1. app.py +7 -32
  2. main.py +51 -14
  3. prompts.yaml +10 -16
  4. tools/LinkedInScraperTool.py +0 -77
app.py CHANGED
@@ -1,22 +1,15 @@
-from dotenv import load_dotenv
-import os
+import yaml
 from smolagents import CodeAgent, HfApiModel
 from smolagents.tools import Tool
-import yaml
-
-# Load environment variables from .env in the root
-load_dotenv()
-
-# Retrieve the Hugging Face token from the environment
-hf_token = os.getenv("HF_TOKEN")
+from tools.resumescraper import ResumeScraperTool
 
 class FinalAnswerTool(Tool):
     name = "final_answer"
-    description = "Use this tool to provide your final answer"
+    description = "Use this tool to provide your final roast"
     inputs = {
         "answer": {
             "type": "string",
-            "description": "The final answer to the problem"
+            "description": "The final roast for the resume"
         }
     }
     output_type = "string"
@@ -24,29 +17,11 @@ class FinalAnswerTool(Tool):
     def forward(self, answer: str) -> str:
         return answer
 
-class LinkedInScraperTool(Tool):
-    name = "linkedin_scraper"
-    description = "Scrapes LinkedIn profiles to extract professional information"
-    inputs = {
-        "linkedin_url": {
-            "type": "string",
-            "description": "The URL of the LinkedIn profile"
-        }
-    }
-    output_type = "object"
-
-    def forward(self, linkedin_url: str):
-        # Dummy implementation; replace with actual scraping logic
-        return {
-            "experience": "10 years in industry",
-            "skills": "Python, AI",
-            "description": "Experienced professional with a robust background in technology."
-        }
-
 def create_agent():
     final_answer = FinalAnswerTool()
-    linkedin_scraper = LinkedInScraperTool()
+    resume_scraper = ResumeScraperTool()
 
+    # Use Qwen/Qwen2.5-Coder-32B-Instruct for roasting
     model = HfApiModel(
         max_tokens=2096,
         temperature=0.5,
@@ -59,7 +34,7 @@ def create_agent():
 
     agent = CodeAgent(
         model=model,
-        tools=[linkedin_scraper, final_answer],
+        tools=[resume_scraper, final_answer],
         max_steps=6,
         verbosity_level=1,
         prompt_templates=prompt_templates
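Note that the new create_agent() imports ResumeScraperTool from tools/resumescraper.py, a file this commit does not show. For orientation only, here is a minimal sketch of such a tool following the same smolagents Tool pattern as FinalAnswerTool above; the tool name, the resume_text input key, and the heading-based sectioning are assumptions, not the repository's actual code:

    from smolagents.tools import Tool

    class ResumeScraperTool(Tool):
        # Hypothetical sketch -- tools/resumescraper.py is not part of this diff.
        name = "resume_scraper"
        description = "Splits raw resume text into labeled sections"
        inputs = {
            "resume_text": {
                "type": "string",
                "description": "The full text of the resume"
            }
        }
        output_type = "object"

        def forward(self, resume_text: str) -> dict:
            # Naive heading-based sectioning; assumed logic, not the real parser.
            sections = {"summary": "", "experience": "", "education": "", "skills": ""}
            current = "summary"
            for line in resume_text.splitlines():
                key = line.strip().lower()
                if key in sections:
                    current = key
                else:
                    sections[current] += line + "\n"
            return sections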
main.py CHANGED
@@ -1,21 +1,58 @@
 import gradio as gr
+import io
+from PyPDF2 import PdfReader
 from app import create_agent
 
-def roast_profile(linkedin_url):
+def extract_text_from_pdf(file_obj) -> str:
+    reader = PdfReader(file_obj)
+    text = ""
+    for page in reader.pages:
+        page_text = page.extract_text()
+        if page_text:
+            text += page_text + "\n"
+    return text
+
+def process_resume(input_method, resume_text, pdf_file):
+    if input_method == "Text":
+        text = resume_text
+    else:
+        if pdf_file is None:
+            return "No PDF uploaded."
+        # Check if pdf_file is a string (i.e. a file path) or a file-like object
+        if isinstance(pdf_file, str):
+            with open(pdf_file, "rb") as f:
+                file_bytes = f.read()
+        else:
+            file_bytes = pdf_file.read()
+        file_obj = io.BytesIO(file_bytes)
+        text = extract_text_from_pdf(file_obj)
+
+    if not text.strip():
+        return "No resume text found."
+
     agent = create_agent()
-    response = agent.run(
-        f"Scrape this LinkedIn profile: {linkedin_url} and create a humorous but not mean-spirited roast based on their experience, skills, and description. Keep it professional and avoid personal attacks."
-    )
+    # Instruct the agent to roast the resume using the resume text.
+    response = agent.run(f"Roast this resume: {text}")
     return response
 
-demo = gr.Interface(
-    fn=roast_profile,
-    inputs=gr.Textbox(label="LinkedIn Profile URL"),
-    outputs=gr.Textbox(label="Roast Result"),
-    title="LinkedIn Profile Roaster",
-    description="Enter a LinkedIn profile URL and get a humorous professional roast!",
-    examples=[["https://www.linkedin.com/in/example-profile"]]
-)
 
-if __name__ == "__main__":
-    demo.launch(share=True)
+def toggle_inputs(method):
+    if method == "Text":
+        return gr.update(visible=True), gr.update(visible=False)
+    else:
+        return gr.update(visible=False), gr.update(visible=True)
+
+with gr.Blocks() as demo:
+    gr.Markdown("# Resume Roaster")
+    gr.Markdown("Enter your resume as text or upload a PDF to receive a humorous, professional roast!")
+
+    input_method = gr.Radio(choices=["Text", "PDF"], label="Input Method", value="Text")
+    resume_text = gr.Textbox(label="Resume Text", lines=10, visible=True)
+    pdf_file = gr.File(label="Upload Resume PDF", file_types=[".pdf"], visible=False)
+    output = gr.Textbox(label="Roast Result", lines=10)
+    submit_btn = gr.Button("Roast It!")
+
+    input_method.change(fn=toggle_inputs, inputs=input_method, outputs=[resume_text, pdf_file])
+    submit_btn.click(fn=process_resume, inputs=[input_method, resume_text, pdf_file], outputs=output)
+
+demo.launch(share=True)
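The isinstance(pdf_file, str) branch in process_resume exists because gr.File hands the callback a temporary file path in recent Gradio versions but a file-like object in older ones; normalizing both into a BytesIO keeps the handler version-agnostic. To sanity-check the PyPDF2 extraction in isolation, note that importing main.py would itself launch the demo (demo.launch() runs at module level), so a standalone snippet is safer; "resume.pdf" below is a placeholder filename:

    import io
    from PyPDF2 import PdfReader

    def extract_text_from_pdf(file_obj) -> str:
        # Same logic as main.py: concatenate per-page text, skipping pages
        # where extract_text() returns None (e.g. scanned image pages).
        reader = PdfReader(file_obj)
        text = ""
        for page in reader.pages:
            page_text = page.extract_text()
            if page_text:
                text += page_text + "\n"
        return text

    with open("resume.pdf", "rb") as f:  # placeholder path
        print(extract_text_from_pdf(io.BytesIO(f.read()))[:300])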
prompts.yaml CHANGED
@@ -1,22 +1,16 @@
 system_prompt: |
-  You are a witty professional roaster who analyzes LinkedIn profiles.
-  Your job is to create humorous but not mean-spirited roasts based on people's professional experiences.
-  Focus on gentle teasing about common LinkedIn behaviors like:
-  - Overuse of buzzwords
-  - Lengthy job titles
-  - Humble brags
-  - Excessive use of emojis
-  - Connection collecting
-  Avoid personal attacks or inappropriate content.
+  You are a witty professional roaster who analyzes resumes.
+  Your job is to create a humorous, professional roast of a resume.
+  Focus on teasing overly verbose descriptions, excessive buzzwords, and generic statements,
+  while keeping the tone light and appropriate for a professional setting.
 
 task_prompt: |
-  Using the provided LinkedIn profile information, create a humorous roast that:
-  1. References specific details from their profile
-  2. Keeps the tone light and professional
-  3. Focuses on common LinkedIn behaviors and professional quirks
-  4. Avoids mean-spirited or personal attacks
-  5. Would be appropriate to share in a professional setting
+  Using the provided resume details, craft a roast that:
+  1. References key sections such as Summary, Experience, Education, and Skills.
+  2. Is humorous but not mean-spirited.
+  3. Maintains a professional tone.
+  4. Adds creative flair that makes the roast both entertaining and insightful.
 
 final_answer:
-  pre_messages: "Here is your final roast:"
+  pre_messages: "Final Roast:"
   post_messages: ""
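app.py passes a prompt_templates dict into CodeAgent, but the lines that load it fall outside the hunks shown above. Presumably this file is read with yaml.safe_load, along these lines (an assumption, since the loading code is not in the diff):

    import yaml

    # Assumed loading step; the actual lines are outside the diff hunks above.
    with open("prompts.yaml", "r") as f:
        prompt_templates = yaml.safe_load(f)

    # prompt_templates["system_prompt"] then carries the roaster persona, and
    # prompt_templates["final_answer"]["pre_messages"] is "Final Roast:".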
tools/LinkedInScraperTool.py DELETED
@@ -1,77 +0,0 @@
-from bs4 import BeautifulSoup
-import requests
-from typing import Dict
-from smolagents.tools import Tool
-
-class LinkedInScraperTool(Tool):
-    name = "linkedin_scraper"
-    description = "Scrapes LinkedIn profiles to extract professional information"
-    inputs = {"linkedin_url": str}
-    outputs = dict
-
-    def __call__(self, linkedin_url: str) -> dict:
-        try:
-            # Add headers to mimic a browser request
-            headers = {
-                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
-            }
-
-            response = requests.get(linkedin_url, headers=headers)
-            soup = BeautifulSoup(response.text, 'html.parser')
-
-            # Extract profile information
-            profile_data = {
-                'name': self._extract_name(soup),
-                'headline': self._extract_headline(soup),
-                'about': self._extract_about(soup),
-                'experience': self._extract_experience(soup),
-                'education': self._extract_education(soup),
-                'skills': self._extract_skills(soup)
-            }
-
-            return profile_data
-
-        except Exception as e:
-            return {"error": f"Failed to scrape profile: {str(e)}"}
-
-    def _extract_name(self, soup):
-        name_element = soup.find('h1', {'class': 'text-heading-xlarge'})
-        return name_element.text.strip() if name_element else "Name not found"
-
-    def _extract_headline(self, soup):
-        headline_element = soup.find('div', {'class': 'text-body-medium'})
-        return headline_element.text.strip() if headline_element else "Headline not found"
-
-    def _extract_about(self, soup):
-        about_element = soup.find('div', {'class': 'pv-about-section'})
-        return about_element.text.strip() if about_element else "About section not found"
-
-    def _extract_experience(self, soup):
-        experience_elements = soup.find_all('li', {'class': 'experience-item'})
-        experience = []
-        for exp in experience_elements:
-            title_element = exp.find('h3', {'class': 'experience-title'})
-            company_element = exp.find('p', {'class': 'experience-company'})
-            if title_element and company_element:
-                experience.append({
-                    'title': title_element.text.strip(),
-                    'company': company_element.text.strip()
-                })
-        return experience if experience else ["Experience not found"]
-
-    def _extract_education(self, soup):
-        education_elements = soup.find_all('li', {'class': 'education-item'})
-        education = []
-        for edu in education_elements:
-            school_element = edu.find('h3', {'class': 'education-school'})
-            degree_element = edu.find('p', {'class': 'education-degree'})
-            if school_element and degree_element:
-                education.append({
-                    'school': school_element.text.strip(),
-                    'degree': degree_element.text.strip()
-                })
-        return education if education else ["Education not found"]
-
-    def _extract_skills(self, soup):
-        skills_elements = soup.find_all('span', {'class': 'skill-name'})
-        return [skill.text.strip() for skill in skills_elements] if skills_elements else ["Skills not found"]