Spaces: Runtime error
Commit: revise flow logic
main.py
CHANGED
@@ -7,10 +7,16 @@ import requests
 from bs4 import BeautifulSoup
 from cleantext import clean
 from docx import Document
+import os
+import cohere
+import string
 import numpy as np
+from numpy.linalg import norm
+from nltk.tokenize import SpaceTokenizer
+import nltk
 from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
-import
-
+from dotenv import load_dotenv
+load_dotenv()
 
 app = FastAPI()
 app.mount("/static", StaticFiles(directory='static'), name="static")
@@ -18,9 +24,11 @@ templates = Jinja2Templates(directory="templates/")
 
 onet = pd.read_csv('static/ONET_JobTitles.csv')
 simdat = pd.read_csv('static/cohere_embeddings.csv')
+coheredat = pd.read_csv("static/cohere_tSNE_dat.csv")
 
 model = AutoModelForSequenceClassification.from_pretrained('static/model_shards', low_cpu_mem_usage=True)
 tokenizer = AutoTokenizer.from_pretrained('static/tokenizer_shards', low_cpu_mem_usage=True)
+classifier = pipeline('text-classification', model = model, tokenizer = tokenizer)
 
 ### job information center ###
 # get
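Note: with this hunk the classifier pipeline is built once at module load, next to the model and tokenizer, instead of inside the POST handler (see the removal in the next hunk). A minimal sketch of that pattern; the public checkpoint named below is a stand-in for the Space's local static/model_shards, not the model it actually ships:

    # Sketch: construct a transformers pipeline once at import time, reuse per request.
    from transformers import pipeline

    # Stand-in checkpoint, assumed for illustration only.
    classifier = pipeline('text-classification',
                          model='distilbert-base-uncased-finetuned-sst-2-english')

    def handle(text: str) -> str:
        # No per-request model construction; the pipeline is already warm.
        return classifier(text)[0]['label']

    print(handle('deploy fastapi apps'))  # e.g. 'POSITIVE'

Building the pipeline inside the handler re-wraps the model on every upload; hoisting it to module scope pays that cost once at startup.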
@@ -67,20 +75,28 @@ def render_job_info(request: Request, jobtitle: str = Form(enum=[x for x in onet
 ### job neighborhoods ###
 @app.get("/explore-job-neighborhoods/", response_class=HTMLResponse)
 async def render_job_neighborhoods(request: Request):
+    def format_title(logo, title, subtitle, title_font_size = 28, subtitle_font_size=14):
+        logo = f'<a href="/" target="_self">{logo}</a>'
+        subtitle = f'<span style="font-size: {subtitle_font_size}px;">{subtitle}</span>'
+        title = f'<span style="font-size: {title_font_size}px;">{title}</span>'
+        return f'{logo}{title}<br>{subtitle}'
+
+    fig = px.scatter(coheredat, x = 'longitude', y = 'latitude', color = 'Category', hover_data = ['Category', 'Title'],
+                     title=format_title("Pathfinder", " Job Neighborhoods: Explore the Map!", "(Generated using Co-here AI's LLM & ONET's Task Statements)"))
+    fig['layout'].update(height=1000, width=1500, font=dict(family='Courier New, monospace', color='black'))
+    fig.write_html('templates/job_neighborhoods.html')
+
     return templates.TemplateResponse('job_neighborhoods.html', context={'request': request})
 
 ### find my match ###
 # get
-@app.get("/find-my-match
+@app.get("/find-my-match", response_class=HTMLResponse)
 async def match_page(request: Request):
     return templates.TemplateResponse('find_my_match.html', context={'request': request})
 
 # post
-@app.post('/find-my-match
+@app.post('/find-my-match', response_class=HTMLResponse)
 def get_resume(request: Request, resume: UploadFile = File(...)):
-
-    classifier = pipeline('text-classification', model = model, tokenizer = tokenizer)
-
    path = f"static/{resume.filename}"
    with open(path, 'wb') as buffer:
        buffer.write(resume.file.read())
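Note: the neighborhoods route now regenerates the map on every GET: format_title assembles an HTML title, px.scatter plots the t-SNE coordinates, and fig.write_html overwrites the template before it is served. A self-contained sketch of that render-to-HTML flow; the toy DataFrame stands in for static/cohere_tSNE_dat.csv, whose longitude/latitude/Category/Title columns are taken from the diff:

    # Sketch: render a plotly scatter to a standalone HTML file, as the route does.
    import pandas as pd
    import plotly.express as px

    # Toy stand-in for static/cohere_tSNE_dat.csv
    coheredat = pd.DataFrame({
        'longitude': [0.1, 0.9, 0.5],
        'latitude':  [0.2, 0.8, 0.4],
        'Category':  ['Tech', 'Tech', 'Health'],
        'Title':     ['Data Scientist', 'ML Engineer', 'Nurse'],
    })

    fig = px.scatter(coheredat, x='longitude', y='latitude', color='Category',
                     hover_data=['Category', 'Title'], title='Job Neighborhoods')
    fig['layout'].update(height=1000, width=1500,
                         font=dict(family='Courier New, monospace', color='black'))
    fig.write_html('job_neighborhoods.html')  # the route writes into templates/

Rebuilding the figure per request keeps the page in sync with the CSV, at the cost of a full re-render on every load; rendering once at startup would serve the same static HTML faster.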
@@ -90,6 +106,22 @@ def get_resume(request: Request, resume: UploadFile = File(...)):
         text.append(para.text)
     resume = "\n".join(text)
 
+    def clean_my_text(text):
+        clean_text = ' '.join(text.splitlines())
+        clean_text = clean_text.replace('-', " ").replace("/"," ")
+        clean_text = clean(clean_text.translate(str.maketrans('', '', string.punctuation)))
+        return clean_text
+
+    def coSkillEmbed(text):
+        co = cohere.Client(os.getenv("COHERE_TOKEN"))
+        response = co.embed(
+            model='large',
+            texts=[text])
+        return response.embeddings
+
+    def cosine(A, B):
+        return np.dot(A,B)/(norm(A)*norm(B))
+
     embeds = coSkillEmbed(resume)
     simResults = []
 
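Note: the helpers formerly in utils.py are now defined inside the handler: coSkillEmbed embeds the resume through Cohere's API and cosine scores it against the precomputed title embeddings in simdat. The similarity itself is plain numpy; a worked sketch with toy vectors (no API key needed; both vectors are made-up stand-ins):

    # Sketch: the cosine similarity used to rank job titles against the resume.
    import numpy as np
    from numpy.linalg import norm

    def cosine(A, B):
        return np.dot(A, B) / (norm(A) * norm(B))

    resume_vec = np.array([0.2, 0.7, 0.1])   # stand-in for a Cohere embedding
    title_vec  = np.array([0.1, 0.8, 0.05])  # stand-in row of cohere_embeddings.csv

    print("{:0.2f}".format(cosine(resume_vec, title_vec)))  # 0.99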
@@ -107,14 +139,28 @@ def get_resume(request: Request, resume: UploadFile = File(...)):
     for x in range(len(simResults)):
         simResults.iloc[x,1] = "{:0.2f}".format(simResults.iloc[x,1])
 
-
-
-
-
+    # EXTRACT SKILLS FROM RESUME
+    def skillNER(resume):
+        resume = clean_my_text(resume)
+        stops = set(nltk.corpus.stopwords.words('english'))
+        stops = stops.union({'eg', 'ie', 'etc', 'experience', 'experiences', 'experienced', 'experiencing', 'knowledge',
+                             'ability', 'abilities', 'skill', 'skills', 'skilled', 'including', 'includes', 'included', 'include',
+                             'education', 'follow', 'following', 'follows', 'followed', 'make', 'made', 'makes', 'making', 'maker',
+                             'available', 'large', 'larger', 'largescale', 'client', 'clients', 'responsible', 'x', 'many', 'team', 'teams'})
+        resume = [word for word in SpaceTokenizer().tokenize(resume) if word not in stops]
+        resume = [word for word in resume if ")" not in word]
+        resume = [word for word in resume if "(" not in word]
+
+        labels = []
+        for i in range(len(resume)):
+            classification = classifier(resume[i])[0]['label']
             if classification == 'LABEL_1':
                 labels.append("Skill")
             else:
                 labels.append("Not Skill")
-
+        labels_dict = dict(zip(resume, labels))
+        return labels_dict
+
+    skills = skillNER(resume)
 
-    return templates.TemplateResponse('find_my_match.html', context={'request': request, 'resume': resume, 'skills': skills, 'simResults': simResults})
+    return templates.TemplateResponse('find_my_match.html', context={'request': request, 'resume': resume, 'skills': skills, 'simResults': simResults})
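Note: skillNER tokenizes the cleaned resume on spaces, drops stopwords and parenthesized tokens, then labels each surviving token with the module-level classifier, mapping LABEL_1 to "Skill". A minimal sketch of that per-token loop with a stubbed classifier (the real one is the fine-tuned model in static/model_shards; the stub's skill set is invented for illustration):

    # Sketch: per-token skill labeling as in skillNER, classifier stubbed out.
    from nltk.tokenize import SpaceTokenizer

    def classifier(token):
        # Stand-in for the transformers pipeline; mimics its return shape.
        label = 'LABEL_1' if token.lower() in {'python', 'sql', 'fastapi'} else 'LABEL_0'
        return [{'label': label}]

    tokens = SpaceTokenizer().tokenize('shipped FastAPI services using Python and SQL')
    labels = ['Skill' if classifier(t)[0]['label'] == 'LABEL_1' else 'Not Skill'
              for t in tokens]
    print(dict(zip(tokens, labels)))
    # {'shipped': 'Not Skill', 'FastAPI': 'Skill', ..., 'SQL': 'Skill'}

Classifying one token at a time costs one forward pass per word; since transformers pipelines accept a list of strings, passing the whole token list in a single call would cut request latency.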
utils.py
DELETED
@@ -1,34 +0,0 @@
-from cleantext import clean
-import cohere
-import string
-import numpy as np
-from numpy.linalg import norm
-from nltk.tokenize import SpaceTokenizer
-import nltk
-import os
-from dotenv import load_dotenv
-load_dotenv()
-
-def coSkillEmbed(text):
-    co = cohere.Client(os.getenv("COHERE_TOKEN"))
-    response = co.embed(
-        model='large',
-        texts=[text])
-    return response.embeddings
-
-def cosine(A, B):
-    return np.dot(A,B)/(norm(A)*norm(B))
-
-def clean_my_text(resume):
-    clean_text = ' '.join(resume.splitlines())
-    clean_text = clean_text.replace('-', " ").replace("/"," ")
-    clean_text = clean(clean_text.translate(str.maketrans('', '', string.punctuation)))
-    stops = set(nltk.corpus.stopwords.words('english'))
-    stops = stops.union({'eg', 'ie', 'etc', 'experience', 'experiences', 'experienced', 'experiencing', 'knowledge',
-                         'ability', 'abilities', 'skill', 'skills', 'skilled', 'including', 'includes', 'included', 'include'
-                         'education', 'follow', 'following', 'follows', 'followed', 'make', 'made', 'makes', 'making', 'maker',
-                         'available', 'large', 'larger', 'largescale', 'client', 'clients', 'responsible', 'x', 'many', 'team', 'teams'})
-    resume = [word for word in SpaceTokenizer().tokenize(resume) if word not in stops]
-    resume = [word for word in resume if ")" not in word]
-    resume = [word for word in resume if "(" not in word]
-    return resume
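Note: this deletion is the other half of the commit's "revise flow logic": coSkillEmbed, cosine, and the cleaning code all reappear inline in main.py's request handler, with clean_my_text's stopword/tokenize half split out into the new skillNER. No helper is lost, only relocated.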