# FastAPI packages
from fastapi import FastAPI, File
from pydantic import BaseModel
import json
# Skill-extraction packages
import psycopg2
import pandas as pd
import numpy as np
from sentence_transformers import SentenceTransformer
import spacy
from sklearn.metrics.pairwise import cosine_similarity
from spacy.matcher import PhraseMatcher
from skillNer.general_params import SKILL_DB
from skillNer.skill_extractor_class import SkillExtractor
from psycopg2.extensions import register_adapter, AsIs
# Pass numpy int64 values straight through to psycopg2 instead of failing on the unknown type
register_adapter(np.int64, AsIs)
import warnings
warnings.filterwarnings('ignore')
# Custom classes for the endpoints
from DbConnection import DbConnection
from UploadFile import UploadOpenFile
from SkillExtract import SkillExtractorDetails
from ExtractContentsFromFile import ExtractContentFromFile
from RemoveSkills import RemoveSkill
from AddSkillDetails import AddSkill
from SkillMatcher import SkillMatch
from OpenAIResponse import OpenAIText
from SkillExtractV1 import SkillExtractorDetailsV1
import ClassModals
import os

# Cache location for Hugging Face model downloads
os.environ['HF_HOME'] = '/hug/cache/'
app = FastAPI()

nlp = spacy.load("en_core_web_lg")
# init skill extractor
skill_extractor = SkillExtractor(nlp, SKILL_DB, PhraseMatcher)
model = SentenceTransformer('all-MiniLM-L6-v2')
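
# cosine_similarity is imported above but used inside the helper modules; as a rough,
# illustrative sketch only (not the actual SkillMatch implementation, and the helper
# name below is hypothetical), matching amounts to embedding two texts with the
# SentenceTransformer model and comparing the vectors:
def _similarity_sketch(text_a: str, text_b: str) -> float:
    # encode() returns one embedding per input string; cosine_similarity expects 2-D arrays
    emb = model.encode([text_a, text_b])
    return float(cosine_similarity([emb[0]], [emb[1]])[0][0])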

# NOTE: the route paths and HTTP methods on the decorators below are assumptions
# inferred from the function names and parameter types.
@app.get("/")
async def root():
    return {"SkillAPI": "SkillAPI Version 0.05"}
db_params = DbConnection.GetDbConnection()

def parse_csv(df):
    res = df.to_json(orient="records")
    parsed = json.loads(res)
    return parsed
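# Illustrative example: parse_csv(pd.DataFrame({'Data': ['Required Skills'], 'Values': ['python, sql']}))
# returns [{'Data': 'Required Skills', 'Values': 'python, sql'}], i.e. one dict per row.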

@app.post("/UploadJobDescription/")
def UploadJobDescription(file: bytes = File(...), FileName: str = "sample.pdf"):
    text = ExtractContentFromFile.ExtractDataFromFile(FileName, file)
    returnID = UploadOpenFile.uploadFile(text, FileName, db_params, True)
    returnSkills = SkillExtractorDetails.SkillExtract(db_params, skill_extractor, returnID, True)
    details = returnSkills.split('@')
    data = {'Data': ['Required Skills', 'Soft Skills', 'Good to have Skills'], 'Values': [details[0], details[1], details[2]]}
    df = pd.DataFrame(data)
    return parse_csv(df)
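# The endpoint responds with the parse_csv() record list, e.g. (illustrative values only):
# [{'Data': 'Required Skills', 'Values': 'python, sql'},
#  {'Data': 'Soft Skills', 'Values': 'communication'},
#  {'Data': 'Good to have Skills', 'Values': 'docker'}]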

@app.post("/UploadJobDescriptionV1/")
def UploadJobDescriptionV1(file: bytes = File(...), FileName: str = "sample.pdf"):
    text = ExtractContentFromFile.ExtractDataFromFile(FileName, file)
    # returnID = UploadOpenFile.uploadFile(text, FileName, db_params, True)
    returnSkills = SkillExtractorDetailsV1.GetSkillData(skill_extractor, text, db_params)
    # details = returnSkills.split('@')
    # data = {'Data': ['Required Skills', 'Soft Skills', 'Good to have Skills'], 'Values': [details[0], details[1], details[2]]}
    # df = pd.DataFrame(data)
    return parse_csv(returnSkills)

@app.get("/AllProfileMatchResults/")
def AllProfileMatchResults():
    dbQuery = "select * from profilematch"
    conn = psycopg2.connect(**db_params)
    df = pd.read_sql_query(dbQuery, conn)
    # Close the connection once the result set has been read into the DataFrame
    conn.close()
    return parse_csv(df)

@app.post("/UploadOpenText/")
def UploadOpenText(text: str, filename: str):
    returnID = UploadOpenFile.uploadFile(text, filename, db_params, True)
    returnSkills = SkillExtractorDetails.SkillExtract(db_params, skill_extractor, returnID, True)
    data = SkillMatch.SkillMatcher(model, db_params, returnID).split(';')
    dataJson = {'Data': ['Best Resume Fit', 'Score', 'ProfileID'], 'Values': [data[0], data[1], data[2]]}
    df = pd.DataFrame(dataJson)
    return parse_csv(df)
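# SkillMatcher is expected to return a ';'-separated string, e.g. (illustrative)
# "resume_17.pdf;0.82;17", whose parts map onto Best Resume Fit / Score / ProfileID.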

@app.post("/ExtractSkillsByJobID/")
def ExtractSkillsByJobID(skill_data: ClassModals.Modals.SkillDetails):
    returnSkills = SkillExtractorDetails.SkillExtract(db_params, skill_extractor, skill_data.skillid)
    details = returnSkills.split('@')
    skill_data.requiredSkills = details[0]
    skill_data.softSkills = details[1]
    # Good-to-have skills are the third '@'-separated segment
    skill_data.goodToHaveSkills = details[2]
    return skill_data

@app.delete("/RemoveSkills/")
def RemoveSkills(SkillName: str):
    RemoveSkill.RemoveSkillDetails(db_params, SkillName)
    return "Skill Removed Successfully"

@app.post("/AddSkills/")
def AddSkills(Skills: ClassModals.Modals.AddSkillDetails):
    skilldetailsStr = Skills.SkillName + ',' + Skills.SkillType + ',' + str(Skills.SkillScore)
    return AddSkill.AddSkillDetails(db_params, skilldetailsStr)

@app.post("/UploadProfileFromFile/")
def UploadProfileFromFile(file: bytes = File(...), FileName: str = "sample.pdf"):
    text = ExtractContentFromFile.ExtractDataFromFile(FileName, file)
    returnID = UploadOpenFile.uploadFile(text, FileName, db_params, False)
    returnSkills = SkillExtractorDetails.SkillExtract(db_params, skill_extractor, returnID, False)
    details = returnSkills.split('@')
    data = {'Data': ['Required Skills', 'Soft Skills', 'Good to have Skills'], 'Values': [details[0], details[1], details[2]]}
    df = pd.DataFrame(data)
    return parse_csv(df)

class FileText(BaseModel):
    # text holds the raw document contents, so it is typed as str
    text: str
    fname: str

@app.post("/UploadProfileOpenText/")
def UploadProfileOpenText(text: str, filename: str):
    # text = ExtractContentFromFile.ExtractDataFromFile(FileName, file)
    returnID = UploadOpenFile.uploadFile(text, filename, db_params, False)
    returnSkills = SkillExtractorDetails.SkillExtract(db_params, skill_extractor, returnID, False)
    details = returnSkills.split('@')
    data = {'Data': ['Required Skills', 'Soft Skills', 'Good to have Skills'], 'Values': [details[0], details[1], details[2]]}
    df = pd.DataFrame(data)
    return parse_csv(df)

@app.get("/GetMatchProfileByJobId/")
def GetMatchProfileByJobId(JobId: int):
    data = SkillMatch.SkillMatcher(model, db_params, JobId).split(';')
    dataJson = {'Data': ['Best Resume Fit', 'Score', 'ProfileID'], 'Values': [data[0], data[1], data[2]]}
    df = pd.DataFrame(dataJson)
    return parse_csv(df)

@app.post("/GetOpenAPIResponse/")
def GetOpenAPIResponse(query: str, content: str):
    return OpenAIText.OpenAITextResponse(query, content)
    # return JSONResponse(content={"message": "Here's your interdimensional portal.", "mes1": "data2"})

# Interactive docs: https://vaibhav84-resumeapi.hf.space/docs
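
# A minimal local-run sketch, assuming uvicorn is installed; on Hugging Face Spaces the
# app is normally served by the Space runtime (typically on port 7860), so this guard is
# only a convenience for local testing.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)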