# ResumeAPI / app.py
# FastAPI packages
from fastapi import FastAPI, File, UploadFile
from pydantic import BaseModel
import json
from typing_extensions import Annotated
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
#SkillExtraction Packages
import PyPDF2
from PyPDF2 import PdfReader
import psycopg2
from psycopg2 import sql
import pandas as pd
from datetime import date
import numpy as np
import spacy
import re
import docx2txt
from sentence_transformers import SentenceTransformer, util
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import io
from spacy.matcher import PhraseMatcher
from skillNer.general_params import SKILL_DB
from skillNer.skill_extractor_class import SkillExtractor
from psycopg2.extensions import register_adapter, AsIs
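# psycopg2 cannot adapt numpy int64 values on its own; registering AsIs lets them be
# passed into queries as plain integers.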
register_adapter(np.int64, AsIs)
import warnings
warnings.filterwarnings('ignore')
from io import BytesIO
import requests
#Custom Classes for endpoints
from DbConnection import DbConnection
from UploadFile import UploadOpenFile
from SkillExtract import SkillExtractorDetails
from ExtractContentsFromFile import ExtractContentFromFile
import os
os.environ['HF_HOME'] = '/hug/cache/'
app = FastAPI()
class FileDetails(BaseModel):
    filecontents: str
    filename: str
    fileid: str
    message: str

class SkillDetails(BaseModel):
    skillid: int
    requiredSkills: str
    softSkills: str
    goodToHaveSkills: str

class SkillData(BaseModel):
    filename: str
nlp = spacy.load("en_core_web_lg")
# init skill extractor
skill_extractor = SkillExtractor(nlp, SKILL_DB, PhraseMatcher)
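# Minimal sketch of what skillNer's extractor produces (the endpoints below go through
# SkillExtractorDetails.SkillExtract rather than calling annotate directly):
#
#   annotations = skill_extractor.annotate("Proficient in Python, SQL and teamwork")
#   # annotations["results"] holds the matched skills ("full_matches" and "ngram_scored"),
#   # each entry carrying the skill id from SKILL_DB and the matched span.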
@app.get("/")
async def root():
    return {"SkillAPI": "SkillAPI Version 0.05"}
#https://vaibhav84-resumeapi.hf.space/docs
db_params = DbConnection.GetDbConnection()
def parse_csv(df):
    res = df.to_json(orient="records")
    parsed = json.loads(res)
    return parsed
@app.post("/uploadJobDescription/")
def uploadJobDescription(file: bytes = File(...), FileName: str = "sample.pdf"):
    text = ExtractContentFromFile.ExtractDataFromFile(FileName, file)
    returnID = UploadOpenFile.uploadFile(text, FileName, db_params)
    returnSkills = SkillExtractorDetails.SkillExtract(db_params, skill_extractor, returnID)
    details = returnSkills.split('@')
    data = {'Data': ['Required Skills', 'Soft Skills', 'Good to have Skills'],
            'Values': [details[0], details[1], details[2]]}
    df = pd.DataFrame(data)
    return parse_csv(df)
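# Example call (a sketch; assumes the API is reachable at the Space URL in the docs link above):
#
#   import requests
#   with open("job_description.pdf", "rb") as f:
#       resp = requests.post(
#           "https://vaibhav84-resumeapi.hf.space/uploadJobDescription/",
#           files={"file": f},
#           params={"FileName": "job_description.pdf"},
#       )
#   print(resp.json())  # [{"Data": "Required Skills", "Values": "..."}, ...]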
@app.get("/AllProfileMatchResults")
def AllProfileMatchResults():
    dbQuery = "select * from profilematch"
    conn = psycopg2.connect(**db_params)
    df = pd.read_sql_query(dbQuery, conn)
    conn.close()
    return parse_csv(df)
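# Example call (sketch): a plain GET returns every row of the profilematch table as JSON records.
#
#   import requests
#   resp = requests.get("https://vaibhav84-resumeapi.hf.space/AllProfileMatchResults")
#   print(resp.json())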
@app.post("/UploadOpenText/")
def UploadOpenText(file_data: FileDetails):
    returnID = UploadOpenFile.uploadFile(file_data.filecontents, file_data.filename, db_params)
    file_data.filecontents = ""
    file_data.fileid = str(returnID)
    file_data.message = "File Uploaded Successfully!"
    return file_data
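# Example request body (sketch; fileid and message are filled in by the endpoint,
# so empty strings are fine in the request):
#
#   {
#     "filecontents": "Full text of the job description ...",
#     "filename": "job_description.txt",
#     "fileid": "",
#     "message": ""
#   }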
@app.post("/ExtractSkillsByJobID/")
def ExtractSkillsByJobID(skill_data: SkillDetails):
    returnSkills = SkillExtractorDetails.SkillExtract(db_params, skill_extractor, skill_data.skillid)
    details = returnSkills.split('@')
    skill_data.requiredSkills = details[0]
    skill_data.softSkills = details[1]
    skill_data.goodToHaveSkills = details[2]
    return skill_data
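# Example request body (sketch; skillid is the numeric file id returned on upload,
# and the skill fields are populated by the endpoint, so they can be sent empty):
#
#   {
#     "skillid": 42,
#     "requiredSkills": "",
#     "softSkills": "",
#     "goodToHaveSkills": ""
#   }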
@app.post("/RemoveSkillsByName/")
def RemoveSkills(SkillName: str):
    conn = psycopg2.connect(**db_params)
    cursor = conn.cursor()
    print("Removing Skills " + SkillName)
    query = "update skillmaster set weightage = 0 where skilldetails = (%s)"
    params = (SkillName,)
    cursor.execute(query, params)
    conn.commit()
    cursor.close()
    conn.close()
    return {"message": "Skill '" + SkillName + "' removed successfully!"}
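# Example call (sketch; SkillName is passed as a query parameter):
#
#   import requests
#   requests.post(
#       "https://vaibhav84-resumeapi.hf.space/RemoveSkillsByName/",
#       params={"SkillName": "java"},
#   )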