# FastAPI packages
from fastapi import FastAPI, File
from pydantic import BaseModel
import json
import os

# Point the Hugging Face cache at a writable directory before the model libraries are imported
os.environ['HF_HOME'] = '/hug/cache/'

# Skill extraction packages
import psycopg2
import pandas as pd
import numpy as np
from sentence_transformers import SentenceTransformer
import spacy
from sklearn.metrics.pairwise import cosine_similarity
from spacy.matcher import PhraseMatcher
from skillNer.general_params import SKILL_DB
from skillNer.skill_extractor_class import SkillExtractor
from psycopg2.extensions import register_adapter, AsIs
# Let psycopg2 adapt numpy int64 values when writing to Postgres
register_adapter(np.int64, AsIs)
import warnings
warnings.filterwarnings('ignore')


# Custom classes for the endpoints
from DbConnection import DbConnection
from UploadFile import UploadOpenFile
from SkillExtract import SkillExtractorDetails
from ExtractContentsFromFile import ExtractContentFromFile
from RemoveSkills import RemoveSkill
from AddSkillDetails import AddSkill
from SkillMatcher import SkillMatch
import ClassModals

app = FastAPI()

# Load the spaCy pipeline, init the skill extractor, and load the sentence-embedding model
nlp = spacy.load("en_core_web_lg")
skill_extractor = SkillExtractor(nlp, SKILL_DB, PhraseMatcher)
model = SentenceTransformer('all-MiniLM-L6-v2')

@app.get("/")
async def root():
 return {"SkillAPI":"SkillAPi Version 0.05"}

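# Shared DB connection settings and a helper that serializes a DataFrame into a list of JSON records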
db_params = DbConnection.GetDbConnection()
def parse_csv(df):
    res = df.to_json(orient="records")
    parsed = json.loads(res)
    return parsed


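# Upload a job description file, store its extracted text, and return the extracted skills grouped by category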
@app.post("/UploadJobDescription/")
def UploadJobDescription(file: bytes =  File(...), FileName: str = "sample.pdf"):   
    text= ExtractContentFromFile.ExtractDataFromFile(FileName,file)
    returnID = UploadOpenFile.uploadFile(text,FileName,db_params,True)
    returnSkills = SkillExtractorDetails.SkillExtract(db_params,skill_extractor,returnID,True)     
    details = returnSkills.split('@')
    data = {'Data':['Required Skills', 'Soft Skills', 'Good to have Skills'], 'Values':[details[0], details[1], details[2]]}
    df = pd.DataFrame(data)
    return parse_csv(df) 

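# Return every row of the profilematch table as JSON records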
@app.get("/AllProfileMatchResults") 
def AllProfileMatchResults():
   dbQuery = "select * from profilematch"
   conn = psycopg2.connect(**db_params)   
   df = pd.read_sql_query(dbQuery, conn)
   return parse_csv(df) 

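# Accept a job description as raw text, extract its skills, and return the best matching profile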
@app.get("/UploadJobDescriptionOpenText/")
def UploadOpenText(text : str, filename : str):   
   
   returnID = UploadOpenFile.uploadFile(text,filename,db_params,True)
   returnSkills = SkillExtractorDetails.SkillExtract(db_params,skill_extractor,returnID,True)
   data = SkillMatch.SkillMatcher(model,db_params,returnID).split(';')
   dataJson = {'Data':['Best Resume Fit', 'Score', 'ProfileID'], 'Values':[data[0], data[1], data[2]]}
   df = pd.DataFrame(dataJson)
   return parse_csv(df) 


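# Re-run skill extraction for an existing job ID and populate the response model with the results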
@app.post("/ExtractSkillsByJobID/")
def ExtractSkillsByJobID(skill_data: ClassModals.Modals.SkillDetails):
   returnSkills = SkillExtractorDetails.SkillExtract(db_params,skill_extractor,skill_data.skillid)     
   details = returnSkills.split('@')
   skill_data.requiredSkills = details[0]
   skill_data.softSkills = details[1]
   skill_data.goodToHaveSkills = details[1]
   return skill_data

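# Delete a skill from the database by its name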
@app.delete("/RemoveSkillsByName/")
def RemoveSkills(SkillName: str):
    RemoveSkill.RemoveSkillDetails(db_params,SkillName)
    return "Skill Removed Successfully"

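# Add a new skill (name, type, score) to the database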
@app.post("/AddSkillDeails/")
def AddSkills(Skills : ClassModals.Modals.AddSkillDetails):    
    skilldetailsStr = Skills.SkillName + ',' + Skills.SkillType + ',' + str(Skills.SkillScore)    
    return AddSkill.AddSkillDetails(db_params,skilldetailsStr)


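# Upload a candidate profile file, store its extracted text, and return the extracted skills grouped by category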
@app.post("/UploadProfileFromFile/")
def UploadProfileFromFile(file: bytes =  File(...), FileName: str = "sample.pdf"):    
    text= ExtractContentFromFile.ExtractDataFromFile(FileName,file)
    returnID = UploadOpenFile.uploadFile(text,FileName,db_params,False) 
    returnSkills = SkillExtractorDetails.SkillExtract(db_params,skill_extractor,returnID,False)     
    details = returnSkills.split('@')
    data = {'Data':['Required Skills', 'Soft Skills', 'Good to have Skills'], 'Values':[details[0], details[1], details[2]]}
    df = pd.DataFrame(data)
    return parse_csv(df) 

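# Pydantic model holding raw text and a file name for plain-text uploads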
class FileText(BaseModel):
    text: str
    fname: str
    
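# Accept a candidate profile as raw text, store it, and return the extracted skills grouped by category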
@app.get("/UploadProfileOpenText/")
def UploadProfileOpenText(text : str, filename : str):    

    #text= ExtractContentFromFile.ExtractDataFromFile(FileName,file)
    returnID = UploadOpenFile.uploadFile(text,filename,db_params,False) 
    returnSkills = SkillExtractorDetails.SkillExtract(db_params,skill_extractor,returnID,False)     
    details = returnSkills.split('@')
    data = {'Data':['Required Skills', 'Soft Skills', 'Good to have Skills'], 'Values':[details[0], details[1], details[2]]}
    df = pd.DataFrame(data)
    return parse_csv(df) 

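# Return the best matching profile, its score, and its profile ID for a given job ID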
@app.get("/GetMatchProfileByJobId/")
def GetMatchProfileByJobId(JobId : int):    
    data = SkillMatch.SkillMatcher(model,db_params,JobId).split(';')
    dataJson = {'Data':['Best Resume Fit', 'Score', 'ProfileID'], 'Values':[data[0], data[1], data[2]]}
    df = pd.DataFrame(dataJson)
    return parse_csv(df) 

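# Summarize a hard-coded sample profile in a single paragraph using the OpenAI chat completions API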
from openai import OpenAI
@app.get("/GetOpenAPIResponse/")
def GetOpenAPIResponse():
    # Read the API key from the environment instead of hard-coding a secret in the source
    client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))

    response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {
        "role": "system",
        "content": "Summarize content you are provided in a single paragraph."
        },
        {
        "role": "user",
        "content": "Working as a secondary application owner with the Development team, on troubleshooting, analyzing data stored, its inbound and outbound core functionality using SQL Scripts and commands. Synchronized various records in the static Data Source (SSSDR), and solving issues related to the running SQL Server Instance and infrastructure. •	Worked as a Database Integration Specialist with experience in Autosys Job Automation, worked towards Jil file management and troubleshooting issues related to the data administration, created end to end application flow PARP and DIRP Cutover with all server specifications, database nodes and overseeing the entire infra level application flow.•	Worked as a Support application developer, analyzing issues and performing"
                        +"troubleshooting steps and handled exceptions through exception handling mechanism in Java."
        }
    ],
    temperature=0.7,
    max_tokens=64,
    top_p=1
    )
    # Return the generated summary rather than only printing it
    return {"Summary": response.choices[0].message.content}
#return JSONResponse(content={"message": "Here's your interdimensional portal." , "mes1":"data2"})
#https://vaibhav84-resumeapi.hf.space/docs