robertselvam commited on
Commit
8dbf259
·
verified ·
1 Parent(s): d89a462

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +162 -0
app.py ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from dotenv import load_dotenv
3
+ from utils import *
4
+ import uuid
5
+ from langchain_community.vectorstores import FAISS
6
+ from langchain_chroma import Chroma
7
+ import pandas as pd
8
# Streamlit requires page configuration before any other st.* call.
st.set_page_config(page_title="Resume Screening Assistance", layout="wide")


# Branded page header: orange banner (h1) with the company logo rendered
# inline in the title text.
custom_html = """
<style>

h1 {
background-color: #e6763b;
padding: 20px;
margin: 0;
text-align: center; /* Center the text */
}

img {
max-width: 100%;
height: 31px;
margin-right: 20px;
margin-bottom: 6px;
}

h1 img {
vertical-align: middle;
}
</style>
<body>
<h1><img src="https://www.athmick.com/static/media/athmick-logo-with-name.32abc7ca97607204825eed0610ae2eea.svg">HR Resume Copilot ... 💁</h1>
</body>
"""

# Inject the banner; unsafe_allow_html is required for raw HTML/CSS.
st.markdown(custom_html, unsafe_allow_html=True)

# Seed the per-session id slot so later code can assign it unconditionally.
st.session_state.setdefault('unique_id', '')
44
+
45
def main():
    """Streamlit entry point for the resume-screening assistant.

    Collects a job description and one or more resumes (all PDF), scores
    each resume against the job description with an LLM (helpers from
    ``utils``), then shows a ranked table with a CSV download.
    """
    # Pull settings (e.g. OPENAI_API_KEY) from a local .env file.
    load_dotenv()

    st.subheader("I can help you in resume screening process")

    # Upload the Job Description (single pdf file)
    job_description = st.file_uploader("JOB DESCRIPTION", type=["pdf"])

    # Upload the Resumes (one or more pdf files)
    pdf = st.file_uploader("RESUME", type=["pdf"], accept_multiple_files=True)

    # How many documents the similarity search should return.
    document_count = st.text_input("No.of 'RESUMES' to return", key="2")

    submit = st.button("Help me with the analysis")

    if not submit:
        return

    # Guard clauses: previously this crashed (AttributeError / ValueError)
    # when the uploaders or the count field were left empty.
    if job_description is None:
        st.error("Please upload a job description PDF before submitting.")
        return
    if not pdf:
        st.error("Please upload at least one resume PDF before submitting.")
        return
    try:
        result_count = int(document_count)
    except (TypeError, ValueError):
        st.error("\"No.of 'RESUMES' to return\" must be a whole number.")
        return

    with st.spinner('Wait for it...'):
        # Unique id tags this upload batch so vector-store queries can be
        # scoped to the current user's documents.
        st.session_state['unique_id'] = uuid.uuid4().hex

        # Create a documents list out of all the user uploaded pdf files
        # (helper from utils).
        final_docs_list = create_docs(pdf, st.session_state['unique_id'])

        # Displaying the count of resumes that have been uploaded
        st.write("*Resumes uploaded* :" + str(len(final_docs_list)))

        # Create embeddings instance (helper from utils).
        embeddings = create_embeddings_load_data()

        # In-memory Chroma vector store over the uploaded resumes.
        db = Chroma.from_documents(final_docs_list, embeddings)

        job_description_txt = get_pdf_text(job_description)

        # Similarity search with relevance scores against the JD text.
        # NOTE(review): this result is currently unused — the loop below
        # processes EVERY uploaded resume, not just the top-k matches.
        relavant_docs = db.similarity_search_with_relevance_scores(
            job_description_txt, k=result_count
        )

        data = []

        # Score every uploaded resume against the job description.
        for item in pdf:
            resume_txt = get_pdf_text(item)
            file_name = item.name

            # Expander keeps the per-resume work visually collapsed.
            with st.expander('Show me 👀'):
                # LLM comparison of resume vs JD, then parse the fields
                # out of the raw model response.
                matched_result = opeani_response(resume_txt, job_description_txt)
                matched_percentage, reason, skills_to_improve, keywords, irrelevant = get_strip_response(matched_result)

                # LLM-generated summary of the resume (helper from utils).
                summary = get_summary(resume_txt)

                # Append the information for the results DataFrame.
                data.append([file_name, matched_percentage, reason,
                             skills_to_improve, keywords, irrelevant, summary])

        table_data = pd.DataFrame(
            data,
            columns=["File", "Matched Score", "Matched Reason",
                     "Skills to improve", "Keywords", "Irrelevant", "Summary"],
        )

        # Rank resumes best-match first and renumber the rows.
        df_sorted = table_data.sort_values(by='Matched Score', ascending=False)
        df_sorted.reset_index(drop=True, inplace=True)

        # Per-resume detail cards, in ranked order.
        for _, row in df_sorted.iterrows():
            st.write("**File** : " + row["File"])
            st.info("**Matched Score 💯** : " + str(row["Matched Score"]) + "%")
            st.write("**Matched Reason 🌟** : " + row["Matched Reason"])
            st.write("**Skills to improve 🎯** : " + row["Skills to improve"])
            st.write("**Keywords 🗝️** : " + row["Keywords"])
            st.write("**Irrelevant 📛** : " + row["Irrelevant"])
            st.write("**Summary 📜** : " + row["Summary"])

        st.write("## Relevant Documents")
        st.table(df_sorted)

        # Offer the ranked table as a CSV download.
        csv = df_sorted.to_csv().encode('utf-8')
        st.download_button(
            label="Download data as CSV",
            data=csv,
            file_name='Result Data.csv',
            mime='text/csv',
        )
        st.success("Hope I was able to save your time❤️")


# Invoking main function
if __name__ == '__main__':
    main()