Added ARS (#1)

Commits:
- initial commit (66ceaab066d0ebf7b2b7016d40d54d1fe19afd65)
- Create requirements.txt (de33359dd9db20b1a5220f54f94499db83ee4400)
- Create app.py (a459994d80cab3676a0784225f33ce8ee807bf46)
- Create Dockerfile (0b94fdd7d4a103dd8f14ab99a1fd27d8570fa952)
- Create utils.py (e3884a1638689f48e57cff92937e1b2117418535)
- Update requirements.txt (c36320979d66a8b4c08d365746223f2010283b33)
- Update requirements.txt (6d9386f3f4618dcfd06337740ca3674f0e670912)
- Update app.py (fe28cb295fdf32c2d5cb32546d9bb2617806684e)
- Update app.py (c12efb0fc478f222e913bc60e015082df6e2ec70)
- connected stuff (1bb6f096a2f7b8f9b6c8b2ca3be91d658a866dac)
- changed .env (1f1ff083964e7ba2bfe7905f486bada5ed3bf1aa)
- removed pycache (66a875b92f3de7e31494703e26ef1ed0e888bc36)
- app.py updated (3e286b70c8540b34d56c4e86694f8c5b09cb21f9)
- deleted file (ca1717f8945c27fe4a6604428a90c8b044506274)
- added stuff (1a41961c0f49ccef85fe3f9ac550b856268b05d6)
- updated.py (2026af211aa1e2f8faf804a209845aabcd242027)
- added some stuff (1abc2753eb6b31cd4ab9a56dd63b652d43d5ef65)
- added some stuff (c4073a5a04aa9ec7ecdee64aa6b9225de6724245)
- added some stuff (4cd1ffd1352bcacbe91347d1b8444790edcac164)
- added more stuff (6ba5f500be5004e03820dfd7a04f3d805e847237)
- added jwt auth (26a8b59f87b0e656d4768bfaeb067f857689f7c2)
- added streaks feature (756ed25f5c7a8853c5a610da3e3a2ff827dcfc71)
- changed db up a bit (54b7518c7cfdc7128521023acaf38bf3d3911b79)
- very scripted (8382c943e322dfd4eac3dc652ab03bfcb813d8ff)
- added logout (ba7b47505c09469c56dfb64e0d87c7385a56c1b0)
- changed up something for jwt (0dceb1e95c0489889149296967eaa7a435f869fe)
- changed up something for jwt (936d31ee6555b0af6d2ab2868285cb6c3ff2a06f)
- added logout (d8149f4a80cc9b8aff86ec3d49133e30b47706f4)
- added http 226 error (ec44a32881eb42ee4cf2aedd2d8b82b6ef5466ff)
- added user details (5818dbbacf34d5bedef6cfa1e0dae49b4e06c7af)
- hopefully doesn't crash (f9ab8e5b1bbd0bf9234c7049c7c66bbe9405aaed)
- added dream role and fixed interem role bug (e692eccad884bff8c45c27806b01d14b7480e23b)
- added questionaire to the user details (2128dedf2eb30e4f265b834ed2ae6bcf73277ab9)
- added dream role and fixed interem role bug (c43a9826b7d30564d6612d5cbb5207347ef01b26)
- added dream role and fixed interem role bug (c4c7ccc42d7654a99dd41271465b536f76afadfb)
- fixed questionaire retrieval (5142863f09b619e922aec68ed749521636f80b14)
- fixed updating questionaire (83338db95cab796fe5dc0c58ec1ae4a84dd1a3f1)
- completed this shit (80d2e7b7d4d7c6bc8dfb744f4ae367cbdfe7a0cc)
- login stuff (995533c4da23fefc7bcedac8c447150f9d2002dd)
- HOP (d46a40ec686260dab05748b2228820ca78922c17)
- added provider (398210d72dc645f110c51e4ff99ed310541b3f69)
- changed 0 to 1 (cf203377de703160ff234ab241686d303761b45a)
- added a new data type (73371fd5625a8d6be23cb8bfb6ebd1e2b207e90f)
- made the object less strict (0c276d34853fc789bd61ae121431ae3462429032)
- added userCourse stuff (441685320d5827ac57061ad01f4d769beaa2fbf6)
- fixed verification for refresh token (06533adec92074da1fd5b4e6e1aa3f4dc18377a9)
- added course link (ef82ce661b2485092253cf818107b9871ed31046)
- updated util.py (1060ca882e8dbe7c691ea70893dfdd088b69ae1e)
- fixed streaks issue (8f2e112bdf80fdfd3ecf5cbc30239d48c93c31ab)
- added a new streaks feature (c38f2344cb9151733e8d9dc815f5d9afd9cc9340)
- removed testes.py (5d4fab2eb76ab5e8b86eb63885668ca19ef9823a)
- added some routes (323b3570d37d797a278c36a594ab193ba45a5b16)
- put in the customer rate (32d2a84afc19d7a441f7551fc25e62a1fb3da76a)
- changed testing parameters to stable parameters (aa8143aa91809f595238076b8fdd231db10f57fd)
- added most features (f1b601bf9a32ceb99c318737d02a897617570476)
- added index (8159bdee32b01b8091e05bcbf795306dc36ef4b1)
- updated stream (90bab231aeba71101e6690719818c4579eb49ef7)
- fixed weekend warrior pointing not working (82124ef6be18af0b53420fd7489c12831ea51fb3)
- added total Point to float (ca1bfdd7969f9b6ea94614ae2cc16591e3dde8ed)
- removed int response (a76a90cd34e674c245b02c7a314001941505d39f)
- removed int response (dbebc697ab2f2a03b2fcf09ae396106037442bc1)
- made it production grade (161667a95596eb917d533337909c171af5b131b3)
- removed the returning levels (0d5781045e2e69697f43fada26336a51c9a61ae4)
- removed the returning levels (578e7e0d52ce14b9659ed51f8563440f74975c48)
- added the ability for leaderboard to be recalculated upon every new point, which may slow down performance so I will make it a background process (b8596d51f0ecefc5c14465f45272b43d0c37e4d7)
- changed some stuff I can't remember no need for background processes (1225834a1f805048df83dafcaae0eb2b14a9b7a8)
- added badge url as part of return for rankers (07ffd862cf5c61e01373f48ff4aece5532806fbf)
- added a fix to leaderboard (9bb2f7cc4c1be1c60c90fdd83c585f367a015de1)
- hot fix for leaderboard (d6b9f3da4b60f331556e71e0ba6b1c2435da0812)
- refactored codebase (9f571c04077337b986a1325b165c59a8766ef975)
- updated codebase to make it faster (9f13856d0db8ce88f0824536439fe8b9ba82b2e8)
- added .env.example (0b04c95f3b76a9231af0f1308f251005e2d3229a)
- redis addd to requirements (560381218fd289d3432b440fd72be29c8d35a414)
- cryptography added to requirements (35852be5b10b4f896180be9c62b00871002b8898)
- fixed the refresh token incorrect token error (0197cb38c358ec91a0ff325ddfa19a0f920d60f1)
- fixed byte12 error (58d4afb077be8b27f9d7f62a28613419cd67735c)
- changed name (4238535596edd98310a8e25b3e5a742cc6ebcfe8)
- updated get levels (b37e48c96dbb8a22a788b209b8d4661d5a7dcb29)
- updated get levels (2b9a22d22e0309e20d0d4daa97016e733ad6141d)
- updated get levels (9186c0c7df7fdb539f4929004031c912e5fc9ce0)
- added star url (c74fe081c2e6c12a9683afa2d6f00908ad851aa0)
- fixed error (f9371dab78067b9e9d57699df1314bdfdd17d337)
- fixed error (752266eda903a150c6970f9799d4886842cf4342)
- fixed star url not changing (d9c224e559e244df502b942e4e51f40b2e7dc1b8)
- ars (4eab69a9f03b414e40eecc6f40fb55c1bf6bb4b4)
- stage 1 for ARS (1e531e2133dd12d2ddff48ce04fd72cca732f458)
- added ars route (1aa5032c5dcce62618f8ca891f8c500385563076)
- fixed merge conflict (103cfcd37fd3266b1396516d241c41b998383332)

Files changed:
- Ars/__init__.py +0 -0
- Ars/ai_functions.py +316 -0
- Ars/controllers.py +19 -0
- Ars/core.py +24 -0
- Ars/embedDoc.py +109 -0
- Ars/imports.py +0 -0
- Ars/objects.py +415 -0
- Ars/repositories.py +407 -0
- Ars/routes.py +15 -0
- README.md +4 -0
- app.py +2 -0
- gamification/pointLogic.py +12 -0
- requirements.txt +4 -0

Ars/__init__.py: new empty file (no content to show).

Ars/ai_functions.py (new file, 316 lines added):

```python
from pdfminer.high_level import extract_text_to_fp
from io import BytesIO
from objects import (
    ai, ResumeData, AutomationRiskResult, AutomationRiskInput, RealWorldQuestion,
    SkillDepthResult, SkillDepthInput, BreakDownByDomainUpdate,
    FlaggedRiskAreasUpdate, BoostSuggestionsUpdate, AICollabReadinessInput,
)
from embedDoc import search_pinecone_text
from fastapi import UploadFile


def extract_text_from_bytes(pdf_bytes: bytes) -> str:
    """Extract plain text from a PDF supplied as raw bytes."""
    output_string = BytesIO()
    with BytesIO(pdf_bytes) as input_stream:
        extract_text_to_fp(input_stream, output_string)
    return output_string.getvalue().decode()


async def resume_analysis(upload_file: UploadFile) -> ResumeData:
    contents = await upload_file.read()
    resume = extract_text_from_bytes(pdf_bytes=contents)
    if resume:  # implicitly returns None when no text could be extracted
        prompt = f"""
        You are an AI resume parser. Read the following resume and extract the
        following structured information, if any was found:

        1. Work experience details: job titles and descriptions. (max 3)
        2. List of technical skills (technologies, tools, platforms, etc.). (max 4)
        3. Soft and transferable skills:
           - Leadership and collaboration
           - Critical thinking and problem-solving
           - Communication skills
           - Cross-functional/interdisciplinary experience
           - Initiative and adaptability
        4. Career path details:
           - Current or intended role/field
           - Industry and sector context
           - Career trajectory trends
        5. Project experience (if any): project titles, descriptions, role. (max 3)
        6. Evidence of upskilling (optional): certifications, courses, learning
           projects, hackathons.

        Return the extracted data as a JSON object that matches this schema:

        Here is the resume:

        {resume}
        """
        result = ai.chat(prompt=prompt, output_schema=ResumeData)
        return result


def calculate_automation_risk(resume: ResumeData):
    def generate_real_world_data(resume: ResumeData):
        # Ask the model for a semantic-search query, then run it against the
        # Pinecone index holding future-of-jobs report data.
        prompt = f"Generate a search query using the resume details below. I want to search text that has data on future-jobs reports using semantic search. Here is the resume data: {resume}"
        result = ai.chat(prompt=prompt, output_schema=RealWorldQuestion)
        search_result = search_pinecone_text(query_text=result.question)
        return search_result

    real_world_data = generate_real_world_data(resume=resume)
    prompt = f"""
    You are an Automation Risk Calculator AI. Your task is to analyze the user's
    resume and any provided real-world data to estimate how automatable their
    skills and job titles are.

    For each of the following factors, please evaluate on a scale from 0 to 5, where:

    0 = Not present or irrelevant
    1 = Very low presence or impact
    2 = Low presence or impact
    3 = Moderate presence or impact
    4 = High presence or impact
    5 = Very high presence or impact

    Factors to score:

    - repetitiveness of tasks
    - creativity required
    - emotional intelligence needed
    - reliance on data-driven tasks
    - physical/manual work involved
    - level of autonomous decision-making
    - need for strategic thinking
    - importance of collaboration
    - current use of AI tools in the role
    - recent upskilling or adaptability

    The real-world data may contain labor market trends, automation
    probabilities, or other relevant insights; use it if helpful.

    After scoring all factors, calculate an overall automation risk score
    between 0 and 100, where:
    - 100 means very low automation risk (skills are highly resilient to automation),
    - 0 means very high automation risk (skills are highly automatable).

    Return only the final integer risk score (0 to 100), with no explanations
    or extra text.

    Here is the resume:

    ({resume})

    Here is the real-world data:

    ({real_world_data})
    """
    result = ai.chat(prompt=prompt, output_schema=AutomationRiskInput)
    return result


def calculate_skill_depth(resume: ResumeData):
    prompt = f"""
    You are a Skill Depth Calculator AI. Your task is to analyze the user's
    resume data and score various skill-related factors.

    For each of the following factors, please score on a scale from 0 to 5, where:

    0 = Not present or no depth
    1 = Very shallow experience or presence
    2 = Basic or limited experience
    3 = Moderate experience or involvement
    4 = Strong experience or senior level
    5 = Expert or highly advanced experience

    Factors to score include (but are not limited to):

    - Years of experience per skill
    - Seniority level in roles held
    - Number and relevance of certifications
    - Breadth of skills (variety and diversity)
    - Depth in core technical skills
    - Depth in leadership or management skills
    - Involvement in complex projects
    - Contributions to strategic initiatives
    - Frequency of skill usage in recent roles
    - Evidence of continuous learning or upskilling
    - Cross-functional collaboration skills
    - Recognition or awards related to skills
    - Public speaking or training experience
    - Publications or patents (if any)
    - Industry-specific expertise depth
    - Mentoring or coaching experience
    - Ability to innovate using skills
    - Adaptability to new technologies
    - Problem-solving skills depth
    - Communication skills related to technical content

    Here is the resume:

    ({resume})
    """
    result = ai.chat(prompt=prompt, output_schema=SkillDepthInput)
    return result


def calculate_Ai_collab_readiness(resume: ResumeData):
    prompt = f"""
    You are an AI Collaboration Readiness Calculator. Your task is to read the
    following resume and assess how ready the individual is to **collaborate
    with AI tools**, not just compete against them.

    Score the user on the following 10 readiness traits, each on a scale from 0 to 5:

    0 = Not mentioned or no evidence
    1 = Very minimal evidence or weak relevance
    2 = Some mention, limited depth
    3 = Moderate use or awareness
    4 = Strong use or understanding
    5 = Expert use or deep integration

    Traits to evaluate:

    - Mention of AI tools (e.g., ChatGPT, GitHub Copilot, Midjourney, etc.)
    - Adaptability to AI-enhanced workflows
    - Willingness to learn and adopt AI tools
    - Understanding of AI ethics and responsible use
    - Demonstrated collaboration with AI (not just use)
    - Use of AI for problem-solving
    - Creativity in applying AI to new tasks or contexts
    - Speed in learning and applying new AI technologies
    - Communication skills related to explaining or using AI
    - Integration of AI into work/study processes

    Here is the resume:

    ({resume})
    """
    result = ai.chat(prompt=prompt, output_schema=AICollabReadinessInput)
    return result


def generate_domain_breakdown(resume: ResumeData, **helpful_key_data):
    prompt = f"""
    You have one clear objective: analyze the following resume and produce a
    domain skills breakdown based on the taxonomy below.

    **Task:**
    Classify and score the candidate's skills/domains on a scale from 0 to 100,
    where 0 means no proficiency and 100 means expert-level proficiency.

    **Example output format:**
    - Technical: 65
    - Creative: 80
    - Strategy: 75
    - Collaboration: 60

    **Domains to consider (but not limited to):**
    - Technical (e.g., software development, programming)
    - Creative (e.g., design, content creation)
    - Strategy (e.g., planning, business analysis)
    - Collaboration (e.g., teamwork, communication)

    **Requirements:**
    - Return only the domain names and their scores as shown in the example.
    - Scores must be integers between 0 and 100.
    - Provide 4-6 domain categories relevant to the resume content.
    - Use your best judgment based on the resume to assign scores reflecting
      demonstrated skills and experience.

    Here is the resume to analyze:

    ({resume})

    **Analysis data to aid evaluation**:
    **{helpful_key_data}**
    """
    result = ai.chat(prompt=prompt, output_schema=BreakDownByDomainUpdate)
    return result


def generate_flagged_risk_areas(resume: ResumeData, **helpful_key_data):
    prompt = f"""
    **You are an AI Resume Risk Evaluator.**
    Your job is to review the following resume and flag potential *career
    resilience risks*. Focus strictly on:

    1. **Missing recent/up-to-date certifications**
    2. **Lack of AI or automation tool usage**
    3. **Dependence on easily automatable tasks** (e.g., repetitive data entry,
       basic spreadsheets)

    For each issue identified, return **a concise 1-2 sentence explanation**
    with clear language.

    **Response Format (Examples)**:

    * "Heavy reliance on spreadsheet tasks, which are easily automated."
    * "No evidence of recent certifications to stay current in the field."
    * "Resume lacks any mention of AI or automation tools."

    **Resume to evaluate**:

    ({resume})

    **Analysis data to aid evaluation**:
    **{helpful_key_data}**
    """
    result = ai.chat(prompt=prompt, output_schema=FlaggedRiskAreasUpdate)
    return result


def generate_boost_suggestion(resume: ResumeData, **helpful_key_data):
    prompt = f"""
    You are a Resume Enhancement AI. Analyze the resume below and generate 2 to
    4 clear, actionable suggestions to help the user strengthen their profile
    by addressing skill or experience gaps.

    Focus on practical, AI-driven improvements such as:
    - Mastering relevant AI tools
    - Enrolling in advanced or specialized courses
    - Expanding the complexity or impact of projects
    - Obtaining up-to-date certifications

    **Examples**:
    - "Learn ChatGPT to boost your coding efficiency."
    - "Enhance your Python skills through an advanced course."
    - "Add recent certifications to demonstrate continuous growth."
    - "Integrate AI tools into your project portfolio."

    **Guidelines**:
    - Suggestions must be specific, relevant, and directly tied to the resume content.
    - Keep each suggestion concise (1-2 sentences).
    - Avoid generic advice; prioritize actionable, targeted improvements.

    Resume to analyze:

    ({resume})

    **Analysis data to aid evaluation**:
    **{helpful_key_data}**
    """
    result = ai.chat(prompt=prompt, output_schema=BoostSuggestionsUpdate)
    return result
```
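
For a quick local check of the extraction step outside the FastAPI flow, a minimal sketch (sample.pdf is an illustrative filename, not part of the repo):

```python
# Run the pdfminer-based helper on a local file; the filename is illustrative.
from ai_functions import extract_text_from_bytes

with open("sample.pdf", "rb") as f:
    text = extract_text_from_bytes(f.read())
print(text[:200])  # preview the first 200 extracted characters
```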

Ars/controllers.py (new file, 19 lines added):

```python
from core import r
from repositories import create_boost_suggestions, create_breakdown_by_domain, create_flagged_risk_areas, create_user_resilience
from fastapi import UploadFile
from ai_functions import (
    resume_analysis, calculate_automation_risk, calculate_Ai_collab_readiness,
    calculate_skill_depth, generate_boost_suggestion, generate_domain_breakdown,
    generate_flagged_risk_areas,
)
from objects import AICollabReadiness, SkillDepthResult, AutomationRiskResult


async def resilience_analysis(file: UploadFile):
    resume = await resume_analysis(file)

    # Re-instantiate each *Input result as its *Result subclass so the model
    # validators recompute the 0-100 sub-scores.
    risk = AutomationRiskResult(**calculate_automation_risk(resume).model_dump())
    skill_depth = SkillDepthResult(**calculate_skill_depth(resume).model_dump())
    ai_readiness = AICollabReadiness(**calculate_Ai_collab_readiness(resume).model_dump())

    # Weighted blend: 50% automation resilience, 30% skill depth, 20% AI readiness.
    resilience_score = (
        (1 - (risk.result / 100)) * 0.5
        + (skill_depth.result / 100) * 0.3
        + (ai_readiness.result / 100) * 0.2
    )

    flagged_risk = generate_flagged_risk_areas(resume=resume, skill_depth=skill_depth, risk=risk, ai_readiness=ai_readiness)
    boost_suggestion = generate_boost_suggestion(resume=resume, skill_depth=skill_depth, risk=risk, ai_readiness=ai_readiness)
    domain_breakdown = generate_domain_breakdown(resume=resume, skill_depth=skill_depth, risk=risk, ai_readiness=ai_readiness)

    return {
        "overall_score": resilience_score,
        "flagged_risk": flagged_risk,
        "boost_suggestion": boost_suggestion,
        "domain_breakdown": domain_breakdown,
        "resume": resume,
        "skill_depth": skill_depth,
        "risk": risk,
        "ai_readiness": ai_readiness,
    }
```
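
resilience_analysis blends its three sub-scores (each 0-100) into a single 0-1 figure, weighting automation resilience at 50%, skill depth at 30%, and AI-collaboration readiness at 20%. A worked example with illustrative sub-scores (the numbers are not from the source):

```python
# Worked example of the blend above; all three inputs are made-up values.
risk_result = 40          # automation risk, 0-100 (higher = more automatable)
skill_depth_result = 70   # skill depth, 0-100
ai_readiness_result = 55  # AI-collaboration readiness, 0-100

overall = ((1 - risk_result / 100) * 0.5        # 0.60 * 0.5 = 0.30
           + (skill_depth_result / 100) * 0.3   # 0.70 * 0.3 = 0.21
           + (ai_readiness_result / 100) * 0.2) # 0.55 * 0.2 = 0.11
print(round(overall, 2))  # 0.62
```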

Ars/core.py (new file, 24 lines added):

```python
# db.py
import os
from dotenv import load_dotenv
import redis
from motor.motor_asyncio import AsyncIOMotorClient

load_dotenv()

MONGO_URI = os.getenv('MONGO_URI')
DB_NAME = "crayonics"
REDIS_HOST = os.getenv('REDIS_HOST', 'localhost')
REDIS_PORT = int(os.getenv('REDIS_PORT', 6379))
REDIS_PASSWORD = os.getenv('REDIS_PASSWORD', None)
FERNET_SECRET_KEY = os.getenv('FERNET_SECRET_KEY')
REDIS_USERNAME = os.getenv('REDIS_USERNAME')

client = AsyncIOMotorClient(MONGO_URI)
db = client[DB_NAME]
r = redis.StrictRedis(
    host=REDIS_HOST,
    port=REDIS_PORT,
    password=REDIS_PASSWORD,
    username=REDIS_USERNAME,
    db=0,
    decode_responses=True
)
```
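
Both handles are built at import time; neither talks to its server until first use. A small startup connectivity check, as a sketch (assumes core.py's `r` and `client` and reachable Redis/MongoDB servers):

```python
# Fail fast at startup if Redis or MongoDB is unreachable; a sketch that
# assumes the `r` and `client` handles defined in core.py.
import asyncio
from core import r, client

async def check_connections() -> None:
    r.ping()                            # raises redis.exceptions.ConnectionError on failure
    await client.admin.command("ping")  # raises on MongoDB connection failure
    print("Redis and MongoDB reachable")

asyncio.run(check_connections())
```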

Ars/embedDoc.py (new file, 109 lines added):

```python
from pinecone import Pinecone
from langchain_text_splitters import RecursiveCharacterTextSplitter
import os
from dotenv import load_dotenv
import time
from typing import List, Dict

load_dotenv()


def chunk_list(lst, chunk_size):
    """Yield successive chunks of size `chunk_size` from a list."""
    for i in range(0, len(lst), chunk_size):
        yield lst[i:i + chunk_size]


def upsert_text_with_chunks(
    text: str,
    *,
    index_host: str = "https://resume-42eo81u.svc.aped-4627-b74a.pinecone.io",
    namespace: str = "default",
    chunk_size: int = 1000,
    chunk_overlap: int = 200
) -> None:
    """
    Splits a long text into overlapping chunks and upserts them directly into a
    Pinecone index that has integrated embedding enabled.

    Args:
        text (str): The full text document to embed.
        index_host (str): Pinecone index host URL.
        namespace (str): Pinecone namespace to upsert into.
        chunk_size (int): Max characters per chunk.
        chunk_overlap (int): Overlap in characters between chunks.
    """
    api_key = os.getenv("PINECONE_API_KEY")
    if not api_key:
        raise EnvironmentError("Set PINECONE_API_KEY in environment")

    pc = Pinecone(api_key=api_key)
    index = pc.Index(host=index_host)

    splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
        length_function=len,
        is_separator_regex=False
    )
    chunks = splitter.split_text(text)
    if not chunks:
        print("No chunks generated; exiting.")
        return

    records = [{"_id": f"chunk-{i}", "text": chunk} for i, chunk in enumerate(chunks)]

    # Upsert in batches of 50, pausing between batches to stay under rate limits.
    for batch in chunk_list(records, 50):
        print("Inserting")
        index.upsert_records(records=batch, namespace=namespace)
        time.sleep(60)
        print("resting")
    print(f"✅ Upserted {len(records)} chunks into namespace '{namespace}'.")


def search_pinecone_text(
    query_text: str,
    index_host: str = "https://resume-42eo81u.svc.aped-4627-b74a.pinecone.io",
    namespace: str = "default",
    top_k: int = 2,
    fields: List[str] = ["category", "text"]
) -> List[Dict]:
    """
    Search a Pinecone index using a text query.

    Args:
        query_text (str): The input text to search for.
        index_host (str): The specific index host URL.
        namespace (str): The namespace to search within.
        top_k (int): Number of top results to return.
        fields (List[str]): Metadata fields to include in the response.

    Returns:
        List[Dict]: The top matching results as {"text", "score"} dicts.
    """
    api_key = os.getenv("PINECONE_API_KEY")
    pc = Pinecone(api_key=api_key)
    index = pc.Index(host=index_host)

    results = index.search(
        namespace=namespace,
        query={"inputs": {"text": query_text}, "top_k": top_k},
        fields=fields
    )
    hits = results.result['hits']
    result = []
    for hit in hits:
        result.append({"text": hit['fields']['text'], "score": hit['_score']})
    return result
```
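
Taken together, the two helpers support an index-then-query flow. A minimal sketch, assuming PINECONE_API_KEY is exported, the default index host above points at an index created with integrated text embedding, and the report filename is illustrative:

```python
# Index a local report, then run a semantic query against it.
from embedDoc import upsert_text_with_chunks, search_pinecone_text

with open("future_of_jobs_report.txt") as f:  # illustrative filename
    upsert_text_with_chunks(f.read(), namespace="default")

for hit in search_pinecone_text("automation risk for data entry roles", top_k=2):
    print(f"{hit['score']:.3f}  {hit['text'][:80]}")
```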

Ars/imports.py: new empty file (no content to show).

Ars/objects.py (new file, 415 lines added):

```python
from datetime import datetime
from pydantic import Field, model_validator, BaseModel
from typing import List, Optional, Union, Type, TypeVar
from bson import ObjectId
import openai
from google import genai
from google.genai import types
import os
from dotenv import load_dotenv

load_dotenv()
GOOGLE_API_KEY = os.getenv("GEMINI_API_KEY")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

T = TypeVar("T", bound=BaseModel)


class AIWrapper:
    def __init__(self, provider='openai'):
        self.provider = provider.lower()

        if self.provider == 'openai':
            openai.api_key = OPENAI_API_KEY
        elif self.provider == 'gemini':
            self.gemini_client = genai.Client(
                api_key=GOOGLE_API_KEY,
                http_options=types.HttpOptions(api_version='v1alpha')
            )
        else:
            raise ValueError("Provider must be 'openai' or 'gemini'")

    def chat(self, prompt: str, output_schema: Type[T]) -> T:
        """
        Generate a response from the AI provider and return it parsed into the
        specified schema.

        Args:
            prompt (str): The input prompt.
            output_schema (Type[T]): A Pydantic model representing the output schema.

        Returns:
            T: Parsed AI response as an instance of output_schema.
        """
        if self.provider == 'openai':
            return self._openai_chat(prompt)
        elif self.provider == 'gemini':
            return self._gemini_chat(prompt, output_schema=output_schema)

    def _openai_chat(self, prompt: str) -> str:
        # Note: returns the raw message string rather than an output_schema
        # instance, so only the 'gemini' path currently honors chat()'s
        # declared return type.
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=[
                {"role": "user", "content": prompt}
            ]
        )
        return response['choices'][0]['message']['content']

    def _gemini_chat(self, prompt: str, output_schema: Type[T]) -> T:
        response = self.gemini_client.models.generate_content(
            model='gemini-2.0-flash-001',
            contents=prompt,
            config=types.GenerateContentConfig(
                response_mime_type='application/json',
                response_schema=output_schema,
            ),
        )
        return response.parsed


# Shared wrapper instance used across the Ars package.
ai = AIWrapper(provider='gemini')


class UserResilienceScoreCreate(BaseModel):
    overallScore: float
    userId: str
    BreakDownByDomainId: str
    FlaggedRiskAreasId: str
    BoostSuggestionsId: str


class UserResilienceScoreUpdate(BaseModel):
    overallScore: Optional[float] = None
    BreakDownByDomainId: Optional[str] = None
    FlaggedRiskAreasId: Optional[str] = None
    BoostSuggestionsId: Optional[str] = None


class BreakDownByDomainCreate(BaseModel):
    userId: str
    Technical: float
    Creative: float
    Strategy: float
    Collaboration: float


class BreakDownByDomainUpdate(BaseModel):
    Technical: Optional[float] = None
    Creative: Optional[float] = None
    Strategy: Optional[float] = None
    Collaboration: Optional[float] = None


class FlaggedRiskAreasCreate(BaseModel):
    userId: str
    risk_areas: List[str]


class FlaggedRiskAreasUpdate(BaseModel):
    risk_areas: Optional[List[str]] = None


class BoostSuggestionsCreate(BaseModel):
    boost_suggestions: List[str]


class ProjectExperienceDetails(BaseModel):
    ProjectTitles: str = Field(..., description="The title(s) of the project(s) involved in.")
    descriptions: str = Field(..., description="Detailed description of the project and what it entailed.")
    RoleInTheProject: str = Field(..., description="The specific role played within the project.")


class WorkExperienceDetails(BaseModel):
    JobTitles: str = Field(..., description="The job titles held in past employment.")
    JobDescriptions: str = Field(..., description="Summary of responsibilities and duties in these jobs.")


class SoftTransferableSkills(BaseModel):
    LeadershipAndCollaborationIndicators: str = Field(..., description="Evidence or examples demonstrating leadership and teamwork.")
    CriticalThinkingOrProblemSolvingVerb: str = Field(..., description="Examples of critical thinking or problem solving performed.")
    CommunicationSkills: Optional[str] = Field(None, description="Description of communication skills and contexts.")
    CrossFunctionalOrInterdisciplinaryExperience: str = Field(..., description="Experience working across teams or disciplines.")
    InitiativeAndAdaptabilityLanguage: str = Field(..., description="Examples of taking initiative and adapting to change.")


class CareerPathInformation(BaseModel):
    CurrentOrIntendedRoleOrField: str = Field(..., description="Current or intended professional role or field of work.")
    IndustryAndSectorContext: str = Field(..., description="Context about the industry and sector related to the career path.")
    CareerTrajectoryTrends: str = Field(..., description="Observed or expected trends in the career trajectory or sector.")


class EvidenceOfUpskillingAndLifelongLearning(BaseModel):
    CertificationsCoursesOrBootcampsListed: Optional[List[str]] = Field(None, description="List of certifications, courses, or bootcamps completed.")
    SelfInitiatedLearningProjectsOrNonDegreeEducationalAchievements: Optional[List[str]] = Field(None, description="List of personal projects or non-degree achievements.")
    ParticipationInHackathonsClubsOrProfessionalCommunities: Optional[List[str]] = Field(None, description="Involvement in hackathons, clubs, or professional groups.")


class AIRelatedKeywords(BaseModel):
    AiToolsAndTechnologies: Optional[List[str]] = Field(
        None,
        description="List of AI tools and technologies mentioned in the resume, e.g., ChatGPT, TensorFlow."
    )
    conceptsAndTechniques: Optional[List[str]] = Field(
        None,
        description="AI concepts or techniques like NLP, computer vision, or reinforcement learning."
    )
    aiIntegratedProjectsMentioned: Optional[List[str]] = Field(
        None,
        description="Names or descriptions of projects where AI was applied."
    )
    usageContextDescriptions: Optional[List[str]] = Field(
        None,
        description="Sentences or phrases describing how AI was used in projects or tasks."
    )


class ResumeData(BaseModel):
    workExperienceDetails: Optional[List[WorkExperienceDetails]] = None
    listOfExplicitTechnicalSkills: Optional[List[str]] = None
    softTransferableSkills: List[SoftTransferableSkills]
    projectExperienceDetails: Optional[List[ProjectExperienceDetails]] = None
    careerPathInformation: CareerPathInformation
    evidenceOfUpskillingAndLifelongLearning: Optional[EvidenceOfUpskillingAndLifelongLearning] = None
    aiRelatedKeywords: AIRelatedKeywords


class RealWorldQuestion(BaseModel):
    question: str


class AutomationRiskInput(BaseModel):
    # Resume background fields
    job_title: str = Field(..., description="Most recent job title")
    industry: str = Field(..., description="Industry sector (e.g., finance, education, manufacturing)")
    years_experience: int = Field(..., ge=0, description="Years of professional experience")
    education_level: str = Field(..., description="Highest education level (e.g., Bachelors, Masters, PhD)")

    technical_skills: List[str] = Field(default_factory=list, description="List of technical skills")
    soft_skills: List[str] = Field(default_factory=list, description="List of soft skills")
    managerial_experience: bool = Field(..., description="Has managed teams or projects")
    customer_facing_roles: bool = Field(..., description="Has held customer-facing roles")
    domain_specialization: Optional[str] = Field(None, description="Specialized domain (e.g., legal, medical)")
    recent_certifications: List[str] = Field(default_factory=list, description="Certifications obtained recently")

    # Scored traits (all int 0-5)
    repetitiveness_score: int = Field(..., ge=0, le=5, description="Repetitiveness of the tasks performed")
    creativity_score: int = Field(..., ge=0, le=5, description="Creativity required in the role")
    emotional_intelligence_score: int = Field(..., ge=0, le=5, description="Importance of emotional intelligence")
    data_driven_tasks_score: int = Field(..., ge=0, le=5, description="Dependence on data-driven tasks")
    physical_task_score: int = Field(..., ge=0, le=5, description="Amount of physical/manual work")
    decision_making_level: int = Field(..., ge=0, le=5, description="Level of autonomous decision-making")
    strategic_thinking_score: int = Field(..., ge=0, le=5, description="Need for strategic thinking")
    collaboration_score: int = Field(..., ge=0, le=5, description="Collaboration required in the role")
    ai_dependency_score: int = Field(..., ge=0, le=5, description="How much AI tools are already used")
    upskilling_index: int = Field(..., ge=0, le=5, description="Recent evidence of upskilling/adaptability")


class AutomationRiskResult(AutomationRiskInput):
    result: Optional[int] = 0

    @model_validator(mode='after')
    def calculate_result(self) -> "AutomationRiskResult":
        """
        Calculate the overall automation risk score (0-100) from the scored
        traits. Note: this overrides whatever `result` the model returned, and
        higher values mean more automatable.
        """
        # Weights for each scored trait (example weights; tune as needed).
        # Positive weights push risk up, negative weights pull it down.
        weights = {
            "repetitiveness_score": 15,
            "creativity_score": -10,
            "emotional_intelligence_score": -10,
            "data_driven_tasks_score": 10,
            "physical_task_score": 10,
            "decision_making_level": -10,
            "strategic_thinking_score": -10,
            "collaboration_score": -5,
            "ai_dependency_score": 5,
            "upskilling_index": -5,
        }

        # Sum weighted scores
        score = sum(getattr(self, field) * weight for field, weight in weights.items())

        # Because some weights are negative, the minimum is reached when every
        # negatively weighted trait is 5 and every positively weighted trait
        # is 0, and vice versa for the maximum.
        min_score = sum(5 * w for w in weights.values() if w < 0)
        max_score = sum(5 * w for w in weights.values() if w > 0)

        # Clamp the score, then map it linearly onto 0-100.
        score = max(min_score, min(max_score, score))
        self.result = int((score - min_score) / (max_score - min_score) * 100)
        return self


class SkillDepthInput(BaseModel):
    # Core scoring fields (all 0-5 integers)
    years_experience_per_skill: int = Field(..., ge=0, le=5, description="Depth of years experience per skill")
    seniority_level: int = Field(..., ge=0, le=5, description="Seniority level in roles held")
    certification_presence: int = Field(..., ge=0, le=5, description="Number and relevance of certifications")
    breadth_of_skills: int = Field(..., ge=0, le=5, description="Variety and diversity of skills")
    technical_skill_depth: int = Field(..., ge=0, le=5, description="Depth in core technical skills")
    leadership_skill_depth: int = Field(..., ge=0, le=5, description="Depth in leadership or management skills")
    complex_projects_involvement: int = Field(..., ge=0, le=5, description="Involvement in complex projects")
    strategic_initiatives_contribution: int = Field(..., ge=0, le=5, description="Contributions to strategic initiatives")
    recent_skill_usage_frequency: int = Field(..., ge=0, le=5, description="Frequency of skill usage in recent roles")
    continuous_learning_evidence: int = Field(..., ge=0, le=5, description="Evidence of continuous learning or upskilling")
    cross_functional_collaboration: int = Field(..., ge=0, le=5, description="Cross-functional collaboration skills")
    recognition_awards: int = Field(..., ge=0, le=5, description="Recognition or awards related to skills")
    public_speaking_training: int = Field(..., ge=0, le=5, description="Public speaking or training experience")
    publications_patents: int = Field(..., ge=0, le=5, description="Publications or patents (if any)")
    industry_expertise_depth: int = Field(..., ge=0, le=5, description="Industry-specific expertise depth")
    mentoring_coaching_experience: int = Field(..., ge=0, le=5, description="Mentoring or coaching experience")
    innovation_ability: int = Field(..., ge=0, le=5, description="Ability to innovate using skills")
    adaptability_to_technologies: int = Field(..., ge=0, le=5, description="Adaptability to new technologies")
    problem_solving_depth: int = Field(..., ge=0, le=5, description="Problem-solving skills depth")
    technical_communication_skills: int = Field(..., ge=0, le=5, description="Communication skills related to technical content")


class SkillDepthResult(SkillDepthInput):
    result: Optional[int] = 0

    @model_validator(mode='after')
    def calculate_result(self) -> "SkillDepthResult":
        # Average all 0-5 trait scores and rescale onto 0-100.
        fields = [
            self.years_experience_per_skill,
            self.seniority_level,
            self.certification_presence,
            self.breadth_of_skills,
            self.technical_skill_depth,
            self.leadership_skill_depth,
            self.complex_projects_involvement,
            self.strategic_initiatives_contribution,
            self.recent_skill_usage_frequency,
            self.continuous_learning_evidence,
            self.cross_functional_collaboration,
            self.recognition_awards,
            self.public_speaking_training,
            self.publications_patents,
            self.industry_expertise_depth,
            self.mentoring_coaching_experience,
            self.innovation_ability,
            self.adaptability_to_technologies,
            self.problem_solving_depth,
            self.technical_communication_skills,
        ]
        max_total = 5 * len(fields)
        total_score = sum(fields)
        self.result = int((total_score / max_total) * 100)
        return self


class AICollabReadinessInput(BaseModel):
    ai_tool_familiarity: int = Field(..., ge=0, le=5, description="Familiarity with AI tools and platforms")
    adaptability_to_ai_workflows: int = Field(..., ge=0, le=5, description="Ability to adapt to AI-enhanced workflows")
    willingness_to_learn_ai_skills: int = Field(..., ge=0, le=5, description="Motivation and willingness to learn AI skills")
    ai_ethics_understanding: int = Field(..., ge=0, le=5, description="Understanding of AI ethics and responsible use")
    collaboration_with_ai: int = Field(..., ge=0, le=5, description="Experience or mindset to collaborate effectively with AI systems")
    problem_solving_with_ai: int = Field(..., ge=0, le=5, description="Skill in using AI to solve complex problems")
    creativity_in_ai_use: int = Field(..., ge=0, le=5, description="Creativity in leveraging AI capabilities")
    ai_learning_speed: int = Field(..., ge=0, le=5, description="Speed of learning new AI technologies")
    communication_about_ai: int = Field(..., ge=0, le=5, description="Ability to communicate AI concepts effectively")
    ai_tool_integration: int = Field(..., ge=0, le=5, description="Skill in integrating AI tools into existing workflows")


class AICollabReadiness(AICollabReadinessInput):
    result: Optional[int] = 0

    @model_validator(mode='after')
    def calculate_result(self) -> "AICollabReadiness":
        # Average all 0-5 readiness traits and rescale onto 0-100.
        fields = [
            self.ai_tool_familiarity,
            self.adaptability_to_ai_workflows,
            self.willingness_to_learn_ai_skills,
            self.ai_ethics_understanding,
            self.collaboration_with_ai,
            self.problem_solving_with_ai,
            self.creativity_in_ai_use,
            self.ai_learning_speed,
            self.communication_about_ai,
            self.ai_tool_integration,
        ]
        max_total = 5 * len(fields)
        total_score = sum(fields)
        self.result = int((total_score / max_total) * 100)
        return self


class BoostSuggestionsUpdate(BaseModel):
    boost_suggestions: List[str]


class UserResilienceScoreOut(UserResilienceScoreCreate):
    # Pydantic treats underscore-prefixed names as private, so `_id` is not a
    # real field; the before-validator copies Mongo's ObjectId into `id`.
    _id: Optional[ObjectId] = None
    id: Optional[str] = None

    class Config:
        json_encoders = {
            ObjectId: str
        }

    @model_validator(mode='before')
    def handle_objectid(cls, values):
        if '_id' in values and isinstance(values['_id'], ObjectId):
            values['id'] = str(values['_id'])  # Convert ObjectId to string
        return values


class BreakDownByDomainOut(BreakDownByDomainCreate):
    _id: Optional[ObjectId] = None
    id: Optional[str] = None

    class Config:
        json_encoders = {
            ObjectId: str
        }

    @model_validator(mode='before')
    def handle_objectid(cls, values):
        if '_id' in values and isinstance(values['_id'], ObjectId):
            values['id'] = str(values['_id'])  # Convert ObjectId to string
        return values


class FlaggedRiskAreasOut(FlaggedRiskAreasCreate):
    _id: Optional[ObjectId] = None
    id: Optional[str] = None

    class Config:
        json_encoders = {
            ObjectId: str
        }

    @model_validator(mode='before')
    def handle_objectid(cls, values):
        if '_id' in values and isinstance(values['_id'], ObjectId):
            values['id'] = str(values['_id'])  # Convert ObjectId to string
        return values


class BoostSuggestionsOut(BoostSuggestionsCreate):
    _id: Optional[ObjectId] = None
    id: Optional[str] = None

    class Config:
        json_encoders = {
            ObjectId: str
        }

    @model_validator(mode='before')
    def handle_objectid(cls, values):
        if '_id' in values and isinstance(values['_id'], ObjectId):
            values['id'] = str(values['_id'])  # Convert ObjectId to string
        return values
```
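
Because AutomationRiskResult recomputes `result` in its after-validator, the weighting can be exercised without any model call. A sketch with illustrative field values (none taken from a real resume):

```python
# Instantiating the result model triggers calculate_result(); every value
# below is illustrative.
from objects import AutomationRiskResult

sample = AutomationRiskResult(
    job_title="Data Entry Clerk",
    industry="finance",
    years_experience=4,
    education_level="Bachelors",
    managerial_experience=False,
    customer_facing_roles=True,
    repetitiveness_score=5,
    creativity_score=1,
    emotional_intelligence_score=2,
    data_driven_tasks_score=4,
    physical_task_score=1,
    decision_making_level=1,
    strategic_thinking_score=1,
    collaboration_score=2,
    ai_dependency_score=2,
    upskilling_index=1,
)
print(sample.result)  # 71: weighted sum 70, mapped from [-250, 200] onto 0-100
```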
@@ -0,0 +1,407 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from core import db
|
2 |
+
from bson import ObjectId
|
3 |
+
from fastapi import HTTPException
|
4 |
+
from typing import Optional, List
|
5 |
+
from motor.motor_asyncio import AsyncIOMotorDatabase
|
6 |
+
from objects import UserResilienceScoreCreate, UserResilienceScoreOut,BreakDownByDomainCreate,BreakDownByDomainOut,FlaggedRiskAreasCreate,FlaggedRiskAreasOut,BoostSuggestionsCreate,BoostSuggestionsOut,BoostSuggestionsUpdate,UserResilienceScoreUpdate,FlaggedRiskAreasUpdate,BreakDownByDomainUpdate
|
7 |
+
|
8 |
+
|
9 |
+
async def create_user_resilience( data: UserResilienceScoreCreate) -> UserResilienceScoreOut:
|
10 |
+
"""
|
11 |
+
Create a new UserResilienceScore in the database.
|
12 |
+
|
13 |
+
Args:
|
14 |
+
db: The MongoDB database instance.
|
15 |
+
data: A Pydantic object containing the fields to create.
|
16 |
+
|
17 |
+
Returns:
|
18 |
+
The newly created object.
|
19 |
+
"""
|
20 |
+
result = await db.user_resilience.insert_one(data.model_dump())
|
21 |
+
created = await db.user_resilience.find_one({"_id": result.inserted_id})
|
22 |
+
out = UserResilienceScoreOut(**created)
|
23 |
+
return out
|
24 |
+
|
25 |
+
|
26 |
+
async def get_user_resilience( object_id: str) -> Optional[UserResilienceScoreOut]:
|
27 |
+
"""
|
28 |
+
Retrieve a UserResilienceScore by its ID.
|
29 |
+
|
30 |
+
Args:
|
31 |
+
db: The MongoDB database instance.
|
32 |
+
object_id: The ID of the object to retrieve.
|
33 |
+
|
34 |
+
Returns:
|
35 |
+
An bject, or raises 404 if not found.
|
36 |
+
"""
|
37 |
+
if not ObjectId.is_valid(object_id):
|
38 |
+
raise HTTPException(status_code=400, detail="Invalid ID format")
|
39 |
+
|
40 |
+
result = await db.user_resilience.find_one({"_id": ObjectId(object_id)})
|
41 |
+
|
42 |
+
if result is None:
|
43 |
+
raise HTTPException(status_code=404, detail="Object not found")
|
44 |
+
out =UserResilienceScoreOut(**result)
|
45 |
+
return out
|
46 |
+
|
47 |
+
|
48 |
+
async def update_user_resilience( object_id: str, data: UserResilienceScoreUpdate) -> UserResilienceScoreOut:
|
49 |
+
"""
|
50 |
+
Update a UserResilienceScore by its ID.
|
51 |
+
|
52 |
+
Args:
|
53 |
+
db: The MongoDB database instance.
|
54 |
+
object_id: The ID of the object to update.
|
55 |
+
data: A Pydantic object with the updated fields.
|
56 |
+
|
57 |
+
Returns:
|
58 |
+
The updated object dictionary.
|
59 |
+
"""
|
60 |
+
if not ObjectId.is_valid(object_id):
|
61 |
+
raise HTTPException(status_code=400, detail="Invalid ID format")
|
62 |
+
|
63 |
+
update_data = {k: v for k, v in data.dict().items() if v is not None}
|
64 |
+
result = await db.user_resilience.update_one(
|
65 |
+
{"_id": ObjectId(object_id)},
|
66 |
+
{"$set": update_data}
|
67 |
+
)
|
68 |
+
|
69 |
+
if result.matched_count == 0:
|
70 |
+
raise HTTPException(status_code=404, detail="Object not found")
|
71 |
+
|
72 |
+
updateresult =await db.user_resilience.find_one({"_id": ObjectId(object_id)})
|
73 |
+
out = UserResilienceScoreOut(**updateresult)
|
74 |
+
return out
|
75 |
+
|
76 |
+
|
77 |
+
|
78 |
+
async def delete_user_resilience( object_id: str) -> dict:
|
79 |
+
"""
|
80 |
+
Delete a UserResilienceScore by its ID.
|
81 |
+
|
82 |
+
Args:
|
83 |
+
db: The MongoDB database instance.
|
84 |
+
object_id: The ID of the object to delete.
|
85 |
+
|
86 |
+
Returns:
|
87 |
+
A confirmation message or raises 404 if object is not found.
|
88 |
+
"""
|
89 |
+
if not ObjectId.is_valid(object_id):
|
90 |
+
raise HTTPException(status_code=400, detail="Invalid ID format")
|
91 |
+
|
92 |
+
result = await db.user_resilience.delete_one({"_id": ObjectId(object_id)})
|
93 |
+
|
94 |
+
if result.deleted_count == 0:
|
95 |
+
raise HTTPException(status_code=404, detail="Object not found")
|
96 |
+
|
97 |
+
return {"message": "Object deleted successfully"}
|
98 |
+
|
99 |
+
|
100 |
+
|
101 |
+
|
102 |
+
|
103 |
+
|
104 |
+
|
105 |
+
|
106 |
+
|
107 |
+
|
108 |
+
|
109 |
+
|
110 |
+
|
111 |
+
|
112 |
+
|
113 |
+
|
114 |
+
|
115 |
+
|
116 |
+
|
117 |
+
async def create_breakdown_by_domain( data: BreakDownByDomainCreate) -> BreakDownByDomainOut:
|
118 |
+
"""
|
119 |
+
Create a new BreakDownByDomain in the database.
|
120 |
+
|
121 |
+
Args:
|
122 |
+
db: The MongoDB database instance.
|
123 |
+
data: A Pydantic object containing the fields to create.
|
124 |
+
|
125 |
+
Returns:
|
126 |
+
A dictionary representing the newly created object.
|
127 |
+
"""
|
128 |
+
result = await db.breakdown_by_domain.insert_one(data.dict())
|
129 |
+
created = await db.breakdown_by_domain.find_one({"_id": result.inserted_id})
|
130 |
+
out = BreakDownByDomainOut(**created)
|
131 |
+
return out
|
132 |
+
|
133 |
+
|
134 |
+
async def get_breakdown_by_domain( object_id: str) -> Optional[ BreakDownByDomainOut]:
|
135 |
+
"""
|
136 |
+
Retrieve a BreakDownByDomain by its ID.
|
137 |
+
|
138 |
+
Args:
|
139 |
+
db: The MongoDB database instance.
|
140 |
+
object_id: The ID of the object to retrieve.
|
141 |
+
|
142 |
+
Returns:
|
143 |
+
A dictionary of the found object, or raises 404 if not found.
|
144 |
+
"""
|
145 |
+
if not ObjectId.is_valid(object_id):
|
146 |
+
raise HTTPException(status_code=400, detail="Invalid ID format")
|
147 |
+
|
148 |
+
result = await db.breakdown_by_domain.find_one({"_id": ObjectId(object_id)})
|
149 |
+
|
150 |
+
if result is None:
|
151 |
+
raise HTTPException(status_code=404, detail="Object not found")
|
152 |
+
out = BreakDownByDomainOut(**result)
|
153 |
+
return out
|
154 |
+
|
155 |
+
|
156 |
+
+async def update_breakdown_by_domain(object_id: str, data: BreakDownByDomainUpdate) -> BreakDownByDomainOut:
+    """
+    Update a BreakDownByDomain by its ID.
+
+    Args:
+        object_id: The ID of the object to update.
+        data: A Pydantic object with the updated fields.
+
+    Returns:
+        The updated object as a BreakDownByDomainOut model.
+    """
+    if not ObjectId.is_valid(object_id):
+        raise HTTPException(status_code=400, detail="Invalid ID format")
+
+    update_data = {k: v for k, v in data.dict().items() if v is not None}
+    result = await db.breakdown_by_domain.update_one(
+        {"_id": ObjectId(object_id)},
+        {"$set": update_data}
+    )
+
+    if result.matched_count == 0:
+        raise HTTPException(status_code=404, detail="Object not found")
+
+    updated = await db.breakdown_by_domain.find_one({"_id": ObjectId(object_id)})
+    return BreakDownByDomainOut(**updated)
+async def delete_breakdown_by_domain(object_id: str) -> dict:
+    """
+    Delete a BreakDownByDomain by its ID.
+
+    Args:
+        object_id: The ID of the object to delete.
+
+    Returns:
+        A confirmation message, or raises 404 if the object is not found.
+    """
+    if not ObjectId.is_valid(object_id):
+        raise HTTPException(status_code=400, detail="Invalid ID format")
+
+    result = await db.breakdown_by_domain.delete_one({"_id": ObjectId(object_id)})
+
+    if result.deleted_count == 0:
+        raise HTTPException(status_code=404, detail="Object not found")
+
+    return {"message": "Object deleted successfully"}
+async def create_flagged_risk_areas(data: FlaggedRiskAreasCreate) -> FlaggedRiskAreasOut:
+    """
+    Create a new FlaggedRiskAreas in the database.
+
+    Args:
+        data: A Pydantic object containing the fields to create.
+
+    Returns:
+        The newly created object as a FlaggedRiskAreasOut model.
+    """
+    result = await db.flagged_risk_areas.insert_one(data.dict())
+    created = await db.flagged_risk_areas.find_one({"_id": result.inserted_id})
+    return FlaggedRiskAreasOut(**created)
+async def get_flagged_risk_areas(object_id: str) -> FlaggedRiskAreasOut:
+    """
+    Retrieve a FlaggedRiskAreas by its ID.
+
+    Args:
+        object_id: The ID of the object to retrieve.
+
+    Returns:
+        The found object as a FlaggedRiskAreasOut model, or raises 404 if not found.
+    """
+    if not ObjectId.is_valid(object_id):
+        raise HTTPException(status_code=400, detail="Invalid ID format")
+
+    result = await db.flagged_risk_areas.find_one({"_id": ObjectId(object_id)})
+
+    if result is None:
+        raise HTTPException(status_code=404, detail="Object not found")
+    return FlaggedRiskAreasOut(**result)
+async def update_flagged_risk_areas(object_id: str, data: FlaggedRiskAreasUpdate) -> FlaggedRiskAreasOut:
+    """
+    Update a FlaggedRiskAreas by its ID.
+
+    Args:
+        object_id: The ID of the object to update.
+        data: A Pydantic object with the updated fields.
+
+    Returns:
+        The updated object as a FlaggedRiskAreasOut model.
+    """
+    if not ObjectId.is_valid(object_id):
+        raise HTTPException(status_code=400, detail="Invalid ID format")
+
+    update_data = {k: v for k, v in data.dict().items() if v is not None}
+    result = await db.flagged_risk_areas.update_one(
+        {"_id": ObjectId(object_id)},
+        {"$set": update_data}
+    )
+
+    if result.matched_count == 0:
+        raise HTTPException(status_code=404, detail="Object not found")
+
+    updated = await db.flagged_risk_areas.find_one({"_id": ObjectId(object_id)})
+    return FlaggedRiskAreasOut(**updated)
+async def delete_flagged_risk_areas(object_id: str) -> dict:
+    """
+    Delete a FlaggedRiskAreas by its ID.
+
+    Args:
+        object_id: The ID of the object to delete.
+
+    Returns:
+        A confirmation message, or raises 404 if the object is not found.
+    """
+    if not ObjectId.is_valid(object_id):
+        raise HTTPException(status_code=400, detail="Invalid ID format")
+
+    result = await db.flagged_risk_areas.delete_one({"_id": ObjectId(object_id)})
+
+    if result.deleted_count == 0:
+        raise HTTPException(status_code=404, detail="Object not found")
+
+    return {"message": "Object deleted successfully"}
+async def create_boost_suggestions(data: BoostSuggestionsCreate) -> BoostSuggestionsOut:
+    """
+    Create a new BoostSuggestions in the database.
+
+    Args:
+        data: A Pydantic object containing the fields to create.
+
+    Returns:
+        The newly created object as a BoostSuggestionsOut model.
+    """
+    result = await db.boost_suggestions.insert_one(data.dict())
+    created = await db.boost_suggestions.find_one({"_id": result.inserted_id})
+    return BoostSuggestionsOut(**created)
+async def get_boost_suggestions(object_id: str) -> BoostSuggestionsOut:
+    """
+    Retrieve a BoostSuggestions by its ID.
+
+    Args:
+        object_id: The ID of the object to retrieve.
+
+    Returns:
+        The found object as a BoostSuggestionsOut model, or raises 404 if not found.
+    """
+    if not ObjectId.is_valid(object_id):
+        raise HTTPException(status_code=400, detail="Invalid ID format")
+
+    result = await db.boost_suggestions.find_one({"_id": ObjectId(object_id)})
+
+    if result is None:
+        raise HTTPException(status_code=404, detail="Object not found")
+    return BoostSuggestionsOut(**result)
+async def update_boost_suggestions(object_id: str, data: BoostSuggestionsUpdate) -> BoostSuggestionsOut:
+    """
+    Update a BoostSuggestions by its ID.
+
+    Args:
+        object_id: The ID of the object to update.
+        data: A Pydantic object with the updated fields.
+
+    Returns:
+        The updated object as a BoostSuggestionsOut model.
+    """
+    if not ObjectId.is_valid(object_id):
+        raise HTTPException(status_code=400, detail="Invalid ID format")
+
+    update_data = {k: v for k, v in data.dict().items() if v is not None}
+    result = await db.boost_suggestions.update_one(
+        {"_id": ObjectId(object_id)},
+        {"$set": update_data}
+    )
+
+    if result.matched_count == 0:
+        raise HTTPException(status_code=404, detail="Object not found")
+
+    updated = await db.boost_suggestions.find_one({"_id": ObjectId(object_id)})
+    return BoostSuggestionsOut(**updated)
+async def delete_boost_suggestions(object_id: str) -> dict:
+    """
+    Delete a BoostSuggestions by its ID.
+
+    Args:
+        object_id: The ID of the object to delete.
+
+    Returns:
+        A confirmation message, or raises 404 if the object is not found.
+    """
+    if not ObjectId.is_valid(object_id):
+        raise HTTPException(status_code=400, detail="Invalid ID format")
+
+    result = await db.boost_suggestions.delete_one({"_id": ObjectId(object_id)})
+
+    if result.deleted_count == 0:
+        raise HTTPException(status_code=404, detail="Object not found")
+
+    return {"message": "Object deleted successfully"}
@@ -0,0 +1,15 @@
+import base64
+from fastapi import FastAPI, File, UploadFile, HTTPException
+from controllers import resilience_analysis
+from embedDoc import upsert_text_with_chunks, search_pinecone_text
+ARS = FastAPI()
+
+@ARS.post("/risk-analysis")
+async def perform_risk_analysis(file: UploadFile = File(...)):
+    if file.content_type != "application/pdf":
+        raise HTTPException(status_code=400, detail={"error": "File must be a PDF."})
+    ResilienceScore = await resilience_analysis(file=file)
+    return ResilienceScore
@@ -8,3 +8,7 @@ pinned: false
 license: apache-2.0
 ---
 
+If you are changing the connection string in the env to a new one, make sure you populate the levels table with levels up to level 9 (with a default career path and a default level name); otherwise points won't be calculated properly.
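
The note above implies a one-off seeding step. A hedged sketch of what that might look like: the `levels` collection name and the `careerPath` field are assumptions, while the level fields mirror the SimpleIndividualUserLevel model used below.

    from pymongo import MongoClient

    client = MongoClient("<new-connection-string>")   # placeholder, not a real URI
    levels = client["<db_name>"]["levels"]            # collection name assumed

    levels.insert_many([
        {
            "levelNumber": n,
            "levelName": f"Level {n}",    # default level name
            "careerPath": "default",      # assumed field for the default career path
            "minPoints": (n - 1) * 100,   # illustrative thresholds
            "maxPoints": n * 100 - 1,
        }
        for n in range(1, 10)             # levels 1 through 9
    ])
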
@@ -2,11 +2,13 @@ from controller.imports import *
 
 import logging
 from datetime import datetime
+from Ars.routes import ARS
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
 app = FastAPI()
 app.mount('/gamification', gamification)
+app.mount('/Ars', ARS)
 
@@ -93,6 +93,7 @@ def get_all_simple_points_func(userId) -> SimpleIndividualUserLevel:
     db = client[db_name]
     collection = db[collection_name]
     dreamJob = get_dream_job(userId=userId)
+    print(dreamJob)
 
     point_cursor = collection.find({"userId": userId})
     try:
@@ -101,6 +102,17 @@ def get_all_simple_points_func(userId) -> SimpleIndividualUserLevel:
         totalPoints = sum([point['numOfPoints'] for point in points_list])
 
         particularLevelInfo = get_particular_level(dreamJob=dreamJob, totalPoints=totalPoints)
+        print(particularLevelInfo)
 
         points = SimpleIndividualUserLevel(totalpoints=totalPoints, levelName=particularLevelInfo[0].levelName, maxPoints=particularLevelInfo[0].maxPoints, minPoints=particularLevelInfo[0].minPoints, levelNumber=particularLevelInfo[0].levelNumber)
     except:
         totalPoints = 0
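
One sharpening this merge leaves on the table: the bare `except:` treats every failure, including driver errors, as "zero points". A hedged refactor sketch, assuming the fallback is only meant to cover missing data:

    try:
        points_list = list(point_cursor)
        totalPoints = sum(point["numOfPoints"] for point in points_list)
        particularLevelInfo = get_particular_level(dreamJob=dreamJob, totalPoints=totalPoints)
        level = particularLevelInfo[0]
        points = SimpleIndividualUserLevel(
            totalpoints=totalPoints,
            levelName=level.levelName,
            maxPoints=level.maxPoints,
            minPoints=level.minPoints,
            levelNumber=level.levelNumber,
        )
    except (KeyError, IndexError):
        # Only "no points yet" / "no matching level" fall back to zero;
        # anything else should surface instead of being silently swallowed.
        totalPoints = 0
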
@@ -2,6 +2,10 @@ fastapi[all]
 requests
 python-dotenv
 pymupdf
+motor
 pinecone
 sentence-transformers
 einops