Nattyboi committed
Commit 1e531e2 · 1 Parent(s): 4eab69a

stage 1 for ARS

Files changed (6)
  1. Ars/ai_functions.py +210 -0
  2. Ars/controllers.py +18 -53
  3. Ars/embedDoc.py +109 -0
  4. Ars/objects.py +138 -3
  5. Ars/repositories.py +2 -2
  6. Ars/routes.py +15 -0
Ars/ai_functions.py CHANGED
@@ -0,0 +1,210 @@
+from pdfminer.high_level import extract_text_to_fp
+from io import BytesIO
+from objects import ai, ResumeData, AutomationRiskResult, RealWorldQuestion, SkillDepthResult, AICollabReadiness, BreakDownByDomainUpdate, FlaggedRiskAreasUpdate, BoostSuggestionsUpdate
+from embedDoc import search_pinecone_text
+from fastapi import UploadFile
+
+
+def extract_text_from_bytes(pdf_bytes: bytes) -> str:
+    """Extract plain text from an in-memory PDF with pdfminer."""
+    output_string = BytesIO()
+    with BytesIO(pdf_bytes) as input_stream:
+        extract_text_to_fp(input_stream, output_string)
+    return output_string.getvalue().decode()
+
+
+async def resume_analysis(upload_file: UploadFile) -> ResumeData:
+    contents = await upload_file.read()
+    resume = extract_text_from_bytes(pdf_bytes=contents)
+    if resume:
+        prompt = f"""
+        You are an AI resume parser. Read the resume below and extract the following structured information, where present:
+
+        1. Work experience details: job titles and descriptions (max 3).
+        2. List of technical skills: technologies, tools, platforms, etc. (max 4).
+        3. Soft and transferable skills:
+           - Leadership and collaboration
+           - Critical thinking and problem-solving
+           - Communication skills
+           - Cross-functional/interdisciplinary experience
+           - Initiative and adaptability
+        4. Career path details:
+           - Current or intended role/field
+           - Industry and sector context
+           - Career trajectory trends
+        5. Project experience (if any): project titles, descriptions, role (max 3).
+        6. Evidence of upskilling (optional): certifications, courses, learning projects, hackathons.
+
+        Return the extracted data as a JSON object that matches the provided schema.
+
+        Here is the resume:
+
+        {resume}
+        """
+        result = ai.chat(prompt=prompt, output_schema=ResumeData)
+        return result
+
+
+def calculate_automation_risk(resume: ResumeData) -> AutomationRiskResult:
+    def generate_real_world_data(resume: ResumeData):
+        # Turn the resume into a semantic-search query against the future-of-jobs corpus.
+        prompt = f"Generate a search query using the resume details below. I want to check some text that has data on future-jobs reports using semantic search. Here is the resume data: {resume}"
+        result = ai.chat(prompt=prompt, output_schema=RealWorldQuestion)
+        search_result = search_pinecone_text(query_text=result.question)
+        return search_result
+
+    real_world_data = generate_real_world_data(resume=resume)
+    prompt = f"""
+    You are an Automation Risk Calculator. Read the following resume data and estimate how easily the user persona's skills and job titles could be automated. Some real-world data is provided below; it may or may not help with your estimation.
+
+    Here is the resume:
+
+    ({resume})
+
+    Here is the data:
+
+    ({real_world_data})
+
+    Return just a number, MAX 100, MIN 0. Automation risk is inverted (lower risk = higher score).
+    """
+    result = ai.chat(prompt=prompt, output_schema=AutomationRiskResult)
+    return result
+
+
+def calculate_skill_depth(resume: ResumeData) -> SkillDepthResult:
+    prompt = f"""
+    You are a Skill Depth Calculator. Read the following resume data and score it based on the number of years per skill, the seniority of roles, and the presence of certifications.
+
+    Here is the resume:
+
+    ({resume})
+
+    Return just a number, MAX 100, MIN 0.
+    """
+    result = ai.chat(prompt=prompt, output_schema=SkillDepthResult)
+    return result
+
+
+def calculate_Ai_collab_readiness(resume: ResumeData) -> AICollabReadiness:
+    prompt = f"""
+    You are an AI Collaboration Readiness Calculator. Read the following resume data and score how ready the person is to use AI, not just compete with it, based on:
+    - Mention of AI tools (ChatGPT, Copilot, etc.)
+    - Stated openness to automation
+    - How they currently use AI tools in their work or studies
+
+    Here is the resume:
+
+    ({resume})
+
+    Return just a number, MAX 100, MIN 0.
+    """
+    result = ai.chat(prompt=prompt, output_schema=AICollabReadiness)
+    return result
+
+
+def generate_domain_breakdown(resume: ResumeData) -> BreakDownByDomainUpdate:
+    prompt = f"""
+    You currently have only one important task: generate a domain breakdown. Read the following resume data and classify skills/domains using a taxonomy (e.g., "Software Development", "Marketing", "Data Science"):
+    - **Breakdown by Domain EXAMPLE**:
+      - Technical: 65
+      - Creative: 80
+      - Strategy: 75
+      - Collaboration: 60
+
+    Here is the resume:
+
+    ({resume})
+
+    Return proper number values, MAX 100, MIN 0.
+    """
+    result = ai.chat(prompt=prompt, output_schema=BreakDownByDomainUpdate)
+    return result
+
+
+def generate_flagged_risk_areas(resume: ResumeData) -> FlaggedRiskAreasUpdate:
+    prompt = f"""
+    You are a Resume Evaluation AI. Your task is to analyze the provided resume and identify potential risk areas based on the content. Focus on:
+
+    1. Missing recent certifications
+    2. Lack of AI tool usage
+    3. Overreliance on automatable tasks (e.g., data entry, spreadsheets)
+
+    For each risk area, return:
+    - A clear explanation (1-2 sentences MAX)
+
+    **Example Output Format**:
+    - "risk_areas: Your spreadsheet skills are highly automatable."
+    - "risk_areas: Your portfolio lacks AI-integrated work."
+
+    Here is the resume to evaluate:
+
+    ({resume})
+    """
+    result = ai.chat(prompt=prompt, output_schema=FlaggedRiskAreasUpdate)
+    return result
+
+
+def generate_boost_suggestion(resume: ResumeData) -> BoostSuggestionsUpdate:
+    prompt = f"""
+    You are a Resume Enhancement AI. Based on the resume provided below, generate clear, AI-assisted improvement suggestions to help the user strengthen weak areas.
+
+    Focus on identifying skill or experience gaps and suggest practical next steps, such as:
+    - Learning relevant AI tools
+    - Enrolling in advanced courses
+    - Expanding project depth
+    - Gaining certifications
+
+    **Examples of Suggestions**:
+    - "Consider learning ChatGPT to enhance your coding workflow."
+    - "Deepen your skill in Python with an advanced online course."
+    - "Add recent certifications to show ongoing learning."
+    - "Incorporate AI tools into your portfolio projects."
+
+    **Rules**:
+    - Be specific and relevant to the resume.
+    - Each suggestion should be 1-2 sentences.
+    - Return 2-4 actionable suggestions.
+
+    Here is the resume to analyze:
+
+    ({resume})
+    """
+    result = ai.chat(prompt=prompt, output_schema=BoostSuggestionsUpdate)
+    return result
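For a quick local sanity check of the extraction step, the PDF-to-text helper can be exercised before wiring it to the FastAPI upload path. A minimal sketch, assuming a hypothetical sample.pdf on disk, GEMINI_API_KEY set in the environment (importing ai_functions constructs the module-level Gemini client in objects.py), and the script run from the Ars/ directory so the bare imports resolve:

    from ai_functions import extract_text_from_bytes

    with open("sample.pdf", "rb") as f:  # hypothetical sample resume
        text = extract_text_from_bytes(f.read())
    print(text[:300])  # eyeball the extracted text before handing it to the parser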
Ars/controllers.py CHANGED
@@ -1,53 +1,18 @@
-# TODO: perform calculation Risk Calculation and also generate Breakdown by Domain from resume, generate Flagged Risk areas from resume and boost suggestions from resume
-
-# NOTE: to perform risk calculation we use this formula ResilienceScore = (1 - AutomationRisk) * 0.5 + SkillDepth * 0.3 + AICollabReadiness * 0.2
-# NOTE: - **Automation Risk** is inverted (lower risk = higher score)
-# NOTE: - Normalize each sub-score to a scale of 0-100
-# NOTE: - Final score is a 0-100 **Crayonics Resilience Score**
-
-# TODO: things I'm gonna need 1. An AI, 2. Cache Controller 3. Database functions
-from Ars.core import r
-
-
-def analyze_resume(resume_text: str) -> dict:
-    # Step 1: Check cache
-    cached = r.get(resume_text)
-    if cached:
-        return cached
-
-    # Step 2: Extract skills, domains, and experience info
-    extracted_data = extract_resume_insights(resume_text)
-
-    # Step 3: Score sub-components
-    automation_risk = calculate_automation_risk(extracted_data)
-    skill_depth = calculate_skill_depth(extracted_data)
-    ai_collab_readiness = calculate_ai_collab_readiness(extracted_data)
-
-    # Normalize sub-scores to 0-100
-    automation_risk = normalize_score(1 - automation_risk)
-    skill_depth = normalize_score(skill_depth)
-    ai_collab_readiness = normalize_score(ai_collab_readiness)
-
-    # Step 4: Final score
-    resilience_score = (automation_risk * 0.5 +
-                        skill_depth * 0.3 +
-                        ai_collab_readiness * 0.2)
-
-    # Step 5: Breakdown, flags, suggestions
-    breakdown = generate_domain_breakdown(extracted_data)
-    risk_flags = identify_risk_flags(extracted_data)
-    suggestions = generate_boost_suggestions(risk_flags, extracted_data)
-
-    # Step 6: Package and cache
-    result = {
-        "resilience_score": round(resilience_score, 2),
-        "breakdown_by_domain": breakdown,
-        "flagged_risk_areas": risk_flags,
-        "boost_suggestions": suggestions,
-    }
-    r.setex(name=resume_text, value=result, time=3600)
-    db_controller.save_analysis_result(resume_text, result)
-
-    return result
+from core import r
+from repositories import create_boost_suggestions, create_breakdown_by_domain, create_flagged_risk_areas, create_user_resilience
+from fastapi import UploadFile
+from ai_functions import resume_analysis, calculate_automation_risk, calculate_Ai_collab_readiness, calculate_skill_depth, generate_boost_suggestion, generate_domain_breakdown, generate_flagged_risk_areas
+
+
+async def resilience_analysis(file: UploadFile):
+    resume = await resume_analysis(file)
+    risk = calculate_automation_risk(resume)
+    skill_depth = calculate_skill_depth(resume)
+    ai_readiness = calculate_Ai_collab_readiness(resume)
+    # ResilienceScore = (1 - AutomationRisk) * 0.5 + SkillDepth * 0.3 + AICollabReadiness * 0.2,
+    # with each 0-100 sub-score normalized to 0-1 first.
+    ResilienceScore = ((1 - (risk.result / 100)) * 0.5
+                       + (skill_depth.result / 100) * 0.3
+                       + (ai_readiness.result / 100) * 0.2)
+    flagged_risk = generate_flagged_risk_areas(resume=resume)
+    boost_suggestion = generate_boost_suggestion(resume=resume)
+    domain_breakdown = generate_domain_breakdown(resume=resume)
+    print("flagged_risk", flagged_risk)
+    print("boost_suggestion", boost_suggestion)
+    print("domain_breakdown", domain_breakdown)
+    return {"overall score": ResilienceScore, "flagged Risk": flagged_risk, "boost suggestion": boost_suggestion, "domain breakdown": domain_breakdown, "resume": resume}
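Plugging sample numbers into the weighting makes the formula concrete (assumed sub-scores, not output from the code):

    # Assume risk.result = 40, skill_depth.result = 70, ai_readiness.result = 80:
    #   ResilienceScore = (1 - 40/100) * 0.5 + (70/100) * 0.3 + (80/100) * 0.2
    #                   = 0.30 + 0.21 + 0.16
    #                   = 0.67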
Ars/embedDoc.py ADDED
@@ -0,0 +1,109 @@
+from pinecone import Pinecone
+from langchain_text_splitters import RecursiveCharacterTextSplitter
+from typing import List, Dict
+import os
+from dotenv import load_dotenv
+import time
+
+load_dotenv()
+
+
+def chunk_list(lst, chunk_size):
+    """Yield successive chunks of size `chunk_size` from list."""
+    for i in range(0, len(lst), chunk_size):
+        yield lst[i:i + chunk_size]
+
+
+def upsert_text_with_chunks(
+    text: str,
+    *,
+    index_host: str = "https://resume-42eo81u.svc.aped-4627-b74a.pinecone.io",
+    namespace: str = "default",
+    chunk_size: int = 1000,
+    chunk_overlap: int = 200
+) -> None:
+    """
+    Splits a long text into overlapping chunks and upserts them directly into a Pinecone index
+    that has integrated embedding enabled.
+
+    Args:
+        text (str): The full text document to embed.
+        index_host (str): Pinecone index host URL.
+        namespace (str): Pinecone namespace to upsert into.
+        chunk_size (int): Max characters per chunk.
+        chunk_overlap (int): Overlap in characters between chunks.
+    """
+    api_key = os.getenv("PINECONE_API_KEY")
+    if not api_key:
+        raise EnvironmentError("Set PINECONE_API_KEY in environment")
+
+    pc = Pinecone(api_key=api_key)
+    index = pc.Index(host=index_host)
+
+    splitter = RecursiveCharacterTextSplitter(
+        chunk_size=chunk_size,
+        chunk_overlap=chunk_overlap,
+        length_function=len,
+        is_separator_regex=False
+    )
+    chunks = splitter.split_text(text)
+    if not chunks:
+        print("No chunks generated — exiting.")
+        return
+
+    records = [
+        {
+            "_id": f"chunk-{i}",
+            "text": chunk
+        }
+        for i, chunk in enumerate(chunks)
+    ]
+
+    # Upsert in batches of 50, pausing between batches to stay under rate limits.
+    for batch in chunk_list(records, 50):
+        print("Inserting")
+        index.upsert_records(records=batch, namespace=namespace)
+        time.sleep(60)
+        print("resting")
+    print(f"✅ Upserted {len(records)} chunks into namespace '{namespace}'.")
+
+
+def search_pinecone_text(
+    query_text: str,
+    index_host: str = "https://resume-42eo81u.svc.aped-4627-b74a.pinecone.io",
+    namespace: str = "default",
+    top_k: int = 2,
+    fields: List[str] = ["category", "text"]
+) -> List[Dict]:
+    """
+    Search a Pinecone index using a text query.
+
+    Args:
+        query_text (str): The input text to search for.
+        index_host (str): The specific index host URL.
+        namespace (str): The namespace to search within.
+        top_k (int): Number of top results to return.
+        fields (List[str]): Metadata fields to include in the response.
+
+    Returns:
+        List[Dict]: The top matching results.
+    """
+    api_key = os.getenv("PINECONE_API_KEY")
+    pc = Pinecone(api_key=api_key)
+    index = pc.Index(host=index_host)
+
+    results = index.search(
+        namespace=namespace,
+        query={"inputs": {"text": query_text}, "top_k": top_k},
+        fields=fields
+    )
+    hits = results.result['hits']
+    result = []
+    for hit in hits:
+        text = hit['fields']['text']
+        score = hit['_score']
+        result.append({"text": text, "score": score})
+    return result
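A minimal usage sketch for the two helpers above, assuming PINECONE_API_KEY is set, the index at the hard-coded host has integrated embedding enabled, and "future_jobs_report.txt" is a hypothetical corpus file:

    from embedDoc import upsert_text_with_chunks, search_pinecone_text

    with open("future_jobs_report.txt") as f:  # hypothetical source document
        upsert_text_with_chunks(f.read(), namespace="default")

    hits = search_pinecone_text(query_text="automation risk for data entry roles")
    for hit in hits:
        print(round(hit["score"], 3), hit["text"][:80])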
Ars/objects.py CHANGED
@@ -1,7 +1,69 @@
 from datetime import datetime
-from pydantic import model_validator, BaseModel
-from typing import List, Optional, Union
+from pydantic import Field, model_validator, BaseModel
+from typing import List, Optional, Union, Type, TypeVar
 from bson import ObjectId
+import openai
+from google import genai
+from google.genai import types
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+GOOGLE_API_KEY = os.getenv("GEMINI_API_KEY")
+OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+
+T = TypeVar("T", bound=BaseModel)
+
+
+class AIWrapper:
+    """Thin wrapper that routes a prompt to OpenAI or Gemini and parses the reply into a Pydantic schema."""
+
+    def __init__(self, provider='openai'):
+        self.provider = provider.lower()
+
+        if self.provider == 'openai':
+            openai.api_key = OPENAI_API_KEY
+        elif self.provider == 'gemini':
+            self.gemini_client = genai.Client(
+                api_key=GOOGLE_API_KEY,
+                http_options=types.HttpOptions(api_version='v1alpha')
+            )
+        else:
+            raise ValueError("Provider must be 'openai' or 'gemini'")
+
+    def chat(self, prompt: str, output_schema: Type[T]) -> T:
+        """
+        Generate a response from the AI provider and return it parsed into the specified schema.
+
+        Args:
+            prompt (str): The input prompt.
+            output_schema (Type[T]): A Pydantic model representing the output schema.
+
+        Returns:
+            T: Parsed AI response as an instance of output_schema.
+        """
+        if self.provider == 'openai':
+            return self._openai_chat(prompt)
+        elif self.provider == 'gemini':
+            return self._gemini_chat(prompt, output_schema=output_schema)
+
+    def _openai_chat(self, prompt: str) -> str:
+        # Returns the raw completion text; schema parsing is only wired up for Gemini.
+        response = openai.ChatCompletion.create(
+            model="gpt-4",
+            messages=[
+                {"role": "user", "content": prompt}
+            ]
+        )
+        return response['choices'][0]['message']['content']
+
+    def _gemini_chat(self, prompt: str, output_schema: Type[T]) -> T:
+        response = self.gemini_client.models.generate_content(
+            model='gemini-2.0-flash-001',
+            contents=prompt,
+            config=types.GenerateContentConfig(
+                response_mime_type='application/json',
+                response_schema=output_schema,
+            ),
+        )
+        return response.parsed
+
+
+ai = AIWrapper(provider='gemini')
 
 class UserResilienceScoreCreate(BaseModel):
 
@@ -30,7 +92,6 @@ class BreakDownByDomainCreate(BaseModel):
 
 
 class BreakDownByDomainUpdate(BaseModel):
-
     Technical:Optional[float]=None
     Creative:Optional[float]=None
     Strategy:Optional[float]=None
@@ -50,7 +111,81 @@ class BoostSuggestionsCreate(BaseModel):
 
     boost_suggestions:List[str]
 
+
+class ProjectExperienceDetails(BaseModel):
+    ProjectTitles: str = Field(..., description="The title(s) of the project(s) involved in.")
+    descriptions: str = Field(..., description="Detailed description of the project and what it entailed.")
+    RoleInTheProject: str = Field(..., description="The specific role played within the project.")
+
+
+class WorkExperienceDetails(BaseModel):
+    JobTitles: str = Field(..., description="The job titles held in past employment.")
+    JobDescriptions: str = Field(..., description="Summary of responsibilities and duties in these jobs.")
+
+
+class SoftTransferableSkills(BaseModel):
+    LeadershipAndCollaborationIndicators: str = Field(..., description="Evidence or examples demonstrating leadership and teamwork.")
+    CriticalThinkingOrProblemSolvingVerb: str = Field(..., description="Examples of critical thinking or problem solving performed.")
+    CommunicationSkills: Optional[str] = Field(None, description="Description of communication skills and contexts.")
+    CrossFunctionalOrInterdisciplinaryExperience: str = Field(..., description="Experience working across teams or disciplines.")
+    InitiativeAndAdaptabilityLanguage: str = Field(..., description="Examples of taking initiative and adapting to change.")
+
+
+class CareerPathInformation(BaseModel):
+    CurrentOrIntendedRoleOrField: str = Field(..., description="Current or intended professional role or field of work.")
+    IndustryAndSectorContext: str = Field(..., description="Context about the industry and sector related to the career path.")
+    CareerTrajectoryTrends: str = Field(..., description="Observed or expected trends in the career trajectory or sector.")
+
+
+class EvidenceOfUpskillingAndLifelongLearning(BaseModel):
+    CertificationsCoursesOrBootcampsListed: Optional[List[str]] = Field(None, description="List of certifications, courses, or bootcamps completed.")
+    SelfInitiatedLearningProjectsOrNonDegreeEducationalAchievements: Optional[List[str]] = Field(None, description="List of personal projects or non-degree achievements.")
+    ParticipationInHackathonsClubsOrProfessionalCommunities: Optional[List[str]] = Field(None, description="Involvement in hackathons, clubs, or professional groups.")
+
+
+class AIRelatedKeywords(BaseModel):
+    AiToolsAndTechnologies: Optional[List[str]] = Field(
+        None,
+        description="List of AI tools and technologies mentioned in the resume, e.g., ChatGPT, TensorFlow."
+    )
+    conceptsAndTechniques: Optional[List[str]] = Field(
+        None,
+        description="AI concepts or techniques like NLP, computer vision, or reinforcement learning."
+    )
+    aiIntegratedProjectsMentioned: Optional[List[str]] = Field(
+        None,
+        description="Names or descriptions of projects where AI was applied."
+    )
+    usageContextDescriptions: Optional[List[str]] = Field(
+        None,
+        description="Sentences or phrases describing how AI was used in projects or tasks."
+    )
+
+
+class ResumeData(BaseModel):
+    workExperienceDetails: Optional[List[WorkExperienceDetails]] = None
+    listOfExplicitTechnicalSkills: Optional[List[str]] = None
+    softTransferableSkills: List[SoftTransferableSkills]
+    projectExperienceDetails: Optional[List[ProjectExperienceDetails]] = None
+    careerPathInformation: CareerPathInformation
+    evidenceOfUpskillingAndLifelongLearning: Optional[EvidenceOfUpskillingAndLifelongLearning] = None
+    aiRelatedKeywords: AIRelatedKeywords
+
+
+class RealWorldQuestion(BaseModel):
+    question: str
+
+
+class AutomationRiskResult(BaseModel):
+    result: int = Field(..., description="The result of an automation risk estimation done using real-world data and the resume data of a user")
+
+
+class SkillDepthResult(BaseModel):
+    result: int = Field(..., description="Score based on number of years per skill, seniority of roles, certification presence")
+
+
+class AICollabReadiness(BaseModel):
+    result: int = Field(..., description="Score based on how ready they are to use AI, not just compete with it")
+
 
 class BoostSuggestionsUpdate(BaseModel):
 
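The wrapper pairs any prompt with a Pydantic model, so callers get typed objects back instead of raw JSON. A quick sketch of how it composes (the CityAnswer model is illustrative, not part of the commit; requires GEMINI_API_KEY in the environment):

    from pydantic import BaseModel
    from objects import ai  # module-level instance, provider='gemini'

    class CityAnswer(BaseModel):  # illustrative schema, not in the commit
        city: str

    answer = ai.chat(prompt="What is the capital of France? Reply as JSON.", output_schema=CityAnswer)
    print(answer.city)  # a parsed CityAnswer instance, e.g. "Paris"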
Ars/repositories.py CHANGED
@@ -1,9 +1,9 @@
-from Ars.core import db
+from core import db
 from bson import ObjectId
 from fastapi import HTTPException
 from typing import Optional, List
 from motor.motor_asyncio import AsyncIOMotorDatabase
-from Ars.objects import UserResilienceScoreCreate, UserResilienceScoreOut,BreakDownByDomainCreate,BreakDownByDomainOut,FlaggedRiskAreasCreate,FlaggedRiskAreasOut,BoostSuggestionsCreate,BoostSuggestionsOut,BoostSuggestionsUpdate,UserResilienceScoreUpdate,FlaggedRiskAreasUpdate,BreakDownByDomainUpdate
+from objects import UserResilienceScoreCreate, UserResilienceScoreOut, BreakDownByDomainCreate, BreakDownByDomainOut, FlaggedRiskAreasCreate, FlaggedRiskAreasOut, BoostSuggestionsCreate, BoostSuggestionsOut, BoostSuggestionsUpdate, UserResilienceScoreUpdate, FlaggedRiskAreasUpdate, BreakDownByDomainUpdate
 
 
 async def create_user_resilience( data: UserResilienceScoreCreate) -> UserResilienceScoreOut:
Ars/routes.py CHANGED
@@ -0,0 +1,15 @@
+import base64
+from fastapi import FastAPI, File, UploadFile, HTTPException
+from controllers import resilience_analysis
+from embedDoc import upsert_text_with_chunks, search_pinecone_text
+
+ARS = FastAPI()
+
+
+@ARS.post("/risk-analysis")
+async def perform_risk_analysis(file: UploadFile = File(...)):
+    if file.content_type != "application/pdf":
+        # Raise (not return) so FastAPI turns this into an actual 400 response.
+        raise HTTPException(status_code=400, detail={"error": "File must be a PDF."})
+    ResilienceScore = await resilience_analysis(file=file)
+    return ResilienceScore
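To exercise the new endpoint end to end, a client sketch (assumes the app is served locally, e.g. with `uvicorn routes:ARS`, that the `requests` package is installed, and that sample.pdf is a hypothetical resume file):

    import requests

    with open("sample.pdf", "rb") as f:  # hypothetical sample resume
        resp = requests.post(
            "http://127.0.0.1:8000/risk-analysis",
            files={"file": ("sample.pdf", f, "application/pdf")},
        )
    print(resp.json()["overall score"])  # key name as returned by resilience_analysis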