Deepakraj2006 committed
Commit 3516082 · verified · 1 Parent(s): ef41e15

Update app.py

Files changed (1)
  1. app.py +74 -75
app.py CHANGED
@@ -1,76 +1,75 @@
- # Import necessary packages
- from ibm_watsonx_ai import Credentials
- from ibm_watsonx_ai import APIClient
- from ibm_watsonx_ai.foundation_models import Model, ModelInference
- from ibm_watsonx_ai.foundation_models.schema import TextChatParameters
- from ibm_watsonx_ai.metanames import GenTextParamsMetaNames
- import gradio as gr
-
- # Model and project settings
- model_id = "meta-llama/llama-3-2-11b-vision-instruct" # Directly specifying the LLAMA3 model
-
- watsonx_API="L0sx3BXcQRWNmz45mbBLxL1UiZGnftHFQTwITAci-523"
- project_id="ed8f7a2c-e597-4a09-a98f-dbdcef57a0d0"
-
- # Set credentials to use the model
- credentials = {
-     "url": "https://au-syd.ml.cloud.ibm.com",
-     "apikey": watsonx_API
- }
-
- # Generation parameters
- params = TextChatParameters(
-     temperature=0.7,
-     max_tokens=1024
- )
-
- project_id = "skills-network"
-
- # Initialize the model
- model = ModelInference(
-     model_id=model_id,
-     credentials=credentials,
-     project_id=project_id,
-     params=params
- )
-
- # Function to generate career advice
- def generate_career_advice(position_applied, job_description, resume_content):
-     # The prompt for the model
-     prompt = f"Considering the job description: {job_description}, and the resume provided: {resume_content}, identify areas for enhancement in the resume. Offer specific suggestions on how to improve these aspects to better match the job requirements and increase the likelihood of being selected for the position of {position_applied}."
-
-     messages = [
-         {
-             "role": "user",
-             "content": [
-                 {
-                     "type": "text",
-                     "text": prompt
-                 },
-             ]
-         }
-     ]
-
-     # Generate a response using the model with parameters
-     generated_response = model.chat(messages=messages)
-
-     # Extract and format the generated text
-     advice = generated_response['choices'][0]['message']['content']
-     return advice
-
- # Create Gradio interface for the career advice application
- career_advice_app = gr.Interface(
-     fn=generate_career_advice,
-     flagging_mode="never", # Deactivate the flag function in gradio as it is not needed.
-     inputs=[
-         gr.Textbox(label="Position Applied For", placeholder="Enter the position you are applying for..."),
-         gr.Textbox(label="Job Description Information", placeholder="Paste the job description here...", lines=10),
-         gr.Textbox(label="Your Resume Content", placeholder="Paste your resume content here...", lines=10),
-     ],
-     outputs=gr.Textbox(label="Advice"),
-     title="Career Advisor",
-     description="Enter the position you're applying for, paste the job description, and your resume content to get advice on what to improve for getting this job."
- )
-
- # Launch the application
+ # Import necessary packages
+ from ibm_watsonx_ai import Credentials
+ from ibm_watsonx_ai import APIClient
+ from ibm_watsonx_ai.foundation_models import Model, ModelInference
+ from ibm_watsonx_ai.foundation_models.schema import TextChatParameters
+ from ibm_watsonx_ai.metanames import GenTextParamsMetaNames
+ import gradio as gr
+
+ # Model and project settings
+ model_id = "meta-llama/llama-3-2-11b-vision-instruct" # Directly specifying the LLAMA3 model
+
+ watsonx_API="L0sx3BXcQRWNmz45mbBLxL1UiZGnftHFQTwITAci-523"
+ project_id="ed8f7a2c-e597-4a09-a98f-dbdcef57a0d0"
+
+ # Set credentials to use the model
+ credentials = {
+     "url": "https://au-syd.ml.cloud.ibm.com",
+     "apikey": watsonx_API
+ }
+
+ # Generation parameters
+ params = TextChatParameters(
+     temperature=0.7,
+     max_tokens=1024
+ )
+
+
+ # Initialize the model
+ model = ModelInference(
+     model_id=model_id,
+     credentials=credentials,
+     project_id=project_id,
+     params=params
+ )
+
+ # Function to generate career advice
+ def generate_career_advice(position_applied, job_description, resume_content):
+     # The prompt for the model
+     prompt = f"Considering the job description: {job_description}, and the resume provided: {resume_content}, identify areas for enhancement in the resume. Offer specific suggestions on how to improve these aspects to better match the job requirements and increase the likelihood of being selected for the position of {position_applied}."
+
+     messages = [
+         {
+             "role": "user",
+             "content": [
+                 {
+                     "type": "text",
+                     "text": prompt
+                 },
+             ]
+         }
+     ]
+
+     # Generate a response using the model with parameters
+     generated_response = model.chat(messages=messages)
+
+     # Extract and format the generated text
+     advice = generated_response['choices'][0]['message']['content']
+     return advice
+
+ # Create Gradio interface for the career advice application
+ career_advice_app = gr.Interface(
+     fn=generate_career_advice,
+     flagging_mode="never", # Deactivate the flag function in gradio as it is not needed.
+     inputs=[
+         gr.Textbox(label="Position Applied For", placeholder="Enter the position you are applying for..."),
+         gr.Textbox(label="Job Description Information", placeholder="Paste the job description here...", lines=10),
+         gr.Textbox(label="Your Resume Content", placeholder="Paste your resume content here...", lines=10),
+     ],
+     outputs=gr.Textbox(label="Advice"),
+     title="Career Advisor",
+     description="Enter the position you're applying for, paste the job description, and your resume content to get advice on what to improve for getting this job."
+ )
+
+ # Launch the application
  career_advice_app.launch()
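
Note on the committed code: app.py hard-codes the watsonx API key and project ID directly in the file. Below is a minimal sketch of the same ModelInference setup reading those values from environment variables instead; the variable names WATSONX_APIKEY and WATSONX_PROJECT_ID are assumed for illustration and are not part of this commit.

# Sketch only: same watsonx setup as app.py, but with secrets pulled from the
# environment. WATSONX_APIKEY / WATSONX_PROJECT_ID are assumed variable names.
import os

from ibm_watsonx_ai.foundation_models import ModelInference
from ibm_watsonx_ai.foundation_models.schema import TextChatParameters

credentials = {
    "url": "https://au-syd.ml.cloud.ibm.com",
    "apikey": os.environ["WATSONX_APIKEY"]  # supplied at runtime, not committed
}

model = ModelInference(
    model_id="meta-llama/llama-3-2-11b-vision-instruct",
    credentials=credentials,
    project_id=os.environ["WATSONX_PROJECT_ID"],
    params=TextChatParameters(temperature=0.7, max_tokens=1024)
)

On a Hugging Face Space, such values can be stored as Space secrets, which are exposed to the running app as environment variables.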