Ganesh89 committed (verified)
Commit 1ec8ec9 · Parent(s): 72d5004

Create app.py

Files changed (1)
  1. app.py +56 -0
app.py ADDED
import streamlit as st
from huggingface_hub import InferenceClient
from dotenv import load_dotenv
import os

# Load environment variables
load_dotenv()
api_key = os.getenv("HF_API_KEY")

# Initialize Hugging Face client
client = InferenceClient(api_key=api_key)

# Streamlit UI
st.title("Job Fit Assessment")
st.write("Find out if your resume is a strong fit for the job you're applying to.")

# User inputs
job_title = st.text_input("Job Title")
job_description = st.text_area("Job Description")
resume_description = st.text_area("Resume Description")
educational_qualifications = st.text_area("Educational Qualifications")

if st.button("Assess Job Fit"):
    # Combine inputs into a single prompt for the model
    input_text = (
        "Based on the following:\n"
        f"- Job Title: {job_title}\n"
        f"- Job Description: {job_description}\n"
        f"- Resume Description: {resume_description}\n"
        f"- Educational Qualifications: {educational_qualifications}\n\n"
        "Evaluate if this candidate's profile is a strong fit for the job."
    )

    messages = [{"role": "user", "content": input_text}]

    # Make the API call
    result = ""
    try:
        stream = client.chat.completions.create(
            model="meta-llama/Llama-3.2-1B-Instruct",
            messages=messages,
            max_tokens=500,
            stream=True,
        )

        # Collect the response in chunks; some chunks may carry no content
        for chunk in stream:
            content = chunk.choices[0].delta.content
            if content:
                result += content

        # Display the full result at once
        st.write("Job Fit Assessment Result:")
        st.write(result)

    except Exception as e:
        st.error(f"Error: {e}")