Spaces:
Sleeping
Sleeping
File size: 3,138 Bytes
e404fff |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 |
import streamlit as st
import os
import logging
import dotenv
import yaml
import PyPDF2
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.llms import HuggingFaceHub
import os
# Pull environment variables from a local .env file into os.environ
# (the HF API token is read from there below).
dotenv.load_dotenv()
# Load configuration from YAML
def load_config():
    """Load and parse ``config.yaml`` from the current working directory.

    Returns:
        The parsed YAML document (expected to be a dict with keys such as
        ``embedding_model``, ``model_name`` and ``temperature``).

    Raises:
        FileNotFoundError: If ``config.yaml`` does not exist.
        yaml.YAMLError: If the file is not valid YAML.
    """
    # Explicit encoding so parsing doesn't depend on the platform default.
    with open("config.yaml", "r", encoding="utf-8") as f:
        return yaml.safe_load(f)
# Parsed application settings, shared by the functions below.
config = load_config()
# NOTE(review): the env var is literally named "Gem" — presumably the
# Hugging Face Hub API token; confirm against the .env file.
hf_token = os.getenv("Gem") # Store API token in .env
logging.basicConfig(level=logging.INFO)
# Load embedding model
embeddings_model = HuggingFaceEmbeddings(model_name=config["embedding_model"])
# Extract text from PDFs
def extract_text_from_pdf(file):
    """Extract and concatenate the text of every page of a PDF.

    Args:
        file: A file path or binary file-like object accepted by
            ``PyPDF2.PdfReader`` (e.g. a Streamlit ``UploadedFile``).

    Returns:
        All page texts joined together, with leading/trailing whitespace
        stripped. Pages with no extractable text contribute nothing.
    """
    reader = PyPDF2.PdfReader(file)
    # extract_text() may return None (e.g. image-only pages); treat as "".
    # "".join avoids repeated string concatenation in a loop.
    return "".join(page.extract_text() or "" for page in reader.pages).strip()
# Get interview questions and assess responses
def get_interview_response(jd_text, resume_text, candidate_response=None):
    """Produce the interviewer's next turn for a job/candidate pair.

    Args:
        jd_text: Plain text of the job description.
        resume_text: Plain text of the candidate's resume.
        candidate_response: Optional latest answer from the candidate; when
            given, the model is also asked to assess it and give feedback.

    Returns:
        The model's reply, stripped of surrounding whitespace.
    """
    base_template = """
You are an AI interviewer assessing a candidate for a job role.
JOB DESCRIPTION:
{jd_text}
CANDIDATE PROFILE:
{resume_text}
1. Start by asking an **introductory question**: "Tell me about yourself."
2. Then, based on the job description, ask a **technical question**.
3. If the candidate has already responded, evaluate their answer and provide constructive feedback.
Maintain a professional yet friendly tone.
"""
    # Render the template with the two documents filled in.
    interview_prompt = PromptTemplate(
        input_variables=["jd_text", "resume_text"],
        template=base_template,
    ).format(jd_text=jd_text, resume_text=resume_text)

    # When the candidate has answered, ask the model to evaluate that answer.
    if candidate_response:
        interview_prompt = (
            interview_prompt
            + f"\n\nCANDIDATE RESPONSE: {candidate_response}\n\nAssess the response and provide feedback."
        )

    # Model name, sampling temperature and token credentials all come from
    # the module-level config / environment.
    hub_llm = HuggingFaceHub(
        repo_id=config["model_name"],
        model_kwargs={"temperature": config["temperature"], "max_length": 200},
        huggingfacehub_api_token=hf_token,
    )
    return hub_llm(interview_prompt).strip()
# Streamlit UI
st.set_page_config(page_title="AI Interviewer", layout="centered")
st.title("🤖 AI Interview Chatbot")
st.write("Upload a Job Description and Resume to start the interview.")

jd_file = st.file_uploader("Upload Job Description (PDF)", type=["pdf"])
resume_file = st.file_uploader("Upload Candidate Resume (PDF)", type=["pdf"])

if jd_file and resume_file:
    jd_text = extract_text_from_pdf(jd_file)
    resume_text = extract_text_from_pdf(resume_file)

    # Seed the conversation exactly once per session. The previous code
    # generated (and appended) the opening question on EVERY Streamlit rerun,
    # duplicating it in the history and making a redundant LLM call each time;
    # moving it inside the initialization guard fixes both problems.
    if "interview_history" not in st.session_state:
        st.session_state["interview_history"] = []
        first_question = get_interview_response(jd_text, resume_text)
        st.session_state["interview_history"].append(("AI", first_question))

    # Replay the transcript accumulated so far.
    for role, msg in st.session_state["interview_history"]:
        st.chat_message(role).write(msg)

    query = st.chat_input("Your Response:")
    if query:
        # Record the candidate's answer and the AI's assessment, then rerun
        # so the refreshed transcript includes both new messages.
        response = get_interview_response(jd_text, resume_text, query)
        st.session_state["interview_history"].append(("You", query))
        st.session_state["interview_history"].append(("AI", response))
        st.rerun()