import streamlit as st
import os
import logging
import dotenv
import yaml
import PyPDF2
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.llms import HuggingFaceHub

dotenv.load_dotenv()
# Load configuration from YAML
def load_config():
    with open("config.yaml", "r") as f:
        return yaml.safe_load(f)

config = load_config()
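# A minimal example config.yaml this script expects. The three key names are the
# ones read below; the specific models and temperature are illustrative
# assumptions, not values taken from the original Space:
#
#   embedding_model: sentence-transformers/all-MiniLM-L6-v2
#   model_name: mistralai/Mistral-7B-Instruct-v0.2
#   temperature: 0.5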
hf_token = os.getenv("Gem")  # Hugging Face API token, read from .env
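# Example .env entry (the key name "Gem" matches the os.getenv call above; the
# token value is a placeholder, not a real credential):
#
#   Gem=hf_xxxxxxxxxxxxxxxxxxxxxxxx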
logging.basicConfig(level=logging.INFO)

# Load embedding model (the embeddings and the FAISS import above are not yet
# used by the chat flow below)
embeddings_model = HuggingFaceEmbeddings(model_name=config["embedding_model"])
# Extract text from PDFs
def extract_text_from_pdf(file):
    reader = PyPDF2.PdfReader(file)
    text = ""
    for page in reader.pages:
        text += page.extract_text() or ""
    return text.strip()
# Get interview questions and assess responses
def get_interview_response(jd_text, resume_text, candidate_response=None):
    prompt_template = """
You are an AI interviewer assessing a candidate for a job role.

JOB DESCRIPTION:
{jd_text}

CANDIDATE PROFILE:
{resume_text}

1. Start by asking an **introductory question**: "Tell me about yourself."
2. Then, based on the job description, ask a **technical question**.
3. If the candidate has already responded, evaluate their answer and provide constructive feedback.

Maintain a professional yet friendly tone.
"""
    prompt = PromptTemplate(
        input_variables=["jd_text", "resume_text"],
        template=prompt_template
    ).format(jd_text=jd_text, resume_text=resume_text)

    if candidate_response:
        prompt += f"\n\nCANDIDATE RESPONSE: {candidate_response}\n\nAssess the response and provide feedback."

    llm = HuggingFaceHub(
        repo_id=config["model_name"],
        model_kwargs={"temperature": config["temperature"], "max_length": 200},
        huggingfacehub_api_token=hf_token
    )
    return llm(prompt).strip()
# Streamlit UI
st.set_page_config(page_title="AI Interviewer", layout="centered")
st.title("🤖 AI Interview Chatbot")
st.write("Upload a Job Description and Resume to start the interview.")

jd_file = st.file_uploader("Upload Job Description (PDF)", type=["pdf"])
resume_file = st.file_uploader("Upload Candidate Resume (PDF)", type=["pdf"])
if jd_file and resume_file:
    jd_text = extract_text_from_pdf(jd_file)
    resume_text = extract_text_from_pdf(resume_file)

    # Ask the opening question only once per session; later reruns reuse the history
    if "interview_history" not in st.session_state:
        st.session_state["interview_history"] = []
        first_question = get_interview_response(jd_text, resume_text)
        st.session_state["interview_history"].append(("AI", first_question))

    for role, msg in st.session_state["interview_history"]:
        st.chat_message(role).write(msg)

    query = st.chat_input("Your Response:")
    if query:
        response = get_interview_response(jd_text, resume_text, query)
        st.session_state["interview_history"].append(("You", query))
        st.session_state["interview_history"].append(("AI", response))
        st.rerun()
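# Example requirements.txt for this Space, inferred from the imports above
# (package names only; pinned versions are left to the reader):
#
#   streamlit
#   python-dotenv
#   pyyaml
#   PyPDF2
#   langchain
#   langchain-community
#   sentence-transformers
#   faiss-cpu
#   huggingface_hub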