# codingprepdemo/app.py
import streamlit as st
from openai import OpenAI
import os
import pandas as pd
import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import torch
# Set up OpenAI client
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
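
# Note: OpenAI() raises at client construction if the key is missing, so
# OPENAI_API_KEY must be set in the environment before the app starts.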
# Check if GPU is available
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
# Load metadata and embeddings (ensure these files are in your working directory or update paths)
metadata_path = 'question_metadata.csv' # Update this path if needed
embeddings_path = 'question_dataset_embeddings.npy' # Update this path if needed
metadata = pd.read_csv(metadata_path)
embeddings = np.load(embeddings_path)
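
# Assumption: the embeddings are row-aligned with the metadata CSV, i.e.
# embeddings[i] encodes the question described by metadata.iloc[i];
# the retrieval below depends on this ordering.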
# Load the SentenceTransformer model
model = SentenceTransformer("all-MiniLM-L6-v2").to(device)
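# all-MiniLM-L6-v2 outputs 384-dimensional vectors; the precomputed
# embeddings must come from the same model for cosine similarity against
# fresh query encodings to be meaningful.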
# Load prompts from files
with open("technical_interviewer_prompt.txt", "r") as file:
    technical_interviewer_prompt = file.read()

with open("question_generation_prompt.txt", "r") as file:
    question_generation_prompt = file.read()
st.title("Real-World Programming Question Mock Interview")
# Initialize session state variables
if "messages" not in st.session_state:
st.session_state.messages = []
if "follow_up_mode" not in st.session_state:
st.session_state.follow_up_mode = False
if "generated_question" not in st.session_state:
st.session_state.generated_question = None
if "debug_logs" not in st.session_state:
st.session_state.debug_logs = []
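
# Streamlit reruns this script top to bottom on every interaction;
# st.session_state is what keeps the conversation and flags alive
# across those reruns.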
# Find the single most similar stored question for a user query
def find_top_question(query):
    # Encode the query on the selected device, then move to CPU for scikit-learn
    query_embedding = model.encode(query, convert_to_tensor=True, device=device).cpu().numpy()
    query_embedding = query_embedding.reshape(1, -1)

    # Cosine similarity between the query and every precomputed question embedding
    similarities = cosine_similarity(query_embedding, embeddings).flatten()
    top_index = similarities.argsort()[-1]

    top_result = metadata.iloc[top_index].copy()
    top_result['similarity_score'] = similarities[top_index]
    return top_result
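
# Example (illustrative values): find_top_question("Google Medium Backtracking")
# returns the closest stored question's metadata row plus its similarity score.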
# Generate a response using the OpenAI API
def generate_response(messages):
    response = client.chat.completions.create(
        model="o1-mini",
        messages=messages,
    )
    return response.choices[0].message.content
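
# Note: o1-series models do not accept a "system" role, so any standing
# instructions (e.g. the interviewer prompt) are passed as a "user"
# message instead (see the follow-up handling below).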
# User input form for generating a new question
with st.form(key="input_form"):
    company = st.text_input("Company", value="Google")
    difficulty = st.selectbox("Difficulty", ["Easy", "Medium", "Hard"], index=1)
    topic = st.text_input("Topic (e.g., Backtracking)", value="Backtracking")
    generate_button = st.form_submit_button(label="Generate")
if generate_button:
    # Clear the conversation and reset follow-up mode
    st.session_state.messages = []
    st.session_state.follow_up_mode = False

    # Build a retrieval query from the user's inputs and find the most relevant question
    query = f"{company} {difficulty} {topic}"
    top_question = find_top_question(query)

    # Prepare a detailed prompt for the model using the top question's details
    detailed_prompt = (
        f"Transform this LeetCode question into a real-world interview scenario:\n\n"
        f"**Company**: {top_question['company']}\n"
        f"**Question Name**: {top_question['questionName']}\n"
        f"**Difficulty Level**: {top_question['difficulty level']}\n"
        f"**Tags**: {top_question['Tags']}\n"
        f"**Content**: {top_question['Content']}\n"
        f"\nPlease create a real-world interview question based on this information."
    )

    # Generate the scenario with o1-mini; only the response, not the prompt,
    # is added to the visible history
    response = generate_response([{"role": "user", "content": detailed_prompt}])

    # Store the generated question for sidebar display and as the first assistant message
    st.session_state.generated_question = response
    st.session_state.messages.append({"role": "assistant", "content": response})

    # Enable follow-up mode so the chat input appears
    st.session_state.follow_up_mode = True
# Display the generated question and follow-up chat
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
if st.session_state.follow_up_mode:
    if user_input := st.chat_input("Continue your conversation or ask follow-up questions here:"):
        with st.chat_message("user"):
            st.markdown(user_input)
        st.session_state.messages.append({"role": "user", "content": user_input})

        # Prepend the interviewer prompt and the full conversation on each call,
        # without storing the prompt itself in the visible history
        follow_up_response = generate_response(
            [{"role": "user", "content": technical_interviewer_prompt}] + st.session_state.messages
        )

        with st.chat_message("assistant"):
            st.markdown(follow_up_response)
        st.session_state.messages.append({"role": "assistant", "content": follow_up_response})
# Sidebar content to display the generated question
st.sidebar.markdown("## Generated Question")
if st.session_state.generated_question:
    st.sidebar.markdown(st.session_state.generated_question)
else:
    st.sidebar.markdown("_No question generated yet._")
st.sidebar.markdown("""
## About
This is a Real-World Interview Question Generator powered by AI.
Enter a company name, topic, and level of difficulty, and it will transform a relevant question into a real-world interview scenario.
""")
# Debug logs and code interpreter section
with st.expander("Debug Logs (Toggle On/Off)", expanded=False):
    if len(st.session_state.debug_logs) > 0:
        for log_entry in reversed(st.session_state.debug_logs):
            st.write(log_entry)
st.sidebar.markdown("---")
st.sidebar.markdown("## Python Code Interpreter")
code_input = st.sidebar.text_area("Write your Python code here:")
if st.sidebar.button("Run Code"):
    # Caution: exec() runs arbitrary user code inside the app process;
    # this is fine for a local demo but unsafe for a public deployment.
    try:
        exec_globals = {}
        exec(code_input, exec_globals)
        # Report the first user-defined global as the output, if any
        user_vars = [k for k in exec_globals if k != "__builtins__"]
        if user_vars:
            output_value = exec_globals[user_vars[0]]
            st.sidebar.success(f"Output: {output_value}")
        else:
            st.sidebar.success("Code executed successfully!")
    except Exception as e:
        st.sidebar.error(f"Error: {e}")