# codingprepdemo / app.py
import streamlit as st
from openai import OpenAI
import os
import pandas as pd
import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import torch
# Set up OpenAI client
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
# Check if GPU is available
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
# Load metadata and embeddings (ensure these files are in your working directory or update paths)
metadata_path = 'question_metadata.csv'
embeddings_path = 'question_dataset_embeddings.npy'
metadata = pd.read_csv(metadata_path)
embeddings = np.load(embeddings_path)
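# NOTE: row i of question_metadata.csv is assumed to describe the question whose
# embedding sits at row i of the .npy matrix; retrieval below relies on that alignment.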
# Load the SentenceTransformer model
model = SentenceTransformer("all-MiniLM-L6-v2").to(device)
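# all-MiniLM-L6-v2 encodes text into 384-dimensional vectors; the precomputed
# embeddings are assumed to have been generated with this same model.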
# Load prompts from files
with open("question_generation_prompt.txt", "r") as file:
question_generation_prompt = file.read()
with open("technical_interviewer_prompt.txt", "r") as file:
technical_interviewer_prompt = file.read()
st.title("Real-World Programming Question Mock Interview")
# Initialize session state variables
if "messages" not in st.session_state:
st.session_state.messages = []
if "generated_question" not in st.session_state:
st.session_state.generated_question = None
if "code_output" not in st.session_state:
st.session_state.code_output = ""
if "evaluation_output" not in st.session_state:
st.session_state.evaluation_output = ""
# Sidebar layout for Generated Question and Code Box
st.sidebar.markdown("## Generated Question")
if st.session_state.generated_question:
    st.sidebar.markdown(st.session_state.generated_question)
else:
    st.sidebar.markdown("_No question generated yet._")
st.sidebar.markdown("---")
st.sidebar.markdown("## Code Box")
code_input = st.sidebar.text_area(
    label="Write your Python code here:",
    height=200,
    placeholder="Enter your code...",
)
col1, col2 = st.sidebar.columns(2)
# Button to run code and display output
if col1.button("Run Code"):
    try:
        # Execute the submitted code in an isolated namespace. NOTE: exec on
        # untrusted input is unsafe outside a sandboxed demo environment.
        exec_globals = {}
        exec(code_input, exec_globals)
        # By convention, the submitted code stores its result in a variable
        # named `output`; fall back to a generic success message otherwise.
        st.session_state.code_output = exec_globals.get("output", "Code executed successfully.")
    except Exception as e:
        st.session_state.code_output = f"Error: {str(e)}"
# Button to evaluate code using OpenAI API
if col2.button("Evaluate Code"):
    if not st.session_state.generated_question:
        st.sidebar.error("Generate a question first!")
    else:
        try:
            evaluation_prompt = (
                f"Question: {st.session_state.generated_question}\n\n"
                f"Code:\n{code_input}\n\n"
                f"Evaluate this code's correctness, efficiency, and style."
            )
            response = client.chat.completions.create(
                model="gpt-4",
                messages=[{"role": "user", "content": evaluation_prompt}],
            )
            evaluation_response = response.choices[0].message.content
            st.session_state.evaluation_output = evaluation_response
            # Add evaluation output to the follow-up conversation
            st.session_state.messages.append({"role": "assistant", "content": evaluation_response})
        except Exception as e:
            st.sidebar.error(f"Error during evaluation: {str(e)}")
# Display outputs below the main app content
st.subheader("Code Output")
st.text(st.session_state.code_output)
st.subheader("Evaluation Output")
st.text(st.session_state.evaluation_output)
# Main app logic: generate a question from the form inputs, then run the follow-up conversation.
with st.form(key="input_form"):
    company = st.text_input("Company", value="Google")
    difficulty = st.selectbox("Difficulty", ["Easy", "Medium", "Hard"], index=1)
    topic = st.text_input("Topic (e.g., Backtracking)", value="Backtracking")
    generate_button = st.form_submit_button(label="Generate")
if generate_button:
    query = f"{company} {difficulty} {topic}"

    def find_top_question(query):
        # Embed the query with the same model used for the dataset, then
        # rank the stored questions by cosine similarity.
        query_embedding = model.encode(query, convert_to_tensor=True, device=device).cpu().numpy()
        query_embedding = query_embedding.reshape(1, -1)
        similarities = cosine_similarity(query_embedding, embeddings).flatten()
        top_index = similarities.argsort()[-1]  # index of the most similar question
        top_result = metadata.iloc[top_index].copy()
        top_result['similarity_score'] = similarities[top_index]
        return top_result

    top_question = find_top_question(query)

    detailed_prompt = (
        f"Transform this LeetCode question into a real-world interview scenario:\n\n"
        f"**Company**: {top_question['company']}\n"
        f"**Question Name**: {top_question['questionName']}\n"
        f"**Difficulty Level**: {top_question['difficulty level']}\n"
        f"**Tags**: {top_question['Tags']}\n"
        f"**Content**: {top_question['Content']}\n"
        f"\nPlease create a real-world interview question based on this information."
    )

    # Pass the generation prompt as a system message so it acts as an instruction.
    response_text = client.chat.completions.create(
        model="gpt-4",
        messages=[
            {"role": "system", "content": question_generation_prompt},
            {"role": "user", "content": detailed_prompt},
        ],
    ).choices[0].message.content
    st.session_state.generated_question = response_text
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
if user_input := st.chat_input("Continue your conversation or ask follow-up questions here:"):
    with st.chat_message("user"):
        st.markdown(user_input)
    # Persist the user message before calling the API so the model sees it.
    st.session_state.messages.append({"role": "user", "content": user_input})

    assistant_response_text = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "system", "content": technical_interviewer_prompt}]
        + [{"role": msg["role"], "content": msg["content"]} for msg in st.session_state.messages],
    ).choices[0].message.content

    with st.chat_message("assistant"):
        st.markdown(assistant_response_text)

    # Persist the assistant reply so it survives Streamlit reruns.
    st.session_state.messages.append({"role": "assistant", "content": assistant_response_text})