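"""
Streamlit mock-interview app.

Flow: the user picks a company, difficulty, and topic; the closest stored
LeetCode question is retrieved via sentence-embedding similarity; GPT-4
rewrites it as a real-world interview scenario; the user can then write code
in the sidebar, run it, have GPT-4 evaluate it, and continue a follow-up chat.
"""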
import streamlit as st
from openai import OpenAI
import os
import pandas as pd
import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import torch
import io
import contextlib

# Set up OpenAI client
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
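# Note: the OpenAI client raises at startup if OPENAI_API_KEY is unset.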

# Check if GPU is available
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# Load metadata and embeddings (ensure these files are in your working directory or update paths)
metadata_path = 'question_metadata.csv'
embeddings_path = 'question_dataset_embeddings.npy'

metadata = pd.read_csv(metadata_path)
embeddings = np.load(embeddings_path)

# Load the SentenceTransformer model
model = SentenceTransformer("all-MiniLM-L6-v2").to(device)
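# The query encoder must be the same model that produced
# question_dataset_embeddings.npy (all-MiniLM-L6-v2 outputs 384-dim vectors),
# otherwise the cosine similarities computed below are meaningless.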

# Load prompts from files
with open("question_generation_prompt.txt", "r") as file:
    question_generation_prompt = file.read()

with open("technical_interviewer_prompt.txt", "r") as file:
    technical_interviewer_prompt = file.read()

st.title("Real-World Programming Question Mock Interview")

# Initialize session state variables
if "messages" not in st.session_state:
    st.session_state.messages = []

if "generated_question" not in st.session_state:
    st.session_state.generated_question = None

if "code_output" not in st.session_state:
    st.session_state.code_output = ""

if "evaluation_output" not in st.session_state:
    st.session_state.evaluation_output = ""

# Sidebar layout for Generated Question and Code Box
st.sidebar.markdown("## Generated Question")
if st.session_state.generated_question:
    st.sidebar.markdown(st.session_state.generated_question)
else:
    st.sidebar.markdown("_No question generated yet._")

st.sidebar.markdown("---")
st.sidebar.markdown("## Code Box")

code_input = st.sidebar.text_area(
    label="Write your Python code here:",
    height=200,
    placeholder="Enter your code...",
)

col1, col2 = st.sidebar.columns(2)

# Button to run code, capturing stdout so that print() output is shown
if col1.button("Run Code"):
    try:
        buffer = io.StringIO()
        exec_globals = {}
        # WARNING: exec() runs arbitrary user code in this process; sandbox it
        # before deploying anywhere untrusted.
        with contextlib.redirect_stdout(buffer):
            exec(code_input, exec_globals)
        st.session_state.code_output = buffer.getvalue() or "Code executed successfully (no output)."
    except Exception as e:
        st.session_state.code_output = f"Error: {e}"

# Button to evaluate code using OpenAI API
if col2.button("Evaluate Code"):
    if not st.session_state.generated_question:
        st.sidebar.error("Generate a question first!")
    else:
        try:
            evaluation_prompt = (
                f"Question: {st.session_state.generated_question}\n\n"
                f"Code:\n{code_input}\n\n"
                f"Evaluate this code's correctness, efficiency, and style."
            )
            response = client.chat.completions.create(
                model="gpt-4",
                messages=[{"role": "user", "content": evaluation_prompt}],
            )
            evaluation_response = response.choices[0].message.content
            st.session_state.evaluation_output = evaluation_response

            # Add evaluation output to follow-up conversation
            st.session_state.messages.append({"role": "assistant", "content": evaluation_response})
        except Exception as e:
            st.sidebar.error(f"Error during evaluation: {str(e)}")

# Display outputs below the main app content
st.subheader("Code Output")
st.text(st.session_state.code_output)

st.subheader("Evaluation Output")
st.text(st.session_state.evaluation_output)

# Main form: pick a company, difficulty, and topic to generate a question.
with st.form(key="input_form"):
    company = st.text_input("Company", value="Google")
    difficulty = st.selectbox("Difficulty", ["Easy", "Medium", "Hard"], index=1)
    topic = st.text_input("Topic (e.g., Backtracking)", value="Backtracking")
    
    generate_button = st.form_submit_button(label="Generate")

if generate_button:
    query = f"{company} {difficulty} {topic}"
    
    def find_top_question(query):
        """Return the metadata row of the stored question most similar to the query."""
        # encode() returns a NumPy array by default; reshape to (1, n_dims) for sklearn
        query_embedding = model.encode(query, device=device).reshape(1, -1)
        similarities = cosine_similarity(query_embedding, embeddings).flatten()
        top_index = similarities.argmax()  # index of the single best match
        top_result = metadata.iloc[top_index].copy()
        top_result['similarity_score'] = similarities[top_index]
        return top_result
    
    top_question = find_top_question(query)
    
    detailed_prompt = (
        f"Transform this LeetCode question into a real-world interview scenario:\n\n"
        f"**Company**: {top_question['company']}\n"
        f"**Question Name**: {top_question['questionName']}\n"
        f"**Difficulty Level**: {top_question['difficulty level']}\n"
        f"**Tags**: {top_question['Tags']}\n"
        f"**Content**: {top_question['Content']}\n"
        f"\nPlease create a real-world interview question based on this information."
    )
    
    response_text = client.chat.completions.create(
        model="gpt-4",
        messages=[
            {"role": "system", "content": question_generation_prompt},
            {"role": "user", "content": detailed_prompt},
        ],
    ).choices[0].message.content
    
    st.session_state.generated_question = response_text

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if user_input := st.chat_input("Continue your conversation or ask follow-up questions here:"):
    # Record the user's message first so it is included in the API call below
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)
    
    assistant_response_text = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "system", "content": technical_interviewer_prompt}]
        + st.session_state.messages,
    ).choices[0].message.content
    
    with st.chat_message("assistant"):
        st.markdown(assistant_response_text)
    
    # Persist the assistant's reply so it survives the next rerun
    st.session_state.messages.append({"role": "assistant", "content": assistant_response_text})