rishabhpr committed · verified
Commit 65d1540 · 1 Parent(s): 923245a

Update app.py

Files changed (1):
  1. app.py +35 -14
app.py CHANGED
@@ -28,9 +28,10 @@ model = SentenceTransformer("all-MiniLM-L6-v2").to(device)
 with open("technical_interviewer_prompt.txt", "r") as file:
     technical_interviewer_prompt = file.read()
 
+# Load prompts from files
 with open("question_generation_prompt.txt", "r") as file:
     question_generation_prompt = file.read()
-
+
 st.title("Real-World Programming Question Mock Interview")
 
 # Initialize session state variables
@@ -109,35 +110,38 @@ if generate_button:
     # Generate response using GPT-4 with detailed prompt and debugging logs
     response = generate_response([{"role": "user", "content": detailed_prompt}])  # Question generation prompt excluded here
 
-    # Store generated question in session state for persistence
+    # Store generated question in session state for persistence in sidebar and follow-up conversation state
     st.session_state.generated_question = response
 
-    # Add the generated question and technical interviewer prompt as hidden start for follow-up
-    st.session_state.messages.append({"role": "user", "content": technical_interviewer_prompt})
-    st.session_state.messages.append({"role": "assistant", "content": response})
-
     # Enable follow-up mode after generating the initial question
     st.session_state.follow_up_mode = True
 
 # Display chat messages from history on app rerun (for subsequent conversation)
-for message in st.session_state.messages[2:]:  # Exclude the hidden messages (first two)
+for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.markdown(message["content"])
 
 # Chatbox for subsequent conversations with assistant (follow-up mode)
 if st.session_state.follow_up_mode:
     if user_input := st.chat_input("Continue your conversation or ask follow-up questions here:"):
-        # Add the user's input to the session state
+        # Display user message in chat message container and add to session history
+        with st.chat_message("user"):
+            st.markdown(user_input)
+
         st.session_state.messages.append({"role": "user", "content": user_input})
 
-        # Generate assistant's response based on follow-up input
-        assistant_response = generate_response(st.session_state.messages)
+        # Generate assistant's response based on follow-up input using technical_interviewer_prompt and generated question,
+        # but do not include them in the displayed conversation history.
+        messages_to_send = [
+            {"role": "system", "content": technical_interviewer_prompt},
+            {"role": "assistant", "content": st.session_state.generated_question}
+        ] + st.session_state.messages
+
+        assistant_response = generate_response(messages_to_send)
 
-        # Display the assistant's response
         with st.chat_message("assistant"):
             st.markdown(assistant_response)
 
-        # Append the assistant's response to the conversation history
         st.session_state.messages.append({"role": "assistant", "content": assistant_response})
 
 st.sidebar.markdown("""
@@ -147,15 +151,32 @@ Enter a company name, topic, and level of difficulty, and it will transform a re
 Continue chatting with the AI interviewer in the chatbox.
 """)
 
-# Sidebar content to display persistent generated question
+# Sidebar content to display persistent generated question (left sidebar)
 st.sidebar.markdown("## Generated Question")
 if st.session_state.generated_question:
     st.sidebar.markdown(st.session_state.generated_question)
 else:
     st.sidebar.markdown("_No question generated yet._")
 
-# Right sidebar toggleable debug logs
+# Right sidebar toggleable debug logs and code interpreter section
 with st.expander("Debug Logs (Toggle On/Off)", expanded=False):
     if len(st.session_state.debug_logs) > 0:
         for log_entry in reversed(st.session_state.debug_logs):  # Show most recent logs first
             st.write(log_entry)
+
+st.sidebar.markdown("---")
+st.sidebar.markdown("## Python Code Interpreter")
+code_input = st.sidebar.text_area("Write your Python code here:")
+if st.sidebar.button("Run Code"):
+    try:
+        exec_globals = {}
+        exec(code_input, exec_globals)  # Execute user-provided code safely within its own scope.
+        output_key = [k for k in exec_globals.keys() if k != "__builtins__"]
+        if output_key:
+            output_value = exec_globals[output_key[0]]
+            st.sidebar.success(f"Output: {output_value}")
+        else:
+            st.sidebar.success("Code executed successfully!")
+
+    except Exception as e:
+        st.sidebar.error(f"Error: {e}")
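
For context on the follow-up flow in the second hunk: the hidden technical_interviewer_prompt and the generated question are now prepended to each request rather than stored in st.session_state.messages, so the visible history holds only the user/assistant exchange. The sketch below shows the shape of that request, assuming generate_response is a thin wrapper over an OpenAI-style chat completions call; the wrapper's real implementation in app.py is not part of this diff, and the model name and sample values are placeholders.

from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

def generate_response(messages: list[dict]) -> str:
    # Hypothetical wrapper; the actual generate_response in app.py is not shown in this diff.
    completion = client.chat.completions.create(model="gpt-4o", messages=messages)
    return completion.choices[0].message.content

# Stand-ins for values the app keeps in prompt files and st.session_state:
technical_interviewer_prompt = "You are a technical interviewer..."      # placeholder
generated_question = "Design a rate limiter for an API gateway."         # placeholder
visible_history = [{"role": "user", "content": "Can I start with a hash map?"}]

# Hidden context is rebuilt on every turn and never appended to the visible history.
messages_to_send = [
    {"role": "system", "content": technical_interviewer_prompt},
    {"role": "assistant", "content": generated_question},
] + visible_history

print(generate_response(messages_to_send))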
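
The Python Code Interpreter added in the last hunk runs the submitted snippet with exec() in a fresh dictionary and then reports the first non-builtin name left in that namespace. A possible variation, not part of this commit, is to surface whatever the snippet prints instead; a minimal sketch:

import contextlib
import io

def run_snippet(code_input: str) -> str:
    # Same exec-in-its-own-namespace approach as the committed code,
    # but the value shown to the user is the snippet's printed output.
    exec_globals: dict = {}
    buffer = io.StringIO()
    with contextlib.redirect_stdout(buffer):
        exec(code_input, exec_globals)
    printed = buffer.getvalue().strip()
    return printed if printed else "Code executed successfully!"

print(run_snippet("x = 2 + 2\nprint(x)"))  # prints: 4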