rishabhpr commited on
Commit
d903541
·
verified ·
1 Parent(s): a993dcc

Update app.py

Browse files
Files changed (1)
  1. app.py (+6, -2)
app.py CHANGED
@@ -109,9 +109,12 @@ if generate_button:
109
  # Generate response using GPT-4 with detailed prompt and debugging logs
110
  response = generate_response([{"role": "assistant", "content": question_generation_prompt}, {"role": "user", "content": detailed_prompt}])
111
 
112
- # Store generated question in session state for persistence in sidebar
113
  st.session_state.generated_question = response
114
 
 
 
 
115
  # Enable follow-up mode after generating the initial question
116
  st.session_state.follow_up_mode = True
117
 
@@ -129,7 +132,8 @@ if st.session_state.follow_up_mode:
129
 
130
  st.session_state.messages.append({"role": "user", "content": user_input})
131
 
132
- # Generate assistant's response based on follow-up input using technical_interviewer_prompt as system prompt
 
133
  assistant_response = generate_response(
134
  [{"role": "assistant", "content": technical_interviewer_prompt}] + st.session_state.messages
135
  )
 
109
  # Generate response using GPT-4 with detailed prompt and debugging logs
110
  response = generate_response([{"role": "assistant", "content": question_generation_prompt}, {"role": "user", "content": detailed_prompt}])
111
 
112
+ # Store generated question in session state for persistence in sidebar and follow-up conversation state
113
  st.session_state.generated_question = response
114
 
115
+ # Add the generated question to the conversation history as an assistant message (to make it part of follow-up conversations)
116
+ st.session_state.messages.append({"role": "assistant", "content": response})
117
+
118
  # Enable follow-up mode after generating the initial question
119
  st.session_state.follow_up_mode = True
120
 
 
132
 
133
  st.session_state.messages.append({"role": "user", "content": user_input})
134
 
135
+ # Generate assistant's response based on follow-up input using technical_interviewer_prompt as system prompt,
136
+ # including the generated question in context.
137
  assistant_response = generate_response(
138
  [{"role": "assistant", "content": technical_interviewer_prompt}] + st.session_state.messages
139
  )