Update app.py
app.py CHANGED
@@ -28,7 +28,6 @@ model = SentenceTransformer("all-MiniLM-L6-v2").to(device)
 with open("technical_interviewer_prompt.txt", "r") as file:
     technical_interviewer_prompt = file.read()
 
-# Load prompts from files
 with open("question_generation_prompt.txt", "r") as file:
     question_generation_prompt = file.read()
 
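Both prompt files are read unconditionally at startup, so the Space crashes with a FileNotFoundError if either file is missing from the repo. A minimal defensive sketch (not part of this commit; the load_prompt helper is hypothetical):

import os
import streamlit as st

def load_prompt(path: str) -> str:
    # Surface a readable Streamlit error instead of an unhandled traceback
    if not os.path.exists(path):
        st.error(f"Missing prompt file: {path}")
        st.stop()
    with open(path, "r") as file:
        return file.read()

technical_interviewer_prompt = load_prompt("technical_interviewer_prompt.txt")
question_generation_prompt = load_prompt("question_generation_prompt.txt")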
@@ -107,7 +106,7 @@ if generate_button:
         f"\nPlease create a real-world interview question based on this information."
     )
 
-    # Generate response using
+    # Generate response using OpenAI API with detailed prompt and debugging logs
     response = generate_response([{"role": "user", "content": detailed_prompt}])  # Question generation prompt excluded here
 
     # Store generated question in session state for persistence in sidebar and follow-up conversation state
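The generate_response helper itself is not shown in this diff. Since the new comment names the OpenAI API, a minimal sketch of what such a helper might look like (the model name is an assumption, not taken from the commit):

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def generate_response(messages):
    # messages: list of {"role": ..., "content": ...} dicts as assembled above
    completion = client.chat.completions.create(
        model="gpt-4o-mini",  # assumed; the actual model is not visible in the diff
        messages=messages,
    )
    return completion.choices[0].message.content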
@@ -130,11 +129,18 @@ if st.session_state.follow_up_mode:
 
     st.session_state.messages.append({"role": "user", "content": user_input})
 
-    #
-    #
+    # Prepare messages to send to the assistant
+    # Include the technical interviewer prompt and generated question, but do not display them
+    # Add an instruction for the assistant to reply as a real-world interviewer would
+    assistant_instruction = (
+        "As a real-world interviewer, please reply to the candidate's follow-up questions "
+        "specific to the generated interview question, to the point, and in a natural, human-sounding way."
+    )
+
     messages_to_send = [
         {"role": "user", "content": technical_interviewer_prompt},
-        {"role": "assistant", "content": st.session_state.generated_question}
+        {"role": "assistant", "content": st.session_state.generated_question},
+        {"role": "user", "content": assistant_instruction}
     ] + st.session_state.messages
 
     assistant_response = generate_response(messages_to_send)
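With this change, every follow-up call replays the hidden interviewer prompt, the question generated earlier in the session (as an assistant turn), and the new behavioral instruction before the visible chat history. At runtime the list handed to generate_response looks roughly like this (contents abbreviated):

messages_to_send = [
    {"role": "user", "content": "<technical_interviewer_prompt.txt contents>"},
    {"role": "assistant", "content": "<question generated earlier this session>"},
    {"role": "user", "content": "As a real-world interviewer, please reply ..."},
    # ...followed by st.session_state.messages, the visible follow-up chat,
    # which now ends with the user_input appended just above
]

Note the interviewer prompt is injected as a user turn; with a chat-completions-style API an equivalent design would use {"role": "system", ...}, but the commit keeps the user role.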
@@ -176,6 +182,5 @@ if st.sidebar.button("Run Code"):
             st.sidebar.success(f"Output: {output_value}")
         else:
             st.sidebar.success("Code executed successfully!")
-
     except Exception as e:
         st.sidebar.error(f"Error: {e}")
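Only the tail of the Run Code handler appears in this hunk. A sketch of the surrounding structure it implies, assuming the snippet comes from a sidebar text area and is executed with exec (code_input and local_vars are hypothetical names):

code_input = st.sidebar.text_area("Your code")

if st.sidebar.button("Run Code"):
    try:
        local_vars = {}
        exec(code_input, {}, local_vars)         # run the submitted snippet
        output_value = local_vars.get("output")  # convention assumed by this sketch
        if output_value is not None:
            st.sidebar.success(f"Output: {output_value}")
        else:
            st.sidebar.success("Code executed successfully!")
    except Exception as e:
        st.sidebar.error(f"Error: {e}")

exec on user-supplied code is inherently unsafe in a shared Space; the diff does not add any sandboxing.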