Update app.py
app.py
CHANGED
@@ -28,6 +28,10 @@ model = SentenceTransformer("all-MiniLM-L6-v2").to(device)
 with open("technical_interviewer_prompt.txt", "r") as file:
     technical_interviewer_prompt = file.read()
 
+# Load prompts from files
+with open("question_generation_prompt.txt", "r") as file:
+    question_generation_prompt = file.read()
+
 st.title("Real-World Programming Question Mock Interview")
 
 # Initialize session state variables
@@ -87,7 +91,9 @@ if generate_button:
     # Clear session state and start fresh with follow-up mode disabled
     st.session_state.messages = []
     st.session_state.follow_up_mode = False
-
+
+    st.session_state.messages.append({"role": "user", "content": question_generation_prompt})
+
     # Create a query from user inputs and find the most relevant question
     query = f"{company} {difficulty} {topic}"
     top_question = find_top_question(query)
@@ -132,7 +138,7 @@ if st.session_state.follow_up_mode:
         # Generate assistant's response based on follow-up input using technical_interviewer_prompt as system prompt,
         # including the generated question in context.
         assistant_response = generate_response(
-            [{"role": "
+            [{"role": "user", "content": technical_interviewer_prompt}] + st.session_state.messages
         )
 
         with st.chat_message("assistant"):
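The hunks above call a generate_response helper that is defined elsewhere in app.py and not shown in this commit. As a minimal sketch of what such a helper might look like, assuming the openai v1 Python client and a placeholder model name (neither appears in this diff):

    import openai  # assumes OPENAI_API_KEY is available, e.g. via Space secrets

    def generate_response(messages):
        # Send the accumulated chat history to the model and return the reply text.
        # "gpt-4o-mini" is a placeholder; the model actually used is not shown in this commit.
        response = openai.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages,
        )
        return response.choices[0].message.content

Seeding st.session_state.messages with the question-generation prompt as a user message means the later call, [{"role": "user", "content": technical_interviewer_prompt}] + st.session_state.messages, prepends the interviewer prompt to the full chat history on every follow-up turn.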