rishabhpr committed on
Commit
c050106
·
verified ·
1 Parent(s): 3b110a5

divide prompt

Browse files
Files changed (1) hide show
  1. app.py +39 -26
app.py CHANGED
@@ -24,15 +24,21 @@ embeddings = np.load(embeddings_path)
24
  # Load the SentenceTransformer model
25
  model = SentenceTransformer("all-MiniLM-L6-v2").to(device)
26
 
27
- # Load the system prompt from the file
28
- with open("prompt.txt", "r") as file:
29
- system_prompt = file.read()
 
 
 
30
 
31
  st.title("Real-World Programming Question Mock Interview")
32
 
33
  # Initialize chat history in session state
34
  if "messages" not in st.session_state:
35
- st.session_state.messages = [{"role": "assistant", "content": system_prompt}]
 
 
 
36
 
37
  # Function to find the top 1 most similar question based on user input
38
  def find_top_question(query):
@@ -55,13 +61,13 @@ def find_top_question(query):
55
  return top_result
56
 
57
  # Function to generate response using OpenAI API with debugging logs
58
- def generate_response(prompt):
59
  st.write("### Debugging Log: Data Sent to GPT")
60
- st.write(prompt) # Log the prompt being sent to GPT for debugging
61
 
62
  response = client.chat.completions.create(
63
  model="o1-mini",
64
- messages=st.session_state.messages + [{"role": "assistant", "content": prompt}]
65
  )
66
  return response.choices[0].message.content
67
 
@@ -74,8 +80,9 @@ with st.form(key="input_form"):
74
  generate_button = st.form_submit_button(label="Generate")
75
 
76
  if generate_button:
77
- # Clear session state and start a new conversation history with system prompt
78
- st.session_state.messages = [{"role": "assistant", "content": system_prompt}]
 
79
 
80
  # Create a query from user inputs and find the most relevant question
81
  query = f"{company} {difficulty} {topic}"
@@ -93,34 +100,40 @@ if generate_button:
93
  )
94
 
95
  # Generate response using GPT-4 with detailed prompt and debugging logs
96
- response = generate_response(detailed_prompt)
97
 
98
  # Display assistant response in chat message container and add to session history
99
  with st.chat_message("assistant"):
100
  st.markdown(response)
101
 
102
  st.session_state.messages.append({"role": "assistant", "content": response})
 
 
 
103
 
104
  # Display chat messages from history on app rerun (for subsequent conversation)
105
- for message in st.session_state.messages[1:]: # Skip the system message
106
  with st.chat_message(message["role"]):
107
  st.markdown(message["content"])
108
 
109
- # Chatbox for subsequent conversations with assistant
110
- if user_input := st.chat_input("Continue your conversation or ask follow-up questions here:"):
111
- # Display user message in chat message container and add to session history
112
- with st.chat_message("user"):
113
- st.markdown(user_input)
114
-
115
- st.session_state.messages.append({"role": "user", "content": user_input})
116
-
117
- # Generate assistant's response based on follow-up input
118
- assistant_response = generate_response(user_input)
119
-
120
- with st.chat_message("assistant"):
121
- st.markdown(assistant_response)
122
-
123
- st.session_state.messages.append({"role": "assistant", "content": assistant_response})
 
 
 
124
 
125
  st.sidebar.markdown("""
126
  ## About
 
24
  # Load the SentenceTransformer model
25
  model = SentenceTransformer("all-MiniLM-L6-v2").to(device)
26
 
27
# Load the role prompts from disk.
# question_generation_prompt drives the initial question-generation call;
# technical_interviewer_prompt drives the follow-up interview chat.
# Encoding is pinned to UTF-8 so the prompt text reads identically on every
# platform (the locale-dependent default differs, e.g. on Windows).
with open("question_generation_prompt.txt", "r", encoding="utf-8") as prompt_file:
    question_generation_prompt = prompt_file.read()

with open("technical_interviewer_prompt.txt", "r", encoding="utf-8") as prompt_file:
    technical_interviewer_prompt = prompt_file.read()
33
 
34
st.title("Real-World Programming Question Mock Interview")

# First-run initialization of session state: the running chat transcript,
# and a flag gating the follow-up chat box (enabled after a question is
# generated).
for _key, _default in (("messages", []), ("follow_up_mode", False)):
    if _key not in st.session_state:
        st.session_state[_key] = _default
42
 
43
  # Function to find the top 1 most similar question based on user input
44
  def find_top_question(query):
 
61
  return top_result
62
 
63
# Function to generate response using the OpenAI API, with debugging logs.
def generate_response(messages):
    """Send *messages* to the chat-completions endpoint and return the reply text.

    The outgoing payload is echoed onto the page first so the exact data
    sent to the model can be inspected while debugging.
    """
    st.write("### Debugging Log: Data Sent to GPT")
    st.write(messages)  # surface the full request payload for debugging

    completion = client.chat.completions.create(
        model="o1-mini",
        messages=messages,
    )
    return completion.choices[0].message.content
73
 
 
80
  generate_button = st.form_submit_button(label="Generate")
81
 
82
  if generate_button:
83
+ # Clear session state and start fresh with follow-up mode disabled
84
+ st.session_state.messages = []
85
+ st.session_state.follow_up_mode = False
86
 
87
  # Create a query from user inputs and find the most relevant question
88
  query = f"{company} {difficulty} {topic}"
 
100
  )
101
 
102
  # Generate response using the o1-mini model with detailed prompt and debugging logs
103
+ response = generate_response([{"role": "system", "content": question_generation_prompt}, {"role": "user", "content": detailed_prompt}])
104
 
105
  # Display assistant response in chat message container and add to session history
106
  with st.chat_message("assistant"):
107
  st.markdown(response)
108
 
109
  st.session_state.messages.append({"role": "assistant", "content": response})
110
+
111
+ # Enable follow-up mode after generating the initial question
112
+ st.session_state.follow_up_mode = True
113
 
114
# Replay the stored transcript on each rerun so the conversation persists.
for entry in st.session_state.messages:
    role, text = entry["role"], entry["content"]
    with st.chat_message(role):
        st.markdown(text)
118
 
119
# Follow-up chat box, shown only once an initial question has been generated.
if st.session_state.follow_up_mode:
    user_input = st.chat_input("Continue your conversation or ask follow-up questions here:")
    if user_input:
        # Echo the user's message and record it in the transcript.
        with st.chat_message("user"):
            st.markdown(user_input)
        st.session_state.messages.append({"role": "user", "content": user_input})

        # Prepend the interviewer persona, then hand the whole transcript to
        # the model.
        # NOTE(review): o1-family models have historically rejected
        # "system"-role messages — confirm o1-mini accepts this payload.
        interviewer_context = [
            {"role": "system", "content": technical_interviewer_prompt}
        ] + st.session_state.messages
        assistant_response = generate_response(interviewer_context)

        with st.chat_message("assistant"):
            st.markdown(assistant_response)
        st.session_state.messages.append(
            {"role": "assistant", "content": assistant_response}
        )
137
 
138
  st.sidebar.markdown("""
139
  ## About