rishabhpr committed on
Commit
108fa14
·
verified ·
1 Parent(s): da578f7

fix system prompt in follow up

Browse files
Files changed (1) hide show
  1. app.py +2 -5
app.py CHANGED
@@ -25,9 +25,6 @@ embeddings = np.load(embeddings_path)
25
  model = SentenceTransformer("all-MiniLM-L6-v2").to(device)
26
 
27
  # Load prompts from files
28
- with open("question_generation_prompt.txt", "r") as file:
29
- question_generation_prompt = file.read()
30
-
31
  with open("technical_interviewer_prompt.txt", "r") as file:
32
  technical_interviewer_prompt = file.read()
33
 
@@ -107,12 +104,12 @@ if generate_button:
107
  )
108
 
109
  # Generate response using GPT-4 with detailed prompt and debugging logs
110
- response = generate_response([{"role": "assistant", "content": question_generation_prompt}, {"role": "user", "content": detailed_prompt}])
111
 
112
  # Store generated question in session state for persistence in sidebar and follow-up conversation state
113
  st.session_state.generated_question = response
114
 
115
- # Add the generated question to the conversation history as an assistant message (to make it part of follow-up conversations)
116
  st.session_state.messages.append({"role": "assistant", "content": response})
117
 
118
  # Enable follow-up mode after generating the initial question
 
25
  model = SentenceTransformer("all-MiniLM-L6-v2").to(device)
26
 
27
  # Load prompts from files
 
 
 
28
  with open("technical_interviewer_prompt.txt", "r") as file:
29
  technical_interviewer_prompt = file.read()
30
 
 
104
  )
105
 
106
  # Generate response using GPT-4 with detailed prompt and debugging logs
107
+ response = generate_response([{"role": "user", "content": detailed_prompt}]) # Question generation prompt excluded here
108
 
109
  # Store generated question in session state for persistence in sidebar and follow-up conversation state
110
  st.session_state.generated_question = response
111
 
112
+ # Add the generated question to the conversation history as an assistant message (but omit the prompt)
113
  st.session_state.messages.append({"role": "assistant", "content": response})
114
 
115
  # Enable follow-up mode after generating the initial question