bhulston committed on
Commit
146b565
·
1 Parent(s): 61dbd5e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -37
app.py CHANGED
@@ -60,42 +60,37 @@ for message in st.session_state.messages:
60
  with st.chat_message(message["role"]):
61
  st.markdown(message["content"])
62
 
63
- if prompt := st.chat_input("What kind of class are you looking for?"):
64
- # Display user message in chat message container
 
65
  with st.chat_message("user"):
66
  st.markdown(prompt)
67
- # Add user message to chat history
68
- st.session_state.messages.append({"role": "user", "content": prompt})
69
-
70
-
71
- response = filter_agent(prompt, OPENAI_API)
72
- query = response
73
-
74
- response = index.query(
75
- vector= embeddings.embed_query(query),
76
- # filter= build_filter(json),
77
- top_k=5,
78
- include_metadata=True
79
- )
80
-
81
- response = reranker(query, response)
82
-
83
- result_query = 'Original Query:' + query + 'Query Results:' + str(response)
84
-
85
- print(results_agent(result_query, OPENAI_API))
86
-
87
- ### GPT Response
88
- # Display assistant response in chat message container
89
- with st.chat_message("assistant"):
90
- message_placeholder = st.empty()
91
- full_response = ""
92
- assistant_response = "Hello there! How can I assist you today?"
93
- # Simulate stream of response with milliseconds delay
94
- for chunk in assistant_response.split():
95
- full_response += chunk + " "
96
- time.sleep(0.05)
97
- # Add a blinking cursor to simulate typing
98
- message_placeholder.markdown(full_response + "▌")
99
- message_placeholder.markdown(full_response)
100
- # Add assistant response to chat history
101
- st.session_state.messages.append({"role": "assistant", "content": full_response})
 
60
  with st.chat_message(message["role"]):
61
  st.markdown(message["content"])
62
 
63
+ prompt = st.chat_input("What kind of class are you looking for?")
64
+
65
+ if st.button("Submit"):
66
  with st.chat_message("user"):
67
  st.markdown(prompt)
68
+ st.session_state.message.append({"role": "user", "content": prompt})
69
+
70
+ response = filter_agent(prompt, OPENAI_API)
71
+ query = response
72
+
73
+ response = index.query(
74
+ vector= embeddings.embed_query(query),
75
+ # filter= build_filter(json),
76
+ top_k=5,
77
+ include_metadata=True
78
+ )
79
+ response = reranker(query, response)
80
+ result_query = 'Original Query:' + query + 'Query Results:' + str(response)
81
+ final_response = result_agent(result_query, OPENAI_API)
82
+
83
+ ### GPT Response
84
+ # Display assistant response in chat message container
85
+ with st.chat_message("assistant"):
86
+ message_placeholder = st.empty()
87
+ assistant_response = "Hello there! How can I assist you today?"
88
+ # Simulate stream of response with milliseconds delay
89
+ for chunk in assistant_response.split():
90
+ full_response += chunk + " "
91
+ time.sleep(0.05)
92
+ # Add a blinking cursor to simulate typing
93
+ message_placeholder.markdown(full_response + "")
94
+ message_placeholder.markdown(final_response)
95
+ # Add assistant response to chat history
96
+ st.session_state.messages.append({"role": "assistant", "content": final_response})