prasadnu committed
Commit 98e288c · 1 Parent(s): c0e4e7c

colpali fix

pages/Multimodal_Conversational_Search.py CHANGED
@@ -208,11 +208,6 @@ def handle_input(state,dummy):
         if key.startswith('input_'):
             inputs[key.removeprefix('input_')] = st.session_state[key]
     st.session_state.inputs_ = inputs
-
-    #######
-
-
-    #st.write(inputs)
     question_with_id = {
         'question': inputs["query"],
         'id': len(st.session_state.questions_)
@@ -230,32 +225,7 @@ def handle_input(state,dummy):
         'image': out_['image'],
         'table':out_['table']
         })
-    #st.session_state.input_query=""
-
-
-
-    # search_type = st.selectbox('Select the Search type',
-    #     ('Conversational Search (RAG)',
-    #     'OpenSearch vector search',
-    #     'LLM Text Generation'
-    #     ),
-
-    #     key = 'input_searchType',
-    #     help = "Select the type of retriever\n1. Conversational Search (Recommended) - This will include both the OpenSearch and LLM in the retrieval pipeline \n (note: This will put opensearch response as context to LLM to answer) \n2. OpenSearch vector search - This will put only OpenSearch's vector search in the pipeline, \n(Warning: this will lead to unformatted results )\n3. LLM Text Generation - This will include only LLM in the pipeline, \n(Warning: This will give hallucinated and out of context answers_)"
-    #     )
-
-    # col1, col2, col3, col4 = st.columns(4)
-
-    # with col1:
-    #     st.text_input('Temperature', value = "0.001", placeholder='LLM Temperature', key = 'input_temperature',help = "Set the temperature of the Large Language model. \n Note: 1. Set this to values lower to 1 in the order of 0.001, 0.0001, such low values reduces hallucination and creativity in the LLM response; 2. This applies only when LLM is a part of the retriever pipeline")
-    # with col2:
-    #     st.number_input('Top K', value = 200, placeholder='Top K', key = 'input_topK', step = 50, help = "This limits the LLM's predictions to the top k most probable tokens at each step of generation, this applies only when LLM is a prt of the retriever pipeline")
-    # with col3:
-    #     st.number_input('Top P', value = 0.95, placeholder='Top P', key = 'input_topP', step = 0.05, help = "This sets a threshold probability and selects the top tokens whose cumulative probability exceeds the threshold while the tokens are generated by the LLM")
-    # with col4:
-    #     st.number_input('Max Output Tokens', value = 500, placeholder='Max Output Tokens', key = 'input_maxTokens', step = 100, help = "This decides the total number of tokens generated as the final response. Note: Values greater than 1000 takes longer response time")
-
-    # st.markdown('---')
+    st.session_state.input_query=""
 
 
 def write_user_message(md):
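For context, the substantive change is moving st.session_state.input_query="" from a commented-out line into live code inside the handle_input callback. Clearing a widget-backed session-state key from within its own callback is the standard Streamlit way to empty a text box after the user submits a question; assigning to that key in the main script after the widget has rendered raises a StreamlitAPIException. Below is a minimal sketch of that pattern, simplified from the app's actual handle_input (which also assembles the inputs dict and calls the retrieval pipeline):

import streamlit as st

# Sketch of the pattern applied in this commit: reset the text input's
# session-state key inside its on_change callback so the box is cleared
# on the next rerun.

if "questions_" not in st.session_state:
    st.session_state.questions_ = []

def handle_input():
    query = st.session_state.input_query
    if query:
        st.session_state.questions_.append(query)
    # Allowed here because we are inside a callback; the same assignment
    # after the widget renders would raise a StreamlitAPIException.
    st.session_state.input_query = ""

st.text_input("Ask a question", key="input_query", on_change=handle_input)
for q in st.session_state.questions_:
    st.write(q)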