uralk committed on
Commit
c5b45d0
·
verified ·
1 Parent(s): 23cb0fc

Update ui.py

Browse files
Files changed (1) hide show
  1. ui.py +58 -22
ui.py CHANGED
@@ -70,6 +70,8 @@ def create_ui(llm_model, solr_client):
70
  "Qualitative URL will appear here...", visible=False)
71
  qualitative_data_display = gr.Markdown(
72
  "Example data will appear here...", visible=False)
 
 
73
  plot_display = gr.Image(
74
  label="Visualization", type="filepath", visible=False)
75
  report_display = gr.Markdown(
@@ -79,25 +81,28 @@ def create_ui(llm_model, solr_client):
79
  """
80
  Manages the conversation and yields UI updates.
81
  """
 
 
 
82
  if state is None:
83
  state = {'query_count': 0, 'last_suggestions': []}
84
  if history is None:
85
  history = []
86
 
87
  # Reset all displays at the beginning of a new flow
88
- yield (history, state, gr.update(value=None, visible=False), gr.update(value=None, visible=False), gr.update(value=None, visible=False), gr.update(value=None, visible=False), gr.update(value=None, visible=False), gr.update(value=None, visible=False), gr.update(value=None, visible=False), gr.update(value="Suggestions from the external API will appear here...", visible=False))
89
 
90
  query_context = user_input.strip()
91
  if not query_context:
92
  history.append((user_input, "Please enter a question to analyze."))
93
- yield (history, state, None, None, None, None, None, None, None, None)
94
  return
95
 
96
  history.append((user_input, f"Analyzing: '{query_context}'\n\n*Generating analysis plan...*"))
97
- yield (history, state, None, None, None, None, None, None, None, None)
98
 
99
  # Generate plan, get search field suggestions, and intent.
100
- analysis_plan, mapped_search_fields, core_name, intent = llm_generate_analysis_plan_with_history(llm_model, query_context, history)
101
 
102
  # Update and display search field suggestions in its own accordion
103
  if mapped_search_fields:
@@ -112,7 +117,7 @@ def create_ui(llm_model, solr_client):
112
  else:
113
  message = "I'm sorry, I couldn't generate a valid analysis plan. Please try rephrasing your question."
114
  history.append((None, message))
115
- yield (history, state, None, None, None, None, None, None, None, suggestions_display_update)
116
  return
117
 
118
  history.append((None, f"✅ Analysis plan generated for core: **`{core_name}`**"))
@@ -123,10 +128,10 @@ def create_ui(llm_model, solr_client):
123
  """
124
  history.append((None, plan_summary))
125
  formatted_plan = f"**Full Analysis Plan (Core: `{core_name}`):**\n```json\n{json.dumps(analysis_plan, indent=2)}\n```"
126
- yield (history, state, None, None, gr.update(value=formatted_plan, visible=True), None, None, None, None, suggestions_display_update)
127
 
128
  history.append((None, "*Executing queries for aggregates and examples...*"))
129
- yield (history, state, None, None, gr.update(value=formatted_plan, visible=True), None, None, None, None, suggestions_display_update)
130
 
131
  # --- DYNAMIC CORE SWITCH (Thread-safe) ---
132
  with solr_lock:
@@ -153,7 +158,7 @@ def create_ui(llm_model, solr_client):
153
 
154
  if not aggregate_data or aggregate_data.get('count', 0) == 0:
155
  history.append((None, f"No data was found for your query in the '{core_name}' core. Please try a different question."))
156
- yield (history, state, None, None, gr.update(value=formatted_plan, visible=True), None, None, None, None, suggestions_display_update)
157
  return
158
 
159
  # Display retrieved data
@@ -162,36 +167,66 @@ def create_ui(llm_model, solr_client):
162
  formatted_agg_data = f"**Quantitative (Aggregate) Data:**\n```json\n{json.dumps(aggregate_data, indent=2)}\n```"
163
  formatted_qual_data = f"**Qualitative (Example) Data:**\n```json\n{json.dumps(example_data, indent=2)}\n```"
164
  qual_data_display_update = gr.update(value=formatted_qual_data, visible=True)
165
- yield (history, state, None, None, gr.update(value=formatted_plan, visible=True), quantitative_url_update, gr.update(value=formatted_agg_data, visible=True), qualitative_url_update, qual_data_display_update, suggestions_display_update)
166
 
167
  history.append((None, "✅ Data retrieved. Generating visualization and final report..."))
168
- yield (history, state, None, None, gr.update(value=formatted_plan, visible=True), quantitative_url_update, gr.update(value=formatted_agg_data, visible=True), qualitative_url_update, qual_data_display_update, suggestions_display_update)
169
-
170
  # Generate viz and report
171
  with concurrent.futures.ThreadPoolExecutor() as executor:
172
  viz_future = executor.submit(llm_generate_visualization_code, llm_model, query_context, aggregate_data)
173
-
174
  report_text = ""
175
  stream_history = history[:]
176
- for chunk in llm_synthesize_enriched_report_stream(llm_model, query_context, aggregate_data, example_data, analysis_plan):
177
- report_text += chunk
178
- yield (stream_history, state, None, gr.update(value=report_text, visible=True), gr.update(value=formatted_plan, visible=True), quantitative_url_update, gr.update(value=formatted_agg_data, visible=True), qualitative_url_update, qual_data_display_update, suggestions_display_update)
179
-
 
 
 
 
 
180
  history.append((None, report_text))
181
 
182
- viz_code = viz_future.result()
183
  plot_path = execute_viz_code_and_get_path(viz_code, aggregate_data)
184
  output_plot = gr.update(value=plot_path, visible=True) if plot_path else gr.update(visible=False)
185
  if not plot_path:
186
  history.append((None, "*I was unable to generate a plot for this data.*\n"))
187
-
188
- yield (history, state, output_plot, gr.update(value=report_text), gr.update(value=formatted_plan, visible=True), quantitative_url_update, gr.update(value=formatted_agg_data, visible=True), qualitative_url_update, qual_data_display_update, suggestions_display_update)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
189
 
190
  state['query_count'] += 1
191
  state['last_suggestions'] = parse_suggestions_from_report(report_text)
192
  next_prompt = "Analysis complete. What would you like to explore next?"
193
  history.append((None, next_prompt))
194
- yield (history, state, output_plot, gr.update(value=report_text), gr.update(value=formatted_plan, visible=True), quantitative_url_update, gr.update(value=formatted_agg_data, visible=True), qualitative_url_update, qual_data_display_update, suggestions_display_update)
195
 
196
  def reset_all():
197
  """Resets the entire UI for a new analysis session."""
@@ -206,6 +241,7 @@ def create_ui(llm_model, solr_client):
206
  gr.update(value=None, visible=False),
207
  gr.update(value=None, visible=False),
208
  gr.update(value=None, visible=False),
 
209
  gr.update(value=None, visible=False)
210
  )
211
 
@@ -213,7 +249,7 @@ def create_ui(llm_model, solr_client):
213
  fn=process_analysis_flow,
214
  inputs=[msg_textbox, chatbot, state],
215
  outputs=[chatbot, state, plot_display, report_display, plan_display, quantitative_url_display,
216
- quantitative_data_display, qualitative_url_display, qualitative_data_display, suggestions_display],
217
  ).then(
218
  lambda: gr.update(value=""),
219
  None,
@@ -225,7 +261,7 @@ def create_ui(llm_model, solr_client):
225
  fn=reset_all,
226
  inputs=None,
227
  outputs=[chatbot, state, msg_textbox, plot_display, report_display, plan_display, quantitative_url_display,
228
- quantitative_data_display, qualitative_url_display, qualitative_data_display, suggestions_display],
229
  queue=False
230
  )
231
 
 
70
  "Qualitative URL will appear here...", visible=False)
71
  qualitative_data_display = gr.Markdown(
72
  "Example data will appear here...", visible=False)
73
+ with gr.Accordion("Token Usage", open=False):
74
+ token_summary_box = gr.Markdown(visible=False)
75
  plot_display = gr.Image(
76
  label="Visualization", type="filepath", visible=False)
77
  report_display = gr.Markdown(
 
81
  """
82
  Manages the conversation and yields UI updates.
83
  """
84
+ analysis_plan_input_token_count = analysis_plan_output_token_count = analysis_plan_total_token_count = None
85
+ enriched_report_input_token_count = enriched_report_output_token_count = enriched_report_total_token_count = None
86
+ visualization_input_token_count = visualization_output_token_count = visualization_total_token_count = None
87
  if state is None:
88
  state = {'query_count': 0, 'last_suggestions': []}
89
  if history is None:
90
  history = []
91
 
92
  # Reset all displays at the beginning of a new flow
93
+ yield (history, state, gr.update(value=None, visible=False), gr.update(value=None, visible=False), gr.update(value=None, visible=False), gr.update(value=None, visible=False), gr.update(value=None, visible=False), gr.update(value=None, visible=False), gr.update(value=None, visible=False), gr.update(value=None, visible=False), gr.update(value="Suggestions from the external API will appear here...", visible=False))
94
 
95
  query_context = user_input.strip()
96
  if not query_context:
97
  history.append((user_input, "Please enter a question to analyze."))
98
+ yield (history, state, None, None, None, None, None, None, None, None, None)
99
  return
100
 
101
  history.append((user_input, f"Analyzing: '{query_context}'\n\n*Generating analysis plan...*"))
102
+ yield (history, state, None, None, None, None, None, None, None, None, None)
103
 
104
  # Generate plan, get search field suggestions, and intent.
105
+ analysis_plan, mapped_search_fields, core_name, intent, analysis_plan_input_token_count, analysis_plan_output_token_count, analysis_plan_total_token_count = llm_generate_analysis_plan_with_history(llm_model, query_context, history)
106
 
107
  # Update and display search field suggestions in its own accordion
108
  if mapped_search_fields:
 
117
  else:
118
  message = "I'm sorry, I couldn't generate a valid analysis plan. Please try rephrasing your question."
119
  history.append((None, message))
120
+ yield (history, state, None, None, None, None, None, None, None, None, suggestions_display_update)
121
  return
122
 
123
  history.append((None, f"✅ Analysis plan generated for core: **`{core_name}`**"))
 
128
  """
129
  history.append((None, plan_summary))
130
  formatted_plan = f"**Full Analysis Plan (Core: `{core_name}`):**\n```json\n{json.dumps(analysis_plan, indent=2)}\n```"
131
+ yield (history, state, None, None, gr.update(value=formatted_plan, visible=True), None, None, None, None, None, suggestions_display_update)
132
 
133
  history.append((None, "*Executing queries for aggregates and examples...*"))
134
+ yield (history, state, None, None, gr.update(value=formatted_plan, visible=True), None, None, None, None, None, suggestions_display_update)
135
 
136
  # --- DYNAMIC CORE SWITCH (Thread-safe) ---
137
  with solr_lock:
 
158
 
159
  if not aggregate_data or aggregate_data.get('count', 0) == 0:
160
  history.append((None, f"No data was found for your query in the '{core_name}' core. Please try a different question."))
161
+ yield (history, state, None, None, gr.update(value=formatted_plan, visible=True), None, None, None, None, None, suggestions_display_update)
162
  return
163
 
164
  # Display retrieved data
 
167
  formatted_agg_data = f"**Quantitative (Aggregate) Data:**\n```json\n{json.dumps(aggregate_data, indent=2)}\n```"
168
  formatted_qual_data = f"**Qualitative (Example) Data:**\n```json\n{json.dumps(example_data, indent=2)}\n```"
169
  qual_data_display_update = gr.update(value=formatted_qual_data, visible=True)
170
+ yield (history, state, None, None, gr.update(value=formatted_plan, visible=True), quantitative_url_update, gr.update(value=formatted_agg_data, visible=True), qualitative_url_update, qual_data_display_update, None, suggestions_display_update)
171
 
172
  history.append((None, "✅ Data retrieved. Generating visualization and final report..."))
173
+ yield (history, state, None, None, gr.update(value=formatted_plan, visible=True), quantitative_url_update, gr.update(value=formatted_agg_data, visible=True), qualitative_url_update, qual_data_display_update, None, suggestions_display_update)
 
174
  # Generate viz and report
175
  with concurrent.futures.ThreadPoolExecutor() as executor:
176
  viz_future = executor.submit(llm_generate_visualization_code, llm_model, query_context, aggregate_data)
177
+ viz_code, visualization_input_token_count, visualization_output_token_count, visualization_total_token_count = viz_future.result()
178
  report_text = ""
179
  stream_history = history[:]
180
+ report_stream = llm_synthesize_enriched_report_stream(llm_model, query_context, aggregate_data, example_data, analysis_plan)
181
+ for item in report_stream:
182
+ if item["text"] is not None:
183
+ report_text += item["text"]
184
+ yield (stream_history, state, None, gr.update(value=report_text, visible=True), gr.update(value=formatted_plan, visible=True), quantitative_url_update, gr.update(value=formatted_agg_data, visible=True), qualitative_url_update, qual_data_display_update, None, suggestions_display_update)
185
+ elif item["tokens"] is not None:
186
+ enriched_report_input_token_count = item["tokens"]["input"]
187
+ enriched_report_output_token_count = item["tokens"]["output"]
188
+ enriched_report_total_token_count = item["tokens"]["total"]
189
  history.append((None, report_text))
190
 
 
191
  plot_path = execute_viz_code_and_get_path(viz_code, aggregate_data)
192
  output_plot = gr.update(value=plot_path, visible=True) if plot_path else gr.update(visible=False)
193
  if not plot_path:
194
  history.append((None, "*I was unable to generate a plot for this data.*\n"))
195
+
196
+ cumulative_tokens = sum(filter(None, [
197
+ analysis_plan_total_token_count,
198
+ enriched_report_total_token_count,
199
+ visualization_total_token_count
200
+ ]))
201
+
202
+ total_input = sum(filter(None, [
203
+ analysis_plan_input_token_count,
204
+ enriched_report_input_token_count,
205
+ visualization_input_token_count
206
+ ]))
207
+ total_output = sum(filter(None, [
208
+ analysis_plan_output_token_count,
209
+ enriched_report_output_token_count,
210
+ visualization_output_token_count
211
+ ]))
212
+ expected_cost = round((total_input*0.3+total_output*2.5)/1000000, 3)
213
+
214
+ token_summary_box_update = gr.update(
215
+ value=f"""**Analysis Plan Tokens** β†’ Prompt: `{analysis_plan_input_token_count or '-'}`, Output: `{analysis_plan_output_token_count or '-'}`, Total: `{analysis_plan_total_token_count or '-'}`
216
+ **Report Tokens** β†’ Prompt: `{enriched_report_input_token_count or '-'}`, Output: `{enriched_report_output_token_count or '-'}`, Total: `{enriched_report_total_token_count or '-'}`
217
+ **Visualization Tokens** β†’ Prompt: `{visualization_input_token_count or '-'}`, Output: `{visualization_output_token_count or '-'}`, Total: `{visualization_total_token_count or '-'}`
218
+
219
+ **Cumulative Tokens** β†’ `{cumulative_tokens}`
220
+ **Expected Cost** β†’ `{expected_cost}$`""",
221
+ visible=True
222
+ )
223
+ yield (history, state, output_plot, gr.update(value=report_text), gr.update(value=formatted_plan, visible=True), quantitative_url_update, gr.update(value=formatted_agg_data, visible=True), qualitative_url_update, qual_data_display_update, token_summary_box_update, suggestions_display_update)
224
 
225
  state['query_count'] += 1
226
  state['last_suggestions'] = parse_suggestions_from_report(report_text)
227
  next_prompt = "Analysis complete. What would you like to explore next?"
228
  history.append((None, next_prompt))
229
+ yield (history, state, output_plot, gr.update(value=report_text), gr.update(value=formatted_plan, visible=True), quantitative_url_update, gr.update(value=formatted_agg_data, visible=True), qualitative_url_update, qual_data_display_update, token_summary_box_update, suggestions_display_update)
230
 
231
  def reset_all():
232
  """Resets the entire UI for a new analysis session."""
 
241
  gr.update(value=None, visible=False),
242
  gr.update(value=None, visible=False),
243
  gr.update(value=None, visible=False),
244
+ gr.update(value=None, visible=False),
245
  gr.update(value=None, visible=False)
246
  )
247
 
 
249
  fn=process_analysis_flow,
250
  inputs=[msg_textbox, chatbot, state],
251
  outputs=[chatbot, state, plot_display, report_display, plan_display, quantitative_url_display,
252
+ quantitative_data_display, qualitative_url_display, qualitative_data_display, token_summary_box, suggestions_display],
253
  ).then(
254
  lambda: gr.update(value=""),
255
  None,
 
261
  fn=reset_all,
262
  inputs=None,
263
  outputs=[chatbot, state, msg_textbox, plot_display, report_display, plan_display, quantitative_url_display,
264
+ quantitative_data_display, qualitative_url_display, qualitative_data_display, token_summary_box, suggestions_display],
265
  queue=False
266
  )
267