Update app.py
app.py CHANGED
@@ -208,13 +208,13 @@ def visualize_logprobs(json_input, chunk=0, chunk_size=100):
                 logger.debug("top_logprobs is None for token: %s, using empty dict", token)
                 top_logprobs = {}  # Default to empty dict for None
             # Ensure all values in top_logprobs are floats
+            finite_top_probs = []
             for key, value in top_logprobs.items():
                 float_value = ensure_float(value)
                 if float_value is not None and math.isfinite(float_value):
+                    finite_top_probs.append((key, float_value))
             # Sort by log probability (descending)
-            sorted_probs = sorted(
+            sorted_probs = sorted(finite_top_probs, key=lambda x: x[1], reverse=True)
             row = [token, f"{logprob:.4f}"]
             for alt_token, alt_logprob in sorted_probs[:max_alternatives]:  # Use max number of alternatives
                 row.append(f"{alt_token}: {alt_logprob:.4f}")
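Viewed in isolation, the new filter-then-sort logic behaves like the following sketch. The sample values come from the placeholder JSON used later in the UI, plus one deliberately non-finite entry to show the filtering; a plain float check stands in for ensure_float:

    import math

    top_logprobs = {" so": -13.8046875, ".": -13.8046875, ",": -13.640625, "junk": float("nan")}
    finite_top_probs = []
    for key, value in top_logprobs.items():
        if isinstance(value, float) and math.isfinite(value):  # stand-in for ensure_float
            finite_top_probs.append((key, value))
    sorted_probs = sorted(finite_top_probs, key=lambda x: x[1], reverse=True)
    print(sorted_probs)  # [(',', -13.640625), (' so', -13.8046875), ('.', -13.8046875)]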
@@ -281,93 +281,306 @@ def visualize_logprobs(json_input, chunk=0, chunk_size=100):
         logger.error("Visualization failed: %s (Input: %s)", str(e), json_input[:100] + "..." if len(json_input) > 100 else json_input)
         return (create_empty_figure("Log Probabilities of Generated Tokens"), None, "No finite log probabilities to display.", create_empty_figure("Top Token Log Probabilities"), create_empty_figure("Significant Probability Drops"), 1, 0)
 
-#
+# Analysis functions for detecting correct vs. incorrect traces
+def analyze_confidence_signature(logprobs, tokens):
+    if not logprobs or not tokens:
+        return "No data for confidence signature analysis.", None
+    # Track moving average of top token log probability
+    top_probs = [lps[0][1] if lps else -float('inf') for lps in logprobs]  # Top log prob per position; -inf for empty
+    moving_avg = np.convolve(
+        top_probs,
+        np.ones(20) / 20,  # 20-token window
+        mode='valid'
+    )
+
+    # Detect significant drops (potential error points)
+    drops = np.where(np.diff(moving_avg) < -0.15)[0]
+    if not drops.size:
+        return "No significant confidence drops detected.", None
+    drop_positions = [(i, tokens[i + 19] if i + 19 < len(tokens) else "End of trace") for i in drops]  # Adjust for convolution window
+    return "Significant confidence drops detected at positions:", drop_positions
+
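For intuition, the confidence-signature detector reduces to a moving average plus a difference threshold. A minimal, self-contained sketch on synthetic data (the 20-token window and -0.15 threshold mirror the values above):

    import numpy as np

    # Synthetic top-token log probs: confident start, sharp dip mid-trace
    top_probs = np.concatenate([np.full(40, -0.1), np.full(40, -4.0)])
    window = 20
    moving_avg = np.convolve(top_probs, np.ones(window) / window, mode='valid')
    drops = np.where(np.diff(moving_avg) < -0.15)[0]
    print(drops)  # Positions where the smoothed confidence falls fastest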
+def detect_interpretation_pivots(logprobs, tokens):
+    if not logprobs or not tokens:
+        return "No data for interpretation pivot detection.", None
+    pivots = []
+    reconsideration_tokens = ["wait", "but", "actually", "however", "hmm"]
+
+    for i, (token, lps) in enumerate(zip(tokens, logprobs)):
+        # Check if reconsideration tokens have unusually high probability
+        for rt in reconsideration_tokens:
+            for t, p in lps:
+                if t.lower() == rt and p > -2.5:  # High probability
+                    # Look back to find what's being reconsidered
+                    context = tokens[max(0, i - 50):i]
+                    pivots.append((i, rt, context))
+
+    if not pivots:
+        return "No interpretation pivots detected.", None
+    return "Interpretation pivots detected:", pivots
+
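All of the analysis functions consume the same shape of data: tokens is a list of strings and logprobs is a parallel list where each element holds (token, logprob) pairs for the top alternatives at that position. A small usage sketch with invented values:

    tokens = ["The", "answer", "is", "wait"]
    logprobs = [
        [("The", -0.1), (" A", -3.2)],
        [("answer", -0.5), ("result", -1.9)],
        [("is", -0.2), ("was", -2.8)],
        [("wait", -1.0), ("4", -1.2)],  # "wait" unusually likely -> pivot candidate
    ]
    print(detect_interpretation_pivots(logprobs, tokens))
    # ('Interpretation pivots detected:', [(3, 'wait', ['The', 'answer', 'is'])])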
+def calculate_decision_entropy(logprobs):
+    if not logprobs:
+        return "No data for entropy spike detection.", None
+    # Calculate entropy at each token position
+    entropies = []
+    for lps in logprobs:
+        if not lps:
+            entropies.append(0.0)
+            continue
+        # Calculate entropy: -sum(p * log(p)) over the top-k alternatives
+        probs = [math.exp(p) for _, p in lps]  # Convert log probs to probabilities
+        if not probs or sum(probs) == 0:
+            entropies.append(0.0)
+            continue
+        entropy = -sum(p * math.log(p) for p in probs if p > 0)
+        entropies.append(entropy)
+
+    # Detect entropy spikes relative to a 75th-percentile baseline
+    baseline = np.percentile(entropies, 75) if entropies else 0.0
+    spikes = [i for i, e in enumerate(entropies) if baseline > 0 and e > baseline * 1.5]
+
+    if not spikes:
+        return "No entropy spikes detected at decision points.", None
+    return "Entropy spikes detected at positions:", spikes
+
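Note that the sum runs only over the returned top-k alternatives, so the probabilities need not sum to 1 and the value understates the full-distribution entropy. A quick worked check of the formula:

    import math

    # Two near-equal alternatives: high uncertainty
    probs = [0.5, 0.45]
    print(-sum(p * math.log(p) for p in probs))  # ~0.706 nats

    # One dominant alternative: low uncertainty
    probs = [0.95, 0.01]
    print(-sum(p * math.log(p) for p in probs))  # ~0.095 nats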
+def analyze_conclusion_competition(logprobs, tokens):
+    if not logprobs or not tokens:
+        return "No data for conclusion competition analysis.", None
+    # Find tokens related to conclusion
+    conclusion_indices = [i for i, t in enumerate(tokens)
+                          if any(marker in t.lower() for marker in
+                                 ["therefore", "thus", "boxed", "answer"])]
+
+    if not conclusion_indices:
+        return "No conclusion markers found in trace.", None
+
+    # Analyze probability gap between top and second choices near conclusion
+    gaps = []
+    conclusion_idx = conclusion_indices[-1]
+    end_range = min(conclusion_idx + 50, len(logprobs))
+    for idx in range(conclusion_idx, end_range):
+        if idx < len(logprobs) and len(logprobs[idx]) >= 2:
+            top_prob = logprobs[idx][0][1] if logprobs[idx] else -float('inf')
+            second_prob = logprobs[idx][1][1] if len(logprobs[idx]) > 1 else -float('inf')
+            gap = top_prob - second_prob if top_prob != -float('inf') and second_prob != -float('inf') else 0.0
+            gaps.append(gap)
+
+    if not gaps:
+        return "No conclusion competition data available.", None
+    mean_gap = np.mean(gaps)
+    return f"Mean probability gap at conclusion: {mean_gap:.4f} (higher indicates more confident conclusion)", None
+
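Since both values are log probabilities, the reported gap lives in log space and is effectively a log-odds ratio: a gap of 2.3 means the top token is roughly 10x more likely than the runner-up. For example:

    import math

    top, second = -0.1, -2.4
    gap = top - second      # 2.3 in log space
    print(math.exp(gap))    # ~9.97, the probability ratio between the two choices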
+def analyze_verification_signals(logprobs, tokens):
+    if not logprobs or not tokens:
+        return "No data for verification signal analysis.", None
+    verification_terms = ["verify", "check", "confirm", "ensure", "double"]
+    verification_probs = []
+
+    for lps in logprobs:
+        # Look for verification terms in top-k tokens
+        max_v_prob = -float('inf')
+        for token, prob in lps:
+            if any(v_term in token.lower() for v_term in verification_terms):
+                max_v_prob = max(max_v_prob, prob)
+
+        if max_v_prob > -float('inf'):
+            verification_probs.append(max_v_prob)
+
+    if not verification_probs:
+        return "No verification signals detected.", None
+    count, mean_prob = len(verification_probs), np.mean(verification_probs)
+    return f"Verification signals found: {count} instances, mean probability: {mean_prob:.4f}", None
+
+def detect_semantic_inversions(logprobs, tokens):
+    if not logprobs or not tokens:
+        return "No data for semantic inversion detection.", None
+    inversion_pairs = [
+        ("more", "less"), ("larger", "smaller"),
+        ("winning", "losing"), ("increase", "decrease"),
+        ("greater", "lesser"), ("positive", "negative")
+    ]
+
+    inversions = []
+    for i, (token, lps) in enumerate(zip(tokens, logprobs)):
+        for pos, neg in inversion_pairs:
+            if token.lower() == pos:
+                # Check if the opposite (negative) term has high probability
+                for t, p in lps:
+                    if t.lower() == neg and p > -3.0:  # High competitor
+                        inversions.append((i, pos, neg, p))
+            elif token.lower() == neg:
+                # Check if the opposite (positive) term has high probability
+                for t, p in lps:
+                    if t.lower() == pos and p > -3.0:  # High competitor
+                        inversions.append((i, neg, pos, p))
+
+    if not inversions:
+        return "No semantic inversions detected.", None
+    return "Semantic inversions detected:", inversions
+
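A toy case that trips the detector: the trace emits "more" while "less" sits inside the -3.0 log-prob threshold, i.e. at better than roughly 5% probability (values invented for illustration):

    tokens = ["profits", "increased", "more"]
    logprobs = [
        [("profits", -0.3)],
        [("increased", -0.4), ("decrease", -2.9)],  # antonym competes, but token differs from "increase"
        [("more", -0.7), ("less", -1.1)],           # near coin-flip inversion
    ]
    print(detect_semantic_inversions(logprobs, tokens))
    # ('Semantic inversions detected:', [(2, 'more', 'less', -1.1)])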
+# Function to perform full trace analysis
+def analyze_full_trace(json_input):
+    try:
+        data = parse_input(json_input)
+        content = data.get("content", []) if isinstance(data, dict) else data
+        if not isinstance(content, list):
+            raise ValueError("Content must be a list of entries")
+
+        tokens = []
+        logprobs = []
+        for entry in content:
+            if not isinstance(entry, dict):
+                logger.warning("Skipping non-dictionary entry: %s", entry)
+                continue
+            logprob = ensure_float(entry.get("logprob", None))
+            if logprob is not None and logprob >= -100000:  # Include all entries with default 0.0
+                tokens.append(get_token(entry))
+                top_probs = entry.get("top_logprobs", {})
+                if top_probs is None:
+                    top_probs = {}
+                finite_top_probs = []
+                for key, value in top_probs.items():
+                    float_value = ensure_float(value)
+                    if float_value is not None and math.isfinite(float_value):
+                        finite_top_probs.append((key, float_value))
+                logprobs.append(finite_top_probs)
+
+        if not logprobs or not tokens:
+            return "No valid data for trace analysis.", None, None, None, None, None
+
+        # Perform all analyses
+        confidence_result, confidence_data = analyze_confidence_signature(logprobs, tokens)
+        pivot_result, pivot_data = detect_interpretation_pivots(logprobs, tokens)
+        entropy_result, entropy_data = calculate_decision_entropy(logprobs)
+        conclusion_result, conclusion_data = analyze_conclusion_competition(logprobs, tokens)
+        verification_result, verification_data = analyze_verification_signals(logprobs, tokens)
+        inversion_result, inversion_data = detect_semantic_inversions(logprobs, tokens)
+
+        # Format results for display
+        analysis_html = f"""
+        <h3>Trace Analysis Results</h3>
+        <ul>
+            <li><strong>Confidence Signature:</strong> {confidence_result}</li>
+            {f"<ul><li>Positions: {', '.join(str(pos) for pos, tok in confidence_data)}</li></ul>" if confidence_data else ""}
+            <li><strong>Interpretation Pivots:</strong> {pivot_result}</li>
+            {f"<ul><li>Positions: {', '.join(str(pos) for pos, _, _ in pivot_data)}</li></ul>" if pivot_data else ""}
+            <li><strong>Decision Entropy Spikes:</strong> {entropy_result}</li>
+            {f"<ul><li>Positions: {', '.join(str(pos) for pos in entropy_data)}</li></ul>" if entropy_data else ""}
+            <li><strong>Conclusion Competition:</strong> {conclusion_result}</li>
+            <li><strong>Verification Signals:</strong> {verification_result}</li>
+            <li><strong>Semantic Inversions:</strong> {inversion_result}</li>
+            {f"<ul><li>Positions: {', '.join(str(pos) for pos, _, _, _ in inversion_data)}</li></ul>" if inversion_data else ""}
+        </ul>
+        """
+        return analysis_html, None, None, None, None, None
+    except Exception as e:
+        logger.error("Trace analysis failed: %s", str(e))
+        return f"Error analyzing trace: {e}", None, None, None, None, None
+
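End to end, the expected input is the OpenAI-style logprobs payload shown in the UI placeholders. A minimal smoke test, assuming parse_input, ensure_float, and get_token from earlier in app.py are in scope:

    sample = '{"content": [{"bytes": [44], "logprob": 0.0, "token": ",", "top_logprobs": {" so": -13.8046875, ".": -13.8046875, ",": -13.640625}}]}'
    html, *_ = analyze_full_trace(sample)
    print(html)  # <h3>Trace Analysis Results</h3> ...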
+# Gradio interface with two tabs: Trace Analysis and Visualization
 with gr.Blocks(title="Log Probability Visualizer") as app:
     gr.Markdown("# Log Probability Visualizer")
     gr.Markdown(
-        "Paste your JSON log prob data below to visualize tokens in chunks of 100. Fixed filter ≥ -100000, dynamic number of top_logprobs, handles missing or null fields. Next chunk is precomputed proactively."
+        "Paste your JSON log prob data below to analyze reasoning traces and visualize tokens in chunks of 100. Fixed filter ≥ -100000, dynamic number of top_logprobs, handles missing or null fields. Next chunk is precomputed proactively."
     )
 
-    with gr.
-    text_output = gr.HTML(label="Colored Text (Confidence Visualization)")
-    with gr.Row():
-        prev_btn = gr.Button("Previous Chunk")
-        next_btn = gr.Button("Next Chunk")
-        total_chunks_output = gr.Number(label="Total Chunks", interactive=False)
-    # Precomputed next chunk state (hidden)
-    precomputed_next = gr.State(value=None)
+    with gr.Tabs():
+        with gr.Tab("Trace Analysis"):
+            with gr.Row():
+                json_input_analysis = gr.Textbox(
+                    label="JSON Input for Trace Analysis",
+                    lines=10,
+                    placeholder="Paste your JSON (e.g., {\"content\": [{\"bytes\": [44], \"logprob\": 0.0, \"token\": \",\", \"top_logprobs\": {\" so\": -13.8046875, \".\": -13.8046875, \",\": -13.640625}}]}).",
+                )
+            with gr.Row():
+                analysis_output = gr.HTML(label="Trace Analysis Results")
+
+            btn_analyze = gr.Button("Analyze Trace")
+            btn_analyze.click(
+                fn=analyze_full_trace,
+                inputs=[json_input_analysis],
+                outputs=[analysis_output, gr.State(), gr.State(), gr.State(), gr.State(), gr.State()],
+            )
 
+        with gr.Tab("Visualization"):
+            with gr.Row():
+                json_input_viz = gr.Textbox(
+                    label="JSON Input for Visualization",
+                    lines=10,
+                    placeholder="Paste your JSON (e.g., {\"content\": [{\"bytes\": [44], \"logprob\": 0.0, \"token\": \",\", \"top_logprobs\": {\" so\": -13.8046875, \".\": -13.8046875, \",\": -13.640625}}]}).",
+                )
+                chunk = gr.Number(value=0, label="Current Chunk", precision=0, minimum=0)
+
+            with gr.Row():
+                plot_output = gr.Plot(label="Log Probability Plot (Click for Tokens)")
+                drops_output = gr.Plot(label="Probability Drops (Click for Details)")
+
+            with gr.Row():
+                table_output = gr.Dataframe(label="Token Log Probabilities and Top Alternatives")
+                alt_viz_output = gr.Plot(label="Top Token Log Probabilities (Click for Details)")
+
+            with gr.Row():
+                text_output = gr.HTML(label="Colored Text (Confidence Visualization)")
+
+            with gr.Row():
+                prev_btn = gr.Button("Previous Chunk")
+                next_btn = gr.Button("Next Chunk")
+                total_chunks_output = gr.Number(label="Total Chunks", interactive=False)
+
+            # Precomputed next chunk state (hidden)
+            precomputed_next = gr.State(value=None)
+
+            btn_viz = gr.Button("Visualize")
+            btn_viz.click(
+                fn=visualize_logprobs,
+                inputs=[json_input_viz, chunk],
+                outputs=[plot_output, table_output, text_output, alt_viz_output, drops_output, total_chunks_output, chunk],
+            )
 
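The outputs list must line up positionally with the 7-tuple that visualize_logprobs returns (plot, table, colored text, alternatives plot, drops plot, total chunks, current chunk; the same shape as the error path at line 282). A quick development-time sanity check, assuming an empty-content input exercises the early-return path:

    result = visualize_logprobs('{"content": []}', 0)
    assert len(result) == 7  # plot, table, text, alt plot, drops plot, total, chunk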
+    # Precompute the next chunk proactively while the user views the current one
+    async def precompute_next_chunk(json_input, current_chunk, precomputed_next):
+        if precomputed_next is not None:
+            return precomputed_next  # Use cached precomputed chunk if available
+        next_tokens, next_logprobs, next_alternatives = await precompute_chunk(json_input, 100, current_chunk)
+        if next_tokens is None or next_logprobs is None or next_alternatives is None:
+            return None
+        return (next_tokens, next_logprobs, next_alternatives)
+
+    # Update chunk on button clicks
+    def update_chunk(json_input, current_chunk, action, precomputed_next=None):
+        total_chunks = visualize_logprobs(json_input, 0)[5]  # Get total chunks
+        if action == "prev" and current_chunk > 0:
+            current_chunk -= 1
+        elif action == "next" and current_chunk < total_chunks - 1:
+            current_chunk += 1
+        # If a precomputed next chunk exists, log the cache hit; either way, render the target chunk
+        if precomputed_next:
+            next_tokens, next_logprobs, next_alternatives = precomputed_next
+            if next_tokens and next_logprobs and next_alternatives:
+                logger.debug("Using precomputed next chunk for chunk %d", current_chunk)
+                return visualize_logprobs(json_input, current_chunk)
+        return visualize_logprobs(json_input, current_chunk)
+
+    prev_btn.click(
+        fn=update_chunk,
+        inputs=[json_input_viz, chunk, gr.State(value="prev"), precomputed_next],
+        outputs=[plot_output, table_output, text_output, alt_viz_output, drops_output, total_chunks_output, chunk],
+    )
+
+    next_btn.click(
+        fn=update_chunk,
+        inputs=[json_input_viz, chunk, gr.State(value="next"), precomputed_next],
+        outputs=[plot_output, table_output, text_output, alt_viz_output, drops_output, total_chunks_output, chunk],
+    )
+
+    # Trigger precomputation when the chunk changes (via button clicks or initial load)
+    def trigger_precomputation(json_input, current_chunk):
+        asyncio.create_task(precompute_next_chunk(json_input, current_chunk, None))
+        return gr.update(value=current_chunk)
+
+    # Use a dummy event to trigger precomputation on chunk change (simplified for Gradio)
+    chunk.change(
+        fn=trigger_precomputation,
+        inputs=[json_input_viz, chunk],
+        outputs=[chunk],
+    )
+
 app.launch()
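One caveat on the precomputation wiring: asyncio.create_task requires an already-running event loop, and a synchronous Gradio callback may execute in a worker thread without one. A guarded variant, sketched here under that assumption and reusing the names above (not part of the commit):

    import asyncio

    def trigger_precomputation_safe(json_input, current_chunk):
        try:
            loop = asyncio.get_running_loop()
            loop.create_task(precompute_next_chunk(json_input, current_chunk, None))
        except RuntimeError:
            # No running loop in this thread; fall back to a blocking run
            asyncio.run(precompute_next_chunk(json_input, current_chunk, None))
        return gr.update(value=current_chunk)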