import gradio as gr
import json
import matplotlib.pyplot as plt
import pandas as pd
import io
import base64
import math
import logging
import numpy as np
import plotly.graph_objects as go
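
# Expected input shape (mirrors the OpenAI-style logprobs payload used in the
# UI placeholders below; "bytes" is optional and unused here):
#   {"content": [{"token": str, "logprob": float,
#                 "top_logprobs": {str: float, ...}}, ...]}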

# Set up logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Function to safely parse JSON input
def parse_input(json_input):
    logger.debug("Attempting to parse input: %s", json_input)
    try:
        data = json.loads(json_input)
        logger.debug("Successfully parsed as JSON")
        return data
    except json.JSONDecodeError as e:
        logger.error("JSON parsing failed: %s (Input: %s)", str(e), json_input[:100] + "..." if len(json_input) > 100 else json_input)
        raise ValueError(f"Malformed JSON: {str(e)}. Use double quotes for property names (e.g., \"content\") and ensure valid JSON format.")

# Function to ensure a value is a float
def ensure_float(value):
    if value is None:
        logger.debug("Replacing None logprob with 0.0")
        return 0.0  # Default to 0.0 for None
    if isinstance(value, str):
        try:
            return float(value)
        except ValueError:
            logger.error("Failed to convert string '%s' to float", value)
            return 0.0  # Default to 0.0 for invalid strings
    if isinstance(value, (int, float)):
        return float(value)
    return 0.0  # Default for any other type
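
# Examples: ensure_float(None) -> 0.0, ensure_float("-1.5") -> -1.5,
# ensure_float("abc") -> 0.0 (the conversion failure is logged and defaulted).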

# Function to get or generate a token value (default to "Unknown" if missing)
def get_token(entry):
    token = entry.get("token", "Unknown")
    if token == "Unknown":
        logger.warning("Missing 'token' key for entry: %s, using 'Unknown'", entry)
    return token

# Function to create an empty Plotly figure
def create_empty_figure(title):
    return go.Figure().update_layout(title=title, xaxis_title="", yaxis_title="", showlegend=False)

# Precompute the next chunk (synchronous for Hugging Face Spaces)
def precompute_chunk(json_input, chunk_size, current_chunk):
    try:
        data = parse_input(json_input)
        content = data.get("content", []) if isinstance(data, dict) else data
        if not isinstance(content, list):
            raise ValueError("Content must be a list of entries")

        tokens = []
        logprobs = []
        top_alternatives = []
        for entry in content:
            if not isinstance(entry, dict):
                logger.warning("Skipping non-dictionary entry: %s", entry)
                continue
            logprob = ensure_float(entry.get("logprob", None))
            if logprob >= -100000:  # Include all entries with default 0.0
                tokens.append(get_token(entry))
                logprobs.append(logprob)
                top_probs = entry.get("top_logprobs", {}) or {}
                finite_top_probs = []
                for key, value in top_probs.items():
                    float_value = ensure_float(value)
                    if float_value is not None and math.isfinite(float_value):
                        finite_top_probs.append((key, float_value))
                sorted_probs = sorted(finite_top_probs, key=lambda x: x[1], reverse=True)
                top_alternatives.append(sorted_probs)

        if not tokens or not logprobs:
            return None, None, None

        next_chunk = current_chunk + 1
        start_idx = next_chunk * chunk_size
        end_idx = min((next_chunk + 1) * chunk_size, len(tokens))
        if start_idx >= len(tokens):
            return None, None, None

        return tokens[start_idx:end_idx], logprobs[start_idx:end_idx], top_alternatives[start_idx:end_idx]
    except Exception as e:
        logger.error("Precomputation failed for chunk %d: %s", current_chunk + 1, str(e))
        return None, None, None
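
# With the default chunk_size of 100 and current_chunk=0, this precomputes the
# slice [100:200], i.e. the data for the chunk labeled "2" in the 1-based UI.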

# Function to process and visualize a chunk of log probs with dynamic top_logprobs
def visualize_logprobs(json_input, chunk=0, chunk_size=100):
    try:
        data = parse_input(json_input)
        content = data.get("content", []) if isinstance(data, dict) else data
        if not isinstance(content, list):
            raise ValueError("Content must be a list of entries")

        tokens = []
        logprobs = []
        top_alternatives = []  # List to store all top_logprobs (dynamic length)
        for entry in content:
            if not isinstance(entry, dict):
                logger.warning("Skipping non-dictionary entry: %s", entry)
                continue
            logprob = ensure_float(entry.get("logprob", None))
            if logprob >= -100000:  # Include all entries with default 0.0
                tokens.append(get_token(entry))
                logprobs.append(logprob)
                top_probs = entry.get("top_logprobs", {}) or {}
                finite_top_probs = []
                for key, value in top_probs.items():
                    float_value = ensure_float(value)
                    if float_value is not None and math.isfinite(float_value):
                        finite_top_probs.append((key, float_value))
                sorted_probs = sorted(finite_top_probs, key=lambda x: x[1], reverse=True)
                top_alternatives.append(sorted_probs)

        if not logprobs or not tokens:
            return (create_empty_figure("Log Probabilities of Generated Tokens"), None, "No tokens to display.", create_empty_figure("Top Token Log Probabilities"), create_empty_figure("Significant Probability Drops"), 1, 0)

        total_chunks = max(1, (len(logprobs) + chunk_size - 1) // chunk_size)
        start_idx = chunk * chunk_size
        end_idx = min((chunk + 1) * chunk_size, len(logprobs))
        paginated_tokens = tokens[start_idx:end_idx]
        paginated_logprobs = logprobs[start_idx:end_idx]
        paginated_alternatives = top_alternatives[start_idx:end_idx] if top_alternatives else []

        # Main Log Probability Plot (Interactive Plotly)
        main_fig = go.Figure()
        main_fig.add_trace(go.Scatter(x=list(range(len(paginated_logprobs))), y=paginated_logprobs, mode='markers+lines', name='Log Prob', marker=dict(color='blue')))
        main_fig.update_layout(
            title=f"Log Probabilities of Generated Tokens (Chunk {chunk + 1})",
            xaxis_title="Token Position (within chunk)",
            yaxis_title="Log Probability",
            hovermode="closest",
            clickmode='event+select'
        )
        main_fig.update_traces(
            customdata=[f"Token: {tok}, Log Prob: {prob:.4f}, Position: {i+start_idx}" for i, (tok, prob) in enumerate(zip(paginated_tokens, paginated_logprobs))],
            hovertemplate='<b>%{customdata}</b><extra></extra>'
        )

        # Probability Drop Analysis (Interactive Plotly)
        if len(paginated_logprobs) < 2:
            drops_fig = create_empty_figure(f"Significant Probability Drops (Chunk {chunk + 1})")
        else:
            drops = [paginated_logprobs[i+1] - paginated_logprobs[i] for i in range(len(paginated_logprobs)-1)]
            drops_fig = go.Figure()
            drops_fig.add_trace(go.Bar(x=list(range(len(drops))), y=drops, name='Drop', marker_color='red'))
            drops_fig.update_layout(
                title=f"Significant Probability Drops (Chunk {chunk + 1})",
                xaxis_title="Token Position (within chunk)",
                yaxis_title="Log Probability Drop",
                hovermode="closest",
                clickmode='event+select'
            )
            drops_fig.update_traces(
                customdata=[f"Drop: {drop:.4f}, From: {paginated_tokens[i]} to {paginated_tokens[i+1]}, Position: {i+start_idx}" for i, drop in enumerate(drops)],
                hovertemplate='<b>%{customdata}</b><extra></extra>'
            )

        # Create DataFrame for the table with dynamic top_logprobs
        table_data = []
        max_alternatives = max(len(alts) for alts in paginated_alternatives) if paginated_alternatives else 0
        # Build rows from the already-filtered paginated data; re-slicing `content`
        # here could misalign rows whenever non-dict entries were skipped above.
        for token, logprob, sorted_probs in zip(paginated_tokens, paginated_logprobs, paginated_alternatives):
            row = [token, f"{logprob:.4f}"]
            for alt_token, alt_logprob in sorted_probs[:max_alternatives]:
                row.append(f"{alt_token}: {alt_logprob:.4f}")
            while len(row) < 2 + max_alternatives:
                row.append("")
            table_data.append(row)

        df = pd.DataFrame(table_data, columns=["Token", "Log Prob"] + [f"Alt {i+1}" for i in range(max_alternatives)]) if table_data else None

        # Generate colored text (for the current chunk)
        if paginated_logprobs:
            min_logprob = min(paginated_logprobs)
            max_logprob = max(paginated_logprobs)
            normalized_probs = [0.5] * len(paginated_logprobs) if max_logprob == min_logprob else \
                              [(lp - min_logprob) / (max_logprob - min_logprob) for lp in paginated_logprobs]

            colored_text = ""
            for i, (token, norm_prob) in enumerate(zip(paginated_tokens, normalized_probs)):
                r = int(255 * (1 - norm_prob))  # Red for low confidence
                g = int(255 * norm_prob)        # Green for high confidence
                b = 0
                color = f"rgb({r}, {g}, {b})"
                colored_text += f'<span style="color: {color}; font-weight: bold;">{token}</span>'
                if i < len(paginated_tokens) - 1:
                    colored_text += " "
            colored_text_html = f"<p>{colored_text}</p>"
        else:
            colored_text_html = "No tokens to display in this chunk."

        # Top Token Log Probabilities (Interactive Plotly, dynamic length, for the current chunk)
        alt_viz_fig = create_empty_figure(f"Top Token Log Probabilities (Chunk {chunk + 1})") if not paginated_alternatives else go.Figure()
        if paginated_alternatives:
            for i, (token, probs) in enumerate(zip(paginated_tokens, paginated_alternatives)):
                for j, (alt_tok, prob) in enumerate(probs):
                    alt_viz_fig.add_trace(go.Bar(x=[f"{token} (Pos {i+start_idx})"], y=[prob], name=f"{alt_tok}", marker_color=['blue', 'green', 'red', 'purple', 'orange'][j % 5]))
            alt_viz_fig.update_layout(
                title=f"Top Token Log Probabilities (Chunk {chunk + 1})",
                xaxis_title="Token (Position)",
                yaxis_title="Log Probability",
                barmode='stack',
                hovermode="closest",
                clickmode='event+select'
            )
            alt_viz_fig.update_traces(
                customdata=[f"Token: {tok}, Alt: {alt}, Log Prob: {prob:.4f}, Position: {i+start_idx}" for i, (tok, alts) in enumerate(zip(paginated_tokens, paginated_alternatives)) for alt, prob in alts],
                hovertemplate='<b>%{customdata}</b><extra></extra>'
            )

        return (main_fig, df, colored_text_html, alt_viz_fig, drops_fig, total_chunks, chunk)

    except Exception as e:
        logger.error("Visualization failed: %s", str(e))
        return (create_empty_figure("Log Probabilities of Generated Tokens"), None, f"Error: {e}", create_empty_figure("Top Token Log Probabilities"), create_empty_figure("Significant Probability Drops"), 1, 0)

# Analysis functions for detecting correct vs. incorrect traces
def analyze_confidence_signature(logprobs, tokens):
    if not logprobs or not tokens:
        return "No data for confidence signature analysis.", None
    
    # Extract top probabilities
    top_probs = [lps[0][1] if lps and lps[0][1] is not None else -float('inf') for lps in logprobs]
    if not any(p != -float('inf') for p in top_probs):
        return "No valid log probabilities for confidence analysis.", None
    
    # Use a wide window for smoother trends; np.convolve with a uniform kernel of
    # 1/window_size yields a moving average of length len(top_probs) - window_size + 1
    window_size = 30
    if len(top_probs) < window_size + 1:
        return "Trace too short for confidence signature analysis.", None
    moving_avg = np.convolve(top_probs, np.ones(window_size) / window_size, mode='valid')
    
    # Calculate drop magnitudes
    drops = np.diff(moving_avg)
    
    # Use adaptive thresholding - only flag drops in the bottom 5% of all changes
    drop_threshold = np.percentile(drops, 5)  # More selective
    significant_drops = np.where(drops < drop_threshold)[0]
    
    # Cluster nearby drops (within 10 tokens) to avoid reporting multiple points in the same reasoning shift
    if len(significant_drops) > 0:
        clustered_drops = [significant_drops[0]]
        for drop in significant_drops[1:]:
            if drop - clustered_drops[-1] > 10:  # At least 10 tokens apart
                clustered_drops.append(drop)
    else:
        clustered_drops = []
    
    # Look for context markers near drops
    filtered_drops = []
    reasoning_markers = ["therefore", "thus", "so", "hence", "wait", "but", "however", "actually"]
    
    for drop in clustered_drops:
        # Adjust index for convolution window
        token_idx = drop + window_size - 1
        
        # Check surrounding context (10 tokens before and after)
        start_idx = max(0, token_idx - 10)
        end_idx = min(len(tokens), token_idx + 10)
        context = " ".join(tokens[start_idx:end_idx])
        
        # Only keep drops near reasoning transition markers
        if any(marker in context.lower() for marker in reasoning_markers):
            drop_magnitude = drops[drop]
            filtered_drops.append((token_idx, drop_magnitude, tokens[token_idx] if token_idx < len(tokens) else "End of trace"))
    
    # Sort by drop magnitude (largest drops first)
    filtered_drops.sort(key=lambda x: x[1])
    
    if not filtered_drops:
        return "No significant confidence shifts at reasoning transitions detected.", None
    
    # Return at most 3 most significant drops as the data
    return "Significant confidence shifts detected at reasoning transitions:", filtered_drops[:3]

def detect_interpretation_pivots(logprobs, tokens):
    if not logprobs or not tokens:
        return "No data for interpretation pivot detection.", None
    pivots = []
    reconsideration_tokens = ["wait", "but", "actually", "however", "hmm"]
    for i, (token, lps) in enumerate(zip(tokens, logprobs)):
        if not lps:
            continue
        for rt in reconsideration_tokens:
            for t, p in lps:
                if t.lower() == rt and p > -2.5:  # exp(-2.5) ≈ 0.08, i.e. a high-probability alternative
                    context = tokens[max(0, i-50):i]
                    pivots.append((i, rt, context))
    if not pivots:
        return "No interpretation pivots detected.", None
    return "Interpretation pivots detected:", pivots

def calculate_decision_entropy(logprobs, tokens=None):
    if not logprobs:
        return "No data for entropy spike detection.", None
    
    # Calculate entropy at each position
    entropies = []
    for lps in logprobs:
        if not lps or len(lps) < 2:  # Need at least two tokens for meaningful entropy
            entropies.append(0.0)
            continue
            
        # Only use top-5 tokens for entropy calculation to reduce noise
        top_k = min(5, len(lps))
        probs = [math.exp(p) for _, p in lps[:top_k] if p is not None]
        
        # Normalize probabilities to sum to 1
        if not probs or sum(probs) == 0:
            entropies.append(0.0)
            continue
            
        prob_sum = sum(probs)
        normalized_probs = [p/prob_sum for p in probs]
        
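        # Shannon entropy H = -sum(p * ln p) over the renormalized top-k distribution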
        entropy = -sum(p * math.log(p) for p in normalized_probs if p > 0)
        entropies.append(entropy)
    
    # Smooth entropy values with moving average
    window_size = 15
    if len(entropies) >= window_size:
        smoothed_entropies = np.convolve(entropies, np.ones(window_size)/window_size, mode='valid')
    else:
        smoothed_entropies = np.asarray(entropies)  # keep an ndarray so .size works below
    
    # More selective threshold - 90th percentile and 2x multiplier
    baseline = np.percentile(smoothed_entropies, 90) if smoothed_entropies.size > 0 else 0.0
    
    # Find significant spikes (much more selective)
    spikes = []
    if baseline > 0:
        raw_spikes = np.where(smoothed_entropies > baseline * 2.0)[0]
        
        # Cluster nearby spikes (within 20 tokens)
        if raw_spikes.size > 0:
            spikes = [raw_spikes[0]]
            for spike in raw_spikes[1:]:
                if spike - spikes[-1] > 20:
                    spikes.append(spike)
    
    # If we have token information, check context around spikes
    if tokens and spikes:
        context_spikes = []
        decision_markers = ["therefore", "thus", "so", "hence", "because", 
                          "wait", "but", "however", "actually", "instead"]
        
        for spike in spikes:
            # Adjust index for convolution window if using smoothed values
            spike_idx = spike + window_size//2 if len(entropies) >= window_size else spike
            
            if spike_idx >= len(tokens):
                continue
                
            # Check surrounding context (15 tokens before and after)
            start_idx = max(0, spike_idx - 15)
            end_idx = min(len(tokens), spike_idx + 15)
            
            if end_idx <= start_idx:
                continue
                
            context = " ".join(tokens[start_idx:end_idx])
            
            # Only keep spikes near reasoning transitions
            if any(marker in context.lower() for marker in decision_markers):
                entropy_value = smoothed_entropies[spike]  # `spike` already indexes the smoothed array
                context_spikes.append((spike_idx, entropy_value, tokens[spike_idx] if spike_idx < len(tokens) else "End"))
        
        spikes = context_spikes
    
    # Return at most 3 most significant spikes
    if not spikes:
        return "No significant entropy spikes detected at decision points.", None
    
    # Sort by entropy value (highest first) if we have context information
    if tokens and spikes:
        spikes.sort(key=lambda x: x[1], reverse=True)
        return "Significant entropy spikes detected at positions:", spikes[:3]
    
    return "Entropy spikes detected at positions:", spikes[:3]

def analyze_conclusion_competition(logprobs, tokens):
    if not logprobs or not tokens:
        return "No data for conclusion competition analysis.", None
    conclusion_indices = [i for i, t in enumerate(tokens) if any(marker in t.lower() for marker in ["therefore", "thus", "boxed", "answer"])]
    if not conclusion_indices:
        return "No conclusion markers found in trace.", None
    gaps = []
    conclusion_idx = conclusion_indices[-1]
    end_range = min(conclusion_idx + 50, len(logprobs))
    for idx in range(conclusion_idx, end_range):
        if idx < len(logprobs) and len(logprobs[idx]) >= 2 and logprobs[idx][0][1] is not None and logprobs[idx][1][1] is not None:
            gap = logprobs[idx][0][1] - logprobs[idx][1][1]
            gaps.append(gap)
    if not gaps:
        return "No conclusion competition data available.", None
    mean_gap = np.mean(gaps)
    return f"Mean probability gap at conclusion: {mean_gap:.4f} (higher indicates more confident conclusion)", None

def analyze_verification_signals(logprobs, tokens):
    if not logprobs or not tokens:
        return "No data for verification signal analysis.", None
    verification_terms = ["verify", "check", "confirm", "ensure", "double"]
    verification_probs = []
    for lps in logprobs:
        if not lps:
            continue
        max_v_prob = -float('inf')
        for token, prob in lps:
            if any(v_term in token.lower() for v_term in verification_terms) and prob is not None:
                max_v_prob = max(max_v_prob, prob)
        if max_v_prob > -float('inf'):
            verification_probs.append(max_v_prob)
    if not verification_probs:
        return "No verification signals detected.", None
    count, mean_prob = len(verification_probs), np.mean(verification_probs)
    return f"Verification signals found: {count} instances, mean probability: {mean_prob:.4f}", None

def detect_semantic_inversions(logprobs, tokens):
    if not logprobs or not tokens:
        return "No data for semantic inversion detection.", None
    inversion_pairs = [("more", "less"), ("larger", "smaller"), ("winning", "losing"), ("increase", "decrease"), ("greater", "lesser"), ("positive", "negative")]
    inversions = []
    for i, (token, lps) in enumerate(zip(tokens, logprobs)):
        if not lps:
            continue
        for pos, neg in inversion_pairs:
            if token.lower() == pos:
                for t, p in lps:
                    # Check for None before comparing; exp(-3.0) ≈ 0.05 is the cutoff
                    if t.lower() == neg and p is not None and p > -3.0:
                        inversions.append((i, pos, neg, p))
            elif token.lower() == neg:
                for t, p in lps:
                    if t.lower() == pos and p is not None and p > -3.0:
                        inversions.append((i, neg, pos, p))
    if not inversions:
        return "No semantic inversions detected.", None
    return "Semantic inversions detected:", inversions

# Function to perform full trace analysis
def analyze_full_trace(json_input):
    try:
        data = parse_input(json_input)
        content = data.get("content", []) if isinstance(data, dict) else data
        if not isinstance(content, list):
            raise ValueError("Content must be a list of entries")

        tokens = []
        logprobs = []
        for entry in content:
            if not isinstance(entry, dict):
                logger.warning("Skipping non-dictionary entry: %s", entry)
                continue
            logprob = ensure_float(entry.get("logprob", None))
            if logprob >= -100000:
                tokens.append(get_token(entry))
                top_probs = entry.get("top_logprobs", {}) or {}
                finite_top_probs = [(key, ensure_float(value)) for key, value in top_probs.items() if math.isfinite(ensure_float(value))]
                # Sort descending so downstream analyses can treat lps[0] as the top token
                logprobs.append(sorted(finite_top_probs, key=lambda x: x[1], reverse=True))

        if not logprobs or not tokens:
            return "No valid data for trace analysis.", None, None, None, None, None

        confidence_result, confidence_data = analyze_confidence_signature(logprobs, tokens)
        pivot_result, pivot_data = detect_interpretation_pivots(logprobs, tokens)
        entropy_result, entropy_data = calculate_decision_entropy(logprobs, tokens)
        conclusion_result, conclusion_data = analyze_conclusion_competition(logprobs, tokens)
        verification_result, verification_data = analyze_verification_signals(logprobs, tokens)
        inversion_result, inversion_data = detect_semantic_inversions(logprobs, tokens)

        # Precompute the joined context strings for pivots to avoid backslashes in f-string expressions
        pivot_details = ', '.join(f"Position: {pos}, Reconsideration: {rt}, Context: {' '.join(context)}" for pos, rt, context in pivot_data) if pivot_data else ""

        # Build the HTML without backslashes inside f-string expressions (a pre-Python-3.12 syntax restriction)
        analysis_html = f"""
        <h3>Trace Analysis Results</h3>
        <ul>
            <li><strong>Confidence Signature:</strong> {confidence_result}</li>
            {f"<ul><li>Details: {', '.join(f'Position: {pos}, Drop: {drop:.4f}, Token: {tok}' for pos, drop, tok in confidence_data)}</li></ul>" if confidence_data else ""}
            <li><strong>Interpretation Pivots:</strong> {pivot_result}</li>
            {f"<ul><li>Details: {pivot_details}</li></ul>" if pivot_data else ""}
            <li><strong>Decision Entropy Spikes:</strong> {entropy_result}</li>
            {f"<ul><li>Details: {', '.join(f'Position: {idx}, Entropy: {entropy:.4f}, Token: {tok}' for idx, entropy, tok in entropy_data)}</li></ul>" if entropy_data else ""}
            <li><strong>Conclusion Competition:</strong> {conclusion_result}</li>
            <li><strong>Verification Signals:</strong> {verification_result}</li>
            <li><strong>Semantic Inversions:</strong> {inversion_result}</li>
            {f"<ul><li>Details: {', '.join(f'Position: {pos}, Positive: {pos_word}, Negative: {neg_word}, Probability: {prob:.4f}' for pos, pos_word, neg_word, prob in inversion_data)}</li></ul>" if inversion_data else ""}
        </ul>
        """
        return analysis_html, None, None, None, None, None
    except Exception as e:
        logger.error("Trace analysis failed: %s", str(e))
        return f"Error: {e}", None, None, None, None, None

# Gradio interface with two tabs
try:
    with gr.Blocks(title="Log Probability Visualizer") as app:
        gr.Markdown("# Log Probability Visualizer")
        gr.Markdown("Paste your JSON log prob data below to analyze reasoning traces or visualize tokens in chunks of 100. Fixed filter ≥ -100000, dynamic number of top_logprobs, handles missing or null fields.")

        with gr.Tabs():
            with gr.Tab("Trace Analysis"):
                with gr.Row():
                    json_input_analysis = gr.Textbox(
                        label="JSON Input for Trace Analysis",
                        lines=10,
                        placeholder='{"content": [{"bytes": [44], "logprob": 0.0, "token": ",", "top_logprobs": {" so": -13.8046875, ".": -13.8046875, ",": -13.640625}}]}'
                    )
                with gr.Row():
                    analysis_output = gr.HTML(label="Trace Analysis Results")

                btn_analyze = gr.Button("Analyze Trace")
                btn_analyze.click(
                    fn=analyze_full_trace,
                    inputs=[json_input_analysis],
                    outputs=[analysis_output, gr.State(), gr.State(), gr.State(), gr.State(), gr.State()],
                )

            with gr.Tab("Visualization"):
                with gr.Row():
                    json_input_viz = gr.Textbox(
                        label="JSON Input for Visualization",
                        lines=10,
                        placeholder='{"content": [{"bytes": [44], "logprob": 0.0, "token": ",", "top_logprobs": {" so": -13.8046875, ".": -13.8046875, ",": -13.640625}}]}'
                    )
                    chunk = gr.Number(value=0, label="Current Chunk", precision=0, minimum=0)

                with gr.Row():
                    plot_output = gr.Plot(label="Log Probability Plot (Click for Tokens)")
                    drops_output = gr.Plot(label="Probability Drops (Click for Details)")

                with gr.Row():
                    table_output = gr.Dataframe(label="Token Log Probabilities and Top Alternatives")
                    alt_viz_output = gr.Plot(label="Top Token Log Probabilities (Click for Details)")

                with gr.Row():
                    text_output = gr.HTML(label="Colored Text (Confidence Visualization)")

                with gr.Row():
                    prev_btn = gr.Button("Previous Chunk")
                    next_btn = gr.Button("Next Chunk")
                    total_chunks_output = gr.Number(label="Total Chunks", interactive=False)

                # Precomputed next chunk state (hidden)
                precomputed_next = gr.State(value=None)

                btn_viz = gr.Button("Visualize")
                btn_viz.click(
                    fn=visualize_logprobs,
                    inputs=[json_input_viz, chunk],
                    outputs=[plot_output, table_output, text_output, alt_viz_output, drops_output, total_chunks_output, chunk],
                )

                def precompute_next_chunk(json_input, current_chunk):
                    return precompute_chunk(json_input, 100, current_chunk)

                def update_chunk(json_input, current_chunk, action, precomputed_next=None):
                    # The full visualization is recomputed either way; the precomputed
                    # state currently serves only as a freshness hint for logging.
                    total_chunks = visualize_logprobs(json_input, 0)[5]  # Get total chunks
                    if action == "prev" and current_chunk > 0:
                        current_chunk -= 1
                    elif action == "next" and current_chunk < total_chunks - 1:
                        current_chunk += 1
                        if precomputed_next and all(precomputed_next):
                            logger.debug("Using precomputed next chunk for chunk %d", current_chunk)
                    return visualize_logprobs(json_input, current_chunk)

                prev_btn.click(
                    fn=update_chunk,
                    inputs=[json_input_viz, chunk, gr.State(value="prev"), precomputed_next],
                    outputs=[plot_output, table_output, text_output, alt_viz_output, drops_output, total_chunks_output, chunk],
                )

                next_btn.click(
                    fn=update_chunk,
                    inputs=[json_input_viz, chunk, gr.State(value="next"), precomputed_next],
                    outputs=[plot_output, table_output, text_output, alt_viz_output, drops_output, total_chunks_output, chunk],
                )

                def trigger_precomputation(json_input, current_chunk):
                    # Writes to the State object directly; a single-session convenience
                    # that is not safe for concurrent users.
                    try:
                        precomputed_next.value = precompute_next_chunk(json_input, current_chunk)
                    except Exception as e:
                        logger.error("Precomputation trigger failed: %s", str(e))
                    return gr.update()  # no value change, so chunk.change does not re-fire

                chunk.change(
                    fn=trigger_precomputation,
                    inputs=[json_input_viz, chunk],
                    outputs=[chunk],
                )
    # Launch the Gradio application
    app.launch()
except Exception as e:
    logger.error("Application startup failed: %s", str(e))
    raise