import gradio as gr
import json
import pandas as pd
import math
import logging
import plotly.graph_objects as go
import asyncio

# Set up logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# Function to safely parse JSON input (raises ValueError with guidance on malformed input)
def parse_input(json_input):
    logger.debug("Attempting to parse input: %s", json_input)
    try:
        # Try to parse as JSON first
        data = json.loads(json_input)
        logger.debug("Successfully parsed as JSON")
        return data
    except json.JSONDecodeError as e:
        logger.error("JSON parsing failed: %s (Input: %s)", str(e), json_input[:100] + "..." if len(json_input) > 100 else json_input)
        raise ValueError(f"Malformed input: {str(e)}. Ensure property names are in double quotes (e.g., \"content\") and the format matches JSON (e.g., {{\"content\": [...]}}).")

# Function to ensure a value is a float, converting from string if necessary
def ensure_float(value):
    if value is None:
        logger.debug("Replacing None logprob with 0.0")
        return 0.0  # Default to 0.0 for None to ensure visualization
    if isinstance(value, str):
        try:
            return float(value)
        except ValueError:
            logger.error("Failed to convert string '%s' to float", value)
            return 0.0  # Default to 0.0 for invalid strings
    if isinstance(value, (int, float)):
        return float(value)
    return 0.0  # Default for any other type
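
# Examples of the coercion rules above:
#   ensure_float(None)   -> 0.0   (missing logprob treated as 0.0)
#   ensure_float("-1.5") -> -1.5  (numeric strings are converted)
#   ensure_float("abc")  -> 0.0   (invalid strings fall back to 0.0)
#   ensure_float(2)      -> 2.0   (ints are widened to float)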

# Function to get or generate a token value (default to "Unknown" if missing)
def get_token(entry):
    token = entry.get("token", "Unknown")
    if token == "Unknown":
        logger.warning("Missing 'token' key for entry: %s, using 'Unknown'", entry)
    return token

# Function to create an empty Plotly figure
def create_empty_figure(title):
    return go.Figure().update_layout(title=title, xaxis_title="", yaxis_title="", showlegend=False)

# Precompute the next chunk asynchronously (the extraction logic mirrors visualize_logprobs below)
async def precompute_chunk(json_input, chunk_size, current_chunk):
    try:
        data = parse_input(json_input)
        content = data.get("content", []) if isinstance(data, dict) else data
        if not isinstance(content, list):
            raise ValueError("Content must be a list of entries")

        tokens = []
        logprobs = []
        top_alternatives = []
        for entry in content:
            if not isinstance(entry, dict):
                logger.warning("Skipping non-dictionary entry: %s", entry)
                continue
            logprob = ensure_float(entry.get("logprob", None))
            if logprob >= -100000:  # Include all entries with default 0.0
                tokens.append(get_token(entry))
                logprobs.append(logprob)
                top_probs = entry.get("top_logprobs", {})
                if top_probs is None:
                    logger.debug("top_logprobs is None for token: %s, using empty dict", get_token(entry))
                    top_probs = {}
                finite_top_probs = []
                for key, value in top_probs.items():
                    float_value = ensure_float(value)
                    if math.isfinite(float_value):  # ensure_float never returns None
                        finite_top_probs.append((key, float_value))
                sorted_probs = sorted(finite_top_probs, key=lambda x: x[1], reverse=True)
                top_alternatives.append(sorted_probs)

        if not tokens or not logprobs:
            return None, None, None

        next_chunk = current_chunk + 1
        start_idx = next_chunk * chunk_size
        end_idx = min((next_chunk + 1) * chunk_size, len(tokens))
        if start_idx >= len(tokens):
            return None, None, None

        paginated_tokens = tokens[start_idx:end_idx]
        paginated_logprobs = logprobs[start_idx:end_idx]
        paginated_alternatives = top_alternatives[start_idx:end_idx]

        return paginated_tokens, paginated_logprobs, paginated_alternatives
    except Exception as e:
        logger.error("Precomputation failed for chunk %d: %s", current_chunk + 1, str(e))
        return None, None, None
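
# Standalone usage sketch (outside Gradio; `raw_json` is a hypothetical string
# in the shape shown in the UI placeholder below):
#
#   tokens, logprobs, alts = asyncio.run(precompute_chunk(raw_json, 1000, 0))
#   if tokens is not None:
#       print(f"Precomputed {len(tokens)} tokens for chunk 2")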

# Function to process and visualize a chunk of log probs with dynamic top_logprobs
def visualize_logprobs(json_input, chunk=0, chunk_size=1000):
    try:
        # Parse the input (handles JSON only)
        data = parse_input(json_input)
        
        # Ensure data is a dictionary with 'content' key containing a list
        if isinstance(data, dict) and "content" in data:
            content = data["content"]
            if not isinstance(content, list):
                raise ValueError("Content must be a list of entries")
        elif isinstance(data, list):
            content = data  # Handle direct list input (though only JSON is expected)
        else:
            raise ValueError("Input must be a dictionary with 'content' key or a list of entries")

        # Extract tokens, log probs, and top alternatives; ensure_float defaults missing/None
        # values to 0.0, so the fixed -100000 floor only drops extremely negative (e.g., -inf) logprobs
        tokens = []
        logprobs = []
        top_alternatives = []  # List to store all top_logprobs (dynamic length)
        for entry in content:
            if not isinstance(entry, dict):
                logger.warning("Skipping non-dictionary entry: %s", entry)
                continue
            logprob = ensure_float(entry.get("logprob", None))
            if logprob >= -100000:  # Include all entries with default 0.0
                tokens.append(get_token(entry))
                logprobs.append(logprob)
                # Get top_logprobs, default to empty dict if None
                top_probs = entry.get("top_logprobs", {})
                if top_probs is None:
                    logger.debug("top_logprobs is None for token: %s, using empty dict", get_token(entry))
                    top_probs = {}  # Default to empty dict for None
                # Ensure all values in top_logprobs are floats and create a list of tuples
                finite_top_probs = []
                for key, value in top_probs.items():
                    float_value = ensure_float(value)
                    if math.isfinite(float_value):  # ensure_float never returns None
                        finite_top_probs.append((key, float_value))
                # Sort by log probability (descending) to get all alternatives
                sorted_probs = sorted(finite_top_probs, key=lambda x: x[1], reverse=True)
                top_alternatives.append(sorted_probs)  # Store all alternatives, dynamic length
            else:
                logger.debug("Skipping entry with logprob: %s (type: %s)", entry.get("logprob"), type(entry.get("logprob", None)))

        # Check if there's valid data after filtering
        if not logprobs or not tokens:
            return (create_empty_figure("Log Probabilities of Generated Tokens"), None, "No tokens to display.", create_empty_figure("Top Token Log Probabilities"), create_empty_figure("Significant Probability Drops"), 1, 0)

        # Paginate data into chunks of chunk_size tokens (1,000 by default)
        total_chunks = max(1, (len(logprobs) + chunk_size - 1) // chunk_size)
        start_idx = chunk * chunk_size
        end_idx = min((chunk + 1) * chunk_size, len(logprobs))
        paginated_tokens = tokens[start_idx:end_idx]
        paginated_logprobs = logprobs[start_idx:end_idx]
        paginated_alternatives = top_alternatives[start_idx:end_idx] if top_alternatives else []
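
        # Worked example of the arithmetic above: with 2,500 tokens and
        # chunk_size=1000, total_chunks = (2500 + 999) // 1000 = 3, and
        # chunk=2 covers indices 2000..2499 (end_idx clamped by min()).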

        # 1. Main Log Probability Plot (Interactive Plotly)
        main_fig = go.Figure()
        main_fig.add_trace(go.Scatter(x=list(range(len(paginated_logprobs))), y=paginated_logprobs, mode='markers+lines', name='Log Prob', marker=dict(color='blue')))
        main_fig.update_layout(
            title="Log Probabilities of Generated Tokens (Chunk %d)" % (chunk + 1),
            xaxis_title="Token Position (within chunk)",
            yaxis_title="Log Probability",
            hovermode="closest",
            clickmode='event+select'
        )
        main_fig.update_traces(
            customdata=[f"Token: {tok}, Log Prob: {prob:.4f}, Position: {i+start_idx}" for i, (tok, prob) in enumerate(zip(paginated_tokens, paginated_logprobs))],
            hovertemplate='<b>%{customdata}</b><extra></extra>'
        )

        # 2. Probability Drop Analysis (Interactive Plotly)
        if len(paginated_logprobs) < 2:
            drops_fig = create_empty_figure("Significant Probability Drops (Chunk %d)" % (chunk + 1))
        else:
            drops = [paginated_logprobs[i+1] - paginated_logprobs[i] for i in range(len(paginated_logprobs)-1)]
            drops_fig = go.Figure()
            drops_fig.add_trace(go.Bar(x=list(range(len(drops))), y=drops, name='Drop', marker_color='red'))
            drops_fig.update_layout(
                title="Significant Probability Drops (Chunk %d)" % (chunk + 1),
                xaxis_title="Token Position (within chunk)",
                yaxis_title="Log Probability Drop",
                hovermode="closest",
                clickmode='event+select'
            )
            drops_fig.update_traces(
                customdata=[f"Drop: {drop:.4f}, From: {paginated_tokens[i]} to {paginated_tokens[i+1]}, Position: {i+start_idx}" for i, drop in enumerate(drops)],
                hovertemplate='<b>%{customdata}</b><extra></extra>'
            )

        # Create DataFrame for the table with dynamic top_logprobs.
        # Rows are built from the already-filtered, paginated lists so indices
        # stay aligned even when malformed entries were skipped above.
        table_data = []
        max_alternatives = max((len(alts) for alts in paginated_alternatives), default=0)
        for token, logprob, sorted_probs in zip(paginated_tokens, paginated_logprobs, paginated_alternatives):
            row = [token, f"{logprob:.4f}"]
            for alt_token, alt_logprob in sorted_probs[:max_alternatives]:
                row.append(f"{alt_token}: {alt_logprob:.4f}")
            # Pad with empty strings if a token has fewer alternatives than the max
            while len(row) < 2 + max_alternatives:
                row.append("")
            table_data.append(row)

        df = (
            pd.DataFrame(
                table_data,
                columns=["Token", "Log Prob"] + [f"Alt {i+1}" for i in range(max_alternatives)],
            )
            if table_data
            else None
        )

        # Generate colored text (for the current chunk)
        if paginated_logprobs:
            min_logprob = min(paginated_logprobs)
            max_logprob = max(paginated_logprobs)
            if max_logprob == min_logprob:
                normalized_probs = [0.5] * len(paginated_logprobs)
            else:
                normalized_probs = [
                    (lp - min_logprob) / (max_logprob - min_logprob) for lp in paginated_logprobs
                ]

            colored_text = ""
            for i, (token, norm_prob) in enumerate(zip(paginated_tokens, normalized_probs)):
                r = int(255 * (1 - norm_prob))  # Red for low confidence
                g = int(255 * norm_prob)        # Green for high confidence
                b = 0
                color = f"rgb({r}, {g}, {b})"
                colored_text += f'<span style="color: {color}; font-weight: bold;">{token}</span>'
                if i < len(paginated_tokens) - 1:
                    colored_text += " "
            colored_text_html = f"<p>{colored_text}</p>"
        else:
            colored_text_html = "No tokens to display in this chunk."

        # Top Token Log Probabilities (Interactive Plotly, dynamic length, for the current chunk)
        if paginated_logprobs and paginated_alternatives:
            alt_viz_fig = go.Figure()
            colors = ['blue', 'green', 'red', 'purple', 'orange']
            for i, (token, probs) in enumerate(zip(paginated_tokens, paginated_alternatives)):
                for j, (alt_tok, prob) in enumerate(probs):
                    # One single-bar trace per alternative, with hover data
                    # attached per trace so each bar reports its own details
                    # and each alternative rank gets a distinct color.
                    alt_viz_fig.add_trace(go.Bar(
                        x=[f"{token} (Pos {i+start_idx})"],
                        y=[prob],
                        name=f"{alt_tok}",
                        marker_color=colors[j % len(colors)],
                        customdata=[f"Token: {token}, Alt: {alt_tok}, Log Prob: {prob:.4f}, Position: {i+start_idx}"],
                        hovertemplate='<b>%{customdata}</b><extra></extra>',
                    ))
            alt_viz_fig.update_layout(
                title="Top Token Log Probabilities (Chunk %d)" % (chunk + 1),
                xaxis_title="Token (Position)",
                yaxis_title="Log Probability",
                barmode='stack',
                hovermode="closest",
                clickmode='event+select'
            )
        else:
            alt_viz_fig = create_empty_figure("Top Token Log Probabilities (Chunk %d)" % (chunk + 1))

        return (main_fig, df, colored_text_html, alt_viz_fig, drops_fig, total_chunks, chunk)

    except Exception as e:
        logger.error("Visualization failed: %s (Input: %s)", str(e), json_input[:100] + "..." if len(json_input) > 100 else json_input)
        return (create_empty_figure("Log Probabilities of Generated Tokens"), None, "No finite log probabilities to display.", create_empty_figure("Top Token Log Probabilities"), create_empty_figure("Significant Probability Drops"), 1, 0)
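
# Standalone usage sketch (no Gradio needed; `raw_json` is a hypothetical
# JSON string in the placeholder shape):
#
#   main_fig, df, html, alt_fig, drops_fig, total, cur = visualize_logprobs(raw_json, chunk=0)
#   main_fig.show()   # open the interactive Plotly figure
#   print(df)         # token table, or None if no rows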

# Gradio interface with chunked visualization and proactive precomputation
with gr.Blocks(title="Log Probability Visualizer") as app:
    gr.Markdown("# Log Probability Visualizer")
    gr.Markdown(
        "Paste your JSON log-prob data below to visualize tokens in chunks of 1,000. "
        "Entries are kept when logprob ≥ -100000; each token may carry a dynamic number of top_logprobs; "
        "missing or null fields are handled gracefully. The next chunk is precomputed proactively."
    )

    with gr.Row():
        json_input = gr.Textbox(
            label="JSON Input",
            lines=10,
            placeholder="Paste your JSON (e.g., {\"content\": [{\"bytes\": [44], \"logprob\": 0.0, \"token\": \",\", \"top_logprobs\": {\" so\": -13.8046875, \".\": -13.8046875, \",\": -13.640625}}]}).",
        )
        chunk = gr.Number(value=0, label="Current Chunk", precision=0, minimum=0)

    with gr.Row():
        plot_output = gr.Plot(label="Log Probability Plot (Click for Tokens)")
        drops_output = gr.Plot(label="Probability Drops (Click for Details)")

    with gr.Row():
        table_output = gr.Dataframe(label="Token Log Probabilities and Top Alternatives")
        alt_viz_output = gr.Plot(label="Top Token Log Probabilities (Click for Details)")

    with gr.Row():
        text_output = gr.HTML(label="Colored Text (Confidence Visualization)")

    with gr.Row():
        prev_btn = gr.Button("Previous Chunk")
        next_btn = gr.Button("Next Chunk")
        total_chunks_output = gr.Number(label="Total Chunks", interactive=False)

    # Precomputed next chunk state (hidden)
    precomputed_next = gr.State(value=None)

    btn = gr.Button("Visualize")
    btn.click(
        fn=visualize_logprobs,
        inputs=[json_input, chunk],
        outputs=[plot_output, table_output, text_output, alt_viz_output, drops_output, total_chunks_output, chunk],
    )

    # Precompute next chunk proactively when on current chunk
    async def precompute_next_chunk(json_input, current_chunk, precomputed_next):
        if precomputed_next is not None:
            return precomputed_next  # Use cached precomputed chunk if available
        next_tokens, next_logprobs, next_alternatives = await precompute_chunk(json_input, 1000, current_chunk)
        if next_tokens is None or next_logprobs is None or next_alternatives is None:
            return None
        return (next_tokens, next_logprobs, next_alternatives)

    # Update chunk on button clicks
    def update_chunk(json_input, current_chunk, action, precomputed_next=None):
        # Element 5 of the visualize_logprobs return tuple is total_chunks;
        # this recomputes the full visualization just to read it, which is
        # simple but not cheap for very large inputs.
        total_chunks = visualize_logprobs(json_input, 0)[5]
        if action == "prev" and current_chunk > 0:
            current_chunk -= 1
        elif action == "next" and current_chunk < total_chunks - 1:
            current_chunk += 1
            if precomputed_next:
                # Precomputed data only signals readiness here; the figures
                # themselves are still rebuilt by visualize_logprobs below.
                logger.debug("Precomputed data available for chunk %d", current_chunk)
        return visualize_logprobs(json_input, current_chunk)

    prev_btn.click(
        fn=update_chunk,
        inputs=[json_input, chunk, gr.State(value="prev"), precomputed_next],
        outputs=[plot_output, table_output, text_output, alt_viz_output, drops_output, total_chunks_output, chunk],
    )

    next_btn.click(
        fn=update_chunk,
        inputs=[json_input, chunk, gr.State(value="next"), precomputed_next],
        outputs=[plot_output, table_output, text_output, alt_viz_output, drops_output, total_chunks_output, chunk],
    )

    # Trigger precomputation when chunk changes (via button clicks or initial load).
    # Declared async so Gradio runs it on its event loop, where
    # asyncio.create_task is valid; in a plain sync callback (executed in a
    # worker thread) create_task would raise RuntimeError. The task is
    # fire-and-forget: it warms the computation but its result is not
    # written back into the precomputed_next state.
    async def trigger_precomputation(json_input, current_chunk):
        asyncio.create_task(precompute_next_chunk(json_input, current_chunk, None))
        return gr.update(value=current_chunk)

    # Use a dummy event to trigger precomputation on chunk change (simplified for Gradio)
    chunk.change(
        fn=trigger_precomputation,
        inputs=[json_input, chunk],
        outputs=[chunk],
    )

if __name__ == "__main__":
    app.launch()