Update app.py
app.py CHANGED
@@ -104,7 +104,7 @@ async def precompute_chunk(json_input, chunk_size, current_chunk):
         return None, None, None
 
 # Function to process and visualize a chunk of log probs with dynamic top_logprobs
-def visualize_logprobs(json_input, chunk=0, chunk_size=1000):
+def visualize_logprobs(json_input, chunk=0, chunk_size=100):
     try:
         # Parse the input (handles JSON only)
         data = parse_input(json_input)
@@ -152,7 +152,7 @@ def visualize_logprobs(json_input, chunk=0, chunk_size=1000):
         if not logprobs or not tokens:
             return (create_empty_figure("Log Probabilities of Generated Tokens"), None, "No tokens to display.", create_empty_figure("Top Token Log Probabilities"), create_empty_figure("Significant Probability Drops"), 1, 0)
 
-        # Paginate data for chunks of 1000 tokens
+        # Paginate data for chunks of 100 tokens
         total_chunks = max(1, (len(logprobs) + chunk_size - 1) // chunk_size)
         start_idx = chunk * chunk_size
         end_idx = min((chunk + 1) * chunk_size, len(logprobs))
@@ -285,7 +285,7 @@ def visualize_logprobs(json_input, chunk=0, chunk_size=1000):
 with gr.Blocks(title="Log Probability Visualizer") as app:
     gr.Markdown("# Log Probability Visualizer")
     gr.Markdown(
-        "Paste your JSON log prob data below to visualize tokens in chunks of 1000. Fixed filter ≥ -100000, dynamic number of top_logprobs, handles missing or null fields. Next chunk is precomputed proactively."
+        "Paste your JSON log prob data below to visualize tokens in chunks of 100. Fixed filter ≥ -100000, dynamic number of top_logprobs, handles missing or null fields. Next chunk is precomputed proactively."
     )
 
     with gr.Row():
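
The UI text above mentions a fixed filter of ≥ -100000 on log prob values. The diff does not show how app.py applies that filter; the sketch below is only one plausible reading, and filter_pairs and FILTER_THRESHOLD are hypothetical names, not identifiers from the app:

# Hedged sketch: the exact filtering logic in app.py is not shown in this diff.
# Assumes tokens/logprobs are parallel lists and None marks a missing value.
FILTER_THRESHOLD = -100000  # matches the "Fixed filter ≥ -100000" in the UI text

def filter_pairs(tokens, logprobs):
    # Keep only pairs whose log prob is present and above the fixed threshold.
    return [
        (tok, lp)
        for tok, lp in zip(tokens, logprobs)
        if lp is not None and lp >= FILTER_THRESHOLD
    ]

print(filter_pairs(["a", "b"], [-0.1, None]))  # [('a', -0.1)]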
@@ -326,7 +326,7 @@ with gr.Blocks(title="Log Probability Visualizer") as app:
     async def precompute_next_chunk(json_input, current_chunk, precomputed_next):
         if precomputed_next is not None:
             return precomputed_next  # Use cached precomputed chunk if available
-        next_tokens, next_logprobs, next_alternatives = await precompute_chunk(json_input, 1000, current_chunk)
+        next_tokens, next_logprobs, next_alternatives = await precompute_chunk(json_input, 100, current_chunk)
         if next_tokens is None or next_logprobs is None or next_alternatives is None:
             return None
         return (next_tokens, next_logprobs, next_alternatives)
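
This last hunk is a precompute-and-cache pattern: reuse the cached next chunk when one exists, otherwise compute it with the new chunk size of 100. A self-contained sketch of the same flow, with a stub standing in for the real precompute_chunk (which in app.py parses the JSON input and slices out the following chunk):

import asyncio

# Stub standing in for app.py's precompute_chunk; the real one parses the
# JSON input and returns the tokens, log probs, and alternatives of the
# chunk after current_chunk. The return values here are made up.
async def precompute_chunk(json_input, chunk_size, current_chunk):
    await asyncio.sleep(0)  # placeholder for real async work
    return (["tok"], [-0.5], [[("alt", -1.2)]])

async def precompute_next_chunk(json_input, current_chunk, precomputed_next):
    if precomputed_next is not None:
        return precomputed_next  # Reuse the cached chunk instead of recomputing
    next_tokens, next_logprobs, next_alternatives = await precompute_chunk(json_input, 100, current_chunk)
    if next_tokens is None or next_logprobs is None or next_alternatives is None:
        return None
    return (next_tokens, next_logprobs, next_alternatives)

print(asyncio.run(precompute_next_chunk("{}", 0, None)))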