import gradio as gr
import json
import matplotlib.pyplot as plt
import pandas as pd
import io
import base64
import math
import ast
import logging
# Set up logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Function to safely parse JSON or Python dictionary input
def parse_input(json_input):
logger.debug("Attempting to parse input: %s", json_input)
try:
# Try to parse as JSON first
data = json.loads(json_input)
logger.debug("Successfully parsed as JSON")
return data
except json.JSONDecodeError as e:
logger.error("JSON parsing failed: %s", str(e))
try:
# If JSON fails, try to parse as Python literal (e.g., with single quotes)
data = ast.literal_eval(json_input)
logger.debug("Successfully parsed as Python literal")
# Convert Python dictionary to JSON-compatible format (replace single quotes with double quotes)
def dict_to_json(obj):
if isinstance(obj, dict):
return {str(k): dict_to_json(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [dict_to_json(item) for item in obj]
else:
return obj
converted_data = dict_to_json(data)
logger.debug("Converted to JSON-compatible format")
return converted_data
except (SyntaxError, ValueError) as e:
logger.error("Python literal parsing failed: %s", str(e))
raise ValueError(f"Malformed input: {str(e)}. Ensure property names are in double quotes (e.g., \"content\") or correct Python dictionary format.")
# Function to ensure a value is a float, converting from string if necessary
def ensure_float(value):
    if value is None:
        return None
    if isinstance(value, str):
        try:
            return float(value)
        except ValueError:
            logger.error("Failed to convert string '%s' to float", value)
            return None
    if isinstance(value, (int, float)):
        return float(value)
    return None
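
# Illustrative behavior of ensure_float (comment sketch, not in the original file):
#     ensure_float("-0.25")  # -> -0.25
#     ensure_float(3)        # -> 3.0
#     ensure_float("n/a")    # -> None (the failed conversion is logged)
#     ensure_float(None)     # -> None
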
# Function to process and visualize log probs
def visualize_logprobs(json_input):
    try:
        # Parse the input (handles both JSON and Python dictionaries)
        data = parse_input(json_input)

        # Ensure data is a list or a dictionary with a 'content' key
        if isinstance(data, dict) and "content" in data:
            content = data["content"]
        elif isinstance(data, list):
            content = data
        else:
            raise ValueError("Input must be a list or a dictionary with a 'content' key")

        # Extract tokens and log probs, skipping None or non-finite values
        tokens = []
        logprobs = []
        for entry in content:
            logprob = ensure_float(entry.get("logprob", None))
            if logprob is not None and math.isfinite(logprob):
                tokens.append(entry["token"])
                logprobs.append(logprob)
            else:
                logger.debug(
                    "Skipping entry with logprob: %s (type: %s)",
                    entry.get("logprob"),
                    type(entry.get("logprob", None)),
                )

        # Prepare table data, handling None in top_logprobs
        table_data = []
        for entry in content:
            logprob = ensure_float(entry.get("logprob", None))
            # Only include entries with a finite logprob and non-None top_logprobs
            if (
                logprob is not None
                and math.isfinite(logprob)
                and "top_logprobs" in entry
                and entry["top_logprobs"] is not None
            ):
                token = entry["token"]
                logger.debug(
                    "Processing token: %s, logprob: %s (type: %s)", token, logprob, type(logprob)
                )
                top_logprobs = entry["top_logprobs"]

                # Keep only finite float values from top_logprobs
                finite_top_logprobs = {}
                for key, value in top_logprobs.items():
                    float_value = ensure_float(value)
                    if float_value is not None and math.isfinite(float_value):
                        finite_top_logprobs[key] = float_value

                # Extract the top 3 alternatives from top_logprobs
                top_3 = sorted(
                    finite_top_logprobs.items(), key=lambda x: x[1], reverse=True
                )[:3]
                row = [token, f"{logprob:.4f}"]
                for alt_token, alt_logprob in top_3:
                    row.append(f"{alt_token}: {alt_logprob:.4f}")
                # Pad with empty strings if there are fewer than 3 alternatives
                while len(row) < 5:
                    row.append("")
                table_data.append(row)

        # Create the plot
        if logprobs:
            plt.figure(figsize=(10, 5))
            plt.plot(range(len(logprobs)), logprobs, marker="o", linestyle="-", color="b")
            plt.title("Log Probabilities of Generated Tokens")
            plt.xlabel("Token Position")
            plt.ylabel("Log Probability")
            plt.grid(True)
            plt.xticks(range(len(logprobs)), tokens, rotation=45, ha="right")
            plt.tight_layout()

            # Save the plot to a bytes buffer
            buf = io.BytesIO()
            plt.savefig(buf, format="png", bbox_inches="tight")
            buf.seek(0)
            plt.close()

            # Convert to base64 for embedding in the Gradio HTML output
            img_bytes = buf.getvalue()
            img_base64 = base64.b64encode(img_bytes).decode("utf-8")
            img_html = f'<img src="data:image/png;base64,{img_base64}" style="max-width: 100%; height: auto;">'
        else:
            img_html = "No finite log probabilities to plot."

        # Create a DataFrame for the table
        df = (
            pd.DataFrame(
                table_data,
                columns=[
                    "Token",
                    "Log Prob",
                    "Top 1 Alternative",
                    "Top 2 Alternative",
                    "Top 3 Alternative",
                ],
            )
            if table_data
            else None
        )

        # Generate colored text
        if logprobs:
            min_logprob = min(logprobs)
            max_logprob = max(logprobs)
            if max_logprob == min_logprob:
                normalized_probs = [0.5] * len(logprobs)
            else:
                normalized_probs = [
                    (lp - min_logprob) / (max_logprob - min_logprob) for lp in logprobs
                ]

            colored_text = ""
            for i, (token, norm_prob) in enumerate(zip(tokens, normalized_probs)):
                r = int(255 * (1 - norm_prob))  # Red for low confidence
                g = int(255 * norm_prob)  # Green for high confidence
                b = 0
                color = f"rgb({r}, {g}, {b})"
                colored_text += f'<span style="color: {color}; font-weight: bold;">{token}</span>'
                if i < len(tokens) - 1:
                    colored_text += " "
            colored_text_html = f"<p>{colored_text}</p>"
        else:
            colored_text_html = "No finite log probabilities to display."

        return img_html, df, colored_text_html
    except Exception as e:
        logger.error("Visualization failed: %s", str(e))
        return f"Error: {str(e)}", None, None
# Gradio interface
with gr.Blocks(title="Log Probability Visualizer") as app:
gr.Markdown("# Log Probability Visualizer")
gr.Markdown(
"Paste your JSON or Python dictionary log prob data below to visualize the tokens and their probabilities. Ensure property names are in double quotes (e.g., \"content\") for JSON, or use correct Python dictionary format."
)
json_input = gr.Textbox(
label="JSON Input",
lines=10,
placeholder="Paste your JSON (e.g., {\"content\": [...]}) or Python dict (e.g., {'content': [...]}) here...",
)
plot_output = gr.HTML(label="Log Probability Plot")
table_output = gr.Dataframe(label="Token Log Probabilities and Top Alternatives")
text_output = gr.HTML(label="Colored Text (Confidence Visualization)")
btn = gr.Button("Visualize")
btn.click(
fn=visualize_logprobs,
inputs=json_input,
outputs=[plot_output, table_output, text_output],
)

app.launch()