# dream_app.py
import torch
import torch.nn.functional as F
import torch.distributions as dists
import gradio as gr
import spaces  # Required for the @spaces.GPU decorator on Hugging Face Spaces
from transformers import AutoTokenizer, AutoModel, AutoConfig
import time
import traceback  # For detailed error reporting
from typing import List, Dict, Optional

# --- START: Copied Helper functions from generation_utils.py ---
def top_p_logits(logits, top_p=None):
    """ Applies top-p filtering to logits. """
    if top_p is None or top_p >= 1.0:
        return logits
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
    sorted_indices_to_remove = cumulative_probs > top_p
    sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
    sorted_indices_to_remove[..., 0] = 0
    mask = torch.zeros_like(logits, dtype=torch.bool, device=logits.device)
    mask = mask.scatter_(-1, sorted_indices, sorted_indices_to_remove)
    logits = logits.masked_fill(mask, torch.finfo(logits.dtype).min)
    return logits
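# Worked example of the filtering above: for sorted probs (0.6, 0.3, 0.08, 0.02)
# and top_p=0.7, the cumulative mass first reaches 0.7 at the second token, so
# the top two tokens are kept and the rest are masked to the dtype's minimum.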

def top_k_logits(logits, top_k=None):
    """ Applies top-k filtering to logits. """
    if top_k is None or top_k <= 0:
        return logits
    top_k = min(top_k, logits.size(-1))
    indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
    logits = logits.masked_fill(indices_to_remove, torch.finfo(logits.dtype).min)
    return logits

def sample_tokens(logits, temperature=0.0, top_p=None, top_k=None, margin_confidence=False, neg_entropy=False):
    """ Samples tokens based on logits and calculates confidence. """
    if temperature > 0:
        safe_temp = max(temperature, 1e-6)
        logits = logits / safe_temp
    if top_p is not None and 0.0 < top_p < 1.0:
        logits = top_p_logits(logits, top_p)
    if top_k is not None and top_k > 0:
        logits = top_k_logits(logits, top_k)

    is_all_neg_inf = torch.all(logits <= torch.finfo(logits.dtype).min, dim=-1, keepdim=True)
    if torch.any(is_all_neg_inf):
        uniform_logits = torch.zeros_like(logits)
        logits = torch.where(is_all_neg_inf, uniform_logits, logits)

    probs = torch.softmax(logits, dim=-1)
    probs = torch.clamp(probs, min=0.0)
    prob_sum = probs.sum(dim=-1, keepdim=True)
    safe_prob_sum = torch.max(prob_sum, torch.tensor(1e-12, device=probs.device, dtype=probs.dtype))
    probs = probs / safe_prob_sum
    probs = torch.nan_to_num(probs, nan=0.0)

    if temperature > 0:
        try:
            x0 = dists.Categorical(probs=probs).sample()
            confidence = torch.gather(probs, -1, x0.unsqueeze(-1)).squeeze(-1)
        except Exception as e:
            print(f"Warning: Error during Categorical sampling: {e}. Falling back to argmax.")
            confidence, x0 = probs.max(dim=-1)
    else:
        confidence, x0 = probs.max(dim=-1)

    if margin_confidence:
        sorted_probs, _ = torch.sort(probs, dim=-1, descending=True)
        top1_probs = sorted_probs[..., 0]
        top2_probs = sorted_probs[..., 1] if sorted_probs.shape[-1] > 1 else top1_probs
        confidence = top1_probs - top2_probs
    elif neg_entropy: # Use elif to avoid calculating entropy if margin_confidence was True
        epsilon = 1e-10
        log_probs = torch.log(probs + epsilon)
        confidence = torch.sum(probs * log_probs, dim=-1) # Negative entropy
    # Else: confidence is just the probability of the sampled token if temperature > 0, or max prob otherwise

    confidence = torch.nan_to_num(confidence, nan=0.0)
    return confidence, x0
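# Minimal illustrative usage (a sketch, not executed at import time):
#   logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
#   conf, tok = sample_tokens(logits, temperature=0.0)             # greedy: tok == 0
#   conf, tok = sample_tokens(logits, temperature=0.7, top_p=0.9)  # stochastic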
# --- END: Copied Helper functions ---


# --- Model Loading and Constants ---
model_path = "Dream-org/Dream-v0-Instruct-7B"
# Fetch the config first; with trust_remote_code=True this also pulls in the
# model's custom code before the heavier weight download.
config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(f"Using device: {device}")

print("Loading tokenizer...")
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
print("Loading model...")
model = AutoModel.from_pretrained(
    model_path,
    torch_dtype=torch.bfloat16 if device == 'cuda' else torch.float32,
    trust_remote_code=True,
    attn_implementation="sdpa" # Explicitly request SDPA
)
model = model.to(device).eval()
print("Model loaded.")

MASK_TOKEN = tokenizer.mask_token
MASK_ID = tokenizer.mask_token_id
PAD_ID = tokenizer.pad_token_id
EOS_ID = tokenizer.eos_token_id

if MASK_ID is None:
    raise ValueError("Cannot determine MASK_ID. Check model's tokenizer configuration.")

SPECIAL_TOKEN_IDS = {PAD_ID, EOS_ID, MASK_ID}
IM_START_ID = tokenizer.convert_tokens_to_ids("<|im_start|>")
IM_END_ID = tokenizer.convert_tokens_to_ids("<|im_end|>")
# convert_tokens_to_ids returns None (or the unk id) for tokens missing from
# the vocab rather than raising KeyError, so check the result explicitly.
if IM_START_ID is None or IM_END_ID is None:
    print("Warning: <|im_start|> or <|im_end|> not found in tokenizer vocab.")
    IM_START_ID = None
    IM_END_ID = None
else:
    SPECIAL_TOKEN_IDS.add(IM_START_ID)
    SPECIAL_TOKEN_IDS.add(IM_END_ID)


# --- Helper Functions ---
def parse_constraints(constraints_text: str) -> Dict[int, List[int]]:
    """ Parses word constraints. """
    constraints = {}
    if not constraints_text: return constraints
    parts = constraints_text.split(',')
    for part in parts:
        part = part.strip()
        if ':' not in part: continue
        pos_str, word = part.split(':', 1)
        try:
            pos = int(pos_str.strip())
            word = word.strip()
            token_ids = []
            if word:
                text_to_encode = (" " + word) if (pos > 0 and not word.startswith(" ")) else word
                token_ids = tokenizer.encode(text_to_encode, add_special_tokens=False)
            if token_ids and pos >= 0: constraints[pos] = token_ids
            elif not token_ids and word: print(f"Warning: Could not tokenize constraint word '{word}'")
        except ValueError: print(f"Warning: Invalid position '{pos_str}' in constraint part '{part}'")
        except Exception as e: print(f"Warning: Error processing constraint '{part}': {e}")
    return constraints
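# Illustrative example (the exact token ids depend on the tokenizer):
#   parse_constraints("0:Once, 5:upon") -> {0: encode("Once"), 5: encode(" upon")}
# A leading space is prepended for positions > 0 so the word tokenizes as a
# continuation rather than a sentence start.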

# Note: chat history is kept in the Gradio "messages" format (a list of
# {"role": ..., "content": ...} dicts) throughout, so no conversion helper is needed.

def apply_constraints_to_state(
    x: torch.Tensor,
    prompt_length: int,
    total_length: int,
    parsed_constraints: Dict[int, List[int]],
    current_step: Optional[int] = None
) -> torch.Tensor:
    """ Applies constraints directly to the state tensor `x`. """
    modified_x = x.clone()
    for rel_pos, word_token_ids in parsed_constraints.items():
        abs_start_pos = prompt_length + rel_pos
        abs_end_pos = abs_start_pos + len(word_token_ids)
        if abs_start_pos < total_length and abs_end_pos <= total_length:
            try:
                constraint_tensor = torch.tensor(word_token_ids, dtype=torch.long, device=modified_x.device)
                modified_x[0, abs_start_pos:abs_end_pos] = constraint_tensor
            except IndexError: print(f"Warning (Step {current_step}): Constraint at {rel_pos} ('{tokenizer.decode(word_token_ids)}') goes out of bounds.")
            except Exception as e: print(f"Warning (Step {current_step}): Failed to apply constraint at {rel_pos}: {e}")
    return modified_x
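# Note: this is re-applied after every diffusion step in the main loop, so
# constraints overwrite whatever the sampler placed at the constrained positions.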


# --- Core Generation Logic with Live Visualization ---

@spaces.GPU
@torch.no_grad()
def generate_dream_response(
    history_dict_list: List[Dict[str, str]], # Now expects list of dicts
    gen_length: int,
    steps: int,
    constraints_text: str,
    temperature: float,
    top_p: Optional[float],
    top_k: Optional[int],
    alg: str,
    alg_temp: Optional[float],
    visualization_delay: float
    ):
    """ Generator: yields (updated history, visualization data, response text) after each diffusion step. """

    if not history_dict_list or history_dict_list[-1]['role'] != 'user':
        # Handle cases where history is empty or doesn't end with user message
        # This check might be redundant if add_user_message handles it, but good for safety.
        yield history_dict_list, [("No user message found.", "red")], ""
        return

    # --- 1. Preparation ---
    parsed_constraints = parse_constraints(constraints_text)

    # Prepare history for the model template (don't include the empty assistant msg yet)
    history_for_template = history_dict_list # Already in list-of-dicts format

    try:
        inputs = tokenizer.apply_chat_template(
            history_for_template, # Pass the list of dicts directly
            return_tensors="pt",
            return_dict=True,
            add_generation_prompt=True # Crucial: Adds the '<|im_start|>assistant\n' turn
        )
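        # For a single user turn the rendered prompt looks roughly like
        #   <|im_start|>user\nHello<|im_end|>\n<|im_start|>assistant\n
        # (the exact rendering depends on the model's chat template).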
        input_ids = inputs.input_ids.to(device)
        prompt_attention_mask = inputs.attention_mask.to(device) if 'attention_mask' in inputs else torch.ones_like(input_ids)
        prompt_length = input_ids.shape[1]
    except Exception as e:
        print(f"Error applying chat template: {e}")
        traceback.print_exc()
        yield history_dict_list, [("Error preparing input.", "red")], ""
        return

    eps = 1e-3
    top_p_val = top_p if top_p is not None and 0.0 < top_p < 1.0 else None
    top_k_val = top_k if top_k is not None and top_k > 0 else None
    alg_temp_val = alg_temp if alg in ['maskgit_plus', 'topk_margin', 'entropy'] and alg_temp is not None and alg_temp > 0 else None

    # --- 2. Initialize Generation State ---
    total_length = prompt_length + gen_length
    initial_generation_part = torch.full((1, gen_length), MASK_ID, dtype=torch.long, device=device)
    x = torch.cat((input_ids, initial_generation_part), dim=1)

    generation_attention_mask = torch.ones((1, gen_length), dtype=torch.long, device=device)
    full_attention_mask_long = torch.cat((prompt_attention_mask, generation_attention_mask), dim=1)

    attention_mask_for_model = full_attention_mask_long.to(model.dtype)
    large_neg_val = torch.finfo(model.dtype).min
    attention_mask_for_model = (1.0 - attention_mask_for_model) * large_neg_val
    attention_mask_for_model = attention_mask_for_model.unsqueeze(1).unsqueeze(2) # [B, 1, 1, N]
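    # Net effect: the 0/1 mask becomes an additive float mask of shape [B, 1, 1, N],
    # where kept positions contribute 0.0 and masked positions the dtype's most
    # negative value, which softmax then drives to ~zero attention weight.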

    timesteps = torch.linspace(1, eps, steps + 1, device=device)
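    # e.g. steps=4, eps=1e-3 gives timesteps = [1.0, 0.75025, 0.5005, 0.25075, 0.001];
    # each step reveals roughly a (1 - s/t) fraction of the still-masked tokens.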
    x = apply_constraints_to_state(x, prompt_length, total_length, parsed_constraints, current_step=-1)

    # --- 3. Visualization & History Setup ---
    previous_tokens_vis = None
    final_response_text = ""
    # The history_dict_list is the state we update and yield for the chatbot UI
    # Add the empty assistant message placeholder *to the history state* now
    history_dict_list.append({"role": "assistant", "content": ""})

    # --- 4. Initial Yield (Masked State) ---
    initial_generated_tokens = x[0, prompt_length:].cpu()
    vis_data_initial = []
    for tok_id in initial_generated_tokens.tolist():
        display_token = MASK_TOKEN
        color = "#444444"
        vis_data_initial.append((display_token, color))

    previous_tokens_vis = initial_generated_tokens
    # Yield the history (which now includes the empty assistant turn)
    yield history_dict_list, vis_data_initial, ""
    time.sleep(visualization_delay)

    # --- 5. Step-by-Step Diffusion Loop ---
    try:
        start_time = time.time()
        for i in range(steps):
            mask_index = (x == MASK_ID)
            if not mask_index.any():
                 print(f"No mask tokens left at step {i}. Stopping early.")
                 break

            outputs = model(
                input_ids=x,
                attention_mask=attention_mask_for_model,
                position_ids=None, use_cache=False, return_dict=True
            )
            logits = outputs.logits
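            # Dream's output logits are shifted by one position (logits[:, i]
            # scores the token at position i+1), so realign them with the
            # positions they predict; position 0 keeps its own logit as filler.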
            logits = torch.cat([logits[:,:1], logits[:, :-1]], dim=1)

            mask_logits = logits[mask_index]
            if mask_logits.numel() == 0:
                 print(f"No masked tokens found for logit selection at step {i}. Stopping.")
                 break

            t = timesteps[i]
            s = timesteps[i + 1]
            x_new_masked_part = torch.full_like(x[mask_index], MASK_ID, device=device, dtype=torch.long)

            # Two reveal strategies: 'origin' randomly unmasks a fraction of
            # positions each step; the others unmask the most confident ones.
            if alg == 'origin':
                p_transfer = (1.0 - s / t) if i < steps - 1 else 1.0
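                # e.g. t=0.5005, s=0.25075 -> p_transfer ~0.499: about half of the
                # remaining masked positions are revealed; the last step forces 1.0.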
                num_masked = mask_logits.shape[0]
                transfer_indices_relative = torch.rand(num_masked, device=device) < p_transfer
                logits_to_sample = mask_logits[transfer_indices_relative]
                if logits_to_sample.numel() > 0:
                    _, sampled_tokens = sample_tokens(logits_to_sample, temperature=temperature, top_p=top_p_val, top_k=top_k_val)
                    if transfer_indices_relative.sum() == sampled_tokens.numel(): # Basic check
                        x_new_masked_part[transfer_indices_relative] = sampled_tokens
                    else: print(f"Warning step {i} (origin): Mismatch transfer indices and sampled tokens.")


            else: # Confidence-based
                use_margin = (alg == 'topk_margin')
                use_entropy = (alg == 'entropy')
                confidence, x0_candidates = sample_tokens(mask_logits, temperature=temperature, top_p=top_p_val, top_k=top_k_val, margin_confidence=use_margin, neg_entropy=use_entropy)

                num_mask_token = mask_logits.shape[0]
                target_num_revealed_float = num_mask_token * (1.0 - s / t)
                number_transfer_tokens = int(target_num_revealed_float) if i < steps - 1 else num_mask_token
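                # e.g. 100 masked tokens with t=0.5005, s=0.25075 reveal
                # int(100 * 0.499) = 49 tokens this step; the final step reveals all.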

                if number_transfer_tokens > 0:
                    num_samples = min(number_transfer_tokens, num_mask_token)
                    if num_samples > 0:
                        transfer_indices_relative = torch.tensor([], dtype=torch.long, device=device) # Init empty
                        if alg_temp_val is None or alg_temp_val <= 0: # Top-k
                            sort_metric = confidence
                            k_topk = min(num_samples, sort_metric.numel())
                            if k_topk > 0: _, transfer_indices_relative = torch.topk(sort_metric, k=k_topk)
                        else: # Sample based on temp
                            if confidence.numel() > 0:
                                conf_probs = confidence / alg_temp_val
                                conf_probs = torch.nan_to_num(conf_probs, nan=0.0, posinf=1e9, neginf=-1e9)
                                conf_probs = torch.clamp(conf_probs - conf_probs.max(), min=-30)
                                conf_probs = F.softmax(conf_probs, dim=-1)
                                conf_probs = torch.clamp(conf_probs, min=0.0)
                                conf_probs = torch.nan_to_num(conf_probs, nan=0.0)
                                prob_sum = conf_probs.sum()
                                target_sum_tensor = torch.tensor(1.0, device=device, dtype=prob_sum.dtype)
                                if not torch.isclose(prob_sum, target_sum_tensor, atol=1e-4) and prob_sum > 0:
                                    safe_prob_sum = torch.max(prob_sum, torch.tensor(1e-12, device=device, dtype=prob_sum.dtype))
                                    conf_probs = conf_probs / safe_prob_sum
                                final_prob_sum_check = conf_probs.sum()
                                if conf_probs.numel() > 0 and num_samples > 0 and torch.all(conf_probs >= 0) and torch.isclose(final_prob_sum_check, target_sum_tensor, atol=1e-4):
                                    try: transfer_indices_relative = torch.multinomial(conf_probs, num_samples=num_samples, replacement=False)
                                    except RuntimeError as e:
                                        print(f"Warning step {i}: Multinomial sampling failed ('{e}'). Falling back to top-k.")
                                        sort_metric = confidence
                                        k_multinomial_fallback = min(num_samples, sort_metric.numel())
                                        if k_multinomial_fallback > 0: _, transfer_indices_relative = torch.topk(sort_metric, k=k_multinomial_fallback)
                                else: # Fallback if probs invalid for multinomial
                                    # print(f"Warning step {i}: Invalid probabilities for multinomial sampling (sum={final_prob_sum_check:.4f}). Falling back to top-k.")
                                    sort_metric = confidence
                                    k_multinomial_fallback = min(num_samples, sort_metric.numel())
                                    if k_multinomial_fallback > 0: _, transfer_indices_relative = torch.topk(sort_metric, k=k_multinomial_fallback)

                        # Apply transfer
                        if transfer_indices_relative.numel() > 0:
                             if x0_candidates.numel() > 0 and transfer_indices_relative.max() < x0_candidates.shape[0]:
                                 if transfer_indices_relative.max() < x_new_masked_part.shape[0]:
                                      x_new_masked_part[transfer_indices_relative] = x0_candidates[transfer_indices_relative].clone()
                                 else: print(f"Warning step {i}: transfer_indices out of bounds for x_new_masked_part.")
                             else: print(f"Warning step {i}: transfer_indices out of bounds for x0_candidates or x0_candidates empty.")


            x[mask_index] = x_new_masked_part
            x = apply_constraints_to_state(x, prompt_length, total_length, parsed_constraints, current_step=i)

            # --- Yield Visualization & Update History ---
            current_generated_tokens = x[0, prompt_length:].cpu()
            vis_data = []
            # Map each generated token to a (display_string, color) pair for the
            # HighlightedText widget: grey = still masked, green = newly revealed
            # this step, blue = revealed in an earlier step.
            for j in range(gen_length):
                current_tok_id = current_generated_tokens[j].item()
                previous_tok_id = previous_tokens_vis[j].item() if previous_tokens_vis is not None and j < len(previous_tokens_vis) else MASK_ID
                try:
                    decoded_token = tokenizer.decode([current_tok_id], skip_special_tokens=False, clean_up_tokenization_spaces=False)
                    display_token = MASK_TOKEN if current_tok_id == MASK_ID else decoded_token
                except Exception: display_token = f"[ID:{current_tok_id}]"
                color = None; token_to_display = display_token
                if current_tok_id == MASK_ID: color = "#444444"
                elif previous_tok_id == MASK_ID: color = "#66CC66"
                else: color = "#6699CC"
                should_hide = (PAD_ID is not None and current_tok_id == PAD_ID) or (EOS_ID is not None and current_tok_id == EOS_ID)
                if should_hide and previous_tok_id == current_tok_id: token_to_display = ""; color = None
                if token_to_display: vis_data.append((token_to_display, color))

            previous_tokens_vis = current_generated_tokens

            intermediate_response_tokens = x[0, prompt_length:]
            intermediate_response_text = tokenizer.decode(
                intermediate_response_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=True
            ).strip()

            # --- Update the *last* message in history_dict_list ---
            history_dict_list[-1]['content'] = intermediate_response_text

            # Yield the updated history list (for chatbot UI), vis data, and response text
            yield history_dict_list, vis_data, intermediate_response_text
            time.sleep(visualization_delay)

        end_time = time.time()
        print(f"Dream generation finished in {end_time - start_time:.2f} seconds.")

        # --- 6. Final Processing & Yield ---
        final_sequence = x[0]
        response_tokens = final_sequence[prompt_length:]
        final_response_text = tokenizer.decode(
            response_tokens, skip_special_tokens=True, clean_up_tokenization_spaces=True
        ).strip()

        # Ensure the final text is in the history object before the last yield
        history_dict_list[-1]['content'] = final_response_text

        final_generated_tokens = x[0, prompt_length:].cpu()
        vis_data_final = []
        # Build the final visualization frame (same color scheme as the per-step loop).
        for j in range(gen_length):
            current_tok_id = final_generated_tokens[j].item()
            previous_tok_id = previous_tokens_vis[j].item() if previous_tokens_vis is not None and j < len(previous_tokens_vis) else MASK_ID
            try:
                decoded_token = tokenizer.decode([current_tok_id], skip_special_tokens=False, clean_up_tokenization_spaces=False)
                display_token = MASK_TOKEN if current_tok_id == MASK_ID else decoded_token
            except Exception: display_token = f"[ID:{current_tok_id}]"
            color = None; token_to_display = display_token
            if current_tok_id == MASK_ID: color = "#444444"
            elif previous_tok_id == MASK_ID: color = "#66CC66"
            else: color = "#6699CC"
            should_hide = (PAD_ID is not None and current_tok_id == PAD_ID) or (EOS_ID is not None and current_tok_id == EOS_ID)
            if should_hide and previous_tok_id == current_tok_id: token_to_display = ""; color = None
            if token_to_display: vis_data_final.append((token_to_display, color))

        yield history_dict_list, vis_data_final, final_response_text
        print("Visualization streaming complete.")

    except Exception as e:
        print(f"Error during generation or processing: {e}")
        traceback.print_exc()
        # Attempt to add error message to history if possible
        if history_dict_list and history_dict_list[-1]['role'] == 'assistant':
             history_dict_list[-1]['content'] = f"Error: {e}"
        yield history_dict_list, [("Error during generation.", "red")], f"Error: {e}" # Also show error in text box
        return


# --- Gradio UI ---
css = '''
.category-legend{display:none}
'''
def create_chatbot_demo():
    with gr.Blocks(css=css) as demo:
        gr.Markdown("# Dream 7B - Diffusion Language Model Demo")
        gr.Markdown(
            "[[Model Card](https://huggingface.co/Dream-org/Dream-v0-Instruct-7B)] "
            "[[Blog](https://hkunlp.github.io/blog/2025/dream/)]"
        )

        with gr.Row():
            with gr.Column(scale=3):
                chatbot_ui = gr.Chatbot(
                    label="Conversation",
                    height=500,
                    show_copy_button=True,
                    bubble_full_width=False,
                    value=[], # Initialize empty
                    type="messages" # Crucial: Use the messages format
                )
                with gr.Group():
                    with gr.Row():
                        user_input = gr.Textbox(
                            label="Your Message", placeholder="Type your message here...",
                            scale=7, autofocus=True, show_label=False, container=False
                        )
                        send_btn = gr.Button("Send", scale=1, variant="primary")
                constraints_input = gr.Textbox(
                    label="Word Constraints (Optional)",
                    info="Format: 'pos:word, pos:word,...'. Example: '0:Once, 5:upon'",
                    placeholder="0:Hello, 10:world", value=""
                )
            with gr.Column(scale=2):
                output_vis = gr.HighlightedText(
                    label="Denoising Process Visualization", combine_adjacent=False,
                    show_legend=True, interactive=False
                )
                response_text_display = gr.Textbox(
                    label="Current/Final Response", interactive=False, lines=5, visible=False
                )

        with gr.Accordion("Generation Settings", open=False):
            with gr.Row():
                gen_length = gr.Slider(minimum=16, maximum=512, value=128, step=8, label="Max New Tokens")
                steps = gr.Slider(minimum=8, maximum=512, value=128, step=8, label="Diffusion Steps")
            with gr.Row():
                temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.4, step=0.05, label="Temperature (0 = greedy)")
                alg_temp = gr.Slider(minimum=0.0, maximum=1.0, value=0.1, step=0.05, label="Remasking Temp (Conf Algs)")
            with gr.Row():
                top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.05, label="Top-P (0 disables)")
                top_k = gr.Slider(minimum=0, maximum=200, value=0, step=5, label="Top-K (0 disables)")
            with gr.Row():
                remasking_strategy = gr.Radio(choices=['origin', 'maskgit_plus', 'topk_margin', 'entropy'], value='origin', label="Remasking Strategy")
            with gr.Row():
                visualization_delay = gr.Slider(minimum=0.0, maximum=0.5, value=0.0, step=0.01, label="Visualization Delay (s)")

        clear_btn = gr.Button("Clear Conversation")

        # --- Event Handlers ---

        # User function: Appends user message to the history (list of dicts)
        def add_user_message(message: str, history: List[Dict[str, str]]):
            if not message.strip():
                gr.Warning("Please enter a message.")
                return history, "" # Return unchanged history, empty input
            history.append({"role": "user", "content": message})
            # Return updated history for chatbot UI, and clear input box
            return history, ""

        # Bot function (now the generator)
        # Inputs: Chatbot history (list of dicts), generation params
        # Outputs: Chatbot history (updated list of dicts), visualization, response text
        generation_inputs = [
            chatbot_ui, # Pass chatbot state directly (list of dicts)
            gen_length, steps, constraints_input,
            temperature, top_p, top_k, remasking_strategy, alg_temp,
            visualization_delay
        ]
        generation_outputs = [chatbot_ui, output_vis, response_text_display]

        # --- Connect UI elements ---

        # Textbox Submission (Enter key)
        submit_listener = user_input.submit(
            fn=add_user_message,
            inputs=[user_input, chatbot_ui],
            outputs=[chatbot_ui, user_input] # Update chatbot UI and clear input
        ).then(
            fn=generate_dream_response,
            inputs=generation_inputs,
            outputs=generation_outputs,
            show_progress="hidden" # Hide default progress bar
        )

        # Send Button Click
        click_listener = send_btn.click(
            fn=add_user_message,
            inputs=[user_input, chatbot_ui],
            outputs=[chatbot_ui, user_input] # Update chatbot UI and clear input
        ).then(
            fn=generate_dream_response,
            inputs=generation_inputs,
            outputs=generation_outputs,
            show_progress="hidden"
        )

        # Clear Button Action
        clear_btn.click(
            lambda: ([], [], ""), # Function to return empty values
            inputs=[],
            outputs=[chatbot_ui, output_vis, response_text_display], # Clear chatbot, vis, text
            queue=False # No need to queue clearing usually
        )

    return demo

# --- Launch ---
if __name__ == "__main__":
    demo = create_chatbot_demo()
    demo.queue().launch(debug=True, share=False)