Update app.py
app.py
CHANGED
@@ -274,9 +274,10 @@ def generate(model, idx, max_new_tokens, context_size, tokenizer, text_to_token_
     # print(token_ids_to_text(idx_next, tokenizer))
     # Same as before: append sampled index to the running sequence
     idx = torch.cat((idx, idx_next), dim=1) # (batch_size, num_tokens+1)
-    new_idx = re.sub(".", ". Meow.", idx)
+    # new_idx = re.sub(".", ". Meow.", idx)
 
-    return new_idx
+    # return new_idx
+    return idx
 
 def text_to_token_ids(text, tokenizer):
     encoded = tokenizer.encode(text, allowed_special={'<|endoftext|>'})
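A side note on the reverted lines (an illustration, not part of this commit): re.sub operates on strings, while idx is a torch tensor of token ids, so the old code would raise a TypeError before ever returning. A substitution like this would have to run on decoded text instead. A minimal sketch, assuming the decoded output is a plain string (the token ids and sample text below are made up):

import re

import torch

# Hypothetical token ids; in app.py this is the tensor built up by generate().
idx = torch.tensor([[464, 2068, 7586]])

# The reverted line: re.sub expects a string or bytes-like object, so passing
# the tensor raises TypeError.
# re.sub(".", ". Meow.", idx)

# If the substitution is wanted, decode first (a stand-in for
# token_ids_to_text(idx, tokenizer)), then operate on the text.
text = "It ends here. And here."
meowed = re.sub(r"\.", ". Meow.", text)  # escape the dot: a bare "." matches any character
print(meowed)  # It ends here. Meow. And here. Meow.

Note that the original pattern used an unescaped ".", which in a regex matches any character, so even on a string it would have replaced every character rather than just the periods.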