Spaces: Running on Zero
use the MeanScaleUniformBins tokenizer correctly
app.py CHANGED
@@ -421,20 +421,10 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
         if isinstance(value, torch.Tensor):
             pipe.tokenizer.__dict__[name] = value.to(device)

-    #
+    # Remove the EOS token handling since MeanScaleUniformBins doesn't use it
     if hasattr(pipe.tokenizer, '_append_eos_token'):
-        # Create a wrapper
-        original_append_eos = pipe.tokenizer._append_eos_token
+        # Create a wrapper that just returns the input tensors
         def wrapped_append_eos(token_ids, attention_mask):
-            # Ensure both tensors are on GPU
-            token_ids = token_ids.to(device)
-            attention_mask = attention_mask.to(device)
-            # Get the EOS token and ensure it's on GPU
-            eos_token = torch.tensor([pipe.tokenizer.eos_token_id], device=device)
-            eos_tokens = eos_token.unsqueeze(0).expand(token_ids.shape[0], 1)
-            # Concatenate on GPU
-            token_ids = torch.cat((token_ids, eos_tokens), dim=1)
-            attention_mask = torch.cat((attention_mask, torch.ones_like(eos_tokens)), dim=1)
             return token_ids, attention_mask
         pipe.tokenizer._append_eos_token = wrapped_append_eos

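For reference, a minimal sketch of how the patched tokenizer would be exercised end to end. The chronos-forecasting package, the amazon/chronos-t5-small checkpoint, and the toy context series are assumptions for illustration and are not part of this commit; the private _append_eos_token hook may not exist in every version of the library, which is why the hasattr guard is kept.

# Sketch only: assumes chronos-forecasting is installed and that the
# pipeline's tokenizer (MeanScaleUniformBins) exposes _append_eos_token.
import torch
from chronos import ChronosPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = ChronosPipeline.from_pretrained(
    "amazon/chronos-t5-small",   # hypothetical checkpoint, not from this commit
    device_map=device,
    torch_dtype=torch.float32,
)

# Same pattern as the diff above: replace the private EOS hook with a
# pass-through so no EOS token is appended to the quantized context.
if hasattr(pipe.tokenizer, "_append_eos_token"):
    def wrapped_append_eos(token_ids, attention_mask):
        return token_ids, attention_mask
    pipe.tokenizer._append_eos_token = wrapped_append_eos

# Forecast a toy series; the patched tokenizer is used internally by predict().
context = torch.arange(64, dtype=torch.float32)
forecast = pipe.predict(context, prediction_length=5)
print(forecast.shape)  # expected: (1, num_samples, 5)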