Spaces: Running on Zero

attempt to solve gpu error
app.py CHANGED
@@ -335,6 +335,7 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
 
         actual_prediction_length = max(1, actual_prediction_length)
 
+        # Use predict_quantiles with proper formatting
         with torch.amp.autocast('cuda'):
             # Ensure all inputs are on GPU
             context = context.to(device)
@@ -463,6 +464,44 @@ def make_prediction(symbol: str, timeframe: str = "1d", prediction_days: int = 5
             # Force synchronization again to ensure all tensors are on GPU
             torch.cuda.synchronize()
 
+            # Ensure tokenizer is on GPU and all its tensors are on GPU
+            if hasattr(pipe, 'tokenizer'):
+                # Move tokenizer to GPU if it supports it
+                if hasattr(pipe.tokenizer, 'to'):
+                    pipe.tokenizer = pipe.tokenizer.to(device)
+
+                # Move all tokenizer tensors to GPU
+                for name, value in pipe.tokenizer.__dict__.items():
+                    if isinstance(value, torch.Tensor):
+                        setattr(pipe.tokenizer, name, value.to(device))
+
+                # Move any additional tensors in the tokenizer's modules to GPU
+                for name, module in pipe.tokenizer.named_modules():
+                    if hasattr(module, 'to'):
+                        module.to(device)
+                    # Move any tensors in the module's __dict__
+                    for key, value in module.__dict__.items():
+                        if isinstance(value, torch.Tensor):
+                            setattr(module, key, value.to(device))
+
+                # Move any additional tensors in the tokenizer's buffers to GPU
+                for name, buffer in pipe.tokenizer.named_buffers():
+                    if buffer is not None:
+                        pipe.tokenizer.register_buffer(name, buffer.to(device))
+
+                # Move any additional tensors in the tokenizer's parameters to GPU
+                for name, param in pipe.tokenizer.named_parameters():
+                    if param is not None:
+                        param.data = param.data.to(device)
+
+                # Move any additional tensors in the tokenizer's attributes to GPU
+                for name, value in pipe.tokenizer.__dict__.items():
+                    if isinstance(value, torch.Tensor):
+                        pipe.tokenizer.__dict__[name] = value.to(device)
+
+            # Force synchronization again to ensure all tensors are on GPU
+            torch.cuda.synchronize()
+
             # Make prediction
             quantiles, mean = pipe.predict_quantiles(
                 context=context,
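In short, the commit pushes every tensor the pipeline's tokenizer owns onto the CUDA device (direct attributes, submodule tensors, buffers, and parameters) before calling pipe.predict_quantiles, with torch.cuda.synchronize() fences around the move. The named_modules/named_buffers/named_parameters loops implicitly assume the tokenizer is a torch.nn.Module. Below is a condensed sketch of the same technique as a standalone helper; the helper name move_tokenizer_to_device is mine, not from the commit, and the nn.Module assumption is hedged with a plain-attribute fallback.

# Sketch only: condenses the commit's tokenizer-to-GPU logic into one helper.
# Assumes pipe.tokenizer is (or wraps) a torch.nn.Module; loose tensor
# attributes are handled as a fallback.
import torch

def move_tokenizer_to_device(pipe, device: torch.device) -> None:
    tokenizer = getattr(pipe, 'tokenizer', None)
    if tokenizer is None:
        return

    # A Module-style tokenizer moves its registered parameters, buffers,
    # and submodules in a single call.
    if hasattr(tokenizer, 'to'):
        pipe.tokenizer = tokenizer.to(device)
        tokenizer = pipe.tokenizer

    # Fallback for loose tensors stored directly as attributes, which a
    # plain .to() call does not touch.
    for name, value in list(vars(tokenizer).items()):
        if isinstance(value, torch.Tensor):
            setattr(tokenizer, name, value.to(device))

    # Block until the copies complete, mirroring the commit's fences.
    if torch.cuda.is_available():
        torch.cuda.synchronize()

Called as move_tokenizer_to_device(pipe, device) just before pipe.predict_quantiles(...), this covers the same cases as the diff. Note that when the tokenizer really is an nn.Module, the single .to(device) call already relocates its registered parameters and buffers, so the commit's later named_buffers/named_parameters loops are largely redundant belt-and-suspenders.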