fffiloni committed · Commit 3bec464 · verified · 1 Parent(s): 3d05192

fix error_status typo

Files changed (1): app.py (+4 −4)
app.py CHANGED
@@ -114,10 +114,10 @@ def generate_image(setup_args, num_iterations):
     except torch.cuda.OutOfMemoryError as e:
         # Handle CUDA OOM error
         print("CUDA Out of Memory Error: ", e)
-        status["error_occurred"] = True # Update status on error
+        error_status["error_occurred"] = True # Update status on error
     except RuntimeError as e:
         if 'out of memory' in str(e):
-            status["error_occurred"] = True # Update status on error
+            error_status["error_occurred"] = True # Update status on error
         else:
             raise # Reraise if it's not a CUDA OOM error

@@ -126,7 +126,7 @@ def generate_image(setup_args, num_iterations):
     main_thread.start()

     last_step_yielded = 0
-    while main_thread.is_alive() and not status["error_occurred"]:
+    while main_thread.is_alive() and not error_status["error_occurred"]:
         # Check if new steps have been completed
         if steps_completed and steps_completed[-1] > last_step_yielded:
             last_step_yielded = steps_completed[-1]

@@ -142,7 +142,7 @@ def generate_image(setup_args, num_iterations):
         time.sleep(0.1)

     # If an error occurred, clean up resources and stop
-    if status["error_occurred"]:
+    if error_status["error_occurred"]:
         torch.cuda.empty_cache() # Free up cached memory
         yield (None, "CUDA out of memory. Please reduce your batch size or image resolution.", None)
     else:
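For context: the shared dict is presumably created earlier in generate_image as error_status = {"error_occurred": False} (that part of the file is not shown in this diff). Before this commit, all four references used the undefined name status, so the OOM handlers themselves would raise a NameError and the error flag never reached the polling loop. Below is a minimal, runnable sketch of the thread-plus-shared-flag pattern the function relies on. The names error_status, steps_completed, last_step_yielded, and main_thread follow the diff; the simulated workload, the MemoryError trigger, and the yielded strings are hypothetical stand-ins, not the app's real code.

# Sketch of the shared-error-flag pattern from the patched function.
# Assumptions are marked; only the control flow mirrors the diff.
import threading
import time

def generate(num_iterations):
    error_status = {"error_occurred": False}  # shared state, written by the worker thread
    steps_completed = []                      # worker appends finished step indices

    def worker():
        try:
            for step in range(1, num_iterations + 1):
                time.sleep(0.05)              # hypothetical stand-in for one optimization step
                if step == 999:               # hypothetical OOM trigger, never fires here
                    raise MemoryError
                steps_completed.append(step)
        except MemoryError:
            # Set the flag instead of raising: exceptions in a worker thread
            # do not propagate to the thread running the generator.
            error_status["error_occurred"] = True

    main_thread = threading.Thread(target=worker)  # name kept from the diff
    main_thread.start()

    last_step_yielded = 0
    while main_thread.is_alive() and not error_status["error_occurred"]:
        # Check if new steps have been completed
        if steps_completed and steps_completed[-1] > last_step_yielded:
            last_step_yielded = steps_completed[-1]
            yield f"step {last_step_yielded}/{num_iterations}"
        time.sleep(0.1)

    # If an error occurred, report it and stop
    if error_status["error_occurred"]:
        yield "error: out of memory"
    else:
        yield "done"

for update in generate(5):
    print(update)

Mutating a dict rather than rebinding a bare boolean is what makes the pattern work: error_occurred = True inside worker would only rebind a local name, while error_status["error_occurred"] = True mutates the one object both threads share, and under CPython's GIL that single dict write is safe without an explicit lock.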