multimodalart (HF Staff) committed
Commit b972f40 · verified · 1 Parent(s): de0b990

Update app.py

Files changed (1):
  1. app.py +2 -3
app.py CHANGED
@@ -114,6 +114,8 @@ if PIPELINE_CONFIG_YAML.get("spatial_upscaler_model_path"):
     )
     print("Latent upsampler created on CPU.")
 
+target_inference_device = "cuda"
+print(f"Target inference device: {target_inference_device}")
 pipeline_instance.to(target_inference_device)
 latent_upsampler_instance.to(target_inference_device)
 
@@ -125,9 +127,6 @@ def generate(prompt, negative_prompt, input_image_filepath, input_video_filepath
              seed_ui, randomize_seed, ui_guidance_scale, improve_texture_flag,
              progress=gr.Progress(track_tqdm=True)):
 
-    target_inference_device = get_device()
-    print(f"Target inference device: {target_inference_device}")
-
     if randomize_seed:
         seed_ui = random.randint(0, 2**32 - 1)
     seed_everething(int(seed_ui))
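Taken together, the two hunks move device selection out of generate() and into module scope: instead of resolving the device with get_device() on every call, app.py now hardcodes "cuda" once and moves both models to the GPU right after they are created. The following is a minimal sketch of the resulting flow, not the actual app.py: the constructor names and the trimmed-down generate() signature are illustrative stand-ins, and, like the commit, it assumes a CUDA-capable machine.

import random

import torch


# Hypothetical stand-ins for the real model constructors used earlier in
# app.py; only the device-placement pattern below comes from the commit.
def create_ltx_video_pipeline() -> torch.nn.Module:
    return torch.nn.Linear(8, 8)


def create_latent_upsampler() -> torch.nn.Module:
    return torch.nn.Linear(8, 8)


# Models are first built on CPU, as the surrounding log line suggests
# ("Latent upsampler created on CPU.").
pipeline_instance = create_ltx_video_pipeline()
latent_upsampler_instance = create_latent_upsampler()

# After this commit, the device is fixed to "cuda" at module scope and both
# models are moved once at import time, rather than re-selecting a device
# inside generate() on every request.
target_inference_device = "cuda"
print(f"Target inference device: {target_inference_device}")

pipeline_instance.to(target_inference_device)
latent_upsampler_instance.to(target_inference_device)


def generate(prompt, seed_ui, randomize_seed=True):
    # generate() no longer calls get_device(); it relies on the module-level
    # placement above and only handles per-call concerns such as seeding.
    if randomize_seed:
        seed_ui = random.randint(0, 2**32 - 1)
    ...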