Spaces: Running on Zero
guangkaixu committed
Commit 336b7b8 • 1 Parent(s): c5642f1
Update app.py
app.py
CHANGED
@@ -333,22 +333,22 @@ def run_demo_server(pipe_depth, pipe_normal, pipe_dis):
 
     filenames = ['normal_1.jpg','normal_10.jpg']
     # filenames.extend(["normal_%d.jpg" %(i+1) for i in range(10)])
-
-    print('line 337', __file__)
-    example_folder = os.path.join(os.path.dirname(__file__), "images")
-    # print(example_folder)
+    example_folder = "images"
+    # print('line 337', __file__)
+    # example_folder = os.path.join(os.path.dirname(__file__), "images")
 
     Examples(
         fn=process_pipe_normal,
         examples=[
-            os.path.join(
+            os.path.join(example_folder, name)
             for name in filenames
         ],
         inputs=[normal_image_input],
         outputs=[normal_image_output, normal_image_output_files],
-
+        cache_examples=True,
         # directory_name="examples_normal",
-
+        directory_name="images_cache",
+        # cache_examples=False,
     )
 
 
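This first hunk drops the `__file__`-based example folder in favour of a plain relative "images" path and turns on cached example outputs, written under "images_cache". Below is a minimal sketch of the same pattern with stock gradio; note that the Space uses its own `Examples` helper, whose `directory_name` argument is not part of stock `gr.Examples` and appears to choose where the cached results are stored. The `process_pipe_normal` stub here is a placeholder, not the Space's real pipeline call.

# Minimal sketch with stock gradio (assumed layout; not the Space's exact code).
import os
import gradio as gr

def process_pipe_normal(image_path):
    # Placeholder for the real surface-normal pipeline call.
    return image_path

example_folder = "images"  # relative path, resolved against the working directory
filenames = ['normal_1.jpg', 'normal_10.jpg']

with gr.Blocks() as demo:
    normal_image_input = gr.Image(type="filepath")
    normal_image_output = gr.Image()
    gr.Examples(
        examples=[os.path.join(example_folder, name) for name in filenames],
        fn=process_pipe_normal,
        inputs=[normal_image_input],
        outputs=[normal_image_output],
        cache_examples=True,  # precompute outputs for every example at startup and reuse them
    )

With caching enabled, clicking an example serves the stored result instead of re-running the model, which is presumably why the commit also points the cache at a dedicated, writable directory.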
@@ -404,9 +404,9 @@ def run_demo_server(pipe_depth, pipe_normal, pipe_dis):
         ],
         inputs=[dis_image_input],
         outputs=[dis_image_output, dis_image_output_files],
-
-
-        cache_examples=False,
+        cache_examples=True,
+        directory_name="images_cache",
+        # cache_examples=False,
     )
 
 
@@ -528,7 +528,7 @@ def main():
     dtype = torch.float16
 
     vae = AutoencoderKL.from_pretrained("guangkaixu/GenPercept", subfolder='vae').to(dtype)
-    unet_depth_v1 = UNet2DConditionModel.from_pretrained('guangkaixu/
+    unet_depth_v1 = UNet2DConditionModel.from_pretrained('guangkaixu/genpercept-depth', subfolder="unet").to(dtype)
     unet_normal_v1 = UNet2DConditionModel.from_pretrained('guangkaixu/GenPercept', subfolder="unet_normal_v1").to(dtype)
     unet_dis_v1 = UNet2DConditionModel.from_pretrained('guangkaixu/GenPercept', subfolder="unet_dis_v1").to(dtype)
 
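The last hunk repoints the depth UNet at the standalone guangkaixu/genpercept-depth repository instead of a subfolder of the combined guangkaixu/GenPercept repo. A self-contained sketch of the loading pattern, with repo ids and subfolders taken from the diff and the rest standard diffusers usage:

import torch
from diffusers import AutoencoderKL, UNet2DConditionModel

dtype = torch.float16

# The shared VAE still comes from the combined GenPercept repo.
vae = AutoencoderKL.from_pretrained("guangkaixu/GenPercept", subfolder="vae").to(dtype)

# After this commit the depth UNet is loaded from its own repo, where the
# weights live under the conventional "unet" subfolder ...
unet_depth_v1 = UNet2DConditionModel.from_pretrained("guangkaixu/genpercept-depth", subfolder="unet").to(dtype)

# ... while the normal and DIS UNets keep their task-specific subfolders.
unet_normal_v1 = UNet2DConditionModel.from_pretrained("guangkaixu/GenPercept", subfolder="unet_normal_v1").to(dtype)
unet_dis_v1 = UNet2DConditionModel.from_pretrained("guangkaixu/GenPercept", subfolder="unet_dis_v1").to(dtype)

Here `subfolder` selects one component directory inside a model repository, so each call fetches only that component's weights, and `.to(dtype)` casts them to float16 before the UNets are assembled into the per-task pipelines.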