kd5678 committed
Commit b0e960e
1 Parent(s): 15f0a6a

Update app.py

Files changed (1): app.py +43 -29
app.py CHANGED
@@ -10,8 +10,8 @@ from torchvision.transforms import functional as F
 from diffusers import (
     AutoPipelineForInpainting,
 )
-from generate_dataset import outpainting_generator_rectangle
-
+from generate_dataset import outpainting_generator_rectangle, merge_images_horizontally
+from ddim_with_prob import DDIMSchedulerCustom
 
 transform = transforms.Compose([
     transforms.ToPILImage(),
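The import hunk adds merge_images_horizontally next to the existing mask generator, plus the custom DDIM scheduler used below. merge_images_horizontally lives in generate_dataset, which this commit does not touch; a minimal sketch of what such a helper plausibly looks like, assuming it simply pastes two PIL images onto one shared canvas:

from PIL import Image

def merge_images_horizontally(left, right):
    # Hypothetical stand-in for generate_dataset.merge_images_horizontally:
    # place the masked input and the inpainted result side by side so a
    # single gr.Image output shows the before/after pair.
    height = max(left.height, right.height)
    canvas = Image.new("RGB", (left.width + right.width, height), "white")
    canvas.paste(left, (0, 0))
    canvas.paste(right, (left.width, 0))
    return canvas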
@@ -23,15 +23,23 @@ def pref_inpainting(image,
                     mask_random_start,
                     steps,
                     ):
-    with open("/data0/kendong/Diffusions/zero123-live/configs/imagereward_train_configs.yaml") as file:
+    with open("./configs/paintreward_train_configs.yaml") as file:
        config_dict= yaml.safe_load(file)
        config = munchify(config_dict)
 
-    pipe = AutoPipelineForInpainting.from_pretrained(
-        '/data1/kendong/joint-rl-diffusion/alignment_log/exp_reward_group_regression_all_1w_1.6boundary/iteration_2560', num_inference_steps=steps)
-    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-    pipe = pipe.to(device)
-
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+
+    pipe_ours = AutoPipelineForInpainting.from_pretrained(
+        './model_ckpt', torch_dtype=torch.float16, variant='fp16')
+    pipe_ours.scheduler = DDIMSchedulerCustom.from_config(pipe_ours.scheduler.config)
+
+    pipe_runway = AutoPipelineForInpainting.from_pretrained("runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, variant='fp16')
+
+
+    pipe_ours = pipe_ours.to(device)
+    pipe_runway = pipe_runway.to(device)
+    print('Loading pipeline')
 
    color, mask = outpainting_generator_rectangle(image, box_width_ratio/100, mask_random_start)
    mask = mask.convert('L')
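This hunk replaces the single hard-coded local checkpoint with two fp16 pipelines, the repo's fine-tuned './model_ckpt' and the stock runwayml/stable-diffusion-inpainting baseline, and swaps in DDIMSchedulerCustom via from_config, the standard diffusers idiom for changing a pipeline's scheduler. The same pattern, with the stock DDIMScheduler standing in for the repo's custom class:

import torch
from diffusers import AutoPipelineForInpainting, DDIMScheduler

pipe = AutoPipelineForInpainting.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    torch_dtype=torch.float16,
    variant="fp16",
)
# from_config copies the existing scheduler's settings (beta schedule,
# timestep spacing, ...), so only the sampling logic changes.
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")

One caution: fp16 weights generally assume a CUDA device, so on the CPU-only Space the title mentions one would usually drop torch_dtype and variant rather than run half-precision on CPU.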
@@ -47,12 +55,16 @@ def pref_inpainting(image,
 
 
    color, mask = transform(color), transform(mask)
-   res = pipe(prompt='', image=color, mask_image=mask, eta=config.eta).images[0]
-
+   res_ours = pipe_ours(prompt='', image=color, mask_image=mask, eta=config.eta).images[0]
+   print('Running inference ours')
+   res_runway = pipe_runway(prompt="", image=color, mask_image=mask).images[0]
+   print('Running inference runway')
 
    # res.save(os.path.join('./', 'test.png'))
-
-   return color, res
+   res_ours = merge_images_horizontally(color, res_ours)
+   res_runway = merge_images_horizontally(color, res_runway)
+
+   return res_ours, res_runway
 
 
 inputs = [
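Both pipeline calls consume the color/mask pair produced by outpainting_generator_rectangle earlier in the function, with white mask pixels marking the region to repaint (eta, read from the YAML config, controls DDIM stochasticity: 0 is deterministic, 1 is DDPM-like). That generator also lives in generate_dataset and is not shown in this diff; a hypothetical sketch matching its call signature (image, box_width_ratio, mask_random_start):

from PIL import Image, ImageDraw

def outpainting_generator_rectangle(image, box_width_ratio, mask_start):
    # Hypothetical stand-in: blank out a vertical strip of the image and
    # return (masked image, mask); white in the mask marks the area the
    # inpainting pipeline should fill in.
    image = image.convert("RGB")
    w, h = image.size
    box_w = int(w * box_width_ratio)
    x0 = max(0, min(int(mask_start), w - box_w))  # clamp strip inside image
    mask = Image.new("L", (w, h), 0)
    ImageDraw.Draw(mask).rectangle([x0, 0, x0 + box_w - 1, h - 1], fill=255)
    color = image.copy()
    color.paste((127, 127, 127), (x0, 0, x0 + box_w, h))  # grey out masked area
    return color, mask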
@@ -63,29 +75,31 @@ inputs = [
 ]
 
 outputs = [
-    gr.Image(type="pil", image_mode="RGBA", label='Mask RGB Image', container=True, width="65%"),
-    gr.Image(type="pil", image_mode="RGBA", label='Results', container=True, width="65%"),
+    gr.Image(type="pil", image_mode="RGBA", label='PrefPaint', container=True, width="100%"),
+    gr.Image(type="pil", image_mode="RGBA", label='RunwayPaint', container=True, width="100%"),
 ]
 
+files = os.listdir("./assets")
 examples = [
-    ["/data0/kendong/Diffusions/zero123-live/test_demo/assets/ILSVRC2012_test_00000003.JPEG", 35, 125, 50],
-    ["/data0/kendong/Diffusions/zero123-live/test_demo/assets/ILSVRC2012_test_00000181.JPEG", 35, 125, 50],
-    ["/data0/kendong/Diffusions/zero123-live/test_demo/assets/ILSVRC2012_test_00002334.JPEG", 35, 125, 50],
-    ["/data0/kendong/Diffusions/zero123-live/test_demo/assets/ILSVRC2012_test_00002613.JPEG", 35, 125, 50],
+    [f"./assets/{file_name}", 35, 125, 50] for file_name in files
 ]
 
 
-
-iface = gr.Interface(
-    fn=pref_inpainting,
-    inputs=inputs,
-    outputs=outputs,
-    title="Inpainting with Human Preference (Utilizing Free CPU Resources)",
-    description="Upload an image and start your inpainting (currently only supporting outpainting masks; other mask types coming soon).",
-    theme="default",
-    examples= examples,
-    allow_flagging="never"
-)
+with gr.Blocks() as demo:
+
+
+    iface = gr.Interface(
+        fn=pref_inpainting,
+        inputs=inputs,
+        outputs=outputs,
+        title="Inpainting with Human Preference (Utilizing Free CPU Resources)",
+        description="Upload an image and start your inpainting (currently only supporting outpainting masks; other mask types coming soon).",
+        theme="default",
+        examples=examples,
+        # allow_flagging="never"
+    )
+
 
+    # iface.launch()
 
-iface.launch(share=True)
+demo.launch()
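The launcher changes from a bare gr.Interface with share=True to a gr.Blocks wrapper (share=True is unnecessary on a hosted Space, and allow_flagging is commented out, possibly because newer Gradio releases renamed it to flagging_mode). One caveat: depending on the Gradio version, an Interface constructed inside a Blocks context may need an explicit render() call to actually appear in the page, along the lines of:

import gradio as gr

def echo(text):
    # Trivial stand-in for pref_inpainting, just to show the wiring.
    return text

with gr.Blocks() as demo:
    # render() mounts the Interface's components inside this Blocks layout.
    gr.Interface(fn=echo, inputs="text", outputs="text").render()

demo.launch()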
 