Shaoan committed
Commit 7510b6c · verified · 1 Parent(s): 048f364

update main to disable super-res due to api change

Files changed (1): main.py +40 -42
main.py CHANGED
@@ -3,8 +3,7 @@ import torch
 import pickle
 from torchvision.utils import save_image
 import numpy as np
-from diffusers import StableDiffusionUpscalePipeline
-with open('../concept_checkpoints/augceleba_4838.pkl', 'rb') as f:
+with open('./augceleba_8064.pkl', 'rb') as f:
     G = pickle.load(f)['G_ema'].cpu().float() # torch.nn.Module
 
 
@@ -44,26 +43,19 @@ cchoices = [
 import requests
 from PIL import Image
 from io import BytesIO
-from diffusers import LDMSuperResolutionPipeline
 import torch
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 model_id = "CompVis/ldm-super-resolution-4x-openimages"
 
 # load model and scheduler
-pipeline = LDMSuperResolutionPipeline.from_pretrained(model_id)
-pipeline = pipeline.to(device)
-model_id = "stabilityai/stable-diffusion-x4-upscaler"
-pipeline = StableDiffusionUpscalePipeline.from_pretrained(
-    model_id, variant="fp32", torch_dtype=torch.float32
-)
 # let's download an image
 
 
-def super_res(low_res_img):
+def super_res(low_res_img, num_steps):
     # run pipeline in inference (sample random noise and denoise)
-    #upscaled_image = pipeline(low_res_img, num_inference_steps=10, eta=1).images[0]
-    upscaled_image = pipeline(prompt="a sharp image of human face", image=low_res_img, num_inference_steps=10).images[0]
+    upscaled_image = pipeline(low_res_img, num_inference_steps=num_steps, eta=1).images[0]
+    #upscaled_image = text_pipeline(prompt="a sharp image of human face", image=low_res_img, num_inference_steps=75).images[0]
     return upscaled_image
 
 
@@ -82,7 +74,7 @@ def generate(seed, *checkboxes):
         elif i == 4:
             checkboxes_vector[cchoices.index('Wavy Hair')] = checkboxes[i]
         elif i == 5:
-            checkboxes_vector[cchoices.index('Young')] = checkboxes[i]
+            checkboxes_vector[cchoices.index('Young')] = checkboxes[i] * 2
         elif i == 6:
             checkboxes_vector[cchoices.index('Male')] = checkboxes[i]
         elif i == 9:
@@ -90,13 +82,13 @@ def generate(seed, *checkboxes):
         elif i == 10:
             checkboxes_vector[cchoices.index('Chubby')] = checkboxes[i]
         elif i == 11:
-            checkboxes_vector[cchoices.index('Eyeglasses')] = checkboxes[i]
+            checkboxes_vector[cchoices.index('Eyeglasses')] = checkboxes[i] * 2
         elif i == 12:
             checkboxes_vector[cchoices.index('Pale Skin')] = checkboxes[i]
         elif i == 13:
             checkboxes_vector[cchoices.index('Smiling')] = checkboxes[i]
         elif i == 14:
-            checkboxes_vector[cchoices.index('Wearing Hat')] = checkboxes[i] * 1.5
+            checkboxes_vector[cchoices.index('Wearing Hat')] = checkboxes[i] * 2
 
 
     is_young = checkboxes[5]
@@ -105,10 +97,10 @@ def generate(seed, *checkboxes):
     is_goatee = checkboxes[7]
     is_mustache = checkboxes[8]
 
-    checkboxes_vector[12] = is_mustache * 1.5
-    checkboxes_vector[13] = is_mustache * 1.5
-    checkboxes_vector[14] = is_goatee *1.5
-    checkboxes_vector[15] = is_goatee*1.5
+    checkboxes_vector[12] = is_mustache * 2
+    checkboxes_vector[13] = is_mustache * 2
+    checkboxes_vector[14] = is_goatee *2
+    checkboxes_vector[15] = is_goatee*2
 
     checkboxes_vector[16] = is_bald
     checkboxes_vector[17] = is_bald
@@ -122,48 +114,54 @@ def generate(seed, *checkboxes):
     m = checkboxes_vector.view(1, 20)
     ws = G.mapping(z, m, truncation_psi=0.5)
     img = (G.synthesis(ws, force_fp32=True).clip(-1,1)+1)/2
-    up_img = np.array(super_res(img))
-    print(img.min(), img.max(), up_img.min(), up_img.max(), ' >>>>>>image sis zee')
-    #return img[0].permute(1, 2, 0).numpy()
-    return up_img
+    if False:
+        up_img = np.array(super_res(img*2-1, upscale_steps))
+        return up_img
+    else:
+        return img[0].permute(1, 2, 0).numpy()
 
 
 # Create the interface using gr.Blocks
 with gr.Blocks() as demo:
     with gr.Row():
-        sliders = [
-            gr.Slider(label='Bald', minimum=0, maximum=1, step=0.01),
-            gr.Slider(label='Black Hair', minimum=0, maximum=1, step=0.01),
-            gr.Slider(label='Blond Hair', minimum=0, maximum=1, step=0.01),
-            gr.Slider(label='Straight Hair', minimum=0, maximum=1, step=0.01),
-            gr.Slider(label='Wavy Hair', minimum=0, maximum=1, step=0.01),
-        ]
+        slider1 = gr.Slider(label='Not Bald <--------------> Bald', minimum=0, maximum=1, step=0.01)
+        slider2 = gr.Slider(label='No Black Hair <--------> Black Hair', minimum=0, maximum=1, step=0.01)
+        slider3 = gr.Slider(label='No Blond Hair <--------> Blond Hair', minimum=0, maximum=1, step=0.01)
+        slider4 = gr.Slider(label='No Straight Hair <-----> Straight Hair', minimum=0, maximum=1, step=0.01)
+        slider5 = gr.Slider(label='No Wavy Hair <-------> Wavy Hair', minimum=0, maximum=1, step=0.01)
+        sliders = [slider1, slider2, slider3, slider4, slider5]
 
     with gr.Row():
-        sliders += [gr.Slider(label='Young', minimum=0, maximum=1, step=0.01)]
-        sliders += [gr.Slider(label='Male', minimum=0, maximum=1, step=0.01)]
+        sliders += [gr.Slider(label='Old <--------------> Young', minimum=0, maximum=1, step=0.01)]
+        sliders += [gr.Slider(label='Female <--------------> Male', minimum=0, maximum=1, step=0.01)]
 
     with gr.Row():
-        sliders += [gr.Slider(label='Goatee', minimum=0, maximum=1, step=0.01)]
-        sliders += [gr.Slider(label='Mustache', minimum=0, maximum=1, step=0.01)]
+        sliders += [gr.Slider(label='No Goatee <--------------> Goatee', minimum=0, maximum=1, step=0.01)]
+        sliders += [gr.Slider(label='No Mustache <--------------> Mustache', minimum=0, maximum=1, step=0.01)]
 
     with gr.Row():
        sliders += [
-            gr.Slider(label='Big Nose', minimum=0, maximum=1, step=0.01),
-            gr.Slider(label='Chubby', minimum=0, maximum=1, step=0.01),
-            gr.Slider(label='Eyeglasses', minimum=0, maximum=1, step=0.01),
-            gr.Slider(label='Pale Skin', minimum=0, maximum=1, step=0.01),
-            gr.Slider(label='Smiling', minimum=0, maximum=1, step=0.01),
-            gr.Slider(label='Wearing Hat', minimum=0, maximum=1, step=0.01),
+            gr.Slider(label='Small Nose <-------> Big Nose', minimum=0, maximum=1, step=0.01),
+            gr.Slider(label='Slim <--------> Chubby', minimum=0, maximum=1, step=0.01),
+            gr.Slider(label='No Eyeglasses <--------> Eyeglasses', minimum=0, maximum=1, step=0.01),
+            gr.Slider(label='Tan Skin <-------> Pale Skin', minimum=0, maximum=1, step=0.01),
+            gr.Slider(label='Not Smiling <---------> Smiling', minimum=0, maximum=1, step=0.01),
+            gr.Slider(label='No Hat <---------> Wearing Hat', minimum=0, maximum=1, step=0.01),
         ]
 
-    seed_input = gr.Number(label="Seed")
+    seed_input = gr.Number(label="Seed", value=6)
+    upscale_funcs = []
+    #with gr.Row():
+    #    upscale_funcs = [gr.Checkbox(label="Upscale 4x")]
+    #    upscale_funcs += [gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=10)]
    generate_button = gr.Button("Generate")
 
    output_image = gr.Image(label="Generated Image")
+    for slider in sliders:
+        slider.change(fn=generate, inputs=[seed_input] + sliders, outputs=output_image)
 
    # Set the action for the button
-    generate_button.click(fn=generate, inputs=[seed_input] + sliders, outputs=output_image)
+    generate_button.click(fn=generate, inputs=[seed_input] +sliders, outputs=output_image)
 
    # Launch the demo
    demo.launch()
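Note on re-enabling the upscaler: this commit removes both diffusers pipeline constructions but leaves the super-res branch gated behind a literal `if False:` that still references `pipeline` and `upscale_steps`, neither of which is defined anymore, so flipping the gate as-is would raise a NameError. A minimal sketch of one way to restore the LDM path behind a real flag, based on the pipeline the pre-commit code built; the names `ENABLE_SUPER_RES` and `maybe_super_res` are illustrative, not part of the commit:

import numpy as np
import torch
from diffusers import LDMSuperResolutionPipeline

ENABLE_SUPER_RES = False  # flip once the upstream API change is resolved

device = "cuda" if torch.cuda.is_available() else "cpu"

if ENABLE_SUPER_RES:
    # Rebuild the pipeline the old code constructed; the gated branch in
    # generate() references `pipeline`, so it must exist before enabling.
    pipeline = LDMSuperResolutionPipeline.from_pretrained(
        "CompVis/ldm-super-resolution-4x-openimages"
    ).to(device)

def maybe_super_res(img, num_steps=10):
    # img: synthesis output in [0, 1], shape (1, 3, H, W)
    if not ENABLE_SUPER_RES:
        return img[0].permute(1, 2, 0).numpy()
    # The LDM pipeline takes tensors in [-1, 1], hence the img*2-1 rescale
    # that the gated branch in the diff already performs.
    out = pipeline(image=img * 2 - 1, num_inference_steps=num_steps, eta=1)
    return np.array(out.images[0])

The commented-out `upscale_funcs` row (an "Upscale 4x" checkbox plus a "Steps" slider) could then be appended to the `inputs=` lists so a step count reaches `generate` in place of the undefined `upscale_steps`.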