yizhangliu committed
Commit f73b71e · 1 Parent(s): 8997d74

Update app.py

Files changed (1)
  1. app.py +90 -3
app.py CHANGED
@@ -71,6 +71,91 @@ def get_image_ext(img_bytes):
 def diffuser_callback(i, t, latents):
     pass
 
+def process(init_image, mask):
+    global model
+    '''
+    input = request.files
+    # RGB
+    origin_image_bytes = input["image"].read()
+    '''
+
+    # image, alpha_channel = load_img(origin_image_bytes)
+    original_shape = init_image.shape
+    interpolation = cv2.INTER_CUBIC
+
+    '''
+    form = request.form
+    '''
+    size_limit = 1080 # : Union[int, str] = form.get("sizeLimit", "1080")
+    if size_limit == "Original":
+        size_limit = max(image.shape)
+    else:
+        size_limit = int(size_limit)
+
+    config = Config(
+        '''
+        ldm_steps=form["ldmSteps"],
+        ldm_sampler=form["ldmSampler"],
+        hd_strategy=form["hdStrategy"],
+        zits_wireframe=form["zitsWireframe"],
+        hd_strategy_crop_margin=form["hdStrategyCropMargin"],
+        hd_strategy_crop_trigger_size=form["hdStrategyCropTrigerSize"],
+        hd_strategy_resize_limit=form["hdStrategyResizeLimit"],
+        prompt=form["prompt"],
+        use_croper=form["useCroper"],
+        croper_x=form["croperX"],
+        croper_y=form["croperY"],
+        croper_height=form["croperHeight"],
+        croper_width=form["croperWidth"],
+        sd_mask_blur=form["sdMaskBlur"],
+        sd_strength=form["sdStrength"],
+        sd_steps=form["sdSteps"],
+        sd_guidance_scale=form["sdGuidanceScale"],
+        sd_sampler=form["sdSampler"],
+        sd_seed=form["sdSeed"],
+        cv2_flag=form["cv2Flag"],
+        cv2_radius=form['cv2Radius']
+        '''
+    )
+
+    if config.sd_seed == -1:
+        config.sd_seed = random.randint(1, 999999999)
+
+    logger.info(f"Origin image shape: {original_shape}")
+    image = resize_max_size(image, size_limit=size_limit, interpolation=interpolation)
+    logger.info(f"Resized image shape: {image.shape}")
+
+    mask, _ = load_img(input["mask"].read(), gray=True)
+    mask = resize_max_size(mask, size_limit=size_limit, interpolation=interpolation)
+
+    start = time.time()
+    res_np_img = model(image, mask, config)
+    logger.info(f"process time: {(time.time() - start) * 1000}ms")
+
+    torch.cuda.empty_cache()
+
+    if alpha_channel is not None:
+        if alpha_channel.shape[:2] != res_np_img.shape[:2]:
+            alpha_channel = cv2.resize(
+                alpha_channel, dsize=(res_np_img.shape[1], res_np_img.shape[0])
+            )
+        res_np_img = np.concatenate(
+            (res_np_img, alpha_channel[:, :, np.newaxis]), axis=-1
+        )
+
+    ext = get_image_ext(origin_image_bytes)
+    return ext
+    '''
+    response = make_response(
+        send_file(
+            io.BytesIO(numpy_to_bytes(res_np_img, ext)),
+            mimetype=f"image/{ext}",
+        )
+    )
+    response.headers["X-Seed"] = str(config.sd_seed)
+    return response
+    '''
+
 model = ModelManager(
     name='lama',
     device=device,
@@ -101,9 +186,11 @@ def read_content(file_path: str) -> str:
     return content
 
 def predict(dict, prompt=""):
-    init_image = dict["image"].convert("RGB").resize((512, 512))
-    mask = dict["mask"].convert("RGB").resize((512, 512))
-    output = pipe(prompt = prompt, image=init_image, mask_image=mask,guidance_scale=7.5)
+    init_image = dict["image"].convert("RGB") #.resize((512, 512))
+    mask = dict["mask"].convert("RGB") #.resize((512, 512))
+    output = process(init_image, mask)
+    # output = pipe(prompt = prompt, image=init_image, mask_image=mask,guidance_scale=7.5)
+
     return output.images[0], gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
 
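Note on the new process(): as committed, it still leans on names from lama-cleaner's Flask handler whose assignments were left inside the commented-out blocks. input, origin_image_bytes, image, and alpha_channel are all used without ever being defined, and Config(...) is invoked with its entire keyword list commented out, so the first call would raise NameError (or TypeError, if Config has required fields). Below is a minimal runnable sketch, not the committed code: it assumes the lama-cleaner helpers app.py already imports (resize_max_size, Config, the global model, a configured logger) and that Gradio hands predict() PIL images. The Config arguments are illustrative defaults; which fields are required varies by lama-cleaner version.

    import random
    import time

    import cv2
    import numpy as np
    import torch

    def process(init_image, mask):
        global model
        image = np.array(init_image)   # PIL RGB -> ndarray; Gradio supplies no alpha here
        original_shape = image.shape
        interpolation = cv2.INTER_CUBIC

        size_limit = 1080              # fixed, since there is no request form to read from
        image = resize_max_size(image, size_limit=size_limit, interpolation=interpolation)
        logger.info(f"Origin image shape: {original_shape}, resized: {image.shape}")

        mask = np.array(mask.convert("L"))   # the model expects a single-channel mask
        mask = resize_max_size(mask, size_limit=size_limit, interpolation=interpolation)

        config = Config(                     # assumption: explicit values for the
            ldm_steps=25,                    # fields that may lack defaults
            hd_strategy="Original",
            hd_strategy_crop_margin=128,
            hd_strategy_crop_trigger_size=2048,
            hd_strategy_resize_limit=2048,
        )
        if config.sd_seed == -1:
            config.sd_seed = random.randint(1, 999999999)

        start = time.time()
        res_np_img = model(image, mask, config)   # inpainted result as an ndarray
        logger.info(f"process time: {(time.time() - start) * 1000}ms")
        torch.cuda.empty_cache()
        return res_np_img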
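A second loose end: process() returns ext (a file-extension string), while predict() still unpacks output.images[0], which only exists on a diffusers pipeline result, so even a fixed process() would crash at the return statement. A sketch of the Gradio-side reconciliation, assuming process() returns the inpainted ndarray as in the sketch above:

    import numpy as np
    from PIL import Image

    def predict(dict, prompt=""):
        init_image = dict["image"].convert("RGB")
        mask = dict["mask"].convert("RGB")
        res_np_img = process(init_image, mask)   # ndarray, not a pipeline output
        # depending on the model's channel order, cv2.cvtColor(res_np_img,
        # cv2.COLOR_BGR2RGB) may be needed before display
        output_image = Image.fromarray(np.uint8(res_np_img))
        return output_image, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)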