zhengqilin committed
Commit 5e185f5 · 1 Parent(s): 96fc329

handler.py

Files changed (1)
  1. handler.py +60 -10
handler.py CHANGED
@@ -142,7 +142,7 @@ class EndpointHandler():
         Return:
             A :obj:`list` | `dict`: will be serialized and returned
         """
-        args = {
+        txt2img_args = {
             "do_not_save_samples": True,
             "do_not_save_grid": True,
             "outpath_samples": "./output",
@@ -155,15 +155,65 @@ class EndpointHandler():
             "height": 768,
             "seed": -1,
         }
-        if data["inputs"]:
-            for field in args:
-                if field in data["inputs"].keys():
-                    args[field] = data["inputs"][field]
-        # if "prompt" in data["inputs"].keys():
-        #     prompt = data["inputs"]["prompt"]
-        #     print("get prompt from request: ", prompt)
-        #     args["prompt"] = prompt
-        p = StableDiffusionProcessingTxt2Img(sd_model=self.shared.sd_model, **args)
+        img2img_args = {
+            "init_images": ["data:image/png;base64,"],
+            "resize_mode": 0,
+            "denoising_strength": 0.75,
+            "image_cfg_scale": 0,
+            "mask_blur": 4,
+            "inpainting_fill": 0,
+            "inpaint_full_res": 1,
+            "inpaint_full_res_padding": 0,
+            "inpainting_mask_invert": 0,
+            "initial_noise_multiplier": 0,
+            "prompt": "lora:koreanDollLikeness_v15:0.66, best quality, ultra high res, (photorealistic:1.4), 1girl, beige sweater, black choker, smile, laughing, bare shoulders, solo focus, ((full body), (brown hair:1), looking at viewer",
+            "styles": [],
+            "seed": -1,
+            "subseed": -1,
+            "subseed_strength": 0,
+            "seed_resize_from_h": -1,
+            "seed_resize_from_w": -1,
+            "sampler_name": "Euler a",
+            "batch_size": 1,
+            "n_iter": 1,
+            "steps": 50,
+            "cfg_scale": 7,
+            "width": 512,
+            "height": 512,
+            "restore_faces": 0,
+            "tiling": 0,
+            "negative_prompt": "paintings, sketches, (worst quality:2), (low quality:2), (normal quality:2), lowres, normal quality, ((monochrome)), ((grayscale)), skin spots, acnes, skin blemishes, age spot, glans, (ugly:1.331), (duplicate:1.331), (morbid:1.21), (mutilated:1.21), (tranny:1.331), mutated hands, (poorly drawn hands:1.331), blurry, 3hands,4fingers,3arms, bad anatomy, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts,poorly drawn face,mutation,deformed",
+            "eta": 0,
+            "s_churn": 0,
+            "s_tmax": 0,
+            "s_tmin": 0,
+            "s_noise": 1,
+            "override_settings": {},
+            "override_settings_restore_afterwards": 1,
+            "script_args": [],
+            "sampler_index": "Euler",
+            "include_init_images": 0
+        }
+
+        p = None
+        if data["type"] == "txt2img":
+            if data["inputs"]:
+                for field in txt2img_args:
+                    if field in data["inputs"].keys():
+                        txt2img_args[field] = data["inputs"][field]
+            # if "prompt" in data["inputs"].keys():
+            #     prompt = data["inputs"]["prompt"]
+            #     print("get prompt from request: ", prompt)
+            #     args["prompt"] = prompt
+            p = StableDiffusionProcessingTxt2Img(sd_model=self.shared.sd_model, **txt2img_args)
+        if data["type"] == "img2img":
+            if data["inputs"]:
+                for field in img2img_args:
+                    if field in data["inputs"].keys():
+                        img2img_args[field] = data["inputs"][field]
+            p = StableDiffusionProcessingImg2Img(sd_model=self.shared.sd_model, **img2img_args)
+        if p is None:
+            raise Exception("No processing object created")
         processed = process_images(p)
         single_image_b64 = encode_pil_to_base64(processed.images[0]).decode('utf-8')
         return {
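
For reference, here is a minimal caller-side sketch of how a request payload might exercise the new txt2img/img2img dispatch. It assumes the usual custom-handler calling convention in which EndpointHandler is invoked with a single data dict; the constructor arguments and the exact keys of the returned dict are not visible in this diff, so they are placeholders. Only fields that also exist in txt2img_args / img2img_args are copied from "inputs"; every other field keeps its default.

# Hypothetical usage sketch; EndpointHandler's constructor signature and the exact
# shape of the returned dict are assumptions, not shown in the diff above.
from handler import EndpointHandler

handler = EndpointHandler()  # the real constructor may require additional arguments

# txt2img: only keys that also appear in txt2img_args are taken from "inputs";
# everything else keeps its default (e.g. height=768, seed=-1).
txt2img_payload = {
    "type": "txt2img",
    "inputs": {
        "prompt": "a watercolor landscape, best quality",
        "steps": 30,
        "seed": 42,
    },
}
txt2img_result = handler(txt2img_payload)  # presumably a dict holding the base64-encoded image

# img2img: "init_images" carries the source image(s) as base64 data URLs, and
# "denoising_strength" controls how far the output may drift from the input.
img2img_payload = {
    "type": "img2img",
    "inputs": {
        "init_images": ["data:image/png;base64,<placeholder>"],
        "denoising_strength": 0.6,
        "prompt": "same scene, golden hour lighting",
    },
}
img2img_result = handler(img2img_payload)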