emeersman committed
Commit 1828bb0 · 1 Parent(s): 0ece009

Add encoding logic and remove image decoding logic

Files changed (1)
  1. handler.py +4 -13
handler.py CHANGED
@@ -28,7 +28,6 @@ class EndpointHandler():
             A :obj:`dict`:. base64 encoded image
         """
         inputs = data.pop("inputs", data)
-        encoded_image = data.pop("image", None)
         params = data.pop("parameters", data)
 
         # hyperparamters
@@ -41,12 +40,8 @@ class EndpointHandler():
 
         generator = torch.Generator(device).manual_seed(manual_seed)
 
-        if encoded_image is not None:
-            image = self.decode_base64_image(encoded_image)
-
         # run inference pipeline
         out = self.pipe(inputs,
-                        image=image,
                         generator=generator,
                         num_inference_steps=num_inference_steps,
                         guidance_scale=guidance_scale,
@@ -57,11 +52,7 @@ class EndpointHandler():
         )
 
         # return first generate PIL image
-        return out.images[0]
-
-    # helper to decode input image
-    def decode_base64_image(self, image_string):
-        base64_image = base64.b64decode(image_string)
-        buffer = BytesIO(base64_image)
-        image = Image.open(buffer)
-        return image
+        image = out.images[0]
+        buffered = BytesIO()
+        image.save(buffered, format="JPEG")
+        return base64.b64encode(buffered.getvalue())
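
With this change, EndpointHandler returns base64-encoded JPEG bytes instead of a PIL image. A minimal client-side sketch of how a caller might decode that payload back into an image; the handler invocation shown in the comments is an assumed usage example, not part of this commit:

import base64
from io import BytesIO

from PIL import Image

def decode_response(encoded: bytes) -> Image.Image:
    # Reverse the handler's encoding: base64 -> raw JPEG bytes -> PIL image.
    jpeg_bytes = base64.b64decode(encoded)
    return Image.open(BytesIO(jpeg_bytes))

# Assumed usage: `payload` is the value returned by EndpointHandler.__call__, e.g.
#   payload = handler({"inputs": "a photo of an astronaut riding a horse"})
#   image = decode_response(payload)
#   image.save("output.jpg")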