Commit c6d0824
Parent(s): 081c459
manually unpack base64 urls

handler.py CHANGED (+43 -30)
@@ -2,9 +2,24 @@ from typing import Dict, List, Any
 import torch
 from PIL import Image
 from io import BytesIO
-from urllib import request
 from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, DDIMScheduler
 
+import base64
+import requests
+from io import BytesIO
+from PIL import Image
+
+def load_image(image_url):
+    if image_url.startswith('data:'):
+        # Decode base64 data URI
+        image_data = base64.b64decode(image_url.split(',')[1])
+        image = Image.open(BytesIO(image_data))
+    else:
+        # Load standard image URL
+        response = requests.get(image_url)
+        image = Image.open(BytesIO(response.content))
+    return image
+
 # set device
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
@@ -36,38 +51,36 @@ class EndpointHandler():
         prompt = data.pop("inputs", data)
         url = data.pop("url", data)
 
-        init_image = Image.open(url)
-        init_image.thumbnail((512, 512))
-        … (27 more removed lines are not legible in this view)
+        init_image = load_image(url).convert("RGB")
+        # init_image = Image.open(url)
+        init_image.thumbnail((512, 512))
 
 
+        params = data.pop("parameters", data)
 
+        # hyperparameters
+        num_inference_steps = params.pop("num_inference_steps", 25)
+        guidance_scale = params.pop("guidance_scale", 7.5)
+        negative_prompt = params.pop("negative_prompt", None)
+        prompt = params.pop("prompt", None)
+        height = params.pop("height", None)
+        width = params.pop("width", None)
+        manual_seed = params.pop("manual_seed", -1)
 
+        out = None
 
+        generator = torch.Generator(device='cuda')
+        generator.manual_seed(manual_seed)
+        # run img2img pipeline
+        out = self.imgPipe(prompt,
+                           image=init_image,
+                           num_inference_steps=num_inference_steps,
+                           guidance_scale=guidance_scale,
+                           num_images_per_prompt=1,
+                           negative_prompt=negative_prompt,
+                           height=height,
+                           width=width
+                           )
 
+        # return first generated PIL image
+        return out.images[0]
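
Since the whole commit turns on the new load_image helper, a minimal usage sketch may help. It is hypothetical: the tiny in-memory PNG is generated only to exercise the base64 branch, and it assumes load_image is importable from handler.py with Pillow and requests installed.

    import base64
    from io import BytesIO
    from PIL import Image

    from handler import load_image

    # Build a small in-memory PNG and wrap it in a base64 data URI,
    # the kind of input the handler now unpacks manually.
    buf = BytesIO()
    Image.new("RGB", (8, 8), "white").save(buf, format="PNG")
    data_uri = "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode()

    img = load_image(data_uri)   # takes the data: branch via base64.b64decode
    print(img.size)              # (8, 8)

    # A plain http(s) URL takes the requests.get branch instead:
    # img = load_image("https://example.com/some-image.png")

Note that load_image keeps only the part after the first ',' of the data URI, so any "data:<mediatype>;base64," prefix works; base64 payloads cannot contain commas, so the split is safe.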
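On the calling side, the handler pops "inputs", "url", and "parameters" from the request payload, so a request would be shaped roughly like the sketch below. All field values are invented, and EndpointHandler's constructor is not shown in this diff, so treat this as an outline rather than the deployed API:

    payload = {
        "inputs": "a watercolor painting of a lighthouse",
        "url": "https://example.com/sketch.png",  # or a data:image/png;base64,... URI
        "parameters": {
            # "prompt" must be repeated here: params.pop("prompt", None)
            # unconditionally overwrites the value popped from "inputs"
            "prompt": "a watercolor painting of a lighthouse",
            "num_inference_steps": 25,
            "guidance_scale": 7.5,
            "negative_prompt": "blurry, low quality",
            "manual_seed": 42,
        },
    }

    handler = EndpointHandler()   # constructor arguments not shown in this diff
    image = handler(payload)      # first generated PIL image

One caveat on seeding: the generator is created and seeded with manual_seed but never handed to self.imgPipe, so the seed does not actually influence sampling; diffusers pipelines accept a generator= keyword argument for that.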