Commit: 77f12ff
Parent(s): 319d641
Commit message: "Atualização" (Update)
app.py CHANGED
@@ -21,10 +21,10 @@ os.environ['REPLICATE_API_TOKEN'] = 'cbd8d6421f3037d482bd7d6ec8e7368350e3aaab'
 model = replicate.models.get("stability-ai/stable-diffusion-inpainting")
 version = model.versions.get("c28b92a7ecd66eee4aefcd8a94eb9e7f6c3805d5f06038165407fb5cb355ba67")
 
-sf_prompt_1 = "old bridge, mountain, grass"
-sf_neg_prompt_1 = ""
+sf_prompt_1 = "sunflowers, old bridge, mountain, grass"
+sf_neg_prompt_1 = "animal"
 
-sf_prompt_2 = "
+sf_prompt_2 = "fire, landscape"
 sf_neg_prompt_2 = "animal"
 
 template1 = Image.open("templates/template1.png").resize((512, 512))
@@ -33,6 +33,7 @@ template2 = Image.open("templates/template2.png").resize((512, 512))
 fontMain = ImageFont.truetype(font="fonts/arial.ttf", size=32)
 fontSecond = ImageFont.truetype(font="fonts/arial.ttf", size=18)
 
+
 def numpy_to_pil(images):
     if images.ndim == 3:
         images = images[None, ...]
@@ -110,9 +111,7 @@ async def predict(prompt, negative_prompt, image, mask_img):
 
     response = requests.get(output[0])
     img_final = Image.open(BytesIO(response.content))
-
     mask = mask_image.convert('L')
-
     PIL.Image.composite(img_final, image, mask)
     return (img_final)
 
@@ -157,7 +156,7 @@ def inference(obj2mask, image_numpy, main_text, second_text, only_test):
     generator = torch.Generator()
     generator.manual_seed(int(52362))
 
-    image =
+    image = Image.fromarray(image_numpy).convert("RGB").resize((512, 512))
 
     mask_img = get_mask(obj2mask, image)
 
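For context, the diff only shows the first two lines of numpy_to_pil. A typical complete version of this helper, patterned on the one shipped with diffusers, is sketched below; the [0, 1] float scaling and the final list comprehension are assumptions about the rest of the function, not part of this commit.

    from PIL import Image

    def numpy_to_pil(images):
        # `images` is expected to be a NumPy float array in [0, 1],
        # either (H, W, C) or (N, H, W, C).
        # Promote a single (H, W, C) array to a batch of one.
        if images.ndim == 3:
            images = images[None, ...]
        # Assumption: scale to 8-bit and wrap each frame as a PIL image.
        images = (images * 255).round().astype("uint8")
        return [Image.fromarray(img) for img in images]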
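One spot the commit leaves untouched: inside predict, the result of PIL.Image.composite(img_final, image, mask) is never assigned, so the function still returns the raw inpainting output rather than the blend. If the intent is to keep the original pixels outside the masked region, the composite result has to be captured. A minimal sketch, assuming img_final, the original image, and mask_image are same-size PIL images as in the surrounding code (blend_inpainting is a hypothetical helper name, not part of app.py):

    from PIL import Image

    def blend_inpainting(img_final, original, mask_image):
        # Hypothetical helper: Image.composite returns a new image instead of
        # modifying its arguments, so the result must be captured and returned.
        mask = mask_image.convert("L")  # single-channel mask; white regions take pixels from img_final
        return Image.composite(img_final, original, mask)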