sl committed · 184c926
1 parent: cf69fde

.scr generation

app.py CHANGED
@@ -4,8 +4,13 @@ import cv2
 import torch
 import numpy as np
 from PIL import Image
+import os
 
 model = "shadowlamer/sd-zxspectrum-model-256"
+image_width = 256
+image_height = 192
+samples_dir = "/tmp"
+
 
 # Borrowed from here: https://stackoverflow.com/a/73667318
 def quantize_to_palette(_image, _palette):
@@ -19,17 +24,85 @@ def quantize_to_palette(_image, _palette):
     return Image.fromarray(cv2.cvtColor(np.array(_quantized_image, dtype=np.uint8), cv2.COLOR_BGR2RGB))
 
 
+def collect_char_colors(image, _x, _y):
+    _colors = {}
+    for _char_y in range(8):
+        for _char_x in range(8):
+            _pixel = image.getpixel((_x + _char_x, _y + _char_y))
+            _colors[_pixel] = 1 if _pixel not in _colors else _colors[_pixel] + 1
+    _colors = sorted(_colors.items(), key=lambda _v: _v[1], reverse=True)
+    return [list(_tuple[0]) for _tuple in list(_colors)]
+
+
+def palette_to_attr(_palette):
+    if len(_palette) == 0:
+        return 0x00
+    _attr = 0x40
+    _paper = _palette[0]
+    if _paper[0] > 0:
+        _attr = _attr + 0x10 # r
+    if _paper[1] > 0:
+        _attr = _attr + 0x20 # g
+    if _paper[2] > 0:
+        _attr = _attr + 0x08 # b
+    if len(_palette) == 1:
+        return _attr
+    _ink = _palette[1]
+    if _ink[0] > 0:
+        _attr = _attr + 0x02 # r
+    if _ink[1] > 0:
+        _attr = _attr + 0x04 # g
+    if _ink[2] > 0:
+        _attr = _attr + 0x01 # b
+    return _attr
+
+
 def generate(prompt, seed):
     pipe = DiffusionPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False)
     generator = torch.Generator("cpu").manual_seed(int(seed))
-    raw_image = pipe(prompt, height=
+    raw_image = pipe(prompt, height=image_height, width=image_width, num_inference_steps=20, generator=generator).images[0]
     palette = np.array(
         [[0, 0, 0], [0, 0, 255], [0, 255, 0], [0, 255, 255], [255, 0, 0], [255, 0, 255], [255, 255, 0],
          [255, 255, 255]])
     input_image = np.array(raw_image)
     input_image = input_image[:, :, ::-1].copy()
-
+    image = quantize_to_palette(_image=input_image, _palette=palette)
+
+    out = samples_dir + "/" + prompt.replace(" ", "_") + "_" + str(seed) + ".scr"
+
+    if not os.path.exists(out):
+        byte_buffer = [0] * 0x1800
+        attr_buffer = [0b00111000] * 0x300
+
+        for y in range(0, image_height, 8):
+            for x in range(0, image_width, 8):
+                px = int(x / 8)
+                py = int(y / 8)
+                palette = collect_char_colors(image, x, y)
+                byte_index = int(py / 8) * 0x800 + (py % 8) * 32 + px
+                for cy in range(8):
+                    byte = 0
+                    for cx in range(8):
+                        byte = byte * 2
+                        pixel = list(image.getpixel((x + cx, y + cy)))
+                        if palette[0] != pixel:
+                            byte = byte + 1
+                    byte_buffer[byte_index] = byte
+                    byte_index = byte_index + 0x100
+                attr = palette_to_attr(palette)
+                attr_buffer[py * 32 + px] = attr
+
+        scr = open(out, 'wb')
+        scr.write(bytearray(byte_buffer))
+        scr.write(bytearray(attr_buffer))
+        scr.close()
+
+    return [image, out]
 
 
-iface = gr.Interface(fn=generate,
+iface = gr.Interface(fn=generate,
+                     title="ZX-Spectrum inspired images generator ",
+                     inputs=["text", "number"],
+                     outputs=["image", "file"],
+                     examples=[["Cute cat", 123], ["Solar system", 123], ["Disco ball", 123]])
 iface.launch()
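
Note on the .scr layout written by this commit: a ZX Spectrum screen dump is 0x1800 (6144) bytes of bitmap followed by 0x300 (768) bytes of colour attributes, 6912 bytes in total, and bitmap rows are interleaved rather than stored top to bottom. The sketch below only re-derives the byte_index arithmetic used in generate(); scr_bitmap_offset is an illustrative helper, not part of app.py.

def scr_bitmap_offset(px, py, cy):
    # Offset of the bitmap byte for character column px (0-31), character row
    # py (0-23) and pixel row cy (0-7) within the cell. Equivalent to the
    # byte_index in app.py (its initial value plus cy * 0x100 per pixel row):
    # screen third, then pixel row, then character row within the third, then column.
    return (py // 8) * 0x800 + cy * 0x100 + (py % 8) * 32 + px

# The last bitmap byte sits at 0x17FF; attributes follow at 0x1800.
assert scr_bitmap_offset(31, 23, 7) == 0x17FF
assert 0x1800 + 0x300 == 6912  # total .scr size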
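
The attribute byte assembled by palette_to_attr follows the standard Spectrum layout: bits 0-2 are ink (bit 0 blue, bit 1 red, bit 2 green), bits 3-5 are paper in the same order, bit 6 is BRIGHT (0x40, always set here) and bit 7 is FLASH. The most frequent colour in each 8x8 cell becomes paper, the second most frequent becomes ink, and attr_buffer defaults to 0b00111000 (white paper, black ink, no BRIGHT). A small decoder for sanity-checking, assuming that bit layout; decode_attr is illustrative and not part of app.py.

def decode_attr(attr):
    # Split a ZX Spectrum attribute byte into its fields.
    ink = attr & 0x07           # bits 0-2: ink colour index (0 = black ... 7 = white)
    paper = (attr >> 3) & 0x07  # bits 3-5: paper colour index
    bright = bool(attr & 0x40)  # bit 6: BRIGHT
    flash = bool(attr & 0x80)   # bit 7: FLASH
    return ink, paper, bright, flash

# A cell whose dominant colour is (255, 255, 255) and second colour (0, 0, 0)
# yields 0x78 from palette_to_attr: white paper, black ink, BRIGHT set.
assert decode_attr(0x78) == (0, 7, True, False)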
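
Finally, a quick way to check a file downloaded from the Space's new "file" output. The default path below assumes the "Cute cat" / 123 example was run and mirrors the samples_dir naming scheme in generate(); this check is illustrative and not part of the commit.

import sys

# Hypothetical sample path: samples_dir + "/" + prompt.replace(" ", "_") + "_" + seed + ".scr"
path = sys.argv[1] if len(sys.argv) > 1 else "/tmp/Cute_cat_123.scr"
with open(path, "rb") as f:
    data = f.read()
assert len(data) == 0x1800 + 0x300, f"unexpected size: {len(data)} bytes"
bitmap, attrs = data[:0x1800], data[0x1800:]
print(f"{path}: {len(bitmap)} bitmap bytes, {len(attrs)} attribute bytes")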