Commit d49dda4 (1 parent: 07eed4b)

Initial import

Files changed:
- .gitignore +2 -0
- app.py +35 -0
- requirements.txt +7 -0

.gitignore ADDED
@@ -0,0 +1,2 @@
+.env
+.flagged

app.py ADDED
@@ -0,0 +1,35 @@
+import gradio as gr
+from diffusers import DiffusionPipeline
+import cv2
+import torch
+import numpy as np
+from PIL import Image
+
+model = "shadowlamer/sd-zxspectrum-model-256"
+
+# Borrowed from here: https://stackoverflow.com/a/73667318
+def quantize_to_palette(_image, _palette):
+    x_query = _image.reshape(-1, 3).astype(np.float32)
+    x_index = _palette.astype(np.float32)
+    knn = cv2.ml.KNearest_create()
+    knn.train(x_index, cv2.ml.ROW_SAMPLE, np.arange(len(_palette)))
+    ret, results, neighbours, dist = knn.findNearest(x_query, 1)
+    _quantized_image = np.array([_palette[idx] for idx in neighbours.astype(int)])
+    _quantized_image = _quantized_image.reshape(_image.shape)
+    return Image.fromarray(cv2.cvtColor(np.array(_quantized_image, dtype=np.uint8), cv2.COLOR_BGR2RGB))
+
+
+def generate(prompt, seed):
+    pipe = DiffusionPipeline.from_pretrained(model, safety_checker=None, requires_safety_checker=False)
+    generator = torch.Generator("cpu").manual_seed(int(seed))
+    raw_image = pipe(prompt, height=192, width=256, num_inference_steps=20, generator=generator).images[0]
+    palette = np.array(
+        [[0, 0, 0], [0, 0, 255], [0, 255, 0], [0, 255, 255], [255, 0, 0], [255, 0, 255], [255, 255, 0],
+         [255, 255, 255]])
+    input_image = np.array(raw_image)
+    input_image = input_image[:, :, ::-1].copy()
+    return quantize_to_palette(_image=input_image, _palette=palette)
+
+
+iface = gr.Interface(fn=generate, inputs=["text", "number"], outputs="image")
+iface.launch()
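
Note (not part of the commit): quantize_to_palette snaps every pixel of a BGR image to its nearest entry in the 8-colour ZX Spectrum-style palette via a 1-nearest-neighbour search in cv2.ml. A minimal NumPy-only sketch of the same nearest-palette lookup, using an illustrative random image in place of the diffusion output:

import numpy as np

# 8-colour palette in BGR order, matching the array built in generate()
palette = np.array(
    [[0, 0, 0], [0, 0, 255], [0, 255, 0], [0, 255, 255],
     [255, 0, 0], [255, 0, 255], [255, 255, 0], [255, 255, 255]], dtype=np.float32)

# Illustrative 256x192 input; in the Space this would be the pipeline's output image
image = np.random.randint(0, 256, size=(192, 256, 3)).astype(np.float32)

# For each pixel, pick the palette entry with the smallest Euclidean distance;
# this is what the cv2.ml.KNearest call in app.py does with k=1.
pixels = image.reshape(-1, 3)                                      # (H*W, 3)
dists = np.linalg.norm(pixels[:, None, :] - palette[None, :, :], axis=2)
quantized = palette[dists.argmin(axis=1)].reshape(image.shape).astype(np.uint8)

# Every output pixel is now one of the eight palette colours.
assert set(map(tuple, quantized.reshape(-1, 3))) <= set(map(tuple, palette.astype(np.uint8)))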

requirements.txt ADDED
@@ -0,0 +1,7 @@
+gradio
+diffusers
+torch
+transformers
+accelerate
+opencv-python
+numpy
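
To try the Space outside Hugging Face (an assumption, not stated in the commit), installing the dependencies with pip install -r requirements.txt and then running python app.py should start the Gradio interface on a local port, since gr.Interface(...).launch() serves the app locally by default.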