# Hugging Face Spaces app (web-page header "Spaces: Running" removed — scraping artifact).
import gradio as gr
from diffusers import DiffusionPipeline
import cv2
import torch
import numpy as np
from PIL import Image

# Hugging Face model id of the Stable Diffusion checkpoint fine-tuned on
# ZX Spectrum imagery (256px base resolution).
model = "shadowlamer/sd-zxspectrum-model-256"
# Borrowed from here: https://stackoverflow.com/a/73667318 | |
# Borrowed from here: https://stackoverflow.com/a/73667318
def quantize_to_palette(_image, _palette):
    """Snap every pixel of a BGR image to its nearest color in ``_palette``.

    Uses OpenCV's k-nearest-neighbour search (k=1) in Euclidean color space.

    Args:
        _image: H x W x 3 uint8 array in BGR channel order (see caller).
        _palette: N x 3 array of palette colors.

    Returns:
        PIL.Image in RGB order with every pixel replaced by a palette color.
    """
    x_query = _image.reshape(-1, 3).astype(np.float32)
    x_index = _palette.astype(np.float32)
    knn = cv2.ml.KNearest_create()
    # cv2.ml requires float32 (or int32) responses; a plain np.arange yields
    # int64 on most 64-bit platforms and is rejected by train().
    labels = np.arange(len(_palette), dtype=np.float32)
    knn.train(x_index, cv2.ml.ROW_SAMPLE, labels)
    _ret, _results, neighbours, _dist = knn.findNearest(x_query, 1)
    # Vectorized fancy indexing replaces the original per-pixel Python loop.
    _quantized_image = _palette[neighbours.astype(int).ravel()]
    _quantized_image = _quantized_image.reshape(_image.shape)
    return Image.fromarray(cv2.cvtColor(np.array(_quantized_image, dtype=np.uint8), cv2.COLOR_BGR2RGB))
def _load_pipeline():
    """Load the diffusion pipeline once and reuse it across calls.

    The original code called ``from_pretrained`` on every generation, paying
    the full model-load cost per request; the pipeline is stateless across
    prompts, so caching it is safe.
    """
    pipe = getattr(_load_pipeline, "_cached", None)
    if pipe is None:
        pipe = DiffusionPipeline.from_pretrained(
            model, safety_checker=None, requires_safety_checker=False)
        _load_pipeline._cached = pipe
    return pipe


def generate(prompt, seed):
    """Generate a 256x192 ZX-Spectrum-styled image from a text prompt.

    Args:
        prompt: text prompt forwarded to the diffusion pipeline.
        seed: number (or numeric string) seeding the CPU RNG for
            reproducible output.

    Returns:
        PIL.Image quantized to the 8-color ZX Spectrum base palette.
    """
    pipe = _load_pipeline()
    generator = torch.Generator("cpu").manual_seed(int(seed))
    raw_image = pipe(prompt, height=192, width=256, num_inference_steps=20,
                     generator=generator).images[0]
    # The 8 base ZX Spectrum colors (black, blue, green, cyan, red,
    # magenta, yellow, white); bright variants are omitted.
    palette = np.array(
        [[0, 0, 0], [0, 0, 255], [0, 255, 0], [0, 255, 255],
         [255, 0, 0], [255, 0, 255], [255, 255, 0], [255, 255, 255]])
    # PIL yields RGB; flip the channel axis to BGR because
    # quantize_to_palette converts back with COLOR_BGR2RGB.
    input_image = np.array(raw_image)[:, :, ::-1].copy()
    return quantize_to_palette(_image=input_image, _palette=palette)
# Minimal UI: text prompt + numeric seed in, generated image out.
# `iface` stays at module level so the app remains importable; launching the
# server only happens when the file is run as a script.
iface = gr.Interface(fn=generate, inputs=["text", "number"], outputs="image")

if __name__ == "__main__":
    iface.launch()