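# Gradio Space (assumed to be saved as app.py): turns a URL into QR-code art by feeding the
# rendered QR code through Stable Diffusion with a canny ControlNet, running entirely on CPU.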
import gc
import random

import cv2
import gradio as gr
import numpy as np
import qrcode
import torch
from accelerate import Accelerator
from diffusers import (
    ControlNetModel,
    DPMSolverMultistepScheduler,
    StableDiffusionControlNetPipeline,
)
from diffusers.utils import load_image
from PIL import Image
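
# CPU-only setup: Accelerator(cpu=True) is used below simply to wrap model loading on CPU.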
accelerator = Accelerator(cpu=True)

models = [
    "runwayml/stable-diffusion-v1-5",
    "prompthero/openjourney-v4",
    "CompVis/stable-diffusion-v1-4",
    "stabilityai/stable-diffusion-2-1",
    "stablediffusionapi/disney-pixal-cartoon",
    "stablediffusionapi/edge-of-realism",
    "MirageML/fantasy-scene",
    "wavymulder/lomo-diffusion",
    "sd-dreambooth-library/fashion",
    "DucHaiten/DucHaitenDreamWorld",
    "VegaKH/Ultraskin",
    "kandinsky-community/kandinsky-2-1",
    "MirageML/lowpoly-cyberpunk",
    "thehive/everyjourney-sdxl-0.9-finetuned",
    "plasmo/woolitize-768sd1-5",
    "plasmo/food-crit",
    "johnslegers/epic-diffusion-v1.1",
    "Fictiverse/ElRisitas",
    "robotjung/SemiRealMix",
    "herpritts/FFXIV-Style",
    "prompthero/linkedin-diffusion",
    "RayHell/popupBook-diffusion",
    "MirageML/lowpoly-world",
    "deadman44/SD_Photoreal_Merged_Models",
    "Conflictx/CGI_Animation",
    "johnslegers/epic-diffusion",
    "tilake/China-Chic-illustration",
    "wavymulder/modelshoot",
    "prompthero/openjourney-lora",
    "Fictiverse/Stable_Diffusion_VoxelArt_Model",
    "darkstorm2150/Protogen_v2.2_Official_Release",
    "hassanblend/HassanBlend1.5.1.2",
    "hassanblend/hassanblend1.4",
    "nitrosocke/redshift-diffusion",
    "prompthero/openjourney-v2",
    "nitrosocke/Arcane-Diffusion",
    "Lykon/DreamShaper",
    "wavymulder/Analog-Diffusion",
    "nitrosocke/mo-di-diffusion",
    "dreamlike-art/dreamlike-diffusion-1.0",
    "dreamlike-art/dreamlike-photoreal-2.0",
    "digiplay/RealismEngine_v1",
    "digiplay/AIGEN_v1.4_diffusers",
    "stablediffusionapi/dreamshaper-v6",
    "JackAnon/GorynichMix",
    "p1atdev/liminal-space-diffusion",
    "nadanainone/gigaschizonegs",
    "darkVOYAGE/dvMJv4",
    "lckidwell/album-cover-style",
    "axolotron/ice-cream-animals",
    "perion/ai-avatar",
    "digiplay/GhostMix",
    "ThePioneer/MISA",
    "TheLastBen/froggy-style-v21-768",
    "FloydianSound/Nixeu_Diffusion_v1-5",
    "kakaobrain/karlo-v1-alpha-image-variations",
    "digiplay/PotoPhotoRealism_v1",
    "ConsistentFactor/Aurora-By_Consistent_Factor",
    "rim0/quadruped_mechas",
    "Akumetsu971/SD_Samurai_Anime_Model",
    "Bojaxxx/Fantastic-Mr-Fox-Diffusion",
    "sd-dreambooth-library/original-character-cyclps",
    "AIArtsChannel/steampunk-diffusion",
]
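
# A single canny ControlNet is loaded once and shared by whichever base checkpoint is selected.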
controlnet = accelerator.prepare(ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float32))
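
# plex(): renders a QR code for `qr_code_value`, generates one image conditioned on the raw QR
# code and two more conditioned on its canny edge map, then composites the dark QR modules over
# each generated image. Returns the list of images shown in the output gallery.
# `one` = inference steps, `two` = "prompt strength" slider (not forwarded, see note inside),
# `three` = controlnet conditioning scale.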
def plex(qr_code_value, text, neg_prompt, modil, one, two, three):
    gc.collect()
    apol = []
    # Build a ControlNet pipeline on top of the selected base checkpoint.
    pipe = accelerator.prepare(StableDiffusionControlNetPipeline.from_pretrained(modil, controlnet=controlnet, torch_dtype=torch.float32, use_safetensors=False, safety_checker=None))
    pipe.unet.to(memory_format=torch.channels_last)
    pipe.scheduler = accelerator.prepare(DPMSolverMultistepScheduler.from_config(pipe.scheduler.config))
    pipe = pipe.to("cpu")
    # Fall back to a generic negative prompt when the field is left empty.
    if not neg_prompt:
        neg_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
    prompt = text
    # Render the QR code and keep a 512x512 RGB copy as the conditioning image.
    qr_code = qrcode.make(qr_code_value).resize((512, 512))
    original = load_image(qr_code).convert("RGB")
    original.thumbnail((512, 512))
    # Canny edge map of the QR code, used as the control image for the batched pass.
    cannyimage = np.array(load_image(original).resize((512, 512)))
    low_threshold = 100
    high_threshold = 200
    cannyimage = cv2.Canny(cannyimage, low_threshold, high_threshold)
    cannyimage = cannyimage[:, :, None]
    cannyimage = np.concatenate([cannyimage, cannyimage, cannyimage], axis=2)
    cannyimage = Image.fromarray(cannyimage)
    # Inverted copy of the QR code, appended to the gallery for reference only.
    pannyimage = Image.fromarray(np.invert(np.array(load_image(original).resize((512, 512)))))
    images = [cannyimage]
    generator = torch.Generator(device="cpu").manual_seed(random.randint(1, 4836923))
    # Note: the text-to-image ControlNet pipeline has no `strength` argument (only the img2img
    # variants do), so the "prompt strength" slider value `two` is not forwarded to the calls.
    # First pass: one image conditioned directly on the raw QR code.
    imzge = pipe(prompt, original, num_inference_steps=int(one), generator=generator, negative_prompt=neg_prompt, controlnet_conditioning_scale=three).images[0]
    apol.append(imzge)
    # Second pass: two images conditioned on the canny edge map.
    image = pipe([prompt] * 2, images, num_inference_steps=int(one), generator=generator, negative_prompt=[neg_prompt] * 2, controlnet_conditioning_scale=three)
    for i, imge in enumerate(image.images):
        apol.append(imge)
        img = load_image(imge).resize((512, 512)).convert("RGBA")
        img.save('./image.png', 'PNG')
        # Make the light QR modules transparent so only the dark modules overlay the art.
        iog = load_image(original).resize((512, 512)).convert("RGBA")
        iog.save('./imoge.png', 'PNG')
        new_data = []
        for item in iog.getdata():
            if item[0] in range(200, 256):
                new_data.append((255, 255, 255, 0))
            else:
                new_data.append(item)
        iog.putdata(new_data)
        iog.save('./image.png', 'PNG')
        pixel_data1 = list(iog.getdata())
        pixel_data2 = list(img.getdata())
        # Keep QR pixels where they are opaque, otherwise use the generated pixel.
        new_pixel_data = [pixel if pixel[3] > 0 else pixel_data2[j] for j, pixel in enumerate(pixel_data1)]
        if i == 1:
            new_imoge = Image.new("RGBA", img.size)
            new_imoge.putdata(new_pixel_data)
            new_imoge.save('./new_imoge.png', 'PNG')
            apol.append(new_imoge)
        else:
            new_image = Image.new("RGBA", img.size)
            new_image.putdata(new_pixel_data)
            new_image.save('./new_image.png', 'PNG')
            apol.append(new_image)
    apol.append(original)
    apol.append(cannyimage)
    apol.append(pannyimage)
    return apol
iface = gr.Interface(
    fn=plex,
    inputs=[
        gr.Textbox(label="QR Code URL"),
        gr.Textbox(label="prompt"),
        gr.Textbox(label="neg prompt"),
        gr.Dropdown(choices=models, label="some sd models", value=models[0], type="value"),
        gr.Slider(label="num inference steps", minimum=1, step=1, maximum=5, value=5),
        gr.Slider(label="prompt strength", minimum=0.01, step=0.01, maximum=0.99, value=0.20),
        gr.Slider(label="controlnet scale", minimum=0.01, step=0.01, maximum=0.99, value=0.80),
    ],
    outputs=gr.Gallery(label="out", columns=2),
    description="Running on cpu, very slow! by JoPmt.",
)
iface.queue(max_size=1, api_open=False)
iface.launch(max_threads=1)
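
# To run outside the Space (assumption: this file is saved as app.py), a plausible dependency
# set based on the imports above is:
#   pip install torch diffusers transformers accelerate gradio "qrcode[pil]" opencv-python pillow numpy
#   python app.py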