from diffusers import AutoPipelineForText2Image
import torch
import gradio as gr
from PIL import Image
import os, random
from diffusers.utils import load_image
from accelerate import Accelerator

# Accelerate handles device placement for the pipelines (this Space runs on CPU).
accelerator = Accelerator()
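
# Text-to-image checkpoints selectable from the dropdown in the UI.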
models = [
    "prompthero/midjourney-v4-diffusion",
    "nitrosocke/classic-anim-diffusion",
    "stablediffusionapi/disney-pixal-cartoon",
    "stablediffusionapi/edge-of-realism",
    "sd-dreambooth-library/original-character-cyclps",
    "AIArtsChannel/steampunk-diffusion",
    "nitrosocke/mo-di-diffusion",
    "MirageML/fantasy-scene",
    "wavymulder/lomo-diffusion",
    "sd-dreambooth-library/fashion",
    "DucHaiten/DucHaitenDreamWorld",
    "VegaKH/Ultraskin",
    "kandinsky-community/kandinsky-2-1",
    "plasmo/woolitize-768sd1-5",
    "plasmo/food-crit",
    "johnslegers/epic-diffusion-v1.1",
    "robotjung/SemiRealMix",
    "prompthero/linkedin-diffusion",
    "RayHell/popupBook-diffusion",
    "MirageML/lowpoly-world",
    "warp-ai/wuerstchen",
    "deadman44/SD_Photoreal_Merged_Models",
    "johnslegers/epic-diffusion",
    "wavymulder/modelshoot",
    "Fictiverse/Stable_Diffusion_VoxelArt_Model",
    "nousr/robo-diffusion-2-base",
    "darkstorm2150/Protogen_v2.2_Official_Release",
    "hassanblend/HassanBlend1.5.1.2",
    "hassanblend/hassanblend1.4",
    "nitrosocke/redshift-diffusion",
    "prompthero/openjourney-v2",
    "nitrosocke/Arcane-Diffusion",
    "Lykon/DreamShaper",
    "wavymulder/Analog-Diffusion",
    "dreamlike-art/dreamlike-diffusion-1.0",
    "dreamlike-art/dreamlike-photoreal-2.0",
    "digiplay/RealismEngine_v1",
    "digiplay/AIGEN_v1.4_diffusers",
    "stablediffusionapi/dreamshaper-v6",
    "axolotron/ice-cream-animals",
    "TheLastBen/froggy-style-v21-768",
    "FloydianSound/Nixeu_Diffusion_v1-5",
    "digiplay/PotoPhotoRealism_v1",
]
### bor = len(models)
### current = random.randint(1, bor)

def plex(modil, prompt, neg_prompt):
    # Load the selected checkpoint and run a short CPU-only inference pass (10 steps).
    pipe = accelerator.prepare(AutoPipelineForText2Image.from_pretrained(modil, torch_dtype=torch.float32))
    pipe = accelerator.prepare(pipe.to("cpu"))
    image = pipe(prompt=prompt, negative_prompt=neg_prompt, num_inference_steps=10).images[0]
    return image
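
# Gradio UI: model dropdown, prompt and negative-prompt textboxes, single image output.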
iface = gr.Interface(
    fn=plex,
    inputs=[
        gr.Dropdown(choices=models, type="value", value=models[0]),
        gr.Textbox(label="Prompt"),
        gr.Textbox(label="negative_prompt", value="low quality, bad quality"),
    ],
    outputs=gr.Image(label="Generated Output Image"),
    title="AutoPipelineForText2Image_SD_Multi",
    description="AutoPipelineForText2Image_SD_Multi",
)
iface.queue(max_size=1, api_open=False)
iface.launch(max_threads=1)