PATH = 'harpomaxx/deeplili'  # fine-tuned Stable Diffusion 1.5 model

import torch
import random
import gradio as gr
from tqdm.auto import tqdm
# from diffusers import StableDiffusionPipeline, LMSDiscreteScheduler
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler
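
# Generate `n_samples` images for a prompt, one per random seed, using the
# global `pipe` defined in the __main__ block below.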
def generate_images(prompt, guidance_scale, n_samples, num_inference_steps):
    # Use a fresh random seed per sample so repeated runs produce different images.
    seeds = [random.randint(1, 10000) for _ in range(n_samples)]
    images = []
    for seed in tqdm(seeds):
        torch.manual_seed(seed)
        image = pipe(prompt,
                     num_inference_steps=num_inference_steps,
                     guidance_scale=guidance_scale).images[0]
        images.append(image)
    return images
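
# Gradio callback: appends the "sks style" trigger token of the fine-tuned
# model to the user prompt before generating.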
def gr_generate_images(prompt: str, num_images=1, num_inference=20, guidance_scale=8):
    prompt = prompt + " sks style"  # leading space keeps the trigger token separate from the prompt
    images = generate_images(prompt, guidance_scale, num_images, num_inference)
    return images[0]  # the output component is a single gr.Image, so return one image
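
# Build the Gradio UI: header, prompt textbox, generate button, output image,
# and clickable example prompts.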
with gr.Blocks() as demo:
    # Example prompts. The numeric columns of the original examples (image
    # count and inference steps) were dropped: gr.Examples below only binds
    # the prompt textbox, and extra columns would not match its inputs.
    examples = [
        ['A black and white cute character on top of a hill'],
        ['Bubbles and mountains in the sky'],
        ['A tree with multiple eyes and a small flower muted colors'],
        ['3d character on top of a hill'],
        ['a poster of a large forest with black and white characters'],
    ]
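    # App header: logo plus title, rendered as Markdown/HTML.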
    gr.Markdown(
        """
        <img src="https://github.com/harpomaxx/DeepLili/raw/main/images/lilifiallo/660.png" width="150" height="150">
        # #DeepLili v0.5b
        ## Enter your prompt and generate a work of art in the style of Lili's Toy Art paintings.
        ## (English, Spanish)
        """
    )
    with gr.Column(variant="panel"):
        with gr.Row(variant="compact"):
            text = gr.Textbox(
                label="Enter your prompt",
                show_label=False,
                max_lines=2,
                placeholder="a white and black drawing of a cute character on top of a house with a little animal",
            ).style(
                container=False,
            )
        with gr.Row(variant="compact"):
            # Sliders kept for reference; the fixed values further below are used instead.
            # num_images_slider = gr.Slider(
            #     minimum=1, maximum=10, step=1, value=1,
            #     label="Number of Images",
            # )
            # num_inference_steps_slider = gr.Slider(
            #     minimum=1, maximum=25, step=1, value=20,
            #     label="Inference Steps",
            # )
            # guidance_slider = gr.Slider(
            #     minimum=1, maximum=14, step=1, value=8,
            #     label="Guidance Scale",
            # )
            btn = gr.Button("Generate image").style(full_width=False)
        # gallery = gr.Gallery(
        #     label="Generated images", show_label=False, elem_id="gallery"
        # ).style(columns=[1], rows=[1], object_fit="scale-down", height="50%", width="50%")
        gallery = gr.Image(
            label="Generated image",
            show_label=False,
            tool=None,        # disable the built-in editing tools
            source="upload",  # also allows image uploading
        ).style(
            width="50%",             # half the view width
            height="50%",            # half the view height
            object_fit="scale-down"  # keep aspect ratio without cropping
        )
        # Fixed generation settings; unused here, since btn.click only passes the
        # prompt and gr_generate_images falls back to its default arguments.
        num_images_slider = 1
        num_inference_steps_slider = 20
        guidance_slider = 8
        btn.click(gr_generate_images, [text], gallery)
    gr.Examples(examples, inputs=[text])
    gr.HTML(
        """
        <h6><a href="https://harpomaxx.github.io/"> harpomaxx </a></h6>
        """
    )
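
# Entry point: load the model once, then serve the UI.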
if __name__ == "__main__":
    # Pick device and dtype: float16 on GPU to save memory; the CPU path
    # falls back to float32, since float16 inference is not supported there.
    if torch.cuda.is_available():
        device = "cuda"
        dtype = torch.float16
    else:
        device = "cpu"
        dtype = torch.float32
    # Load the scheduler and pipeline once at startup and move them to the device.
    dpm = DPMSolverMultistepScheduler.from_pretrained(PATH, subfolder="scheduler")
    pipe = StableDiffusionPipeline.from_pretrained(PATH, torch_dtype=dtype, scheduler=dpm).to(device)
    demo.queue(concurrency_count=2).launch()