File size: 3,398 Bytes
846fabc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
PATH = 'harpomaxx/deeplili'  # Hugging Face repo id: Stable Diffusion 1.5 fine-tune

# Standard library
import random

# Third-party
import torch
from PIL import Image
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler
from diffusers import UniPCMultistepScheduler
from diffusers import StableDiffusionPipeline
import gradio as gr

# Scale for classifier-free guidance (how strongly the prompt steers sampling).
# Defined once here; the duplicate re-assignment that used to follow the
# pipeline load has been removed.
guidance_scale = 8.5

# Load the full pipeline on CPU. local_files_only=False allows downloading
# the checkpoint from the Hub on first run.
pipe = StableDiffusionPipeline.from_pretrained(PATH, local_files_only=False).to("cpu")

def generate_images(prompt, guidance_scale, n_samples, num_inference_steps, seeds=None):
    """Run the global Stable Diffusion pipeline n_samples times for one prompt.

    Args:
        prompt: Text prompt passed to the pipeline.
        guidance_scale: Classifier-free guidance scale.
        n_samples: Number of images to generate.
        num_inference_steps: Denoising steps per image.
        seeds: Optional list of integer seeds (one per sample). When None
            (the default, matching the original behavior) a fresh random
            seed in [1, 10000] is drawn for each sample.

    Returns:
        List of PIL.Image objects, one per seed.
    """
    if seeds is None:
        seeds = [random.randint(1, 10000) for _ in range(n_samples)]
    images = []
    for seed in tqdm(seeds):
        # Seed torch's global RNG so each sample is individually reproducible.
        torch.manual_seed(seed)
        result = pipe(
            prompt,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
        )
        images.append(result.images[0])
    return images

def gr_generate_images(prompt: str, num_images: int, num_inference: int):
    """Gradio click callback: style-tag the prompt and generate images.

    Args:
        prompt: User-entered prompt text.
        num_images: Number of images to generate.
        num_inference: Denoising steps per image.

    Returns:
        List of PIL images for the output gallery.
    """
    # Append the fine-tune's style token. The leading space keeps it from
    # fusing with the last word of the user's prompt ("...hillsks style").
    prompt = prompt + " sks style"
    # BUG FIX: the previous call passed tokenizer/text_encoder/unet/vae/
    # scheduler, none of which exist in this module, and did not match
    # generate_images' (prompt, guidance_scale, n_samples, steps) signature —
    # it would have raised at the first button click.
    images = generate_images(prompt, guidance_scale, num_images, num_inference)
    return images

# Gradio UI: prompt textbox + two sliders + image gallery, wired to
# gr_generate_images via the "Generate image" button.
with gr.Blocks() as demo:
    # Example rows for gr.Examples below. Each row is
    # [prompt, number of images, number of inference steps]; note that only
    # the prompt is bound (inputs=[text]), so the two numbers are unused.
    examples = [
    [
        'A black and white cute character on top of a hill',
        1,
	30
    ],
    [
        'Bubbles and mountains in the sky',
        1,
	20
    ],
    [
        'A tree with multiple eyes and a small flower muted colors',
        1,
	20
    ],
    [
        "3d character on top of a hill",
        1,
	20
    ],
    [
        "a poster of a large forest with black and white characters",
        1,
	20
    ],
    ]
    # Header: logo image plus title/instructions rendered as Markdown/HTML.
    gr.Markdown(
    """
    <img src="https://github.com/harpomaxx/DeepLili/raw/main/images/lilifiallo/660.png" width="150" height="150">

    # #DeepLili v0.45b

    ## Enter your prompt and generate a work of art in the style of Lili's Toy Art paintings.
    """
    )

    with gr.Column(variant="panel"):
        with gr.Row(variant="compact"):
            # NOTE(review): .style() was deprecated and removed in Gradio 4
            # (container=False would move into the constructor) — this code
            # assumes a Gradio 3.x install; confirm the pinned version.
            text = gr.Textbox(
                label="Enter your prompt",
                show_label=False,
                max_lines=2,
                placeholder="a white and black drawing of  a cute character on top of a house with a little animal"
            ).style(
                container=False,
            )
          
        with gr.Row(variant="compact"):
            # Slider bound to gr_generate_images' num_images argument.
            num_images_slider = gr.Slider(
                minimum=1,
                maximum=10,
                step=1,
                value=1,
                label="Number of Images",
            )
   
            # Slider bound to gr_generate_images' num_inference argument.
            num_inference_steps_slider = gr.Slider(
                minimum=1,
                maximum=25,
                step=1,
                value=20,
                label="Number of Inference Steps",
            )

            btn = gr.Button("Generate image").style(full_width=False)
      
        # Output gallery receiving the list of PIL images from the callback.
        gallery = gr.Gallery(
            label="Generated images", show_label=False, elem_id="gallery"
        ).style(columns=[5], rows=[1], object_fit="contain", height="250px", width="250px")

    # Wire the button: (prompt, n images, n steps) -> gallery.
    btn.click(gr_generate_images, [text, num_images_slider,num_inference_steps_slider], gallery)
    # Clickable example prompts; only the textbox is populated (see note above
    # the examples list).
    gr.Examples(examples, inputs=[text])
    # Footer link to the author's page.
    gr.HTML(
    """
    <h6><a href="https://harpomaxx.github.io/"> harpomaxx </a></h6>
    """
    )

if __name__ == "__main__":
    # queue() serializes requests (generation is slow on CPU);
    # share=True exposes a public gradio.live URL.
    demo.queue().launch(share=True)