import gradio as gr
import torch
from diffusers import StableDiffusionPipeline

# Load the fine-tuned Stable Diffusion checkpoint in half precision and move it to the GPU.
model_id = "pawelklimkowski/tylko-sd-dream"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

def inference(prompt, num_samples):
    # Generate `num_samples` images for the prompt in a single pipeline call.
    # Cast to int because Gradio sliders can pass float values.
    images = pipe(prompt, height=512, width=768, num_images_per_prompt=int(num_samples),
                  num_inference_steps=70, guidance_scale=7.5).images
    return images

with gr.Blocks() as demo:
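    # Lay out the UI: prompt and sample-count controls on the left, a gallery of results on the right.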
    gr.HTML("<h2 style=\"font-size: 2em; font-weight: bold\" align=\"center\"> Tylko Concept model</h2><p style=\"font-size: 1em; padding-top: 1em;\" align=\"center\">Generate your own Tylko as you would see the world through Tylko's lenses</p>")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="prompt")
            samples = gr.Slider(label="Samples", minimum=1, maximum=3, step=1, value=1)
            run = gr.Button(value="Generate concept")
        with gr.Column():
            gallery = gr.Gallery(show_label=True)

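    # Wire the button to the inference function; the examples below pre-fill the inputs without cached outputs.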
    run.click(inference, inputs=[prompt,samples], outputs=gallery)
    gr.Examples([["Bauhaus illustration called 'oklyt tv stand' by Wassily Kandinsky", 3], ["Painting called 'sideboard with diaries at coffeehouse in Paris' by Claude Monet", 3], ["a photo of oklyt", 1], ["living space in european home in oklyt style, 4k", 3]], [prompt, samples], gallery, inference, cache_examples=False)


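# Launch the app; debug=True keeps the process attached and prints errors to the console.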
demo.launch(debug=True)