# MindsEye Lite: run multiple text-to-image Hugging Face Spaces from a single Gradio UI
import gradio as gr
import random  # only needed by the commented-out CLIP-guided diffusion wrapper below
import os
import io, base64
from PIL import Image
import shortuuid

# Load the upstream Spaces as callable interfaces
latent = gr.Interface.load("spaces/multimodalart/latentdiffusion")
rudalle = gr.Interface.load("spaces/multimodalart/rudalle")

#print(rudalle)
#guided = gr.Interface.load("spaces/EleutherAI/clip-guided-diffusion")
#print(guided)
def text2image_latent(text, steps, width, height, images, diversity):
    # Call the Latent Diffusion Space, decode its base64-encoded results and save them as PNGs
    results = latent(text, steps, width, height, images, diversity)
    image_paths = []
    for image in results[1]:
        image_str = image[0].replace("data:image/png;base64,", "")
        decoded_bytes = base64.decodebytes(bytes(image_str, "utf-8"))
        img = Image.open(io.BytesIO(decoded_bytes))
        # Write each image to a temporary file with a short unique name
        url = shortuuid.uuid()
        temp_dir = './tmp'
        os.makedirs(temp_dir, exist_ok=True)
        image_path = f'{temp_dir}/{url}.png'
        img.save(image_path)
        image_paths.append(image_path)
    return results[0], image_paths
  
def text2image_rudalle(text, aspect, model):
    # Call the ruDALLE Space and return the first generated image
    image = rudalle(text, aspect, model)[0]
    return image

#def text2image_guided(text):
#    image = guided(text, None, 10, 600, 0, 0, 0, random.randint(0,2147483647), None, 50, 32)
#    print(image)
#    image = image[0]
#    return(image)

css_mt = {"margin-top": "1em"}

empty = gr.outputs.HTML()  # placeholder output component (currently unused)
mindseye = gr.Blocks()

with mindseye:
    gr.Markdown("<h1>MindsEye Lite <small><small>run multiple text-to-image models in one place</small></small></h1><p>MindsEye Lite orchestrates multiple text-to-image Hugging Face Spaces in one convenient space, so you can try different models. This work carries the spirit of <a href='https://multimodal.art/mindseye' target='_blank'>MindsEye Beta</a>, a tool to run multiple models with a single UI, but adjusted to the current hardware limitations of Spaces. MindsEye Lite was created by <a style='color: rgb(99, 102, 241);font-weight:bold' href='https://twitter.com/multimodalart' target='_blank'>@multimodalart</a>, keep up with the <a style='color: rgb(99, 102, 241);' href='https://multimodal.art/news' target='_blank'>latest multimodal ai art news here</a> and consider <a style='color: rgb(99, 102, 241);' href='https://www.patreon.com/multimodalart' target='_blank'>supporting us on Patreon</a></div></p>")
    #gr.Markdown("<style>.mx-auto.container .gr-form-gap {flex-direction: row; gap: calc(1rem * calc(1 - var(--tw-space-y-reverse)));} .mx-auto.container .gr-form-gap .flex-col, .mx-auto.container .gr-form-gap .gr-box{width: 100%}</style>")
    text = gr.inputs.Textbox(placeholder="Try writing something...", label="Prompt", default="A mecha robot in a favela")
    
    with gr.Column():
        with gr.Row():
            with gr.Tabs():
                with gr.TabItem("Latent Diffusion"):
                    steps = gr.inputs.Slider(label="Steps - more steps can increase quality but will take longer to generate", default=45, maximum=50, minimum=1, step=1)
                    #width = gr.inputs.Slider(label="Width", default=256, step=32, maximum=256, minimum=32)
                    #height = gr.inputs.Slider(label="Height", default=256, step=32, maximum = 256, minimum=32)
                    #images = gr.inputs.Slider(label="Images - How many images you wish to generate", default=2, step=1, minimum=1, maximum=4)
                    #diversity = gr.inputs.Slider(label="Diversity scale - How different from one another you wish the images to be",default=5.0, minimum=1.0, maximum=15.0)
                    #get_image_latent = gr.Button("Generate Image",css=css_mt)
    #                
    #            with gr.TabItem("ruDALLE"):
    #                aspect = gr.inputs.Radio(label="Aspect Ratio", choices=["Square", "Horizontal", "Vertical"],default="Square")
    #                model = gr.inputs.Dropdown(label="Model", choices=["Surrealism","Realism", "Emoji"], default="Surrealism")
    #                get_image_rudalle = gr.Button("Generate Image",css=css_mt)
    #with gr.Row():
    #with gr.Tabs():
    #            with gr.TabItem("Image output"):
    #                image = gr.outputs.Image()
    #            with gr.TabItem("Gallery output"):
    #                gallery = gr.Gallery(label="Individual images")
    
    #get_image_latent.click(text2image_latent, inputs=[text,steps,width,height,images,diversity], outputs=[image,gallery])
    #get_image_rudalle.click(text2image_rudalle, inputs=[text,aspect,model], outputs=image)
mindseye.launch(share=False)