from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler, AutoencoderKL
import torch
import gradio as gr
import spaces


# Detail-tweaker LoRA and the MSE-finetuned VAE shared by every checkpoint.
lora_path = "OedoSoldier/detail-tweaker-lora"
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to("cuda")

@spaces.GPU
def generate_image(prompt, negative_prompt, num_inference_steps=30, guidance_scale=7.0, model="Real6.0", num_images=1):
    # Map the model name selected in the UI to its Hugging Face checkpoint.
    if model == "Real5.0":
        model_id = "SG161222/Realistic_Vision_V5.0_noVAE"
    elif model == "Real5.1":
        model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
    else:
        model_id = "SG161222/Realistic_Vision_V6.0_B1_noVAE"

    # Rebuild the pipeline on each call so the selected checkpoint takes effect.
    pipe = DiffusionPipeline.from_pretrained(model_id, vae=vae).to("cuda")

    if model == "Real6.0":
        # Disable the NSFW safety checker: return the images unchanged, flag nothing.
        pipe.safety_checker = lambda images, **kwargs: (images, [False] * len(images))

    # Apply the detail-tweaker LoRA on top of the base checkpoint.
    pipe.load_lora_weights(lora_path)

    # Sample with DPM++ multistep using Karras sigmas.
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(
        pipe.scheduler.config,
        algorithm_type="dpmsolver++",
        use_karras_sigmas=True
    )

    
    # Generate the image(s)
    result = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        cross_attention_kwargs={"scale": 1},  # LoRA applied at full strength
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        width=720,
        height=720,
        num_images_per_prompt=num_images,
    )
    
    return result.images

title = """<h1 align="center">ProFaker</h1>"""
# Create the Gradio interface
with gr.Blocks() as demo:
    gr.HTML(title)
    
    with gr.Row():
        with gr.Column():
            # Input components
            prompt = gr.Textbox(
                label="Prompt",
                info="Enter your image description here...",
                lines=3
            )
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                info="Enter what you don't want in Image...",
                lines=3
            )
            generate_button = gr.Button("Generate Image")
            with gr.Accordion("Advanced Options", open=False):
                model = gr.Dropdown(
                    choices=["Real6.0","Real5.1","Real5.0"],
                    value="Real6.0",
                    label="Model",
                )
                num_images = gr.Slider(
                    minimum=1,
                    maximum=4,
                    value=1,
                    step=1,
                    label="Number of Images to Generate"
                )
                steps_slider = gr.Slider(
                    minimum=1,
                    maximum=100,
                    value=30,
                    step=1,
                    label="Number of Steps"
                )
                guidance_slider = gr.Slider(
                    minimum=1,
                    maximum=10,
                    value=7.0,
                    step=0.5,
                    label="Guidance Scale"
                )
        with gr.Column():
            # Output component
            gallery = gr.Gallery(
                label="Generated Images",
                show_label=True,
                elem_id="gallery",
                columns=2,
                rows=2
            )
            
    
    # Connect the interface to the generation function
    generate_button.click(
        fn=generate_image,
        inputs=[prompt, negative_prompt, steps_slider, guidance_slider, model, num_images],
        outputs=gallery
    )

demo.queue(max_size=10).launch(share=False)