File size: 3,323 Bytes
dffbf86
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
import torch
import gradio as gr
import spaces


# Hugging Face repo id of the LoRA weights loaded into every pipeline below.
lora_path = "OedoSoldier/detail-tweaker-lora"

@spaces.GPU
def generate_image(prompt, negative_prompt, num_inference_steps=50, guidance_scale=7.5,model="Real6.0"):
    """
    Generate a single 960x960 image with Stable Diffusion.

    Args:
        prompt: Text description of the desired image.
        negative_prompt: Text describing what to exclude from the image.
        num_inference_steps: Number of denoising steps (default 50).
        guidance_scale: Classifier-free guidance strength (default 7.5).
        model: One of "Real6.0", "Real5.1", "Real5.0"; any other value
            falls back to the V6.0 checkpoint.

    Returns:
        The first PIL image produced by the pipeline.
    """
    # Map the UI choice to a checkpoint; anything unrecognised gets V6.0,
    # matching the original else-branch behaviour.
    checkpoints = {
        "Real5.0": "SG161222/Realistic_Vision_V5.0_noVAE",
        "Real5.1": "SG161222/Realistic_Vision_V5.1_noVAE",
    }
    model_id = checkpoints.get(model, "SG161222/Realistic_Vision_V6.0_B1_noVAE")

    # NOTE: the pipeline is rebuilt on every call — presumably intentional for
    # the ZeroGPU (@spaces.GPU) execution model; confirm before caching it.
    pipe = DiffusionPipeline.from_pretrained(model_id).to("cuda")

    # Replace the safety checker with a stub that never flags an image
    # (only done for the V6.0 checkpoint, as in the original code).
    if model == "Real6.0":
        pipe.safety_checker = lambda images, **kwargs: (images, [False] * len(images))

    # Apply the detail-tweaker LoRA on top of the base weights.
    pipe.load_lora_weights(lora_path)

    # Swap in a Karras-sigma DPM-Solver++ scheduler for higher-quality sampling.
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(
        pipe.scheduler.config,
        algorithm_type="dpmsolver++",
        use_karras_sigmas=True,
    )

    # Run the pipeline; cross_attention_kwargs scale=1 applies the LoRA fully.
    result = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        cross_attention_kwargs={"scale": 1},
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        width=960,
        height=960,
    )
    return result.images[0]

# Assemble the Gradio front-end
with gr.Blocks() as demo:
    gr.Markdown("# ProFaker ImageGen")

    with gr.Row():
        with gr.Column():
            # Left column: user-facing inputs
            prompt_box = gr.Textbox(
                value="a photo of an astronaut riding a horse on mars",
                placeholder="Enter your image description here...",
                label="Prompt",
            )
            negative_box = gr.Textbox(
                placeholder="Enter what you don't want in photo",
                label="Negative Prompt",
            )
            steps_input = gr.Slider(
                label="Number of Inference Steps",
                minimum=1,
                maximum=100,
                step=1,
                value=50,
            )
            guidance_input = gr.Slider(
                label="Guidance Scale",
                minimum=1,
                maximum=20,
                step=0.5,
                value=7.5,
            )
            model_choice = gr.Dropdown(
                label="Model",
                choices=["Real6.0","Real5.1","Real5.0"],
                value="Real6.0",
            )
            run_button = gr.Button("Generate Image")

        with gr.Column():
            # Right column: generated result
            result_image = gr.Image(label="Generated Image")

    # Wire the button to the generation function
    run_button.click(
        fn=generate_image,
        inputs=[prompt_box, negative_box, steps_input, guidance_input, model_choice],
        outputs=result_image,
    )

    gr.Markdown("""
    ## Instructions
    1. Enter your desired image description in the prompt field
    2. Adjust the inference steps (higher = better quality but slower)
    3. Adjust the guidance scale (higher = more prompt adherence)
    4. Click 'Generate Image' and wait for the result
    """)

# Launch the interface when run as a script
if __name__ == "__main__":
    demo.launch(share=True)