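"""ProFaker: a Gradio app that generates images with the Realistic Vision
Stable Diffusion checkpoints, a shared MSE-trained VAE, the detail-tweaker
LoRA, and a DPM-Solver++ (Karras sigmas) scheduler."""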
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler, AutoencoderKL
from transformers import CLIPTextModel, CLIPTokenizer
import torch
import gradio as gr
import spaces
# LoRA that adds extra fine detail; its scale is set via cross_attention_kwargs below.
lora_path = "OedoSoldier/detail-tweaker-lora"

# The "noVAE" Realistic Vision checkpoints ship without a VAE, so a shared
# MSE-trained VAE is loaded once at startup and reused for every request.
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse").to("cuda")
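
# spaces.GPU requests a GPU (ZeroGPU) for the duration of each call on
# Hugging Face Spaces; everything except the shared VAE is rebuilt per request.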
@spaces.GPU
def generate_image(prompt, negative_prompt, num_inference_steps=30, guidance_scale=7.0,
                   model="Real6.0", num_images=1, width=512, height=512):
    """Build the selected Realistic Vision pipeline and generate images."""
    # Map the UI model name to the corresponding Hugging Face checkpoint.
    if model == "Real5.0":
        model_id = "SG161222/Realistic_Vision_V5.0_noVAE"
    elif model == "Real5.1":
        model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
    else:
        model_id = "SG161222/Realistic_Vision_V6.0_B1_noVAE"

    # Load the text encoder and tokenizer that belong to the selected checkpoint.
    text_encoder = CLIPTextModel.from_pretrained(
        model_id,
        subfolder="text_encoder"
    ).to("cuda")
    tokenizer = CLIPTokenizer.from_pretrained(
        model_id,
        subfolder="tokenizer"
    )

    # Assemble the pipeline with the shared VAE.
    pipe = DiffusionPipeline.from_pretrained(
        model_id,
        text_encoder=text_encoder,
        tokenizer=tokenizer,
        vae=vae
    ).to("cuda")

    # Disable the safety checker for the V6.0 checkpoint.
    if model == "Real6.0":
        pipe.safety_checker = lambda images, **kwargs: (images, [False] * len(images))

    # Apply the detail-tweaker LoRA and switch to DPM-Solver++ with Karras sigmas.
    pipe.load_lora_weights(lora_path)
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(
        pipe.scheduler.config,
        algorithm_type="dpmsolver++",
        use_karras_sigmas=True
    )

    # Encode the prompt and negative prompt into embeddings.
    text_inputs = tokenizer(
        prompt,
        padding="max_length",
        max_length=tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt"
    ).to("cuda")
    negative_text_inputs = tokenizer(
        negative_prompt,
        padding="max_length",
        max_length=tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt"
    ).to("cuda")
    prompt_embeds = text_encoder(text_inputs.input_ids)[0]
    negative_prompt_embeds = text_encoder(negative_text_inputs.input_ids)[0]

    # Generate the images; cross_attention_kwargs sets the LoRA scale.
    result = pipe(
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        cross_attention_kwargs={"scale": 1},
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        width=width,
        height=height,
        num_images_per_prompt=num_images
    )
    return result.images
title = """<h1 align="center">ProFaker</h1>"""
# Create the Gradio interface
with gr.Blocks() as demo:
    gr.HTML(title)
    with gr.Row():
        with gr.Column():
            # Input components
            prompt = gr.Textbox(
                label="Prompt",
                info="Enter your image description here...",
                lines=3
            )
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                info="Enter what you don't want in the image...",
                lines=3
            )
            generate_button = gr.Button("Generate Image")
            with gr.Accordion("Advanced Options", open=False):
                model = gr.Dropdown(
                    choices=["Real6.0", "Real5.1", "Real5.0"],
                    value="Real6.0",
                    label="Model",
                )
                num_images = gr.Slider(  # Number of images per generation
                    minimum=1,
                    maximum=4,
                    value=1,
                    step=1,
                    label="Number of Images to Generate"
                )
                width = gr.Slider(
                    minimum=256,
                    maximum=1024,
                    value=512,
                    step=64,
                    label="Image Width"
                )
                height = gr.Slider(
                    minimum=256,
                    maximum=1024,
                    value=512,
                    step=64,
                    label="Image Height"
                )
                steps_slider = gr.Slider(
                    minimum=1,
                    maximum=100,
                    value=30,
                    step=1,
                    label="Number of Steps"
                )
                guidance_slider = gr.Slider(
                    minimum=1,
                    maximum=10,
                    value=7.0,
                    step=0.5,
                    label="Guidance Scale"
                )
        with gr.Column():
            # Output component
            gallery = gr.Gallery(
                label="Generated Images",
                show_label=True,
                elem_id="gallery",
                columns=2,
                rows=2
            )

    # Connect the interface to the generation function
    generate_button.click(
        fn=generate_image,
        inputs=[prompt, negative_prompt, steps_slider, guidance_slider, model, num_images, width, height],
        outputs=gallery
    )
demo.queue(max_size=10).launch(share=False)