import gradio as gr
import torch
import openvino as ov
from typing import Dict, Optional
from optimum.intel.openvino.modeling_diffusion import OVModelVaeDecoder, OVBaseModel, OVStableDiffusionPipeline
from huggingface_hub import snapshot_download
model_id = "hsuwill000/Fluently-v4-LCM-openvino"

HEIGHT = 1024
WIDTH = 512
batch_size = -1  # -1 keeps the batch dimension dynamic; set a positive integer to fix it
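
# The deinferno/taesd-openvino pattern: calling super(OVModelVaeDecoder, self).__init__
# deliberately skips OVModelVaeDecoder's own __init__ and goes straight to its parent,
# so an explicit model_dir can be passed for the swapped-in TAESD decoder weights.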
class CustomOVModelVaeDecoder(OVModelVaeDecoder):
    def __init__(
        self, model: ov.Model, parent_model: OVBaseModel, ov_config: Optional[Dict[str, str]] = None, model_dir: str = None,
    ):
        super(OVModelVaeDecoder, self).__init__(model, parent_model, ov_config, "vae_decoder", model_dir)

pipe = OVStableDiffusionPipeline.from_pretrained(
    model_id,
    compile=False,  # defer compilation until after reshape()
    ov_config={"CACHE_DIR": ""},
    torch_dtype=torch.bfloat16,
    safety_checker=None,
    use_safetensors=False,
)
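
# Swap in the Tiny AutoEncoder (TAESD) decoder: a much smaller approximate
# VAE decoder that trades a little image quality for faster decoding.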
taesd_dir = snapshot_download(repo_id="deinferno/taesd-openvino")
pipe.vae_decoder = CustomOVModelVaeDecoder(
    model=OVBaseModel.load_model(f"{taesd_dir}/vae_decoder/openvino_model.xml"),
    parent_model=pipe,
    model_dir=taesd_dir,
)

print(pipe.scheduler.compatibles)

# Statically reshape to the target resolution, then compile once up front
# instead of at first inference.
pipe.reshape(batch_size=batch_size, height=HEIGHT, width=WIDTH, num_images_per_prompt=1)
pipe.compile()

negative_prompt = "EasyNegative, worst quality, low quality, normal quality, lowres, monochrome, grayscale, skin spots, acnes, skin blemishes, age spot, 6 more fingers on one hand, deformity, bad legs, error legs, bad feet, malformed limbs, extra limbs, ugly, poorly drawn hands, poorly drawn feet, poorly drawn face, text, mutilated, extra fingers, mutated hands, mutation, bad anatomy, cloned face, disfigured, fused fingers"

def infer(prompt, negative_prompt=negative_prompt, num_inference_steps=8):
    # negative_prompt defaults to the module-level string above, since the
    # Gradio click handler only passes the positive prompt.
    image = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=WIDTH,
        height=HEIGHT,
        guidance_scale=1.0,  # LCM checkpoints expect little to no CFG
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=1,
    ).images[0]
    return image
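
# Minimal Gradio UI: one prompt box, a run button, and the generated image.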
css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""
power_device = "CPU"

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # {model_id.split('/')[1]} {WIDTH}x{HEIGHT}
        Currently running on {power_device}.
        """)

        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=1)

        result = gr.Image(label="Result", show_label=False)

        run_button.click(
            fn=infer,
            inputs=[prompt],
            outputs=[result],
        )
demo.queue().launch()