File size: 3,050 Bytes
93b0d61 3852617 13d4422 190a4b0 6904e5b bc4e9b2 e1e9b2f 3cb41a0 93b0d61 2b71a62 9e31b54 3852617 9e31b54 3852617 93b0d61 03b3c4e 3820dc1 03b3c4e 9e31b54 3852617 5effa4b 9e31b54 3852617 9014042 c082d19 93b0d61 82e6660 93b0d61 03b3c4e eabda09 ee70940 a981e33 93b0d61 03b3c4e 82e6660 5c31923 93b0d61 03b3c4e 93b0d61 03b3c4e 93b0d61 d14eae4 93b0d61 ee70940 93b0d61 03b3c4e 754e435 03b3c4e 93b0d61 03b3c4e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 |
import gradio as gr
import torch
from optimum.intel.openvino.modeling_diffusion import OVModelVaeDecoder, OVBaseModel, OVStableDiffusionPipeline
from huggingface_hub import snapshot_download
import openvino.runtime as ov
from typing import Optional, Dict
model_id = "hsuwill000/sd-turbo-openvino"
HIGH = 512
WIDTH = 512
batch_size = -1 # Or set it to a specific positive integer if needed
"""
class CustomOVModelVaeDecoder(OVModelVaeDecoder):
def __init__(
self, model: ov.Model, parent_model: OVBaseModel, ov_config: Optional[Dict[str, str]] = None, model_dir: str = None,
):
super(OVModelVaeDecoder, self).__init__(model, parent_model, ov_config, "vae_decoder", model_dir)
"""
pipe = OVStableDiffusionPipeline.from_pretrained(
model_id,
compile=False,
ov_config={"CACHE_DIR": ""},
torch_dtype=torch.bfloat16, # More standard dtype for speed
safety_checker=None,
use_safetensors=False,
)
"""
taesd_dir = snapshot_download(repo_id="deinferno/taesd-openvino")
pipe.vae_decoder = CustomOVModelVaeDecoder(model = OVBaseModel.load_model(f"{taesd_dir}/vae_decoder/openvino_model.xml"),
parent_model = pipe,
model_dir = taesd_dir
)
"""
print(pipe.scheduler.compatibles)
pipe.reshape(batch_size=batch_size, height=HIGH, width=WIDTH, num_images_per_prompt=2)
pipe.compile()
prompt = ""
negative_prompt = "Easy Negative, worst quality, low quality, normal quality, lowers, monochrome, grayscales, skin spots, acnes, skin blemishes, age spot, 6 more fingers on one hand, deformity, bad legs, error legs, bad feet, malformed limbs, extra limbs, ugly, poorly drawn hands, poorly drawn feet, poorly drawn face, text, mutilated, extra fingers, mutated hands, mutation, bad anatomy, cloned face, disfigured, fused fingers"
def infer(prompt, negative_prompt, num_inference_steps=1):
image = pipe(
prompt=prompt,
negative_prompt=negative_prompt,
width=WIDTH,
height=HIGH,
guidance_scale=0.0,
num_inference_steps=num_inference_steps,
num_images_per_prompt=1,
).images[0]
return image
css = """
#col-container {
margin: 0 auto;
max-width: 520px;
}
"""
power_device = "CPU"
with gr.Blocks(css=css) as demo:
with gr.Column(elem_id="col-container"):
gr.Markdown(f"""
# {model_id.split('/')[1]} {WIDTH}x{HIGH}
Currently running on {power_device}.
""")
with gr.Row():
prompt = gr.Text(
label="Prompt",
show_label=False,
max_lines=1,
placeholder="Enter your prompt",
container=False,
)
run_button = gr.Button("Run", scale=1)
result = gr.Image(label="Result", show_label=False)
run_button.click(
fn=infer,
inputs=[prompt],
outputs=[result]
)
demo.queue().launch()
|