hsuwill000 committed on
Commit
7227a9c
·
verified ·
1 Parent(s): b1cb9e3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +61 -50
app.py CHANGED
@@ -8,60 +8,70 @@ from huggingface_hub import snapshot_download
8
  import openvino.runtime as ov
9
  from typing import Optional, Dict
10
 
 
 
11
  model_id = "Disty0/LCM_SoteMix"
12
- # model_id = "Disty0/sotediffusion-v2" #不可
13
 
14
- # 1024*512 記憶體不足
15
- HIGH = 512
16
- WIDTH = 512
17
 
18
  batch_size = -1
19
 
 
 
 
 
 
 
 
20
  pipe = OVStableDiffusionPipeline.from_pretrained(
21
- model_id,
22
- compile=False,
23
- ov_config={"CACHE_DIR": ""},
24
- torch_dtype=torch.int8, #
25
- # torch_dtype=torch.bfloat16, # 中
26
- # variant="fp16",
27
- # torch_dtype=torch.IntTensor, # 慢
28
- use_safetensors=False,
29
- )
30
 
31
  taesd_dir = snapshot_download(repo_id="deinferno/taesd-openvino")
32
 
33
- # 這裡直接使用 OVModelVaeDecoder,而不是自訂的 CustomOVModelVaeDecoder
34
- pipe.vae_decoder = OVModelVaeDecoder(
35
- model=OVBaseModel.load_model(f"{taesd_dir}/vae_decoder/openvino_model.xml"),
36
- parent_model=pipe,
37
- ov_config=None, # 如果沒有配置,傳入 None
38
- model_dir=taesd_dir
39
- )
40
-
41
- pipe.reshape(batch_size=-1, height=HIGH, width=WIDTH, num_images_per_prompt=1)
42
- # pipe.load_textual_inversion("./badhandv4.pt", "badhandv4")
43
- # pipe.load_textual_inversion("./Konpeto.pt", "Konpeto")
44
- # <shigure-ui-style>
45
- # pipe.load_textual_inversion("sd-concepts-library/shigure-ui-style")
46
- # pipe.load_textual_inversion("sd-concepts-library/ruan-jia")
47
- # pipe.load_textual_inversion("sd-concepts-library/agm-style-nao")
48
 
49
  pipe.compile()
50
 
51
- prompt = ""
52
- negative_prompt = "(worst quality, low quality, lowres), zombie, interlocked fingers,"
 
 
53
 
54
- def infer(prompt, negative_prompt):
55
  image = pipe(
56
- prompt=prompt,
57
- negative_prompt=negative_prompt,
58
- width=WIDTH,
59
- height=HIGH,
60
  guidance_scale=1.0,
61
  num_inference_steps=4,
62
  num_images_per_prompt=1,
63
- ).images[0]
64
-
65
  return image
66
 
67
 
@@ -72,23 +82,24 @@ examples = [
72
  "(illustration, 8k CG, extremely detailed),(whimsical),catgirl,teenage girl,playing in the snow,winter wonderland,snow-covered trees,soft pastel colors,gentle lighting,sparkling snow,joyful,magical atmosphere,highly detailed,fluffy cat ears and tail,intricate winter clothing,shallow depth of field,watercolor techniques,close-up shot,slightly tilted angle,fairy tale architecture,nostalgic,playful,winter magic,(masterpiece:2),best quality,ultra highres,original,extremely detailed,perfect lighting,",
73
  ]
74
 
75
- css = """
76
  #col-container {
77
  margin: 0 auto;
78
  max-width: 520px;
79
  }
80
  """
81
 
 
82
  power_device = "CPU"
83
 
84
  with gr.Blocks(css=css) as demo:
85
-
86
  with gr.Column(elem_id="col-container"):
87
  gr.Markdown(f"""
88
  # Disty0/LCM_SoteMix {WIDTH}x{HIGH}
89
  Currently running on {power_device}.
90
  """)
91
-
92
  with gr.Row():
93
  prompt = gr.Text(
94
  label="Prompt",
@@ -96,22 +107,22 @@ with gr.Blocks(css=css) as demo:
96
  max_lines=1,
97
  placeholder="Enter your prompt",
98
  container=False,
99
- )
100
  run_button = gr.Button("Run", scale=0)
101
-
102
  result = gr.Image(label="Result", show_label=False)
103
 
104
  gr.Examples(
105
- examples=examples,
106
- fn=infer,
107
- inputs=[prompt],
108
- outputs=[result]
109
  )
110
 
111
  run_button.click(
112
- fn=infer,
113
- inputs=[prompt],
114
- outputs=[result]
115
  )
116
 
117
- demo.queue().launch()
 
8
  import openvino.runtime as ov
9
  from typing import Optional, Dict
10
 
11
+
12
+
13
# Model checkpoint to load. "Disty0/sotediffusion-v2" was tried previously
# but is not usable here (original note: 不可 / "not possible").
model_id = "Disty0/LCM_SoteMix"

# Output resolution. 1024x512 ran out of memory (original note: 記憶體不足),
# so a 768x512 portrait canvas is used instead.
HIGH = 768
WIDTH = 512

# -1 lets the pipeline reshape accept a dynamic batch dimension.
batch_size = -1
21
 
22
class CustomOVModelVaeDecoder(OVModelVaeDecoder):
    """VAE decoder wrapper that attaches an externally loaded OpenVINO model.

    NOTE(review): the ``super(OVModelVaeDecoder, self)`` call is deliberate —
    it skips ``OVModelVaeDecoder.__init__`` and invokes the *grandparent*
    initializer directly, registering the given model under the
    ``"vae_decoder"`` component name. This lets the pipeline's decoder be
    swapped for a different model (here: TAESD) without re-running the
    subclass's own setup.
    """

    def __init__(
        self,
        model: ov.Model,
        parent_model: OVBaseModel,
        ov_config: Optional[Dict[str, str]] = None,
        model_dir: Optional[str] = None,  # was annotated `str` with None default
    ):
        # Intentionally bypass OVModelVaeDecoder.__init__ (see class docstring).
        super(OVModelVaeDecoder, self).__init__(
            model, parent_model, ov_config, "vae_decoder", model_dir
        )
28
+
29
# Build the OpenVINO Stable Diffusion pipeline. Compilation is deferred
# (compile=False) because the VAE decoder is replaced and the pipeline is
# reshaped before the final pipe.compile() call below.
pipe = OVStableDiffusionPipeline.from_pretrained(
    model_id,
    compile=False,
    ov_config={"CACHE_DIR": ""},
    torch_dtype=torch.int8,  # fast (original note: 快)
    # torch_dtype=torch.bfloat16,  # medium (original note: 中)
    # variant="fp16",
    # torch_dtype=torch.IntTensor,  # slow (original note: 慢)
    use_safetensors=False,
)
39
 
40
# Fetch the TAESD OpenVINO snapshot and swap it in as the pipeline's VAE
# decoder (presumably for faster/lighter decoding — TODO confirm).
taesd_dir = snapshot_download(repo_id="deinferno/taesd-openvino")

pipe.vae_decoder = CustomOVModelVaeDecoder(
    model=OVBaseModel.load_model(f"{taesd_dir}/vae_decoder/openvino_model.xml"),
    parent_model=pipe,
    model_dir=taesd_dir,
)
46
+
47
+
48
+
49
# Fix the pipeline to the chosen static resolution, then compile the graphs.
pipe.reshape(batch_size=-1, height=HIGH, width=WIDTH, num_images_per_prompt=1)
# Optional textual-inversion embeddings (kept disabled):
# pipe.load_textual_inversion("./badhandv4.pt", "badhandv4")
# pipe.load_textual_inversion("./Konpeto.pt", "Konpeto")
# <shigure-ui-style>
# pipe.load_textual_inversion("sd-concepts-library/shigure-ui-style")
# pipe.load_textual_inversion("sd-concepts-library/ruan-jia")
# pipe.load_textual_inversion("sd-concepts-library/agm-style-nao")

pipe.compile()
59
 
60
# Module-level defaults; negative_prompt is reused by the UI wiring below.
prompt = ""
negative_prompt = "(worst quality, low quality, lowres), zombie, interlocked fingers,"


def infer(prompt, negative_prompt):
    """Generate one image from the given prompts and return it as a PIL image."""
    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=WIDTH,
        height=HIGH,
        guidance_scale=1.0,
        num_inference_steps=4,
        num_images_per_prompt=1,
    )
    return output.images[0]
76
 
77
 
 
82
  "(illustration, 8k CG, extremely detailed),(whimsical),catgirl,teenage girl,playing in the snow,winter wonderland,snow-covered trees,soft pastel colors,gentle lighting,sparkling snow,joyful,magical atmosphere,highly detailed,fluffy cat ears and tail,intricate winter clothing,shallow depth of field,watercolor techniques,close-up shot,slightly tilted angle,fairy tale architecture,nostalgic,playful,winter magic,(masterpiece:2),best quality,ultra highres,original,extremely detailed,perfect lighting,",
83
  ]
84
 
85
# Page styling: center the main column and cap its width.
css = """
#col-container {
    margin: 0 auto;
    max-width: 520px;
}
"""

# Display-only label shown in the UI header.
power_device = "CPU"
94
 
95
def _run_infer(user_prompt):
    """UI adapter: infer() requires (prompt, negative_prompt), but the UI only
    collects the prompt. The original wiring passed inputs=[prompt] straight to
    infer, which raises TypeError on click; forward the module-level default
    negative_prompt instead."""
    return infer(user_prompt, negative_prompt)


with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown(f"""
        # Disty0/LCM_SoteMix {WIDTH}x{HIGH}
        Currently running on {power_device}.
        """)

        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)

        result = gr.Image(label="Result", show_label=False)

        gr.Examples(
            examples=examples,
            fn=_run_infer,
            inputs=[prompt],
            outputs=[result],
        )

    run_button.click(
        fn=_run_infer,
        inputs=[prompt],
        outputs=[result],
    )

demo.queue().launch()