Spaces: Running on Zero

Update app.py

app.py CHANGED
@@ -7,6 +7,9 @@ import os
 from huggingface_hub import hf_hub_download
 from diffusers import StableDiffusionXLPipeline, AutoencoderKL, EulerAncestralDiscreteScheduler
 from compel import Compel, ReturnedEmbeddingsType
+from PIL import Image, PngImagePlugin
+import json
+import io
 
 # =====================================
 # Prompt weights
@@ -212,6 +215,25 @@ def get_embed_new(prompt, pipeline, compel, only_convert_string=False, compel_pr
 
     return merge_embeds([prompt_attention_to_invoke_prompt(i) for i in global_prompt_chanks], compel)
 
+# Add metadata to the image
+def add_metadata_to_image(image, metadata):
+    metadata_str = json.dumps(metadata)
+
+    # Convert PIL Image to PNG with metadata
+    img_with_metadata = image.copy()
+
+    # Create a PngInfo object and add metadata
+    png_info = PngImagePlugin.PngInfo()
+    png_info.add_text("parameters", metadata_str)
+
+    # Save to a byte buffer with metadata
+    buffer = io.BytesIO()
+    img_with_metadata.save(buffer, format="PNG", pnginfo=png_info)
+
+    # Reopen from buffer to get the image with metadata
+    buffer.seek(0)
+    return Image.open(buffer)
+
 def add_comma_after_pattern_ti(text):
     pattern = re.compile(r'\b\w+_\d+\b')
     modified_text = pattern.sub(lambda x: x.group() + ',', text)
@@ -272,6 +294,8 @@ def infer(
     # Call get_embed_new inside the infer function
     if not use_negative_prompt:
        negative_prompt = ""
+
+    original_prompt = prompt  # Store original prompt for metadata
     prompt = get_embed_new(prompt, pipe, compel, only_convert_string=True)
     negative_prompt = get_embed_new(negative_prompt, pipe, compel, only_convert_string=True)
     conditioning, pooled = compel([prompt, negative_prompt])  # Must be processed together to keep the lengths equal
@@ -289,7 +313,25 @@ def infer(
         generator=generator,
         use_resolution_binning=use_resolution_binning,
     ).images[0]
-    return image, seed
+
+    # Create metadata dictionary
+    metadata = {
+        "prompt": original_prompt,
+        "processed_prompt": prompt,
+        "negative_prompt": negative_prompt,
+        "seed": seed,
+        "width": width,
+        "height": height,
+        "guidance_scale": guidance_scale,
+        "num_inference_steps": num_inference_steps,
+        "model": "MiaoMiaoPixel_V1.0",
+        "use_resolution_binning": use_resolution_binning,
+        "PreUrl": "https://huggingface.co/spaces/Menyu/MiaoPixel"
+    }
+    # Add metadata to the image
+    image_with_metadata = add_metadata_to_image(image, metadata)
+
+    return image_with_metadata, seed
 
 examples = [
     "nahida (genshin impact)",
|