Update app.py
app.py
CHANGED
@@ -7,6 +7,9 @@ import os
 from huggingface_hub import hf_hub_download
 from diffusers import StableDiffusionXLPipeline, AutoencoderKL, EulerAncestralDiscreteScheduler
 from compel import Compel, ReturnedEmbeddingsType
+from PIL import Image, PngImagePlugin
+import json
+import io
 
 # =====================================
 # Prompt weights
@@ -216,6 +219,25 @@ def add_comma_after_pattern_ti(text):
     pattern = re.compile(r'\b\w+_\d+\b')
     modified_text = pattern.sub(lambda x: x.group() + ',', text)
     return modified_text
+
+# Add metadata to the image
+def add_metadata_to_image(image, metadata):
+    metadata_str = json.dumps(metadata)
+
+    # Convert PIL Image to PNG with metadata
+    img_with_metadata = image.copy()
+
+    # Create a PngInfo object and add metadata
+    png_info = PngImagePlugin.PngInfo()
+    png_info.add_text("parameters", metadata_str)
+
+    # Save to a byte buffer with metadata
+    buffer = io.BytesIO()
+    img_with_metadata.save(buffer, format="PNG", pnginfo=png_info)
+
+    # Reopen from buffer to get the image with metadata
+    buffer.seek(0)
+    return Image.open(buffer)
 
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>你现在运行在CPU上 但是此项目只支持GPU.</p>"
@@ -289,7 +311,24 @@ def infer(
         generator=generator,
         use_resolution_binning=use_resolution_binning,
     ).images[0]
-
+    # Create metadata dictionary
+    metadata = {
+        "prompt": original_prompt,
+        "processed_prompt": prompt,
+        "negative_prompt": negative_prompt,
+        "seed": seed,
+        "width": width,
+        "height": height,
+        "guidance_scale": guidance_scale,
+        "num_inference_steps": num_inference_steps,
+        "model": "MiaoMiao Harem 1.6",
+        "use_resolution_binning": use_resolution_binning
+    }
+
+    # Add metadata to the image
+    image_with_metadata = add_metadata_to_image(image, metadata)
+
+    return image_with_metadata, seed
 
 examples = [
     "nahida (genshin impact)",
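For reference, the "parameters" text chunk written by add_metadata_to_image can be read back with Pillow once the generated image has been saved as a PNG. This is a minimal sketch and not part of the commit; the file name output.png is only a placeholder.

import json
from PIL import Image

# Open a PNG produced by the pipeline and saved to disk
# ("output.png" is a hypothetical path).
img = Image.open("output.png")

# Pillow's PNG plugin exposes text chunks via img.info, so the JSON
# string stored under "parameters" can be parsed back into a dict.
params = json.loads(img.info["parameters"])
print(params["prompt"], params["seed"])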