Spaces: Menyu · Running on Zero

Commit def6395 · verified · 1 parent: 7b7f3d2
Menyu committed: Update app.py

Files changed (1): app.py (+62 −21)
app.py CHANGED
@@ -245,22 +245,50 @@ if not torch.cuda.is_available():
  MAX_SEED = np.iinfo(np.int32).max
  MAX_IMAGE_SIZE = 2048
  
+ # Define available models
+ AVAILABLE_MODELS = {
+     "NaixlMmmmix v50": "naixlMmmmix_v50.safetensors",
+     "Miaomiao": "miaomiao.safetensors",
+     # Add more models here as needed
+ }
+ 
+ # Global variables to store the loaded model and pipeline
+ loaded_model = None
+ pipe = None
+ vae = None
+ 
+ # Initialize VAE
  if torch.cuda.is_available():
      vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-     #token = os.environ.get("HF_TOKEN")  # read the token from an environment variable
-     model_path = hf_hub_download(
-         repo_id="Menyu/ModelFile",  # model repository name (not a full URL)
-         filename="naixlMmmmix_v50.safetensors"  #,
-         #use_auth_token=token
-     )
-     pipe = StableDiffusionXLPipeline.from_single_file(
-         model_path,
-         vae=vae,
-         use_safetensors=True,
-         torch_dtype=torch.float16,
-     )
-     pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-     pipe.to("cuda")
+ 
+ def load_model(model_name):
+     global pipe, loaded_model
+ 
+     # If the requested model is already loaded, return the existing pipeline
+     if loaded_model == model_name and pipe is not None:
+         return pipe
+ 
+     if torch.cuda.is_available():
+         model_filename = AVAILABLE_MODELS[model_name]
+         model_path = hf_hub_download(
+             repo_id="Menyu/ModelFile",
+             filename=model_filename
+         )
+ 
+         pipe = StableDiffusionXLPipeline.from_single_file(
+             model_path,
+             vae=vae,
+             use_safetensors=True,
+             torch_dtype=torch.float16,
+         )
+         pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+         pipe.to("cuda")
+ 
+         # Update the loaded model name
+         loaded_model = model_name
+ 
+         return pipe
+     return None
  
  def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
      if randomize_seed:
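The new load_model helper lazily downloads a checkpoint and keeps a single pipeline cached in module-level globals, so a repeated selection is a no-op and only a changed selection triggers a reload. A minimal, self-contained sketch of that cache-and-reload pattern (a stand-in loader instead of StableDiffusionXLPipeline.from_single_file, no GPU or download involved):

```python
# Sketch of the lazy-load/cache pattern used by load_model above (illustrative only).
AVAILABLE_MODELS = {"model-a": "a.safetensors", "model-b": "b.safetensors"}
loaded_model = None
pipe = None

def fake_load(path: str) -> dict:
    # Stand-in for the expensive pipeline construction.
    return {"checkpoint": path}

def load_model(model_name: str):
    global pipe, loaded_model
    if loaded_model == model_name and pipe is not None:
        return pipe                      # cache hit: reuse the resident pipeline
    pipe = fake_load(AVAILABLE_MODELS[model_name])
    loaded_model = model_name            # remember which checkpoint is loaded
    return pipe

first = load_model("model-a")
again = load_model("model-a")
assert first is again                    # same selection: nothing is reloaded
switched = load_model("model-b")         # new selection: checkpoint is swapped
```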
@@ -279,11 +307,18 @@ def infer(
      num_inference_steps: int = 30,
      randomize_seed: bool = True,
      use_resolution_binning: bool = True,
+     model_name: str = "NaixlMmmmix v50",
      progress=gr.Progress(track_tqdm=True),
  ):
+     # Load the selected model
+     pipe = load_model(model_name)
+     if pipe is None:
+         return None, seed
+ 
      seed = int(randomize_seed_fn(seed, randomize_seed))
      generator = torch.Generator().manual_seed(seed)
-     # Initialize the Compel instance
+ 
+     # Initialize Compel instance
      compel = Compel(
          tokenizer=[pipe.tokenizer, pipe.tokenizer_2],
          text_encoder=[pipe.text_encoder, pipe.text_encoder_2],
@@ -291,18 +326,17 @@ def infer(
          requires_pooled=[False, True],
          truncate_long_prompts=False
      )
+ 
      # Store the original prompt before processing
      original_prompt_text = prompt
  
-     # Call get_embed_new inside the infer function
      if not use_negative_prompt:
          negative_prompt = ""
  
      processed_prompt = get_embed_new(prompt, pipe, compel, only_convert_string=True)
      processed_negative_prompt = get_embed_new(negative_prompt, pipe, compel, only_convert_string=True)
-     conditioning, pooled = compel([processed_prompt, processed_negative_prompt])  # both must be processed together so their lengths stay equal
+     conditioning, pooled = compel([processed_prompt, processed_negative_prompt])  # Process both together to ensure equal length
  
-     # When calling pipe, use the new parameter names (make sure the names are correct)
      image = pipe(
          prompt_embeds=conditioning[0:1],
          pooled_prompt_embeds=pooled[0:1],
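The comment on the compel([...]) call is about tensor shapes: with truncate_long_prompts=False the positive and negative prompts can tokenize to different lengths, and encoding them in one batch keeps them padded to a common length so the [0:1] and [1:2] slices handed to the pipeline match. A rough, plain-PyTorch illustration of that equal-length requirement (not Compel's actual implementation, hypothetical shapes):

```python
import torch
import torch.nn.functional as F

def pad_and_stack(pos: torch.Tensor, neg: torch.Tensor) -> torch.Tensor:
    # pos, neg: (1, num_tokens, dim) embeddings whose token counts may differ
    n = max(pos.shape[1], neg.shape[1])
    pos = F.pad(pos, (0, 0, 0, n - pos.shape[1]))   # pad along the token axis
    neg = F.pad(neg, (0, 0, 0, n - neg.shape[1]))
    return torch.cat([pos, neg], dim=0)             # shape (2, n, dim)

conditioning = pad_and_stack(torch.randn(1, 77, 2048), torch.randn(1, 154, 2048))
prompt_embeds, negative_prompt_embeds = conditioning[0:1], conditioning[1:2]
assert prompt_embeds.shape == negative_prompt_embeds.shape
```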
@@ -326,7 +360,7 @@ def infer(
          "height": height,
          "guidance_scale": guidance_scale,
          "num_inference_steps": num_inference_steps,
-         "model": "naixlMmmmix_v50",
+         "model": model_name,
          "use_resolution_binning": use_resolution_binning,
          "PreUrl": "https://huggingface.co/spaces/Menyu/NaixlMix"
      }
@@ -348,7 +382,7 @@ h1{text-align:center}
  
  with gr.Blocks(css=css) as demo:
      gr.Markdown("""# 梦羽的模型生成器
-         ### 快速生成NaixlMmmmix v50模型的图片""")
+         ### 快速生成AI模型的图片""")
      with gr.Group():
          with gr.Row():
              prompt = gr.Text(
@@ -361,6 +395,11 @@ with gr.Blocks(css=css) as demo:
              run_button = gr.Button("生成", scale=0, variant="primary")
      result = gr.Image(label="Result", show_label=False, format="png")
      with gr.Accordion("高级选项", open=False):
+         model_selector = gr.Dropdown(
+             label="选择模型",
+             choices=list(AVAILABLE_MODELS.keys()),
+             value="NaixlMmmmix v50"
+         )
          with gr.Row():
              use_negative_prompt = gr.Checkbox(label="使用反向词条", value=True)
          negative_prompt = gr.Text(
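The dropdown's choices are the display names from AVAILABLE_MODELS, and the selected name is what infer eventually passes to load_model, which maps it to a checkpoint filename. A small standalone Gradio sketch of that wiring (hypothetical echo handler, no model loading):

```python
import gradio as gr

AVAILABLE_MODELS = {
    "NaixlMmmmix v50": "naixlMmmmix_v50.safetensors",
    "Miaomiao": "miaomiao.safetensors",
}

def show_checkpoint(model_name: str) -> str:
    # Stand-in for load_model: report which file would be fetched for the selection.
    return AVAILABLE_MODELS[model_name]

with gr.Blocks() as demo:
    model_selector = gr.Dropdown(
        label="选择模型",
        choices=list(AVAILABLE_MODELS.keys()),
        value="NaixlMmmmix v50",
    )
    checkpoint = gr.Textbox(label="checkpoint file")
    # The component itself is listed in `inputs`, so the handler receives the live selection.
    model_selector.change(show_checkpoint, inputs=model_selector, outputs=checkpoint)

# demo.launch()  # uncomment to run locally
```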
@@ -414,7 +453,7 @@ with gr.Blocks(css=css) as demo:
          examples=examples,
          inputs=prompt,
          outputs=[result, seed],
-         fn=infer
+         fn=lambda x: infer(x, model_name=model_selector.value)
      )
  
      use_negative_prompt.change(
@@ -436,6 +475,8 @@ with gr.Blocks(css=css) as demo:
              guidance_scale,
              num_inference_steps,
              randomize_seed,
+             gr.Checkbox(value=True, visible=False, label="use_resolution_binning"),  # Hidden default value
+             model_selector,
          ],
          outputs=[result, seed],
      )
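Gradio hands the components listed in inputs to the handler as positional arguments, in order, which is presumably why a hidden checkbox is inserted as a placeholder for use_resolution_binning ahead of model_selector: without it, the dropdown value would land in the wrong parameter of infer. A generic sketch of that positional mapping (toy handler, not the Space's infer):

```python
import gradio as gr

def handler(prompt: str, use_resolution_binning: bool, model_name: str) -> str:
    # Arguments arrive in the same order as the `inputs` list below.
    return f"{prompt!r}, binning={use_resolution_binning}, model={model_name}"

with gr.Blocks() as demo:
    prompt = gr.Text(label="prompt")
    model_selector = gr.Dropdown(choices=["a", "b"], value="a", label="model")
    out = gr.Textbox()
    run = gr.Button("run")
    run.click(
        handler,
        inputs=[
            prompt,
            gr.Checkbox(value=True, visible=False),  # placeholder keeps positions aligned
            model_selector,
        ],
        outputs=out,
    )

# demo.launch()  # uncomment to run locally
```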
 