ChiKyi committed
Commit d7cfdf9 · 1 Parent(s): 4c6161c

update model stable

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -49,7 +49,7 @@ blip_generator = BlipForConditionalGeneration.from_pretrained(
 pipe.to(accelerator.device)
 blip_generator.to(accelerator.device)
 
-def colorize_single_image(image, positive_prompt, negative_prompt, caption_generate):
+def colorize_image_sdxl(image, positive_prompt=None, negative_prompt=None, caption_generate=True, seed=123, infer_steps=5):
     image = PIL.Image.fromarray(image)
 
     torch.cuda.empty_cache()
@@ -63,13 +63,13 @@ def colorize_single_image(image, positive_prompt, negative_prompt, caption_gener
     prompt = [positive_prompt + ", " + caption]
 
     colorized_image = pipe(prompt=prompt,
-                           num_inference_steps=5,
-                           generator=torch.manual_seed(0),
+                           num_inference_steps=infer_steps,
+                           generator=torch.manual_seed(seed),
                            image=control_image,
                            negative_prompt=negative_prompt).images[0]
     result_image = apply_color(control_image, colorized_image)
     result_image = result_image.resize(original_size)
-    return result_image, caption if caption_generate else gr.update(visible=False)
+    return result_image, caption
 
 # Function to load models for the autoencoder and GAN
 def load_autoencoder_model(auto_model_path):
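For reference, a minimal sketch of how the renamed function might be exercised with the new seed and infer_steps parameters. It assumes app.py is importable from the working directory and that the pipelines defined at its top (pipe, blip_generator) load successfully; the input file name and prompt strings are placeholders, not part of the commit.

import numpy as np
import PIL.Image

from app import colorize_image_sdxl  # hypothetical import; runs the module-level model setup

# The function expects a numpy array (it calls PIL.Image.fromarray internally).
gray = np.array(PIL.Image.open("input.jpg").convert("RGB"))

result_image, caption = colorize_image_sdxl(
    gray,
    positive_prompt="vivid colors, high quality",
    negative_prompt="low quality, blurry",
    caption_generate=True,   # BLIP caption is appended to the positive prompt
    seed=123,                # forwarded to torch.manual_seed for reproducible sampling
    infer_steps=5,           # forwarded to the pipeline's num_inference_steps
)
result_image.save("colorized.jpg")
print(caption)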