Huage001 committed on
Commit
db87f0c
·
verified ·
1 Parent(s): a72db69

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -21,14 +21,14 @@ MAX_SEED = np.iinfo(np.int32).max
21
  MAX_IMAGE_SIZE = 1024
22
 
23
  #@spaces.GPU #[uncomment to use ZeroGPU]
24
- def infer_t2i(model_repo_id, prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
25
 
26
  if randomize_seed:
27
  seed = random.randint(0, MAX_SEED)
28
 
29
  generator = torch.Generator().manual_seed(seed)
30
 
31
- pipe = StableDiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
32
  pipe = pipe.to(device)
33
 
34
  image = pipe(
@@ -44,14 +44,14 @@ def infer_t2i(model_repo_id, prompt, negative_prompt, seed, randomize_seed, widt
44
  return image, seed
45
 
46
  #@spaces.GPU #[uncomment to use ZeroGPU]
47
- def infer_i2i(model_repo_id, prompt, image, strength, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
48
 
49
  if randomize_seed:
50
  seed = random.randint(0, MAX_SEED)
51
 
52
  generator = torch.Generator().manual_seed(seed)
53
 
54
- pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
55
  pipe = pipe.to(device)
56
 
57
  image = pipe(
@@ -69,14 +69,14 @@ def infer_i2i(model_repo_id, prompt, image, strength, negative_prompt, seed, ran
69
  return image, seed
70
 
71
  #@spaces.GPU #[uncomment to use ZeroGPU]
72
- def infer_ip_adapter(model_repo_id, prompt, image, scale, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
73
 
74
  if randomize_seed:
75
  seed = random.randint(0, MAX_SEED)
76
 
77
  generator = torch.Generator().manual_seed(seed)
78
 
79
- pipe = StableDiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
80
  pipe = pipe.to(device)
81
  pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin")
82
  pipeline.set_ip_adapter_scale(scale)
@@ -196,7 +196,7 @@ with gr.Blocks(css=css) as demo:
196
 
197
  run_button.click(
198
  fn=infer_t2i,
199
- inputs = [all_model_id[str(model_choice)], prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
200
  outputs = [result, seed]
201
  )
202
 
@@ -295,7 +295,7 @@ with gr.Blocks(css=css) as demo:
295
 
296
  run_button.click(
297
  fn=infer_i2i,
298
- inputs = [all_model_id[str(model_choice)], prompt, image_upload_input, editing_strength, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
299
  outputs = [result, seed]
300
  )
301
 
@@ -394,7 +394,7 @@ with gr.Blocks(css=css) as demo:
394
 
395
  run_button.click(
396
  fn=infer_ip_adapter,
397
- inputs = [all_model_id[str(model_choice)], prompt, image_upload_input, ip_adapter_scale, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
398
  outputs = [result, seed]
399
  )
400
 
 
21
  MAX_IMAGE_SIZE = 1024
22
 
23
  #@spaces.GPU #[uncomment to use ZeroGPU]
24
+ def infer_t2i(model, prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
25
 
26
  if randomize_seed:
27
  seed = random.randint(0, MAX_SEED)
28
 
29
  generator = torch.Generator().manual_seed(seed)
30
 
31
+ pipe = StableDiffusionPipeline.from_pretrained(all_model_id[model], torch_dtype=torch_dtype)
32
  pipe = pipe.to(device)
33
 
34
  image = pipe(
 
44
  return image, seed
45
 
46
  #@spaces.GPU #[uncomment to use ZeroGPU]
47
+ def infer_i2i(model, prompt, image, strength, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
48
 
49
  if randomize_seed:
50
  seed = random.randint(0, MAX_SEED)
51
 
52
  generator = torch.Generator().manual_seed(seed)
53
 
54
+ pipe = StableDiffusionImg2ImgPipeline.from_pretrained(all_model_id[model], torch_dtype=torch_dtype)
55
  pipe = pipe.to(device)
56
 
57
  image = pipe(
 
69
  return image, seed
70
 
71
  #@spaces.GPU #[uncomment to use ZeroGPU]
72
+ def infer_ip_adapter(model, prompt, image, scale, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress=gr.Progress(track_tqdm=True)):
73
 
74
  if randomize_seed:
75
  seed = random.randint(0, MAX_SEED)
76
 
77
  generator = torch.Generator().manual_seed(seed)
78
 
79
+ pipe = StableDiffusionPipeline.from_pretrained(all_model_id[model], torch_dtype=torch_dtype)
80
  pipe = pipe.to(device)
81
  pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin")
82
  pipeline.set_ip_adapter_scale(scale)
 
196
 
197
  run_button.click(
198
  fn=infer_t2i,
199
+ inputs = [prompt, model_choice, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
200
  outputs = [result, seed]
201
  )
202
 
 
295
 
296
  run_button.click(
297
  fn=infer_i2i,
298
+ inputs = [prompt, model_choice, image_upload_input, editing_strength, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
299
  outputs = [result, seed]
300
  )
301
 
 
394
 
395
  run_button.click(
396
  fn=infer_ip_adapter,
397
+ inputs = [prompt, model_choice, image_upload_input, ip_adapter_scale, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
398
  outputs = [result, seed]
399
  )
400