multimodalart (HF staff) committed
Commit b05580b · verified · 1 Parent(s): 177a164

Update app.py

Files changed (1):
  1. app.py +27 -10
app.py CHANGED

@@ -4,6 +4,7 @@ import random
 import spaces
 from diffusers import AuraFlowPipeline
 import torch
+from gradio_imageslider import ImageSlider
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
@@ -35,12 +36,32 @@ MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
 
 @spaces.GPU
-def infer(prompt, negative_prompt="", seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=5.0, num_inference_steps=28, model_version="0.2", progress=gr.Progress(track_tqdm=True)):
+def infer(prompt, negative_prompt="", seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=5.0, num_inference_steps=28, model_version="0.2", comparison_mode=False, progress=gr.Progress(track_tqdm=True)):
 
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
     generator = torch.Generator().manual_seed(seed)
+    if(comparison_mode):
+        image_1 = pipe_v1(
+            prompt = prompt,
+            negative_prompt = negative_prompt,
+            width=width,
+            height=height,
+            guidance_scale = guidance_scale,
+            num_inference_steps = num_inference_steps,
+            generator = generator
+        ).images[0]
+        image_2 = image = pipe(
+            prompt = prompt,
+            negative_prompt = negative_prompt,
+            width=width,
+            height=height,
+            guidance_scale = guidance_scale,
+            num_inference_steps = num_inference_steps,
+            generator = generator
+        ).images[0]
+        return gr.update(visible=False), gr.update(visible=True, value=(image_1, image_2)), seed
     if(model_version == "0.1"):
         image = pipe_v1(
             prompt = prompt,
@@ -62,7 +83,7 @@ def infer(prompt, negative_prompt="", seed=42, randomize_seed=False, width=1024,
             generator = generator
         ).images[0]
 
-    return image, seed
+    return gr.update(visible=True, value=image), gr.update(visible=False), seed
 
 examples = [
     "A photo of a lavender cat",
@@ -78,11 +99,6 @@ css="""
 }
 """
 
-if torch.cuda.is_available():
-    power_device = "GPU"
-else:
-    power_device = "CPU"
-
 with gr.Blocks(css=css) as demo:
 
     with gr.Column(elem_id="col-container"):
@@ -105,7 +121,8 @@ with gr.Blocks(css=css) as demo:
            run_button = gr.Button("Run", scale=0)
 
        result = gr.Image(label="Result", show_label=False)
-
+       result_compare = ImageSlider(visible=False, label="Left 0.1, Right 0.2")
+       comparison_mode = gr.Checkbox(label="Comparison mode", info="Compare v0.1 with v0.2", value=False)
        with gr.Accordion("Advanced Settings", open=False):
 
            model_version = gr.Dropdown(
@@ -175,8 +192,8 @@ with gr.Blocks(css=css) as demo:
    gr.on(
        triggers=[run_button.click, prompt.submit, negative_prompt.submit],
        fn = infer,
-       inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, model_version],
-       outputs = [result, seed]
+       inputs = [prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, model_version, comparison_mode],
+       outputs = [result, result_compare, seed]
    )
 
 demo.queue().launch()
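
The core wiring of this commit is that `infer` now always returns a value for both output components and toggles their visibility: in comparison mode it hides the `gr.Image` and fills the `ImageSlider`, otherwise the reverse. Below is a minimal, self-contained sketch of that pattern; a second `gr.Image` stands in for the third-party slider, and all names are illustrative rather than taken from app.py.

import gradio as gr

# Show-one-hide-the-other pattern: the handler returns one gr.update
# per output component, in the same order as the `outputs` list.
def render(compare: bool):
    if compare:
        # Hide the single view, reveal the comparison view.
        return gr.update(visible=False), gr.update(visible=True)
    return gr.update(visible=True), gr.update(visible=False)

with gr.Blocks() as demo:
    compare = gr.Checkbox(label="Comparison mode", value=False)
    single = gr.Image(label="Result", visible=True)
    side_by_side = gr.Image(label="Comparison", visible=False)
    compare.change(render, inputs=[compare], outputs=[single, side_by_side])

demo.launch()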
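The `ImageSlider` component from the `gradio_imageslider` package takes a two-image tuple as its value, which is why the comparison branch returns `gr.update(value=(image_1, image_2))`. A usage sketch, assuming the package is installed and that it accepts the same image types as `gr.Image`; the file paths are placeholders, not files from this repo.

import gradio as gr
from gradio_imageslider import ImageSlider

with gr.Blocks() as demo:
    # The slider's value is a (left, right) pair, matching
    # gr.update(value=(image_1, image_2)) in the commit.
    slider = ImageSlider(label="Left 0.1, Right 0.2")
    demo.load(fn=lambda: ("v01_sample.png", "v02_sample.png"), outputs=[slider])

demo.launch()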
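One subtlety worth noting: in the comparison branch both pipeline calls receive the same `torch.Generator`, and the first call advances its RNG state, so the second model does not start from the noise implied by `seed`. If identical initial noise for both versions is the goal, a freshly seeded generator per call would achieve it; a sketch under that assumption (`paired_generators` is a hypothetical helper, not part of app.py):

import torch

def paired_generators(seed: int, device: str = "cpu"):
    """Return two generators seeded identically, so two pipeline calls
    sample the same initial noise instead of sharing one advancing RNG."""
    g1 = torch.Generator(device=device).manual_seed(seed)
    g2 = torch.Generator(device=device).manual_seed(seed)
    return g1, g2

# Hypothetical use inside infer()'s comparison branch:
#   g1, g2 = paired_generators(seed)
#   image_1 = pipe_v1(..., generator=g1).images[0]
#   image_2 = pipe(..., generator=g2).images[0]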