prithivMLmods committed on
Commit
d89511b
·
verified ·
1 Parent(s): c923dca

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -62
app.py CHANGED
@@ -1,13 +1,3 @@
1
- #!/usr/bin/env python
2
- #patch 2.0 ()
3
- # Permission is hereby granted, free of charge, to any person obtaining a copy
4
- # of this software and associated documentation files (the "Software"), to deal
5
- # in the Software without restriction, including without limitation the rights
6
- # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7
- # copies of the Software, and to permit persons to whom the Software is
8
- # furnished to do so, subject to the following conditions:
9
- #
10
- # ...
11
  import os
12
  import random
13
  import uuid
@@ -19,16 +9,9 @@ import spaces
19
  import torch
20
  from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
21
 
22
- #Load the HTML content
23
- #html_file_url = "https://prithivmlmods-hamster-static.static.hf.space/index.html"
24
- #html_content = f'<iframe src="{html_file_url}" style="width:100%; height:180px; border:none;"></iframe>'
25
- #html_file_url = "https://prithivmlmods-static-loading-theme.static.hf.space/index.html"
26
-
27
- #html_file_url = "https://prithivhamster.vercel.app/"
28
- #html_content = f'<iframe src="{html_file_url}" style="width:100%; height:400px; border:none"></iframe>'
29
-
30
  DESCRIPTIONx = """## STABLE HAMSTER 🐹
31
 
 
32
  """
33
 
34
  DESCRIPTIONy = """
@@ -39,7 +22,6 @@ DESCRIPTIONy = """
39
  </p>
40
  """
41
 
42
-
43
  css = '''
44
  .gradio-container{max-width: 560px !important}
45
  h1{text-align:center}
@@ -51,34 +33,17 @@ footer {
51
  examples = [
52
  "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)",
53
  "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K",
54
- "Vector illustration of a horse, vector graphic design with flat colors on an brown background in the style of vector art, using simple shapes and graphics with simple details, professionally designed as a tshirt logo ready for print on a white background. --ar 89:82 --v 6.0 --style raw",
55
  "Man in brown leather jacket posing for camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5",
56
  "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16 "
57
-
58
  ]
59
 
60
-
61
- #examples = [
62
- # ["file/1.png", "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)"],
63
- # ["file/2.png", "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K"],
64
- #["file/3.png", "Vector illustration of a horse, vector graphic design with flat colors on a brown background in the style of vector art, using simple shapes and graphics with simple details, professionally designed as a tshirt logo ready for print on a white background. --ar 89:82 --v 6.0 --style raw"],
65
- #["file/4.png", "Man in brown leather jacket posing for the camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5"],
66
- #["file/5.png", "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on a white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16"]
67
- #]
68
-
69
-
70
- #Set an os.Getenv variable
71
- #set VAR_NAME=”VALUE”
72
- #Fetch an environment variable
73
- #echo %VAR_NAME%
74
-
75
- MODEL_ID = os.getenv("MODEL_VAL_PATH") #Use SDXL Model as "MODEL_REPO" --------->>> ”VALUE”.
76
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
77
  USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
78
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
79
- BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1")) # Allow generating multiple images at once
80
 
81
- #Load model outside of function
82
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
83
  pipe = StableDiffusionXLPipeline.from_pretrained(
84
  MODEL_ID,
@@ -88,11 +53,9 @@ pipe = StableDiffusionXLPipeline.from_pretrained(
88
  ).to(device)
89
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
90
 
91
- # <compile speedup >
92
  if USE_TORCH_COMPILE:
93
  pipe.compile()
94
 
95
- # Offloading capacity (RAM)
96
  if ENABLE_CPU_OFFLOAD:
97
  pipe.enable_model_cpu_offload()
98
 
@@ -119,14 +82,24 @@ def generate(
119
  guidance_scale: float = 3,
120
  num_inference_steps: int = 25,
121
  randomize_seed: bool = False,
122
- use_resolution_binning: bool = True,
123
- num_images: int = 1, # Number of images to generate
124
  progress=gr.Progress(track_tqdm=True),
125
  ):
126
  seed = int(randomize_seed_fn(seed, randomize_seed))
127
  generator = torch.Generator(device=device).manual_seed(seed)
128
 
129
- #Options
 
 
 
 
 
 
 
 
 
 
130
  options = {
131
  "prompt": [prompt] * num_images,
132
  "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
@@ -138,11 +111,9 @@ def generate(
138
  "output_type": "pil",
139
  }
140
 
141
- #VRAM usage Lesser
142
  if use_resolution_binning:
143
  options["use_resolution_binning"] = True
144
 
145
- #Images potential batches
146
  images = []
147
  for i in range(0, num_images, BATCH_SIZE):
148
  batch_options = options.copy()
@@ -151,11 +122,17 @@ def generate(
151
  batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
152
  images.extend(pipe(**batch_options).images)
153
 
154
- image_paths = [save_image(img) for img in images]
155
- return image_paths, seed
156
- #Main gr.Block
 
 
 
 
 
 
157
  with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
158
- gr.Markdown(DESCRIPTIONx)
159
 
160
  with gr.Group():
161
  with gr.Row():
@@ -168,14 +145,23 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
168
  )
169
  run_button = gr.Button("Run", scale=0)
170
  result = gr.Gallery(label="Result", columns=1, show_label=False)
 
 
 
 
 
 
 
171
  with gr.Accordion("Advanced options", open=False, visible=False):
172
- num_images = gr.Slider(
173
- label="Number of Images",
174
- minimum=1,
175
- maximum=4,
176
- step=1,
177
- value=1,
178
- )
 
 
179
  with gr.Row():
180
  use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
181
  negative_prompt = gr.Text(
@@ -255,14 +241,12 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
255
  guidance_scale,
256
  num_inference_steps,
257
  randomize_seed,
258
- num_images
259
  ],
260
  outputs=[result, seed],
261
  api_name="run",
262
  )
263
 
264
-
265
-
266
  gr.Markdown(DESCRIPTIONy)
267
 
268
  gr.Markdown("**Disclaimer:**")
@@ -271,7 +255,5 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
271
  gr.Markdown("**Note:**")
272
  gr.Markdown("⚠️ users are accountable for the content they generate and are responsible for ensuring it meets appropriate ethical standards.")
273
 
274
- #gr.HTML(html_content)
275
-
276
  if __name__ == "__main__":
277
  demo.queue(max_size=40).launch()
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
  import random
3
  import uuid
 
9
  import torch
10
  from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
11
 
 
 
 
 
 
 
 
 
12
  DESCRIPTIONx = """## STABLE HAMSTER 🐹
13
 
14
+
15
  """
16
 
17
  DESCRIPTIONy = """
 
22
  </p>
23
  """
24
 
 
25
  css = '''
26
  .gradio-container{max-width: 560px !important}
27
  h1{text-align:center}
 
33
  examples = [
34
  "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)",
35
  "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K",
36
+ "Vector illustration of a horse, vector graphic design with flat colors on a brown background in the style of vector art, using simple shapes and graphics with simple details, professionally designed as a tshirt logo ready for print on a white background. --ar 89:82 --v 6.0 --style raw",
37
  "Man in brown leather jacket posing for camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5",
38
  "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16 "
 
39
  ]
40
 
41
+ MODEL_ID = os.getenv("MODEL_VAL_PATH")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
43
  USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
44
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
45
+ BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
46
 
 
47
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
48
  pipe = StableDiffusionXLPipeline.from_pretrained(
49
  MODEL_ID,
 
53
  ).to(device)
54
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
55
 
 
56
  if USE_TORCH_COMPILE:
57
  pipe.compile()
58
 
 
59
  if ENABLE_CPU_OFFLOAD:
60
  pipe.enable_model_cpu_offload()
61
 
 
82
  guidance_scale: float = 3,
83
  num_inference_steps: int = 25,
84
  randomize_seed: bool = False,
85
+ use_resolution_binning: bool = True,
86
+ grid_size: str = "2x2",
87
  progress=gr.Progress(track_tqdm=True),
88
  ):
89
  seed = int(randomize_seed_fn(seed, randomize_seed))
90
  generator = torch.Generator(device=device).manual_seed(seed)
91
 
92
+ grid_sizes = {
93
+ "2x1": (2, 1),
94
+ "1x2": (1, 2),
95
+ "2x2": (2, 2),
96
+ "2x3": (2, 3),
97
+ "3x2": (3, 2),
98
+ "1x1": (1, 1)
99
+ }
100
+ grid_size_x, grid_size_y = grid_sizes.get(grid_size, (2, 2))
101
+ num_images = grid_size_x * grid_size_y
102
+
103
  options = {
104
  "prompt": [prompt] * num_images,
105
  "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
 
111
  "output_type": "pil",
112
  }
113
 
 
114
  if use_resolution_binning:
115
  options["use_resolution_binning"] = True
116
 
 
117
  images = []
118
  for i in range(0, num_images, BATCH_SIZE):
119
  batch_options = options.copy()
 
122
  batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
123
  images.extend(pipe(**batch_options).images)
124
 
125
+ torch.cuda.empty_cache()
126
+ grid_img = Image.new('RGB', (width * grid_size_x, height * grid_size_y))
127
+
128
+ for i, img in enumerate(images[:num_images]):
129
+ grid_img.paste(img, (i % grid_size_x * width, i // grid_size_x * height))
130
+
131
+ unique_name = save_image(grid_img)
132
+ return unique_name, seed
133
+
134
  with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
135
+ gr.Markdown(DESCRIPTIONx)
136
 
137
  with gr.Group():
138
  with gr.Row():
 
145
  )
146
  run_button = gr.Button("Run", scale=0)
147
  result = gr.Gallery(label="Result", columns=1, show_label=False)
148
+
149
+ with gr.Row(visible=True):
150
+ grid_size_selection = gr.Dropdown(
151
+ choices=["2x1", "1x2", "2x2", "2x3", "3x2", "1x1"],
152
+ value="1x1",
153
+ label="⚡Grid"
154
+ )
155
  with gr.Accordion("Advanced options", open=False, visible=False):
156
+
157
+ with gr.Row():
158
+ num_images = gr.Slider(
159
+ label="Number of Images",
160
+ minimum=1,
161
+ maximum=4,
162
+ step=1,
163
+ value=1,
164
+ )
165
  with gr.Row():
166
  use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
167
  negative_prompt = gr.Text(
 
241
  guidance_scale,
242
  num_inference_steps,
243
  randomize_seed,
244
+ grid_size_selection
245
  ],
246
  outputs=[result, seed],
247
  api_name="run",
248
  )
249
 
 
 
250
  gr.Markdown(DESCRIPTIONy)
251
 
252
  gr.Markdown("**Disclaimer:**")
 
255
  gr.Markdown("**Note:**")
256
  gr.Markdown("⚠️ users are accountable for the content they generate and are responsible for ensuring it meets appropriate ethical standards.")
257
 
 
 
258
  if __name__ == "__main__":
259
  demo.queue(max_size=40).launch()