prithivMLmods committed on
Commit
8a44313
·
verified ·
1 Parent(s): d89511b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +62 -43
app.py CHANGED
@@ -1,3 +1,13 @@
 
 
 
 
 
 
 
 
 
 
1
  import os
2
  import random
3
  import uuid
@@ -9,6 +19,14 @@ import spaces
9
  import torch
10
  from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
11
 
 
 
 
 
 
 
 
 
12
  DESCRIPTIONx = """## STABLE HAMSTER 🐹
13
 
14
 
@@ -22,6 +40,7 @@ DESCRIPTIONy = """
22
  </p>
23
  """
24
 
 
25
  css = '''
26
  .gradio-container{max-width: 560px !important}
27
  h1{text-align:center}
@@ -33,17 +52,34 @@ footer {
33
  examples = [
34
  "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)",
35
  "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K",
36
- "Vector illustration of a horse, vector graphic design with flat colors on a brown background in the style of vector art, using simple shapes and graphics with simple details, professionally designed as a tshirt logo ready for print on a white background. --ar 89:82 --v 6.0 --style raw",
37
  "Man in brown leather jacket posing for camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5",
38
  "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16 "
 
39
  ]
40
 
41
- MODEL_ID = os.getenv("MODEL_VAL_PATH")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
42
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
43
  USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
44
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
45
- BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
46
 
 
47
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
48
  pipe = StableDiffusionXLPipeline.from_pretrained(
49
  MODEL_ID,
@@ -53,9 +89,11 @@ pipe = StableDiffusionXLPipeline.from_pretrained(
53
  ).to(device)
54
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
55
 
 
56
  if USE_TORCH_COMPILE:
57
  pipe.compile()
58
 
 
59
  if ENABLE_CPU_OFFLOAD:
60
  pipe.enable_model_cpu_offload()
61
 
@@ -82,24 +120,14 @@ def generate(
82
  guidance_scale: float = 3,
83
  num_inference_steps: int = 25,
84
  randomize_seed: bool = False,
85
- use_resolution_binning: bool = True,
86
- grid_size: str = "2x2",
87
  progress=gr.Progress(track_tqdm=True),
88
  ):
89
  seed = int(randomize_seed_fn(seed, randomize_seed))
90
  generator = torch.Generator(device=device).manual_seed(seed)
91
 
92
- grid_sizes = {
93
- "2x1": (2, 1),
94
- "1x2": (1, 2),
95
- "2x2": (2, 2),
96
- "2x3": (2, 3),
97
- "3x2": (3, 2),
98
- "1x1": (1, 1)
99
- }
100
- grid_size_x, grid_size_y = grid_sizes.get(grid_size, (2, 2))
101
- num_images = grid_size_x * grid_size_y
102
-
103
  options = {
104
  "prompt": [prompt] * num_images,
105
  "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
@@ -111,9 +139,11 @@ def generate(
111
  "output_type": "pil",
112
  }
113
 
 
114
  if use_resolution_binning:
115
  options["use_resolution_binning"] = True
116
 
 
117
  images = []
118
  for i in range(0, num_images, BATCH_SIZE):
119
  batch_options = options.copy()
@@ -122,17 +152,11 @@ def generate(
122
  batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
123
  images.extend(pipe(**batch_options).images)
124
 
125
- torch.cuda.empty_cache()
126
- grid_img = Image.new('RGB', (width * grid_size_x, height * grid_size_y))
127
-
128
- for i, img in enumerate(images[:num_images]):
129
- grid_img.paste(img, (i % grid_size_x * width, i // grid_size_x * height))
130
-
131
- unique_name = save_image(grid_img)
132
- return unique_name, seed
133
-
134
  with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
135
- gr.Markdown(DESCRIPTIONx)
136
 
137
  with gr.Group():
138
  with gr.Row():
@@ -145,23 +169,14 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
145
  )
146
  run_button = gr.Button("Run", scale=0)
147
  result = gr.Gallery(label="Result", columns=1, show_label=False)
148
-
149
- with gr.Row(visible=True):
150
- grid_size_selection = gr.Dropdown(
151
- choices=["2x1", "1x2", "2x2", "2x3", "3x2", "1x1"],
152
- value="1x1",
153
- label="⚡Grid"
154
- )
155
  with gr.Accordion("Advanced options", open=False, visible=False):
156
-
157
- with gr.Row():
158
- num_images = gr.Slider(
159
- label="Number of Images",
160
- minimum=1,
161
- maximum=4,
162
- step=1,
163
- value=1,
164
- )
165
  with gr.Row():
166
  use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
167
  negative_prompt = gr.Text(
@@ -241,12 +256,14 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
241
  guidance_scale,
242
  num_inference_steps,
243
  randomize_seed,
244
- grid_size_selection
245
  ],
246
  outputs=[result, seed],
247
  api_name="run",
248
  )
249
 
 
 
250
  gr.Markdown(DESCRIPTIONy)
251
 
252
  gr.Markdown("**Disclaimer:**")
@@ -255,5 +272,7 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
255
  gr.Markdown("**Note:**")
256
  gr.Markdown("⚠️ users are accountable for the content they generate and are responsible for ensuring it meets appropriate ethical standards.")
257
 
 
 
258
  if __name__ == "__main__":
259
  demo.queue(max_size=40).launch()
 
1
+ #!/usr/bin/env python
2
+ #patch 2.0 ()
3
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
4
+ # of this software and associated documentation files (the "Software"), to deal
5
+ # in the Software without restriction, including without limitation the rights
6
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7
+ # copies of the Software, and to permit persons to whom the Software is
8
+ # furnished to do so, subject to the following conditions:
9
+ #
10
+ # ...
11
  import os
12
  import random
13
  import uuid
 
19
  import torch
20
  from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
21
 
22
+ #Load the HTML content
23
+ #html_file_url = "https://prithivmlmods-hamster-static.static.hf.space/index.html"
24
+ #html_content = f'<iframe src="{html_file_url}" style="width:100%; height:180px; border:none;"></iframe>'
25
+ #html_file_url = "https://prithivmlmods-static-loading-theme.static.hf.space/index.html"
26
+
27
+ #html_file_url = "https://prithivhamster.vercel.app/"
28
+ #html_content = f'<iframe src="{html_file_url}" style="width:100%; height:400px; border:none"></iframe>'
29
+
30
  DESCRIPTIONx = """## STABLE HAMSTER 🐹
31
 
32
 
 
40
  </p>
41
  """
42
 
43
+
44
  css = '''
45
  .gradio-container{max-width: 560px !important}
46
  h1{text-align:center}
 
52
  examples = [
53
  "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)",
54
  "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K",
55
+ "Vector illustration of a horse, vector graphic design with flat colors on an brown background in the style of vector art, using simple shapes and graphics with simple details, professionally designed as a tshirt logo ready for print on a white background. --ar 89:82 --v 6.0 --style raw",
56
  "Man in brown leather jacket posing for camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5",
57
  "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16 "
58
+
59
  ]
60
 
61
+
62
+ #examples = [
63
+ # ["file/1.png", "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)"],
64
+ # ["file/2.png", "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K"],
65
+ #["file/3.png", "Vector illustration of a horse, vector graphic design with flat colors on a brown background in the style of vector art, using simple shapes and graphics with simple details, professionally designed as a tshirt logo ready for print on a white background. --ar 89:82 --v 6.0 --style raw"],
66
+ #["file/4.png", "Man in brown leather jacket posing for the camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5"],
67
+ #["file/5.png", "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on a white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16"]
68
+ #]
69
+
70
+
71
+ #Set an os.Getenv variable
72
+ #set VAR_NAME=”VALUE”
73
+ #Fetch an environment variable
74
+ #echo %VAR_NAME%
75
+
76
+ MODEL_ID = os.getenv("MODEL_VAL_PATH") #Use SDXL Model as "MODEL_REPO" --------->>> ”VALUE”.
77
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
78
  USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
79
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
80
+ BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1")) # Allow generating multiple images at once
81
 
82
+ #Load model outside of function
83
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
84
  pipe = StableDiffusionXLPipeline.from_pretrained(
85
  MODEL_ID,
 
89
  ).to(device)
90
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
91
 
92
+ # <compile speedup >
93
  if USE_TORCH_COMPILE:
94
  pipe.compile()
95
 
96
+ # Offloading capacity (RAM)
97
  if ENABLE_CPU_OFFLOAD:
98
  pipe.enable_model_cpu_offload()
99
 
 
120
  guidance_scale: float = 3,
121
  num_inference_steps: int = 25,
122
  randomize_seed: bool = False,
123
+ use_resolution_binning: bool = True,
124
+ num_images: int = 1, # Number of images to generate
125
  progress=gr.Progress(track_tqdm=True),
126
  ):
127
  seed = int(randomize_seed_fn(seed, randomize_seed))
128
  generator = torch.Generator(device=device).manual_seed(seed)
129
 
130
+ #Options
 
 
 
 
 
 
 
 
 
 
131
  options = {
132
  "prompt": [prompt] * num_images,
133
  "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
 
139
  "output_type": "pil",
140
  }
141
 
142
+ #VRAM usage Lesser
143
  if use_resolution_binning:
144
  options["use_resolution_binning"] = True
145
 
146
+ #Images potential batches
147
  images = []
148
  for i in range(0, num_images, BATCH_SIZE):
149
  batch_options = options.copy()
 
152
  batch_options["negative_prompt"] = options["negative_prompt"][i:i+BATCH_SIZE]
153
  images.extend(pipe(**batch_options).images)
154
 
155
+ image_paths = [save_image(img) for img in images]
156
+ return image_paths, seed
157
+ #Main gr.Block
 
 
 
 
 
 
158
  with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
159
+ gr.Markdown(DESCRIPTIONx)
160
 
161
  with gr.Group():
162
  with gr.Row():
 
169
  )
170
  run_button = gr.Button("Run", scale=0)
171
  result = gr.Gallery(label="Result", columns=1, show_label=False)
 
 
 
 
 
 
 
172
  with gr.Accordion("Advanced options", open=False, visible=False):
173
+ num_images = gr.Slider(
174
+ label="Number of Images",
175
+ minimum=1,
176
+ maximum=4,
177
+ step=1,
178
+ value=1,
179
+ )
 
 
180
  with gr.Row():
181
  use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
182
  negative_prompt = gr.Text(
 
256
  guidance_scale,
257
  num_inference_steps,
258
  randomize_seed,
259
+ num_images
260
  ],
261
  outputs=[result, seed],
262
  api_name="run",
263
  )
264
 
265
+
266
+
267
  gr.Markdown(DESCRIPTIONy)
268
 
269
  gr.Markdown("**Disclaimer:**")
 
272
  gr.Markdown("**Note:**")
273
  gr.Markdown("⚠️ users are accountable for the content they generate and are responsible for ensuring it meets appropriate ethical standards.")
274
 
275
+ #gr.HTML(html_content)
276
+
277
  if __name__ == "__main__":
278
  demo.queue(max_size=40).launch()