Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -12,15 +12,11 @@ from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
 #Load the HTML content
 #html_file_url = "https://prithivmlmods-hamster-static.static.hf.space/index.html"
 #html_content = f'<iframe src="{html_file_url}" style="width:100%; height:180px; border:none;"></iframe>'
-
-
 html_file_url = "https://prithivmlmods-hamster-static.static.hf.space/index.html"
 html_content = f'<iframe src="{html_file_url}" style="width:100%; height:200px; border:none"></iframe>'
 
-
 DESCRIPTIONx = """## STABLE HAMSTER
 """
-
 css = '''
 .gradio-container{max-width: 560px !important}
 h1{text-align:center}
@@ -38,15 +34,17 @@ examples = [
     "Kids going to school, Anime style"
 ]
 
-
-#
+#Set an os.Getenv variable
+#set VAR_NAME="VALUE"
+#Fetch an environment variable
+#echo %VAR_NAME%
 MODEL_ID = os.getenv("MODEL_REPO")
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
 USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
 BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))  # Allow generating multiple images at once
 
-#
+#Load model outside of function
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 pipe = StableDiffusionXLPipeline.from_pretrained(
     MODEL_ID,
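Reviewer note: the new `set VAR_NAME="VALUE"` / `echo %VAR_NAME%` comments are Windows cmd syntax; a Space container runs Linux, where the equivalents are `export VAR_NAME=VALUE` and `echo $VAR_NAME`, or the Space's settings UI for variables and secrets. Separately, `MODEL_REPO` is read with no fallback, so an unset variable silently becomes `None` and only fails later inside `from_pretrained`. A minimal fail-fast guard, sketched from nothing beyond this diff:

    import os

    MODEL_ID = os.getenv("MODEL_REPO")  # no default in this app
    if MODEL_ID is None:
        # Surface a clear configuration error instead of a confusing
        # from_pretrained(None) traceback at model-load time.
        raise RuntimeError("MODEL_REPO is not set; add it in the Space settings.")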
@@ -56,11 +54,11 @@ pipe = StableDiffusionXLPipeline.from_pretrained(
 ).to(device)
 pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
 
-#
+# <compile speedup >
 if USE_TORCH_COMPILE:
     pipe.compile()
 
-#
+# Offloading capacity (RAM)
 if ENABLE_CPU_OFFLOAD:
     pipe.enable_model_cpu_offload()
 
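Reviewer note: `pipe.compile()` is not a diffusers pipeline method in the releases I know of; the usual speedup pattern applies `torch.compile` to the UNet, the hot path of SDXL inference. Also, diffusers advises against combining `enable_model_cpu_offload()` with a prior `.to(device)`, since offloading manages device placement itself. A sketch under those assumptions (the `mode` choice is illustrative):

    if USE_TORCH_COMPILE:
        # Compile only the UNet; the rest of the pipeline is not worth the compile time.
        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)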
@@ -76,7 +74,7 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
         seed = random.randint(0, MAX_SEED)
     return seed
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=60, enable_queue=True)
 def generate(
     prompt: str,
     negative_prompt: str = "",
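Reviewer note: on ZeroGPU, `@spaces.GPU(duration=60)` requests a GPU slot of up to about 60 seconds per call, so it should be sized to the worst-case generation time. Whether the decorator still accepts `enable_queue` depends on the installed `spaces` package version, so treat that keyword as best-effort. The minimal pattern looks like this (sketch; `pipe` is the pipeline defined above, and the package is only available inside a Space):

    import spaces

    @spaces.GPU(duration=60)  # per-call GPU window on ZeroGPU hardware
    def infer(prompt: str):
        return pipe(prompt=prompt).images[0]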
@@ -85,7 +83,7 @@ def generate(
     width: int = 1024,
     height: int = 1024,
     guidance_scale: float = 3,
-    num_inference_steps: int =
+    num_inference_steps: int = 25,
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
     num_images: int = 1,  # Number of images to generate
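Note: the new signature default of 25 for `num_inference_steps` matches the slider maximum raised to 25 later in this same commit, so the function and the UI agree on the upper bound.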
@@ -94,7 +92,7 @@ def generate(
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator(device=device).manual_seed(seed)
 
-    #
+    #Options
     options = {
         "prompt": [prompt] * num_images,
         "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
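Reviewer note: seeding one `torch.Generator` up front is what makes a run reproducible; the same seed replays the same sequence of latent draws across all batches. The property in isolation (runnable with only torch):

    import torch

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    gen = torch.Generator(device=device).manual_seed(42)
    a = torch.randn(4, generator=gen, device=device)
    gen = torch.Generator(device=device).manual_seed(42)
    b = torch.randn(4, generator=gen, device=device)
    assert torch.equal(a, b)  # same seed, identical draws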
@@ -106,11 +104,11 @@ def generate(
         "output_type": "pil",
     }
 
-    #
+    #VRAM usage Lesser
     if use_resolution_binning:
         options["use_resolution_binning"] = True
 
-    #
+    #Images potential batches
     images = []
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
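Reviewer note: the loop strides over `num_images` in steps of `BATCH_SIZE`, and each iteration must also slice the per-image lists in `options`, otherwise every batch would re-render all prompts. The slicing falls outside this hunk, so the following loop body is a hypothetical completion consistent with the visible code, not the committed code:

    images = []
    for i in range(0, num_images, BATCH_SIZE):
        batch_options = options.copy()
        batch_options["prompt"] = options["prompt"][i:i + BATCH_SIZE]
        if options.get("negative_prompt") is not None:
            batch_options["negative_prompt"] = options["negative_prompt"][i:i + BATCH_SIZE]
        # Stock SDXL pipelines may not accept this key; drop it unless the
        # pipeline in use supports resolution binning.
        batch_options.pop("use_resolution_binning", None)
        images.extend(pipe(**batch_options, generator=generator).images)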
@@ -121,8 +119,7 @@ def generate(
 
     image_paths = [save_image(img) for img in images]
     return image_paths, seed
-
-
+#Main gr.Block
 with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
     gr.Markdown(DESCRIPTIONx)
     with gr.Group():
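Reviewer note: `theme="bethecloud/storj_theme"` names a theme repo on the Hugging Face Hub; Gradio resolves the string and applies the theme when the Blocks app is built. A self-contained sketch of the same mechanism:

    import gradio as gr

    with gr.Blocks(theme="bethecloud/storj_theme") as demo:  # theme fetched from the Hub
        gr.Markdown("## theme demo")

    if __name__ == "__main__":
        demo.launch()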
@@ -188,7 +185,7 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
         num_inference_steps = gr.Slider(
             label="Number of inference steps",
             minimum=1,
-            maximum=
+            maximum=25,
             step=1,
             value=8,
         )
@@ -227,9 +224,7 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
         ],
         outputs=[result, seed],
         api_name="run",
-    )
-
-
+    )
     gr.HTML(html_content)
 if __name__ == "__main__":
     demo.queue(max_size=50).launch()