prithivMLmods committed on
Commit ac40a94 · verified · 1 Parent(s): 56f1983

Delete app.py

Files changed (1)
  1. app.py +0 -234
app.py DELETED
@@ -1,234 +0,0 @@
- import gradio as gr
- import spaces
- import numpy as np
- import random
- from diffusers import DiffusionPipeline
- import torch
- from PIL import Image
-
- device = "cuda" if torch.cuda.is_available() else "cpu"
- model_repo_id = "stabilityai/stable-diffusion-3.5-large-turbo"
-
- torch_dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
-
- pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
- pipe = pipe.to(device)
-
- pipe.load_lora_weights("strangerzonehf/SD3.5-Turbo-Portrait-LoRA", weight_name="SD3.5-Turbo-Portrait.safetensors")
- trigger_word = "Turbo Portrait"
- pipe.fuse_lora(lora_scale=1.0)
-
- MAX_SEED = np.iinfo(np.int32).max
- MAX_IMAGE_SIZE = 1024
-
- # Define styles
- style_list = [
-     {
-         "name": "3840 x 2160",
-         "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
-         "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
-     },
-     {
-         "name": "2560 x 1440",
-         "prompt": "hyper-realistic 4K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
-         "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
-     },
-     {
-         "name": "HD+",
-         "prompt": "hyper-realistic 2K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
-         "negative_prompt": "cartoonish, low resolution, blurry, simplistic, abstract, deformed, ugly",
-     },
-     {
-         "name": "Style Zero",
-         "prompt": "{prompt}",
-         "negative_prompt": "",
-     },
- ]
-
- STYLE_NAMES = [style["name"] for style in style_list]
- DEFAULT_STYLE_NAME = STYLE_NAMES[0]
-
- grid_sizes = {
-     "2x1": (2, 1),
-     "1x2": (1, 2),
-     "2x2": (2, 2),
-     "2x3": (2, 3),
-     "3x2": (3, 2),
-     "1x1": (1, 1)
- }
-
- @spaces.GPU(duration=60)
- def infer(
-     prompt,
-     negative_prompt="",
-     seed=42,
-     randomize_seed=False,
-     width=1024,
-     height=1024,
-     guidance_scale=7.5,
-     num_inference_steps=10,
-     style="Style Zero",
-     grid_size="1x1",
-     progress=gr.Progress(track_tqdm=True),
- ):
-     selected_style = next(s for s in style_list if s["name"] == style)
-     styled_prompt = selected_style["prompt"].format(prompt=prompt)
-     styled_negative_prompt = selected_style["negative_prompt"]
-
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-
-     generator = torch.Generator().manual_seed(seed)
-
-     grid_size_x, grid_size_y = grid_sizes.get(grid_size, (1, 1))
-     num_images = grid_size_x * grid_size_y
-
-     options = {
-         "prompt": styled_prompt,
-         "negative_prompt": styled_negative_prompt,
-         "guidance_scale": guidance_scale,
-         "num_inference_steps": num_inference_steps,
-         "width": width,
-         "height": height,
-         "generator": generator,
-         "num_images_per_prompt": num_images,
-     }
-
-     torch.cuda.empty_cache() # Clear GPU memory
-     result = pipe(**options)
-
-     grid_img = Image.new('RGB', (width * grid_size_x, height * grid_size_y))
-
-     for i, img in enumerate(result.images[:num_images]):
-         grid_img.paste(img, (i % grid_size_x * width, i // grid_size_x * height))
-
-     return grid_img, seed
-
- examples = [
-     "A tiny astronaut hatching from an egg on the moon, 4k, planet theme",
-     "An anime-style illustration of a delicious, golden-brown wiener schnitzel on a plate, served with fresh lemon slices, parsley --style raw5",
-     "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K, Photo-Realistic",
-     "A cat holding a sign that says hello world --ar 85:128 --v 6.0 --style raw"
- ]
-
- css = '''
- .gradio-container{max-width: 585px !important}
- h1{text-align:center}
- footer {
-     visibility: hidden
- }
- '''
-
- with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
-     with gr.Column(elem_id="col-container"):
-         gr.Markdown("## GRID 6X🪨")
-
-         with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-
-             run_button = gr.Button("Run", scale=0, variant="primary")
-
-         result = gr.Image(label="Result", show_label=False)
-
-
-         with gr.Row(visible=True):
-             grid_size_selection = gr.Dropdown(
-                 choices=["2x1", "1x2", "2x2", "2x3", "3x2", "1x1"],
-                 value="1x1",
-                 label="Grid Size"
-             )
-
-         with gr.Accordion("Advanced Settings", open=False):
-             negative_prompt = gr.Text(
-                 label="Negative prompt",
-                 max_lines=1,
-                 placeholder="Enter a negative prompt",
-                 value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
-                 visible=False,
-             )
-
-             seed = gr.Slider(
-                 label="Seed",
-                 minimum=0,
-                 maximum=MAX_SEED,
-                 step=1,
-                 value=0,
-             )
-
-             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-             with gr.Row():
-                 width = gr.Slider(
-                     label="Width",
-                     minimum=512,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024,
-                 )
-
-                 height = gr.Slider(
-                     label="Height",
-                     minimum=512,
-                     maximum=MAX_IMAGE_SIZE,
-                     step=32,
-                     value=1024,
-                 )
-
-             with gr.Row():
-                 guidance_scale = gr.Slider(
-                     label="Guidance scale",
-                     minimum=0.0,
-                     maximum=7.5,
-                     step=0.1,
-                     value=0.0,
-                 )
-
-                 num_inference_steps = gr.Slider(
-                     label="Number of inference steps",
-                     minimum=1,
-                     maximum=50,
-                     step=1,
-                     value=8,
-                 )
-
-             style_selection = gr.Radio(
-                 show_label=True,
-                 container=True,
-                 interactive=True,
-                 choices=STYLE_NAMES,
-                 value=DEFAULT_STYLE_NAME,
-                 label="Quality Style",
-             )
-
-         gr.Examples(examples=examples,
-                     inputs=[prompt],
-                     outputs=[result, seed],
-                     fn=infer,
-                     cache_examples=False)
-
-     gr.on(
-         triggers=[run_button.click, prompt.submit],
-         fn=infer,
-         inputs=[
-             prompt,
-             negative_prompt,
-             seed,
-             randomize_seed,
-             width,
-             height,
-             guidance_scale,
-             num_inference_steps,
-             style_selection,
-             grid_size_selection,
-         ],
-         outputs=[result, seed],
-     )
-
- if __name__ == "__main__":
-     demo.launch()