prithivMLmods committed on
Commit 159b3cd
1 Parent(s): c2ddc59

With Single LoRA

Files changed (1)
  1. Last Commit.txt +241 -0
Last Commit.txt ADDED
@@ -0,0 +1,241 @@
import spaces
import gradio as gr
import torch
from PIL import Image
from diffusers import DiffusionPipeline
import random
import uuid
from typing import Tuple
import numpy as np

DESCRIPTIONz = """## FLUX REALISM 🔥"""

DESCRIPTIONy = """
<p align="left">
<a title="Github" href="https://github.com/PRITHIVSAKTHIUR/FLUX-REALPIX" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
<img src="https://img.shields.io/github/stars/PRITHIVSAKTHIUR/FLUX-REALPIX?label=GitHub%20%E2%98%85&logo=github&color=C8C" alt="badge-github-stars">
</a>
</p>
"""

def save_image(img):
    # Save a generated image under a unique filename and return its path.
    unique_name = str(uuid.uuid4()) + ".png"
    img.save(unique_name)
    return unique_name

def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed

MAX_SEED = np.iinfo(np.int32).max

if not torch.cuda.is_available():
    DESCRIPTIONz += "\n<p>⚠️ Running on CPU. This demo may not work on CPU.</p>"

# Load the FLUX.1-dev base pipeline and attach a single realism LoRA.
base_model = "black-forest-labs/FLUX.1-dev"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)

lora_repo = "prithivMLmods/Canopus-LoRA-Flux-FaceRealism"
trigger_word = "Realism"  # Leave trigger_word blank if not used.
pipe.load_lora_weights(lora_repo)

pipe.to("cuda")

style_list = [
    {
        "name": "3840 x 2160",
        "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
    },
    {
        "name": "2560 x 1440",
        "prompt": "hyper-realistic 4K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
    },
    {
        "name": "HD+",
        "prompt": "hyper-realistic 2K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
    },
    {
        "name": "Style Zero",
        "prompt": "{prompt}",
    },
]

styles = {k["name"]: k["prompt"] for k in style_list}

DEFAULT_STYLE_NAME = "3840 x 2160"
STYLE_NAMES = list(styles.keys())

def apply_style(style_name: str, positive: str) -> str:
    return styles.get(style_name, styles[DEFAULT_STYLE_NAME]).replace("{prompt}", positive)

@spaces.GPU(duration=60, enable_queue=True)
def generate(
    prompt: str,
    seed: int = 0,
    width: int = 1024,
    height: int = 1024,
    guidance_scale: float = 3,
    randomize_seed: bool = False,
    style_name: str = DEFAULT_STYLE_NAME,
    num_inference_steps: int = 16,
    progress=gr.Progress(track_tqdm=True),
):
    seed = int(randomize_seed_fn(seed, randomize_seed))

    positive_prompt = apply_style(style_name, prompt)

    if trigger_word:
        positive_prompt = f"{trigger_word} {positive_prompt}"

    images = pipe(
        prompt=positive_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=1,
        output_type="pil",
    ).images
    image_paths = [save_image(img) for img in images]
    print(image_paths)
    return image_paths, seed

def load_predefined_images():
    predefined_images = [
        "assets/11.png",
        "assets/22.png",
        "assets/33.png",
        "assets/44.png",
        "assets/55.webp",
        "assets/66.png",
        "assets/77.png",
        "assets/88.png",
        "assets/99.png",
    ]
    return predefined_images

examples = [
    "A portrait of an attractive woman in her late twenties with light brown hair, wearing a large yellow sweater. She is looking directly at the camera, standing outdoors near trees. --ar 128:85 --v 6.0 --style raw",
    "A photo of the model wearing a white bodysuit and beige trench coat, posing in front of a train station with hands on head, soft light, sunset, fashion photography, high resolution, 35mm lens, f/22, natural lighting, global illumination. --ar 85:128 --v 6.0 --style raw",
]

css = '''
.gradio-container{max-width: 575px !important}
h1{text-align:center}
footer {
    visibility: hidden
}
'''

with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
    gr.Markdown(DESCRIPTIONz)
    gr.Markdown(DESCRIPTIONy)
    with gr.Row():
        prompt = gr.Text(
            label="Prompt",
            show_label=False,
            max_lines=1,
            placeholder="Enter your prompt",
            container=False,
        )
        run_button = gr.Button("Run", scale=0)
    result = gr.Gallery(label="Result", columns=1, show_label=False)

    with gr.Accordion("Advanced options", open=False, visible=True):
        seed = gr.Slider(
            label="Seed",
            minimum=0,
            maximum=MAX_SEED,
            step=1,
            value=0,
            visible=True,
        )
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

        with gr.Row(visible=True):
            width = gr.Slider(
                label="Width",
                minimum=512,
                maximum=2048,
                step=64,
                value=1024,
            )
            height = gr.Slider(
                label="Height",
                minimum=512,
                maximum=2048,
                step=64,
                value=1024,
            )

        with gr.Row():
            guidance_scale = gr.Slider(
                label="Guidance Scale",
                minimum=0.1,
                maximum=20.0,
                step=0.1,
                value=3.0,
            )
            num_inference_steps = gr.Slider(
                label="Number of inference steps",
                minimum=1,
                maximum=40,
                step=1,
                value=16,
            )

        style_selection = gr.Radio(
            show_label=True,
            container=True,
            interactive=True,
            choices=STYLE_NAMES,
            value=DEFAULT_STYLE_NAME,
            label="Quality Style",
        )

    gr.Examples(
        examples=examples,
        inputs=prompt,
        outputs=[result, seed],
        fn=generate,
        cache_examples=False,
    )

    gr.on(
        triggers=[
            prompt.submit,
            run_button.click,
        ],
        fn=generate,
        inputs=[
            prompt,
            seed,
            width,
            height,
            guidance_scale,
            randomize_seed,
            style_selection,
            num_inference_steps,
        ],
        outputs=[result, seed],
        api_name="run",
    )

    gr.Markdown("### Generated Images")
    predefined_gallery = gr.Gallery(label="Generated Images", columns=3, show_label=False, value=load_predefined_images())

    gr.Markdown("**Disclaimer/Note:**")

    gr.Markdown("🔥 This Space generates realistic images and works best for human faces and portraits. The Realism trigger word behaves reliably and suits photorealistic prompts, close-up shots, face generation, and male or female characters.")

    gr.Markdown("🔥 Users are accountable for the content they generate and are responsible for ensuring it meets appropriate ethical standards.")

if __name__ == "__main__":
    demo.queue(max_size=40).launch()