ehristoforu commited on
Commit
e899098
·
verified ·
1 Parent(s): 8072663

Upload 4 files

Browse files
Files changed (4) hide show
  1. .gitattributes +35 -0
  2. README.md +13 -0
  3. app.py +210 -0
  4. requirements.txt +6 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Proteus V0.1
3
+ emoji: 🦖
4
+ colorFrom: pink
5
+ colorTo: purple
6
+ sdk: gradio
7
+ sdk_version: 4.14.0
8
+ app_file: app.py
9
+ pinned: false
10
+ license: cc-by-4.0
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python
# Gradio Space entry point: a minimal text-to-image demo wrapping the
# "ehristoforu/Fluently-v1" Stable Diffusion checkpoint.

import os
import random
import uuid

import gradio as gr
import numpy as np
# NOTE(review): `os` and `Image` are imported but not referenced anywhere in
# this file — confirm they are unused before removing.
from PIL import Image
import spaces
import torch
from diffusers import StableDiffusionPipeline

# Markdown rendered at the top of the UI.
DESCRIPTION = """
# Fluently V1

Model: [this](https://huggingface.co/ehristoforu/Fluently-v1)
"""
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"

# Upper bound for the seed slider (largest signed 32-bit integer).
MAX_SEED = np.iinfo(np.int32).max

# Feature flags (0/1), both disabled: optional torch.compile of the UNet and
# optional accelerate CPU offload for the pipeline.
USE_TORCH_COMPILE = 0
ENABLE_CPU_OFFLOAD = 0

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
30
# Load the diffusion pipeline once at import time. This only happens on GPU
# hosts; on a CPU-only machine `pipe` is never bound, so calling `generate`
# below would raise NameError (the DESCRIPTION banner already warns that the
# demo may not work on CPU).
if torch.cuda.is_available():
    pipe = StableDiffusionPipeline.from_pretrained(
        "ehristoforu/Fluently-v1",
        torch_dtype=torch.float16,  # half precision to fit GPU memory
        use_safetensors=True,
    )
    if ENABLE_CPU_OFFLOAD:
        # NOTE(review): model CPU offload and the unconditional
        # `pipe.to(device)` below are normally mutually exclusive — confirm
        # the ordering if this flag is ever enabled (it is 0 today).
        pipe.enable_model_cpu_offload()

    pipe.to(device)
    print("Loaded on Device!")

    if USE_TORCH_COMPILE:
        # Optional UNet compilation; disabled by default (flag is 0).
        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
        print("Model Compiled!")
46
+
47
def save_image(img):
    """Write *img* to the working directory under a fresh random name.

    Returns the generated ``<uuid4>.png`` filename so the caller (the Gradio
    gallery) can reference the file on disk.
    """
    filename = f"{uuid.uuid4()}.png"
    img.save(filename)
    return filename
51
+
52
+
53
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    """Return a fresh random seed when *randomize_seed* is set, else *seed*."""
    if not randomize_seed:
        return seed
    return random.randint(0, MAX_SEED)
57
+
58
+
59
@spaces.GPU(enable_queue=True)
def generate(
    prompt: str,
    negative_prompt: str = "",
    use_negative_prompt: bool = False,
    seed: int = 0,
    width: int = 1024,
    height: int = 1024,
    guidance_scale: float = 3,
    randomize_seed: bool = False,
    progress=gr.Progress(track_tqdm=True),
):
    """Run one text-to-image generation.

    Returns a tuple ``(image_paths, seed)``: the list of saved PNG paths for
    the gallery and the seed that was actually used (randomized when
    *randomize_seed* is set), so the run can be reproduced.
    """
    pipe.to(device)
    seed = int(randomize_seed_fn(seed, randomize_seed))
    # Bug fix: the resolved seed was previously returned to the UI but never
    # fed to the pipeline, so the displayed seed could not reproduce the
    # image. Seed an explicit generator and pass it through.
    generator = torch.Generator(device=device).manual_seed(seed)

    if not use_negative_prompt:
        negative_prompt = ""  # type: ignore
    images = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=30,
        num_images_per_prompt=1,
        generator=generator,
        output_type="pil",
    ).images

    # Persist each image to disk; the gallery component takes file paths.
    image_paths = [save_image(img) for img in images]
    print(image_paths)
    return image_paths, seed
91
+
92
+
93
+
94
+
95
# Example prompts offered in the UI (outputs are not cached; clicking one
# runs `generate` live).
examples = [
    "neon holography crystal cat",
    "a cat eating a piece of cheese",
    "an astronaut riding a horse in space",
    "a cartoon of a boy playing with a tiger",
    "a cute robot artist painting on an easel, concept art",
    "a close up of a woman wearing a transparent, prismatic, elaborate nemeses headdress, over the should pose, brown skin-tone"
]

# Narrow the page, centre the h1 title, and hide the Gradio footer.
css = '''
.gradio-container{max-width: 560px !important}
h1{text-align:center}
footer {
visibility: hidden
}
'''
with gr.Blocks(title="Fluently V1", css=css) as demo:
    gr.Markdown(DESCRIPTION)
    # Kept in the layout but hidden from visitors.
    gr.DuplicateButton(
        value="Duplicate Space for private use",
        elem_id="duplicate-button",
        visible=False,
    )
    with gr.Group():
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button("Run", scale=0)
        result = gr.Gallery(label="Result", columns=1, preview=True, show_label=False)
    with gr.Accordion("Advanced options", open=False):
        with gr.Row():
            use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
        # Hidden until "Use negative prompt" is ticked (see .change below).
        negative_prompt = gr.Text(
            label="Negative prompt",
            max_lines=1,
            value="""(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation""",
            placeholder="Enter a negative prompt",
            visible=False,
        )
        seed = gr.Slider(
            label="Seed",
            minimum=0,
            maximum=MAX_SEED,
            step=1,
            value=0,
            visible=True
        )
        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        with gr.Row(visible=True):
            width = gr.Slider(
                label="Width",
                minimum=256,
                maximum=1024,
                step=8,
                value=512,
            )
            height = gr.Slider(
                label="Height",
                minimum=256,
                maximum=1024,
                step=8,
                value=512,
            )
        with gr.Row():
            guidance_scale = gr.Slider(
                label="Guidance Scale",
                minimum=0.1,
                maximum=20.0,
                step=0.1,
                value=5.5,
            )

    gr.Examples(
        examples=examples,
        inputs=prompt,
        outputs=[result, seed],
        fn=generate,
        cache_examples=False,
    )

    # Show/hide the negative-prompt textbox to mirror the checkbox state.
    use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        api_name=False,
    )

    # Enter in either textbox and the Run button all trigger the same handler.
    gr.on(
        triggers=[
            prompt.submit,
            negative_prompt.submit,
            run_button.click,
        ],
        fn=generate,
        inputs=[
            prompt,
            negative_prompt,
            use_negative_prompt,
            seed,
            width,
            height,
            guidance_scale,
            randomize_seed,
        ],
        outputs=[result, seed],
        api_name="run",
    )

if __name__ == "__main__":
    demo.queue(max_size=20).launch(show_api=False, debug=False)
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ torch
2
+ diffusers==0.25.0
3
+ transformers
4
+ safetensors
5
+ accelerate
6
+ omegaconf