Spaces: Runtime error
Update APP.py
APP.py CHANGED
@@ -13,210 +13,275 @@ import psutil
 import math
 import random

-
-pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
+# Load the model
+pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
+    "timbrooks/instruct-pix2pix",
+    torch_dtype=torch.float16,
+    safety_checker=None
+)

 device = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"

 if torch.cuda.is_available():
     pipe = pipe.to("cuda")

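Note that the checkpoint is loaded in float16 regardless of device; the `torch.cuda.is_available()` check above only decides where the pipeline lives, and on a CPU-only Space half precision is unsupported or extremely slow. A device-aware variant (a sketch, not the committed code):

import torch
from diffusers import StableDiffusionInstructPix2PixPipeline

# Pick the dtype based on the available device, since float16
# inference on CPU is generally not workable.
use_cuda = torch.cuda.is_available()
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix",
    torch_dtype=torch.float16 if use_cuda else torch.float32,
    safety_checker=None,
)
pipe = pipe.to("cuda" if use_cuda else "cpu")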
-
-    input_image
-    instruction: str,
-    steps: int,
-    seed: int,
-    text_cfg_scale: float,
-    image_cfg_scale: float,
-):
-
-    width, height = input_image.size
-    factor = 512 / max(width, height)
-    factor = math.ceil(min(width, height) * factor / 64) * 64 / min(width, height)
-    width = int((width * factor) // 64) * 64
-    height = int((height * factor) // 64) * 64
-    input_image = ImageOps.fit(input_image, (width, height), method=Image.Resampling.LANCZOS)
-
-    if instruction == "":
-        return [input_image, seed]
-
-    generator = torch.manual_seed(seed)
-    edited_image = pipe(
-        instruction, image=input_image,
-        guidance_scale=text_cfg_scale, image_guidance_scale=image_cfg_scale,
-        num_inference_steps=steps, generator=generator,
-    ).images[0]
-    print(f"EDITED: {edited_image}")
-    return edited_image
-
+def pix2pix(input_image: Image.Image, instruction: str, steps: int, seed: int, text_cfg_scale: float, image_cfg_scale: float):
+    width, height = input_image.size
+    factor = 512 / max(width, height)
+    factor = math.ceil(min(width, height) * factor / 64) * 64 / min(width, height)
+    width = int((width * factor) // 64) * 64
+    height = int((height * factor) // 64) * 64
+    input_image = ImageOps.fit(input_image, (width, height), method=Image.Resampling.LANCZOS)
+
+    if instruction == "":
+        return [input_image, seed]
+
+    generator = torch.manual_seed(seed)
+    edited_image = pipe(
+        instruction, image=input_image,
+        guidance_scale=text_cfg_scale, image_guidance_scale=image_cfg_scale,
+        num_inference_steps=steps, generator=generator,
+    ).images[0]
+    return edited_image

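The width/height arithmetic in `pix2pix` rescales the frame so its long side is roughly 512, then snaps both sides to multiples of 64, which the Stable Diffusion latent space requires. A worked example of the same arithmetic, assuming a 640×480 input frame:

import math

def snapped_size(width, height):
    # Mirrors the resize arithmetic in pix2pix above.
    factor = 512 / max(width, height)
    factor = math.ceil(min(width, height) * factor / 64) * 64 / min(width, height)
    return int((width * factor) // 64) * 64, int((height * factor) // 64) * 64

print(snapped_size(640, 480))  # (512, 384): long side ~512, both multiples of 64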
 def get_frames(video_in):
     frames = []
-    #resize the video
     clip = VideoFileClip(video_in)

-    #
-    if clip.fps > 30:
-        clip_resized = clip.resize(height=512)
-        clip_resized.write_videofile("video_resized.mp4", fps=30)
-    else:
-        print("video rate is OK")
-        clip_resized = clip.resize(height=512)
-        clip_resized.write_videofile("video_resized.mp4", fps=clip.fps)
-
-    print("video resized to 512 height")
-
-    # Opens the Video file with CV2
-    cap= cv2.VideoCapture("video_resized.mp4")
+    # Resize the video
+    clip_resized = clip.resize(height=512)
+    clip_resized.write_videofile("video_resized.mp4", fps=min(clip.fps, 30))
+
+    cap = cv2.VideoCapture("video_resized.mp4")
     fps = cap.get(cv2.CAP_PROP_FPS)
-
-    while(cap.isOpened()):
+    i = 0
+    while cap.isOpened():
         ret, frame = cap.read()
-        if ret == False:
+        if not ret:
             break
+        frame_path = f'frame_{i}.jpg'
+        cv2.imwrite(frame_path, frame)
+        frames.append(frame_path)
+        i += 1
+
     cap.release()
     cv2.destroyAllWindows()
-    print("broke the video into frames")
-
     return frames, fps

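One detail worth noting in the loop above: OpenCV decodes frames as BGR ndarrays and `cv2.imwrite` expects BGR as well, so the JPEGs written here reopen with correct colors when `infer` later loads them with PIL. A quick self-contained check, assuming video_resized.mp4 already exists:

import cv2
from PIL import Image

cap = cv2.VideoCapture("video_resized.mp4")
ret, frame = cap.read()                        # BGR ndarray
if ret:
    cv2.imwrite("frame_check.jpg", frame)      # imwrite also expects BGR
    print(Image.open("frame_check.jpg").size)  # e.g. (width, 512) after the resize
cap.release()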
 
-
 def create_video(frames, fps):
-    print("building video result")
     clip = ImageSequenceClip(frames, fps=fps)
-
-    return
+    output_video = "output_video.mp4"
+    clip.write_videofile(output_video, fps=fps)
+    return output_video

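Together, `get_frames` and `create_video` form a decode/re-encode round trip through numbered JPEGs in the working directory. A minimal sketch of that round trip, assuming a local file named input.mp4 (the filename is illustrative):

frames, fps = get_frames("input.mp4")  # writes frame_0.jpg, frame_1.jpg, ...
print(f"extracted {len(frames)} frames at {fps} fps")
out_path = create_video(frames, fps)   # re-encodes them into output_video.mp4
print(f"wrote {out_path}")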
-def infer(prompt,video_in, seed_in, trim_value):
-    print(prompt)
-    break_vid = get_frames(video_in)
-
-    frames_list= break_vid[0]
-    fps = break_vid[1]
-    n_frame = int(trim_value*fps)
-
-    if n_frame >= len(frames_list):
-        print("video is shorter than the cut value")
-        n_frame = len(frames_list)
-
+def infer(prompt, video_in, seed_in, trim_value):
+    frames_list, fps = get_frames(video_in)
+    n_frame = int(trim_value * fps)
+
+    n_frame = min(n_frame, len(frames_list))
+
     result_frames = []
-
-    # exporting the image
-    pix2pix_img.save(f"result_img-{i}.jpg")
-    result_frames.append(f"result_img-{i}.jpg")
-    print("frame " + i + "/" + str(n_frame) + ": done;")
-
-    final_vid = create_video(result_frames, fps)
-    print("finished !")
-
-    return final_vid, gr.Group.update(visible=True)
+    for i in frames_list[:n_frame]:
+        pil_image = Image.open(i).convert("RGB")
+        edited_image = pix2pix(pil_image, prompt, 50, seed_in, 7.5, 1.5)
+        result_frame_path = f"result_{i}"
+        edited_image.save(result_frame_path)
+        result_frames.append(result_frame_path)
+
+    final_video = create_video(result_frames, fps)
+    return final_video, gr.Group.update(visible=True)

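A compatibility note on the return value above: `infer` reveals the output group via `gr.Group.update(visible=True)`, but depending on the Gradio 3.x release a per-component `update` classmethod may not exist on `gr.Group`. The generic `gr.update` form is the safer spelling:

# Version-agnostic way to toggle the output group's visibility:
return final_video, gr.update(visible=True)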
 title = """
 <div style="text-align: center; max-width: 700px; margin: 0 auto;">
-    <div
-        style="
-        display: inline-flex;
-        align-items: center;
-        gap: 0.8rem;
-        font-size: 1.75rem;
-        "
-    >
-        <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
-            Pix2Pix Video
-        </h1>
+    <div style="display: inline-flex; align-items: center; gap: 0.8rem; font-size: 1.75rem;">
+        <h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">Pix2Pix Video</h1>
     </div>
-    <p style="margin-bottom: 10px; font-size: 94%">
-        Apply Instruct Pix2Pix Diffusion to a video
-    </p>
+    <p style="margin-bottom: 10px; font-size: 94%">Apply Instruct Pix2Pix Diffusion to a video</p>
 </div>
 """
 
 article = """
-
 <div class="footer">
     <p>
     Examples by <a href="https://twitter.com/CitizenPlain" target="_blank">Nathan Shipley</a> •
     Follow <a href="https://twitter.com/fffiloni" target="_blank">Sylvain Filoni</a> for future updates 🤗
     </p>
 </div>
-<div id="may-like-container" style="display: flex;justify-content: center;flex-direction: column;align-items: center;margin-bottom: 30px;">
+<div id="may-like-container" style="display: flex; justify-content: center; flex-direction: column; align-items: center; margin-bottom: 30px;">
     <p>You may also like: </p>
-    <div id="may-like-content" style="display:flex;flex-wrap: wrap;align-items:center;height:20px;">
-        <a href="https://huggingface.co/spaces/timbrooks/instruct-pix2pix" target="_blank">
+    <div id="may-like-content" style="display: flex; flex-wrap: wrap; align-items: center; height: 20px;">
+        <svg height="20" width="162" style="margin-left: 4px; margin-bottom: 6px;">
+        <a href="https://huggingface.co/spaces/timbrooks/instruct-pix2pix" target="_blank">
             <image href="https://img.shields.io/badge/🤗 Spaces-Instruct_Pix2Pix-blue" src="https://img.shields.io/badge/🤗 Spaces-Instruct_Pix2Pix-blue.png" height="20"/>
+        </a>
         </svg>
     </div>
 </div>
 """
 
+css = """
+/* Enhanced CSS for a more modern look */
+#col-container {
+    max-width: 820px;
+    margin-left: auto;
+    margin-right: auto;
+    padding: 1rem;
+    background-color: #f9f9f9;
+    border-radius: 10px;
+    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
+}
+#duplicate-container {
+    display: flex;
+    justify-content: space-between;
+    align-items: center;
+    line-height: 1.5em;
+    flex-direction: row-reverse;
+    font-size: 1em;
+    margin-top: 1rem;
+    background-color: #ffffff;
+    padding: 0.5rem 1rem;
+    border-radius: 8px;
+    box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
+}
+a, a:hover, a:visited {
+    text-decoration: underline;
+    font-weight: 600;
+    color: #1f2937 !important;
+    transition: color 0.3s ease;
+}
+a:hover {
+    color: #4a90e2 !important;
+}
+.dark a, .dark a:hover, .dark a:visited {
+    color: #f3f4f6 !important;
+}
+.dark a:hover {
+    color: #90c2f7 !important;
+}
+.footer {
+    margin-bottom: 45px;
+    margin-top: 10px;
+    text-align: center;
+    border-bottom: 1px solid #e5e5e5;
+    padding-bottom: 1rem;
+}
+.footer > p {
+    font-size: 0.8rem !important;
+    display: inline-block;
+    padding: 0 10px;
+    transform: translateY(26px);
+    background: white;
+}
+.dark .footer {
+    border-color: #303030;
+}
+.dark .footer > p {
+    background: #0b0f19;
+}
+div#may-like-container > p {
+    font-size: 0.8em;
+    margin-bottom: 4px;
+}
+.animate-spin {
+    animation: spin 1s linear infinite;
+}
+@keyframes spin {
+    from {
+        transform: rotate(0deg);
+    }
+    to {
+        transform: rotate(360deg);
+    }
+}
+#share-btn-container {
+    display: flex;
+    padding: 0.5rem;
+    background-color: #000000;
+    justify-content: center;
+    align-items: center;
+    border-radius: 9999px;
+    max-width: 13rem;
+    margin: 1rem auto;
+    transition: background-color 0.3s ease;
+}
+#share-btn-container:hover {
+    background-color: #333333;
+}
+#share-btn {
+    all: initial;
+    color: #ffffff;
+    font-weight: 600;
+    cursor: pointer;
+    font-family: 'IBM Plex Sans', sans-serif;
+    margin-left: 0.5rem;
+    padding: 0.5rem 1rem;
+    border-radius: 9999px;
+    transition: background-color 0.3s ease;
+}
+#share-btn:focus {
+    outline: 2px solid #ffffff;
+    outline-offset: 2px;
+}
+#share-btn * {
+    all: unset;
+}
+#share-btn-container div:nth-child(-n+2) {
+    width: auto !important;
+    min-height: 0px !important;
+}
+#share-btn-container .wrap {
+    display: none !important;
+}
+#share-btn-container.hidden {
+    display: none !important;
+}
+@media (max-width: 600px) {
+    #duplicate-container {
+        flex-direction: column;
+        text-align: center;
+    }
+    #share-btn-container {
+        max-width: 100%;
+    }
+}
+.dark #col-container {
+    background-color: #1f2937;
+}
+.dark #duplicate-container {
+    background-color: #2d3748;
+}
+.dark #share-btn-container {
+    background-color: #111827;
+}
+.dark #share-btn-container:hover {
+    background-color: #1f2937;
+}
+.dark .footer > p {
+    background: #1f2937;
+}
+"""
+
+block = gr.Blocks(css=css)
+
+with block:
+    with gr.Row(elem_id="col-container"):
+        gr.Markdown(title)
+    with gr.Group():
+        with gr.Box():
             with gr.Row():
-        inputs = [prompt,video_inp,seed_inp, trim_in]
-        outputs = [video_out, share_group]
-
-        #ex = gr.Examples(
-        #    [
-        #        ["Make it a marble sculpture", "./examples/pexels-jill-burrow-7665249_512x512.mp4", 422112651, 4],
-        #        ["Make it molten lava", "./examples/Ocean_Pexels_ 8953474_512x512.mp4", 43571876, 4]
-        #    ],
-        #    inputs=inputs,
-        #    outputs=outputs,
-        #    fn=infer,
-        #    cache_examples=True,
-        #)
-
-        gr.HTML(article)
-
-        submit_btn.click(infer, inputs, outputs)
-        share_button.click(None, [], [], _js=share_js)
-
-demo.queue(max_size=12).launch()
+                input_text = gr.Textbox(label="Enter your prompt", placeholder="Enter your prompt", show_label=False, lines=2, max_lines=2).style(container=False)
+                seed = gr.Slider(0, 2000, value=0, step=1, label="Seed (0 = random)", show_label=False)
+            video_input = gr.Video(label="Input Video")
+            trim = gr.Slider(0, 10, value=2, step=1, label="Trim (seconds)")
+            run_button = gr.Button("Run")
+    with gr.Group(visible=False) as video_output_group:
+        video_output = gr.Video(label="Output Video")
+        share_button = gr.Button("Share to Community")
+
+    gr.Markdown(article)
+
+    run_button.click(infer, inputs=[input_text, video_input, seed, trim], outputs=[video_output, video_output_group])
+    community_icon_html(share_js, gr.update(video_output), block)
+
+block.launch()
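The Space status above reports a runtime error. One plausible source, assuming the standard Hugging Face share-button template (where `community_icon_html` is an HTML snippet string rather than a callable), is the `community_icon_html(share_js, gr.update(video_output), block)` call near the end of the new version: calling a string raises a TypeError at startup. The removed version wired the share button with the conventional Gradio 3.x pattern:

# Conventional share-button wiring (as in the removed code); share_js is
# the JavaScript snippet shipped with the HF share-button template:
share_button.click(None, [], [], _js=share_js)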