animation
- ImageState.py +2 -4
- app.py +9 -3
- configs.py +2 -2
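In short: ImageState now points state_id at a dedicated "./img_history" directory and clears it whenever new image paths are loaded, app.py threads a per-session UUID through the Gradio state (plus some debug printing), and configs.py updates the slider values returned by the set_small_local and set_major_local presets.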
ImageState.py
CHANGED
@@ -41,7 +41,7 @@ class ImageState:
         self.transform_history = []
         self.attn_mask = None
         self.prompt_optim = prompt_optimizer
-        self.state_id = "./"
+        self.state_id = "./img_history"
         print("NEW INSTANCE")
         print(self.state_id)
         self._load_vectors()
@@ -91,7 +91,6 @@ class ImageState:
     # def _get_current_vector_transforms(self):
     #     current_vector_transforms = (self.blue_eyes, self.lip_size, self.hair_gp, self.asian_transform, sum(self.current_prompt_transforms))
     #     return (self.blend_latent, current_vector_transforms)
-    # @cache
     def _get_mask(self, img, mask=None):
         if img and "mask" in img and img["mask"] is not None:
             attn_mask = torchvision.transforms.ToTensor()(img["mask"])
@@ -156,8 +155,7 @@ class ImageState:
         if path1 is None: path1 = path2
         if path2 is None: path2 = path1
         self.path1, self.path2 = path1, path2
-
-        # self.aligned_path2 = align_from_path(path2)
+        clear_img_dir(self.state_id)
         return self.blend(blend_weight)
     @torch.no_grad()
     def blend(self, weight):
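The new code calls clear_img_dir(self.state_id), a helper whose definition is not part of this diff. A minimal sketch of what such a helper plausibly does, assuming it only needs to empty and recreate the instance's image-history directory (the name comes from the diff; the body below is a guess):

import os
import shutil

# Hypothetical reconstruction -- clear_img_dir is defined elsewhere in the
# repo. Assumed behavior: wipe the history directory so frames from a
# previous run don't leak into the next animation, then recreate it.
def clear_img_dir(img_dir):
    if os.path.exists(img_dir):
        shutil.rmtree(img_dir)
    os.makedirs(img_dir, exist_ok=True)

Tying the directory to state_id only isolates concurrent users if each instance gets a distinct id, which is presumably what the UUID plumbing in app.py below is working toward.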
app.py
CHANGED
@@ -47,6 +47,7 @@ class StateWrapper:
     def apply_lip_vector(state, *args, **kwargs):
         return state, *state[0].apply_lip_vector(*args, **kwargs)
     def apply_prompts(state, *args, **kwargs):
+        print(state[1])
         for image in state[0].apply_prompts(*args, **kwargs):
             yield state, *image
     def apply_rb_vector(state, *args, **kwargs):
@@ -69,11 +70,16 @@ class StateWrapper:
         return state, *state[0].update_images(*args, **kwargs)
     def update_requant(state, *args, **kwargs):
         return state, *state[0].update_requant(*args, **kwargs)
-
+def ret_id(id):
+    print(id)
+    return(id)
 with gr.Blocks(css="styles.css") as demo:
-
+    id = gr.State(str(uuid.uuid4()))
+    state = gr.State([ImageState(vqgan, promptoptim), str(uuid.uuid4())])
     with gr.Row():
         with gr.Column(scale=1):
+            x = gr.Button(label="asd")
+            x.click(ret_id, inputs=id, outputs=id)
             blue_eyes = gr.Slider(
                 label="Blue Eyes",
                 minimum=-.8,
@@ -192,7 +198,7 @@ with gr.Blocks(css="styles.css") as demo:
                 maximum=50,
                 value=3,
                 step=1,
-                label="Steps to run at the end of the optimization, optimizing only the masked perceptual loss. If the edit is changing the identity too much, this setting will run steps at the end that
+                label="Steps to run at the end of the optimization, optimizing only the masked perceptual loss. If the edit is changing the identity too much, this setting will run steps at the end that 'pull' the image back towards the original identity")
                 # discriminator_steps = gr.Slider(minimum=0,
                 #                                 maximum=50,
                 #                                 step=1,
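The ret_id helper, the "asd" button, and the print(state[1]) call read like temporary debugging aids for verifying that each session sees its own id; note that str(uuid.uuid4()) also requires an import uuid outside the hunks shown. A self-contained sketch of the same per-session-id pattern (show_id and the component names are stand-ins, not the app's real identifiers):

import uuid
import gradio as gr

# Stand-in handler: the real app threads [ImageState, session_id] through
# every callback; this one just echoes the id, like ret_id above.
def show_id(session_id):
    print(session_id)
    return session_id

with gr.Blocks() as demo:
    # The default is computed once when the UI is built, then copied into
    # each new session's state (see the note below).
    session_id = gr.State(str(uuid.uuid4()))
    btn = gr.Button("Show session id")
    out = gr.Textbox(label="Session id")
    btn.click(show_id, inputs=session_id, outputs=out)

if __name__ == "__main__":
    demo.launch()

One caveat with this pattern: gr.State's default value is evaluated a single time at build time and deep-copied into each session, so every visitor starts from a copy of the same UUID; a genuinely unique id per visitor would have to be regenerated in a per-session event such as demo.load.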
configs.py
CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 def set_small_local():
-    return (gr.Slider.update(value=25), gr.Slider.update(value=0.15), gr.Slider.update(value=
+    return (gr.Slider.update(value=25), gr.Slider.update(value=0.15), gr.Slider.update(value=5), gr.Slider.update(value=4))
 def set_major_local():
-    return (gr.Slider.update(value=25), gr.Slider.update(value=0.
+    return (gr.Slider.update(value=25), gr.Slider.update(value=0.187), gr.Slider.update(value=36.6), gr.Slider.update(value=6))
 def set_major_global():
     return (gr.Slider.update(value=30), gr.Slider.update(value=0.1), gr.Slider.update(value=1), gr.Slider.update(value=1))
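Each preset returns a 4-tuple of gr.Slider.update(...) calls (the pre-4.0 Gradio update API), so each button that triggers it must list exactly four sliders as outputs, in matching order. A sketch of that wiring with hypothetical slider names; the real components are defined in app.py:

import gradio as gr
from configs import set_small_local, set_major_local, set_major_global

with gr.Blocks() as demo:
    # Hypothetical names for the four sliders the presets target.
    iterations = gr.Slider(0, 100, label="Iterations")
    learning_rate = gr.Slider(0, 1, label="Learning rate")
    lpips_weight = gr.Slider(0, 100, label="LPIPS weight")
    reconstruction_steps = gr.Slider(0, 50, label="Reconstruction steps")
    sliders = [iterations, learning_rate, lpips_weight, reconstruction_steps]

    # One button per preset; a click pushes all four slider updates at once.
    gr.Button("Small local edit").click(set_small_local, inputs=None, outputs=sliders)
    gr.Button("Major local edit").click(set_major_local, inputs=None, outputs=sliders)
    gr.Button("Major global edit").click(set_major_global, inputs=None, outputs=sliders)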