AmitIsraeli committed on
Commit
de5b99e
·
1 Parent(s): ea363b1

add documentation, improve run time on inversion

Browse files
Files changed (3) hide show
  1. .gitattributes +1 -0
  2. app.py +5 -1
  3. help_function.py +12 -5
.gitattributes CHANGED
@@ -29,6 +29,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
29
  *.tgz filter=lfs diff=lfs merge=lfs -text
30
  *.wasm filter=lfs diff=lfs merge=lfs -text
31
  *.xz filter=lfs diff=lfs merge=lfs -text
 
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
29
  *.tgz filter=lfs diff=lfs merge=lfs -text
30
  *.wasm filter=lfs diff=lfs merge=lfs -text
31
  *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.dat filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
app.py CHANGED
@@ -10,7 +10,11 @@ def greet(image,text,power):
10
  image_edit = model_helper.image_from_text(text,PIL_image,power)
11
  return image_edit
12
 
13
- description = "demo for model to edit face with text, you can see the [github repo CelebrityLook](https://github.com/amit154154/CelebrityLook)"
 
 
 
 
14
  title = "FaceOver - edit face with text 🐨 "
15
 
16
  examples = [
 
10
  image_edit = model_helper.image_from_text(text,PIL_image,power)
11
  return image_edit
12
 
13
+ description = "demo for model to edit face with text, you can see the [github repo CelebrityLook](https://github.com/amit154154/CelebrityLook) \n" \
14
+ "note that the model is in alpha version, so it's not perfect, but it's fun to play with it, some guidelines:\n " \
15
+ "1. the image you give the model needs to be aligned to the camera such that your eyes are in front of the camera\n" \
16
+ "2. play with the power of the text and the text itself, it will look like 💩 sometimes" \
17
+
18
  title = "FaceOver - edit face with text 🐨 "
19
 
20
  examples = [
help_function.py CHANGED
@@ -4,6 +4,9 @@ from torchvision import transforms
4
  from torchvision.transforms import ToPILImage
5
  import torch.nn.functional as F
6
 
 
 
 
7
  class help_function:
8
  def __init__(self):
9
  self.clip_text_model = torch.jit.load('jit_models/clip_text_jit.pt', map_location=torch.device('cpu'))
@@ -36,11 +39,15 @@ class help_function:
36
  w_delta = self.mapper_clip(text_feachers - self.mean_clip)
37
  return w_delta
38
 
 
39
  def image_from_text(self,text,image,power = 1.0):
40
  w_inversion = self.get_image_inversion(image)
41
- text_embedding = self.get_text_embedding(text)
42
- w_delta = self.get_text_delta(text_embedding)
43
-
44
- w_edit = w_inversion + w_delta * power
 
 
45
  image_edit = self.decoder(w_edit)
46
- return ToPILImage()((image_edit[0]+0.5)*0.5)
 
 
4
  from torchvision.transforms import ToPILImage
5
  import torch.nn.functional as F
6
 
7
+
8
+
9
+
10
  class help_function:
11
  def __init__(self):
12
  self.clip_text_model = torch.jit.load('jit_models/clip_text_jit.pt', map_location=torch.device('cpu'))
 
39
  w_delta = self.mapper_clip(text_feachers - self.mean_clip)
40
  return w_delta
41
 
42
+
43
  def image_from_text(self,text,image,power = 1.0):
44
  w_inversion = self.get_image_inversion(image)
45
+ if power != 0:
46
+ text_embedding = self.get_text_embedding(text)
47
+ w_delta = self.get_text_delta(text_embedding)
48
+ w_edit = w_inversion + w_delta * power
49
+ else:
50
+ w_edit = w_inversion
51
  image_edit = self.decoder(w_edit)
52
+ image_edit = ToPILImage()((image_edit[0]+0.5)*0.5).resize((512,512))
53
+ return image_edit