hysts HF Staff committed on
Commit
fafee53
·
1 Parent(s): 8cf6e41
.gitattributes CHANGED
@@ -26,3 +26,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
26
  *.zip filter=lfs diff=lfs merge=lfs -text
27
  *.zstandard filter=lfs diff=lfs merge=lfs -text
28
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
26
  *.zip filter=lfs diff=lfs merge=lfs -text
27
  *.zstandard filter=lfs diff=lfs merge=lfs -text
28
  *tfevents* filter=lfs diff=lfs merge=lfs -text
29
+ *.jpg filter=lfs diff=lfs merge=lfs -text
.gitignore DELETED
@@ -1 +0,0 @@
1
- images
 
 
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 📚
4
  colorFrom: purple
5
  colorTo: yellow
6
  sdk: gradio
7
- sdk_version: 3.19.1
8
  app_file: app.py
9
  pinned: false
10
  ---
 
4
  colorFrom: purple
5
  colorTo: yellow
6
  sdk: gradio
7
+ sdk_version: 3.36.1
8
  app_file: app.py
9
  pinned: false
10
  ---
app.py CHANGED
@@ -4,9 +4,7 @@ from __future__ import annotations
4
 
5
  import functools
6
  import os
7
- import pathlib
8
  import sys
9
- import tarfile
10
  from typing import Callable
11
 
12
  import cv2
@@ -27,32 +25,12 @@ sys.path.insert(0, 'deep-head-pose/code')
27
  from hopenet import Hopenet
28
  from ibug.face_detection import RetinaFacePredictor
29
 
30
- TITLE = 'Hopenet'
31
- DESCRIPTION = 'This is an unofficial demo for https://github.com/natanielruiz/deep-head-pose.'
32
-
33
- HF_TOKEN = os.getenv('HF_TOKEN')
34
-
35
-
36
- def load_sample_images() -> list[pathlib.Path]:
37
- image_dir = pathlib.Path('images')
38
- if not image_dir.exists():
39
- image_dir.mkdir()
40
- dataset_repo = 'hysts/input-images'
41
- filenames = ['001.tar']
42
- for name in filenames:
43
- path = huggingface_hub.hf_hub_download(dataset_repo,
44
- name,
45
- repo_type='dataset',
46
- use_auth_token=HF_TOKEN)
47
- with tarfile.open(path) as f:
48
- f.extractall(image_dir.as_posix())
49
- return sorted(image_dir.rglob('*.jpg'))
50
 
51
 
52
  def load_model(model_name: str, device: torch.device) -> nn.Module:
53
- path = huggingface_hub.hf_hub_download('hysts/Hopenet',
54
- f'models/{model_name}.pkl',
55
- use_auth_token=HF_TOKEN)
56
  state_dict = torch.load(path, map_location='cpu')
57
  model = Hopenet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 66)
58
  model.load_state_dict(state_dict)
@@ -160,26 +138,33 @@ model_names = [
160
  models = {name: load_model(name, device) for name in model_names}
161
  transform = create_transform()
162
 
163
- func = functools.partial(run,
164
- face_detector=face_detector,
165
- models=models,
166
- transform=transform,
167
- device=device)
168
-
169
- image_paths = load_sample_images()
170
- examples = [[path.as_posix(), model_names[0]] for path in image_paths]
171
-
172
- gr.Interface(
173
- fn=func,
174
- inputs=[
175
- gr.Image(type='numpy', label='Input'),
176
- gr.Radio(model_names,
177
- type='value',
178
- default=model_names[0],
179
- label='Model'),
180
- ],
181
- outputs=gr.Image(type='numpy', label='Output'),
182
- examples=examples,
183
- title=TITLE,
184
- description=DESCRIPTION,
185
- ).launch(show_api=False)
 
 
 
 
 
 
 
 
4
 
5
  import functools
6
  import os
 
7
  import sys
 
8
  from typing import Callable
9
 
10
  import cv2
 
25
  from hopenet import Hopenet
26
  from ibug.face_detection import RetinaFacePredictor
27
 
28
+ DESCRIPTION = '# [Hopenet](https://github.com/natanielruiz/deep-head-pose)'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
 
31
  def load_model(model_name: str, device: torch.device) -> nn.Module:
32
+ path = huggingface_hub.hf_hub_download('public-data/Hopenet',
33
+ f'models/{model_name}.pkl')
 
34
  state_dict = torch.load(path, map_location='cpu')
35
  model = Hopenet(torchvision.models.resnet.Bottleneck, [3, 4, 6, 3], 66)
36
  model.load_state_dict(state_dict)
 
138
  models = {name: load_model(name, device) for name in model_names}
139
  transform = create_transform()
140
 
141
+ fn = functools.partial(run,
142
+ face_detector=face_detector,
143
+ models=models,
144
+ transform=transform,
145
+ device=device)
146
+
147
+ examples = [['images/pexels-ksenia-chernaya-8535230.jpg', 'hopenet_alpha1']]
148
+
149
+ with gr.Blocks(css='style.css') as demo:
150
+ gr.Markdown(DESCRIPTION)
151
+ with gr.Row():
152
+ with gr.Column():
153
+ image = gr.Image(label='Input', type='numpy')
154
+ model_name = gr.Radio(label='Model',
155
+ choices=model_names,
156
+ type='value',
157
+ value=model_names[0])
158
+ run_button = gr.Button('Run')
159
+ with gr.Column():
160
+ result = gr.Image(label='Output')
161
+ gr.Examples(examples=examples,
162
+ inputs=[image, model_name],
163
+ outputs=result,
164
+ fn=fn,
165
+ cache_examples=os.getenv('CACHE_EXAMPLES') == '1')
166
+ run_button.click(fn=fn,
167
+ inputs=[image, model_name],
168
+ outputs=result,
169
+ api_name='run')
170
+ demo.queue().launch()
images/pexels-ksenia-chernaya-8535230.jpg ADDED

Git LFS Details

  • SHA256: 79b6b7ee58972731ed7d83e45a13c18544690df9b384f36327b627714c2f5cab
  • Pointer size: 131 Bytes
  • Size of remote file: 409 kB
requirements.txt CHANGED
@@ -1,6 +1,6 @@
1
- numpy==1.22.3
2
- opencv-python-headless==4.5.5.64
3
- Pillow==9.1.0
4
- scipy==1.8.0
5
- torch==1.11.0
6
- torchvision==0.12.0
 
1
+ numpy==1.23.5
2
+ opencv-python-headless==4.8.0.74
3
+ Pillow==10.0.0
4
+ scipy==1.11.1
5
+ torch==2.0.1
6
+ torchvision==0.15.2
style.css ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ h1 {
2
+ text-align: center;
3
+ }
4
+
5
+ #duplicate-button {
6
+ margin: auto;
7
+ }