hysts HF staff committed on
Commit
0a214bd
·
1 Parent(s): 0038172
Files changed (4) hide show
  1. .pre-commit-config.yaml +35 -0
  2. .style.yapf +5 -0
  3. README.md +1 -1
  4. app.py +40 -68
.pre-commit-config.yaml ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ repos:
2
+ - repo: https://github.com/pre-commit/pre-commit-hooks
3
+ rev: v4.2.0
4
+ hooks:
5
+ - id: check-executables-have-shebangs
6
+ - id: check-json
7
+ - id: check-merge-conflict
8
+ - id: check-shebang-scripts-are-executable
9
+ - id: check-toml
10
+ - id: check-yaml
11
+ - id: double-quote-string-fixer
12
+ - id: end-of-file-fixer
13
+ - id: mixed-line-ending
14
+ args: ['--fix=lf']
15
+ - id: requirements-txt-fixer
16
+ - id: trailing-whitespace
17
+ - repo: https://github.com/myint/docformatter
18
+ rev: v1.4
19
+ hooks:
20
+ - id: docformatter
21
+ args: ['--in-place']
22
+ - repo: https://github.com/pycqa/isort
23
+ rev: 5.12.0
24
+ hooks:
25
+ - id: isort
26
+ - repo: https://github.com/pre-commit/mirrors-mypy
27
+ rev: v0.991
28
+ hooks:
29
+ - id: mypy
30
+ args: ['--ignore-missing-imports']
31
+ - repo: https://github.com/google/yapf
32
+ rev: v0.32.0
33
+ hooks:
34
+ - id: yapf
35
+ args: ['--parallel', '--in-place']
.style.yapf ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ [style]
2
+ based_on_style = pep8
3
+ blank_line_before_nested_class_or_def = false
4
+ spaces_before_comment = 2
5
+ split_before_logical_operator = true
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 👁
4
  colorFrom: blue
5
  colorTo: gray
6
  sdk: gradio
7
- sdk_version: 3.0.5
8
  app_file: app.py
9
  pinned: false
10
  ---
 
4
  colorFrom: blue
5
  colorTo: gray
6
  sdk: gradio
7
+ sdk_version: 3.19.1
8
  app_file: app.py
9
  pinned: false
10
  ---
app.py CHANGED
@@ -2,16 +2,17 @@
2
 
3
  from __future__ import annotations
4
 
5
- import argparse
6
  import os
7
  import pathlib
 
8
  import subprocess
9
  import tarfile
10
 
11
  if os.environ.get('SYSTEM') == 'spaces':
12
- subprocess.call('pip uninstall -y opencv-python'.split())
13
- subprocess.call('pip uninstall -y opencv-python-headless'.split())
14
- subprocess.call('pip install opencv-python-headless==4.5.5.64'.split())
 
15
 
16
  import gradio as gr
17
  import huggingface_hub
@@ -24,22 +25,8 @@ mp_pose = mp.solutions.pose
24
 
25
  TITLE = 'MediaPipe Human Pose Estimation'
26
  DESCRIPTION = 'https://google.github.io/mediapipe/'
27
- ARTICLE = '<center><img src="https://visitor-badge.glitch.me/badge?page_id=hysts.mediapipe-pose-estimation" alt="visitor badge"/></center>'
28
 
29
- TOKEN = os.environ['TOKEN']
30
-
31
-
32
- def parse_args() -> argparse.Namespace:
33
- parser = argparse.ArgumentParser()
34
- parser.add_argument('--theme', type=str)
35
- parser.add_argument('--live', action='store_true')
36
- parser.add_argument('--share', action='store_true')
37
- parser.add_argument('--port', type=int)
38
- parser.add_argument('--disable-queue',
39
- dest='enable_queue',
40
- action='store_false')
41
- parser.add_argument('--allow-flagging', type=str, default='never')
42
- return parser.parse_args()
43
 
44
 
45
  def load_sample_images() -> list[pathlib.Path]:
@@ -52,7 +39,7 @@ def load_sample_images() -> list[pathlib.Path]:
52
  path = huggingface_hub.hf_hub_download(dataset_repo,
53
  name,
54
  repo_type='dataset',
55
- use_auth_token=TOKEN)
56
  with tarfile.open(path) as f:
57
  f.extractall(image_dir.as_posix())
58
  return sorted(image_dir.rglob('*.jpg'))
@@ -74,7 +61,7 @@ def run(image: np.ndarray, model_complexity: int, enable_segmentation: bool,
74
  elif background_color == 'black':
75
  bg_color = 0
76
  elif background_color == 'green':
77
- bg_color = (0, 255, 0)
78
  else:
79
  raise ValueError
80
 
@@ -92,50 +79,35 @@ def run(image: np.ndarray, model_complexity: int, enable_segmentation: bool,
92
  return res[:, :, ::-1]
93
 
94
 
95
- def main():
96
- args = parse_args()
97
-
98
- model_complexities = list(range(3))
99
- background_colors = ['white', 'black', 'green']
100
-
101
- image_paths = load_sample_images()
102
- examples = [[
103
- path.as_posix(), model_complexities[1], True, 0.5, background_colors[0]
104
- ] for path in image_paths]
105
-
106
- gr.Interface(
107
- run,
108
- [
109
- gr.inputs.Image(type='numpy', label='Input'),
110
- gr.inputs.Radio(model_complexities,
111
- type='index',
112
- default=model_complexities[1],
113
- label='Model Complexity'),
114
- gr.inputs.Checkbox(default=True, label='Enable Segmentation'),
115
- gr.inputs.Slider(0,
116
- 1,
117
- step=0.05,
118
- default=0.5,
119
- label='Minimum Detection Confidence'),
120
- gr.inputs.Radio(background_colors,
121
- type='value',
122
- default=background_colors[0],
123
- label='Background Color'),
124
- ],
125
- gr.outputs.Image(type='numpy', label='Output'),
126
- examples=examples,
127
- title=TITLE,
128
- description=DESCRIPTION,
129
- article=ARTICLE,
130
- theme=args.theme,
131
- allow_flagging=args.allow_flagging,
132
- live=args.live,
133
- ).launch(
134
- enable_queue=args.enable_queue,
135
- server_port=args.port,
136
- share=args.share,
137
- )
138
-
139
-
140
- if __name__ == '__main__':
141
- main()
 
2
 
3
  from __future__ import annotations
4
 
 
5
  import os
6
  import pathlib
7
+ import shlex
8
  import subprocess
9
  import tarfile
10
 
11
  if os.environ.get('SYSTEM') == 'spaces':
12
+ subprocess.call(shlex.split('pip uninstall -y opencv-python'))
13
+ subprocess.call(shlex.split('pip uninstall -y opencv-python-headless'))
14
+ subprocess.call(
15
+ shlex.split('pip install opencv-python-headless==4.5.5.64'))
16
 
17
  import gradio as gr
18
  import huggingface_hub
 
25
 
26
  TITLE = 'MediaPipe Human Pose Estimation'
27
  DESCRIPTION = 'https://google.github.io/mediapipe/'
 
28
 
29
+ HF_TOKEN = os.getenv('HF_TOKEN')
 
 
 
 
 
 
 
 
 
 
 
 
 
30
 
31
 
32
  def load_sample_images() -> list[pathlib.Path]:
 
39
  path = huggingface_hub.hf_hub_download(dataset_repo,
40
  name,
41
  repo_type='dataset',
42
+ use_auth_token=HF_TOKEN)
43
  with tarfile.open(path) as f:
44
  f.extractall(image_dir.as_posix())
45
  return sorted(image_dir.rglob('*.jpg'))
 
61
  elif background_color == 'black':
62
  bg_color = 0
63
  elif background_color == 'green':
64
+ bg_color = (0, 255, 0) # type: ignore
65
  else:
66
  raise ValueError
67
 
 
79
  return res[:, :, ::-1]
80
 
81
 
82
+ model_complexities = list(range(3))
83
+ background_colors = ['white', 'black', 'green']
84
+
85
+ image_paths = load_sample_images()
86
+ examples = [[
87
+ path.as_posix(), model_complexities[1], True, 0.5, background_colors[0]
88
+ ] for path in image_paths]
89
+
90
+ gr.Interface(
91
+ fn=run,
92
+ inputs=[
93
+ gr.Image(label='Input', type='numpy'),
94
+ gr.Radio(label='Model Complexity',
95
+ choices=model_complexities,
96
+ type='index',
97
+ value=model_complexities[1]),
98
+ gr.Checkbox(default=True, label='Enable Segmentation'),
99
+ gr.Slider(label='Minimum Detection Confidence',
100
+ minimum=0,
101
+ maximum=1,
102
+ step=0.05,
103
+ value=0.5),
104
+ gr.Radio(label='Background Color',
105
+ choices=background_colors,
106
+ type='value',
107
+ value=background_colors[0]),
108
+ ],
109
+ outputs=gr.Image(label='Output', type='numpy'),
110
+ examples=examples,
111
+ title=TITLE,
112
+ description=DESCRIPTION,
113
+ ).launch(show_api=False)