Commit 714b9ba by hysts (HF staff)
Parent: 4ce10a7
Files changed (6):
  1. .pre-commit-config.yaml +46 -0
  2. .style.yapf +5 -0
  3. app.py +172 -157
  4. labels/cifar10_classes.json +1 -0
  5. model.py +104 -0
  6. style.css +11 -0
.pre-commit-config.yaml ADDED
@@ -0,0 +1,46 @@
+exclude: ^stylegan3
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+  rev: v4.2.0
+  hooks:
+  - id: check-executables-have-shebangs
+  - id: check-json
+  - id: check-merge-conflict
+  - id: check-shebang-scripts-are-executable
+  - id: check-toml
+  - id: check-yaml
+  - id: double-quote-string-fixer
+  - id: end-of-file-fixer
+  - id: mixed-line-ending
+    args: ['--fix=lf']
+  - id: requirements-txt-fixer
+  - id: trailing-whitespace
+- repo: https://github.com/myint/docformatter
+  rev: v1.4
+  hooks:
+  - id: docformatter
+    args: ['--in-place']
+- repo: https://github.com/pycqa/isort
+  rev: 5.10.1
+  hooks:
+  - id: isort
+- repo: https://github.com/pre-commit/mirrors-mypy
+  rev: v0.812
+  hooks:
+  - id: mypy
+    args: ['--ignore-missing-imports']
+- repo: https://github.com/google/yapf
+  rev: v0.32.0
+  hooks:
+  - id: yapf
+    args: ['--parallel', '--in-place']
+- repo: https://github.com/kynan/nbstripout
+  rev: 0.5.0
+  hooks:
+  - id: nbstripout
+    args: ['--extra-keys', 'metadata.interpreter metadata.kernelspec cell.metadata.pycharm']
+- repo: https://github.com/nbQA-dev/nbQA
+  rev: 1.3.1
+  hooks:
+  - id: nbqa-isort
+  - id: nbqa-yapf
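The hooks above run on every `git commit` once installed. A minimal sketch of driving the same checks from Python (a hypothetical helper script, not part of this commit; it only shells out to the standard `pre-commit` CLI):

```python
import subprocess


def lint_all() -> None:
    # Install the git hook, then run every configured hook over the whole
    # tree once: equivalent to `pre-commit install` followed by
    # `pre-commit run --all-files` in a shell.
    subprocess.run(['pre-commit', 'install'], check=True)
    subprocess.run(['pre-commit', 'run', '--all-files'], check=True)


if __name__ == '__main__':
    lint_all()
```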
.style.yapf ADDED
@@ -0,0 +1,5 @@
+[style]
+based_on_style = pep8
+blank_line_before_nested_class_or_def = false
+spaces_before_comment = 2
+split_before_logical_operator = true
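For reference, a small sketch of what two of these knobs do to formatted code (assumed yapf behavior under this config, not output copied from the tool):

```python
first_condition_with_a_long_name = True
second_condition_with_a_long_name = False

x = 1  # spaces_before_comment = 2: two spaces precede an inline comment

# split_before_logical_operator = true: a long condition breaks before
# `and`/`or` rather than after it.
if (first_condition_with_a_long_name
        and second_condition_with_a_long_name):
    pass
```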
app.py CHANGED
@@ -3,189 +3,204 @@
 from __future__ import annotations
 
 import argparse
-import functools
-import os
-import pickle
-import sys
+import json
 
 import gradio as gr
 import numpy as np
-import torch
-import torch.nn as nn
-from huggingface_hub import hf_hub_download
 
-sys.path.insert(0, 'stylegan3')
+from model import Model
 
-TITLE = 'StyleGAN2'
-DESCRIPTION = '''This is an unofficial demo for https://github.com/NVlabs/stylegan3.
+TITLE = '# StyleGAN2'
+DESCRIPTION = '''This is an unofficial demo for [https://github.com/NVlabs/stylegan3](https://github.com/NVlabs/stylegan3).
 
 Expected execution time on Hugging Face Spaces: 4s
 '''
-SAMPLE_IMAGE_DIR = 'https://huggingface.co/spaces/hysts/StyleGAN2/resolve/main/samples'
-ARTICLE = f'''## Generated images
-- truncation: 0.7
-### CIFAR-10
-- size: 32x32
-- class index: 0-9
-- seed: 0-9
-![CIFAR-10 samples]({SAMPLE_IMAGE_DIR}/cifar10.jpg)
-### AFHQ-Cat
-- size: 512x512
-- seed: 0-99
-![AFHQ-Cat samples]({SAMPLE_IMAGE_DIR}/afhq-cat.jpg)
-### AFHQ-Dog
-- size: 512x512
-- seed: 0-99
-![AFHQ-Dog samples]({SAMPLE_IMAGE_DIR}/afhq-dog.jpg)
-### AFHQ-Wild
-- size: 512x512
-- seed: 0-99
-![AFHQ-Wild samples]({SAMPLE_IMAGE_DIR}/afhq-wild.jpg)
-### AFHQv2
-- size: 512x512
-- seed: 0-99
-![AFHQv2 samples]({SAMPLE_IMAGE_DIR}/afhqv2.jpg)
-### LSUN-Dog
-- size: 256x256
-- seed: 0-99
-![LSUN-Dog samples]({SAMPLE_IMAGE_DIR}/lsun-dog.jpg)
-### BreCaHAD
-- size: 512x512
-- seed: 0-99
-![BreCaHAD samples]({SAMPLE_IMAGE_DIR}/brecahad.jpg)
-### CelebA-HQ
-- size: 256x256
-- seed: 0-99
-![CelebA-HQ samples]({SAMPLE_IMAGE_DIR}/celebahq.jpg)
-### FFHQ
-- size: 1024x1024
-- seed: 0-99
-![FFHQ samples]({SAMPLE_IMAGE_DIR}/ffhq.jpg)
-### FFHQ-U
-- size: 1024x1024
-- seed: 0-99
-![FFHQ-U samples]({SAMPLE_IMAGE_DIR}/ffhq-u.jpg)
-### MetFaces
-- size: 1024x1024
-- seed: 0-99
-![MetFaces samples]({SAMPLE_IMAGE_DIR}/metfaces.jpg)
-### MetFaces-U
-- size: 1024x1024
-- seed: 0-99
-![MetFaces-U samples]({SAMPLE_IMAGE_DIR}/metfaces-u.jpg)
-
-<center><img src="https://visitor-badge.glitch.me/badge?page_id=hysts.stylegan2" alt="visitor badge"/></center>
-'''
-
-TOKEN = os.environ['TOKEN']
+FOOTER = '<img id="visitor-badge" alt="visitor badge" src="https://visitor-badge.glitch.me/badge?page_id=hysts.stylegan2" />'
 
 
 def parse_args() -> argparse.Namespace:
     parser = argparse.ArgumentParser()
     parser.add_argument('--device', type=str, default='cpu')
     parser.add_argument('--theme', type=str)
-    parser.add_argument('--live', action='store_true')
     parser.add_argument('--share', action='store_true')
     parser.add_argument('--port', type=int)
     parser.add_argument('--disable-queue',
                         dest='enable_queue',
                         action='store_false')
-    parser.add_argument('--allow-flagging', type=str, default='never')
     return parser.parse_args()
 
 
-def generate_z(z_dim: int, seed: int, device: torch.device) -> torch.Tensor:
-    return torch.from_numpy(np.random.RandomState(seed).randn(
-        1, z_dim)).to(device).float()
-
-
-@torch.inference_mode()
-def generate_image(model_name: str, class_index: int, seed: int,
-                   truncation_psi: float, model_dict: dict[str, nn.Module],
-                   device: torch.device) -> np.ndarray:
-    model = model_dict[model_name]
-    seed = int(np.clip(seed, 0, np.iinfo(np.uint32).max))
-
-    z = generate_z(model.z_dim, seed, device)
-    label = torch.zeros([1, model.c_dim], device=device)
-    class_index = round(class_index)
-    class_index = min(max(0, class_index), model.c_dim - 1)
-    class_index = torch.tensor(class_index, dtype=torch.long)
-    if class_index >= 0:
-        label[:, class_index] = 1
-
-    out = model(z, label, truncation_psi=truncation_psi)
-    out = (out.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
-    return out[0].cpu().numpy()
-
-
-def load_model(file_name: str, device: torch.device) -> nn.Module:
-    path = hf_hub_download('hysts/StyleGAN2',
-                           f'models/{file_name}',
-                           use_auth_token=TOKEN)
-    with open(path, 'rb') as f:
-        model = pickle.load(f)['G_ema']
-    model.eval()
-    model.to(device)
-    with torch.inference_mode():
-        z = torch.zeros((1, model.z_dim)).to(device)
-        label = torch.zeros([1, model.c_dim], device=device)
-        model(z, label)
-    return model
+def update_class_index(name: str) -> dict:
+    if name == 'CIFAR-10':
+        return gr.Slider.update(maximum=9, visible=True)
+    else:
+        return gr.Slider.update(visible=False)
+
+
+def get_sample_image_url(name: str) -> str:
+    sample_image_dir = 'https://huggingface.co/spaces/hysts/StyleGAN2/resolve/main/samples'
+    return f'{sample_image_dir}/{name}.jpg'
+
+
+def get_sample_image_markdown(name: str) -> str:
+    url = get_sample_image_url(name)
+    if name == 'cifar10':
+        size = 32
+        class_index = '0-9'
+        seed = '0-9'
+    else:
+        class_index = 'N/A'
+        seed = '0-99'
+        if name == 'afhq-cat':
+            size = 512
+        elif name == 'afhq-dog':
+            size = 512
+        elif name == 'afhq-wild':
+            size = 512
+        elif name == 'afhqv2':
+            size = 512
+        elif name == 'brecahad':
+            size = 512
+        elif name == 'celebahq':
+            size = 256
+        elif name == 'ffhq':
+            size = 1024
+        elif name == 'ffhq-u':
+            size = 1024
+        elif name == 'lsun-dog':
+            size = 256
+        elif name == 'metfaces':
+            size = 1024
+        elif name == 'metfaces-u':
+            size = 1024
+        else:
+            raise ValueError
+
+    return f'''
+- size: {size}x{size}
+- class_index: {class_index}
+- seed: {seed}
+- truncation: 0.7
+![sample images]({url})'''
+
+
+def load_class_names(name: str) -> list[str]:
+    with open(f'labels/{name}_classes.json') as f:
+        names = json.load(f)
+    return names
+
+
+def get_class_name_df(name: str) -> list:
+    names = load_class_names(name)
+    return list(map(list, enumerate(names)))  # type: ignore
+
+
+CIFAR10_NAMES = load_class_names('cifar10')
+
+
+def update_class_name(model_name: str, index: int) -> dict:
+    if model_name == 'CIFAR-10':
+        value = CIFAR10_NAMES[index]
+        return gr.Textbox.update(value=value, visible=True)
+    else:
+        return gr.Textbox.update(visible=False)
 
 
 def main():
     args = parse_args()
-    device = torch.device(args.device)
-
-    model_names = {
-        'AFHQ-Cat-512': 'stylegan2-afhqcat-512x512.pkl',
-        'AFHQ-Dog-512': 'stylegan2-afhqdog-512x512.pkl',
-        'AFHQv2-512': 'stylegan2-afhqv2-512x512.pkl',
-        'AFHQ-Wild-512': 'stylegan2-afhqwild-512x512.pkl',
-        'BreCaHAD-512': 'stylegan2-brecahad-512x512.pkl',
-        'CelebA-HQ-256': 'stylegan2-celebahq-256x256.pkl',
-        'CIFAR-10': 'stylegan2-cifar10-32x32.pkl',
-        'FFHQ-256': 'stylegan2-ffhq-256x256.pkl',
-        'FFHQ-512': 'stylegan2-ffhq-512x512.pkl',
-        'FFHQ-1024': 'stylegan2-ffhq-1024x1024.pkl',
-        'FFHQ-U-256': 'stylegan2-ffhqu-256x256.pkl',
-        'FFHQ-U-1024': 'stylegan2-ffhqu-1024x1024.pkl',
-        'LSUN-Dog-256': 'stylegan2-lsundog-256x256.pkl',
-        'MetFaces-1024': 'stylegan2-metfaces-1024x1024.pkl',
-        'MetFaces-U-1024': 'stylegan2-metfacesu-1024x1024.pkl',
-    }
-
-    model_dict = {
-        name: load_model(file_name, device)
-        for name, file_name in model_names.items()
-    }
-
-    func = functools.partial(generate_image,
-                             model_dict=model_dict,
-                             device=device)
-    func = functools.update_wrapper(func, generate_image)
-
-    gr.Interface(
-        func,
-        [
-            gr.inputs.Radio(list(model_names.keys()),
-                            type='value',
-                            default='FFHQ-1024',
-                            label='Model'),
-            gr.inputs.Number(default=0, label='Class index'),
-            gr.inputs.Number(default=0, label='Seed'),
-            gr.inputs.Slider(
-                0, 2, step=0.05, default=0.7, label='Truncation psi'),
-        ],
-        gr.outputs.Image(type='numpy', label='Output'),
-        title=TITLE,
-        description=DESCRIPTION,
-        article=ARTICLE,
-        theme=args.theme,
-        allow_flagging=args.allow_flagging,
-        live=args.live,
-    ).launch(
+    model = Model(args.device)
+
+    with gr.Blocks(theme=args.theme, css='style.css') as demo:
+        gr.Markdown(TITLE)
+        gr.Markdown(DESCRIPTION)
+
+        with gr.Tabs():
+            with gr.TabItem('App'):
+                with gr.Row():
+                    with gr.Column():
+                        with gr.Group():
+                            model_name = gr.Dropdown(list(
+                                model.MODEL_NAME_DICT.keys()),
+                                                     value='FFHQ-1024',
+                                                     label='Model')
+                            seed = gr.Slider(0,
+                                             np.iinfo(np.uint32).max,
+                                             step=1,
+                                             value=0,
+                                             label='Seed')
+                            psi = gr.Slider(0,
+                                            2,
+                                            step=0.05,
+                                            value=0.7,
+                                            label='Truncation psi')
+                            class_index = gr.Slider(0,
+                                                    9,
+                                                    step=1,
+                                                    value=0,
+                                                    label='Class Index',
+                                                    visible=False)
+                            class_name = gr.Textbox(
+                                value=CIFAR10_NAMES[class_index.value],
+                                label='Class Label',
+                                interactive=False,
+                                visible=False)
+                            run_button = gr.Button('Run')
+                    with gr.Column():
+                        result = gr.Image(label='Result', elem_id='result')
+
+            with gr.TabItem('Sample Images'):
+                with gr.Row():
+                    model_name2 = gr.Dropdown([
+                        'afhq-cat',
+                        'afhq-dog',
+                        'afhq-wild',
+                        'afhqv2',
+                        'brecahad',
+                        'celebahq',
+                        'cifar10',
+                        'ffhq',
+                        'ffhq-u',
+                        'lsun-dog',
+                        'metfaces',
+                        'metfaces-u',
+                    ],
+                                              value='afhq-cat',
+                                              label='Model')
+                with gr.Row():
+                    text = get_sample_image_markdown(model_name2.value)
+                    sample_images = gr.Markdown(text)
+
+        gr.Markdown(FOOTER)
+
+        model_name.change(fn=model.set_model, inputs=model_name, outputs=None)
+        model_name.change(fn=update_class_index,
+                          inputs=model_name,
+                          outputs=class_index)
+        model_name.change(fn=update_class_name,
+                          inputs=[
+                              model_name,
+                              class_index,
+                          ],
+                          outputs=class_name)
+        class_index.change(fn=update_class_name,
+                           inputs=[
+                               model_name,
+                               class_index,
+                           ],
+                           outputs=class_name)
+        run_button.click(fn=model.set_model_and_generate_image,
+                         inputs=[
+                             model_name,
+                             seed,
+                             psi,
+                             class_index,
+                         ],
+                         outputs=result)
+        model_name2.change(fn=get_sample_image_markdown,
+                           inputs=model_name2,
+                           outputs=sample_images)
+
+    demo.launch(
         enable_queue=args.enable_queue,
         server_port=args.port,
         share=args.share,
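The rewrite above replaces the flat `gr.Interface` with a `gr.Blocks` layout whose event handlers toggle component visibility. A minimal, self-contained sketch of that wiring pattern (Gradio 3.x API, the same `gr.Slider.update` style this commit uses; the component values here are illustrative, not the app's full list):

```python
import gradio as gr


def toggle_class_index(model_name: str) -> dict:
    # Mirrors update_class_index: only the conditional CIFAR-10 model
    # exposes a class-index slider.
    return gr.Slider.update(visible=model_name == 'CIFAR-10')


with gr.Blocks() as demo:
    model_name = gr.Dropdown(['CIFAR-10', 'FFHQ-1024'],
                             value='FFHQ-1024',
                             label='Model')
    class_index = gr.Slider(0, 9, step=1, value=0, visible=False,
                            label='Class Index')
    # .change fires whenever the dropdown value changes and routes the
    # returned update dict to the slider.
    model_name.change(fn=toggle_class_index,
                      inputs=model_name,
                      outputs=class_index)

demo.launch()
```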
labels/cifar10_classes.json ADDED
@@ -0,0 +1 @@
+["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
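The label file is a flat JSON array indexed by class id, which is how `load_class_names` in app.py consumes it. A quick sanity check (hypothetical snippet, not part of the commit):

```python
import json

with open('labels/cifar10_classes.json') as f:
    names = json.load(f)

assert len(names) == 10
assert names[3] == 'cat'  # class index 3 maps to 'cat'
```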
model.py ADDED
@@ -0,0 +1,104 @@
+from __future__ import annotations
+
+import os
+import pathlib
+import pickle
+import sys
+
+import numpy as np
+import torch
+import torch.nn as nn
+from huggingface_hub import hf_hub_download
+
+current_dir = pathlib.Path(__file__).parent
+submodule_dir = current_dir / 'stylegan3'
+sys.path.insert(0, submodule_dir.as_posix())
+
+HF_TOKEN = os.environ['HF_TOKEN']
+
+
+class Model:
+    MODEL_NAME_DICT = {
+        'AFHQ-Cat-512': 'stylegan2-afhqcat-512x512.pkl',
+        'AFHQ-Dog-512': 'stylegan2-afhqdog-512x512.pkl',
+        'AFHQv2-512': 'stylegan2-afhqv2-512x512.pkl',
+        'AFHQ-Wild-512': 'stylegan2-afhqwild-512x512.pkl',
+        'BreCaHAD-512': 'stylegan2-brecahad-512x512.pkl',
+        'CelebA-HQ-256': 'stylegan2-celebahq-256x256.pkl',
+        'CIFAR-10': 'stylegan2-cifar10-32x32.pkl',
+        'FFHQ-256': 'stylegan2-ffhq-256x256.pkl',
+        'FFHQ-512': 'stylegan2-ffhq-512x512.pkl',
+        'FFHQ-1024': 'stylegan2-ffhq-1024x1024.pkl',
+        'FFHQ-U-256': 'stylegan2-ffhqu-256x256.pkl',
+        'FFHQ-U-1024': 'stylegan2-ffhqu-1024x1024.pkl',
+        'LSUN-Dog-256': 'stylegan2-lsundog-256x256.pkl',
+        'MetFaces-1024': 'stylegan2-metfaces-1024x1024.pkl',
+        'MetFaces-U-1024': 'stylegan2-metfacesu-1024x1024.pkl',
+    }
+
+    def __init__(self, device: str | torch.device):
+        self.device = torch.device(device)
+        self._download_all_models()
+        self.model_name = 'FFHQ-1024'
+        self.model = self._load_model(self.model_name)
+
+    def _load_model(self, model_name: str) -> nn.Module:
+        file_name = self.MODEL_NAME_DICT[model_name]
+        path = hf_hub_download('hysts/StyleGAN2',
+                               f'models/{file_name}',
+                               use_auth_token=HF_TOKEN)
+        with open(path, 'rb') as f:
+            model = pickle.load(f)['G_ema']
+        model.eval()
+        model.to(self.device)
+        return model
+
+    def set_model(self, model_name: str) -> None:
+        if model_name == self.model_name:
+            return
+        self.model_name = model_name
+        self.model = self._load_model(model_name)
+
+    def _download_all_models(self):
+        for name in self.MODEL_NAME_DICT.keys():
+            self._load_model(name)
+
+    def generate_z(self, seed: int) -> torch.Tensor:
+        seed = int(np.clip(seed, 0, np.iinfo(np.uint32).max))
+        z = np.random.RandomState(seed).randn(1, self.model.z_dim)
+        return torch.from_numpy(z).float().to(self.device)
+
+    def postprocess(self, tensor: torch.Tensor) -> np.ndarray:
+        tensor = (tensor.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(
+            torch.uint8)
+        return tensor.cpu().numpy()
+
+    def make_label_tensor(self, class_index: int) -> torch.Tensor:
+        class_index = round(class_index)
+        class_index = min(max(0, class_index), self.model.c_dim - 1)
+        class_index = torch.tensor(class_index, dtype=torch.long)
+
+        label = torch.zeros([1, self.model.c_dim], device=self.device)
+        if class_index >= 0:
+            label[:, class_index] = 1
+        return label
+
+    @torch.inference_mode()
+    def generate(self, z: torch.Tensor, label: torch.Tensor,
+                 truncation_psi: float) -> torch.Tensor:
+        return self.model(z, label, truncation_psi=truncation_psi)
+
+    def generate_image(self, seed: int, truncation_psi: float,
+                       class_index: int) -> np.ndarray:
+        z = self.generate_z(seed)
+        label = self.make_label_tensor(class_index)
+
+        out = self.generate(z, label, truncation_psi)
+        out = self.postprocess(out)
+        return out[0]
+
+    def set_model_and_generate_image(self, model_name: str, seed: int,
+                                     truncation_psi: float,
+                                     class_index: int) -> np.ndarray:
+        self.set_model(model_name)
+        return self.generate_image(seed, truncation_psi, class_index)
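A minimal usage sketch of the new `Model` class (assumes the `stylegan3` submodule is checked out and `HF_TOKEN` is set with access to the `hysts/StyleGAN2` model repo; note that `__init__` eagerly downloads all fifteen checkpoints via `_download_all_models`):

```python
from PIL import Image

from model import Model

# CPU inference; pass 'cuda' where a GPU is available.
model = Model('cpu')
image = model.set_model_and_generate_image('CIFAR-10',
                                           seed=0,
                                           truncation_psi=0.7,
                                           class_index=3)  # 3 -> 'cat'
Image.fromarray(image).save('out.png')
```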
style.css ADDED
@@ -0,0 +1,11 @@
+h1 {
+  text-align: center;
+}
+div#result {
+  max-width: 600px;
+  max-height: 600px;
+}
+img#visitor-badge {
+  display: block;
+  margin: auto;
+}