waveydaveygravy committed on
Commit 1e075b7 · 1 Parent(s): d82ce5b

Delete checkpoints/processor.py

Files changed (1)
  1. checkpoints/processor.py +0 -148
checkpoints/processor.py DELETED
@@ -1,148 +0,0 @@
-"""
-This file contains a Processor that can be used to process images with controlnet aux processors
-"""
-import io
-import logging
-from typing import Dict, Optional, Union
-
-from PIL import Image
-
-from controlnet_aux import (CannyDetector, ContentShuffleDetector, HEDdetector,
-                            LeresDetector, LineartAnimeDetector,
-                            LineartDetector, MediapipeFaceDetector,
-                            MidasDetector, MLSDdetector, NormalBaeDetector,
-                            OpenposeDetector, PidiNetDetector, ZoeDetector,
-                            DWposeDetector)
-
-LOGGER = logging.getLogger(__name__)
-
-
-MODELS = {
-    # checkpoint models
-    'scribble_hed': {'class': HEDdetector, 'checkpoint': True},
-    'softedge_hed': {'class': HEDdetector, 'checkpoint': True},
-    'scribble_hedsafe': {'class': HEDdetector, 'checkpoint': True},
-    'softedge_hedsafe': {'class': HEDdetector, 'checkpoint': True},
-    'depth_midas': {'class': MidasDetector, 'checkpoint': True},
-    'mlsd': {'class': MLSDdetector, 'checkpoint': True},
-    'openpose': {'class': OpenposeDetector, 'checkpoint': True},
-    'openpose_face': {'class': OpenposeDetector, 'checkpoint': True},
-    'openpose_faceonly': {'class': OpenposeDetector, 'checkpoint': True},
-    'openpose_full': {'class': OpenposeDetector, 'checkpoint': True},
-    'openpose_hand': {'class': OpenposeDetector, 'checkpoint': True},
-    'dwpose': {'class': DWposeDetector, 'checkpoint': True},
-    'scribble_pidinet': {'class': PidiNetDetector, 'checkpoint': True},
-    'softedge_pidinet': {'class': PidiNetDetector, 'checkpoint': True},
-    'scribble_pidsafe': {'class': PidiNetDetector, 'checkpoint': True},
-    'softedge_pidsafe': {'class': PidiNetDetector, 'checkpoint': True},
-    'normal_bae': {'class': NormalBaeDetector, 'checkpoint': True},
-    'lineart_coarse': {'class': LineartDetector, 'checkpoint': True},
-    'lineart_realistic': {'class': LineartDetector, 'checkpoint': True},
-    'lineart_anime': {'class': LineartAnimeDetector, 'checkpoint': True},
-    'depth_zoe': {'class': ZoeDetector, 'checkpoint': True},
-    'depth_leres': {'class': LeresDetector, 'checkpoint': True},
-    'depth_leres++': {'class': LeresDetector, 'checkpoint': True},
-    # instantiated directly (no pretrained checkpoint required)
-    'shuffle': {'class': ContentShuffleDetector, 'checkpoint': False},
-    'mediapipe_face': {'class': MediapipeFaceDetector, 'checkpoint': False},
-    'canny': {'class': CannyDetector, 'checkpoint': False},
-}
-
-
-MODEL_PARAMS = {
-    'scribble_hed': {'scribble': True},
-    'softedge_hed': {'scribble': False},
-    'scribble_hedsafe': {'scribble': True, 'safe': True},
-    'softedge_hedsafe': {'scribble': False, 'safe': True},
-    'depth_midas': {},
-    'mlsd': {},
-    'openpose': {'include_body': True, 'include_hand': False, 'include_face': False},
-    'openpose_face': {'include_body': True, 'include_hand': False, 'include_face': True},
-    'openpose_faceonly': {'include_body': False, 'include_hand': False, 'include_face': True},
-    'openpose_full': {'include_body': True, 'include_hand': True, 'include_face': True},
-    'openpose_hand': {'include_body': False, 'include_hand': True, 'include_face': False},
-    'dwpose': {},
-    'scribble_pidinet': {'safe': False, 'scribble': True},
-    'softedge_pidinet': {'safe': False, 'scribble': False},
-    'scribble_pidsafe': {'safe': True, 'scribble': True},
-    'softedge_pidsafe': {'safe': True, 'scribble': False},
-    'normal_bae': {},
-    'lineart_realistic': {'coarse': False},
-    'lineart_coarse': {'coarse': True},
-    'lineart_anime': {},
-    'canny': {},
-    'shuffle': {},
-    'depth_zoe': {},
-    'depth_leres': {'boost': False},
-    'depth_leres++': {'boost': True},
-    'mediapipe_face': {},
-}
-
-CHOICES = f"Choices for the processor are {list(MODELS.keys())}"
-
-
-class Processor:
-    def __init__(self, processor_id: str, params: Optional[Dict] = None) -> None:
-        """Processor that can be used to process images with controlnet aux processors
-
-        Args:
-            processor_id (str): processor name; must be one of the keys of MODELS,
-                                e.g. 'canny', 'depth_midas', 'openpose_full',
-                                'lineart_realistic', 'scribble_hed' (see CHOICES)
-            params (Optional[Dict]): parameters for the processor
-        """
-        LOGGER.info(f"Loading {processor_id}")
-
-        if processor_id not in MODELS:
-            raise ValueError(f"{processor_id} is not a valid processor id. Please make sure to choose one of {', '.join(MODELS.keys())}")
-
-        self.processor_id = processor_id
-        self.processor = self.load_processor(self.processor_id)
-
-        # load default params (copied so user params do not mutate MODEL_PARAMS)
-        self.params = dict(MODEL_PARAMS[self.processor_id])
-        # update with user params
-        if params:
-            self.params.update(params)
-
-    def load_processor(self, processor_id: str) -> 'Processor':
-        """Load controlnet aux processors
-
-        Args:
-            processor_id (str): processor name
-
-        Returns:
-            Processor: controlnet aux processor
-        """
-        processor = MODELS[processor_id]['class']
-
-        # check if the processor is a checkpoint model
-        if MODELS[processor_id]['checkpoint']:
-            processor = processor.from_pretrained("lllyasviel/Annotators")
-        else:
-            processor = processor()
-        return processor
-
-    def __call__(self, image: Union[Image.Image, bytes],
-                 to_pil: bool = True) -> Union[Image.Image, bytes]:
-        """processes an image with a controlnet aux processor
-
-        Args:
-            image (Union[Image.Image, bytes]): input image in bytes or PIL Image
-            to_pil (bool): whether to return a PIL Image (True) or JPEG bytes (False)
-
-        Returns:
-            Union[Image.Image, bytes]: processed image as a PIL Image or JPEG bytes
-        """
-        # check if bytes or PIL Image
-        if isinstance(image, bytes):
-            image = Image.open(io.BytesIO(image)).convert("RGB")
-
-        processed_image = self.processor(image, **self.params)
-
-        if to_pil:
-            return processed_image
-        else:
-            output_bytes = io.BytesIO()
-            processed_image.save(output_bytes, format='JPEG')
-            return output_bytes.getvalue()
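
For context, here is a minimal usage sketch of the Processor class removed by this commit. The import path, file names, and the choice of the 'canny' processor id are illustrative assumptions; it presumes controlnet_aux and Pillow are installed, and that checkpoint-backed ids (e.g. 'openpose_full') can fetch weights from lllyasviel/Annotators on first use.

```python
# Illustrative sketch only: module path and file names are assumptions.
from PIL import Image

from processor import Processor  # i.e. the deleted checkpoints/processor.py

# 'canny' needs no pretrained checkpoint; ids flagged 'checkpoint': True are
# loaded via from_pretrained("lllyasviel/Annotators") when constructed.
canny = Processor('canny')

image = Image.open('input.png').convert('RGB')   # hypothetical input file

edge_map = canny(image)                  # PIL Image (default to_pil=True)
jpeg_bytes = canny(image, to_pil=False)  # JPEG-encoded bytes

edge_map.save('canny_edges.png')
```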