Oliver Hamilton committed on
Commit a083fd4 · verified · 1 Parent(s): 6ad996e

Upload 29 files

Files changed (30)
  1. .gitattributes +1 -0
  2. deployments/LICENSE +201 -0
  3. deployments/README.md +227 -0
  4. deployments/deployment/Instance segmentation task/LICENSE +201 -0
  5. deployments/deployment/Instance segmentation task/README.md +164 -0
  6. deployments/deployment/Instance segmentation task/model.json +26 -0
  7. deployments/deployment/Instance segmentation task/model/config.json +8 -0
  8. deployments/deployment/Instance segmentation task/model/model.bin +3 -0
  9. deployments/deployment/Instance segmentation task/model/model.xml +0 -0
  10. deployments/deployment/Instance segmentation task/python/demo.py +107 -0
  11. deployments/deployment/Instance segmentation task/python/demo_package/__init__.py +27 -0
  12. deployments/deployment/Instance segmentation task/python/demo_package/executors/__init__.py +12 -0
  13. deployments/deployment/Instance segmentation task/python/demo_package/executors/asynchronous.py +79 -0
  14. deployments/deployment/Instance segmentation task/python/demo_package/executors/synchronous.py +49 -0
  15. deployments/deployment/Instance segmentation task/python/demo_package/model_wrapper.py +131 -0
  16. deployments/deployment/Instance segmentation task/python/demo_package/streamer/__init__.py +24 -0
  17. deployments/deployment/Instance segmentation task/python/demo_package/streamer/streamer.py +346 -0
  18. deployments/deployment/Instance segmentation task/python/demo_package/utils.py +61 -0
  19. deployments/deployment/Instance segmentation task/python/demo_package/visualizers/__init__.py +22 -0
  20. deployments/deployment/Instance segmentation task/python/demo_package/visualizers/vis_utils.py +190 -0
  21. deployments/deployment/Instance segmentation task/python/demo_package/visualizers/visualizer.py +402 -0
  22. deployments/deployment/Instance segmentation task/python/requirements.txt +3 -0
  23. deployments/deployment/Instance segmentation task/python/setup.py +30 -0
  24. deployments/deployment/project.json +77 -0
  25. deployments/example_code/demo.py +34 -0
  26. deployments/example_code/demo_notebook.ipynb +156 -0
  27. deployments/example_code/demo_ovms.ipynb +421 -0
  28. deployments/example_code/requirements-notebook.txt +6 -0
  29. deployments/example_code/requirements.txt +3 -0
  30. deployments/sample_image.jpg +3 -0
.gitattributes CHANGED
@@ -36,3 +36,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
36
  eggsample1.png filter=lfs diff=lfs merge=lfs -text
37
  eggsample2.png filter=lfs diff=lfs merge=lfs -text
38
  deployment2/sample_image.jpg filter=lfs diff=lfs merge=lfs -text
39
+ deployments/sample_image.jpg filter=lfs diff=lfs merge=lfs -text
deployments/LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright (C) 2018-2021 Intel Corporation
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
deployments/README.md ADDED
@@ -0,0 +1,227 @@
1
+ # Code deployment
2
+ ## Table of contents
3
+ - [Introduction](#introduction)
4
+ - [Prerequisites](#prerequisites)
5
+ - [Installation](#installation)
6
+ - [Usage](#usage)
7
+ - [Troubleshooting](#troubleshooting)
8
+ - [Package contents](#package-contents)
9
+
10
+
11
+ ## Introduction
12
+
13
+ This code deployment .zip archive contains:
14
+
15
+ 1. Inference model(s) for your Intel® Geti™ project.
16
+
17
+ 2. A sample image or video frame, exported from your project.
18
+
19
+ 3. A very simple code example to get and visualize the result of inference for your
20
+ project, on the sample image.
21
+
22
+ 4. Jupyter notebooks with instructions and code for running inference for your project,
23
+ either locally or via the OpenVINO Model Server (OVMS).
24
+
25
+ The deployment holds one model for each task in your project, so if, for example,
26
+ you created a deployment for a `Detection -> Classification` project, it will consist of
27
+ both a detection and a classification model. The Intel® Geti™ SDK is used to run
28
+ inference for all models in the project's task chain.
29
+
30
+ This README describes the steps required to get the code sample up and running on your
31
+ machine.
32
+
33
+ ## Prerequisites
34
+
35
+ - [Python 3.9, 3.10 or 3.11](https://www.python.org/downloads/)
36
+ - [*Optional, only for OVMS notebook*] [Docker](https://docs.docker.com/get-docker/)
37
+
38
+ ## Installation
39
+
40
+ 1. Install [prerequisites](#prerequisites). You may also need to
41
+ [install pip](https://pip.pypa.io/en/stable/installation/). For example, on Ubuntu
42
+ execute the following command to install Python and pip:
43
+
44
+ ```
45
+ sudo apt install python3-dev python3-pip
46
+ ```
47
+ If you already have pip installed, make sure it is up to date:
48
+
49
+ ```
50
+ pip install --upgrade pip
51
+ ```
52
+
53
+ 2. Create a clean virtual environment: <a name="virtual-env-creation"></a>
54
+
55
+ One of the possible ways to create a virtual environment is to use `virtualenv`:
56
+
57
+ ```
58
+ python -m pip install virtualenv
59
+ python -m virtualenv <directory_for_environment>
60
+ ```
61
+
62
+ Before starting to work inside the virtual environment, it should be activated:
63
+
64
+ On Linux and macOS:
65
+
66
+ ```
67
+ source <directory_for_environment>/bin/activate
68
+ ```
69
+
70
+ On Windows:
71
+
72
+ ```
73
+ .\<directory_for_environment>\Scripts\activate
74
+ ```
75
+
76
+ Please make sure that the environment contains
77
+ [wheel](https://pypi.org/project/wheel/) by calling the following command:
78
+
79
+ ```
80
+ python -m pip install wheel
81
+ ```
82
+
83
+ > **NOTE**: On Linux and macOS, you may need to type `python3` instead of `python`.
84
+
85
+ 3. In your terminal, navigate to the `example_code` directory in the code deployment
86
+ package.
87
+
88
+ 4. Install requirements in the environment:
89
+
90
+ ```
91
+ python -m pip install -r requirements.txt
92
+ ```
93
+
94
+ 5. (Optional) Install the requirements for running the `demo_notebook.ipynb` or
95
+ `demo_ovms.ipynb` Jupyter notebooks:
96
+
97
+ ```
98
+ python -m pip install -r requirements-notebook.txt
99
+ ```
100
+
101
+ ## Usage
102
+ ### Local inference
103
+ Both the `demo.py` script and the `demo_notebook.ipynb` notebook contain a code sample (sketched below) for:
104
+
105
+ 1. Loading the code deployment (and the models it contains) into memory.
106
+
107
+ 2. Loading the `sample_image.jpg`, which is a random image taken from the project you
108
+ deployed.
109
+
110
+ 3. Running inference on the sample image.
111
+
112
+ 4. Visualizing the inference results.
113
+
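+ For reference, a minimal sketch of these steps using the Intel® Geti™ SDK (`geti-sdk` on PyPI)
+ might look as follows. It assumes the SDK is installed in your environment and that you run it
+ from the `example_code` directory, with paths following the package layout described in this
+ README; exact import paths and attribute names can differ between SDK versions, so treat
+ `demo.py` and the notebook as the authoritative versions.
+
+ ```
+ import cv2
+ from geti_sdk.deployment import Deployment
+
+ # 1. Load the code deployment (and the models it contains) into memory
+ deployment = Deployment.from_folder("../deployment")
+ deployment.load_inference_models(device="CPU")
+
+ # 2. Load the sample image (convert OpenCV's BGR to RGB)
+ image = cv2.cvtColor(cv2.imread("../sample_image.jpg"), cv2.COLOR_BGR2RGB)
+
+ # 3. Run inference on the sample image
+ prediction = deployment.infer(image)
+
+ # 4. Inspect the raw result; demo.py and the notebook show how to visualize it
+ print(prediction)
+ ```
+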
114
+ ### Inference with OpenVINO Model Server
115
+ The additional demo notebook `demo_ovms.ipynb` shows how to set up and run an OpenVINO
116
+ Model Server for your deployment, and make inference requests to it. The notebook
117
+ contains instructions and code (illustrated after the list below) to:
118
+
119
+ 1. Generate a configuration file for OVMS.
120
+
121
+ 2. Launch an OVMS docker container with the proper configuration.
122
+
123
+ 3. Load the image `sample_image.jpg`, as an example image to run inference on.
124
+
125
+ 4. Make an inference request to OVMS.
126
+
127
+ 5. Visualize the inference results.
128
+
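+ As a rough illustration of step 4 (the inference request itself), a bare-bones gRPC call with
+ the `ovmsclient` package could look like the sketch below. The server address, model name, and
+ input tensor name are placeholder assumptions, and the notebook may instead use the
+ Intel® Geti™ SDK's own OVMS support; rely on the notebook for the exact, working code.
+
+ ```
+ import cv2
+ import numpy as np
+ from ovmsclient import make_grpc_client
+
+ client = make_grpc_client("localhost:9000")  # assumed OVMS gRPC endpoint
+ # Inspect the served model to learn its real input name, shape and layout
+ print(client.get_model_metadata(model_name="instance_segmentation"))  # assumed model name
+
+ image = cv2.imread("../sample_image.jpg")
+ # Preprocess (resize, layout, dtype) to match the metadata above, then send:
+ batch = np.expand_dims(image, axis=0)
+ outputs = client.predict(inputs={"image": batch}, model_name="instance_segmentation")
+ ```
+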
129
+ ### Running the demo script
130
+
131
+ In your terminal:
132
+
133
+ 1. Make sure the virtual environment created [above](#virtual-env-creation) is activated.
134
+
135
+ 2. Make sure you are in the `example_code` directory in your terminal.
136
+
137
+ 3. Run the demo using:
138
+
139
+ ```
140
+ python demo.py
141
+ ```
142
+
143
+ The script will run inference on `sample_image.jpg`. A window will pop up that
144
+ displays the image with the inference results visualized on top of it.
145
+
146
+ ### Running the demo notebooks
147
+
148
+ In your terminal:
149
+
150
+ 1. Make sure the virtual environment created [above](#virtual-env-creation) is activated.
151
+
152
+ 2. Make sure you are in the `example_code` directory in your terminal.
153
+
154
+ 3. Start JupyterLab using:
155
+
156
+ ```
157
+ jupyter lab
158
+ ```
159
+
160
+ 4. This should launch your web browser and take you to the main page of JupyterLab.
161
+
162
+ Inside JupyterLab:
163
+
164
+ 5. In the sidebar of the JupyterLab interface, double-click on `demo_notebook.ipynb` or
165
+ `demo_ovms.ipynb` to open one of the notebooks.
166
+
167
+ 6. Execute the notebook cell by cell to view the inference results.
168
+
169
+
170
+ > **NOTE** The `demo_notebook.ipynb` is a great way to explore the `AnnotationScene`
171
+ > object that is returned by the inference. The demo code only has very basic
172
+ > visualization functionality, which may not be sufficient for all use cases. For
173
+ > example, if your project contains many labels, it may not be able to visualize the
174
+ > results very well. In that case, you should build your own visualization logic
175
+ > based on the `AnnotationScene` returned by the `deployment.infer()` method (a starting point is sketched below).
176
+
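+ A hypothetical starting point for such custom logic, assuming the usual geti-sdk structure of
+ a `Prediction` (a list of annotations, each holding a shape and scored labels); verify the
+ attribute names against your installed SDK version:
+
+ ```
+ prediction = deployment.infer(image)
+ for annotation in prediction.annotations:
+     label = annotation.labels[0]   # top label predicted for this object
+     shape = annotation.shape       # e.g. a polygon for instance segmentation
+     print(label.name, label.probability, shape)
+     # Draw `shape` onto the image with your own styling here, for example using
+     # cv2.polylines for polygon shapes or cv2.rectangle for box shapes.
+ ```
+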
177
+ ## Troubleshooting
178
+
179
+ 1. If you have access to the Internet through a proxy server only, please use pip
180
+ with a proxy call as demonstrated by the command below:
181
+
182
+ ```
183
+ python -m pip install --proxy http://<usr_name>:<password>@<proxyserver_name>:<port#> <pkg_name>
184
+ ```
185
+
186
+ 2. If you use Anaconda as environment manager, please consider that OpenVINO has
187
+ limited [Conda support](https://docs.openvino.ai/2021.4/openvino_docs_install_guides_installing_openvino_conda.html).
188
+ It is still possible to use `conda` to create and activate your python environment,
189
+ but in that case please use only `pip` (rather than `conda`) as a package manager
190
+ for installing packages in your environment.
191
+
192
+ 3. If you have problems when you try to use the `pip install` command, please update
193
+ pip using the following command:
194
+ ```
195
+ python -m pip install --upgrade pip
196
+ ```
197
+
198
+ ## Package contents
199
+
200
+ The code deployment files are structured as follows:
201
+
202
+ - deployment
203
+ - `project.json`
204
+ - "<title of task 1>"
205
+ - model
206
+ - `model.xml`
207
+ - `model.bin`
208
+ - `config.json`
209
+ - python
210
+ - model_wrappers
211
+ - `__init__.py`
212
+ - model_wrappers required to run demo
213
+ - `README.md`
214
+ - `LICENSE`
215
+ - `demo.py`
216
+ - `requirements.txt`
217
+ - "<title of task 2>" (Optional)
218
+ - ...
219
+ - example_code
220
+ - `demo.py`
221
+ - `demo_notebook.ipynb`
222
+ - `demo_ovms.ipynb`
223
+ - `README.md`
224
+ - `requirements.txt`
225
+ - `requirements-notebook.txt`
226
+ - `sample_image.jpg`
227
+ - `LICENSE`
deployments/deployment/Instance segmentation task/LICENSE ADDED
@@ -0,0 +1,201 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright (C) 2018-2021 Intel Corporation
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
deployments/deployment/Instance segmentation task/README.md ADDED
@@ -0,0 +1,164 @@
1
+ # Exportable code
2
+
3
+ Exportable code is a .zip archive that contains a simple demo to get and visualize the results of model inference.
4
+
5
+ ## Structure of generated zip
6
+
7
+ - `README.md`
8
+ - `LICENSE`
9
+ - model
10
+ - `model.xml`
11
+ - `model.bin`
12
+ - `config.json`
13
+ - python
14
+ - demo_package
15
+ - `__init__.py`
16
+ - executors
17
+ - `__init__.py`
18
+ - `asynchronous.py`
19
+ - `synchronous.py`
20
+ - inference
21
+ - `__init__.py`
22
+ - `inference.py`
23
+ - streamer
24
+ - `__init__.py`
25
+ - `streamer.py`
26
+ - visualizers
27
+ - `__init__.py`
28
+ - `visualizer.py`
29
+ - `vis_utils.py`
30
+ - `demo.py`
31
+ - `requirements.txt`
32
+ - `setup.py`
33
+
34
+ ## Prerequisites
35
+
36
+ - [Python 3.10](https://www.python.org/downloads/)
37
+ - [Git](https://git-scm.com/)
38
+
39
+ ## Install requirements to run demo
40
+
41
+ 1. Install [prerequisites](#prerequisites). You may also need to [install pip](https://pip.pypa.io/en/stable/installation/). For example, on Ubuntu execute the following command to get pip installed:
42
+
43
+ ```bash
44
+ sudo apt install python3-pip
45
+ ```
46
+
47
+ 1. Create a clean virtual environment:
48
+
49
+ One of the possible ways to create a virtual environment is to use `virtualenv`:
50
+
51
+ ```bash
52
+ python -m pip install virtualenv
53
+ python -m virtualenv <directory_for_environment>
54
+ ```
55
+
56
+ Before starting to work inside the virtual environment, it should be activated:
57
+
58
+ On Linux and macOS:
59
+
60
+ ```bash
61
+ source <directory_for_environment>/bin/activate
62
+ ```
63
+
64
+ On Windows:
65
+
66
+ ```bash
67
+ .\<directory_for_environment>\Scripts\activate
68
+ ```
69
+
70
+ Please make sure that the environment contains [wheel](https://pypi.org/project/wheel/) by calling the following command:
71
+
72
+ ```bash
73
+ python -m pip install wheel
74
+ ```
75
+
76
+ > **NOTE**: On Linux and macOS, you may need to type `python3` instead of `python`.
77
+
78
+ 1. Install requirements in the environment:
79
+
80
+ ```bash
81
+ cd python
82
+ python setup.py install
83
+ ```
84
+
85
+ ## Usage
86
+
87
+ 1. Running the `demo.py` application with the `-h` option yields the following usage message:
88
+
89
+ ```bash
90
+ usage: demo.py [-h] -i INPUT -m MODEL [MODEL ...] [-it {sync,async}] [-l] [--no_show] [-d {CPU,GPU}] [--output OUTPUT]
91
+
92
+ Options:
93
+ -h, --help Show this help message and exit.
94
+ -i INPUT, --input INPUT
95
+ Required. An input to process. The input must be a single image, a folder of images, video file or camera id.
96
+ -m MODEL [MODEL ...], --model MODELS [MODELS ...]
97
+ Optional. Path to directory with trained model and configuration file. Default value points to deployed model folder '../model'.
98
+ -it {sync,async}, --inference_type {sync,async}
99
+ Optional. Type of inference for single model.
100
+ -l, --loop Optional. Enable reading the input in a loop.
101
+ --no_show Optional. Disables showing inference results on UI.
102
+ -d {CPU,GPU}, --device {CPU,GPU}
103
+ Optional. Device to infer the model.
104
+ --output OUTPUT Optional. Output path to save input data with predictions.
105
+ ```
106
+
107
+ 2. By default, the `model` parameter points to `../model`; alternatively, you can specify another path to the model directory from the generated zip. As `input` you can pass a single image, a folder of images, a video file, or a web camera ID. For example, you can run inference with a pre-trained model using the following command:
108
+
109
+ ```bash
110
+ python3 demo.py -i <path_to_video>/inputVideo.mp4
111
+ ```
112
+
113
+ You can press `Q` to stop inference while the demo is running.
114
+
115
+ > **NOTE**: If you provide a single image as input, the demo processes and renders it quickly, then exits. To continuously
116
+ > visualize inference results on the screen, apply the `--loop` option, which enforces processing a single image in a loop.
117
+ > In this case, you can stop the demo by pressing the `Q` button or by killing the process in the terminal (`Ctrl+C` on Linux).
118
+ >
119
+ > **NOTE**: The default configuration contains information about pre- and post-processing for inference and is guaranteed to be correct.
120
+ > You can also change `config.json`, which specifies the confidence threshold and the visualization color for each class, but any
121
+ > changes should be made with caution.
122
+
123
+ 3. To save the inference results with predictions drawn on them, you can specify a folder path using `--output`.
124
+ It works for images, videos, image folders, and web cameras. To prevent issues, do not specify it together with the `--loop` parameter.
125
+
126
+ ```bash
127
+ python3 demo.py \
128
+ --input <path_to_image>/inputImage.jpg \
129
+ --model ../model \
130
+ --output resulted_images
131
+ ```
132
+
133
+ 4. To run a demo on a web camera, you need to know its ID.
134
+ You can check the list of camera devices by running this command on a Linux system:
135
+
136
+ ```bash
137
+ sudo apt-get install v4l-utils
138
+ v4l2-ctl --list-devices
139
+ ```
140
+
141
+ The output will look like this:
142
+
143
+ ```bash
144
+ Integrated Camera (usb-0000:00:1a.0-1.6):
145
+ /dev/video0
146
+ ```
147
+
148
+ After that, you can use this `/dev/video0` as a camera ID for `--input`.
149
+
150
+ ## Troubleshooting
151
+
152
+ 1. If you have access to the Internet through a proxy server only, please use pip with a proxy call as demonstrated by the command below:
153
+
154
+ ```bash
155
+ python -m pip install --proxy http://<usr_name>:<password>@<proxyserver_name>:<port#> <pkg_name>
156
+ ```
157
+
158
+ 1. If you use an Anaconda environment, you should consider that OpenVINO has limited [Conda support](https://docs.openvino.ai/2021.4/openvino_docs_install_guides_installing_openvino_conda.html) for Python 3.6 and 3.7 only, while the demo package requires the newer Python version listed in [Prerequisites](#prerequisites). Please use other tools to create the environment (such as `venv` or `virtualenv`) and use `pip` as the package manager.
159
+
160
+ 1. If you have problems when you try to use the `pip install` command, please update pip with the following command:
161
+
162
+ ```bash
163
+ python -m pip install --upgrade pip
164
+ ```
deployments/deployment/Instance segmentation task/model.json ADDED
@@ -0,0 +1,26 @@
1
+ {
2
+ "id": "6807ecb527f5406c0158ae5b",
3
+ "name": "RTMDet_tiny OpenVINO FP16",
4
+ "version": 1,
5
+ "creation_date": "2025-04-22T19:23:33.177000+00:00",
6
+ "model_format": "OpenVINO",
7
+ "precision": [
8
+ "FP16"
9
+ ],
10
+ "has_xai_head": false,
11
+ "target_device": "CPU",
12
+ "target_device_type": null,
13
+ "performance": {
14
+ "score": 0.9520755876647748
15
+ },
16
+ "size": 12618721,
17
+ "latency": 0,
18
+ "fps_throughput": 0,
19
+ "optimization_type": "MO",
20
+ "optimization_objectives": {},
21
+ "model_status": "SUCCESS",
22
+ "configurations": [],
23
+ "previous_revision_id": "6807ecb527f5406c0158ae58",
24
+ "previous_trained_revision_id": "6807ecb527f5406c0158ae58",
25
+ "optimization_methods": []
26
+ }
deployments/deployment/Instance segmentation task/model/config.json ADDED
@@ -0,0 +1,8 @@
1
+ {
2
+ "task_type": "instance_segmentation",
3
+ "model_type": "MaskRCNN",
4
+ "model_parameters": {
5
+ "labels": "otx_empty_lbl egg Empty",
6
+ "labels_ids": "None 6483114c18fb8c1c529bd150 6483114c18fb8c1c529bd154"
7
+ }
8
+ }
deployments/deployment/Instance segmentation task/model/model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3b7c38bcc92193ed6d51a8650f2a105133fd2571edee7c11fb735f4c85a5b43f
3
+ size 11876754
deployments/deployment/Instance segmentation task/model/model.xml ADDED
The diff for this file is too large to render. See raw diff
 
deployments/deployment/Instance segmentation task/python/demo.py ADDED
@@ -0,0 +1,107 @@
1
+ # Copyright (C) 2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ """Demo based on ModelAPI."""
5
+
6
+ import sys
7
+ from argparse import SUPPRESS, ArgumentParser
8
+ from pathlib import Path
9
+
10
+ from demo_package import AsyncExecutor, ModelWrapper, SyncExecutor, create_visualizer
11
+
12
+
13
+ def build_argparser() -> ArgumentParser:
14
+ """Returns an ArgumentParser for parsing command line arguments."""
15
+ parser = ArgumentParser(add_help=False)
16
+ args = parser.add_argument_group("Options")
17
+ args.add_argument(
18
+ "-h",
19
+ "--help",
20
+ action="help",
21
+ default=SUPPRESS,
22
+ help="Show this help message and exit.",
23
+ )
24
+ args.add_argument(
25
+ "-i",
26
+ "--input",
27
+ required=True,
28
+ help="Required. An input to process. The input must be a single image, "
29
+ "a folder of images, video file or camera id.",
30
+ )
31
+ args.add_argument(
32
+ "-m",
33
+ "--model",
34
+ help="Optional. Path to directory with trained model and configuration file. "
35
+ "Default value points to deployed model folder '../model'.",
36
+ default=Path("../model"),
37
+ type=Path,
38
+ )
39
+ args.add_argument(
40
+ "-it",
41
+ "--inference_type",
42
+ help="Optional. Type of inference for single model.",
43
+ choices=["sync", "async"],
44
+ default="async",
45
+ type=str,
46
+ )
47
+ args.add_argument(
48
+ "-l",
49
+ "--loop",
50
+ help="Optional. Enable reading the input in a loop.",
51
+ default=False,
52
+ action="store_true",
53
+ )
54
+ args.add_argument(
55
+ "--no_show",
56
+ help="Optional. Disables showing inference results on UI.",
57
+ default=False,
58
+ action="store_true",
59
+ )
60
+ args.add_argument(
61
+ "-d",
62
+ "--device",
63
+ help="Optional. Device to infer the model.",
64
+ choices=["CPU", "GPU"],
65
+ default="CPU",
66
+ type=str,
67
+ )
68
+ args.add_argument(
69
+ "--output",
70
+ default="./outputs/model_visualization",
71
+ type=str,
72
+ help="Optional. Output path to save input data with predictions.",
73
+ )
74
+
75
+ return parser
76
+
77
+
78
+ EXECUTORS = {
79
+ "sync": SyncExecutor,
80
+ "async": AsyncExecutor,
81
+ }
82
+
83
+
84
+ def main() -> int:
85
+ """Main function that is used to run demo."""
86
+ args = build_argparser().parse_args()
87
+
88
+ if args.loop and args.output:
89
+ msg = "--loop and --output cannot be both specified"
90
+ raise ValueError(msg)
91
+
92
+ # create models
93
+ model = ModelWrapper(args.model, device=args.device)
94
+ inferencer = EXECUTORS[args.inference_type]
95
+
96
+ # create visualizer
97
+ visualizer = create_visualizer(model.task_type, model.labels, no_show=args.no_show, output=args.output)
98
+
99
+ # create inferencer and run
100
+ demo = inferencer(model, visualizer)
101
+ demo.run(args.input, args.loop and not args.no_show)
102
+
103
+ return 0
104
+
105
+
106
+ if __name__ == "__main__":
107
+ sys.exit(main())
deployments/deployment/Instance segmentation task/python/demo_package/__init__.py ADDED
@@ -0,0 +1,27 @@
1
+ # Copyright (C) 2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ """Initialization of demo package."""
5
+
6
+ from .executors import AsyncExecutor, SyncExecutor
7
+ from .model_wrapper import ModelWrapper
8
+ from .utils import create_visualizer
9
+ from .visualizers import (
10
+ BaseVisualizer,
11
+ ClassificationVisualizer,
12
+ InstanceSegmentationVisualizer,
13
+ ObjectDetectionVisualizer,
14
+ SemanticSegmentationVisualizer,
15
+ )
16
+
17
+ __all__ = [
18
+ "SyncExecutor",
19
+ "AsyncExecutor",
20
+ "create_visualizer",
21
+ "ModelWrapper",
22
+ "BaseVisualizer",
23
+ "ClassificationVisualizer",
24
+ "SemanticSegmentationVisualizer",
25
+ "InstanceSegmentationVisualizer",
26
+ "ObjectDetectionVisualizer",
27
+ ]
deployments/deployment/Instance segmentation task/python/demo_package/executors/__init__.py ADDED
@@ -0,0 +1,12 @@
1
+ # Copyright (C) 2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ """Initialization of executors."""
5
+
6
+ from .asynchronous import AsyncExecutor
7
+ from .synchronous import SyncExecutor
8
+
9
+ __all__ = [
10
+ "SyncExecutor",
11
+ "AsyncExecutor",
12
+ ]
deployments/deployment/Instance segmentation task/python/demo_package/executors/asynchronous.py ADDED
@@ -0,0 +1,79 @@
1
+ # Copyright (C) 2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ """Async executor based on ModelAPI."""
5
+
6
+ from __future__ import annotations
7
+
8
+ import time
9
+ from typing import TYPE_CHECKING, Any
10
+
11
+ from model_api.pipelines import AsyncPipeline
12
+
13
+ if TYPE_CHECKING:
14
+ import numpy as np
15
+ from demo_package.model_wrapper import ModelWrapper
16
+
17
+
18
+ from demo_package.streamer import get_streamer
19
+ from demo_package.visualizers import BaseVisualizer, dump_frames
20
+
21
+
22
+ class AsyncExecutor:
23
+ """Async inferencer.
24
+
25
+ Args:
26
+ model: model for inference
27
+ visualizer: visualizer of inference results
28
+ """
29
+
30
+ def __init__(self, model: ModelWrapper, visualizer: BaseVisualizer) -> None:
31
+ self.model = model
32
+ self.visualizer = visualizer
33
+ self.async_pipeline = AsyncPipeline(self.model.core_model)
34
+
35
+ def run(self, input_stream: int | str, loop: bool = False) -> None:
36
+ """Async inference for input stream (image, video stream, camera)."""
37
+ streamer = get_streamer(input_stream, loop)
38
+ next_frame_id = 0
39
+ next_frame_id_to_show = 0
40
+ stop_visualization = False
41
+ saved_frames = []
42
+
43
+ for frame in streamer:
44
+ results = self.async_pipeline.get_result(next_frame_id_to_show)
45
+ while results:
46
+ start_time = time.perf_counter()
47
+ output = self.render_result(results)
48
+ next_frame_id_to_show += 1
49
+ self.visualizer.show(output)
50
+ if self.visualizer.output:
51
+ saved_frames.append(output)
52
+ stop_visualization = self.visualizer.is_quit()
53
+ # visualize video not faster than the original FPS
54
+ self.visualizer.video_delay(time.perf_counter() - start_time, streamer)
55
+ results = self.async_pipeline.get_result(next_frame_id_to_show)
56
+ if stop_visualization:
57
+ break
58
+ self.async_pipeline.submit_data(frame, next_frame_id, {"frame": frame})
59
+ next_frame_id += 1
60
+ self.async_pipeline.await_all()
61
+ for next_id in range(next_frame_id_to_show, next_frame_id):
62
+ start_time = time.perf_counter()
63
+ results = self.async_pipeline.get_result(next_id)
64
+ if not results:
65
+ msg = "Async pipeline returned None results"
66
+ raise RuntimeError(msg)
67
+ output = self.render_result(results)
68
+ self.visualizer.show(output)
69
+ if self.visualizer.output:
70
+ saved_frames.append(output)
71
+ # visualize video not faster than the original FPS
72
+ self.visualizer.video_delay(time.perf_counter() - start_time, streamer)
73
+ dump_frames(saved_frames, self.visualizer.output, input_stream, streamer)
74
+
75
+ def render_result(self, results: tuple[Any, dict]) -> np.ndarray:
76
+ """Render for results of inference."""
77
+ predictions, frame_meta = results
78
+ current_frame = frame_meta["frame"]
79
+ return self.visualizer.draw(current_frame, predictions)
deployments/deployment/Instance segmentation task/python/demo_package/executors/synchronous.py ADDED
@@ -0,0 +1,49 @@
1
+ # Copyright (C) 2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ """Synchronous Executor based on ModelAPI."""
5
+
6
+ from __future__ import annotations
7
+
8
+ import time
9
+ from typing import TYPE_CHECKING
10
+
11
+ if TYPE_CHECKING:
12
+ from demo_package.model_wrapper import ModelWrapper
13
+ from demo_package.visualizers import BaseVisualizer
14
+
15
+ from demo_package.streamer.streamer import get_streamer
16
+ from demo_package.visualizers import dump_frames
17
+
18
+
19
+ class SyncExecutor:
20
+ """Synchronous executor for model inference.
21
+
22
+ Args:
23
+ model (ModelWrapper): model for inference
24
+ visualizer (BaseVisualizer): visualizer of inference results
25
+ """
26
+
27
+ def __init__(self, model: ModelWrapper, visualizer: BaseVisualizer) -> None:
28
+ self.model = model
29
+ self.visualizer = visualizer
30
+
31
+ def run(self, input_stream: int | str, loop: bool = False) -> None:
32
+ """Run demo using input stream (image, video stream, camera)."""
33
+ streamer = get_streamer(input_stream, loop)
34
+ saved_frames = []
35
+
36
+ for frame in streamer:
37
+ # getting result include preprocessing, infer, postprocessing for sync infer
38
+ start_time = time.perf_counter()
39
+ predictions, _ = self.model(frame)
40
+ output = self.visualizer.draw(frame, predictions)
41
+ self.visualizer.show(output)
42
+ if output is not None:
43
+ saved_frames.append(output)
44
+ if self.visualizer.is_quit():
45
+ break
46
+ # visualize video not faster than the original FPS
47
+ self.visualizer.video_delay(time.perf_counter() - start_time, streamer)
48
+
49
+ dump_frames(saved_frames, self.visualizer.output, input_stream, streamer)
deployments/deployment/Instance segmentation task/python/demo_package/model_wrapper.py ADDED
@@ -0,0 +1,131 @@
1
+ # Copyright (C) 2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ """ModelContainer class used for loading the model in the model wrapper."""
5
+
6
+ from __future__ import annotations
7
+
8
+ from enum import Enum
9
+ from typing import TYPE_CHECKING, Any, NamedTuple
10
+
11
+ from model_api.adapters import OpenvinoAdapter, create_core
12
+ from model_api.models import Model
13
+
14
+ from .utils import get_model_path, get_parameters
15
+
16
+ if TYPE_CHECKING:
17
+ from pathlib import Path
18
+
19
+ import numpy as np
20
+ from model_api.tilers import DetectionTiler, InstanceSegmentationTiler
21
+
22
+
23
+ class TaskType(str, Enum):
24
+ """OTX task type definition."""
25
+
26
+ CLASSIFICATION = "CLASSIFICATION"
27
+ DETECTION = "DETECTION"
28
+ INSTANCE_SEGMENTATION = "INSTANCE_SEGMENTATION"
29
+ SEGMENTATION = "SEGMENTATION"
30
+
31
+
32
+ class ModelWrapper:
33
+ """Class for storing the model wrapper based on Model API and needed parameters of model.
34
+
35
+ Args:
36
+ model_dir (Path): path to model directory
37
+ """
38
+
39
+ def __init__(self, model_dir: Path, device: str = "CPU") -> None:
40
+ model_adapter = OpenvinoAdapter(create_core(), get_model_path(model_dir / "model.xml"), device=device)
41
+ if not (model_dir / "config.json").exists():
42
+ msg = "config.json doesn't exist in the model directory."
43
+ raise RuntimeError(msg)
44
+ self.parameters = get_parameters(model_dir / "config.json")
45
+ self._labels = self.parameters["model_parameters"]["labels"]
46
+ self._task_type = TaskType[self.parameters["task_type"].upper()]
47
+
48
+ # labels for ModelAPI wrappers can be empty because they are unused in pre- and post-processing
49
+ self.model_parameters = self.parameters["model_parameters"]
50
+
51
+ # model already contains correct labels
52
+ self.model_parameters.pop("labels")
53
+
54
+ self.core_model = Model.create_model(
55
+ model_adapter,
56
+ self.parameters["model_type"],
57
+ self.model_parameters,
58
+ preload=True,
59
+ )
60
+ self.tiler = self.setup_tiler(model_dir, device)
61
+
62
+ def setup_tiler(
63
+ self,
64
+ model_dir: Path,
65
+ device: str,
66
+ ) -> DetectionTiler | InstanceSegmentationTiler | None:
67
+ """Set up tiler for model.
68
+
69
+ Args:
70
+ model_dir (str): model directory
71
+ device (str): device to run model on
72
+ Returns:
73
+ Optional: type of tiler or None
74
+ """
75
+ if not self.parameters.get("tiling_parameters") or not self.parameters["tiling_parameters"]["enable_tiling"]:
76
+ return None
77
+
78
+ msg = "Tiling has not been implemented yet"
79
+ raise NotImplementedError(msg)
80
+
81
+ @property
82
+ def task_type(self) -> TaskType:
83
+ """Task type property."""
84
+ return self._task_type
85
+
86
+ @property
87
+ def labels(self) -> dict:
88
+ """Labels property."""
89
+ return self._labels
90
+
91
+ def infer(self, frame: np.ndarray) -> tuple[NamedTuple, dict]:
92
+ """Infer with original image.
93
+
94
+ Args:
95
+ frame: np.ndarray, input image
96
+ Returns:
97
+ predictions: NamedTuple, prediction
98
+ frame_meta: Dict, dict with original shape
99
+ """
100
+ # getting result include preprocessing, infer, postprocessing for sync infer
101
+ predictions = self.core_model(frame)
102
+ frame_meta = {"original_shape": frame.shape}
103
+
104
+ return predictions, frame_meta
105
+
106
+ def infer_tile(self, frame: np.ndarray) -> tuple[NamedTuple, dict]:
107
+ """Infer by patching full image to tiles.
108
+
109
+ Args:
110
+ frame: np.ndarray - input image
111
+ Returns:
112
+ Tuple[NamedTuple, Dict]: prediction and original shape
113
+ """
114
+ if self.tiler is None:
115
+ msg = "Tiler is not set"
116
+ raise RuntimeError(msg)
117
+ detections = self.tiler(frame)
118
+ return detections, {"original_shape": frame.shape}
119
+
120
+ def __call__(self, input_data: np.ndarray) -> tuple[Any, dict]:
121
+ """Call the ModelWrapper class.
122
+
123
+ Args:
124
+ input_data (np.ndarray): The input image.
125
+
126
+ Returns:
127
+ Tuple[Any, dict]: A tuple containing predictions and the meta information.
128
+ """
129
+ if self.tiler is not None:
130
+ return self.infer_tile(input_data)
131
+ return self.infer(input_data)
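A minimal usage sketch for the wrapper above (hedged: it assumes the demo package from this deployment is installed and that the model directory contains the exported model.xml and config.json; the paths are illustrative):

from pathlib import Path

import cv2
from demo_package.model_wrapper import ModelWrapper

# Illustrative paths; adjust to wherever the deployment was extracted.
model_dir = Path("deployment/Instance segmentation task/model")
wrapper = ModelWrapper(model_dir, device="CPU")

# The wrappers in this package expect RGB frames, so convert after cv2.imread.
frame = cv2.cvtColor(cv2.imread("sample_image.jpg"), cv2.COLOR_BGR2RGB)
predictions, meta = wrapper(frame)  # __call__ dispatches to infer() or infer_tile()
print(wrapper.task_type, meta["original_shape"])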
deployments/deployment/Instance segmentation task/python/demo_package/streamer/__init__.py ADDED
@@ -0,0 +1,24 @@
1
+ # Copyright (C) 2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ """Initialization of streamer."""
5
+
6
+ from .streamer import (
7
+ BaseStreamer,
8
+ CameraStreamer,
9
+ DirStreamer,
10
+ ImageStreamer,
11
+ ThreadedStreamer,
12
+ VideoStreamer,
13
+ get_streamer,
14
+ )
15
+
16
+ __all__ = [
17
+ "CameraStreamer",
18
+ "DirStreamer",
19
+ "ImageStreamer",
20
+ "ThreadedStreamer",
21
+ "VideoStreamer",
22
+ "BaseStreamer",
23
+ "get_streamer",
24
+ ]
deployments/deployment/Instance segmentation task/python/demo_package/streamer/streamer.py ADDED
@@ -0,0 +1,346 @@
1
+ # Copyright (C) 2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ """Streamer for reading input."""
5
+
6
+ from __future__ import annotations
7
+
8
+ import abc
9
+ import contextlib
10
+ import multiprocessing
11
+ import os
12
+ import queue
13
+ import sys
14
+ from enum import Enum
15
+ from pathlib import Path
16
+ from typing import TYPE_CHECKING, Iterator
17
+
18
+ if TYPE_CHECKING:
19
+ import numpy as np
20
+
21
+ import cv2
22
+
23
+
24
+ class MediaType(Enum):
25
+ """This Enum represents the types of input."""
26
+
27
+ IMAGE = 1
28
+ DIR = 2
29
+ VIDEO = 3
30
+ CAMERA = 4
31
+
32
+
33
+ class BaseStreamer(metaclass=abc.ABCMeta):
34
+ """Base Streamer interface to implement Image, Video and Camera streamers."""
35
+
36
+ @abc.abstractmethod
37
+ def __init__(self, input_path: str, loop: bool = False) -> None:
38
+ """Initialize the streamer object.
39
+
40
+ Args:
41
+ input_path (str): path to the input stream
42
+ loop (bool, optional): whether to loop the stream or not. Defaults to False.
43
+ """
44
+ raise NotImplementedError
45
+
46
+ @abc.abstractmethod
47
+ def __iter__(self) -> Iterator[np.ndarray]:
48
+ """Iterate through the streamer object that is a Python Generator object.
49
+
50
+ Returns:
51
+ np.ndarray: Yield the image or video frame.
52
+ """
53
+ raise NotImplementedError
54
+
55
+ @abc.abstractmethod
56
+ def get_type(self) -> MediaType:
57
+ """Get type of streamer.
58
+
59
+ Returns:
60
+ MediaType: type of streamer.
61
+ """
62
+ raise NotImplementedError
63
+
64
+ def fps(self) -> float:
65
+ """Returns a frequency of getting images from source."""
66
+ raise NotImplementedError
67
+
68
+
69
+ def _process_run(streamer: BaseStreamer, buffer: multiprocessing.Queue) -> None:
70
+ """Private function that is run by the thread.
71
+
72
+ Waits for the buffer to gain space for timeout seconds while it is full.
73
+ If no space was available within this time the function will exit
74
+
75
+ streamer (BaseStreamer): The streamer to retrieve frames from
76
+ buffer (multiprocessing.Queue): The buffer to place the retrieved frames in
77
+ """
78
+ for frame in streamer:
79
+ buffer.put(frame)
80
+
81
+
82
+ class ThreadedStreamer(BaseStreamer):
83
+ """Runs a BaseStreamer on a separate thread.
84
+
85
+ streamer (BaseStreamer): The streamer to run in a separate process
86
+ buffer_size (int): Number of frames to buffer internally. Defaults to 2.
87
+
88
+ Example:
89
+ >>> streamer = VideoStreamer(input_path="../demo.mp4")
90
+ >>> threaded_streamer = ThreadedStreamer(streamer)
91
+ >>> for frame in threaded_streamer:
92
+ ... pass
93
+ """
94
+
95
+ def __init__(self, streamer: BaseStreamer, buffer_size: int = 2) -> None:
96
+ self.buffer_size = buffer_size
97
+ self.streamer = streamer
98
+
99
+ def __iter__(self) -> Iterator[np.ndarray]:
100
+ """Get frames from streamer and yield them.
101
+
102
+ Yields:
103
+ Iterator[np.ndarray]: Yield the image or video frame.
104
+ """
105
+ buffer: multiprocessing.Queue = multiprocessing.Queue(maxsize=self.buffer_size)
106
+ process = multiprocessing.Process(target=_process_run, args=(self.streamer, buffer))
107
+ # Make the process a daemon so that it exits when the main program exits as well
108
+ process.daemon = True
109
+ process.start()
110
+
111
+ try:
112
+ with contextlib.suppress(queue.Empty):
113
+ while process.is_alive() or not buffer.empty():
114
+ yield buffer.get(timeout=0.1)
115
+ except GeneratorExit:
116
+ process.terminate()
117
+ finally:
118
+ process.join(timeout=0.1)
119
+ # Process.kill() is only available in Python 3.7 and newer.
120
+ # Skip it if running an older Python version.
121
+ if sys.version_info >= (3, 7) and process.exitcode is None:
122
+ process.kill()
123
+
124
+ def get_type(self) -> MediaType:
125
+ """Get type of internal streamer.
126
+
127
+ Returns:
128
+ MediaType: type of internal streamer.
129
+ """
130
+ return self.streamer.get_type()
131
+
132
+
133
+ class VideoStreamer(BaseStreamer):
134
+ """Video Streamer.
135
+
136
+ Args:
137
+ input_path: Path to the video file.
138
+
139
+ Example:
140
+ >>> streamer = VideoStreamer(input_path="../demo.mp4")
141
+ ... for frame in streamer:
142
+ ... pass
143
+ """
144
+
145
+ def __init__(self, input_path: str, loop: bool = False) -> None:
146
+ self.media_type = MediaType.VIDEO
147
+ self.loop = loop
148
+ self.cap = cv2.VideoCapture()
149
+ status = self.cap.open(input_path)
150
+ if not status:
151
+ msg = f"Can't open the video from {input_path}"
152
+ raise RuntimeError(msg)
153
+
154
+ def __iter__(self) -> Iterator[np.ndarray]:
155
+ """Iterates over frames of the video.
156
+
157
+ If self.loop is set to True, the video will loop infinitely.
158
+ """
159
+ while True:
160
+ status, image = self.cap.read()
161
+ if status:
162
+ yield cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
163
+ elif self.loop:
164
+ self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
165
+ else:
166
+ break
167
+
168
+ def fps(self) -> float:
169
+ """Returns a frequency of getting images from source."""
170
+ return self.cap.get(cv2.CAP_PROP_FPS)
171
+
172
+ def get_type(self) -> MediaType:
173
+ """Returns the type of media."""
174
+ return MediaType.VIDEO
175
+
176
+
177
+ class CameraStreamer(BaseStreamer):
178
+ """Stream video frames from camera.
179
+
180
+ Args:
181
+ camera_device (int): Camera device index e.g, 0, 1
182
+
183
+ Example:
184
+ >>> streamer = CameraStreamer(camera_device=0)
185
+ ... for frame in streamer:
186
+ ... cv2.imshow("Window", frame)
187
+ ... if ord("q") == cv2.waitKey(1):
188
+ ... break
189
+ """
190
+
191
+ def __init__(self, camera_device: str = "0") -> None:
192
+ self.media_type = MediaType.CAMERA
193
+ try:
194
+ self.stream = cv2.VideoCapture(int(camera_device))
195
+ except ValueError as err:
196
+ msg = f"Can't find the camera {camera_device}"
197
+ raise ValueError(msg) from err
198
+
199
+ def __iter__(self) -> Iterator[np.ndarray]:
200
+ """Read video and yield the frame.
201
+
202
+ Args:
203
+ stream: Video stream captured via OpenCV's VideoCapture
204
+
205
+ Returns:
206
+ Individual frame
207
+ """
208
+ while True:
209
+ frame_available, frame = self.stream.read()
210
+ if not frame_available:
211
+ break
212
+ frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
213
+ yield frame
214
+
215
+ self.stream.release()
216
+
217
+ def get_type(self) -> MediaType:
218
+ """Returns the type of media."""
219
+ return MediaType.CAMERA
220
+
221
+
222
+ class ImageStreamer(BaseStreamer):
223
+ """Stream from image file.
224
+
225
+ Args:
226
+ input_path (str): Path to an image.
227
+ loop (bool): Whether to loop through the image or not. Defaults to False.
228
+
229
+ Example:
230
+ >>> streamer = ImageStreamer(input_path="../image.jpg")
231
+ ... for frame in streamer:
232
+ ... cv2.imshow("Window", frame)
233
+ ... cv2.waitKey(0)
234
+ """
235
+
236
+ def __init__(self, input_path: str, loop: bool = False) -> None:
237
+ self.loop = loop
238
+ self.media_type = MediaType.IMAGE
239
+ if not Path(input_path).is_file():
240
+ msg = f"Can't find the image by {input_path}"
241
+ raise RuntimeError(msg)
242
+ self.image = cv2.imread(input_path, cv2.IMREAD_COLOR)
243
+ if self.image is None:
244
+ msg = f"Can't open the image from {input_path}"
245
+ raise RuntimeError(msg)
246
+ self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
247
+
248
+ def __iter__(self) -> Iterator[np.ndarray]:
249
+ """If loop is True, yield the image again and again."""
250
+ if not self.loop:
251
+ yield self.image
252
+ else:
253
+ while True:
254
+ yield self.image
255
+
256
+ def get_type(self) -> MediaType:
257
+ """Returns the type of the streamer."""
258
+ return MediaType.IMAGE
259
+
260
+
261
+ class DirStreamer(BaseStreamer):
262
+ """Stream from directory of images.
263
+
264
+ Args:
265
+ input_path: Path to the directory of images.
266
+
267
+ Example:
268
+ >>> streamer = DirStreamer(input_path="../images")
269
+ ... for frame in streamer:
270
+ ... cv2.imshow("Window", frame)
271
+ ... cv2.waitKey(0)
272
+ """
273
+
274
+ def __init__(self, input_path: str, loop: bool = False) -> None:
275
+ self.loop = loop
276
+ self.media_type = MediaType.DIR
277
+ self.dir = Path(input_path)
278
+ if not self.dir.is_dir():
279
+ msg = f"Can't find the dir by {input_path}"
280
+ raise RuntimeError(msg)
281
+ self.names = sorted(os.listdir(self.dir))
282
+ if not self.names:
283
+ msg = f"The dir {input_path} is empty"
284
+ raise RuntimeError(msg)
285
+ self.file_id = 0
286
+ for name in self.names:
287
+ filename = self.dir / name
288
+ image = cv2.imread(str(filename), cv2.IMREAD_COLOR)
289
+ if image is not None:
290
+ return
291
+ msg = f"Can't read the first image from {input_path}"
292
+ raise RuntimeError(msg)
293
+
294
+ def __iter__(self) -> Iterator[np.ndarray]:
295
+ """Iterates over the images in a directory.
296
+
297
+ If self.loop is True, it reiterates again from the first image in the directory.
298
+ """
299
+ while self.file_id < len(self.names):
300
+ filename = self.dir / self.names[self.file_id]
301
+ image = cv2.imread(str(filename), cv2.IMREAD_COLOR)
302
+ if self.file_id < len(self.names) - 1:
303
+ self.file_id = self.file_id + 1
304
+ else:
305
+ self.file_id = self.file_id + 1 if not self.loop else 0
306
+ if image is not None:
307
+ yield cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
308
+
309
+ def get_type(self) -> MediaType:
310
+ """Returns the type of the streamer."""
311
+ return MediaType.DIR
312
+
313
+
314
+ def get_streamer(
315
+ input_stream: str,
316
+ loop: bool = False,
317
+ threaded: bool = False,
318
+ ) -> BaseStreamer:
319
+ """Get streamer object based on the file path or camera device index provided.
320
+
321
+ Args:
322
+ input_stream (str): Path to file or directory or index for camera.
323
+ loop (bool): Enable reading the input in a loop. Defaults to False.
324
+ threaded (bool): Run streaming in a separate process. Defaults to False.
325
+
326
+ Returns:
327
+ BaseStreamer: Streamer object.
328
+ """
329
+ errors: list[Exception] = []
330
+ streamer_types = (ImageStreamer, DirStreamer, VideoStreamer)
331
+ for reader in streamer_types:
332
+ try:
333
+ streamer = reader(input_stream, loop) # type: ignore [abstract]
334
+ return ThreadedStreamer(streamer) if threaded else streamer
335
+ except RuntimeError as error: # noqa: PERF203
336
+ errors.append(error)
337
+ try:
338
+ streamer = CameraStreamer(input_stream)
339
+ return ThreadedStreamer(streamer) if threaded else streamer
340
+ except RuntimeError as error:
341
+ errors.append(error)
342
+
343
+ if errors:
344
+ raise RuntimeError(errors)
345
+
346
+ sys.exit(1)
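A short, hedged sketch of how the streamers above can be used; the input path is illustrative, and per get_streamer it may also be a directory of images, a video file, or a camera index string:

from demo_package.streamer import get_streamer

# Illustrative input; could also be a directory, a video file, or a camera index such as "0".
streamer = get_streamer("sample_image.jpg", loop=False, threaded=False)
print(streamer.get_type())  # e.g. MediaType.IMAGE

for frame in streamer:
    # Frames are yielded as RGB numpy arrays.
    print(frame.shape)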
deployments/deployment/Instance segmentation task/python/demo_package/utils.py ADDED
@@ -0,0 +1,61 @@
1
+ # Copyright (C) 2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ """Utils for demo."""
5
+
6
+ from __future__ import annotations
7
+
8
+ import json
9
+ from pathlib import Path
10
+
11
+ from .visualizers import (
12
+ BaseVisualizer,
13
+ ClassificationVisualizer,
14
+ InstanceSegmentationVisualizer,
15
+ ObjectDetectionVisualizer,
16
+ SemanticSegmentationVisualizer,
17
+ )
18
+
19
+
20
+ def get_model_path(path: Path | None) -> Path:
21
+ """Get path to model."""
22
+ model_path = path
23
+ if model_path is None:
24
+ model_path = Path(__file__).parent / "openvino.xml"
25
+ if not model_path.exists():
26
+ msg = "The path to the model was not found."
27
+ raise OSError(msg)
28
+
29
+ return model_path
30
+
31
+
32
+ def get_parameters(path: Path | None) -> dict:
33
+ """Get hyper parameters to creating model."""
34
+ parameters_path = path
35
+ if parameters_path is None:
36
+ parameters_path = Path(__file__).parent / "config.json"
37
+ if not parameters_path.exists():
38
+ msg = "The path to the config was not found."
39
+ raise OSError(msg)
40
+
41
+ with Path.open(parameters_path, encoding="utf8") as file:
42
+ return json.load(file)
43
+
44
+
45
+ def create_visualizer(
46
+ task_type: str,
47
+ labels: list,
48
+ no_show: bool = False,
49
+ output: str = "./outputs",
50
+ ) -> BaseVisualizer | None:
51
+ """Create visualizer according to kind of task."""
52
+ if task_type == "CLASSIFICATION":
53
+ return ClassificationVisualizer(window_name="Result", no_show=no_show, output=output)
54
+ if task_type == "SEGMENTATION":
55
+ return SemanticSegmentationVisualizer(window_name="Result", labels=labels, no_show=no_show, output=output)
56
+ if task_type == "INSTANCE_SEGMENTATION":
57
+ return InstanceSegmentationVisualizer(window_name="Result", labels=labels, no_show=no_show, output=output)
58
+ if task_type == "DETECTION":
59
+ return ObjectDetectionVisualizer(window_name="Result", labels=labels, no_show=no_show, output=output)
60
+ msg = "Visualizer for f{task_type} is not implemented"
61
+ raise NotImplementedError(msg)
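A hedged sketch of the helpers above, assuming the exported config.json from this deployment is available at the illustrative path below:

from pathlib import Path

from demo_package.utils import create_visualizer, get_parameters

params = get_parameters(Path("deployment/Instance segmentation task/model/config.json"))
visualizer = create_visualizer(
    task_type=params["task_type"].upper(),         # e.g. "INSTANCE_SEGMENTATION"
    labels=params["model_parameters"]["labels"],   # label structure as stored in config.json
    no_show=True,                                  # do not open an OpenCV window
    output="./outputs",
)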
deployments/deployment/Instance segmentation task/python/demo_package/visualizers/__init__.py ADDED
@@ -0,0 +1,22 @@
1
+ # Copyright (C) 2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ """Initialization of visualizers."""
5
+
6
+ from .vis_utils import dump_frames
7
+ from .visualizer import (
8
+ BaseVisualizer,
9
+ ClassificationVisualizer,
10
+ InstanceSegmentationVisualizer,
11
+ ObjectDetectionVisualizer,
12
+ SemanticSegmentationVisualizer,
13
+ )
14
+
15
+ __all__ = [
16
+ "BaseVisualizer",
17
+ "dump_frames",
18
+ "ClassificationVisualizer",
19
+ "SemanticSegmentationVisualizer",
20
+ "InstanceSegmentationVisualizer",
21
+ "ObjectDetectionVisualizer",
22
+ ]
deployments/deployment/Instance segmentation task/python/demo_package/visualizers/vis_utils.py ADDED
@@ -0,0 +1,190 @@
1
+ # Copyright (C) 2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ """This module implements activation map."""
5
+
6
+ from __future__ import annotations
7
+
8
+ import colorsys
9
+ import random
10
+ from pathlib import Path
11
+
12
+ import cv2
13
+ import numpy as np
14
+
15
+
16
+ def get_actmap(
17
+ saliency_map: np.ndarray,
18
+ output_res: tuple | list,
19
+ ) -> np.ndarray:
20
+ """Get activation map (heatmap) from saliency map.
21
+
22
+ Returns an activation map (heatmap) computed from the given saliency map.
23
+
24
+ Args:
25
+ saliency_map (np.ndarray): Saliency map with pixel values from 0-255
26
+ output_res (Union[tuple, list]): Output resolution
27
+
28
+ Returns:
29
+ saliency_map (np.ndarray): [H, W, 3] colormap, more red means more salient
30
+
31
+ """
32
+ if len(saliency_map.shape) == 3:
33
+ saliency_map = saliency_map[0]
34
+
35
+ saliency_map = cv2.resize(saliency_map, output_res)
36
+ return cv2.applyColorMap(saliency_map, cv2.COLORMAP_JET)
37
+
38
+
39
+ def get_input_names_list(input_path: str | int, capture: cv2.VideoCapture) -> list[str]:
40
+ """Lists the filenames of all inputs for demo."""
41
+ # Web camera input
42
+ if isinstance(input_path, int):
43
+ return []
44
+ if "DIR" in str(capture.get_type()):
45
+ return [f.name for f in Path(input_path).iterdir() if f.is_file()]
46
+ return [Path(input_path).name]
47
+
48
+
49
+ def dump_frames(saved_frames: list, output: str, input_path: str | int, capture: cv2.VideoCapture) -> None:
50
+ """Saves images/videos with predictions from saved_frames to output folder with proper names."""
51
+ # If no frames are saved, return
52
+ if not saved_frames:
53
+ return
54
+
55
+ # Create the output folder if it doesn't exist
56
+ output_path = Path(output)
57
+ if not output_path.exists():
58
+ output_path.mkdir(parents=True)
59
+
60
+ # Get the list of input names
61
+ filenames = get_input_names_list(input_path, capture)
62
+
63
+ # If the input is a video, save it as video
64
+ if "VIDEO" in str(capture.get_type()):
65
+ filename = filenames[0]
66
+ h, w, _ = saved_frames[0].shape
67
+ video_path = str(output_path / filename)
68
+ codec = cv2.VideoWriter_fourcc(*"mp4v")
69
+ out = cv2.VideoWriter(video_path, codec, capture.fps(), (w, h))
70
+ for frame in saved_frames:
71
+ out.write(frame)
72
+ out.release()
73
+ print(f"Video was saved to {video_path}")
74
+ # If the input is not a video, save each frame as an image
75
+ else:
76
+ if len(filenames) != len(saved_frames):
77
+ filenames = [f"output_{i}.jpeg" for i, _ in enumerate(saved_frames)]
78
+ for filename, frame in zip(filenames, saved_frames):
79
+ image_path = str(output_path / filename)
80
+ frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
81
+ cv2.imwrite(image_path, frame)
82
+ print(f"Image was saved to {image_path}")
83
+
84
+
85
+ class ColorPalette:
86
+ """Represents a palette of colors."""
87
+
88
+ def __init__(self, num_classes: int, rng: random.Random | None = None) -> None:
89
+ """Initialize the ColorPalette.
90
+
91
+ Args:
92
+ - num_classes (int): The number of classes.
93
+ - rng (Optional[random.Random]): The random number generator.
94
+
95
+ Returns:
96
+ None
97
+ """
98
+ if num_classes <= 0:
99
+ msg = "ColorPalette accepts only the positive number of colors"
100
+ raise ValueError(msg)
101
+ if rng is None:
102
+ rng = random.Random(0xACE) # nosec B311 # disable random check
103
+
104
+ candidates_num = 100
105
+ hsv_colors = [(1.0, 1.0, 1.0)]
106
+ for _ in range(1, num_classes):
107
+ colors_candidates = [
108
+ (rng.random(), rng.uniform(0.8, 1.0), rng.uniform(0.5, 1.0)) for _ in range(candidates_num)
109
+ ]
110
+ min_distances = [self._min_distance(hsv_colors, c) for c in colors_candidates]
111
+ arg_max = np.argmax(min_distances)
112
+ hsv_colors.append(colors_candidates[arg_max])
113
+
114
+ self.palette = [self.hsv2rgb(*hsv) for hsv in hsv_colors]
115
+
116
+ @staticmethod
117
+ def _dist(c1: tuple[float, float, float], c2: tuple[float, float, float]) -> float:
118
+ """Calculate the distance between two colors in 3D space.
119
+
120
+ Args:
121
+ - c1 (Tuple[float, float, float]): Tuple representing the first RGB color.
122
+ - c2 (Tuple[float, float, float]): Tuple representing the second RGB color.
123
+
124
+ Returns:
125
+ float: The distance between the two colors.
126
+ """
127
+ dh = min(abs(c1[0] - c2[0]), 1 - abs(c1[0] - c2[0])) * 2
128
+ ds = abs(c1[1] - c2[1])
129
+ dv = abs(c1[2] - c2[2])
130
+ return dh * dh + ds * ds + dv * dv
131
+
132
+ @classmethod
133
+ def _min_distance(
134
+ cls,
135
+ colors_set: list[tuple[float, float, float]],
136
+ color_candidate: tuple[float, float, float],
137
+ ) -> float:
138
+ """Calculate the minimum distance between color_candidate and colors_set.
139
+
140
+ Args:
141
+ - colors_set: List of tuples representing RGB colors.
142
+ - color_candidate: Tuple representing an RGB color.
143
+
144
+ Returns:
145
+ - float: The minimum distance between color_candidate and colors_set.
146
+ """
147
+ distances = [cls._dist(o, color_candidate) for o in colors_set]
148
+ return min(distances)
149
+
150
+ def to_numpy_array(self) -> np.ndarray:
151
+ """Convert the palette to a NumPy array.
152
+
153
+ Returns:
154
+ np.ndarray: The palette as a NumPy array.
155
+ """
156
+ return np.array(self.palette)
157
+
158
+ @staticmethod
159
+ def hsv2rgb(h: float, s: float, v: float) -> tuple[int, int, int]:
160
+ """Convert HSV color to RGB color.
161
+
162
+ Args:
163
+ - h (float): Hue.
164
+ - s (float): Saturation.
165
+ - v (float): Value.
166
+
167
+ Returns:
168
+ Tuple[int, int, int]: RGB color.
169
+ """
170
+ r, g, b = colorsys.hsv_to_rgb(h, s, v)
171
+ return int(r * 255), int(g * 255), int(b * 255)
172
+
173
+ def __getitem__(self, n: int) -> tuple[int, int, int]:
174
+ """Get the color at index n.
175
+
176
+ Args:
177
+ - n (int): Index.
178
+
179
+ Returns:
180
+ Tuple[int, int, int]: RGB color.
181
+ """
182
+ return self.palette[n % len(self.palette)]
183
+
184
+ def __len__(self) -> int:
185
+ """Returns the number of colors in the palette.
186
+
187
+ Returns:
188
+ int: The number of colors in the palette.
189
+ """
190
+ return len(self.palette)
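A small sketch of ColorPalette, which deterministically picks visually distinct RGB colors, one per class:

from demo_package.visualizers.vis_utils import ColorPalette

palette = ColorPalette(num_classes=3)
for class_id in range(3):
    r, g, b = palette[class_id]           # indexing wraps around the palette
    print(f"class {class_id}: ({r}, {g}, {b})")
print(palette.to_numpy_array().shape)     # (3, 3)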
deployments/deployment/Instance segmentation task/python/demo_package/visualizers/visualizer.py ADDED
@@ -0,0 +1,402 @@
1
+ # Copyright (C) 2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ """Visualizer for results of prediction."""
5
+
6
+ from __future__ import annotations
7
+
8
+ import logging as log
9
+ import time
10
+ from typing import TYPE_CHECKING, NamedTuple
11
+
12
+ import cv2
13
+ import numpy as np
14
+ from model_api.performance_metrics import put_highlighted_text
15
+
16
+ from .vis_utils import ColorPalette
17
+
18
+ if TYPE_CHECKING:
19
+ from demo_package.streamer import BaseStreamer
20
+ from model_api.models.utils import (
21
+ ClassificationResult,
22
+ DetectionResult,
23
+ InstanceSegmentationResult,
24
+ SegmentedObject,
25
+ )
26
+
27
+
28
+ class BaseVisualizer:
29
+ """Base class for visualizators."""
30
+
31
+ def __init__(
32
+ self,
33
+ window_name: str | None = None,
34
+ no_show: bool = False,
35
+ delay: int | None = None,
36
+ output: str = "./outputs",
37
+ ) -> None:
38
+ """Base class for visualizators.
39
+
40
+ Args:
41
+ window_name (str | None): The name of the window. Defaults to None.
42
+ no_show (bool): Flag to indicate whether to show the window. Defaults to False.
43
+ delay (int | None): The delay in milliseconds passed to cv2.waitKey. Defaults to None (treated as 1).
44
+ output (str): The output directory. Defaults to "./outputs".
45
+
46
+ Returns:
47
+ None
48
+ """
49
+ self.window_name = "Window" if window_name is None else window_name
50
+
51
+ self.delay = delay
52
+ self.no_show = no_show
53
+ if delay is None:
54
+ self.delay = 1
55
+ self.output = output
56
+
57
+ def draw(
58
+ self,
59
+ frame: np.ndarray,
60
+ predictions: NamedTuple,
61
+ ) -> np.ndarray:
62
+ """Draw annotations on the image.
63
+
64
+ Args:
65
+ frame: Input image
66
+ predictions: Annotations to be drawn on the input image
67
+
68
+ Returns:
69
+ Output image with annotations.
70
+ """
71
+ raise NotImplementedError
72
+
73
+ def show(self, image: np.ndarray) -> None:
74
+ """Show result image.
75
+
76
+ Args:
77
+ image (np.ndarray): Image to be shown.
78
+ """
79
+ if not self.no_show:
80
+ cv2.imshow(self.window_name, image)
81
+
82
+ def is_quit(self) -> bool:
83
+ """Check user wish to quit."""
84
+ if self.no_show:
85
+ return False
86
+
87
+ return ord("q") == cv2.waitKey(self.delay)
88
+
89
+ def video_delay(self, elapsed_time: float, streamer: BaseStreamer) -> None:
90
+ """Check if video frames were inferenced faster than the original video FPS and delay visualizer if so.
91
+
92
+ Args:
93
+ elapsed_time (float): Time spent on frame inference
94
+ streamer (BaseStreamer): Streamer object
95
+ """
96
+ if self.no_show:
97
+ return
98
+ if "VIDEO" in str(streamer.get_type()):
99
+ fps_num = streamer.fps()
100
+ orig_frame_time = 1 / fps_num
101
+ if elapsed_time < orig_frame_time:
102
+ time.sleep(orig_frame_time - elapsed_time)
103
+
104
+
105
+ class ClassificationVisualizer(BaseVisualizer):
106
+ """Visualize the predicted classification labels by drawing the annotations on the input image.
107
+
108
+ Example:
109
+ >>> predictions = inference_model.predict(frame)
110
+ >>> output = visualizer.draw(frame, predictions)
111
+ >>> visualizer.show(output)
112
+ """
113
+
114
+ def draw(
115
+ self,
116
+ frame: np.ndarray,
117
+ predictions: ClassificationResult,
118
+ ) -> np.ndarray:
119
+ """Draw classification annotations on the image.
120
+
121
+ Args:
122
+ frame: Input image
123
+ predictions: Classification predictions to be drawn on the input image
124
+
125
+ Returns:
126
+ Output image with annotations.
127
+ """
128
+ predictions = predictions.top_labels
129
+ if not any(predictions):
130
+ log.warning("There are no predictions.")
131
+ return frame
132
+
133
+ class_label = predictions[0][1]
134
+ font_scale = 0.7
135
+ label_height = cv2.getTextSize(class_label, cv2.FONT_HERSHEY_COMPLEX, font_scale, 2)[0][1]
136
+ initial_labels_pos = frame.shape[0] - label_height * (int(1.5 * len(predictions)) + 1)
137
+
138
+ if initial_labels_pos < 0:
139
+ initial_labels_pos = label_height
140
+ log.warning("Too much labels to display on this frame, some will be omitted")
141
+ offset_y = initial_labels_pos
142
+
143
+ header = "Label: Score:"
144
+ label_width = cv2.getTextSize(header, cv2.FONT_HERSHEY_COMPLEX, font_scale, 2)[0][0]
145
+ put_highlighted_text(
146
+ frame,
147
+ header,
148
+ (frame.shape[1] - label_width, offset_y),
149
+ cv2.FONT_HERSHEY_COMPLEX,
150
+ font_scale,
151
+ (255, 0, 0),
152
+ 2,
153
+ )
154
+
155
+ for idx, class_label, score in predictions:
156
+ label = f"{idx}. {class_label} {score:.2f}"
157
+ label_width = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX, font_scale, 2)[0][0]
158
+ offset_y += int(label_height * 1.5)
159
+ put_highlighted_text(
160
+ frame,
161
+ label,
162
+ (frame.shape[1] - label_width, offset_y),
163
+ cv2.FONT_HERSHEY_COMPLEX,
164
+ font_scale,
165
+ (255, 0, 0),
166
+ 2,
167
+ )
168
+ return frame
169
+
170
+
171
+ class SemanticSegmentationVisualizer(BaseVisualizer):
172
+ """Visualize the predicted segmentation labels by drawing the annotations on the input image.
173
+
174
+ Example:
175
+ >>> masks = inference_model.predict(frame)
176
+ >>> output = visualizer.draw(frame, masks)
177
+ >>> visualizer.show(output)
178
+ """
179
+
180
+ def __init__(
181
+ self,
182
+ labels: list[str],
183
+ window_name: str | None = None,
184
+ no_show: bool = False,
185
+ delay: int | None = None,
186
+ output: str = "./outputs",
187
+ ) -> None:
188
+ """Semantic segmentation visualizer.
189
+
190
+ Draws the segmentation masks on the input image.
191
+
192
+ Parameters:
193
+ labels (List[str]): List of labels.
194
+ window_name (str | None): Name of the window (default is None).
195
+ no_show (bool): Flag indicating whether to show the window (default is False).
196
+ delay (int | None): Delay in milliseconds (default is None).
197
+ output (str): Output path (default is "./outputs").
198
+
199
+ Returns:
200
+ None
201
+ """
202
+ super().__init__(window_name, no_show, delay, output)
203
+ self.color_palette = ColorPalette(len(labels)).to_numpy_array()
204
+ self.color_map = self._create_color_map()
205
+
206
+ def _create_color_map(self) -> np.ndarray:
207
+ classes = self.color_palette[:, ::-1] # RGB to BGR
208
+ color_map = np.zeros((256, 1, 3), dtype=np.uint8)
209
+ classes_num = len(classes)
210
+ color_map[:classes_num, 0, :] = classes
211
+ color_map[classes_num:, 0, :] = np.random.uniform(0, 255, size=(256 - classes_num, 3))
212
+ return color_map
213
+
214
+ def _apply_color_map(self, input_2d_mask: np.ndarray) -> np.ndarray:
215
+ input_3d = cv2.merge([input_2d_mask, input_2d_mask, input_2d_mask])
216
+ return cv2.LUT(input_3d.astype(np.uint8), self.color_map)
217
+
218
+ def draw(self, frame: np.ndarray, masks: SegmentedObject) -> np.ndarray:
219
+ """Draw segmentation annotations on the image.
220
+
221
+ Args:
222
+ frame: Input image
223
+ masks: Mask annotations to be drawn on the input image
224
+
225
+ Returns:
226
+ Output image with annotations.
227
+ """
228
+ masks = masks.resultImage
229
+ output = self._apply_color_map(masks)
230
+ return cv2.addWeighted(frame, 0.5, output, 0.5, 0)
231
+
232
+
233
+ class ObjectDetectionVisualizer(BaseVisualizer):
234
+ """Visualizes object detection annotations on an input image."""
235
+
236
+ def __init__(
237
+ self,
238
+ labels: list[str],
239
+ window_name: str | None = None,
240
+ no_show: bool = False,
241
+ delay: int | None = None,
242
+ output: str = "./outputs",
243
+ ) -> None:
244
+ """Object detection visualizer.
245
+
246
+ Draws the object detection annotations on the input image.
247
+
248
+ Parameters:
249
+ labels (List[str]): The list of labels.
250
+ window_name (str | None): The name of the window. Defaults to None.
251
+ no_show (bool): Flag to control whether to show the window. Defaults to False.
252
+ delay (int | None): The delay in milliseconds. Defaults to None.
253
+ output (str): The output directory. Defaults to "./outputs".
254
+
255
+ Returns:
256
+ None
257
+ """
258
+ super().__init__(window_name, no_show, delay, output)
259
+ self.labels = labels
260
+ self.color_palette = ColorPalette(len(labels))
261
+
262
+ def draw(
263
+ self,
264
+ frame: np.ndarray,
265
+ predictions: DetectionResult,
266
+ ) -> np.ndarray:
267
+ """Draw instance segmentation annotations on the image.
268
+
269
+ Args:
270
+ frame: Input image
271
+ predictions: Detection results to be drawn on the input image
272
+
273
+ Returns:
274
+ Output image with annotations.
275
+ """
276
+ for detection in predictions.objects:
277
+ class_id = int(detection.id)
278
+ color = self.color_palette[class_id]
279
+ det_label = self.labels[class_id] if self.labels and len(self.labels) > class_id else f"#{class_id}"
280
+ xmin, ymin, xmax, ymax = detection.xmin, detection.ymin, detection.xmax, detection.ymax
281
+ cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
282
+ cv2.putText(
283
+ frame,
284
+ f"{det_label} {detection.score:.1%}",
285
+ (xmin, ymin - 7),
286
+ cv2.FONT_HERSHEY_COMPLEX,
287
+ 0.6,
288
+ color,
289
+ 1,
290
+ )
291
+
292
+ return frame
293
+
294
+
295
+ class InstanceSegmentationVisualizer(BaseVisualizer):
296
+ """Visualizes Instance Segmentation annotations on an input image."""
297
+
298
+ def __init__(
299
+ self,
300
+ labels: list[str],
301
+ window_name: str | None = None,
302
+ no_show: bool = False,
303
+ delay: int | None = None,
304
+ output: str = "./outputs",
305
+ ) -> None:
306
+ """Instance segmentation visualizer.
307
+
308
+ Draws the instance segmentation annotations on the input image.
309
+
310
+ Args:
311
+ labels (List[str]): The list of labels.
312
+ window_name (str | None): The name of the window. Defaults to None.
313
+ no_show (bool): A flag to indicate whether to show the window. Defaults to False.
314
+ delay (int | None): The delay in milliseconds. Defaults to None.
315
+ output (str): The path to the output directory. Defaults to "./outputs".
316
+
317
+ Returns:
318
+ None
319
+ """
320
+ super().__init__(window_name, no_show, delay, output)
321
+ self.labels = labels
322
+ colors_num = len(labels) if labels else 80
323
+ self.show_boxes = False
324
+ self.show_scores = True
325
+ self.palette = ColorPalette(colors_num)
326
+
327
+ def draw(
328
+ self,
329
+ frame: np.ndarray,
330
+ predictions: InstanceSegmentationResult,
331
+ ) -> np.ndarray:
332
+ """Draw the instance segmentation results on the input frame.
333
+
334
+ Args:
335
+ frame: np.ndarray - The input frame on which to draw the instance segmentation results.
336
+ predictions: InstanceSegmentationResult - The instance segmentation results to be drawn.
337
+
338
+ Returns:
339
+ np.ndarray - The input frame with the instance segmentation results drawn on it.
340
+ """
341
+ result = frame.copy()
342
+ output_objects = predictions.segmentedObjects
343
+ bboxes = [[output.xmin, output.ymin, output.xmax, output.ymax] for output in output_objects]
344
+ scores = [output.score for output in output_objects]
345
+ masks = [output.mask for output in output_objects]
346
+ label_names = [output.str_label for output in output_objects]
347
+
348
+ result = self._overlay_masks(result, masks)
349
+ return self._overlay_labels(result, bboxes, label_names, scores)
350
+
351
+ def _overlay_masks(self, image: np.ndarray, masks: list[np.ndarray]) -> np.ndarray:
352
+ segments_image = image.copy()
353
+ aggregated_mask = np.zeros(image.shape[:2], dtype=np.uint8)
354
+ aggregated_colored_mask = np.zeros(image.shape, dtype=np.uint8)
355
+ all_contours = []
356
+
357
+ for i, mask in enumerate(masks):
358
+ contours = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
359
+ if contours:
360
+ all_contours.append(contours[0])
361
+
362
+ mask_color = self.palette[i]
363
+ cv2.bitwise_or(aggregated_mask, mask, dst=aggregated_mask)
364
+ cv2.bitwise_or(aggregated_colored_mask, mask_color, dst=aggregated_colored_mask, mask=mask)
365
+
366
+ # Fill the area occupied by all instances with a colored instances mask image
367
+ cv2.bitwise_and(segments_image, (0, 0, 0), dst=segments_image, mask=aggregated_mask)
368
+ cv2.bitwise_or(segments_image, aggregated_colored_mask, dst=segments_image, mask=aggregated_mask)
369
+
370
+ cv2.addWeighted(image, 0.5, segments_image, 0.5, 0, dst=image)
371
+ cv2.drawContours(image, all_contours, -1, (0, 0, 0))
372
+ return image
373
+
374
+ def _overlay_boxes(self, image: np.ndarray, boxes: list[np.ndarray], classes: list[int]) -> np.ndarray:
375
+ for box, class_id in zip(boxes, classes):
376
+ color = self.palette[class_id]
377
+ top_left, bottom_right = box[:2], box[2:]
378
+ image = cv2.rectangle(image, top_left, bottom_right, color, 2)
379
+ return image
380
+
381
+ def _overlay_labels(
382
+ self,
383
+ image: np.ndarray,
384
+ boxes: list[np.ndarray],
385
+ classes: list[str],
386
+ scores: list[float],
387
+ ) -> np.ndarray:
388
+ template = "{}: {:.2f}" if self.show_scores else "{}"
389
+
390
+ for box, score, label in zip(boxes, scores, classes):
391
+ text = template.format(label, score)
392
+ textsize = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)[0]
393
+ cv2.putText(
394
+ image,
395
+ text,
396
+ (box[0], box[1] + int(textsize[0] / 3)),
397
+ cv2.FONT_HERSHEY_SIMPLEX,
398
+ 0.5,
399
+ (255, 255, 255),
400
+ 1,
401
+ )
402
+ return image
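A hedged end-to-end sketch tying the pieces of this package together (model wrapper, streamer, visualizer, frame dumping); the paths and layout are assumptions based on the deployment structure in this repository, and the bundled demo.py does something similar:

from pathlib import Path

from demo_package.model_wrapper import ModelWrapper
from demo_package.streamer import get_streamer
from demo_package.utils import create_visualizer
from demo_package.visualizers import dump_frames

model = ModelWrapper(Path("deployment/Instance segmentation task/model"))
visualizer = create_visualizer(model.task_type, model.labels, no_show=True)
streamer = get_streamer("sample_image.jpg")

saved_frames = []
for frame in streamer:
    predictions, _ = model(frame)                          # preprocess, infer, postprocess
    saved_frames.append(visualizer.draw(frame, predictions))
dump_frames(saved_frames, "./outputs", "sample_image.jpg", streamer)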
deployments/deployment/Instance segmentation task/python/requirements.txt ADDED
@@ -0,0 +1,3 @@
1
+ openvino==2024.3.0
2
+ openvino-model-api==0.2.5
3
+ numpy==1.26.4
deployments/deployment/Instance segmentation task/python/setup.py ADDED
@@ -0,0 +1,30 @@
1
+ # Copyright (C) 2024 Intel Corporation
2
+ # SPDX-License-Identifier: Apache-2.0
3
+ #
4
+ """setup file for demo package."""
5
+
6
+ from pathlib import Path
7
+
8
+ from setuptools import find_packages, setup
9
+
10
+ SETUP_DIR = Path(__file__).resolve().parent
11
+
12
+ with Path.open(SETUP_DIR / "requirements.txt", encoding="utf8") as f:
13
+ required = f.read().splitlines()
14
+
15
+ packages = find_packages(str(SETUP_DIR))
16
+ package_dir = {packages[0]: str(SETUP_DIR / packages[0])}
17
+
18
+ setup(
19
+ name=packages[0],
20
+ version="0.0",
21
+ author="Intel® Corporation",
22
+ license="Copyright (c) 2024 Intel Corporation. SPDX-License-Identifier: Apache-2.0",
23
+ description="Demo based on ModelAPI classes",
24
+ packages=packages,
25
+ package_dir=package_dir,
26
+ package_data={
27
+ packages[0]: ["*.json"],
28
+ },
29
+ install_requires=required,
30
+ )
deployments/deployment/project.json ADDED
@@ -0,0 +1,77 @@
1
+ {
2
+ "id": "6483114c18fb8c1c529bd149",
3
+ "name": "eggs!",
4
+ "creation_time": "2023-06-09T11:47:24.780000+00:00",
5
+ "creator_id": "dd725a2c-b183-4616-bcf3-0894843fb6a5",
6
+ "pipeline": {
7
+ "tasks": [
8
+ {
9
+ "id": "6483114c18fb8c1c529bd14a",
10
+ "title": "Dataset",
11
+ "task_type": "dataset"
12
+ },
13
+ {
14
+ "id": "6483114c18fb8c1c529bd14d",
15
+ "title": "Instance segmentation task",
16
+ "task_type": "instance_segmentation",
17
+ "labels": [
18
+ {
19
+ "id": "6483114c18fb8c1c529bd150",
20
+ "name": "egg",
21
+ "is_anomalous": false,
22
+ "color": "#c9e649ff",
23
+ "hotkey": "",
24
+ "is_empty": false,
25
+ "group": "Instance segmentation labels",
26
+ "parent_id": null
27
+ },
28
+ {
29
+ "id": "6483114c18fb8c1c529bd154",
30
+ "name": "Empty",
31
+ "is_anomalous": false,
32
+ "color": "#000000ff",
33
+ "hotkey": "",
34
+ "is_empty": true,
35
+ "group": "Empty",
36
+ "parent_id": null
37
+ }
38
+ ],
39
+ "label_schema_id": "6483114c18fb8c1c529bd156"
40
+ }
41
+ ],
42
+ "connections": [
43
+ {
44
+ "from": "6483114c18fb8c1c529bd14a",
45
+ "to": "6483114c18fb8c1c529bd14d"
46
+ }
47
+ ]
48
+ },
49
+ "thumbnail": "/api/v1/organizations/0ec46502-f590-4358-afff-a6beb25fe89f/workspaces/97ecb1e9-4367-4bc6-b335-1c6e7aedbf77/projects/6483114c18fb8c1c529bd149/thumbnail",
50
+ "performance": {
51
+ "score": 0.9520755876647748,
52
+ "task_performances": [
53
+ {
54
+ "task_id": "6483114c18fb8c1c529bd14d",
55
+ "score": {
56
+ "value": 0.9520755876647748,
57
+ "metric_type": "Dice"
58
+ }
59
+ }
60
+ ]
61
+ },
62
+ "storage_info": {},
63
+ "datasets": [
64
+ {
65
+ "id": "6483114c18fb8c1c529bd151",
66
+ "name": "Dataset",
67
+ "use_for_training": true,
68
+ "creation_time": "2023-06-09T11:47:24.780000+00:00"
69
+ },
70
+ {
71
+ "id": "64898e9c68d5ade57e325981",
72
+ "name": "Testing set 1",
73
+ "use_for_training": false,
74
+ "creation_time": "2023-06-14T09:55:40.186000+00:00"
75
+ }
76
+ ]
77
+ }
deployments/example_code/demo.py ADDED
@@ -0,0 +1,34 @@
1
+ # Copyright (C) 2022 Intel Corporation
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing,
10
+ # software distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions
13
+ # and limitations under the License.
14
+
15
+ import cv2
16
+ from geti_sdk.deployment import Deployment
17
+ from geti_sdk.utils import show_image_with_annotation_scene
18
+
19
+ if __name__ == "__main__":
20
+ # Step 1: Load the deployment
21
+ deployment = Deployment.from_folder("../deployment")
22
+
23
+ # Step 2: Load the sample image
24
+ image = cv2.imread("../sample_image.jpg")
25
+ image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
26
+
27
+ # Step 3: Send inference model(s) to CPU
28
+ deployment.load_inference_models(device="CPU")
29
+
30
+ # Step 4: Infer image
31
+ prediction = deployment.infer(image_rgb)
32
+
33
+ # Step 5: Visualization
34
+ show_image_with_annotation_scene(image_rgb, prediction)
deployments/example_code/demo_notebook.ipynb ADDED
@@ -0,0 +1,156 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "id": "86111f81-16f5-46e5-9010-1ef9e05a1571",
6
+ "metadata": {
7
+ "copyright": [
8
+ "INTEL CONFIDENTIAL",
9
+ "Copyright (C) 2022 Intel Corporation",
10
+ "This software and the related documents are Intel copyrighted materials, and your use of them is governed by",
11
+ "the express license under which they were provided to you (\"License\"). Unless the License provides otherwise,",
12
+ "you may not use, modify, copy, publish, distribute, disclose or transmit this software or the related documents",
13
+ "without Intel's prior written permission.",
14
+ "This software and the related documents are provided as is, with no express or implied warranties,",
15
+ "other than those that are expressly stated in the License."
16
+ ]
17
+ },
18
+ "source": [
19
+ "# Intel® Geti™ deployment demo notebook\n",
20
+ "This notebook demonstrates how to run inference for a deployed Intel® Geti™ project on your local machine. It contains the following steps:\n",
21
+ "1. Load the deployment for the project from the exported `deployment` folder\n",
22
+ "2. Load a sample image to run inference on\n",
23
+ "3. Prepare the deployment for inference by sending the model (or models) for the project to the CPU\n",
24
+ "4. Infer image\n",
25
+ "5. Visualize prediction"
26
+ ]
27
+ },
28
+ {
29
+ "cell_type": "markdown",
30
+ "id": "a0ee561b-49fb-4f8b-9c7f-e4859e3fe99e",
31
+ "metadata": {},
32
+ "source": [
33
+ "### Step 1: Load the deployment"
34
+ ]
35
+ },
36
+ {
37
+ "cell_type": "code",
38
+ "execution_count": null,
39
+ "id": "d04d3e58-8cae-4491-86b6-fbc876fd5e4f",
40
+ "metadata": {},
41
+ "outputs": [],
42
+ "source": [
43
+ "from geti_sdk.deployment import Deployment\n",
44
+ "\n",
45
+ "deployment = Deployment.from_folder(\"../deployment\")"
46
+ ]
47
+ },
48
+ {
49
+ "cell_type": "markdown",
50
+ "id": "713de7c8-0ac4-4865-b947-98ecbc4173fb",
51
+ "metadata": {},
52
+ "source": [
53
+ "### Step 2: Load the sample image"
54
+ ]
55
+ },
56
+ {
57
+ "cell_type": "code",
58
+ "execution_count": null,
59
+ "id": "5c61e01f-2c88-4f0d-ae18-88610cc13bf2",
60
+ "metadata": {},
61
+ "outputs": [],
62
+ "source": [
63
+ "import cv2\n",
64
+ "\n",
65
+ "image = cv2.imread(\"../sample_image.jpg\")\n",
66
+ "image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)"
67
+ ]
68
+ },
69
+ {
70
+ "cell_type": "markdown",
71
+ "id": "40da9013-46f7-488d-972d-5ceddd54a60c",
72
+ "metadata": {},
73
+ "source": [
74
+ "### Step 3: Send inference model(s) to CPU"
75
+ ]
76
+ },
77
+ {
78
+ "cell_type": "code",
79
+ "execution_count": null,
80
+ "id": "f6b80e6f-57fa-421a-b71f-ffbd0847c0a9",
81
+ "metadata": {},
82
+ "outputs": [],
83
+ "source": [
84
+ "deployment.load_inference_models(device='CPU')"
85
+ ]
86
+ },
87
+ {
88
+ "cell_type": "markdown",
89
+ "id": "6f539adc-04e7-43b4-b113-99e7ff7f6482",
90
+ "metadata": {},
91
+ "source": [
92
+ "### Step 4: Infer image"
93
+ ]
94
+ },
95
+ {
96
+ "cell_type": "code",
97
+ "execution_count": null,
98
+ "id": "a0e72d41-ec75-4bfe-859b-7302463b9fb6",
99
+ "metadata": {},
100
+ "outputs": [],
101
+ "source": [
102
+ "prediction = deployment.infer(image_rgb)"
103
+ ]
104
+ },
105
+ {
106
+ "cell_type": "markdown",
107
+ "id": "5f450bb5-29dc-4ac4-b5bb-4b02f350aacc",
108
+ "metadata": {},
109
+ "source": [
110
+ "### Step 5: Visualization"
111
+ ]
112
+ },
113
+ {
114
+ "cell_type": "code",
115
+ "execution_count": null,
116
+ "id": "db0dd922-36aa-4203-bc02-76c17d12d8be",
117
+ "metadata": {},
118
+ "outputs": [],
119
+ "source": [
120
+ "from geti_sdk.utils import show_image_with_annotation_scene\n",
121
+ "\n",
122
+ "show_image_with_annotation_scene(image_rgb, prediction, show_in_notebook=True)"
123
+ ]
124
+ },
125
+ {
126
+ "cell_type": "code",
127
+ "execution_count": null,
128
+ "id": "a342324f-3177-4d61-bee4-40b47d0f78f8",
129
+ "metadata": {},
130
+ "outputs": [],
131
+ "source": []
132
+ }
133
+ ],
134
+ "metadata": {
135
+ "celltoolbar": "Edit Metadata",
136
+ "kernelspec": {
137
+ "display_name": "Python 3 (ipykernel)",
138
+ "language": "python",
139
+ "name": "python3"
140
+ },
141
+ "language_info": {
142
+ "codemirror_mode": {
143
+ "name": "ipython",
144
+ "version": 3
145
+ },
146
+ "file_extension": ".py",
147
+ "mimetype": "text/x-python",
148
+ "name": "python",
149
+ "nbconvert_exporter": "python",
150
+ "pygments_lexer": "ipython3",
151
+ "version": "3.8.10"
152
+ }
153
+ },
154
+ "nbformat": 4,
155
+ "nbformat_minor": 5
156
+ }
deployments/example_code/demo_ovms.ipynb ADDED
@@ -0,0 +1,421 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "copyright": [
7
+ "INTEL CONFIDENTIAL",
8
+ "Copyright (C) 2023 Intel Corporation",
9
+ "This software and the related documents are Intel copyrighted materials, and your use of them is governed by",
10
+ "the express license under which they were provided to you (\"License\"). Unless the License provides otherwise,",
11
+ "you may not use, modify, copy, publish, distribute, disclose or transmit this software or the related documents",
12
+ "without Intel's prior written permission.",
13
+ "This software and the related documents are provided as is, with no express or implied warranties,",
14
+ "other than those that are expressly stated in the License."
15
+ ]
16
+ },
17
+ "source": [
18
+ "# Serving Intel® Geti™ models with OpenVINO Model Server\n",
19
+ "This notebook shows how to set up an OpenVINO model server to serve the models trained\n",
20
+ "in your Intel® Geti™ project. It also shows how to use the Geti SDK as a client to\n",
21
+ "make inference requests to the model server.\n",
22
+ "\n",
23
+ "# Contents\n",
24
+ "\n",
25
+ "1. **OpenVINO Model Server**\n",
26
+ " 1. Requirements\n",
27
+ " 2. Generating the model server configuration\n",
28
+ " 3. Launching the model server\n",
29
+ "\n",
30
+ "2. **OVMS inference with Geti SDK**\n",
31
+ " 1. Loading inference model and sample image\n",
32
+ " 2. Requesting inference\n",
33
+ " 3. Inspecting the results\n",
34
+ "\n",
35
+ "3. **Conclusion**\n",
36
+ " 1. Cleaning up\n",
37
+ "\n",
38
+ "> **NOTE**: This notebook will set up a model server on the same machine that will be\n",
39
+ "> used as a client to request inference. In a real scenario you'd most likely\n",
40
+ "> want the server and the client to be different physical machines. The steps to set up\n",
41
+ "> OVMS on a remote server are the same as for the local server outlined in this\n",
42
+ "> notebook, but additional network configuration and security measures are most likely\n",
43
+ "> required.\n",
44
+ "\n",
45
+ "# OpenVINO Model Server\n",
46
+ "## Requirements\n",
47
+ "We will be running the OpenVINO Model Server (OVMS) with Docker. Please make sure you\n",
48
+ "have docker available on your system. You can install it by following the instructions\n",
49
+ "[here](https://docs.docker.com/get-docker/).\n",
50
+ "\n",
51
+ "## Generating the model server configuration\n",
52
+ "The `deployment` that was downloaded from the Intel® Geti™ platform can be used to create\n",
53
+ "the configuration files that are needed to set up an OpenVINO model server for your project.\n",
54
+ "\n",
55
+ "The cell below shows how to create the configuration. Running this cell should create\n",
56
+ "a folder called `ovms_models` in a temporary directory. The `ovms_models` folder\n",
57
+ "contains the models and the configuration files required to run OVMS for the Intel®\n",
58
+ "Geti™ project."
59
+ ]
60
+ },
61
+ {
62
+ "cell_type": "code",
63
+ "execution_count": null,
64
+ "metadata": {
65
+ "collapsed": false,
66
+ "jupyter": {
67
+ "outputs_hidden": false
68
+ },
69
+ "pycharm": {
70
+ "name": "#%%\n"
71
+ }
72
+ },
73
+ "outputs": [],
74
+ "source": [
75
+ "import os\n",
76
+ "import tempfile\n",
77
+ "\n",
78
+ "from geti_sdk.deployment import Deployment\n",
79
+ "\n",
80
+ "deployment_path = os.path.join(\"..\", \"deployment\")\n",
81
+ "\n",
82
+ "# Load the Geti deployment\n",
83
+ "deployment = Deployment.from_folder(deployment_path)\n",
84
+ "\n",
85
+ "# Creating the OVMS configuration for the deployment\n",
86
+ "# First, we'll create a temporary directory to store the config files\n",
87
+ "ovms_config_path = os.path.join(tempfile.mkdtemp(), \"ovms_models\")\n",
88
+ "\n",
89
+ "# Next, we generate the OVMS configuration and save it\n",
90
+ "deployment.generate_ovms_config(output_folder=ovms_config_path)\n",
91
+ "\n",
92
+ "print(f\"Configuration for OpenVINO Model Server was created at '{ovms_config_path}'\")"
93
+ ]
94
+ },
95
+ {
96
+ "cell_type": "markdown",
97
+ "metadata": {
98
+ "pycharm": {
99
+ "name": "#%% md\n"
100
+ }
101
+ },
102
+ "source": [
103
+ "## Launching the model server\n",
104
+ "As mentioned before, we will run OVMS in a Docker container. First, we need to make sure\n",
105
+ "that we have the latest OVMS image on our system. Run the cell below to pull the image."
106
+ ]
107
+ },
108
+ {
109
+ "cell_type": "code",
110
+ "execution_count": null,
111
+ "metadata": {
112
+ "collapsed": false,
113
+ "jupyter": {
114
+ "outputs_hidden": false
115
+ },
116
+ "pycharm": {
117
+ "name": "#%%\n"
118
+ }
119
+ },
120
+ "outputs": [],
121
+ "source": [
122
+ "! docker pull openvino/model_server:latest"
123
+ ]
124
+ },
125
+ {
126
+ "cell_type": "markdown",
127
+ "metadata": {
128
+ "pycharm": {
129
+ "name": "#%% md\n"
130
+ }
131
+ },
132
+ "source": [
133
+ "Next, we have to start the container with the configuration that we just generated. This\n",
134
+ "is done in the cell below.\n",
135
+ "\n",
136
+ "> NOTE: The cell below starts the OVMS container and sets it up to listen for inference\n",
137
+ "> requests on port 9000 on your system. If this port is already occupied the `docker run`\n",
138
+ "> command will fail and you may need to try a different port number."
139
+ ]
140
+ },
141
+ {
142
+ "cell_type": "code",
143
+ "execution_count": null,
144
+ "metadata": {
145
+ "collapsed": false,
146
+ "jupyter": {
147
+ "outputs_hidden": false
148
+ },
149
+ "pycharm": {
150
+ "name": "#%%\n"
151
+ }
152
+ },
153
+ "outputs": [],
154
+ "source": [
155
+ "# Launch the OVMS container\n",
156
+ "result = ! docker run -d --rm -v {ovms_config_path}:/models -p 9000:9000 --name ovms_demo openvino/model_server:latest --port 9000 --config_path /models/ovms_model_config.json\n",
157
+ "\n",
158
+ "# Check that the container was created successfully\n",
159
+ "if len(result) == 1:\n",
160
+ " container_id = result[0]\n",
161
+ " print(f\"OVMS container with ID '{container_id}' created.\")\n",
162
+ "else:\n",
163
+ " # Anything other than 1 result indicates that something went wrong\n",
164
+ " raise RuntimeError(result)\n",
165
+ "\n",
166
+ "# Check that the container is running properly\n",
167
+ "container_info = ! docker container inspect {container_id}\n",
168
+ "container_status = str(container_info.grep(\"Status\"))\n",
169
+ "\n",
170
+ "if not container_status or not \"running\" in container_status:\n",
171
+ " raise RuntimeError(\n",
172
+ " f\"Invalid ovms docker container status found: {container_status}. Most \"\n",
173
+ " f\"likely the container has not started properly.\"\n",
174
+ " )\n",
175
+ "print(\"OVMS container is up and running.\")"
176
+ ]
177
+ },
178
+ {
179
+ "cell_type": "markdown",
180
+ "metadata": {
181
+ "pycharm": {
182
+ "name": "#%% md\n"
183
+ }
184
+ },
185
+ "source": [
186
+ "That's it! If all went well the cell above should print the ID of the container that\n",
187
+ "was created. This can be used to identify your container if you have a lot of docker\n",
188
+ "containers running on your system.\n",
189
+ "\n",
190
+ "# OVMS inference with Geti SDK\n",
191
+ "Now that the OVMS container is running, we can use the Geti SDK to talk to it and make an\n",
192
+ "inference request. The remaining part of this notebook shows how to do so.\n",
193
+ "\n",
194
+ "## Loading inference model and sample image\n",
195
+ "In the first part of this notebook we created configuration files for OVMS, using the\n",
196
+ "`deployment` that was generated for your Intel® Geti™ project. To do inference, we need\n",
197
+ "to connect the deployment to the OVMS container that is now running. This is done in the\n",
198
+ "cell below."
199
+ ]
200
+ },
201
+ {
202
+ "cell_type": "code",
203
+ "execution_count": null,
204
+ "metadata": {
205
+ "collapsed": false,
206
+ "jupyter": {
207
+ "outputs_hidden": false
208
+ },
209
+ "pycharm": {
210
+ "name": "#%%\n"
211
+ }
212
+ },
213
+ "outputs": [],
214
+ "source": [
215
+ "# Load the inference models by connecting to OVMS on port 9000\n",
216
+ "deployment.load_inference_models(device=\"http://localhost:9000\")\n",
217
+ "\n",
218
+ "print(\"Connected to OpenVINO Model Server.\")"
219
+ ]
220
+ },
221
+ {
222
+ "cell_type": "markdown",
223
+ "metadata": {
224
+ "pycharm": {
225
+ "name": "#%% md\n"
226
+ }
227
+ },
228
+ "source": [
229
+ "You should see some output indicating that the connection to OVMS was made successfully.\n",
230
+ "If you see any errors at this stage, make sure your OVMS container is running and that the\n",
231
+ "port number is correct.\n",
232
+ "\n",
233
+ "Next up, we'll load a sample image from the project to run inference on"
234
+ ]
235
+ },
236
+ {
237
+ "cell_type": "code",
238
+ "execution_count": null,
239
+ "metadata": {
240
+ "collapsed": false,
241
+ "jupyter": {
242
+ "outputs_hidden": false
243
+ },
244
+ "pycharm": {
245
+ "name": "#%%\n"
246
+ }
247
+ },
248
+ "outputs": [],
249
+ "source": [
250
+ "import cv2\n",
251
+ "\n",
252
+ "# Load the sample image\n",
253
+ "image = cv2.imread(\"../sample_image.jpg\")\n",
254
+ "image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
255
+ "\n",
256
+ "# Show the image in the notebook\n",
257
+ "from IPython.display import display\n",
258
+ "from PIL import Image\n",
259
+ "\n",
260
+ "display(Image.fromarray(image_rgb))"
261
+ ]
262
+ },
263
+ {
264
+ "cell_type": "markdown",
265
+ "metadata": {
266
+ "pycharm": {
267
+ "name": "#%% md\n"
268
+ }
269
+ },
270
+ "source": [
271
+ "## Requesting inference\n",
272
+ "Now that everything is set up, making an inference request is very simple:"
273
+ ]
274
+ },
275
+ {
276
+ "cell_type": "code",
277
+ "execution_count": null,
278
+ "metadata": {
279
+ "collapsed": false,
280
+ "jupyter": {
281
+ "outputs_hidden": false
282
+ },
283
+ "pycharm": {
284
+ "name": "#%%\n"
285
+ }
286
+ },
287
+ "outputs": [],
288
+ "source": [
289
+ "import time\n",
290
+ "\n",
291
+ "t_start = time.time()\n",
292
+ "prediction = deployment.infer(image_rgb)\n",
293
+ "t_end = time.time()\n",
294
+ "\n",
295
+ "print(\n",
296
+ " f\"OVMS inference on sample image completed in {(t_end - t_start) * 1000:.1f} milliseconds.\"\n",
297
+ ")"
298
+ ]
299
+ },
300
+ {
301
+ "cell_type": "markdown",
302
+ "metadata": {
303
+ "pycharm": {
304
+ "name": "#%% md\n"
305
+ }
306
+ },
307
+ "source": [
308
+ "## Inspecting the results\n",
309
+ "Note that the code to request inference is exactly the same as for the case when the model\n",
310
+ "is loaded on the CPU (see `demo_notebook.ipynb`). Like The `prediction` can be shown using\n",
311
+ "the Geti SDK visualization utility function."
312
+ ]
313
+ },
314
+ {
315
+ "cell_type": "code",
316
+ "execution_count": null,
317
+ "metadata": {
318
+ "collapsed": false,
319
+ "jupyter": {
320
+ "outputs_hidden": false
321
+ },
322
+ "pycharm": {
323
+ "name": "#%%\n"
324
+ }
325
+ },
326
+ "outputs": [],
327
+ "source": [
328
+ "from geti_sdk.utils import show_image_with_annotation_scene\n",
329
+ "\n",
330
+ "show_image_with_annotation_scene(image_rgb, prediction, show_in_notebook=True);"
331
+ ]
332
+ },
333
+ {
334
+ "cell_type": "markdown",
335
+ "metadata": {
336
+ "jupyter": {
337
+ "outputs_hidden": false
338
+ },
339
+ "pycharm": {
340
+ "name": "#%% md\n"
341
+ }
342
+ },
343
+ "source": [
344
+ "# Conclusion\n",
345
+ "That's all there is to it! Of course in practice the client would request inference\n",
346
+ "from an OpenVINO model server on a different physical machine, in contrast to the\n",
347
+ "example here where client and server are running on the same machine.\n",
348
+ "\n",
349
+ "The steps outlined in this notebook can be used as a basis to set up a remote\n",
350
+ "client/server combination, but please note that additional network configuration will\n",
351
+ "be required (along with necessary security measures).\n",
352
+ "\n",
353
+ "## Cleaning up\n",
354
+ "To clean up, we'll stop the OVMS docker container that we started. This will\n",
355
+ "automatically remove the container. After that, we'll delete the temporary directory\n",
356
+ "we created to store the config files."
357
+ ]
358
+ },
359
+ {
360
+ "cell_type": "code",
361
+ "execution_count": null,
362
+ "metadata": {},
363
+ "outputs": [],
364
+ "source": [
365
+ "# Stop the container\n",
366
+ "result = ! docker stop {container_id}\n",
367
+ "\n",
368
+ "# Check if removing the container worked correctly\n",
369
+ "if result[0] == container_id:\n",
370
+ " print(f\"OVMS container '{container_id}' stopped and removed successfully.\")\n",
371
+ "else:\n",
372
+ " print(\n",
373
+ " \"An error occurred while removing OVMS docker container. Most likely the container \"\n",
374
+ " \"was already removed. \"\n",
375
+ " )\n",
376
+ " print(f\"The docker daemon responded with the following error: \\n{result}\")\n",
377
+ " \n",
378
+ "# Remove the temporary directory with the OVMS configuration\n",
379
+ "import shutil\n",
380
+ "\n",
381
+ "temp_dir = os.path.dirname(ovms_config_path)\n",
382
+ "try:\n",
383
+ " shutil.rmtree(temp_dir)\n",
384
+ " print(\"Temporary configuration directory removed successfully.\")\n",
385
+ "except FileNotFoundError:\n",
386
+ " print(\n",
387
+ " f\"Temporary directory with OVMS configuration '{temp_dir}' was \"\n",
388
+ " f\"not found on the system. Most likely it is already removed.\"\n",
389
+ " )"
390
+ ]
391
+ },
392
+ {
393
+ "cell_type": "code",
394
+ "execution_count": null,
395
+ "metadata": {},
396
+ "outputs": [],
397
+ "source": []
398
+ }
399
+ ],
400
+ "metadata": {
401
+ "kernelspec": {
402
+ "display_name": "Python 3 (ipykernel)",
403
+ "language": "python",
404
+ "name": "python3"
405
+ },
406
+ "language_info": {
407
+ "codemirror_mode": {
408
+ "name": "ipython",
409
+ "version": 3
410
+ },
411
+ "file_extension": ".py",
412
+ "mimetype": "text/x-python",
413
+ "name": "python",
414
+ "nbconvert_exporter": "python",
415
+ "pygments_lexer": "ipython3",
416
+ "version": "3.8.16"
417
+ }
418
+ },
419
+ "nbformat": 4,
420
+ "nbformat_minor": 4
421
+ }
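The Conclusion cell of the notebook above notes that, in practice, the client and the OpenVINO Model Server would usually run on different machines. As a rough illustration of what the client side of such a setup could look like, the sketch below reuses only the Geti SDK calls already shown in this notebook (`load_inference_models`, `infer`, `show_image_with_annotation_scene`); the host name, port, and file paths are placeholders, and `Deployment.from_folder` is assumed to behave as in the bundled `demo.py`. The additional network and security configuration mentioned in the notebook is still required.

```python
# Hedged client-side sketch of the remote scenario described in the notebook's
# Conclusion cell: the OVMS container runs on another machine and this script acts
# as the client. The host name "ovms-host.example.com", the port, and the file
# paths are placeholders; adjust them to your own setup.
import cv2

from geti_sdk.deployment import Deployment
from geti_sdk.utils import show_image_with_annotation_scene

# Load the exported deployment from disk (assumed entry point, as in demo.py)
deployment = Deployment.from_folder("../deployment")

# Connect to the remote OpenVINO Model Server instead of a local device
deployment.load_inference_models(device="http://ovms-host.example.com:9000")

# Read the sample image and convert it to RGB, as in the notebook
image = cv2.imread("../sample_image.jpg")
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# Request inference from the model server and visualize the prediction
prediction = deployment.infer(image_rgb)
show_image_with_annotation_scene(image_rgb, prediction)
```

The only change compared with local CPU inference is the `device` argument passed to `load_inference_models`; the inference call itself is identical, which is the main point the notebook makes.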
deployments/example_code/requirements-notebook.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Requirements for running the `demo_notebook.ipynb` and `demo_ovms.ipynb` Jupyter notebooks
2
+ geti-sdk==2.6.*
3
+ jupyterlab>=3.6
4
+ opencv-python>=4.10
5
+ Pillow>=9.4.0
6
+ ipython>=8.10.0
deployments/example_code/requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # Base requirements for the deployment
2
+ geti-sdk==2.6.*
3
+ opencv-python>=4.10
deployments/sample_image.jpg ADDED

Git LFS Details

  • SHA256: 7b43ed2d322acc1e1ca5b2b0a478a063965c46c6bd99dda9bead5e2ea42f82b4
  • Pointer size: 131 Bytes
  • Size of remote file: 303 kB