oliver9523 committed
Commit ea36e21 · verified · 1 Parent(s): c07dbb4

Delete example_code

example_code/demo.py DELETED
@@ -1,34 +0,0 @@
- # Copyright (C) 2022 Intel Corporation
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing,
- # software distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions
- # and limitations under the License.
-
- import cv2
- from geti_sdk.deployment import Deployment
- from geti_sdk.utils import show_image_with_annotation_scene
-
- if __name__ == "__main__":
-     # Step 1: Load the deployment
-     deployment = Deployment.from_folder("../deployment")
-
-     # Step 2: Load the sample image
-     image = cv2.imread("../sample_image.jpg")
-     image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-
-     # Step 3: Send inference model(s) to CPU
-     deployment.load_inference_models(device="CPU")
-
-     # Step 4: Infer image
-     prediction = deployment.infer(image_rgb)
-
-     # Step 5: Visualization
-     show_image_with_annotation_scene(image_rgb, prediction)
example_code/demo_notebook.ipynb DELETED
@@ -1,156 +0,0 @@
- {
-  "cells": [
-   {
-    "cell_type": "markdown",
-    "id": "86111f81-16f5-46e5-9010-1ef9e05a1571",
-    "metadata": {
-     "copyright": [
-      "INTEL CONFIDENTIAL",
-      "Copyright (C) 2022 Intel Corporation",
-      "This software and the related documents are Intel copyrighted materials, and your use of them is governed by",
-      "the express license under which they were provided to you (\"License\"). Unless the License provides otherwise,",
-      "you may not use, modify, copy, publish, distribute, disclose or transmit this software or the related documents",
-      "without Intel's prior written permission.",
-      "This software and the related documents are provided as is, with no express or implied warranties,",
-      "other than those that are expressly stated in the License."
-     ]
-    },
-    "source": [
-     "# Intel® Geti™ deployment demo notebook\n",
-     "This notebook demonstrates how to run inference for a deployed Intel® Geti™ project on your local machine. It contains the following steps:\n",
-     "1. Load the deployment for the project from the exported `deployment` folder\n",
-     "2. Load a sample image to run inference on\n",
-     "3. Prepare the deployment for inference by sending the model (or models) for the project to the CPU\n",
-     "4. Infer image\n",
-     "5. Visualize prediction"
-    ]
-   },
-   {
-    "cell_type": "markdown",
-    "id": "a0ee561b-49fb-4f8b-9c7f-e4859e3fe99e",
-    "metadata": {},
-    "source": [
-     "### Step 1: Load the deployment"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "id": "d04d3e58-8cae-4491-86b6-fbc876fd5e4f",
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "from geti_sdk.deployment import Deployment\n",
-     "\n",
-     "deployment = Deployment.from_folder(\"../deployment\")"
-    ]
-   },
-   {
-    "cell_type": "markdown",
-    "id": "713de7c8-0ac4-4865-b947-98ecbc4173fb",
-    "metadata": {},
-    "source": [
-     "### Step 2: Load the sample image"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "id": "5c61e01f-2c88-4f0d-ae18-88610cc13bf2",
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "import cv2\n",
-     "\n",
-     "image = cv2.imread(\"../sample_image.jpg\")\n",
-     "image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)"
-    ]
-   },
-   {
-    "cell_type": "markdown",
-    "id": "40da9013-46f7-488d-972d-5ceddd54a60c",
-    "metadata": {},
-    "source": [
-     "### Step 3: Send inference model(s) to CPU"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "id": "f6b80e6f-57fa-421a-b71f-ffbd0847c0a9",
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "deployment.load_inference_models(device='CPU')"
-    ]
-   },
-   {
-    "cell_type": "markdown",
-    "id": "6f539adc-04e7-43b4-b113-99e7ff7f6482",
-    "metadata": {},
-    "source": [
-     "### Step 4: Infer image"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "id": "a0e72d41-ec75-4bfe-859b-7302463b9fb6",
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "prediction = deployment.infer(image_rgb)"
-    ]
-   },
-   {
-    "cell_type": "markdown",
-    "id": "5f450bb5-29dc-4ac4-b5bb-4b02f350aacc",
-    "metadata": {},
-    "source": [
-     "### Step 5: Visualization"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "id": "db0dd922-36aa-4203-bc02-76c17d12d8be",
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "from geti_sdk.utils import show_image_with_annotation_scene\n",
-     "\n",
-     "show_image_with_annotation_scene(image_rgb, prediction, show_in_notebook=True)"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "id": "a342324f-3177-4d61-bee4-40b47d0f78f8",
-    "metadata": {},
-    "outputs": [],
-    "source": []
-   }
-  ],
-  "metadata": {
-   "celltoolbar": "Edit Metadata",
-   "kernelspec": {
-    "display_name": "Python 3 (ipykernel)",
-    "language": "python",
-    "name": "python3"
-   },
-   "language_info": {
-    "codemirror_mode": {
-     "name": "ipython",
-     "version": 3
-    },
-    "file_extension": ".py",
-    "mimetype": "text/x-python",
-    "name": "python",
-    "nbconvert_exporter": "python",
-    "pygments_lexer": "ipython3",
-    "version": "3.8.10"
-   }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 5
- }
example_code/demo_ovms.ipynb DELETED
@@ -1,421 +0,0 @@
- {
-  "cells": [
-   {
-    "cell_type": "markdown",
-    "metadata": {
-     "copyright": [
-      "INTEL CONFIDENTIAL",
-      "Copyright (C) 2023 Intel Corporation",
-      "This software and the related documents are Intel copyrighted materials, and your use of them is governed by",
-      "the express license under which they were provided to you (\"License\"). Unless the License provides otherwise,",
-      "you may not use, modify, copy, publish, distribute, disclose or transmit this software or the related documents",
-      "without Intel's prior written permission.",
-      "This software and the related documents are provided as is, with no express or implied warranties,",
-      "other than those that are expressly stated in the License."
-     ]
-    },
-    "source": [
-     "# Serving Intel® Geti™ models with OpenVINO Model Server\n",
-     "This notebook shows how to set up an OpenVINO model server to serve the models trained\n",
-     "in your Intel® Geti™ project. It also shows how to use the Geti SDK as a client to\n",
-     "make inference requests to the model server.\n",
-     "\n",
-     "# Contents\n",
-     "\n",
-     "1. **OpenVINO Model Server**\n",
-     "    1. Requirements\n",
-     "    2. Generating the model server configuration\n",
-     "    3. Launching the model server\n",
-     "\n",
-     "2. **OVMS inference with Geti SDK**\n",
-     "    1. Loading inference model and sample image\n",
-     "    2. Requesting inference\n",
-     "    3. Inspecting the results\n",
-     "\n",
-     "3. **Conclusion**\n",
-     "    1. Cleaning up\n",
-     "\n",
-     "> **NOTE**: This notebook will set up a model server on the same machine that will be\n",
-     "> used as a client to request inference. In a real scenario you'd most likely\n",
-     "> want the server and the client to be different physical machines. The steps to set up\n",
-     "> OVMS on a remote server are the same as for the local server outlined in this\n",
-     "> notebook, but additional network configuration and security measures are most likely\n",
-     "> required.\n",
-     "\n",
-     "# OpenVINO Model Server\n",
-     "## Requirements\n",
-     "We will be running the OpenVINO Model Server (OVMS) with Docker. Please make sure you\n",
-     "have Docker available on your system. You can install it by following the instructions\n",
-     "[here](https://docs.docker.com/get-docker/).\n",
-     "\n",
-     "## Generating the model server configuration\n",
-     "The `deployment` that was downloaded from the Intel® Geti™ platform can be used to create\n",
-     "the configuration files that are needed to set up an OpenVINO model server for your project.\n",
-     "\n",
-     "The cell below shows how to create the configuration. Running this cell should create\n",
-     "a folder called `ovms_models` in a temporary directory. The `ovms_models` folder\n",
-     "contains the models and the configuration files required to run OVMS for the Intel®\n",
-     "Geti™ project."
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {
-     "collapsed": false,
-     "jupyter": {
-      "outputs_hidden": false
-     },
-     "pycharm": {
-      "name": "#%%\n"
-     }
-    },
-    "outputs": [],
-    "source": [
-     "import os\n",
-     "import tempfile\n",
-     "\n",
-     "from geti_sdk.deployment import Deployment\n",
-     "\n",
-     "deployment_path = os.path.join(\"..\", \"deployment\")\n",
-     "\n",
-     "# Load the Geti deployment\n",
-     "deployment = Deployment.from_folder(deployment_path)\n",
-     "\n",
-     "# Creating the OVMS configuration for the deployment\n",
-     "# First, we'll create a temporary directory to store the config files\n",
-     "ovms_config_path = os.path.join(tempfile.mkdtemp(), \"ovms_models\")\n",
-     "\n",
-     "# Next, we generate the OVMS configuration and save it\n",
-     "deployment.generate_ovms_config(output_folder=ovms_config_path)\n",
-     "\n",
-     "print(f\"Configuration for OpenVINO Model Server was created at '{ovms_config_path}'\")"
-    ]
-   },
-   {
-    "cell_type": "markdown",
-    "metadata": {
-     "pycharm": {
-      "name": "#%% md\n"
-     }
-    },
-    "source": [
-     "## Launching the model server\n",
-     "As mentioned before, we will run OVMS in a Docker container. First, we need to make sure\n",
-     "that we have the latest OVMS image on our system. Run the cell below to pull the image."
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {
-     "collapsed": false,
-     "jupyter": {
-      "outputs_hidden": false
-     },
-     "pycharm": {
-      "name": "#%%\n"
-     }
-    },
-    "outputs": [],
-    "source": [
-     "! docker pull openvino/model_server:latest"
-    ]
-   },
-   {
-    "cell_type": "markdown",
-    "metadata": {
-     "pycharm": {
-      "name": "#%% md\n"
-     }
-    },
-    "source": [
-     "Next, we have to start the container with the configuration that we just generated. This\n",
-     "is done in the cell below.\n",
-     "\n",
-     "> NOTE: The cell below starts the OVMS container and sets it up to listen for inference\n",
-     "> requests on port 9000 on your system. If this port is already occupied the `docker run`\n",
-     "> command will fail and you may need to try a different port number."
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {
-     "collapsed": false,
-     "jupyter": {
-      "outputs_hidden": false
-     },
-     "pycharm": {
-      "name": "#%%\n"
-     }
-    },
-    "outputs": [],
-    "source": [
-     "# Launch the OVMS container\n",
-     "result = ! docker run -d --rm -v {ovms_config_path}:/models -p 9000:9000 --name ovms_demo openvino/model_server:latest --port 9000 --config_path /models/ovms_model_config.json\n",
-     "\n",
-     "# Check that the container was created successfully\n",
-     "if len(result) == 1:\n",
-     "    container_id = result[0]\n",
-     "    print(f\"OVMS container with ID '{container_id}' created.\")\n",
-     "else:\n",
-     "    # Anything other than 1 result indicates that something went wrong\n",
-     "    raise RuntimeError(result)\n",
-     "\n",
-     "# Check that the container is running properly\n",
-     "container_info = ! docker container inspect {container_id}\n",
-     "container_status = str(container_info.grep(\"Status\"))\n",
-     "\n",
-     "if not container_status or \"running\" not in container_status:\n",
-     "    raise RuntimeError(\n",
-     "        f\"Invalid ovms docker container status found: {container_status}. Most \"\n",
-     "        f\"likely the container has not started properly.\"\n",
-     "    )\n",
-     "print(\"OVMS container is up and running.\")"
-    ]
-   },
-   {
-    "cell_type": "markdown",
-    "metadata": {
-     "pycharm": {
-      "name": "#%% md\n"
-     }
-    },
-    "source": [
-     "That's it! If all went well the cell above should print the ID of the container that\n",
-     "was created. This can be used to identify your container if you have a lot of docker\n",
-     "containers running on your system.\n",
-     "\n",
-     "# OVMS inference with Geti SDK\n",
-     "Now that the OVMS container is running, we can use the Geti SDK to talk to it and make an\n",
-     "inference request. The remaining part of this notebook shows how to do so.\n",
-     "\n",
-     "## Loading inference model and sample image\n",
-     "In the first part of this notebook we created configuration files for OVMS, using the\n",
-     "`deployment` that was generated for your Intel® Geti™ project. To do inference, we need\n",
-     "to connect the deployment to the OVMS container that is now running. This is done in the\n",
-     "cell below."
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {
-     "collapsed": false,
-     "jupyter": {
-      "outputs_hidden": false
-     },
-     "pycharm": {
-      "name": "#%%\n"
-     }
-    },
-    "outputs": [],
-    "source": [
-     "# Load the inference models by connecting to OVMS on port 9000\n",
-     "deployment.load_inference_models(device=\"http://localhost:9000\")\n",
-     "\n",
-     "print(\"Connected to OpenVINO Model Server.\")"
-    ]
-   },
-   {
-    "cell_type": "markdown",
-    "metadata": {
-     "pycharm": {
-      "name": "#%% md\n"
-     }
-    },
-    "source": [
-     "You should see some output indicating that the connection to OVMS was made successfully.\n",
-     "If you see any errors at this stage, make sure your OVMS container is running and that the\n",
-     "port number is correct.\n",
-     "\n",
-     "Next up, we'll load a sample image from the project to run inference on."
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {
-     "collapsed": false,
-     "jupyter": {
-      "outputs_hidden": false
-     },
-     "pycharm": {
-      "name": "#%%\n"
-     }
-    },
-    "outputs": [],
-    "source": [
-     "import cv2\n",
-     "\n",
-     "# Load the sample image\n",
-     "image = cv2.imread(\"../sample_image.jpg\")\n",
-     "image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
-     "\n",
-     "# Show the image in the notebook\n",
-     "from IPython.display import display\n",
-     "from PIL import Image\n",
-     "\n",
-     "display(Image.fromarray(image_rgb))"
-    ]
-   },
-   {
-    "cell_type": "markdown",
-    "metadata": {
-     "pycharm": {
-      "name": "#%% md\n"
-     }
-    },
-    "source": [
-     "## Requesting inference\n",
-     "Now that everything is set up, making an inference request is very simple:"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {
-     "collapsed": false,
-     "jupyter": {
-      "outputs_hidden": false
-     },
-     "pycharm": {
-      "name": "#%%\n"
-     }
-    },
-    "outputs": [],
-    "source": [
-     "import time\n",
-     "\n",
-     "t_start = time.time()\n",
-     "prediction = deployment.infer(image_rgb)\n",
-     "t_end = time.time()\n",
-     "\n",
-     "print(\n",
-     "    f\"OVMS inference on sample image completed in {(t_end - t_start) * 1000:.1f} milliseconds.\"\n",
-     ")"
-    ]
-   },
-   {
-    "cell_type": "markdown",
-    "metadata": {
-     "pycharm": {
-      "name": "#%% md\n"
-     }
-    },
-    "source": [
-     "## Inspecting the results\n",
-     "Note that the code to request inference is exactly the same as for the case when the model\n",
-     "is loaded on the CPU (see `demo_notebook.ipynb`). The `prediction` can be shown using\n",
-     "the Geti SDK visualization utility function."
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {
-     "collapsed": false,
-     "jupyter": {
-      "outputs_hidden": false
-     },
-     "pycharm": {
-      "name": "#%%\n"
-     }
-    },
-    "outputs": [],
-    "source": [
-     "from geti_sdk.utils import show_image_with_annotation_scene\n",
-     "\n",
-     "show_image_with_annotation_scene(image_rgb, prediction, show_in_notebook=True);"
-    ]
-   },
-   {
-    "cell_type": "markdown",
-    "metadata": {
-     "jupyter": {
-      "outputs_hidden": false
-     },
-     "pycharm": {
-      "name": "#%% md\n"
-     }
-    },
-    "source": [
-     "# Conclusion\n",
-     "That's all there is to it! Of course in practice the client would request inference\n",
-     "from an OpenVINO model server on a different physical machine, in contrast to the\n",
-     "example here where client and server are running on the same machine.\n",
-     "\n",
-     "The steps outlined in this notebook can be used as a basis to set up a remote\n",
-     "client/server combination, but please note that additional network configuration will\n",
-     "be required (along with necessary security measures).\n",
-     "\n",
-     "## Cleaning up\n",
-     "To clean up, we'll stop the OVMS docker container that we started. This will\n",
-     "automatically remove the container. After that, we'll delete the temporary directory\n",
-     "we created to store the config files."
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": [
-     "# Stop the container\n",
-     "result = ! docker stop {container_id}\n",
-     "\n",
-     "# Check if removing the container worked correctly\n",
-     "if result[0] == container_id:\n",
-     "    print(f\"OVMS container '{container_id}' stopped and removed successfully.\")\n",
-     "else:\n",
-     "    print(\n",
-     "        \"An error occurred while removing OVMS docker container. Most likely the container \"\n",
-     "        \"was already removed. \"\n",
-     "    )\n",
-     "    print(f\"The docker daemon responded with the following error: \\n{result}\")\n",
-     "\n",
-     "# Remove the temporary directory with the OVMS configuration\n",
-     "import shutil\n",
-     "\n",
-     "temp_dir = os.path.dirname(ovms_config_path)\n",
-     "try:\n",
-     "    shutil.rmtree(temp_dir)\n",
-     "    print(\"Temporary configuration directory removed successfully.\")\n",
-     "except FileNotFoundError:\n",
-     "    print(\n",
-     "        f\"Temporary directory with OVMS configuration '{temp_dir}' was \"\n",
-     "        f\"not found on the system. Most likely it is already removed.\"\n",
-     "    )"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": null,
-    "metadata": {},
-    "outputs": [],
-    "source": []
-   }
-  ],
-  "metadata": {
-   "kernelspec": {
-    "display_name": "Python 3 (ipykernel)",
-    "language": "python",
-    "name": "python3"
-   },
-   "language_info": {
-    "codemirror_mode": {
-     "name": "ipython",
-     "version": 3
-    },
-    "file_extension": ".py",
-    "mimetype": "text/x-python",
-    "name": "python",
-    "nbconvert_exporter": "python",
-    "pygments_lexer": "ipython3",
-    "version": "3.8.16"
-   }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 4
- }
example_code/requirements-notebook.txt DELETED
@@ -1,6 +0,0 @@
- # Requirements for running the `demo_notebook.ipynb` and `demo_ovms.ipynb` Jupyter notebooks
- geti-sdk==1.5.*
- jupyterlab==3.6.*
- opencv-python>=4.5.0
- Pillow>=9.4.0
- ipython>=8.10.0
example_code/requirements.txt DELETED
@@ -1,3 +0,0 @@
- # Base requirements for the deployment
- geti-sdk==1.5.*
- opencv-python>=4.5.0