{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from hydra import compose, initialize\n",
    "from PIL import Image \n",
    "\n",
    "# Ensure that the necessary repository is cloned and installed. You may need to run: \n",
    "# git clone [email protected]:WongKinYiu/YOLO.git\n",
    "# cd YOLO \n",
    "# pip install .\n",
    "from yolo import AugmentationComposer, bbox_nms, Config, create_model, custom_logger, draw_bboxes, Vec2Box"
   ]
  },
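  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A quick environment check before going further: the demo assumes a working PyTorch install, and the device setup below prefers CUDA when it is available."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Confirm the PyTorch build and whether a CUDA device can be used.\n",
    "print(f\"torch {torch.__version__}, CUDA available: {torch.cuda.is_available()}\")"
   ]
  },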
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "CONFIG_PATH = \"../yolo/config\"\n",
    "CONFIG_NAME = \"config\"\n",
    "\n",
    "DEVICE = 'cuda:0'\n",
    "CLASS_NUM = 80\n",
    "WEIGHT_PATH = '../weights/v9-c.pt' \n",
    "IMAGE_PATH = '../demo/images/inference/image.png'\n",
    "\n",
    "custom_logger()\n",
    "device = torch.device(DEVICE)\n",
    "image = Image.open(IMAGE_PATH)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "with initialize(config_path=CONFIG_PATH, version_base=None, job_name=\"notebook_job\"):\n",
    "    cfg: Config = compose(config_name=CONFIG_NAME, overrides=[\"task=inference\", f\"task.data.source={IMAGE_PATH}\", \"model=v9-c-deploy\"])\n",
    "    model = create_model(cfg.model, class_num=CLASS_NUM, weight_path=WEIGHT_PATH).to(device)\n",
    "    transform = AugmentationComposer([], cfg.image_size)\n",
    "    vec2box = Vec2Box(model, cfg.image_size, device)"
   ]
  },
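  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a quick sanity check that the model was built and the weights loaded, count its parameters. This is a minimal sketch; the exact count depends on the checkpoint you downloaded."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# The model is a regular torch.nn.Module, so standard introspection applies.\n",
    "n_params = sum(p.numel() for p in model.parameters())\n",
    "print(f\"{n_params / 1e6:.1f}M parameters on {next(model.parameters()).device}\")"
   ]
  },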
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "image, bbox = transform(image, torch.zeros(0, 5))\n",
    "image = image.to(device)[None]"
   ]
  },
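  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Optionally, verify the preprocessed tensor: after adding the batch dimension it should typically have shape `(1, 3, H, W)`, with `(H, W)` matching `cfg.image_size`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Expect something like (1, 3, H, W); H and W come from cfg.image_size.\n",
    "print(image.shape, image.dtype, image.device)"
   ]
  },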
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "with torch.no_grad():\n",
    "    predict = model(image)\n",
    "    predict = vec2box(predict[\"Main\"])\n",
    "\n",
    "predict_box = bbox_nms(predict[0], predict[2], cfg.task.nms)\n",
    "draw_bboxes(image, predict_box, save_path='../demo/images/output/', idx2label=cfg.class_list)"
   ]
  },
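  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "You can also inspect the detections numerically before looking at the rendered image. The sketch below assumes `bbox_nms` returns one tensor per image whose rows are `(class_id, x_min, y_min, x_max, y_max, confidence)`; check `bbox_nms` in your checkout if the layout differs."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Assumed row layout: (class_id, x_min, y_min, x_max, y_max, confidence).\n",
    "for det in predict_box[0]:\n",
    "    class_id, x_min, y_min, x_max, y_max, conf = det.tolist()\n",
    "    label = cfg.class_list[int(class_id)]\n",
    "    print(f\"{label}: conf={conf:.2f}, box=({x_min:.0f}, {y_min:.0f}, {x_max:.0f}, {y_max:.0f})\")"
   ]
  },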
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Sample Output:\n",
    "\n",
    "![image](../demo/images/output/visualize.png)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "yolomit",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}