{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"ename": "ImportError",
"evalue": "cannot import name 'DepthRepresentation' from 'dronescapes_reader' (/scratch/sdc/datasets/dronescapes/dronescapes_reader/__init__.py)",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn [4], line 7\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mrandom\u001b[39;00m\n\u001b[1;32m 6\u001b[0m sys\u001b[38;5;241m.\u001b[39mpath\u001b[38;5;241m.\u001b[39mappend(Path\u001b[38;5;241m.\u001b[39mcwd()\u001b[38;5;241m.\u001b[39mparent\u001b[38;5;241m.\u001b[39m\u001b[38;5;21m__str__\u001b[39m())\n\u001b[0;32m----> 7\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mdronescapes_reader\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m MultiTaskDataset, DepthRepresentation, OpticalFlowRepresentation, SemanticRepresentation\n\u001b[1;32m 8\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtorch\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mutils\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mdata\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m DataLoader\n\u001b[1;32m 9\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mnumpy\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mnp\u001b[39;00m\n",
"\u001b[0;31mImportError\u001b[0m: cannot import name 'DepthRepresentation' from 'dronescapes_reader' (/scratch/sdc/datasets/dronescapes/dronescapes_reader/__init__.py)"
]
}
],
"source": [
"import sys\n",
"from pathlib import Path\n",
"from functools import partial\n",
"from pprint import pprint\n",
"import random\n",
"sys.path.append(Path.cwd().parent.__str__())\n",
"from dronescapes_reader import MultiTaskDataset, DepthRepresentation, OpticalFlowRepresentation, SemanticRepresentation\n",
"from torch.utils.data import DataLoader\n",
"import numpy as np\n",
"import torch as tr\n",
"from media_processing_lib.collage_maker import collage_fn\n",
"from media_processing_lib.image import image_add_title\n",
"import matplotlib.pyplot as plt\n",
"\n",
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[32m[24-05-13 14:30:43 DEBUG]\u001b[0m Building dataset from: '/scratch/sdc/datasets/dronescapes/scripts/../data/train_set' (multitask_dataset.py:186)\n",
"\u001b[32m[24-05-13 14:30:44 INFO]\u001b[0m Found 11664 data points as union of all nodes' data (8 nodes). (multitask_dataset.py:174)\n",
"\u001b[32m[24-05-13 14:30:44 DEBUG]\u001b[0m No explicit tasks provided. Using all of them as read from the paths (8). (multitask_dataset.py:86)\n",
"\u001b[32m[24-05-13 14:30:44 INFO]\u001b[0m Tasks used in this dataset: ['depth_dpt', 'depth_sfm_manual202204', 'edges_dexined', 'normals_sfm_manual202204', 'opticalflow_rife', 'rgb', 'semantic_mask2former_swin_mapillary_converted', 'semantic_segprop8'] (multitask_dataset.py:93)\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[MultiTaskDataset]\n",
" - Path: '/scratch/sdc/datasets/dronescapes/scripts/../data/train_set'\n",
" - Only full data: False\n",
" - Representations (8): [DepthRepresentation(depth_dpt), DepthRepresentation(depth_sfm_manual202204), NpzRepresentation(edges_dexined), NpzRepresentation(normals_sfm_manual202204), OpticalFlowRepresentation(opticalflow_rife), NpzRepresentation(rgb), SemanticRepresentation(semantic_mask2former_swin_mapillary_converted), SemanticRepresentation(semantic_segprop8)]\n",
" - Length: 11664\n",
"== Shapes ==\n",
"{'depth_dpt': torch.Size([540, 960]),\n",
" 'depth_sfm_manual202204': torch.Size([540, 960]),\n",
" 'edges_dexined': torch.Size([540, 960]),\n",
" 'normals_sfm_manual202204': torch.Size([540, 960, 3]),\n",
" 'opticalflow_rife': torch.Size([540, 960, 2]),\n",
" 'rgb': torch.Size([540, 960, 3]),\n",
" 'semantic_mask2former_swin_mapillary_converted': torch.Size([540, 960]),\n",
" 'semantic_segprop8': torch.Size([540, 960])}\n"
]
}
],
"source": [
"sema_repr = partial(SemanticRepresentation, classes=8, color_map=[[0, 255, 0], [0, 127, 0], [255, 255, 0],\n",
" [255, 255, 255], [255, 0, 0], [0, 0, 255],\n",
" [0, 255, 255], [127, 127, 63]])\n",
"reader = MultiTaskDataset(\"../data/train_set\", handle_missing_data=\"fill_none\",\n",
" task_types={\"depth_dpt\": DepthRepresentation(\"depth_dpt\", min_depth=0, max_depth=0.999),\n",
" \"depth_sfm_manual202204\": DepthRepresentation(\"depth_sfm_manual202204\",\n",
" min_depth=0, max_depth=300),\n",
" \"opticalflow_rife\": OpticalFlowRepresentation,\n",
" \"semantic_segprop8\": sema_repr,\n",
" \"semantic_mask2former_swin_mapillary_converted\": sema_repr})\n",
"print(reader)\n",
"print(\"== Shapes ==\")\n",
"pprint(reader.data_shape)"
]
},
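{
"cell_type": "markdown",
"metadata": {},
"source": [
"Below is a minimal, assumed batching sketch using the `DataLoader` imported above. Because `handle_missing_data=\"fill_none\"` can leave some tasks as `None` for a given frame, the default collate would fail, so a custom `collate_fn` is sketched here; the `(data, name, repr_names)` item structure is taken from the next cell, and the per-key stacking is illustrative, not the reader's documented API."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative batching sketch (assumption: items are (data, name, repr_names) tuples\n",
"# whose per-task tensors may be None under handle_missing_data=\"fill_none\").\n",
"def collate(items):\n",
"    datas, names, _ = zip(*items)\n",
"    batched = {}\n",
"    for k in datas[0].keys():\n",
"        vals = [d[k] for d in datas if d[k] is not None]  # drop missing entries per task\n",
"        batched[k] = tr.stack(vals) if len(vals) > 0 else None\n",
"    return batched, list(names)\n",
"\n",
"loader = DataLoader(reader, batch_size=4, shuffle=True, collate_fn=collate)\n",
"batch, batch_names = next(iter(loader))\n",
"pprint({k: v.shape if v is not None else None for k, v in batch.items()})"
]
},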
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"== Random loaded item ==\n"
]
},
{
"ename": "NameError",
"evalue": "name 'reader' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn [2], line 2\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m== Random loaded item ==\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m----> 2\u001b[0m rand_ix \u001b[38;5;241m=\u001b[39m random\u001b[38;5;241m.\u001b[39mrandint(\u001b[38;5;241m0\u001b[39m, \u001b[38;5;28mlen\u001b[39m(\u001b[43mreader\u001b[49m))\n\u001b[1;32m 3\u001b[0m data, name, repr_names \u001b[38;5;241m=\u001b[39m reader[rand_ix] \u001b[38;5;66;03m# get a random item\u001b[39;00m\n\u001b[1;32m 4\u001b[0m img_data \u001b[38;5;241m=\u001b[39m {}\n",
"\u001b[0;31mNameError\u001b[0m: name 'reader' is not defined"
]
}
],
"source": [
"print(\"== Random loaded item ==\")\n",
"rand_ix = random.randint(0, len(reader))\n",
"data, name, repr_names = reader[rand_ix] # get a random item\n",
"img_data = {}\n",
"for k, v in data.items():\n",
" img_data[k] = reader.name_to_task[k].plot_fn(v) if v is not None else np.zeros((*reader.data_shape[k][0:2], 3))\n",
"if \"rgb\" in img_data: # move rgb as 1st item in the collage\n",
" img_data = {\"rgb\": img_data[\"rgb\"], **{k: v for k, v in img_data.items() if k != \"rgb\"}}\n",
"pprint({k: v.shape for k, v in img_data.items()})\n",
"collage = collage_fn(list(img_data.values()), titles=img_data.keys(), size_px=55)\n",
"collage = image_add_title(collage, name, size_px=55, top_padding=110)\n",
"plt.imshow(collage)"
]
},
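{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small illustrative check (not from the original notebook) of how sparse each task's annotations are: sample a few random indices and count `None` entries per task, relying only on the item structure used in the cell above and the `reader.data_shape` dict printed earlier."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative sparsity check: count None entries per task over a small random sample.\n",
"n_samples = 25  # assumed sample size, tune as needed\n",
"missing = {task: 0 for task in reader.data_shape.keys()}\n",
"for ix in random.sample(range(len(reader)), k=n_samples):\n",
"    data, _, _ = reader[ix]\n",
"    for task, v in data.items():\n",
"        missing[task] += v is None\n",
"pprint({task: f\"{cnt}/{n_samples} missing\" for task, cnt in missing.items()})"
]
},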
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "ngc",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}