Commit: again
- pages/Model_Evaluation.py +2 -2
- pages/Upload_and_Predict.py +1 -1
- training/training.ipynb +50 -4
pages/Model_Evaluation.py
@@ -105,12 +105,12 @@ def load_test_data(csv_path):
 def load_model():
     model = models.densenet121(pretrained=False)
     model.classifier = nn.Linear(model.classifier.in_features, len(class_names))
-    model.load_state_dict(torch.load(r"…
+    model.load_state_dict(torch.load(r"Model/Pretrained_Densenet-121.pth", map_location=torch.device('cpu')))
     model.eval()
     return model
 
 # ---- Main UI Buttons ----
-csv_path = r"D:\DR_Classification\…
+csv_path = r"D:\DR_Classification\splits\test_labels.csv"
 model = load_model()
 test_loader = load_test_data(csv_path)
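Note on this hunk: the checkpoint now loads from a repository-relative path, with map_location forcing CPU tensors so a GPU-trained checkpoint still loads on a CPU-only Space. A minimal sketch of the pattern, mirroring the page's own code (the default argument is an addition for illustration):

import torch
import torch.nn as nn
from torchvision import models

class_names = ['No DR', 'Mild', 'Moderate', 'Severe', 'Proliferative DR']

def load_model(checkpoint_path="Model/Pretrained_Densenet-121.pth"):
    # Rebuild the exact architecture the checkpoint was trained with.
    model = models.densenet121(pretrained=False)
    model.classifier = nn.Linear(model.classifier.in_features, len(class_names))
    # map_location lets a GPU-trained checkpoint load on a CPU-only host.
    model.load_state_dict(torch.load(checkpoint_path, map_location=torch.device("cpu")))
    model.eval()  # inference mode: disables dropout, freezes batch-norm statistics
    return model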
pages/Upload_and_Predict.py
@@ -57,7 +57,7 @@ class_names = ['No DR', 'Mild', 'Moderate', 'Severe', 'Proliferative DR']
 
 # Load sample images from CSV with proper label mapping
 @st.cache_data
-def load_sample_images_from_csv(csv_path=r'D:\DR_Classification\…
+def load_sample_images_from_csv(csv_path=r'D:\DR_Classification\splits\test_labels.csv'):
     df = pd.read_csv(csv_path)
     samples = defaultdict(list)
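For context, @st.cache_data memoizes the CSV read across Streamlit reruns, so the sample table is parsed only once. A hypothetical completion of the loader; only the signature, the read_csv call, and the defaultdict appear in the diff, and the grouping loop and column names below are assumptions:

from collections import defaultdict

import pandas as pd
import streamlit as st

@st.cache_data
def load_sample_images_from_csv(csv_path=r'D:\DR_Classification\splits\test_labels.csv'):
    df = pd.read_csv(csv_path)
    samples = defaultdict(list)
    # Assumed schema: 'label' (class) and 'new_path' (image file path),
    # grouping sample image paths per class for the demo UI.
    for _, row in df.iterrows():
        samples[row['label']].append(row['new_path'])
    return samples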
training/training.ipynb
@@ -137,7 +137,7 @@
 "df['label'] = df['diagnosis']\n",
 "\n",
 "# Create output directories for storing train and test images\n",
-"output_root = \"D:\\\\DR_Classification\\\\…
+"output_root = \"D:\\\\DR_Classification\\\\splits\" #change path to your output folder\n",
 "os.makedirs(os.path.join(output_root, \"train\"), exist_ok=True) # Creates 'train' folder if not existing\n",
 "os.makedirs(os.path.join(output_root, \"test\"), exist_ok=True) # Creates 'test' folder if not existing\n",
 "\n",
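Aside: the hunk above still hard-codes a Windows drive for output_root. A portable sketch of the same directory setup (pathlib is a suggestion here, not the notebook's code):

from pathlib import Path

# A relative output root avoids the hard-coded D:\ drive entirely.
output_root = Path("splits")  # change to your output folder
for split in ("train", "test"):
    # Creates the folder if not existing, including parents.
    (output_root / split).mkdir(parents=True, exist_ok=True)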
@@ -267,8 +267,8 @@
 "outputs": [],
 "source": [
 "# Load the split CSVs\n",
-"train_df = pd.read_csv(\"D:/DR_Classification/…
-"test_df = pd.read_csv(\"D:/DR_Classification/…
+"train_df = pd.read_csv(\"D:/DR_Classification/splits/train_labels.csv\")\n",
+"test_df = pd.read_csv(\"D:/DR_Classification/splits/test_labels.csv\")\n",
 "\n",
 "# Extract paths and labels\n",
 "train_paths = train_df['new_path'].tolist()\n",
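For reference, the loading step in plain form, assuming the split CSVs keep the 'new_path' and 'label' columns created earlier in the notebook (only train_paths appears verbatim in the diff; the label extraction is an assumption):

import pandas as pd

train_df = pd.read_csv("D:/DR_Classification/splits/train_labels.csv")
test_df = pd.read_csv("D:/DR_Classification/splits/test_labels.csv")

# 'new_path' comes from the diff; 'label' follows from the earlier
# df['label'] = df['diagnosis'] step.
train_paths = train_df['new_path'].tolist()
train_labels = train_df['label'].tolist()
test_paths = test_df['new_path'].tolist()
test_labels = test_df['label'].tolist()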
@@ -1468,12 +1468,58 @@
 "\n",
 "# Example usage:\n",
 "class_names = ['No DR', 'Mild', 'Moderate', 'Severe', 'Proliferative DR'] # Modify as per your dataset\n",
-"image_path = r'D:\\DR_Classification\\…
+"image_path = r'D:\\DR_Classification\\splits\\train\\007-0025-000.jpg' # Replace with your image path\n",
 "predicted_class, confidence_percentage = predict_image(model, image_path, class_names, device='cpu')\n",
 "\n",
 "print(f\"Predicted Class: {predicted_class}\")\n",
 "print(f\"Confidence: {confidence_percentage:.2f}%\")"
 ]
+},
+{
+"cell_type": "code",
+"execution_count": 1,
+"id": "eb2308ed",
+"metadata": {},
+"outputs": [
+{
+"ename": "FileNotFoundError",
+"evalue": "[Errno 2] No such file or directory: 'D:\\\\DR_Classification\\\\dataset\\\\splits\\\\test_labels.csv'",
+"output_type": "error",
+"traceback": [
+"FileNotFoundError                         Traceback (most recent call last)",
+"Cell In[1], line 12: df = pd.read_csv(csv_path)",
+"  ... (pandas read_csv internal frames elided) ...",
+"FileNotFoundError: [Errno 2] No such file or directory: 'D:\\\\DR_Classification\\\\dataset\\\\splits\\\\test_labels.csv'"
+]
+}
+],
+"source": [
+"import pandas as pd\n",
+"\n",
+"# === File paths ===\n",
+"csv_path = r\"D:\\DR_Classification\\dataset\\splits\\test_labels.csv\"\n",
+"output_csv_path = r\"D:\\DR_Classification\\dataset\\Splitted_data\\splits\\test_labels.csv\"\n",
+"\n",
+"# === Old and new base directory paths ===\n",
+"old_dir = r\"D:\\DR_Classification\\splits\\test\"\n",
+"new_dir = r\"D:\\DR_Classification\\dataset\\splitted-data\\test\"\n",
+"\n",
+"# === Load the CSV ===\n",
+"df = pd.read_csv(csv_path)\n",
+"\n",
+"# === Replace old path with new path in 'new_path' column ===\n",
+"df['new_path'] = df['new_path'].str.replace(old_dir, new_dir, regex=False)\n",
+"\n",
+"# === Save the updated CSV ===\n",
+"df.to_csv(output_csv_path, index=False)\n",
+"\n",
+"print(\"✅ CSV updated and saved at:\", output_csv_path)\n"
+]
 }
 ],
 "metadata": {
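The added cell's stored output shows it failed: csv_path points at a file that does not exist. A defensive variant of the same rewrite; the paths are copied from the diff, while the existence check and the makedirs call are additions:

import os

import pandas as pd

csv_path = r"D:\DR_Classification\dataset\splits\test_labels.csv"
output_csv_path = r"D:\DR_Classification\dataset\Splitted_data\splits\test_labels.csv"
old_dir = r"D:\DR_Classification\splits\test"
new_dir = r"D:\DR_Classification\dataset\splitted-data\test"

if not os.path.exists(csv_path):
    raise FileNotFoundError(f"CSV not found: {csv_path}")  # fail early with a clear message

df = pd.read_csv(csv_path)
# Plain-string replacement; regex=False keeps the backslashes literal.
df['new_path'] = df['new_path'].str.replace(old_dir, new_dir, regex=False)

os.makedirs(os.path.dirname(output_csv_path), exist_ok=True)  # ensure the output folder exists
df.to_csv(output_csv_path, index=False)
print("CSV updated and saved at:", output_csv_path)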