{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "98165c88-8ead-4fae-9ea8-6b2e82996fc5", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "6\n", "num params encoder 50840\n", "num params 21496282\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ " 0%| | 0/48 [00:00 50\u001b[0m outputs \u001b[38;5;241m=\u001b[39m model(inputs, return_mask \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m 51\u001b[0m _, predicted \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mmax(outputs, \u001b[38;5;241m1\u001b[39m)\n\u001b[1;32m 52\u001b[0m results[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124moutput\u001b[39m\u001b[38;5;124m'\u001b[39m]\u001b[38;5;241m.\u001b[39mextend(outputs\u001b[38;5;241m.\u001b[39mcpu()\u001b[38;5;241m.\u001b[39mnumpy()\u001b[38;5;241m.\u001b[39mtolist())\n", "File \u001b[0;32m~/.conda/envs/frbnn/lib/python3.11/site-packages/torch/nn/modules/module.py:1553\u001b[0m, in \u001b[0;36mModule._wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1551\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compiled_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs) \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[1;32m 1552\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1553\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_call_impl(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n", "File \u001b[0;32m~/.conda/envs/frbnn/lib/python3.11/site-packages/torch/nn/modules/module.py:1562\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1557\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m 1558\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m 1559\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks\n\u001b[1;32m 1560\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m 1561\u001b[0m \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1562\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m forward_call(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 1564\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 1565\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m\n", "File \u001b[0;32m~/.conda/envs/frbnn/lib/python3.11/site-packages/torch/nn/parallel/data_parallel.py:186\u001b[0m, in \u001b[0;36mDataParallel.forward\u001b[0;34m(self, *inputs, **kwargs)\u001b[0m\n\u001b[1;32m 184\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m 
\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmodule(\u001b[38;5;241m*\u001b[39minputs[\u001b[38;5;241m0\u001b[39m], \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mmodule_kwargs[\u001b[38;5;241m0\u001b[39m])\n\u001b[1;32m 185\u001b[0m replicas \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mreplicate(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mmodule, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdevice_ids[:\u001b[38;5;28mlen\u001b[39m(inputs)])\n\u001b[0;32m--> 186\u001b[0m outputs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mparallel_apply(replicas, inputs, module_kwargs)\n\u001b[1;32m 187\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mgather(outputs, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39moutput_device)\n", "File \u001b[0;32m~/.conda/envs/frbnn/lib/python3.11/site-packages/torch/nn/parallel/data_parallel.py:201\u001b[0m, in \u001b[0;36mDataParallel.parallel_apply\u001b[0;34m(self, replicas, inputs, kwargs)\u001b[0m\n\u001b[1;32m 200\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mparallel_apply\u001b[39m(\u001b[38;5;28mself\u001b[39m, replicas: Sequence[T], inputs: Sequence[Any], kwargs: Any) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m List[Any]:\n\u001b[0;32m--> 201\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m parallel_apply(replicas, inputs, kwargs, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdevice_ids[:\u001b[38;5;28mlen\u001b[39m(replicas)])\n", "File \u001b[0;32m~/.conda/envs/frbnn/lib/python3.11/site-packages/torch/nn/parallel/parallel_apply.py:108\u001b[0m, in \u001b[0;36mparallel_apply\u001b[0;34m(modules, inputs, kwargs_tup, devices)\u001b[0m\n\u001b[1;32m 106\u001b[0m output \u001b[38;5;241m=\u001b[39m results[i]\n\u001b[1;32m 107\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(output, ExceptionWrapper):\n\u001b[0;32m--> 108\u001b[0m output\u001b[38;5;241m.\u001b[39mreraise()\n\u001b[1;32m 109\u001b[0m outputs\u001b[38;5;241m.\u001b[39mappend(output)\n\u001b[1;32m 110\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m outputs\n", "File \u001b[0;32m~/.conda/envs/frbnn/lib/python3.11/site-packages/torch/_utils.py:706\u001b[0m, in \u001b[0;36mExceptionWrapper.reraise\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 702\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m:\n\u001b[1;32m 703\u001b[0m \u001b[38;5;66;03m# If the exception takes multiple arguments, don't try to\u001b[39;00m\n\u001b[1;32m 704\u001b[0m \u001b[38;5;66;03m# instantiate since we don't know how to\u001b[39;00m\n\u001b[1;32m 705\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(msg) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m--> 706\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m exception\n", "\u001b[0;31mAttributeError\u001b[0m: Caught AttributeError in replica 0 on device 0.\nOriginal Traceback (most recent call last):\n File \"/home/pma/.conda/envs/frbnn/lib/python3.11/site-packages/torch/nn/parallel/parallel_apply.py\", line 83, in _worker\n output = module(*input, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/home/pma/.conda/envs/frbnn/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1553, in _wrapped_call_impl\n return self._call_impl(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File 
\"/home/pma/.conda/envs/frbnn/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1562, in _call_impl\n return forward_call(*args, **kwargs)\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n File \"/home/pma/projects/frbnn_narrow/CNN/resnet_model.py\", line 106, in forward\n return x, self.mask, self.value\n ^^^^^^^^^\n File \"/home/pma/.conda/envs/frbnn/lib/python3.11/site-packages/torch/nn/modules/module.py\", line 1729, in __getattr__\n raise AttributeError(f\"'{type(self).__name__}' object has no attribute '{name}'\")\nAttributeError: 'ResNet' object has no attribute 'mask'\n" ] } ], "source": [ "from utils import CustomDataset, transform, preproc, Convert_ONNX\n", "from torch.utils.data import Dataset, DataLoader\n", "from utils import CustomDataset, TestingDataset, transform\n", "from tqdm import tqdm\n", "import torch\n", "import numpy as np\n", "from resnet_model import ResidualBlock, ResNet\n", "import torch\n", "import torch.nn as nn\n", "import torch.optim as optim\n", "from tqdm import tqdm \n", "import torch.nn.functional as F\n", "from torch.optim.lr_scheduler import ReduceLROnPlateau\n", "import pickle\n", "\n", "torch.manual_seed(1)\n", "# torch.manual_seed(42)\n", "\n", "\n", "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", "num_gpus = torch.cuda.device_count()\n", "print(num_gpus)\n", "\n", "test_data_dir = '/mnt/buf1/pma/frbnn/test_ready'\n", "test_dataset = TestingDataset(test_data_dir, transform=transform)\n", "\n", "num_classes = 2\n", "testloader = DataLoader(test_dataset, batch_size=420, shuffle=True, num_workers=32)\n", "\n", "model = ResNet(24, ResidualBlock, [3, 4, 6, 3], num_classes=num_classes).to(device)\n", "model = nn.DataParallel(model)\n", "model = model.to(device)\n", "params = sum(p.numel() for p in model.parameters())\n", "print(\"num params \",params)\n", "\n", "model_1 = 'models/model-23-99.045.pt'\n", "# model_1 ='models/model-47-99.125.pt'\n", "model.load_state_dict(torch.load(model_1, weights_only=True))\n", "model = model.eval()\n", "\n", "# eval\n", "val_loss = 0.0\n", "correct_valid = 0\n", "total = 0\n", "results = {'output': [],'pred': [], 'true':[], 'freq':[], 'snr':[], 'dm':[], 'boxcar':[]}\n", "model.eval()\n", "with torch.no_grad():\n", " for images, labels in tqdm(testloader):\n", " inputs, labels = images.to(device), labels\n", " outputs = model(inputs)\n", " _, predicted = torch.max(outputs, 1)\n", " results['output'].extend(outputs.cpu().numpy().tolist())\n", " results['pred'].extend(predicted.cpu().numpy().tolist())\n", " results['true'].extend(labels[0].cpu().numpy().tolist())\n", " results['freq'].extend(labels[2].cpu().numpy().tolist())\n", " results['dm'].extend(labels[1].cpu().numpy().tolist())\n", " results['snr'].extend(labels[3].cpu().numpy().tolist())\n", " results['boxcar'].extend(labels[4].cpu().numpy().tolist())\n", " total += labels[0].size(0)\n", " correct_valid += (predicted.cpu() == labels[0].cpu()).sum().item()\n", "# Calculate training accuracy after each epoch\n", "val_accuracy = correct_valid / total * 100.0\n", "print(\"===========================\")\n", "print('accuracy: ', val_accuracy)\n", "print(\"===========================\")\n", "\n", "import pickle\n", "\n", "# Pickle the dictionary to a file\n", "with open('models/test_42.pkl', 'wb') as f:\n", " pickle.dump(results, f)" ] }, { "cell_type": "code", "execution_count": null, "id": "64733667-75c3-4fd3-ab9f-62b85c5e27e3", "metadata": {}, "outputs": [], "source": [ "from utils import CustomDataset, transform, preproc, 
Convert_ONNX\n", "from torch.utils.data import Dataset, DataLoader\n", "from utils import CustomDataset, TestingDataset, transform\n", "from tqdm import tqdm\n", "import torch\n", "import numpy as np\n", "from resnet_model_mask import ResidualBlock, ResNet\n", "import torch\n", "import torch.nn as nn\n", "import torch.optim as optim\n", "from tqdm import tqdm \n", "import torch.nn.functional as F\n", "from torch.optim.lr_scheduler import ReduceLROnPlateau\n", "import pickle\n", "\n", "torch.manual_seed(1)\n", "# torch.manual_seed(42)\n", "\n", "\n", "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", "num_gpus = torch.cuda.device_count()\n", "print(num_gpus)\n", "\n", "test_data_dir = '/mnt/buf1/pma/frbnn/test_ready'\n", "test_dataset = TestingDataset(test_data_dir, transform=transform)\n", "\n", "num_classes = 2\n", "testloader = DataLoader(test_dataset, batch_size=420, shuffle=True, num_workers=32)\n", "\n", "model = ResNet(24, ResidualBlock, [3, 4, 6, 3], num_classes=num_classes).to(device)\n", "model = nn.DataParallel(model)\n", "model = model.to(device)\n", "params = sum(p.numel() for p in model.parameters())\n", "print(\"num params \",params)\n", "\n", "\n", "model_1 = 'models/model-14-98.005.pt'\n", "# model_1 ='models/model-47-99.125.pt'\n", "model.load_state_dict(torch.load(model_1, weights_only=True))\n", "model = model.eval()\n", "\n", "# eval\n", "val_loss = 0.0\n", "correct_valid = 0\n", "total = 0\n", "results = {'output': [],'pred': [], 'true':[], 'freq':[], 'snr':[], 'dm':[], 'boxcar':[]}\n", "model.eval()\n", "with torch.no_grad():\n", " for images, labels in tqdm(testloader):\n", " inputs, labels = images.to(device), labels\n", " outputs = model(inputs)\n", " _, predicted = torch.max(outputs, 1)\n", " results['output'].extend(outputs.cpu().numpy().tolist())\n", " results['pred'].extend(predicted.cpu().numpy().tolist())\n", " results['true'].extend(labels[0].cpu().numpy().tolist())\n", " results['freq'].extend(labels[2].cpu().numpy().tolist())\n", " results['dm'].extend(labels[1].cpu().numpy().tolist())\n", " results['snr'].extend(labels[3].cpu().numpy().tolist())\n", " results['boxcar'].extend(labels[4].cpu().numpy().tolist())\n", " total += labels[0].size(0)\n", " correct_valid += (predicted.cpu() == labels[0].cpu()).sum().item()\n", " \n", "# Calculate training accuracy after each epoch\n", "val_accuracy = correct_valid / total * 100.0\n", "print(\"===========================\")\n", "print('accuracy: ', val_accuracy)\n", "print(\"===========================\")\n", "\n", "import pickle\n", "\n", "# Pickle the dictionary to a file\n", "with open('models/test_1.pkl', 'wb') as f:\n", " pickle.dump(results, f)" ] }, { "cell_type": "code", "execution_count": null, "id": "fe74ada8-43e4-4c73-b772-0ef18983345d", "metadata": {}, "outputs": [], "source": [ "from utils import CustomDataset, transform, preproc, Convert_ONNX\n", "from torch.utils.data import Dataset, DataLoader\n", "from utils import CustomDataset, TestingDataset, transform\n", "from tqdm import tqdm\n", "import torch\n", "import numpy as np\n", "from resnet_model_mask import ResidualBlock, ResNet\n", "import torch\n", "import torch.nn as nn\n", "import torch.optim as optim\n", "from tqdm import tqdm \n", "import torch.nn.functional as F\n", "from torch.optim.lr_scheduler import ReduceLROnPlateau\n", "import pickle\n", "\n", "torch.manual_seed(1)\n", "# torch.manual_seed(42)\n", "\n", "\n", "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", "num_gpus = 
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fe74ada8-43e4-4c73-b772-0ef18983345d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Same evaluation for a second resnet_model_mask checkpoint\n",
    "from torch.utils.data import DataLoader\n",
    "from utils import TestingDataset, transform\n",
    "from resnet_model_mask import ResidualBlock, ResNet\n",
    "from tqdm import tqdm\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import pickle\n",
    "\n",
    "torch.manual_seed(1)\n",
    "# torch.manual_seed(42)\n",
    "\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "num_gpus = torch.cuda.device_count()\n",
    "print(num_gpus)\n",
    "\n",
    "test_data_dir = '/mnt/buf1/pma/frbnn/test_ready'\n",
    "test_dataset = TestingDataset(test_data_dir, transform=transform)\n",
    "\n",
    "num_classes = 2\n",
    "testloader = DataLoader(test_dataset, batch_size=420, shuffle=True, num_workers=32)\n",
    "\n",
    "model = ResNet(24, ResidualBlock, [3, 4, 6, 3], num_classes=num_classes).to(device)\n",
    "model = nn.DataParallel(model)\n",
    "params = sum(p.numel() for p in model.parameters())\n",
    "print(\"num params \", params)\n",
    "\n",
    "model_1 = 'models/model-28-98.955.pt'\n",
    "# model_1 = 'models/model-47-99.125.pt'\n",
    "model.load_state_dict(torch.load(model_1, weights_only=True))\n",
    "model.eval()\n",
    "\n",
    "correct_valid = 0\n",
    "total = 0\n",
    "results = {'output': [], 'pred': [], 'true': [], 'freq': [], 'snr': [], 'dm': [], 'boxcar': []}\n",
    "with torch.no_grad():\n",
    "    for images, labels in tqdm(testloader):\n",
    "        inputs = images.to(device)\n",
    "        outputs = model(inputs)\n",
    "        _, predicted = torch.max(outputs, 1)\n",
    "        results['output'].extend(outputs.cpu().numpy().tolist())\n",
    "        results['pred'].extend(predicted.cpu().numpy().tolist())\n",
    "        results['true'].extend(labels[0].cpu().numpy().tolist())\n",
    "        results['freq'].extend(labels[2].cpu().numpy().tolist())\n",
    "        results['dm'].extend(labels[1].cpu().numpy().tolist())\n",
    "        results['snr'].extend(labels[3].cpu().numpy().tolist())\n",
    "        results['boxcar'].extend(labels[4].cpu().numpy().tolist())\n",
    "        total += labels[0].size(0)\n",
    "        correct_valid += (predicted.cpu() == labels[0].cpu()).sum().item()\n",
    "\n",
    "# Overall test accuracy\n",
    "val_accuracy = correct_valid / total * 100.0\n",
    "print(\"===========================\")\n",
    "print('accuracy: ', val_accuracy)\n",
    "print(\"===========================\")\n",
    "\n",
    "# Pickle the results dictionary to a file\n",
    "with open('models/test_7109.pkl', 'wb') as f:\n",
    "    pickle.dump(results, f)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}