{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "98165c88-8ead-4fae-9ea8-6b2e82996fc5", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "2\n", "num params encoder 50840\n", "num params 21496282\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/home/pma/.conda/envs/frbnn/lib/python3.11/site-packages/torch/nn/parallel/parallel_apply.py:79: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n", " with torch.cuda.device(device), torch.cuda.stream(stream), autocast(enabled=autocast_enabled):\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "===========================\n", "accuracy: 99.195\n", "===========================\n", "False Positive Rate: 0.005\n", "Precision: 0.995\n", "Recall: 0.989\n", "F1 Score: 0.992\n" ] } ], "source": [ "from utils import CustomDataset, transform, Convert_ONNX\n", "from torch.utils.data import Dataset, DataLoader\n", "from utils import CustomDataset, TestingDataset, transform\n", "from tqdm import tqdm\n", "import torch\n", "import numpy as np\n", "from resnet_model_mask import ResidualBlock, ResNet\n", "import torch\n", "import torch.nn as nn\n", "import torch.optim as optim\n", "from tqdm import tqdm \n", "import torch.nn.functional as F\n", "from torch.optim.lr_scheduler import ReduceLROnPlateau\n", "import pickle\n", "\n", "torch.manual_seed(1)\n", "# torch.manual_seed(42)\n", "\n", "\n", "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", "num_gpus = torch.cuda.device_count()\n", "print(num_gpus)\n", "threshold = 0.992\n", "\n", "test_data_dir = '/mnt/buf1/pma/frbnn/test_ready'\n", "test_dataset = TestingDataset(test_data_dir, transform=transform)\n", "\n", "num_classes = 2\n", "testloader = DataLoader(test_dataset, batch_size=420, shuffle=True, num_workers=32)\n", "\n", "model = ResNet(24, ResidualBlock, [3, 4, 6, 3], num_classes=num_classes).to(device)\n", "model = nn.DataParallel(model)\n", "model = model.to(device)\n", "params = sum(p.numel() for p in model.parameters())\n", "print(\"num params \",params)\n", "\n", "model_1 = 'models_mask/model-43-99.235_42.pt'\n", "# model_1 ='models/model-47-99.125.pt'\n", "model.load_state_dict(torch.load(model_1, weights_only=True))\n", "model = model.eval()\n", "\n", "# eval\n", "val_loss = 0.0\n", "correct_valid = 0\n", "total = 0\n", "results = {'output': [],'pred': [], 'true':[], 'freq':[], 'snr':[], 'dm':[], 'boxcar':[]}\n", "model.eval()\n", "with torch.no_grad():\n", " for images, labels in testloader:\n", " inputs, labels = images.to(device), labels\n", " outputs = nn.Softmax(dim = 1)(model(inputs))\n", " selection = outputs[:, 1] > threshold\n", " predicted = selection.int()\n", " results['pred'].extend(predicted.cpu().numpy().tolist())\n", " results['true'].extend(labels[0].cpu().numpy().tolist())\n", " results['freq'].extend(labels[2].cpu().numpy().tolist())\n", " results['dm'].extend(labels[1].cpu().numpy().tolist())\n", " results['snr'].extend(labels[3].cpu().numpy().tolist())\n", " results['boxcar'].extend(labels[4].cpu().numpy().tolist())\n", " total += labels[0].size(0)\n", " correct_valid += (predicted.cpu() == labels[0].cpu()).sum().item()\n", " \n", "# Calculate training accuracy after each epoch\n", "val_accuracy = correct_valid / total * 100.0\n", "print(\"===========================\")\n", "print('accuracy: ', val_accuracy)\n", "print(\"===========================\")\n", "\n", "import pickle\n", "\n", "# Pickle the 
dictionary to a file\n", "with open('models_mask/test_42.pkl', 'wb') as f:\n", " pickle.dump(results, f)\n", "\n", "from sklearn.metrics import precision_score, recall_score, f1_score\n", "from sklearn.metrics import confusion_matrix\n", "\n", "# Example binary labels\n", "true = results['true'] # ground truth\n", "pred = results['pred'] # predicted\n", "\n", "# Compute metrics\n", "precision = precision_score(true, pred)\n", "recall = recall_score(true, pred)\n", "f1 = f1_score(true, pred)\n", "# Get confusion matrix: TN, FP, FN, TP\n", "tn, fp, fn, tp = confusion_matrix(true, pred).ravel()\n", "\n", "# Compute FPR\n", "fpr = fp / (fp + tn)\n", "\n", "print(f\"False Positive Rate: {fpr:.3f}\")\n", "\n", "print(f\"Precision: {precision:.3f}\")\n", "print(f\"Recall: {recall:.3f}\")\n", "print(f\"F1 Score: {f1:.3f}\")\n" ] }, { "cell_type": "code", "execution_count": 2, "id": "64733667-75c3-4fd3-ab9f-62b85c5e27e3", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "2\n", "num params encoder 50840\n", "num params 21496282\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/home/pma/.conda/envs/frbnn/lib/python3.11/site-packages/torch/nn/parallel/parallel_apply.py:79: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n", " with torch.cuda.device(device), torch.cuda.stream(stream), autocast(enabled=autocast_enabled):\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "===========================\n", "accuracy: 99.195\n", "===========================\n", "False Positive Rate: 0.007\n", "Precision: 0.993\n", "Recall: 0.991\n", "F1 Score: 0.992\n" ] } ], "source": [ "from utils import CustomDataset, transform, Convert_ONNX\n", "from torch.utils.data import Dataset, DataLoader\n", "from utils import CustomDataset, TestingDataset, transform\n", "from tqdm import tqdm\n", "import torch\n", "import numpy as np\n", "from resnet_model_mask import ResidualBlock, ResNet\n", "import torch\n", "import torch.nn as nn\n", "import torch.optim as optim\n", "from tqdm import tqdm \n", "import torch.nn.functional as F\n", "from torch.optim.lr_scheduler import ReduceLROnPlateau\n", "import pickle\n", "\n", "torch.manual_seed(1)\n", "\n", "\n", "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", "num_gpus = torch.cuda.device_count()\n", "print(num_gpus)\n", "\n", "test_data_dir = '/mnt/buf1/pma/frbnn/test_ready'\n", "test_dataset = TestingDataset(test_data_dir, transform=transform)\n", "\n", "num_classes = 2\n", "testloader = DataLoader(test_dataset, batch_size=420, shuffle=True, num_workers=32)\n", "\n", "model = ResNet(24, ResidualBlock, [3, 4, 6, 3], num_classes=num_classes).to(device)\n", "model = nn.DataParallel(model)\n", "model = model.to(device)\n", "params = sum(p.numel() for p in model.parameters())\n", "print(\"num params \",params)\n", "\n", "\n", "model_1 = 'models_mask/model-36-99.11999999999999_1.pt'\n", "# model_1 ='models/model-47-99.125.pt'\n", "model.load_state_dict(torch.load(model_1, weights_only=True))\n", "model = model.eval()\n", "\n", "# eval\n", "val_loss = 0.0\n", "correct_valid = 0\n", "total = 0\n", "results = {'output': [],'pred': [], 'true':[], 'freq':[], 'snr':[], 'dm':[], 'boxcar':[]}\n", "model.eval()\n", "with torch.no_grad():\n", " for images, labels in testloader:\n", " inputs, labels = images.to(device), labels\n", " outputs = nn.Softmax(dim = 1)(model(inputs))\n", " selection = outputs[:, 1] > threshold\n", " predicted 
= selection.int()\n", " results['pred'].extend(predicted.cpu().numpy().tolist())\n", " results['true'].extend(labels[0].cpu().numpy().tolist())\n", " results['freq'].extend(labels[2].cpu().numpy().tolist())\n", " results['dm'].extend(labels[1].cpu().numpy().tolist())\n", " results['snr'].extend(labels[3].cpu().numpy().tolist())\n", " results['boxcar'].extend(labels[4].cpu().numpy().tolist())\n", " total += labels[0].size(0)\n", " correct_valid += (predicted.cpu() == labels[0].cpu()).sum().item()\n", " \n", " \n", "# Calculate training accuracy after each epoch\n", "val_accuracy = correct_valid / total * 100.0\n", "print(\"===========================\")\n", "print('accuracy: ', val_accuracy)\n", "print(\"===========================\")\n", "\n", "import pickle\n", "\n", "# Pickle the dictionary to a file\n", "with open('models_mask/test_1.pkl', 'wb') as f:\n", " pickle.dump(results, f)\n", "\n", "from sklearn.metrics import precision_score, recall_score, f1_score\n", "\n", "# Example binary labels\n", "true = results['true'] # ground truth\n", "pred = results['pred'] # predicted\n", "\n", "# Compute metrics\n", "precision = precision_score(true, pred)\n", "recall = recall_score(true, pred)\n", "f1 = f1_score(true, pred)\n", "# Get confusion matrix: TN, FP, FN, TP\n", "tn, fp, fn, tp = confusion_matrix(true, pred).ravel()\n", "\n", "# Compute FPR\n", "fpr = fp / (fp + tn)\n", "\n", "print(f\"False Positive Rate: {fpr:.3f}\")\n", "\n", "print(f\"Precision: {precision:.3f}\")\n", "print(f\"Recall: {recall:.3f}\")\n", "print(f\"F1 Score: {f1:.3f}\")" ] }, { "cell_type": "code", "execution_count": 3, "id": "fe74ada8-43e4-4c73-b772-0ef18983345d", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "2\n", "num params encoder 50840\n", "num params 21496282\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/home/pma/.conda/envs/frbnn/lib/python3.11/site-packages/torch/nn/parallel/parallel_apply.py:79: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. 
   "source": [
    "from torch.utils.data import DataLoader\n",
    "from utils import TestingDataset, transform\n",
    "from resnet_model_mask import ResidualBlock, ResNet\n",
    "from sklearn.metrics import precision_score, recall_score, f1_score, confusion_matrix\n",
    "import numpy as np\n",
    "import pickle\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "torch.manual_seed(1)\n",
    "# torch.manual_seed(42)\n",
    "\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "num_gpus = torch.cuda.device_count()\n",
    "print(num_gpus)\n",
    "threshold = 0.992  # same operating threshold as the first cell\n",
    "\n",
    "test_data_dir = '/mnt/buf1/pma/frbnn/test_ready'\n",
    "test_dataset = TestingDataset(test_data_dir, transform=transform)\n",
    "\n",
    "num_classes = 2\n",
    "testloader = DataLoader(test_dataset, batch_size=420, shuffle=True, num_workers=32)\n",
    "\n",
    "model = ResNet(24, ResidualBlock, [3, 4, 6, 3], num_classes=num_classes).to(device)\n",
    "model = nn.DataParallel(model)\n",
    "model = model.to(device)\n",
    "params = sum(p.numel() for p in model.parameters())\n",
    "print(\"num params \", params)\n",
    "\n",
    "model_1 = 'models_mask/model-26-99.13_7109.pt'\n",
    "# model_1 = 'models/model-47-99.125.pt'\n",
    "model.load_state_dict(torch.load(model_1, weights_only=True))\n",
    "model = model.eval()\n",
    "\n",
    "# evaluate on the held-out test set\n",
    "correct_valid = 0\n",
    "total = 0\n",
    "results = {'output': [], 'pred': [], 'true': [], 'freq': [], 'snr': [], 'dm': [], 'boxcar': []}\n",
    "with torch.no_grad():\n",
    "    for images, labels in testloader:\n",
    "        inputs, labels = images.to(device), labels\n",
    "        outputs = nn.Softmax(dim=1)(model(inputs))\n",
    "        selection = outputs[:, 1] > threshold\n",
    "        predicted = selection.int()\n",
    "        results['output'].extend(outputs[:, 1].cpu().numpy().tolist())  # raw positive-class scores\n",
    "        results['pred'].extend(predicted.cpu().numpy().tolist())\n",
    "        results['true'].extend(labels[0].cpu().numpy().tolist())\n",
    "        results['freq'].extend(labels[2].cpu().numpy().tolist())\n",
    "        results['dm'].extend(labels[1].cpu().numpy().tolist())\n",
    "        results['snr'].extend(labels[3].cpu().numpy().tolist())\n",
    "        results['boxcar'].extend(labels[4].cpu().numpy().tolist())\n",
    "        total += labels[0].size(0)\n",
    "        correct_valid += (predicted.cpu() == labels[0].cpu()).sum().item()\n",
    "\n",
    "# overall test accuracy\n",
    "val_accuracy = correct_valid / total * 100.0\n",
    "print(\"===========================\")\n",
    "print('accuracy: ', val_accuracy)\n",
    "print(\"===========================\")\n",
    "\n",
    "# pickle the results dictionary to a file\n",
    "with open('models_mask/test_7109.pkl', 'wb') as f:\n",
    "    pickle.dump(results, f)\n",
    "\n",
    "# ground-truth and predicted labels\n",
    "true = results['true']\n",
    "pred = results['pred']\n",
    "\n",
    "# compute metrics\n",
    "precision = precision_score(true, pred)\n",
    "recall = recall_score(true, pred)\n",
    "f1 = f1_score(true, pred)\n",
    "\n",
    "# confusion matrix: TN, FP, FN, TP\n",
    "tn, fp, fn, tp = confusion_matrix(true, pred).ravel()\n",
    "\n",
    "# false positive rate\n",
    "fpr = fp / (fp + tn)\n",
    "\n",
    "print(f\"False Positive Rate: {fpr:.3f}\")\n",
    "print(f\"Precision: {precision:.3f}\")\n",
    "print(f\"Recall: {recall:.3f}\")\n",
    "print(f\"F1 Score: {f1:.3f}\")"
   ] },
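  { "cell_type": "code", "execution_count": null, "id": "seed-aggregate-sketch", "metadata": {}, "outputs": [],
   "source": [
    "# Optional sketch: recompute the per-seed metrics from the pickled results\n",
    "# written above (assumes the three files exist on disk) instead of\n",
    "# hard-coding the numbers as in the next cell.\n",
    "import pickle\n",
    "import numpy as np\n",
    "from sklearn.metrics import recall_score, f1_score, confusion_matrix\n",
    "\n",
    "seed_files = ['models_mask/test_42.pkl', 'models_mask/test_1.pkl', 'models_mask/test_7109.pkl']\n",
    "accs, recalls, f1s, fprs = [], [], [], []\n",
    "for path in seed_files:\n",
    "    with open(path, 'rb') as f:\n",
    "        res = pickle.load(f)\n",
    "    true, pred = res['true'], res['pred']\n",
    "    tn, fp, fn, tp = confusion_matrix(true, pred).ravel()\n",
    "    accs.append((tp + tn) / (tp + tn + fp + fn) * 100.0)\n",
    "    recalls.append(recall_score(true, pred))\n",
    "    f1s.append(f1_score(true, pred))\n",
    "    fprs.append(fp / (fp + tn))\n",
    "\n",
    "for name, vals in [('accuracy', accs), ('recall', recalls), ('f1', f1s), ('fpr', fprs)]:\n",
    "    print(f\"{name}: mean {np.mean(vals):.4f}  std {np.std(vals):.4f}\")\n"
   ] },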
FP, FN, TP\n", "tn, fp, fn, tp = confusion_matrix(true, pred).ravel()\n", "\n", "# Compute FPR\n", "fpr = fp / (fp + tn)\n", "\n", "print(f\"False Positive Rate: {fpr:.3f}\")\n", "\n", "print(f\"Precision: {precision:.3f}\")\n", "print(f\"Recall: {recall:.3f}\")\n", "print(f\"F1 Score: {f1:.3f}\")" ] }, { "cell_type": "code", "execution_count": 9, "id": "974e62d6-5088-4cd8-9721-6702717eadee", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "99.14166666666665 0.07542472332656346\n", "0.989 0.16329931618554536\n", "0.9913333333333334 0.0012472191289246482\n", "0.6333333333333333 0.09428090415820634\n" ] } ], "source": [ "# acc\n", "print(np.mean([99.195,99.195, 99.035 ]), np.std([99.195,99.195, 99.035]))\n", "# recall\n", "print(np.mean([0.989,0.991, 0.987]), np.std([0.989,0.991, 0.987])*100)\n", "# f1\n", "print(np.mean([0.992,0.992,0.990 ]),np.std([0.990,0.988,0.991 ]))\n", "# fp\n", "print(np.mean([0.005,0.007,0.007 ])*100,np.std([0.005,0.007,0.007])*100)\n" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.9" } }, "nbformat": 4, "nbformat_minor": 5 }