{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "851f001a-3882-42cf-8e45-1bb7c4193d20", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "6\n", "num params encoder 50840\n", "num params 21496282\n" ] } ], "source": [ "from utils import CustomDataset, transform, preproc, Convert_ONNX\n", "from torch.utils.data import Dataset, DataLoader\n", "import torch\n", "import numpy as np\n", "from resnet_model_mask import ResidualBlock, ResNet\n", "import torch\n", "import torch.nn as nn\n", "import torch.optim as optim\n", "from tqdm import tqdm \n", "import torch.nn.functional as F\n", "from torch.optim.lr_scheduler import ReduceLROnPlateau\n", "import pickle\n", "\n", "torch.manual_seed(1)\n", "# torch.manual_seed(42)\n", "\n", "\n", "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n", "num_gpus = torch.cuda.device_count()\n", "print(num_gpus)\n", "\n", "# Create custom dataset instance\n", "# Create custom dataset instance\n", "data_dir = '/mnt/buf0/pma/frbnn/train_ready'\n", "dataset = CustomDataset(data_dir, transform=transform)\n", "valid_data_dir = '/mnt/buf0/pma/frbnn/valid_ready'\n", "valid_dataset = CustomDataset(valid_data_dir, transform=transform)\n", "\n", "\n", "num_classes = 2\n", "trainloader = DataLoader(dataset, batch_size=420, shuffle=True, num_workers=32)\n", "\n", "model = ResNet(24, ResidualBlock, [3, 4, 6, 3], num_classes=num_classes).to(device)\n", "model = nn.DataParallel(model)\n", "model = model.to(device)\n", "params = sum(p.numel() for p in model.parameters())\n", "print(\"num params \",params)\n" ] }, { "cell_type": "code", "execution_count": 2, "id": "676a6ffa-5bed-403d-ba03-627f14b36de2", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ " 0%| | 0/477 [00:00 29\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m images, labels \u001b[38;5;129;01min\u001b[39;00m validloader:\n\u001b[1;32m 30\u001b[0m inputs, labels \u001b[38;5;241m=\u001b[39m images\u001b[38;5;241m.\u001b[39mto(device), labels\u001b[38;5;241m.\u001b[39mto(device)\u001b[38;5;241m.\u001b[39mfloat()\n\u001b[1;32m 31\u001b[0m optimizer\u001b[38;5;241m.\u001b[39mzero_grad()\n", "\u001b[0;31mNameError\u001b[0m: name 'validloader' is not defined" ] } ], "source": [ "criterion = nn.CrossEntropyLoss(weight = torch.tensor([1,1]).to(device))\n", "optimizer = optim.Adam(model.parameters(), lr=0.0001)\n", "scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=10)\n", "\n", "for epoch in range(5):\n", " running_loss = 0.0\n", " correct_train = 0\n", " total_train = 0\n", " with tqdm(trainloader, unit=\"batch\") as tepoch:\n", " model.train()\n", " for i, (images, labels) in enumerate(tepoch):\n", " inputs, labels = images.to(device), labels.to(device).float()\n", " optimizer.zero_grad()\n", " outputs = model(inputs, return_mask=False).to(device)\n", " new_label = F.one_hot(labels.type(torch.int64),num_classes=2).type(torch.float32).to(device)\n", " loss = criterion(outputs, new_label)\n", " loss.backward()\n", " optimizer.step()\n", " running_loss += loss.item()\n", " # Calculate training accuracy\n", " _, predicted = torch.max(outputs.data, 1)\n", " total_train += labels.size(0)\n", " correct_train += (predicted == labels).sum().item() \n", " val_loss = 0.0\n", " correct_valid = 0\n", " total = 0\n", " model.eval()\n", " with torch.no_grad():\n", " for images, labels in validloader:\n", " inputs, labels = images.to(device), labels.to(device).float()\n", " optimizer.zero_grad()\n", " outputs = model(inputs, return_mask=False)\n", " new_label = F.one_hot(labels.type(torch.int64),num_classes=2).type(torch.float32)\n", " loss = criterion(outputs, new_label)\n", " val_loss += loss.item()\n", " _, predicted = torch.max(outputs, 1)\n", " total += labels.size(0)\n", " correct_valid += (predicted == labels).sum().item()\n", " scheduler.step(val_loss)\n", " # Calculate training accuracy after each epoch\n", " train_accuracy = 100 * correct_train / total_train\n", " val_accuracy = correct_valid / total * 100.0\n", "\n", "\n", " print(\"===========================\")\n", " print('accuracy: ', epoch, train_accuracy, val_accuracy)\n", " print('learning rate: ', scheduler.get_last_lr())\n", " print(\"===========================\")" ] }, { "cell_type": "code", "execution_count": null, "id": "3faa4a11-89fb-4556-ae87-3645a47fa00d", "metadata": {}, "outputs": [], "source": [ "train_accuracy = 100 * correct_train / total_train\n", "print('accuracy: ', epoch, train_accuracy)" ] }, { "cell_type": "code", "execution_count": null, "id": "e586c4d2-a7f4-4f14-81fc-4f84ffac52b3", "metadata": {}, "outputs": [], "source": [ "import sigpyproc.readers as r\n", "import cv2\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "\n", "from scipy.special import softmax\n", "%matplotlib inline\n", "path = '/mnt/primary/ata/projects/p051/fil_60565_59210_9756774_J0534+2200_0001/LoB.C0928/fil_60565_59210_9756774_J0534+2200_0001-beam0000.fil'\n", "# path = '/mnt/primary/ata/projects/p051/fil_60564_62428_4679748_J0332+5434_0001/LoB.C0928/fil_60564_62428_4679748_J0332+5434_0001-beam0000.fil'\n", "\n", "# Get some metadata\n", "\n", "# Open the filterbank file\n", "fil = r.FilReader(path)\n", "header = fil.header\n", "print(\"Header:\", header)\n", "n=100\n", "li = [ 7257608, 7324207, 10393163, 10641071, 11130537, 11085081,\n", " 11419145, 11964112, 12329364, 13047181]\n", "for el in li:\n", " data = torch.tensor(fil.read_block(el-1024, 2048)).cuda()\n", " print(data.shape)\n", " out = model(transform(torch.tensor(data).cuda())[None])\n", " print(softmax(out.detach().cpu().numpy(), axis=1))\n", " plt.figure(figsize=(10,10))\n", " plt.imshow(data.cpu().numpy(), aspect = 10)\n", " plt.show()" ] }, { "cell_type": "code", "execution_count": null, "id": "609e5564-f14f-4bd1-b604-68e7e7d42834", "metadata": {}, "outputs": [], "source": [ "triggers = []\n", "counter = 0\n", "with torch.no_grad():\n", " for i in range(2048,10201921, 2048 ):\n", " data = torch.tensor(fil.read_block(i-1024, 2048)).cuda()\n", " # Shuffle the tensor using the random indices\n", " out = model(transform(torch.tensor(data).cuda())[None])\n", " triggers.append(softmax(out.detach().cpu().numpy(), axis=1))\n", " counter += 1\n", " if counter > 1000:\n", " break" ] }, { "cell_type": "code", "execution_count": null, "id": "08ee6dcf-cb30-4490-8624-4e52552fdf39", "metadata": {}, "outputs": [], "source": [ "print(triggers[0])" ] }, { "cell_type": "code", "execution_count": null, "id": "8c56c6f5-5a0b-4854-8a94-066a9baf4cfc", "metadata": {}, "outputs": [], "source": [ "stack = np.stack(triggers)\n", "positives = stack[:,0,1]\n", "num_pos = np.where(positives > 0.5)[0].shape[0]\n", "print(num_pos)" ] }, { "cell_type": "code", "execution_count": null, "id": "eb1d1591-8855-4989-bf12-c8a9cdbf2a4d", "metadata": {}, "outputs": [], "source": [ "import pickle\n", "\n", "# Path to your pickle file\n", "file_path = \"../dataset_generator/dir.pkl\"\n", "\n", "# Open and load the pickle file\n", "with open(file_path, \"rb\") as file: # Use \"rb\" mode for reading binary files\n", " data = pickle.load(file)\n", "\n", "# Print the contents of the file\n" ] }, { "cell_type": "code", "execution_count": null, "id": "46f61d7e-55fa-44fe-be94-d4ddb3c576f9", "metadata": {}, "outputs": [], "source": [ "import sigpyproc.readers as r\n", "import cv2\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "\n", "from scipy.special import softmax\n", "%matplotlib inline\n", "path = data[0]\n", "model.eval()\n", "\n", "fil = r.FilReader(path)\n", "header = fil.header\n", "print(\"Header:\", header)\n", "n=100\n", "\n", "\n", "triggers = []\n", "counter = 0\n", "for i in range(2048,10201921, 2048):\n", " data = torch.tensor(fil.read_block(i-1024, 2048)).cuda()\n", " # Shuffle the tensor using the random indices\n", " out = model(transform(torch.tensor(data).cuda())[None])\n", " triggers.append(softmax(out.detach().cpu().numpy(), axis=1))\n", " counter += 1\n", " if counter > 1000:\n", " break" ] }, { "cell_type": "code", "execution_count": null, "id": "413d402e-2ce3-49fc-bbd4-a3cf1cc92388", "metadata": {}, "outputs": [], "source": [ "print(triggers[0])" ] }, { "cell_type": "code", "execution_count": null, "id": "5c039dee-1b9b-4664-b42a-a79d780f37f1", "metadata": {}, "outputs": [], "source": [ "stack = np.stack(triggers)\n", "positives = stack[:,0,1]\n", "num_pos = np.where(positives > 0.5)[0].shape[0]\n", "print(num_pos)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.9" } }, "nbformat": 4, "nbformat_minor": 5 }