{ "nbformat": 4, "nbformat_minor": 0, "metadata": { "colab": { "provenance": [] }, "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" }, "accelerator": "GPU", "gpuClass": "standard" }, "cells": [ { "cell_type": "code", "execution_count": 32, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "L6gytYO-DHMK", "outputId": "b0c87fe1-77a4-45c7-8ea4-b8211cc0c4a7" }, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n" ] } ], "source": [ "from google.colab import drive\n", "drive.mount('/content/drive')" ] }, { "cell_type": "code", "source": [ "%pip install efficientnet-pytorch" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "OoBBN22XDRNG", "outputId": "c63a35aa-a077-44c7-93e5-bc9ba9732770" }, "execution_count": 33, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/\n", "Requirement already satisfied: efficientnet-pytorch in /usr/local/lib/python3.9/dist-packages (0.7.1)\n", "Requirement already satisfied: torch in /usr/local/lib/python3.9/dist-packages (from efficientnet-pytorch) (2.0.0+cu118)\n", "Requirement already satisfied: typing-extensions in /usr/local/lib/python3.9/dist-packages (from torch->efficientnet-pytorch) (4.5.0)\n", "Requirement already satisfied: sympy in /usr/local/lib/python3.9/dist-packages (from torch->efficientnet-pytorch) (1.11.1)\n", "Requirement already satisfied: filelock in /usr/local/lib/python3.9/dist-packages (from torch->efficientnet-pytorch) (3.11.0)\n", "Requirement already satisfied: networkx in /usr/local/lib/python3.9/dist-packages (from torch->efficientnet-pytorch) (3.1)\n", "Requirement already satisfied: triton==2.0.0 in /usr/local/lib/python3.9/dist-packages (from torch->efficientnet-pytorch) (2.0.0)\n", "Requirement already satisfied: jinja2 in /usr/local/lib/python3.9/dist-packages (from torch->efficientnet-pytorch) (3.1.2)\n", "Requirement already satisfied: lit in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch->efficientnet-pytorch) (16.0.1)\n", "Requirement already satisfied: cmake in /usr/local/lib/python3.9/dist-packages (from triton==2.0.0->torch->efficientnet-pytorch) (3.25.2)\n", "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.9/dist-packages (from jinja2->torch->efficientnet-pytorch) (2.1.2)\n", "Requirement already satisfied: mpmath>=0.19 in /usr/local/lib/python3.9/dist-packages (from sympy->torch->efficientnet-pytorch) (1.3.0)\n" ] } ] }, { "cell_type": "code", "source": [ "import numpy as np\n", "import pandas as pd\n", "import matplotlib.pyplot as plt\n", "import os\n", "from PIL import Image\n", "import torch\n", "from torch import nn, optim\n", "import torch.nn.functional as F\n", "from torch.utils.data import DataLoader, Dataset\n", "import albumentations as A\n", "from albumentations.pytorch import ToTensorV2 \n", "from tqdm import tqdm\n", "from torchvision import models\n", "from efficientnet_pytorch import EfficientNet\n", "from sklearn import metrics" ], "metadata": { "id": "phJgllqcDSuH" }, "execution_count": 34, "outputs": [] }, { "cell_type": "code", "source": [ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")" ], "metadata": { "id": "DyUTFa31DTdp" }, "execution_count": 35, "outputs": [] }, { 
"cell_type": "code", "source": [ "class Dataset(Dataset):\n", " def __init__(self, root_images, root_file, transform = None):\n", " self.root_images = root_images\n", " self.root_file = root_file\n", " self.transform = transform\n", " self.file = pd.read_csv(root_file)\n", "\n", "\n", " def __len__(self):\n", " return self.file.shape[0]\n", " \n", " def __getitem__(self,index):\n", " img_path = os.path.join(self.root_images, self.file['id'][index])\n", " image = np.array(Image.open(img_path).convert('RGB'))\n", " \n", " if self.transform is not None:\n", " augmentations = self.transform(image = image)\n", " image = augmentations['image'] \n", " \n", " return image" ], "metadata": { "id": "kTk-mXXUDUUA" }, "execution_count": 36, "outputs": [] }, { "cell_type": "code", "source": [ "learning_rate = 0.0001\n", "batch_size = 32\n", "epochs = 10\n", "height = 224 \n", "width = 224\n", "IMG = '/content/drive/MyDrive/Colab Notebooks/AI images or Not/test'\n", "FILE = '/content/sample_submission.csv'" ], "metadata": { "id": "HXEpa4PlDU85" }, "execution_count": 37, "outputs": [] }, { "cell_type": "code", "source": [ "def get_loader(image, file, batch_size, test_transform):\n", " \n", " test_ds = Dataset(image , file, test_transform)\n", " test_loader = DataLoader(test_ds, batch_size= batch_size, shuffle= False)\n", "\n", "\n", "\n", " return test_loader " ], "metadata": { "id": "i-VOTQp2DVbK" }, "execution_count": 38, "outputs": [] }, { "cell_type": "code", "source": [ "normalize = A.Normalize(\n", " mean = [0.485 , 0.456 , 0.406],\n", " std = [0.229 , 0.224, 0.255],\n", " max_pixel_value= 255.0\n", ")\n", "\n", "\n", "test_transform = A.Compose(\n", " [A.Resize(width=width , height= height),\n", " normalize,\n", " ToTensorV2()\n", " ]\n", ")\n" ], "metadata": { "id": "RD4GnrT6DVpr" }, "execution_count": 39, "outputs": [] }, { "cell_type": "code", "source": [ "class Net(nn.Module):\n", " def __init__(self):\n", " super().__init__()\n", " self.model = EfficientNet.from_pretrained('efficientnet-b4')\n", " self.fct = nn.Linear(1000,1)\n", " \n", " def forward(self,img):\n", " x = self.model(img)\n", " # print(x.shape)\n", " x = self.fct(x)\n", " return x" ], "metadata": { "id": "HYH0pBe9DV3M" }, "execution_count": 40, "outputs": [] }, { "cell_type": "code", "source": [ "def load_checkpoint(checkpoint, model, optimizer):\n", " print('====> Loading...')\n", " model.load_state_dict(checkpoint['state_dict'])\n", " optimizer.load_state_dict(checkpoint['optimizer'])" ], "metadata": { "id": "1Ype_u3qDV-n" }, "execution_count": 41, "outputs": [] }, { "cell_type": "code", "source": [ "test = pd.read_csv(FILE)\n", "test" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 424 }, "id": "Jf_Is1qDGz-W", "outputId": "cf79a4c0-2bca-473c-886e-726d7956015d" }, "execution_count": 42, "outputs": [ { "output_type": "execute_result", "data": { "text/plain": [ " id label\n", "0 0.jpg 0\n", "1 1.jpg 0\n", "2 10.jpg 0\n", "3 100.jpg 0\n", "4 1000.jpg 0\n", "... ... ...\n", "43437 9995.jpg 0\n", "43438 9996.jpg 0\n", "43439 9997.jpg 0\n", "43440 9998.jpg 0\n", "43441 9999.jpg 0\n", "\n", "[43442 rows x 2 columns]" ], "text/html": [ "\n", "
\n", " | id | \n", "label | \n", "
---|---|---|
0 | \n", "0.jpg | \n", "0 | \n", "
1 | \n", "1.jpg | \n", "0 | \n", "
2 | \n", "10.jpg | \n", "0 | \n", "
3 | \n", "100.jpg | \n", "0 | \n", "
4 | \n", "1000.jpg | \n", "0 | \n", "
... | \n", "... | \n", "... | \n", "
43437 | \n", "9995.jpg | \n", "0 | \n", "
43438 | \n", "9996.jpg | \n", "0 | \n", "
43439 | \n", "9997.jpg | \n", "0 | \n", "
43440 | \n", "9998.jpg | \n", "0 | \n", "
43441 | \n", "9999.jpg | \n", "0 | \n", "
43442 rows × 2 columns
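{ "cell_type": "markdown", "metadata": {}, "source": [ "The cells that loaded the trained checkpoint and produced the predictions shown below are missing from this copy of the notebook; only their output survives. The next cell is a reconstruction sketch, not the original code: the checkpoint path, the 0.5 sigmoid threshold, and writing predictions into the `label` column of the sample submission are assumptions inferred from `load_checkpoint` above and the recovered output.\n" ] },
{ "cell_type": "code", "source": [ "# Reconstruction sketch (not the original cell): run the saved model over the test set\n", "# and convert sigmoid scores into 0/1 labels. CHECKPOINT is an assumed path.\n", "CHECKPOINT = '/content/drive/MyDrive/Colab Notebooks/AI images or Not/my_checkpoint.pth.tar'\n", "\n", "test_loader = get_loader(IMG, FILE, batch_size, test_transform)\n", "\n", "model = Net().to(device)\n", "optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n", "load_checkpoint(torch.load(CHECKPOINT, map_location=device), model, optimizer)\n", "\n", "model.eval()\n", "predictions = []\n", "with torch.no_grad():\n", "    for images in tqdm(test_loader):\n", "        images = images.to(device)\n", "        scores = torch.sigmoid(model(images)).squeeze(1)\n", "        predictions += (scores > 0.5).long().cpu().tolist()\n", "\n", "test['label'] = predictions\n", "test.to_csv('submission.csv', index=False)" ], "metadata": {}, "execution_count": null, "outputs": [] },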
\n", "\n", " | id | \n", "label | \n", "
---|---|---|
0 | \n", "0.jpg | \n", "0 | \n", "
1 | \n", "1.jpg | \n", "0 | \n", "
2 | \n", "10.jpg | \n", "0 | \n", "
3 | \n", "100.jpg | \n", "1 | \n", "
4 | \n", "1000.jpg | \n", "0 | \n", "
... | \n", "... | \n", "... | \n", "
43437 | \n", "9995.jpg | \n", "1 | \n", "
43438 | \n", "9996.jpg | \n", "0 | \n", "
43439 | \n", "9997.jpg | \n", "0 | \n", "
43440 | \n", "9998.jpg | \n", "0 | \n", "
43441 | \n", "9999.jpg | \n", "1 | \n", "
43442 rows × 2 columns
\n", "