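# FungiCLEF 2024 inference script: loads a MetaFormer (CAFormer-S18) checkpoint,
# runs it over the private test set images, and writes a submission CSV mapping
# each observation_id to a predicted class_id.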
import pandas as pd
import numpy as np
import os
from tqdm import tqdm
import timm
import torchvision.transforms as T
from PIL import Image
import torch
from typing import List
def is_gpu_available():
    """Check whether a CUDA-capable GPU is available to PyTorch."""
    return torch.cuda.is_available()
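# Model input resolution and checkpoint configuration; MODEL_PATH is assumed to
# point to a local weights file compatible with the timm architecture below.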
WIDTH = 224
HEIGHT = 224
MODEL_PATH = "metaformer-s-224.pth"
MODEL_NAME = "caformer_s18.sail_in22k"
class PytorchWorker:
    """Run inference using a PyTorch (timm) model."""

    def __init__(self, model_path: str, model_name: str, number_of_categories: int = 1605):

        def _load_model(model_name, model_path):
            print("Setting up Pytorch Model")
            self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            print(f"Using device: {self.device}")

            model = timm.create_model(model_name, num_classes=number_of_categories, pretrained=False)

            # Checkpoint keys may carry a "model." prefix (e.g. when saved from a wrapper
            # module); strip it so they match the timm model's state dict.
            weights = torch.load(model_path, map_location=self.device)
            model.load_state_dict({w.replace("model.", ""): v for w, v in weights.items()})

            return model.to(self.device).eval()

        self.model = _load_model(model_name, model_path)

        self.transforms = T.Compose([T.Resize((HEIGHT, WIDTH)),
                                     T.ToTensor(),
                                     T.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
    def predict_image(self, image: Image.Image) -> List:
        """Run inference with the PyTorch model.

        :param image: Input image as a PIL Image.
        :return: A nested list of per-class logits.
        """
        # Inference only, so disable gradient tracking.
        with torch.no_grad():
            logits = self.model(self.transforms(image).unsqueeze(0).to(self.device))

        return logits.tolist()
def make_submission(test_metadata, model_path, model_name, output_csv_path="./submission.csv", images_root_path="/tmp/data/private_testset"):
    """Make a submission CSV from the given test metadata and model checkpoint."""

    model = PytorchWorker(model_path, model_name)

    predictions = []

    for _, row in tqdm(test_metadata.iterrows(), total=len(test_metadata)):
        # The metadata lists lowercase ".jpg" extensions, but the image files use ".JPG".
        image_path = os.path.join(images_root_path, row.image_path.replace("jpg", "JPG"))

        test_image = Image.open(image_path).convert("RGB")

        logits = model.predict_image(test_image)

        predictions.append(np.argmax(logits))

    test_metadata["class_id"] = predictions

    # Keep one prediction per observation (several images can share an observation_id).
    user_pred_df = test_metadata.drop_duplicates("observation_id", keep="first").copy()

    # The model's extra 1605th category (index 1604) is mapped to -1, the class_id
    # the submission format expects for "unknown" observations.
    for ix, row in user_pred_df.iterrows():
        if row['class_id'] == 1604:
            user_pred_df.loc[ix, 'class_id'] = -1

    user_pred_df[["observation_id", "class_id"]].to_csv(output_csv_path, index=False)
def test_submission():
    metadata_file_path = "../trial_test.csv"
    test_metadata = pd.read_csv(metadata_file_path)

    make_submission(
        test_metadata=test_metadata,
        model_path=MODEL_PATH,
        model_name=MODEL_NAME,
        images_root_path="../data/DF_FULL/"
    )
if __name__ == "__main__":
    # test_submission()

    import zipfile

    with zipfile.ZipFile("/tmp/data/private_testset.zip", 'r') as zip_ref:
        zip_ref.extractall("/tmp/data")

    metadata_file_path = "./FungiCLEF2024_TestMetadata.csv"
    test_metadata = pd.read_csv(metadata_file_path)

    make_submission(
        test_metadata=test_metadata,
        model_path=MODEL_PATH,
        model_name=MODEL_NAME
    )