import os

import torch
import torch.nn as nn
import torchvision.models as models
import torchvision.transforms as transforms
from PIL import Image

# Load a ResNet-50 pretrained on ImageNet.
# (Newer torchvision releases replace pretrained=True with
# weights=models.ResNet50_Weights.DEFAULT.)
model = models.resnet50(pretrained=True)

# Replace the final classification layer with an identity so the forward
# pass returns the penultimate features instead of class logits.
num_features = model.fc.in_features  # 2048 for ResNet-50
model.fc = nn.Identity()

# Evaluation mode: disables dropout and uses running BatchNorm statistics.
model.eval()

# Standard ImageNet preprocessing: resize, center-crop to 224x224,
# convert to a tensor, and normalize with the ImageNet channel statistics.
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]
    )
])

features = {}

# Walk the LFW directory tree and extract one feature vector per image.
image_dir = 'lfw'
for root, dirs, files in os.walk(image_dir):
    for file in files:
        image_path = os.path.join(root, file)
        image = Image.open(image_path).convert('RGB')

        input_tensor = preprocess(image)
        input_batch = input_tensor.unsqueeze(0)  # add a batch dimension

        with torch.no_grad():
            features_tensor = model(input_batch)
            features_vector = torch.squeeze(features_tensor).numpy()

        features[file] = features_vector
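
# A minimal sketch of one way to use the extracted features: compare two
# images by the cosine similarity of their 2048-dimensional vectors.
# The filenames in the commented example are placeholders, not keys
# guaranteed to exist in `features`.
import numpy as np

def cosine_similarity(a, b):
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

# Example (assuming both keys are present in `features`):
# sim = cosine_similarity(features['Aaron_Eckhart_0001.jpg'],
#                         features['Aaron_Peirsol_0001.jpg'])
# print(f'cosine similarity: {sim:.3f}')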