import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torchvision import models
from PIL import Image
import gradio as gr

class Params:
    """Hyperparameters for the resnet_50_sgd training run."""

    def __init__(self):
        self.batch_size = 128
        self.name = "resnet_50_sgd"
        self.workers = 4
        self.lr = 0.1
        self.momentum = 0.9
        self.weight_decay = 1e-4
        self.lr_step_size = 30
        self.lr_gamma = 0.1

    def __repr__(self):
        return str(self.__dict__)

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

device = torch.device('cpu')

# ResNet-50 with randomly initialised weights; the trained weights are loaded
# from the checkpoint below. (pretrained=False is deprecated in recent
# torchvision releases; weights=None is the equivalent.)
model = models.resnet50(weights=None)
model.fc = nn.Linear(model.fc.in_features, 1000)

model.to(device)

# Load the training checkpoint; it is expected to contain 'model_state_dict',
# 'optimizer_state_dict', 'scheduler_state_dict' and 'epoch'.
checkpoint = torch.load('model.pth', map_location='cpu')
print(checkpoint.keys())  # quick sanity check of what the file contains

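# The original snippet restores optimizer and scheduler state without ever
# defining them. The setup below is a plausible reconstruction inferred from the
# Params fields and the "resnet_50_sgd" name (SGD with momentum plus a StepLR
# schedule); adjust it if your checkpoint was trained differently.
params = Params()
optimizer = optim.SGD(
    model.parameters(),
    lr=params.lr,
    momentum=params.momentum,
    weight_decay=params.weight_decay,
)
scheduler = optim.lr_scheduler.StepLR(
    optimizer, step_size=params.lr_step_size, gamma=params.lr_gamma
)
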
# load_state_dict takes no map_location argument (that belongs to torch.load);
# strict=False tolerates harmless key mismatches.
model.load_state_dict(checkpoint['model_state_dict'], strict=False)

# Restoring optimizer and scheduler state is only needed if you plan to resume
# training; it is harmless for inference.
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])

epoch = checkpoint['epoch']

# Evaluation mode (e.g. batch norm uses its running statistics)
model.eval()

# Standard ImageNet preprocessing: resize, center-crop, tensor conversion, normalization
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

# Placeholder labels (indexed to match the model outputs); swap in the real
# class names for your dataset.
LABELS = [f"class_{k}" for k in range(1000)]

def predict(image):
    # With gr.Image(type="pil") Gradio passes a PIL image; also accept a file path.
    if not isinstance(image, Image.Image):
        image = Image.open(image)
    image = image.convert("RGB")
    image = transform(image).unsqueeze(0)  # add a batch dimension
    image = image.to(device)

    with torch.no_grad():
        outputs = model(image)

    # Index of the highest-scoring class
    _, predicted = torch.max(outputs, 1)
    return LABELS[predicted.item()]

# The gr.inputs namespace is deprecated; pass gr.Image directly.
interface = gr.Interface(fn=predict, inputs=gr.Image(type="pil"), outputs="text")

interface.launch()