import io
import torch
from torchvision import transforms
from huggingface_hub import HfFileSystem
from PIL import Image
# Authenticate and download the EfficientNet model from Hugging Face
fs = HfFileSystem()
efficientnet_model_path = 'dhhd255/efficientnet_b3/efficientnet_b3.pt'
with fs.open(efficientnet_model_path, 'rb') as f:
    efficientnet_model_content = f.read()
# Load the EfficientNet model onto the CPU
efficientnet_model_file = io.BytesIO(efficientnet_model_content)
efficientnet_model = torch.load(efficientnet_model_file, map_location=torch.device('cpu'))
# Authenticate and download your custom model from Hugging Face
custom_model_path = 'dhhd255/efficient_net_parkinsons/best_model.pth'
with fs.open(custom_model_path, 'rb') as f:
    custom_model_content = f.read()
# Load your custom model onto the CPU
custom_model_file = io.BytesIO(custom_model_content)
custom_model_state_dict = torch.load(custom_model_file, map_location=torch.device('cpu'))
# Use the downloaded EfficientNet as the model instance for the custom weights
model = efficientnet_model
# Load your custom fine-tuned state dict into the instance
model.load_state_dict(custom_model_state_dict)
model.eval()  # switch dropout and batch-norm layers to inference mode
# Preprocessing pipeline, defined at module level so it can be reused
# both inside image_classifier and for the standalone inference below
data_transform = transforms.Compose([
    transforms.Lambda(lambda x: x.convert('RGB')),
    transforms.Resize((224, 224)),
    transforms.ToTensor()
])

# Define a function that takes an image as input and uses the model for inference
def image_classifier(image):
    # Preprocess the input image (expects a NumPy array)
    image = Image.fromarray(image)
    image = data_transform(image)
    image = image.unsqueeze(0)
    # Use your custom model for inference
    with torch.no_grad():
        outputs = model(image)
        _, predicted = torch.max(outputs.data, 1)
    # Map the index to a class label
    labels = ['Healthy', 'Parkinson']
    predicted_label = labels[predicted.item()]
    # Return the raw scores and the predicted label
    return outputs[0].numpy(), predicted_label
# Load and preprocess the image
img_path = '/content/test_image_healthy.png'
img = Image.open(img_path)
img = data_transform(img)
# Add a batch dimension (the model already lives on the CPU, so no device move is needed)
img = img.unsqueeze(0)
# Perform inference
with torch.no_grad():
    outputs = model(img)
    _, predicted = torch.max(outputs.data, 1)
print(f'Predicted class: {predicted.item()}')
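
# A minimal usage sketch of the image_classifier helper defined above, assuming
# the input is supplied as a NumPy array (the function calls Image.fromarray on it);
# converting via np.array here is an illustrative choice, not part of the original script.
import numpy as np

scores, label = image_classifier(np.array(Image.open(img_path)))
print(f'Raw scores: {scores}, predicted label: {label}')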