Deepaksiwania12 committed on
Commit cdf9475 • 1 Parent(s): e16290b

Update app.py

Files changed (1)
  1. app.py +18 -51
app.py CHANGED
@@ -1,56 +1,23 @@
-import numpy as np
-import cv2
-import glob
-import os
-import matplotlib.pyplot as plt
-import string
-from mlxtend.plotting import plot_decision_regions
-from mpl_toolkits.mplot3d import Axes3D
-from sklearn.decomposition import PCA
-from sklearn.preprocessing import StandardScaler
-from sklearn.neighbors import KNeighborsClassifier
-from sklearn.tree import DecisionTreeClassifier
-from sklearn.model_selection import train_test_split, cross_val_score
-from sklearn.utils.multiclass import unique_labels
-from sklearn import metrics
-from sklearn.svm import SVC
-dim = 100
 import torch
-from torchvision import transforms
-from PIL import Image
-
-# Define your model class
-class YourModelClass(torch.nn.Module):
-    # Define your model architecture here
-
-# Create an instance of your model
-model = YourModelClass()
 
-# Load the pre-trained weights
-model.load_state_dict(torch.load('model_weights.pth'))
-model.eval()
-def predict_leaf_health(image_path):
-    try:
-        # Open and preprocess the image
-        img = Image.open(image_path)
-        transform = transforms.Compose([
-            transforms.Resize((224, 224)),
-            transforms.ToTensor(),
-            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-        ])
-        img = transform(img)
-        img = img.unsqueeze(0)  # Add batch dimension
-
-        # Make prediction
-        with torch.no_grad():
-            output = model(img)
-            prediction = torch.argmax(output).item()
 
-        # Map the prediction to class labels (modify as needed)
-        class_labels = {0: 'Unhealthy', 1: 'Healthy'}
-        result = class_labels.get(prediction, 'Unknown')
 
-        return result
 
-    except Exception as e:
-        return f"Error: {str(e)}"
+model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()
+import requests
+from PIL import Image
+from torchvision import transforms
 
+# Download human-readable labels for ImageNet.
+response = requests.get("https://git.io/JJkYN")
+labels = response.text.split("\n")
 
+def predict(inp):
+    inp = transforms.ToTensor()(inp).unsqueeze(0)
+    with torch.no_grad():
+        prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
+        confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
+    return confidences
+import gradio as gr
 
+gr.Interface(fn=predict,
+             inputs=gr.Image(type="pil"),
+             outputs=gr.Label(num_top_classes=3),
+             examples=["lion.jpg", "cheetah.jpg"]).launch()
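
The new app.py is the standard Gradio image-classification demo: a ResNet-18 pulled from torch.hub, ImageNet class labels fetched from https://git.io/JJkYN, and predict() wired into gr.Interface. The sketch below is a minimal, hypothetical sanity check of the same inference path run outside app.py, so the Gradio UI is not launched; it assumes a local image such as the lion.jpg listed under examples, plus network access for the hub weights and the label file.

# Hypothetical standalone check of the inference path used in app.py.
import torch
import requests
from PIL import Image
from torchvision import transforms

# Same model and label source as the committed app.py.
model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()
labels = requests.get("https://git.io/JJkYN").text.split("\n")

# "lion.jpg" mirrors the examples list above; any local RGB image works.
inp = transforms.ToTensor()(Image.open("lion.jpg").convert("RGB")).unsqueeze(0)
with torch.no_grad():
    probs = torch.nn.functional.softmax(model(inp)[0], dim=0)

# Print the three highest-confidence ImageNet classes.
top = torch.topk(probs, k=3)
for score, idx in zip(top.values.tolist(), top.indices.tolist()):
    print(f"{labels[idx]}: {score:.3f}")

Running something like this first helps separate model or label problems from Gradio configuration problems if the Space fails to start.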