Update app.py
app.py CHANGED
@@ -6,7 +6,7 @@ import numpy as np
 import joblib
 from PIL import Image
 from torchvision import transforms,models
-from sklearn.preprocessing import LabelEncoder
+from sklearn.preprocessing import LabelEncoder,MinMaxScaler
 from gradio import Interface, Image, Label
 from huggingface_hub import snapshot_download
 
@@ -15,7 +15,7 @@ token = os.environ.get("token")
 
 # Download the repository snapshot
 local_dir = snapshot_download(
-    repo_id="robocan/
+    repo_id="robocan/GeoG_coordinate",
     repo_type="model",
     local_dir="SVD",
     token=token
@@ -24,6 +24,7 @@ local_dir = snapshot_download(
 device = 'cpu'
 le = LabelEncoder()
 le = joblib.load("SVD/le.gz")
+MMS = joblib.load("SVD/MMS.gz")
 len_classes = len(le.classes_) + 1
 
 class ModelPre(torch.nn.Module):
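The added MMS = joblib.load("SVD/MMS.gz") line presumes the model repo ships a scaler fitted on the training coordinates. A minimal sketch of how such an artifact might be produced offline; the (lat, lon) column order, the sample values, and the file name are assumptions, not taken from this repo:

# Offline sketch (assumption): fit a MinMaxScaler on the training coordinates
# and save it with joblib, which compresses based on the .gz suffix.
import joblib
import numpy as np
from sklearn.preprocessing import MinMaxScaler

coords = np.array([[51.5, -0.1], [41.9, 12.5], [52.4, 4.9]])  # illustrative (lat, lon) pairs
MMS = MinMaxScaler().fit(coords)   # maps each column into [0, 1]
joblib.dump(MMS, "MMS.gz")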
@@ -36,14 +37,38 @@ class ModelPre(torch.nn.Module):
             torch.nn.ReLU(),
             torch.nn.Linear(in_features=512,out_features=len_classes),
         )
+    # Freeze all layers
 
     def forward(self, data):
         return self.embedding(data)
+
+# Load the pretrained model
+model = ModelPre()
+#for param in model.parameters():
+#    param.requires_grad = False
+class GeoGcord(torch.nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.embedding = torch.nn.Sequential(
+            *list(model.children())[0][:-1],
+            torch.nn.Linear(in_features=512,out_features=256),
+            torch.nn.ReLU(),
+            torch.nn.Linear(in_features=256,out_features=128),
+            torch.nn.ReLU(),
+            torch.nn.Linear(in_features=128,out_features=2),
+        )
+    # Freeze all layers
+
+    def forward(self, data):
+        return self.embedding(data)
+
+
 
-
+# Load the pre-trained model
+model = GeoGcord()
+model_w = torch.load("SVD/GeoG.pth", map_location=torch.device(device))
 
-
-modelm.load_state_dict(model['model'])
+model.load_state_dict(model_w['model'])
 
 cmp = transforms.Compose([
     transforms.ToTensor(),
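The new GeoGcord class reuses ModelPre's layers minus their final classification head via *list(model.children())[0][:-1] and appends a 2-output regression head. A self-contained sketch of that head-swap pattern on a stand-in backbone (layer sizes are placeholders, not the real trunk):

# Illustration (assumptions only) of the head-swap pattern used by GeoGcord:
# keep every layer of an existing Sequential except its last Linear head,
# then append a new 2-value coordinate head.
import torch

backbone = torch.nn.Sequential(
    torch.nn.Flatten(),
    torch.nn.Linear(16, 512),
    torch.nn.ReLU(),
    torch.nn.Linear(512, 10),          # old classification head, dropped below
)
regressor = torch.nn.Sequential(
    *list(backbone.children())[:-1],   # everything but the old head
    torch.nn.Linear(512, 2),           # new regression head (e.g. scaled lat/lon)
)
print(regressor(torch.randn(1, 4, 4)).shape)   # torch.Size([1, 2])

In the diff itself the extra [0] index is needed because ModelPre wraps its layers in a single self.embedding Sequential, so model.children() yields that one Sequential first.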
@@ -51,30 +76,30 @@ cmp = transforms.Compose([
     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
 ])
 
+# Predict function for the new regression model
 def predict(input_img):
     with torch.inference_mode():
         img = cmp(input_img).unsqueeze(0)
-        res =
-
-
-
-        top_10_predictions = le.inverse_transform(top_10_indices)
-
-        results = {top_10_predictions[i]: float(top_10_probabilities[i]) for i in range(10)}
-        return results
+        res = model(img.to(device))
+        # Assuming res is a 2-layer regression output, and MMS.inverse_transform is needed
+        prediction = MMS.inverse_transform(res.cpu().numpy()).flatten()
+        return prediction
 
+# Create label output function
 def create_label_output(predictions):
-    return predictions
+    return f"Predicted values: {predictions}"
 
+# Predict and plot function
 def predict_and_plot(input_img):
     predictions = predict(input_img)
     return create_label_output(predictions)
 
+# Gradio app definition
 gradio_app = Interface(
     fn=predict_and_plot,
     inputs=Image(label="Upload an Image", type="pil"),
-    examples=["GB.PNG", "IT.PNG","NL.PNG","NZ.PNG"],
-    outputs=
+    examples=["GB.PNG", "IT.PNG", "NL.PNG", "NZ.PNG"],
+    outputs="text",
     title="Predict the Location of this Image"
 )
 
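A note on the weight file loaded in the previous hunk: torch.load("SVD/GeoG.pth", ...) followed by model.load_state_dict(model_w['model']) implies the checkpoint is a dict with the state_dict stored under a 'model' key. A hypothetical save-side counterpart (the surrounding training code is assumed):

# Hypothetical training-side save, matching model_w['model'] in the app.
import torch

net = torch.nn.Linear(128, 2)                        # stand-in for the trained GeoGcord
torch.save({"model": net.state_dict()}, "GeoG.pth")  # checkpoint dict keyed by 'model'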
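Finally, a hedged sketch of how the updated app.py would be exercised once the Space is running; the launch() call and the direct call to predict_and_plot are assumptions about usage, not part of this diff:

# Assumed usage after the code above has executed (e.g. at the bottom of app.py).
from PIL import Image as PILImage

sample = PILImage.open("GB.PNG")   # one of the bundled example images
print(predict_and_plot(sample))    # e.g. "Predicted values: [ ... ]"

gradio_app.launch()                # standard Gradio entry point for a Space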