simonhermansson committed on
Commit 9122a59
1 Parent(s): 1420df1

Updated so it works on CPU

Files changed (1):
  1. app.py +19 -18
app.py CHANGED
@@ -12,7 +12,7 @@ from braceexpand import braceexpand
 
 # Load model
 checkpoint_path = "ViT-B/16"
-device = "cuda" if torch.cuda.is_available() else "cpu"
+device = "cpu"
 model, preprocess = clip.load(checkpoint_path, device=device, jit=False)
 
 
@@ -78,18 +78,18 @@ def estimate_price_and_usage(img):
         bias=False
     )
     # Load weights for the linear layer as a tensor
-    linear_data = torch.load("files/reuse_linear.pth")
+    linear_data = torch.load("files/reuse_linear.pth", map_location="cpu")
     probe.weight.data = linear_data["weight"]
 
     # Do inference
-    probe.eval()
-    probe = probe.to(device)
-    output = probe(query_features)
-    print(output)
-    output = torch.softmax(output, dim=-1)
-    output = output.cpu().detach().numpy().astype("float32")
-    reuse = output.argmax(axis=-1)[0]
-    reuse_classes = ["Reuse", "Export"]
+    with torch.autocast("cpu"):
+        probe.eval()
+        probe = probe.to(device)
+        output = probe(query_features)
+        output = torch.softmax(output, dim=-1)
+        #output = output.cpu().detach().numpy().astype("float32")
+        reuse = output.argmax(axis=-1)[0]
+        reuse_classes = ["Reuse", "Export"]
 
     # Estimate price
     num_classes = 4
@@ -101,17 +101,18 @@ def estimate_price_and_usage(img):
     )
     # Print output shape for the linear layer
     # Load weights for the linear layer as a tensor
-    linear_data = torch.load("files/price_linear.pth")
+    linear_data = torch.load("files/price_linear.pth", map_location="cpu")
     probe.weight.data = linear_data["weight"]
 
     # Do inference
-    probe.eval()
-    probe = probe.to(device)
-    output = probe(query_features)
-    output = torch.softmax(output, dim=-1)
-    output = output.cpu().detach().numpy().astype("float32")
-    price = output.argmax(axis=-1)[0]
-    price_classes = ["<50", "50-100", "100-150", ">150"]
+    with torch.autocast("cpu"):
+        probe.eval()
+        probe = probe.to(device)
+        output = probe(query_features)
+        output = torch.softmax(output, dim=-1)
+        #output = output.cpu().detach().numpy().astype("float32")
+        price = output.argmax(axis=-1)[0]
+        price_classes = ["<50", "50-100", "100-150", ">150"]
 
     return f"Estimated price: {price_classes[price]} SEK - Usage: {reuse_classes[reuse]}"
 
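
For reference, the core of the CPU fix is passing map_location to torch.load so a probe checkpoint saved on a GPU can be read on a CPU-only machine. The sketch below is not the app's exact code: the checkpoint path, the "weight" key, the 512-dim ViT-B/16 feature size, and the two-class ["Reuse", "Export"] head are taken from the diff above, while the load_probe helper and the random stand-in features are illustrative assumptions, and it presumes files/reuse_linear.pth exists locally.

# Sketch: load a linear probe so it runs on CPU-only machines,
# while still using CUDA when it is available.
import torch


def load_probe(weights_path, in_features, num_classes, device):
    probe = torch.nn.Linear(in_features, num_classes, bias=False)
    # map_location remaps tensors that were saved from a CUDA device,
    # so torch.load does not fail on a machine without a GPU.
    linear_data = torch.load(weights_path, map_location=device)
    probe.weight.data = linear_data["weight"]
    return probe.to(device).eval()


if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Path, feature size (512 for ViT-B/16) and class count (Reuse/Export)
    # follow the diff above; load_probe itself is a hypothetical helper.
    probe = load_probe("files/reuse_linear.pth", 512, 2, device)
    query_features = torch.randn(1, 512, device=device)  # stand-in for CLIP features
    with torch.no_grad():
        probs = torch.softmax(probe(query_features), dim=-1)
    print(probs.argmax(dim=-1).item())  # 0 = Reuse, 1 = Export

Note that torch.autocast("cpu") in the diff runs CPU ops in bfloat16 by default; the sketch uses torch.no_grad() instead, which is sufficient when mixed precision is not required.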