ngaggion committed
Commit e9256f0 · 1 Parent(s): e736992

Update app

Files changed (1): app.py (+21 -6)
app.py CHANGED
@@ -7,6 +7,8 @@ from utils.utils import scipy_to_torch_sparse, genMatrixesLungsHeart
 import scipy.sparse as sp
 import torch
 
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+hybrid = None
 
 def getDenseMask(landmarks):
     RL = landmarks[0:44]
@@ -35,7 +37,18 @@ def drawOnTop(img, landmarks):
     image[:,:,2] = img - 0.1 * (output == 1).astype('float') - 0.2 * (output == 2).astype('float')
 
     image = np.clip(image, 0, 1)
-
+
+    RL, LL, H = landmarks[0:44], landmarks[44:94], landmarks[94:]
+
+    # Draw the landmarks as dots
+
+    for l in RL:
+        image = cv2.circle(image, (int(l[0]), int(l[1])), 1, (1, 1, 0), -1)
+    for l in LL:
+        image = cv2.circle(image, (int(l[0]), int(l[1])), 1, (1, 1, 0), -1)
+    for l in H:
+        image = cv2.circle(image, (int(l[0]), int(l[1])), 1, (0, 1, 1), -1)
+
     return image
 
 
@@ -106,13 +119,15 @@ def preprocess(input_img):
 
 
 def segment(input_img):
-    input_img = cv2.imread(input_img, 0) / 255.0
+    global hybrid
 
-    img, (h, w, padding) = preprocess(input_img)
+    if hybrid is None:
+        hybrid = loadModel()
 
-    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-    hybrid = loadModel(device)
+    input_img = cv2.imread(input_img, 0) / 255.0
 
+    img, (h, w, padding) = preprocess(input_img)
+
     data = torch.from_numpy(img).unsqueeze(0).unsqueeze(0).to(device).float()
 
     with torch.no_grad():
@@ -121,6 +136,6 @@ def segment(input_img):
     return drawOnTop(img, output)
 
 
-if __name__ == "__main__":
+if __name__ == "__main__":
     demo = gr.Interface(segment, gr.Image(type="filepath"), "image")
     demo.launch()
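
Taken together, the third hunk moves device selection to module scope and makes the model a lazily initialized global, so it is loaded once on the first request instead of on every call. Below is a minimal, self-contained sketch of that pattern, not app.py itself: build_model and predict are hypothetical stand-ins for loadModel() and segment(), and nn.Identity only substitutes for the real network that loadModel() returns.

# Minimal sketch of the lazy-loading pattern introduced by this commit.
# `build_model` and `predict` are illustrative stand-ins for app.py's
# loadModel() and segment(); nn.Identity substitutes for the real network.
import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = None  # cached after the first request, like `hybrid` in app.py


def build_model():
    # Hypothetical placeholder: the real loadModel() builds the network,
    # loads its weights, and moves it to `device`.
    return torch.nn.Identity().to(device)


def predict(x):
    global model
    if model is None:          # only the first call pays the loading cost
        model = build_model()
    with torch.no_grad():
        return model(x.to(device))


if __name__ == "__main__":
    print(predict(torch.randn(1, 1, 1024, 1024)).shape)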
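
The second hunk draws the predicted landmarks on top of the segmentation overlay with filled 1-pixel cv2.circle dots: (1, 1, 0) for the two lung contours (landmarks[0:44] and landmarks[44:94]) and (0, 1, 1) for the heart (landmarks[94:]), directly on the float image in [0, 1]. A small self-contained sketch of that drawing step, using a blank canvas and random points as stand-ins for the model's output:

# Sketch of the landmark overlay added to drawOnTop(); the blank canvas and
# random (x, y) points are illustrative stand-ins for the real image and
# for the predicted landmark array.
import cv2
import numpy as np

image = np.zeros((1024, 1024, 3), dtype=np.float32)    # float image in [0, 1]
landmarks = np.random.randint(0, 1024, size=(120, 2))  # stand-in predictions

RL, LL, H = landmarks[0:44], landmarks[44:94], landmarks[94:]

for l in RL:
    image = cv2.circle(image, (int(l[0]), int(l[1])), 1, (1, 1, 0), -1)  # lung contour dot
for l in LL:
    image = cv2.circle(image, (int(l[0]), int(l[1])), 1, (1, 1, 0), -1)  # lung contour dot
for l in H:
    image = cv2.circle(image, (int(l[0]), int(l[1])), 1, (0, 1, 1), -1)  # heart contour dot

cv2.imwrite("landmarks_preview.png", (image * 255).astype(np.uint8))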