jpterry committed
Commit 25fdf2a · 1 Parent(s): 98f445c

got rid of more print statements

Files changed (1)
  1. app.py +8 -6
app.py CHANGED
@@ -122,18 +122,17 @@ def get_activations(model, image: list, model_name: str,
         temp_image = model.features[i](temp_image)
         if i in activation_indices[model_name]:
             layer_outputs[i] = image
-            print(i, layer_outputs[i].shape)
+            # print(i, layer_outputs[i].shape)
         if i == max(activation_indices[model_name]):
             break
     output = model(image).detach().cpu().numpy()
-    print(output)
     # print(model(image), model.model(image))
 
     image = image.detach().cpu().numpy()
     output_1 = layer_outputs[activation_indices[model_name][0]].detach().cpu().numpy()
     output_2 = layer_outputs[activation_indices[model_name][1]].detach().cpu().numpy()
 
-    print(image.shape, output.shape, output_1.shape, output_2.shape)
+    # print(image.shape, output.shape, output_1.shape, output_2.shape)
 
     # get activations
     # output_1 = outputs[1]
@@ -142,6 +141,7 @@ def get_activations(model, image: list, model_name: str,
     # get prediction
     # output = outputs[0][0]
     output = special.softmax(output)
+    print(output)
 
     # sum over velocity channels
     if channel == 0:
@@ -231,7 +231,7 @@ def predict_and_analyze(model_name, num_channels, dim, input_channel, image):
 
     print("Running %s for %i channels" % (model_name, num_channels))
     print("Loading data")
-    print(image)
+    # print(image)
 
     image = np.load(image.name, allow_pickle=True)
     image = image.astype(np.float32)
@@ -296,7 +296,7 @@ def predict_and_analyze(model_name, num_channels, dim, input_channel, image):
     # print(model_url)
 
     loaded = torch.load(model_url, map_location='cpu', )
-    print(loaded.keys())
+    # print(loaded.keys())
 
     model.load_state_dict(loaded['state_dict'])
     # print(model)
@@ -317,13 +317,15 @@ def predict_and_analyze(model_name, num_channels, dim, input_channel, image):
         channel=input_channel,
         sub_mean=True)
     print("Activations and predictions finished")
-    print(output)
+    # print(output)
 
     if output[0][0] < output[0][1]:
         output = 'Planet predicted with %.3f percent confidence' % (100*output[0][1])
     else:
         output = 'No planet predicted with %.3f percent confidence' % (100*output[0][0])
 
+    print(output)
+
     input_image = normalize_array(input_image)
     activation_1 = normalize_array(activation_1)
     activation_2 = normalize_array(activation_2)
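
For context, a minimal standalone sketch (not part of the commit) of how the two-class softmax output is turned into the confidence string that app.py now prints; the logits array here is made up for illustration.

import numpy as np
from scipy import special

# Hypothetical raw model output for one image: [no-planet score, planet score]
logits = np.array([[0.3, 1.7]])

# Same normalization app.py applies before reporting confidence
probs = special.softmax(logits)

# Mirrors the branch in predict_and_analyze: index 1 is treated as the "planet" class
if probs[0][0] < probs[0][1]:
    message = 'Planet predicted with %.3f percent confidence' % (100 * probs[0][1])
else:
    message = 'No planet predicted with %.3f percent confidence' % (100 * probs[0][0])

print(message)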