ThorbenF committed
Commit 6643342 · Parent(s): a2460df

Update requirements and make necessary code changes
Files changed (2):
  1. .ipynb_checkpoints/app-checkpoint.py +5 -5
  2. app.py +5 -5
.ipynb_checkpoints/app-checkpoint.py CHANGED
```diff
@@ -99,6 +99,7 @@ def predict_protein_sequence(test_one_letter_sequence):
 
     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
     model.to(device)
+    model.eval()
     for batch in test_loader:
         input_ids = batch['input_ids'].to(device)
         attention_mask = batch['attention_mask'].to(device)
@@ -106,13 +107,12 @@ def predict_protein_sequence(test_one_letter_sequence):
 
     outputs = model(input_ids, attention_mask=attention_mask)
     logits = outputs.logits.detach().cpu().numpy()
-
+
+    logits = logits[:, :-1]  # for prot_t5, drop the last element: it is a special token
     logits=convert_predictions(logits)
-    logits.shape
-
-
-
+
     normalized_scores = normalize_scores(logits)
+    test_one_letter_sequence = test_one_letter_sequence.replace(" ", "")
 
     result_str = "\n".join([f"{aa}: {score:.2f}" for aa, score in zip(test_one_letter_sequence, normalized_scores)])
 
```
app.py CHANGED
```diff
@@ -99,6 +99,7 @@ def predict_protein_sequence(test_one_letter_sequence):
 
     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
     model.to(device)
+    model.eval()
     for batch in test_loader:
         input_ids = batch['input_ids'].to(device)
         attention_mask = batch['attention_mask'].to(device)
@@ -106,13 +107,12 @@ def predict_protein_sequence(test_one_letter_sequence):
 
     outputs = model(input_ids, attention_mask=attention_mask)
     logits = outputs.logits.detach().cpu().numpy()
-
+
+    logits = logits[:, :-1]  # for prot_t5, drop the last element: it is a special token
    logits=convert_predictions(logits)
-    logits.shape
-
-
-
+
     normalized_scores = normalize_scores(logits)
+    test_one_letter_sequence = test_one_letter_sequence.replace(" ", "")
 
     result_str = "\n".join([f"{aa}: {score:.2f}" for aa, score in zip(test_one_letter_sequence, normalized_scores)])
 
```
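The other two additions are ProtT5-specific alignment fixes. The ProtT5 tokenizer appends a trailing `</s>` special token to every sequence, so the per-position logits carry one extra entry at the end; `logits = logits[:, :-1]` drops it before scoring. Likewise, ProtT5 expects residues separated by spaces, so the spaces must be stripped again before zipping residues with their scores. A hedged sketch of both effects; the checkpoint name `Rostlab/prot_t5_xl_half_uniref50-enc` is an assumption about which ProtT5 variant the app uses, and the scores are placeholders:

```python
from transformers import T5Tokenizer

# Assumed checkpoint: any ProtT5 tokenizer behaves the same way for this demo.
tokenizer = T5Tokenizer.from_pretrained("Rostlab/prot_t5_xl_half_uniref50-enc")

seq = "M K T A Y"                    # ProtT5 input format: one-letter residues separated by spaces
enc = tokenizer(seq, return_tensors="pt")
print(enc["input_ids"].shape)        # expected (1, 6): 5 residue tokens + 1 trailing </s>
# => per-position logits have one extra entry, hence logits = logits[:, :-1]

clean = seq.replace(" ", "")         # back to a plain residue string before pairing with scores
scores = [0.10, 0.90, 0.30, 0.70, 0.50]  # placeholder normalized scores, one per residue
print("\n".join(f"{aa}: {s:.2f}" for aa, s in zip(clean, scores)))
```

Without the `replace(" ", "")` step, `zip` would pair scores with the space characters as well, silently misaligning every residue after the first.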