ThorbenFroehlking committed
Commit 5576cdd · 1 Parent(s): bfc9650
.ipynb_checkpoints/app-Copy1-checkpoint.py CHANGED
@@ -181,18 +181,6 @@ def generate_results_text(pdb_id, segment, residues_by_bracket, protein_residues
 
     return result_str
 
-def predict_utils(sequence):
-    input_ids = tokenizer(" ".join(sequence), return_tensors="pt").input_ids.to(device)
-    with torch.no_grad():
-        outputs = model(input_ids).logits.detach().cpu().numpy().squeeze()
-
-    # Calculate scores and normalize them
-    raw_scores = expit(outputs[:, 1] - outputs[:, 0])
-    normalized_scores = normalize_scores(raw_scores)
-
-    return raw_scores,normalized_scores
-
-
 def process_pdb(pdb_id_or_file, segment, score_type='normalized'):
     # Determine if input is a PDB ID or file path
     if pdb_id_or_file.endswith('.pdb'):
 
.ipynb_checkpoints/app-checkpoint.py CHANGED
@@ -181,6 +181,18 @@ def generate_results_text(pdb_id, segment, residues_by_bracket, protein_residues
 
     return result_str
 
+
+def predict_utils(sequence):
+    input_ids = tokenizer(" ".join(sequence), return_tensors="pt").input_ids.to(device)
+    with torch.no_grad():
+        outputs = model(input_ids).logits.detach().cpu().numpy().squeeze()
+
+    # Calculate scores and normalize them
+    raw_scores = expit(outputs[:, 1] - outputs[:, 0])
+    normalized_scores = normalize_scores(raw_scores)
+
+    return raw_scores,normalized_scores
+
 def process_pdb(pdb_id_or_file, segment, score_type='normalized'):
     # Determine if input is a PDB ID or file path
     if pdb_id_or_file.endswith('.pdb'):
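Note on the added helper: for a two-class logit output, expit(outputs[:, 1] - outputs[:, 0]) equals the softmax probability of class 1, since sigmoid(z1 - z0) = e^z1 / (e^z0 + e^z1). A minimal check of that identity with illustrative values (only numpy/scipy; none of the numbers below come from the app):

import numpy as np
from scipy.special import expit, softmax

# Illustrative two-class logits for three residues (not real model output)
logits = np.array([[0.3, 1.2], [2.0, -0.5], [-1.0, 0.7]])

p_sigmoid = expit(logits[:, 1] - logits[:, 0])   # as computed in predict_utils
p_softmax = softmax(logits, axis=1)[:, 1]        # softmax probability of class 1

print(np.allclose(p_sigmoid, p_softmax))  # True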
app-Copy1.py CHANGED
@@ -181,18 +181,6 @@ def generate_results_text(pdb_id, segment, residues_by_bracket, protein_residues
 
     return result_str
 
-def predict_utils(sequence):
-    input_ids = tokenizer(" ".join(sequence), return_tensors="pt").input_ids.to(device)
-    with torch.no_grad():
-        outputs = model(input_ids).logits.detach().cpu().numpy().squeeze()
-
-    # Calculate scores and normalize them
-    raw_scores = expit(outputs[:, 1] - outputs[:, 0])
-    normalized_scores = normalize_scores(raw_scores)
-
-    return raw_scores,normalized_scores
-
-
 def process_pdb(pdb_id_or_file, segment, score_type='normalized'):
     # Determine if input is a PDB ID or file path
     if pdb_id_or_file.endswith('.pdb'):
 
app.py CHANGED
@@ -181,6 +181,18 @@ def generate_results_text(pdb_id, segment, residues_by_bracket, protein_residues
 
     return result_str
 
+
+def predict_utils(sequence):
+    input_ids = tokenizer(" ".join(sequence), return_tensors="pt").input_ids.to(device)
+    with torch.no_grad():
+        outputs = model(input_ids).logits.detach().cpu().numpy().squeeze()
+
+    # Calculate scores and normalize them
+    raw_scores = expit(outputs[:, 1] - outputs[:, 0])
+    normalized_scores = normalize_scores(raw_scores)
+
+    return raw_scores,normalized_scores
+
 def process_pdb(pdb_id_or_file, segment, score_type='normalized'):
     # Determine if input is a PDB ID or file path
     if pdb_id_or_file.endswith('.pdb'):
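The relocated predict_utils also calls normalize_scores, which is defined elsewhere in app.py and is not part of this commit. A minimal sketch of such a helper, assuming plain min-max scaling of the per-residue scores to [0, 1] (the actual implementation may differ):

import numpy as np

def normalize_scores(raw_scores):
    # Hypothetical min-max normalization; the real helper in app.py may differ.
    scores = np.asarray(raw_scores, dtype=float)
    lo, hi = scores.min(), scores.max()
    if hi == lo:
        # Constant score vector: avoid division by zero
        return np.zeros_like(scores)
    return (scores - lo) / (hi - lo)

# Example: rescaling a small vector of sigmoid-derived scores
print(normalize_scores([0.12, 0.55, 0.91]))  # approximately [0.  0.544  1.]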