ThorbenFroehlking committed
Commit 38ee0ba · 1 Parent(s): a89fdb2
Files changed (2):
  1. .ipynb_checkpoints/app-checkpoint.py +11 -10
  2. app.py +11 -10
.ipynb_checkpoints/app-checkpoint.py CHANGED
@@ -182,16 +182,6 @@ def generate_results_text(pdb_id, segment, residues_by_bracket, protein_residues
     return result_str
 
 
-def predict_utils(sequence):
-    input_ids = tokenizer(" ".join(sequence), return_tensors="pt").input_ids.to(device)
-    with torch.no_grad():
-        outputs = model(input_ids).logits.detach().cpu().numpy().squeeze()
-
-    # Calculate scores and normalize them
-    raw_scores = expit(outputs[:, 1] - outputs[:, 0])
-    normalized_scores = normalize_scores(raw_scores)
-
-    return raw_scores,normalized_scores
 
 
 def process_pdb(pdb_id_or_file, segment, score_type='normalized'):
@@ -632,6 +622,17 @@ with gr.Blocks(css="""
         outputs=[predictions_output, molecule_output, download_output]
     )
 
+def predict_utils(sequence):
+    input_ids = tokenizer(" ".join(sequence), return_tensors="pt").input_ids.to(device)
+    with torch.no_grad():
+        outputs = model(input_ids).logits.detach().cpu().numpy().squeeze()
+
+    # Calculate scores and normalize them
+    raw_scores = expit(outputs[:, 1] - outputs[:, 0])
+    normalized_scores = normalize_scores(raw_scores)
+
+    return raw_scores,normalized_scores
+
 #gr.Interface(
 #fn=predict_utils,
 #inputs=gr.Textbox(),
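For reference, the relocated `predict_utils` scores each residue by taking the sigmoid (`expit`) of the class-1 minus class-0 logit and then rescaling with `normalize_scores`, a helper defined elsewhere in app.py and not shown in this diff. A minimal sketch of that step, assuming `normalize_scores` is a plain min-max rescaling (the app's actual implementation may differ):

```python
# Sketch of the scoring step inside predict_utils. `normalize_scores` is
# defined elsewhere in app.py; a simple min-max rescaling is assumed here.
import numpy as np
from scipy.special import expit


def normalize_scores(raw_scores):
    # Assumed implementation: rescale scores into the [0, 1] range.
    lo, hi = raw_scores.min(), raw_scores.max()
    return (raw_scores - lo) / (hi - lo) if hi > lo else np.zeros_like(raw_scores)


# Example with a (residues x 2) logit array shaped like model(...).logits.squeeze()
logits = np.array([[0.2, 1.5], [1.0, -0.3], [0.1, 0.1]])
raw_scores = expit(logits[:, 1] - logits[:, 0])   # sigmoid of class-1 minus class-0 logit
normalized_scores = normalize_scores(raw_scores)  # rescaled per sequence for display
print(raw_scores)         # ~[0.786, 0.214, 0.5]
print(normalized_scores)  # [1.0, 0.0, 0.5]
```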
app.py CHANGED
@@ -182,16 +182,6 @@ def generate_results_text(pdb_id, segment, residues_by_bracket, protein_residues
     return result_str
 
 
-def predict_utils(sequence):
-    input_ids = tokenizer(" ".join(sequence), return_tensors="pt").input_ids.to(device)
-    with torch.no_grad():
-        outputs = model(input_ids).logits.detach().cpu().numpy().squeeze()
-
-    # Calculate scores and normalize them
-    raw_scores = expit(outputs[:, 1] - outputs[:, 0])
-    normalized_scores = normalize_scores(raw_scores)
-
-    return raw_scores,normalized_scores
 
 
 def process_pdb(pdb_id_or_file, segment, score_type='normalized'):
@@ -632,6 +622,17 @@ with gr.Blocks(css="""
         outputs=[predictions_output, molecule_output, download_output]
     )
 
+def predict_utils(sequence):
+    input_ids = tokenizer(" ".join(sequence), return_tensors="pt").input_ids.to(device)
+    with torch.no_grad():
+        outputs = model(input_ids).logits.detach().cpu().numpy().squeeze()
+
+    # Calculate scores and normalize them
+    raw_scores = expit(outputs[:, 1] - outputs[:, 0])
+    normalized_scores = normalize_scores(raw_scores)
+
+    return raw_scores,normalized_scores
+
 #gr.Interface(
 #fn=predict_utils,
 #inputs=gr.Textbox(),
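The commented-out `gr.Interface` stub that now sits directly below the relocated `predict_utils` suggests a plain sequence-in, scores-out endpoint. A self-contained sketch of that wiring follows; the ESM-2 checkpoint, the two-label token-classification head, and the output formatting are stand-in assumptions, since app.py loads its own fine-tuned `tokenizer` and `model` earlier in the file.

```python
# Sketch only: app.py's real tokenizer/model/device are loaded earlier in the
# file; an off-the-shelf ESM-2 token-classification head stands in here.
import gradio as gr
import numpy as np
import torch
from scipy.special import expit
from transformers import AutoModelForTokenClassification, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"
checkpoint = "facebook/esm2_t12_35M_UR50D"  # placeholder, not the app's fine-tuned model
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForTokenClassification.from_pretrained(checkpoint, num_labels=2).to(device).eval()


def normalize_scores(raw_scores):
    # Assumed min-max rescaling, as in the sketch above; app.py's version may differ.
    lo, hi = raw_scores.min(), raw_scores.max()
    return (raw_scores - lo) / (hi - lo) if hi > lo else np.zeros_like(raw_scores)


def predict_utils(sequence):
    # Same logic as the function added in the hunk above.
    input_ids = tokenizer(" ".join(sequence), return_tensors="pt").input_ids.to(device)
    with torch.no_grad():
        outputs = model(input_ids).logits.detach().cpu().numpy().squeeze()
    raw_scores = expit(outputs[:, 1] - outputs[:, 0])
    return raw_scores, normalize_scores(raw_scores)


def format_scores(sequence):
    # Illustrative output formatting for a text-only endpoint.
    raw, norm = predict_utils(sequence.strip().upper())
    return "\n".join(f"{i + 1}\t{r:.3f}\t{n:.3f}" for i, (r, n) in enumerate(zip(raw, norm)))


sequence_demo = gr.Interface(
    fn=format_scores,
    inputs=gr.Textbox(label="Protein sequence (one-letter codes)"),
    outputs=gr.Textbox(label="Position, raw score, normalized score"),
)
# sequence_demo.launch()  # would serve the sequence-only endpoint on its own
```

In the commit itself, the function is simply moved so that it sits right above the stub that references it, keeping the sequence-only path next to its (still disabled) interface and away from the `process_pdb` flow.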