ThorbenFroehlking committed on
Commit
70d9713
·
1 Parent(s): 31865d7
Files changed (2) hide show
  1. .ipynb_checkpoints/app-checkpoint.py +20 -13
  2. app.py +20 -13
.ipynb_checkpoints/app-checkpoint.py CHANGED
@@ -584,19 +584,7 @@ with gr.Blocks(css="""
584
  else:
585
  return gr.update(visible=False), gr.update(visible=True)
586
 
587
- def predict_utils(sequence):
588
- input_ids = tokenizer(" ".join(sequence), return_tensors="pt").input_ids.to(device)
589
- with torch.no_grad():
590
- outputs = model(input_ids).logits.detach().cpu().numpy().squeeze()
591
-
592
- # Calculate scores and normalize them
593
- raw_scores = expit(outputs[:, 1] - outputs[:, 0])
594
- normalized_scores = normalize_scores(raw_scores)
595
 
596
- return {
597
- "raw_scores": raw_scores.tolist(),
598
- "normalized_scores": normalized_scores.tolist()
599
- }
600
 
601
  mode.change(
602
  toggle_mode,
@@ -636,6 +624,25 @@ with gr.Blocks(css="""
636
  outputs=[predictions_output, molecule_output, download_output]
637
  )
638
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
639
 
640
- demo.load(predict_utils, inputs=gr.Textbox(), outputs=gr.Textbox())
641
  demo.launch(share=True)
 
584
  else:
585
  return gr.update(visible=False), gr.update(visible=True)
586
 
 
 
 
 
 
 
 
 
587
 
 
 
 
 
588
 
589
  mode.change(
590
  toggle_mode,
 
624
  outputs=[predictions_output, molecule_output, download_output]
625
  )
626
 
627
+ def predict_utils(sequence):
628
+ input_ids = tokenizer(" ".join(sequence), return_tensors="pt").input_ids.to(device)
629
+ with torch.no_grad():
630
+ outputs = model(input_ids).logits.detach().cpu().numpy().squeeze()
631
+
632
+ # Calculate scores and normalize them
633
+ raw_scores = expit(outputs[:, 1] - outputs[:, 0])
634
+ normalized_scores = normalize_scores(raw_scores)
635
+
636
+ return {
637
+ "raw_scores": raw_scores.tolist(),
638
+ "normalized_scores": normalized_scores.tolist()
639
+ }
640
+
641
+ gr.Interface(
642
+ fn=predict_utils,
643
+ inputs=gr.Textbox(visible=False),
644
+ outputs=gr.Textbox(visible=False)
645
+ )
646
+
647
 
 
648
  demo.launch(share=True)
app.py CHANGED
@@ -584,19 +584,7 @@ with gr.Blocks(css="""
584
  else:
585
  return gr.update(visible=False), gr.update(visible=True)
586
 
587
- def predict_utils(sequence):
588
- input_ids = tokenizer(" ".join(sequence), return_tensors="pt").input_ids.to(device)
589
- with torch.no_grad():
590
- outputs = model(input_ids).logits.detach().cpu().numpy().squeeze()
591
-
592
- # Calculate scores and normalize them
593
- raw_scores = expit(outputs[:, 1] - outputs[:, 0])
594
- normalized_scores = normalize_scores(raw_scores)
595
 
596
- return {
597
- "raw_scores": raw_scores.tolist(),
598
- "normalized_scores": normalized_scores.tolist()
599
- }
600
 
601
  mode.change(
602
  toggle_mode,
@@ -636,6 +624,25 @@ with gr.Blocks(css="""
636
  outputs=[predictions_output, molecule_output, download_output]
637
  )
638
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
639
 
640
- demo.load(predict_utils, inputs=gr.Textbox(), outputs=gr.Textbox())
641
  demo.launch(share=True)
 
584
  else:
585
  return gr.update(visible=False), gr.update(visible=True)
586
 
 
 
 
 
 
 
 
 
587
 
 
 
 
 
588
 
589
  mode.change(
590
  toggle_mode,
 
624
  outputs=[predictions_output, molecule_output, download_output]
625
  )
626
 
627
+ def predict_utils(sequence):
628
+ input_ids = tokenizer(" ".join(sequence), return_tensors="pt").input_ids.to(device)
629
+ with torch.no_grad():
630
+ outputs = model(input_ids).logits.detach().cpu().numpy().squeeze()
631
+
632
+ # Calculate scores and normalize them
633
+ raw_scores = expit(outputs[:, 1] - outputs[:, 0])
634
+ normalized_scores = normalize_scores(raw_scores)
635
+
636
+ return {
637
+ "raw_scores": raw_scores.tolist(),
638
+ "normalized_scores": normalized_scores.tolist()
639
+ }
640
+
641
+ gr.Interface(
642
+ fn=predict_utils,
643
+ inputs=gr.Textbox(visible=False),
644
+ outputs=gr.Textbox(visible=False)
645
+ )
646
+
647
 
 
648
  demo.launch(share=True)