vedantM committed on
Commit 74b55b3 · verified · 1 Parent(s): c895da5

updated app file

Files changed (1)
  1. app.py +4 -3
app.py CHANGED
@@ -46,7 +46,7 @@ def nucArg_app():
     }
 
     # Streamlit UI
-    st.title("Antibiotic Resistance Predictor")
+    st.title("Detecting Antimicrobial Resistance Genes")
     # st.write("This app predicts antibiotic resistance based on DNA sequences.")
 
     # Input sequence
@@ -58,10 +58,11 @@ def nucArg_app():
 
     if sequence:
         if len(sequence) <= 128:
+            st.write("Using Short Reads Model.")
             chunks = [sequence]  # No splitting needed
             model, tokenizer, class_mapping = model_short, tokenizer_short, short_read_classes
         else:
-            st.write("Input sequence is too large. Splitting into smaller chunks for processing.")
+            st.write("Using Long Reads Model.")
             chunks = split_sequence(sequence)
             model, tokenizer, class_mapping = model_long, tokenizer_long, long_read_classes
 
@@ -94,7 +95,7 @@ def nucArg_app():
             'Probability': float(prob)
         })
 
-        type_probabilities = pd.DataFrame(type_probabilities).sort_values(by='Probability')#,ascending=False)
+        type_probabilities = pd.DataFrame(type_probabilities).sort_values(by='Probability')
         # type_probabilities = type_probabilities.set_index('Type')
         tp = type_probabilities.convert_dtypes()
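
For reference, the diff calls split_sequence for inputs longer than 128 characters, but its implementation is not part of this commit. A minimal sketch, assuming it cuts the input into non-overlapping chunks of at most 128 characters (the same threshold the app uses to select the short-reads model); the real helper in app.py may use a different chunk size or overlapping windows:

def split_sequence(sequence: str, chunk_size: int = 128) -> list[str]:
    # Assumed behavior: consecutive, non-overlapping chunks of at most
    # chunk_size characters. E.g. a 256-base input yields two 128-base chunks.
    return [sequence[i:i + chunk_size] for i in range(0, len(sequence), chunk_size)]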
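
One side note on the sort_values change: pandas sorts ascending by default, so the highest-probability class ends up at the bottom of the table. The fragment removed in this commit (#,ascending=False) hints that a highest-first ordering may have been intended. A snippet with made-up probabilities, purely to illustrate the direction flag:

import pandas as pd

# Made-up values, only to show the effect of ascending=False.
type_probabilities = pd.DataFrame([
    {'Type': 'beta-lactam', 'Probability': 0.91},
    {'Type': 'tetracycline', 'Probability': 0.06},
    {'Type': 'aminoglycoside', 'Probability': 0.03},
])

# ascending=False puts the most probable resistance class first.
print(type_probabilities.sort_values(by='Probability', ascending=False))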