import nltk
nltk.download('stopwords')
nltk.download('punkt')
import pandas as pd
# classify_abs is a dependency for extract_abs
import classify_abs
import extract_abs
#pd.set_option('display.max_colwidth', None)
import streamlit as st
import spacy
import tensorflow as tf
import pickle
########## Title for the Web App ##########
st.title("Epidemiology Extraction Pipeline for Rare Diseases")
st.subheader("National Center for Advancing Translational Sciences (NIH/NCATS)")
st.markdown('<img src="https://huggingface.co/spaces/ncats/EpiPipeline4GARD/raw/main/NCATS_logo.svg?sanitize=true">', unsafe_allow_html=True)
#### CHANGE SIDEBAR WIDTH ###
st.markdown(
    """
    <style>
    [data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
        width: 275px;
    }
    [data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
        width: 275px;
        margin-left: -400px;
    }
    </style>
    """,
    unsafe_allow_html=True,
)
# max_results is the maximum number of PubMed IDs to retrieve BEFORE filtering
max_results = st.sidebar.number_input("Maximum number of articles to find in PubMed", min_value=1, max_value=None, value=50)
filtering = st.sidebar.radio("What type of filtering would you like?", ('Strict', 'Lenient', 'None'))
extract_diseases = st.sidebar.checkbox("Extract Rare Diseases", value=False)
def load_models_experimental():
    # Load the abstract classifier, the NER pipeline, and the GARD disease dictionary
    classify_model_vars = classify_abs.init_classify_model()
    NER_pipeline, entity_classes = extract_abs.init_NER_pipeline()
    GARD_dict, max_length = extract_abs.load_GARD_diseases()
    return classify_model_vars, NER_pipeline, entity_classes, GARD_dict, max_length
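# A minimal caching sketch (an assumption, not part of the original pipeline):
# wrapping the loader in st.cache would keep the heavy models in memory across
# Streamlit reruns; allow_output_mutation skips hashing the returned models,
# which Streamlit cannot hash reliably. Requires a Streamlit version that
# still ships st.cache.
#@st.cache(allow_output_mutation=True)
#def load_models_cached():
#    return load_models_experimental()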
def load_models():
    # load the tokenizer for the abstract classifier
    with open('tokenizer.pickle', 'rb') as handle:
        classify_tokenizer = pickle.load(handle)
    # load the LSTM RNN classification model
    classify_model = tf.keras.models.load_model("LSTM_RNN_Model")
    #classify_model_vars = classify_abs.init_classify_model()
    NER_pipeline, entity_classes = extract_abs.init_NER_pipeline()
    GARD_dict, max_length = extract_abs.load_GARD_diseases()
    return classify_tokenizer, classify_model, NER_pipeline, entity_classes, GARD_dict, max_length
with st.spinner('Loading Epidemiology Models and Dependencies...'):
    classify_model_vars, NER_pipeline, entity_classes, GARD_dict, max_length = load_models_experimental()
    #classify_tokenizer, classify_model, NER_pipeline, entity_classes, GARD_dict, max_length = load_models()
# Load spaCy models, which cannot be cached due to a hash function error
#nlp = spacy.load('en_core_web_lg')
#nlpSci = spacy.load("en_ner_bc5cdr_md")
#nlpSci2 = spacy.load('en_ner_bionlp13cg_md')
#classify_model_vars = (nlp, nlpSci, nlpSci2, classify_model, classify_tokenizer)
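# A hedged sketch (an assumption, not the original code): Streamlit's
# hash_funcs escape hatch can work around the hash error above by hashing each
# spaCy pipeline by identity instead of by contents.
#@st.cache(hash_funcs={spacy.language.Language: id})
#def load_spacy_models():
#    return spacy.load('en_core_web_lg'), spacy.load("en_ner_bc5cdr_md"), spacy.load('en_ner_bionlp13cg_md')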
st.success('All Models and Dependencies Loaded!')
disease_or_gard_id = st.text_input("Input a rare disease term or GARD ID.")
if disease_or_gard_id:
    df = extract_abs.streamlit_extraction(disease_or_gard_id, max_results, filtering,
                                          NER_pipeline, entity_classes,
                                          extract_diseases, GARD_dict, max_length,
                                          classify_model_vars)
    st.dataframe(df)
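# A hedged variant (an assumption): the PubMed search and extraction can be
# slow, so the call above could be wrapped in a spinner, mirroring the
# model-loading block:
#with st.spinner('Searching PubMed and extracting epidemiology data...'):
#    df = extract_abs.streamlit_extraction(disease_or_gard_id, max_results, filtering,
#                                          NER_pipeline, entity_classes,
#                                          extract_diseases, GARD_dict, max_length,
#                                          classify_model_vars)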
#st.dataframe(data=None, width=None, height=None)
# st.code(body, language="python")
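# A minimal export sketch (an assumption; requires a Streamlit version that
# ships st.download_button): let users download the extracted table as CSV.
#if disease_or_gard_id:
#    st.download_button("Download results as CSV",
#                       df.to_csv(index=False).encode('utf-8'),
#                       file_name="epidemiology_results.csv",
#                       mime="text/csv")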