import nltk
nltk.download('stopwords')
nltk.download('punkt')
import pandas as pd
# classify_abs is a dependency for extract_abs
import classify_abs
import extract_abs
#pd.set_option('display.max_colwidth', None)
import streamlit as st

########## Title for the Web App ##########
st.title("Epidemiology Extraction Pipeline for Rare Diseases")
st.subheader("National Center for Advancing Translational Sciences (NIH/NCATS)")

#### CHANGE SIDEBAR WIDTH ###
st.markdown(
    """ """,
    unsafe_allow_html=True,
)

########## Sidebar Controls ##########
# max_results is the maximum number of PubMed IDs to retrieve BEFORE filtering
max_results = st.sidebar.number_input("Maximum number of articles to find in PubMed",
                                      min_value=1, max_value=None, value=50)
filtering = st.sidebar.radio("What type of filtering would you like?",
                             ('Strict', 'Lenient', 'None'))
extract_diseases = st.sidebar.checkbox("Extract Rare Diseases", value=False)

########## Load Models and GARD Disease Dictionary ##########
with st.spinner('Loading Epidemiology Models and Dependencies...'):
    classify_model_vars = classify_abs.init_classify_model()
    NER_pipeline, entity_classes = extract_abs.init_NER_pipeline()
    GARD_dict, max_length = extract_abs.load_GARD_diseases()
st.success('All Models and Dependencies Loaded!')

########## Run the Extraction Pipeline on the User's Query ##########
disease_or_gard_id = st.text_input("Input a rare disease term or GARD ID.", value="Fellman syndrome")
if disease_or_gard_id:
    df = extract_abs.search_term_extraction(disease_or_gard_id, max_results, filtering,
                                            NER_pipeline, entity_classes, extract_diseases,
                                            GARD_dict, max_length, classify_model_vars)
    st.dataframe(df)
    st.balloons()

#st.dataframe(data=None, width=None, height=None)
# st.code(body, language="python")
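
# Note: a minimal way to launch this app locally (assuming Streamlit is installed
# and this script is saved as, e.g., app.py alongside the classify_abs and
# extract_abs modules) would be:
#   streamlit run app.py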