import streamlit as st
from pandas import DataFrame

# Local helper modules bundled with this repository
import run_segbot
from functionforDownloadButtons import download_button

st.set_page_config(
    page_title="Clinical segment generater",
    page_icon="πŸš‘",
    layout="wide"
)


def _max_width_():
    """Widen Streamlit's main content block up to 1400px."""
    max_width_str = "max-width: 1400px;"
    st.markdown(
        f"""
    <style>
    .reportview-container .main .block-container{{
        {max_width_str}
    }}
    </style>    
    """,
        unsafe_allow_html=True,
    )


#_max_width_()

#c30 = st.columns([1,])

#with c30:
# st.image("logo.png", width=400)
st.title("πŸš‘ Clinical segment generator")
st.header("")



with st.expander("ℹ️ - About this app", expanded=True):

    st.write(
        """
-   The *Clinical segment generator* app is an implementation of [our paper](https://journals.plos.org/digitalhealth/article?id=10.1371/journal.pdig.0000099).
-   It automatically splits Japanese sentences into smaller units, each representing a medical meaning.
        """
    )

    st.markdown("")

st.markdown("")
st.markdown("## πŸ“Œ Paste document")
@st.cache(allow_output_mutation=True)
def model_load():
    return run_segbot.setup()


model, fm, index = model_load()
with st.form(key="my_form"):


    # The narrow columns are spacers; c1 holds the options, c2 the text box.
    ce1, c1, ce2, c2, c3 = st.columns([0.07, 1, 0.07, 5, 0.07])
    with c1:
        ModelType = st.radio(
            "Choose the method of sentence split",
            ["fullstop & linebreak (Default)", "pySBD"],
            help="""
            At present, you can choose between 2 methods to split your text into sentences. 

            The fullstop & linebreak is naive and robust to noise, but has low accuracy.
            pySBD is more accurate, but more complex and less robust to noise.
            """,
        )

        # Map the UI choice to the split-method key passed to run_segbot.generate().
        if ModelType == "fullstop & linebreak (Default)":
            split_method = "fullstop"
        else:
            split_method = "pySBD"


    with c2:
        doc = st.text_area(
            "Paste your text below",
            height=510,
        )

        submit_button = st.form_submit_button(label="πŸ‘ Go to split!")


# Streamlit reruns the script on every interaction; stop here until the form
# above has been submitted.
if not submit_button:
    st.stop()
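# run_segbot.generate() is assumed to return the clinical segments of the
# pasted document as a list of strings.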

segments = run_segbot.generate(doc, model, fm, index, split_method)


st.markdown("## 🎈 Check & download results")

st.header("")


cs, c1, c2, c3, cLast = st.columns([2, 1.5, 1.5, 1.5, 2])
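# download_button comes from the local functionforDownloadButtons helper and is
# assumed to render an HTML download link for the segments in each format.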

with c1:
    download_button(segments, "Data.csv", "πŸ“₯ Download (.csv)")
with c2:
    download_button(segments, "Data.txt", "πŸ“₯ Download (.txt)")
with c3:
    download_button(segments, "Data.json", "πŸ“₯ Download (.json)")

st.header("")

df = DataFrame(segments, columns=["Segment"])
df.index += 1
# Add styling

#c1, c2, c3 = st.columns([1, 3, 1])

#with c2:
st.table(df)
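
# To run the app locally (assuming this file is saved as streamlit_app.py and
# the run_segbot / functionforDownloadButtons helpers are importable):
#   streamlit run streamlit_app.py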