import os
import streamlit as st
from rapidfuzz import process
import random

# with st.spinner("Initializing the environment... This may take up to 10 minutes at the start of each session."):
#     # Create a temporary placeholder for the message
#     loading_placeholder = st.empty()

#     # Show the info message only while the spinner is active
#     loading_placeholder.info("""
#     **Note:** This initialization is required at the start of each session.  
#     Once the app is ready, you can run multiple predictions without re-initializing by clicking the **Reset** button in the sidebar.
#     """)

#     # Run setup script if not already executed
#     if not os.path.exists(".setup_done"):
#         start_time = time.time()
#         os.system("bash setup.sh")
#         end_time = time.time()
#         print(f"Environment prepared in {end_time - start_time:.2f} seconds")
#         with open(".setup_done", "w") as f:
#             f.write("done")

# # ❌ Remove the info message after initialization is complete
# loading_placeholder.empty()


from run_prothgt_app import *
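# Expected to provide get_available_proteins() and generate_prediction_df(),
# which are called below.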

def convert_df(df):
    """Serialize a dataframe to UTF-8 CSV bytes for st.download_button."""
    return df.to_csv(index=False).encode('utf-8')

# Initialize session state variables
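# (Streamlit re-runs this script top to bottom on every interaction;
# st.session_state persists values such as predictions and UI flags
# across those reruns.)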
if 'predictions_df' not in st.session_state:
    st.session_state.predictions_df = None
if 'submitted' not in st.session_state:
    st.session_state.submitted = False
if 'previous_inputs' not in st.session_state:
    st.session_state.previous_inputs = None
if 'generating_predictions' not in st.session_state:
    st.session_state.generating_predictions = False

def reset_prediction_state():
    st.session_state.generating_predictions = False
    st.session_state.submitted = False
    st.session_state.predictions_df = None
    st.session_state.previous_inputs = None

def set_generating_predictions():
    # Lock the sidebar inputs and mark the form as submitted while predictions run
    st.session_state.generating_predictions = True
    st.session_state.submitted = True

with st.expander("🚀 Upcoming Features"):
    st.info("""
    We are actively working on enhancing the ProtHGT application with new capabilities:
    
    - **Real-time data retrieval for new proteins**: Currently, ProtHGT can only generate predictions for proteins that already exist in our knowledge graph. We are developing a new feature that will allow users to **predict functions for entirely new proteins starting from their sequences**. This will work by **retrieving relevant relationship data in real time from external source databases** (e.g., UniProt, STRING, and other biological repositories). The system will dynamically construct a knowledge graph for the query protein, incorporating its interactions, domains, pathways, and other biological associations before running function prediction. This approach will enable ProtHGT to analyze newly discovered or less-studied proteins even if they are not pre-annotated in our dataset.  
    - **Expanded embedding options**: Currently, this application represents proteins using **TAPE embeddings**, which serve as the initial numerical representations of protein sequences before being processed in the heterogeneous graph model. We are working on integrating **ProtT5** and **ESM-2** as alternative initial embeddings, allowing users to choose different sequence representations that may enhance performance for specific tasks. A detailed comparison of how these embeddings influence function prediction accuracy will be included in our upcoming publication.
    - **Knowledge graph visualization for interpretability**: To improve model explainability, we are developing an interactive **knowledge graph visualization** feature. This will allow users to explore the biological relationships that contributed to ProtHGT’s predictions for a given protein. Users will be able to inspect **protein interactions, GO annotations, domains, pathways, and other key connections** in a structured graphical format, making it easier to interpret and validate predictions.

    Stay tuned for updates and future publications!
    """)

with st.sidebar:

    disabled = st.session_state.generating_predictions

    st.markdown("""
        <style>
        .title {
            font-size: 35px;
            font-weight: bold;
            color: #424242;
            margin-bottom: 0px;
        }
        .subtitle {
            font-size: 20px;
            color: #424242;
            margin-bottom: 20px;
            line-height: 1.5;
        }
        .badges {
            margin-top: 10px;
            margin-bottom: 20px;
        }
        </style>
        
        <div class="title">ProtHGT</div>
        <div class="subtitle">Heterogeneous Graph Transformers for Automated Protein Function Prediction Using Knowledge Graphs and Language Models</div>
        <div class="badges">
            <a href="https://github.com/HUBioDataLab/ProtHGT">
                <img src="https://img.shields.io/badge/GitHub-black?logo=github" alt="github-repository">
            </a>
        </div>
    """, unsafe_allow_html=True)

    available_proteins = get_available_proteins()
    
    if 'example_proteins' not in st.session_state:
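        # Sample once and cache so the examples stay stable across reruns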
        st.session_state.example_proteins = random.sample(available_proteins, 5)

    selected_proteins = []
    
    # Add protein selection methods
    selection_method = st.radio(
        "Choose input method:",
        ["Use example query", "Search proteins", "Upload protein ID file"],
        disabled=disabled
    )

    if selection_method == "Use example query":
        selected_proteins = st.session_state.example_proteins
        st.write(f"Selected proteins:")
        st.markdown(
            f"""
            <div style="
                height: 150px; 
                overflow-y: scroll;
                border: 1px solid #ccc;
                border-radius: 4px;
                padding: 8px;
                margin-bottom: 16px;
                background-color: white;">
                {'<br>'.join(selected_proteins)}
            </div>
            """, 
            unsafe_allow_html=True
        )

    elif selection_method == "Search proteins":
        
        # User enters search term
        search_query = st.text_input(
            "1\\. Start typing a protein ID (at least 3 characters) and press Enter to see search results in the dropdown menu below (2)",
            "", 
            disabled=disabled
        )

        # Apply fuzzy search only if query length is >= 3
        filtered_proteins = []
        if len(search_query) >= 3:
            # Case-insensitive search: uppercase both the query and the candidate IDs.
            # With a mapping as choices, rapidfuzz's process.extract returns
            # (matched_value, score, key) tuples, so match[2] is the original ID.
            matches = process.extract(
                search_query.upper(),
                {p: p.upper() for p in available_proteins},
                limit=50  # keep only the top 50 matches
            )
            filtered_proteins = [match[2] for match in matches]

        with st.container():
            selected_proteins = st.multiselect(
                "2\\. Select proteins from search results",
                options=filtered_proteins,
                placeholder="Start typing a protein ID above (1) to see search results...",
                max_selections=100,
                disabled=disabled,
                key="protein_selector"
            )
            # Apply custom CSS to make container scrollable
            st.markdown("""
                <style>
                div[data-testid="stMultiSelect"] div:nth-child(2) {
                    max-height: 250px;
                    overflow-y: auto;
                }
                </style>
                """, unsafe_allow_html=True)

    else:  # Upload file option
        uploaded_file = st.file_uploader(
            "Upload a text file with UniProt IDs (one per line, max 100)*",
            type=['txt'],
            disabled=disabled
        )

        if uploaded_file:
            protein_list = [line.strip() for line in uploaded_file.read().decode('utf-8').splitlines()]

            # Remove empty lines, then deduplicate while preserving order (dict.fromkeys)
            protein_list = list(filter(None, protein_list))
            protein_list = list(dict.fromkeys(protein_list))
            
            # Check for proteins not in available_proteins
            proteins_not_found = [p for p in protein_list if p not in available_proteins]
            # Filter to keep only available proteins
            protein_list = [p for p in protein_list if p in available_proteins]

            if len(protein_list) > 100:
                st.error("Please upload a file with maximum 100 protein IDs.")
                selected_proteins = []
            else:
                selected_proteins = protein_list
                st.write(f"Loaded {len(selected_proteins)} proteins")

                if proteins_not_found:
                    st.warning(f"""
                    The following proteins were not found in our input knowledge graph and have been discarded: 
                    """)
                    with st.expander("View Discarded Proteins"):
                        # Create scrollable container with fixed height
                        st.markdown(
                            f"""
                            <div style="
                                height: 150px; 
                                overflow-y: scroll;
                                border: 1px solid #ccc;
                                border-radius: 4px;
                                padding: 8px;
                                margin-bottom: 16px;
                                background-color: white;">
                                {'<br>'.join(proteins_not_found)}
                            </div>
                            """, 
                            unsafe_allow_html=True
                        )
                        
                    st.warning(f"""
                    Currently, our system can only generate predictions for proteins that are already included in our knowledge graph. **Real-time retrieval of relationship data from external source databases is not yet supported.**  
                    We are actively working on integrating this capability in future updates. Stay tuned!
                    """)

    if selected_proteins:
        st.write(f"Total proteins selected: {len(selected_proteins)}")
        # Add download button
        proteins_text = '\n'.join(selected_proteins)
        st.download_button(
            label="Download Selected Proteins List",
            data=proteins_text,
            file_name="selected_proteins.txt",
            mime="text/plain",
            key="download_selected_proteins"
        )

        # Add GO category selection
        go_category_options = {
            'All Categories': None,
            'Molecular Function': 'GO_term_F',
            'Biological Process': 'GO_term_P',
            'Cellular Component': 'GO_term_C'
        }
        selected_go_category = st.selectbox(
            "Select GO Category for predictions",
            options=list(go_category_options.keys()),
            help="Choose which GO category to generate predictions for. Selecting 'All Categories' will generate predictions for all three categories.",
            disabled=disabled
        )

    st.warning("⚠️ Due to memory and computational constraints, the maximum number of proteins that can be processed at once is limited to 100 proteins. For larger datasets, please consider running the model locally using our [GitHub repository](https://github.com/HUBioDataLab/ProtHGT).")

    if selected_proteins and selected_go_category:

        button_disabled = st.session_state.submitted
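        # The button stays disabled after submission to prevent duplicate runs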

        if st.button("Generate Predictions", 
                    disabled=button_disabled, 
                    key="generate_predictions",
                    on_click=set_generating_predictions):
            pass

        # Create a tuple of current inputs to track changes
        current_inputs = (tuple(selected_proteins), selected_go_category)
        
        # Check if inputs have changed
        if st.session_state.previous_inputs != current_inputs:
            st.session_state.predictions_df = None
            st.session_state.submitted = False
            st.session_state.previous_inputs = current_inputs
        
if st.session_state.submitted:
    with st.spinner("Generating predictions..."):

        # Generate predictions only if not already in session state
        if st.session_state.predictions_df is None:

            # Model configurations are stored as YAML files under the models directory

            # Define data directory path
            data_dir = "data"
            models_dir = os.path.join(data_dir, "models")

            # Load model configuration
            model_config_paths = {
                'GO_term_F': os.path.join(models_dir, "prothgt-config-molecular-function.yaml"),
                'GO_term_P': os.path.join(models_dir, "prothgt-config-biological-process.yaml"),
                'GO_term_C': os.path.join(models_dir, "prothgt-config-cellular-component.yaml")
            }

            # Paths for model and data
            model_paths = {
                'GO_term_F': os.path.join(models_dir, "prothgt-model-molecular-function.pt"),
                'GO_term_P': os.path.join(models_dir, "prothgt-model-biological-process.pt"),
                'GO_term_C': os.path.join(models_dir, "prothgt-model-cellular-component.pt")
            }

            # Get the selected GO category
            go_category = go_category_options[selected_go_category]

            # If a specific category is selected, use that model path
            if go_category:
                model_config_paths = [model_config_paths[go_category]]
                model_paths = [model_paths[go_category]]
                go_categories = [go_category]
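            # Otherwise, run the models for all three GO categories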
            else:
                model_config_paths = [model_config_paths[cat] for cat in ['GO_term_F', 'GO_term_P', 'GO_term_C']]
                model_paths = [model_paths[cat] for cat in ['GO_term_F', 'GO_term_P', 'GO_term_C']]
                go_categories = ['GO_term_F', 'GO_term_P', 'GO_term_C']

            # Generate predictions
            predictions_df = generate_prediction_df(
                protein_ids=selected_proteins,
                model_paths=model_paths,
                model_config_paths=model_config_paths,
                go_category=go_categories
            )

            st.session_state.predictions_df = predictions_df
        
            # Reset only the generating_predictions flag to release the sidebar
            st.session_state.generating_predictions = False
            st.rerun()

        # Display and filter predictions
        st.success("Predictions generated successfully!")
        st.markdown("### Filter and View Predictions")
        
        # Create filters
        col1, col2, col3, col4 = st.columns(4)
        
        with col1:
            # Extract UniProt IDs from URLs for the selectbox
            uniprot_ids = st.session_state.predictions_df['UniProt_ID'].apply(
                lambda x: x.split('/')[-2]  # URL format .../uniprotkb/<ID>/entry, so [-2] is the ID
            ).unique().tolist()
            
            # Protein filter
            selected_protein = st.selectbox(
                "Filter by Protein",
                options=['All'] + sorted(uniprot_ids)
            )

        with col2:
            # GO category filter
            selected_category = st.selectbox(
                "Filter by GO Category",
                options=['All'] + sorted(st.session_state.predictions_df['GO_category'].unique().tolist())
            )

        with col3:
            # GO term filter
            go_term_filter = st.text_input(
                "Filter by GO Term ID",
                placeholder="e.g., GO:0003674",
                help="Enter a GO term ID to filter results"
            ).strip()
            
        with col4:
            # Probability threshold
            min_probability_threshold = st.slider(
                "Minimum Probability",
                min_value=0.0,
                max_value=1.0,
                value=0.5,
                step=0.05
            )

            max_probability_threshold = st.slider(
                "Maximum Probability",
                min_value=0.0,
                max_value=1.0,
                value=1.0,
                step=0.05
            )

        # Filter the dataframe using session state data
        filtered_df = st.session_state.predictions_df.copy()

        if selected_protein != 'All':
            filtered_df = filtered_df[filtered_df['UniProt_ID'].str.contains(selected_protein)]
                        
        if selected_category != 'All':
            filtered_df = filtered_df[filtered_df['GO_category'] == selected_category]

        if go_term_filter:
            filtered_df = filtered_df[filtered_df['GO_ID'].str.contains(go_term_filter, case=False, na=False)]
            
        filtered_df = filtered_df[(filtered_df['Probability'] >= min_probability_threshold) & 
                                (filtered_df['Probability'] <= max_probability_threshold)]

        # Custom CSS to increase table width and improve layout
        st.markdown("""
            <style>
                .stDataFrame {
                    width: 100%;
                }
                .stDataFrame > div {
                    width: 100%;
                }
                .stDataFrame [data-testid="stDataFrameResizable"] {
                    width: 100%;
                    min-width: 100%;
                }
                .pagination-info {
                    font-size: 14px;
                    color: #666;
                    padding: 10px 0;
                }
                .page-controls {
                    display: flex;
                    align-items: center;
                    justify-content: center;
                    gap: 20px;
                    padding: 10px 0;
                }
            </style>
        """, unsafe_allow_html=True)

        # Add pagination controls
        col1, col2, col3 = st.columns([2, 1, 2])
        with col2:
            rows_per_page = st.selectbox("Rows per page", [50, 100, 200, 500], index=1)

        total_rows = len(filtered_df)
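        # Ceiling division: the number of pages needed to display every row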
        total_pages = (total_rows + rows_per_page - 1) // rows_per_page

        # Initialize page number in session state
        if "page_number" not in st.session_state:
            st.session_state.page_number = 0

        # Clamp the page number in case filtering shrank the result set
        st.session_state.page_number = min(st.session_state.page_number, max(total_pages - 1, 0))

        # Calculate start and end indices for current page
        start_idx = st.session_state.page_number * rows_per_page
        end_idx = min(start_idx + rows_per_page, total_rows)

        st.dataframe(
            filtered_df.iloc[start_idx:end_idx],
            hide_index=True,
            use_container_width=True,
            column_config={
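                # LinkColumn renders stored URLs as clickable links; display_text is
                # a regex whose first capture group becomes the visible label.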
                "UniProt_ID": st.column_config.LinkColumn(
                    "UniProt ID",
                    help="Click to view protein in UniProt",
                    validate="^https://www\\.uniprot\\.org/uniprotkb/[A-Z0-9]+/entry$",
                    display_text="^https://www\\.uniprot\\.org/uniprotkb/([A-Z0-9]+)/entry$"
                ),
                "GO_ID": st.column_config.LinkColumn(
                    "GO ID",
                    help="Click to view GO term in QuickGO",
                    validate="^https://www\\.ebi\\.ac\\.uk/QuickGO/term/GO:[0-9]+$",
                    display_text="^https://www\\.ebi\\.ac\\.uk/QuickGO/term/(GO:[0-9]+)$"
                ),
                "Probability": st.column_config.ProgressColumn(
                    "Probability",
                    format="%.2f",
                    min_value=0,
                    max_value=1,
                ),
                "Protein": st.column_config.TextColumn(
                    "Protein",
                    help="Protein Name",
                ),
                "GO_category": st.column_config.TextColumn(
                    "GO Category",
                    help="Gene Ontology Category",
                ),
                "GO_term": st.column_config.TextColumn(
                    "GO Term",
                    help="Gene Ontology Term Name",
                ),
            }
        )
        # Pagination controls with better layout
        col1, col2, col3 = st.columns([1, 3, 1])
        with col1:
            if st.button("Previous", disabled=st.session_state.page_number == 0):
                st.session_state.page_number -= 1
                st.rerun()

        with col2:
            st.markdown(f"""
                <div class="pagination-info" style="text-align: center">
                    Page {st.session_state.page_number + 1} of {max(total_pages, 1)}<br>
                    Showing rows {start_idx + 1} to {end_idx} of {total_rows}
                </div>
            """, unsafe_allow_html=True)

        with col3:
            if st.button("Next", disabled=st.session_state.page_number >= total_pages - 1):
                st.session_state.page_number += 1
                st.rerun()


        # Download filtered results
        st.download_button(
            label="Download Filtered Results",
            data=convert_df(filtered_df),
            file_name="filtered_predictions.csv",
            mime="text/csv",
            key="download_filtered_predictions"
        )