vancauwe committed on
Commit 828f42d · 1 Parent(s): fe38b4f

feat: pages skeleton

src/main.py CHANGED
@@ -1,319 +1,16 @@
- import logging
- import os
-
- import pandas as pd
  import streamlit as st
- import folium
- from streamlit_folium import st_folium
-
- from transformers import pipeline
- from transformers import AutoModelForImageClassification
-
- from maps.obs_map import add_obs_map_header
- from classifier.classifier_image import add_classifier_header
- from datasets import disable_caching
- disable_caching()
-
- import whale_gallery as gallery
- import whale_viewer as viewer
- from input.input_handling import setup_input, check_inputs_are_set
- from input.input_handling import init_input_container_states, add_input_UI_elements, init_input_data_session_states
- from input.input_handling import dbg_show_observation_hashes
-
- from maps.alps_map import present_alps_map
- from maps.obs_map import present_obs_map
- from utils.st_logs import parse_log_buffer, init_logging_session_states
- from utils.workflow_ui import refresh_progress_display, init_workflow_viz, init_workflow_session_states
- from hf_push_observations import push_all_observations
-
- from classifier.classifier_image import cetacean_just_classify, cetacean_show_results_and_review, cetacean_show_results, init_classifier_session_states
- from classifier.classifier_hotdog import hotdog_classify
-
-
- # setup for the ML model on huggingface (our wrapper)
- os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
- #classifier_revision = '0f9c15e2db4d64e7f622ade518854b488d8d35e6'
- classifier_revision = 'main' # default/latest version
- # and the dataset of observations (hf dataset in our space)
- dataset_id = "Saving-Willy/temp_dataset"
- data_files = "data/train-00000-of-00001.parquet"
-
- USE_BASIC_MAP = False
- DEV_SIDEBAR_LIB = True
-
- # one toggle for all the extra debug text
- if "MODE_DEV_STATEFUL" not in st.session_state:
- st.session_state.MODE_DEV_STATEFUL = False
-

- # get a global var for logger accessor in this module
- LOG_LEVEL = logging.DEBUG
- g_logger = logging.getLogger(__name__)
- g_logger.setLevel(LOG_LEVEL)

- st.set_page_config(layout="wide")

- # initialise various session state variables
- init_logging_session_states() # logging init should be early
- init_workflow_session_states()
- init_input_data_session_states()
- init_input_container_states()
- init_workflow_viz()
- init_classifier_session_states()

-
- def main() -> None:
  """
- Main entry point to set up the streamlit UI and run the application.
-
- The organisation is as follows:
-
- 1. observation input (a new observations) is handled in the sidebar
- 2. the rest of the interface is organised in tabs:
-
- - cetean classifier
- - hotdog classifier
- - map to present the obersvations
- - table of recent log entries
- - gallery of whale images
-
- The majority of the tabs are instantiated from modules. Currently the two
- classifiers are still in-line here.
-
- """
-
- g_logger.info("App started.")
- g_logger.warning(f"[D] Streamlit version: {st.__version__}. Python version: {os.sys.version}")
-
- #g_logger.debug("debug message")
- #g_logger.info("info message")
- #g_logger.warning("warning message")
-
- # Streamlit app
- tab_inference, tab_hotdogs, tab_map, tab_coords, tab_log, tab_gallery = \
- st.tabs(["Cetecean classifier", "Hotdog classifier", "Map", "*:gray[Dev:coordinates]*", "Log", "Beautiful cetaceans"])
-
- # put this early so the progress indicator is at the top (also refreshed at end)
- refresh_progress_display()
-
- # create a sidebar, and parse all the input (returned as `observations` object)
- with st.sidebar:
- # layout handling
- add_input_UI_elements()
- # input elements (file upload, text input, etc)
- setup_input()
-
-
- with tab_map:
- # visual structure: a couple of toggles at the top, then the map inlcuding a
- # dropdown for tileset selection.
- add_obs_map_header()
- tab_map_ui_cols = st.columns(2)
- with tab_map_ui_cols[0]:
- show_db_points = st.toggle("Show Points from DB", True)
- with tab_map_ui_cols[1]:
- dbg_show_extra = st.toggle("Show Extra points (test)", False)
-
- if show_db_points:
- # show a nicer map, observations marked, tileset selectable.
- st_observation = present_obs_map(
- dataset_id=dataset_id, data_files=data_files,
- dbg_show_extra=dbg_show_extra)
-
- else:
- # development map.
- st_observation = present_alps_map()
-
-
- with tab_log:
- handler = st.session_state['handler']
- if handler is not None:
- records = parse_log_buffer(handler.buffer)
- st.dataframe(records[::-1], use_container_width=True,)
- st.info(f"Length of records: {len(records)}")
- else:
- st.error("⚠️ No log handler found!")
-
-
-
- with tab_coords:
- # the goal of this tab is to allow selection of the new obsvation's location by map click/adjust.
- st.markdown("Coming later! :construction:")
- st.markdown(
- """*The goal is to allow interactive definition for the coordinates of a new
- observation, by click/drag points on the map.*""")
-
-
- st.write("Click on the map to capture a location.")
- #m = folium.Map(location=visp_loc, zoom_start=7)
- mm = folium.Map(location=[39.949610, -75.150282], zoom_start=16)
- folium.Marker( [39.949610, -75.150282], popup="Liberty Bell", tooltip="Liberty Bell"
- ).add_to(mm)
-
- st_data2 = st_folium(mm, width=725)
- st.write("below the map...")
- if st_data2['last_clicked'] is not None:
- print(st_data2)
- st.info(st_data2['last_clicked'])
-
-
- with tab_gallery:
- # here we make a container to allow filtering css properties
- # specific to the gallery (otherwise we get side effects)
- tg_cont = st.container(key="swgallery")
- with tg_cont:
- gallery.render_whale_gallery(n_cols=4)
-
-
- # state handling re data_entry phases
- # 0. no data entered yet -> display the file uploader thing
- # 1. we have some images, but not all the metadata fields are done -> validate button shown, disabled
- # 2. all data entered -> validate button enabled
- # 3. validation button pressed, validation done -> enable the inference button.
- # - at this point do we also want to disable changes to the metadata selectors?
- # anyway, simple first.
-
- if st.session_state.workflow_fsm.is_in_state('doing_data_entry'):
- # can we advance state? - only when all inputs are set for all uploaded files
- all_inputs_set = check_inputs_are_set(debug=True, empty_ok=False)
- if all_inputs_set:
- st.session_state.workflow_fsm.complete_current_state()
- # -> data_entry_complete
- else:
- # button, disabled; no state change yet.
- st.sidebar.button(":gray[*Validate*]", disabled=True, help="Please fill in all fields.")
-
-
- if st.session_state.workflow_fsm.is_in_state('data_entry_complete'):
- # can we advance state? - only when the validate button is pressed
- if st.sidebar.button(":white_check_mark:[**Validate**]"):
- # create a dictionary with the submitted observation
- tab_log.info(f"{st.session_state.observations}")
- df = pd.DataFrame([obs.to_dict() for obs in st.session_state.observations.values()])
- #df = pd.DataFrame(st.session_state.observations, index=[0])
- with tab_coords:
- st.table(df)
- # there doesn't seem to be any actual validation here?? TODO: find validator function (each element is validated by the input box, but is there something at the whole image level?)
- # hmm, maybe it should actually just be "I'm done with data entry"
- st.session_state.workflow_fsm.complete_current_state()
- # -> data_entry_validated
-
- # state handling re inference phases (tab_inference)
- # 3. validation button pressed, validation done -> enable the inference button.
- # 4. inference button pressed -> ML started. | let's cut this one out, since it would only
- # make sense if we did it as an async action
- # 5. ML done -> show results, and manual validation options
- # 6. manual validation done -> enable the upload buttons
- #
- with tab_inference:
- # inside the inference tab, on button press we call the model (on huggingface hub)
- # which will be run locally.
- # - the model predicts the top 3 most likely species from the input image
- # - these species are shown
- # - the user can override the species prediction using the dropdown
- # - an observation is uploaded if the user chooses.
-
-
- if st.session_state.MODE_DEV_STATEFUL:
- dbg_show_observation_hashes()
-
- add_classifier_header()
- # if we are before data_entry_validated, show the button, disabled.
- if not st.session_state.workflow_fsm.is_in_state_or_beyond('data_entry_validated'):
- tab_inference.button(":gray[*Identify with cetacean classifier*]", disabled=True,
- help="Please validate inputs before proceeding",
- key="button_infer_ceteans")
-
- if st.session_state.workflow_fsm.is_in_state('data_entry_validated'):
- # show the button, enabled. If pressed, we start the ML model (And advance state)
- if tab_inference.button("Identify with cetacean classifier",
- key="button_infer_ceteans"):
- cetacean_classifier = AutoModelForImageClassification.from_pretrained(
- "Saving-Willy/cetacean-classifier",
- revision=classifier_revision,
- trust_remote_code=True)
-
- cetacean_just_classify(cetacean_classifier)
- st.session_state.workflow_fsm.complete_current_state()
- # trigger a refresh too (refreshhing the prog indicator means the script reruns and
- # we can enter the next state - visualising the results / review)
- # ok it doesn't if done programmatically. maybe interacting with teh button? check docs.
- refresh_progress_display()
- #TODO: validate this doesn't harm performance adversely.
- st.rerun()
-
- elif st.session_state.workflow_fsm.is_in_state('ml_classification_completed'):
- # show the results, and allow manual validation
- st.markdown("""### Inference results and manual validation/adjustment """)
- if st.session_state.MODE_DEV_STATEFUL:
- s = ""
- for k, v in st.session_state.whale_prediction1.items():
- s += f"* Image {k}: {v}\n"
-
- st.markdown(s)
-
- # add a button to advance the state
- if st.button("Confirm species predictions", help="Confirm that all species are selected correctly"):
- st.session_state.workflow_fsm.complete_current_state()
- # -> manual_inspection_completed
- st.rerun()
-
- cetacean_show_results_and_review()
-
- elif st.session_state.workflow_fsm.is_in_state('manual_inspection_completed'):
- # show the ML results, and allow the user to upload the observation
- st.markdown("""### Inference Results (after manual validation) """)
-
-
- if st.button("Upload all observations to THE INTERNET!"):
- # let this go through to the push_all func, since it just reports to log for now.
- push_all_observations(enable_push=False)
- st.session_state.workflow_fsm.complete_current_state()
- # -> data_uploaded
- st.rerun()
-
- cetacean_show_results()
-
- elif st.session_state.workflow_fsm.is_in_state('data_uploaded'):
- # the data has been sent. Lets show the observations again
- # but no buttons to upload (or greyed out ok)
- st.markdown("""### Observation(s) uploaded - thank you!""")
- cetacean_show_results()
-
- st.divider()
- #df = pd.DataFrame(st.session_state.observations, index=[0])
- df = pd.DataFrame([obs.to_dict() for obs in st.session_state.observations.values()])
- st.table(df)
-
- # didn't decide what the next state is here - I think we are in the terminal state.
- #st.session_state.workflow_fsm.complete_current_state()
-
-
- # inside the hotdog tab, on button press we call a 2nd model (totally unrelated at present, just for demo
- # purposes, an hotdog image classifier) which will be run locally.
- # - this model predicts if the image is a hotdog or not, and returns probabilities
- # - the input image is the same as for the ceteacean classifier - defined in the sidebar
- tab_hotdogs.title("Hot Dog? Or Not?")
- tab_hotdogs.write("""
- *Run alternative classifer on input images. Here we are using
- a binary classifier - hotdog or not - from
- huggingface.co/julien-c/hotdog-not-hotdog.*""")
-
- if tab_hotdogs.button("Get Hotdog Prediction"):
-
- pipeline_hot_dog = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
-
- if st.session_state.image is None:
- st.info("Please upload an image first.")
- #st.info(str(observations.to_dict()))
-
- else:
- hotdog_classify(pipeline_hot_dog, tab_hotdogs)
-
-
- # after all other processing, we can show the stage/state
- refresh_progress_display()
-
-
- if __name__ == "__main__":
- main()
  import streamlit as st

+ st.set_page_config(
+ page_title="Home",
+ page_icon="🐳",
+ )

+ st.write("# Welcome to Cetacean Research Data Infrastructure! 🐬˚˖𓍢ִ໋ 🐋✧˚.⋆")

+ st.sidebar.success("Here are the pages.")

+ st.markdown(
  """
+ About: blablabla
+ """
+ )
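
With this change, src/main.py becomes the landing page of a Streamlit multipage app: scripts placed under src/pages/ next to the entry script are discovered automatically and listed in the sidebar when the app is started with "streamlit run src/main.py". As a rough sketch of where the placeholder text could go (an assumption, not part of this commit; the About wording is illustrative only):

# Sketch (assumption): a fleshed-out version of the new src/main.py landing page.
# Streamlit picks up src/pages/1_home.py ... 4_requests.py automatically and
# shows them in the sidebar; none of the text below is part of the commit.
import streamlit as st

st.set_page_config(page_title="Home", page_icon="🐳")

st.write("# Welcome to Cetacean Research Data Infrastructure! 🐬")
st.sidebar.success("Here are the pages.")

st.markdown(
    """
    About: this space gathers cetacean observations, classifies the uploaded
    images, and presents the results on a map - see the pages in the sidebar.
    """
)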
src/old_main.py ADDED
@@ -0,0 +1,319 @@
(The 319 added lines are identical to the former contents of src/main.py, shown in full in the removed block above; the file is carried over unchanged as old_main.py.)
src/pages/1_home.py ADDED
File without changes
src/pages/2_classifiers.py ADDED
File without changes
src/pages/3_benchmarking.py ADDED
File without changes
src/pages/4_requests.py ADDED
File without changes
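
The four page scripts above are committed as empty placeholders. As a sketch of the direction the skeleton suggests (an assumption; the title, icon, and reuse of the helper below are illustrative and not part of the commit), a page such as src/pages/2_classifiers.py could start from the helpers that now sit alongside old_main.py:

# Sketch (assumption): a minimal skeleton for src/pages/2_classifiers.py.
# add_classifier_header() is the same helper that old_main.py imports; the full
# classification workflow is not wired up in this commit.
import streamlit as st
from classifier.classifier_image import add_classifier_header

st.set_page_config(page_title="Classifiers", page_icon="🐳")

add_classifier_header()
st.markdown("Coming later! :construction:")
st.sidebar.success("Here are the pages.")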