vancauwe committed on
Commit
85c8b0f
·
1 Parent(s): 5c07215

fix: states over pages

Browse files
.env.dist DELETED
File without changes
src/classifier/classifier_image.py CHANGED
@@ -107,7 +107,6 @@ def cetacean_show_results_and_review() -> None:
107
  print(f"[D] {o:3} pred1: {pred1:30} | {hash}")
108
  ix = viewer.WHALE_CLASSES.index(pred1) if pred1 in viewer.WHALE_CLASSES else None
109
  selected_class = st.selectbox(f"Species for observation {str(o)}", viewer.WHALE_CLASSES, index=ix)
110
-
111
  _observation.set_selected_class(selected_class)
112
 
113
  # store the elements of the observation that will be transmitted (not image)
@@ -175,69 +174,3 @@ def cetacean_show_results():
175
  viewer.display_whale(whale_classes, i)
176
  o += 1
177
  col = (col + 1) % row_size
178
-
179
-
180
-
181
-
182
- # func to do all in one
183
- def cetacean_classify_show_and_review(cetacean_classifier):
184
- """Cetacean classifier using the saving-willy model from Saving Willy Hugging Face space.
185
- For each image in the session state, classify the image and display the top 3 predictions.
186
- Args:
187
- cetacean_classifier ([type]): saving-willy model from Saving Willy Hugging Face space
188
- """
189
- raise DeprecationWarning("This function is deprecated. Use individual steps instead")
190
- images = st.session_state.images
191
- observations = st.session_state.observations
192
- hashes = st.session_state.image_hashes
193
- batch_size, row_size, page = gridder(hashes)
194
-
195
- grid = st.columns(row_size)
196
- col = 0
197
- o=1
198
- for hash in hashes:
199
- image = images[hash]
200
-
201
- with grid[col]:
202
- st.image(image, use_column_width=True)
203
- observation = observations[hash].to_dict()
204
- # run classifier model on `image`, and persistently store the output
205
- out = cetacean_classifier(image) # get top 3 matches
206
- st.session_state.whale_prediction1[hash] = out['predictions'][0]
207
- st.session_state.classify_whale_done[hash] = True
208
- msg = f"[D]2 classify_whale_done for {hash}: {st.session_state.classify_whale_done[hash]}, whale_prediction1: {st.session_state.whale_prediction1[hash]}"
209
- g_logger.info(msg)
210
-
211
- # dropdown for selecting/overriding the species prediction
212
- if not st.session_state.classify_whale_done[hash]:
213
- selected_class = st.sidebar.selectbox("Species", viewer.WHALE_CLASSES,
214
- index=None, placeholder="Species not yet identified...",
215
- disabled=True)
216
- else:
217
- pred1 = st.session_state.whale_prediction1[hash]
218
- # get index of pred1 from WHALE_CLASSES, none if not present
219
- print(f"[D] pred1: {pred1}")
220
- ix = viewer.WHALE_CLASSES.index(pred1) if pred1 in viewer.WHALE_CLASSES else None
221
- selected_class = st.selectbox(f"Species for observation {str(o)}", viewer.WHALE_CLASSES, index=ix)
222
-
223
- observation['predicted_class'] = selected_class
224
- if selected_class != st.session_state.whale_prediction1[hash]:
225
- observation['class_overriden'] = selected_class
226
-
227
- st.session_state.public_observation = observation
228
- st.button(f"Upload observation {str(o)} to THE INTERNET!", on_click=push_observations)
229
- # TODO: the metadata only fills properly if `validate` was clicked.
230
- st.markdown(metadata2md())
231
-
232
- msg = f"[D] full observation after inference: {observation}"
233
- g_logger.debug(msg)
234
- print(msg)
235
- # TODO: add a link to more info on the model, next to the button.
236
-
237
- whale_classes = out['predictions'][:]
238
- # render images for the top 3 (that is what the model api returns)
239
- st.markdown(f"Top 3 Predictions for observation {str(o)}")
240
- for i in range(len(whale_classes)):
241
- viewer.display_whale(whale_classes, i)
242
- o += 1
243
- col = (col + 1) % row_size
 
107
  print(f"[D] {o:3} pred1: {pred1:30} | {hash}")
108
  ix = viewer.WHALE_CLASSES.index(pred1) if pred1 in viewer.WHALE_CLASSES else None
109
  selected_class = st.selectbox(f"Species for observation {str(o)}", viewer.WHALE_CLASSES, index=ix)
 
110
  _observation.set_selected_class(selected_class)
111
 
112
  # store the elements of the observation that will be transmitted (not image)
 
174
  viewer.display_whale(whale_classes, i)
175
  o += 1
176
  col = (col + 1) % row_size
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/pages/3_🔥_classifiers.py CHANGED
@@ -7,7 +7,7 @@ st.set_page_config(
7
  page_icon="🔥",
8
  )
9
 
10
- from utils.st_logs import parse_log_buffer, init_logging_session_states
11
 
12
  from transformers import pipeline
13
  from transformers import AutoModelForImageClassification
@@ -25,12 +25,14 @@ from classifier.classifier_hotdog import hotdog_classify
25
 
26
  # setup for the ML model on huggingface (our wrapper)
27
  os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
 
 
 
28
 
29
  ############################################################
30
- # TO- DO: MAKE ENV FILE
31
  #classifier_revision = '0f9c15e2db4d64e7f622ade518854b488d8d35e6'
32
  classifier_revision = 'main' # default/latest version
33
- # and the dataset of observations (hf dataset in our space)
34
  dataset_id = "Saving-Willy/temp_dataset"
35
  data_files = "data/train-00000-of-00001.parquet"
36
  ############################################################
@@ -100,7 +102,7 @@ if st.session_state.workflow_fsm.is_in_state('data_entry_validated'):
100
  if tab_inference.button("Identify with cetacean classifier",
101
  key="button_infer_ceteans"):
102
  cetacean_classifier = AutoModelForImageClassification.from_pretrained(
103
- "Saving-Willy/cetacean-classifier",
104
  revision=classifier_revision,
105
  trust_remote_code=True)
106
 
@@ -161,7 +163,7 @@ elif st.session_state.workflow_fsm.is_in_state('data_uploaded'):
161
  # didn't decide what the next state is here - I think we are in the terminal state.
162
  #st.session_state.workflow_fsm.complete_current_state()
163
 
164
-
165
  # inside the hotdog tab, on button press we call a 2nd model (totally unrelated at present, just for demo
166
  # purposes, an hotdog image classifier) which will be run locally.
167
  # - this model predicts if the image is a hotdog or not, and returns probabilities
 
7
  page_icon="🔥",
8
  )
9
 
10
+ from utils.st_logs import init_logging_session_states
11
 
12
  from transformers import pipeline
13
  from transformers import AutoModelForImageClassification
 
25
 
26
  # setup for the ML model on huggingface (our wrapper)
27
  os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
28
+ # one toggle for all the extra debug text
29
+ if "MODE_DEV_STATEFUL" not in st.session_state:
30
+ st.session_state.MODE_DEV_STATEFUL = False
31
 
32
  ############################################################
33
+ classifier_name = "Saving-Willy/cetacean-classifier"
34
  #classifier_revision = '0f9c15e2db4d64e7f622ade518854b488d8d35e6'
35
  classifier_revision = 'main' # default/latest version
 
36
  dataset_id = "Saving-Willy/temp_dataset"
37
  data_files = "data/train-00000-of-00001.parquet"
38
  ############################################################
 
102
  if tab_inference.button("Identify with cetacean classifier",
103
  key="button_infer_ceteans"):
104
  cetacean_classifier = AutoModelForImageClassification.from_pretrained(
105
+ classifier_name,
106
  revision=classifier_revision,
107
  trust_remote_code=True)
108
 
 
163
  # didn't decide what the next state is here - I think we are in the terminal state.
164
  #st.session_state.workflow_fsm.complete_current_state()
165
 
166
+
167
  # inside the hotdog tab, on button press we call a 2nd model (totally unrelated at present, just for demo
168
  # purposes, an hotdog image classifier) which will be run locally.
169
  # - this model predicts if the image is a hotdog or not, and returns probabilities
src/whale_viewer.py CHANGED
@@ -157,4 +157,6 @@ def display_whale(whale_classes:List[str], i:int, viewcontainer:DeltaGenerator=N
157
  image_path = os.path.join(current_dir, "src/images/references/")
158
  image = Image.open(image_path + df_whale_img_ref.loc[whale_classes[i], "WHALE_IMAGES"])
159
 
160
- viewcontainer.image(image, caption=df_whale_img_ref.loc[whale_classes[i], "WHALE_REFERENCES"], use_column_width=True)
 
 
 
157
  image_path = os.path.join(current_dir, "src/images/references/")
158
  image = Image.open(image_path + df_whale_img_ref.loc[whale_classes[i], "WHALE_IMAGES"])
159
 
160
+ viewcontainer.image(image,
161
+ # caption=df_whale_img_ref.loc[whale_classes[i], "WHALE_REFERENCES"],
162
+ use_column_width=True)