Tomatillo committed
Commit 664215b · verified · 1 Parent(s): 9ce355a

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +78 -66
src/streamlit_app.py CHANGED
@@ -114,54 +114,39 @@ if run_button:
     elif is_multisensor and not sensor_select:
         st.session_state.error = "Please select a sensor or 'All sensors' before generating CSV."
     else:
-        with st.spinner("Processing samples..."):
-            try:
-                target_classes = parse_classes(classes_input)
-                client = init_client(api_key)
-                metrics = []
-                for sample in samples_objects:
-                    try:
-                        label = client.get_label(sample.uuid)
-                        labelset = getattr(label, 'labelset', '') or ''
-                        labeled_by = getattr(label, 'created_by', '') or ''
-                        reviewed_by = getattr(label, 'reviewed_by', '') or ''
-                        if is_multisensor and sensor_select and sensor_select != 'All sensors':
-                            frames_list = export_sensor_frames_and_annotations(label, sensor_select)
-                            sensor_val = sensor_select
-                            num_frames = len(frames_list)
-                            total_annotations = sum(len(f['annotations']) for f in frames_list)
-                            matching_annotations = sum(
-                                1
-                                for f in frames_list
-                                for ann in f['annotations']
-                                if getattr(ann, 'category_id', None) in target_classes
-                            )
-                        elif is_multisensor and (not sensor_select or sensor_select == 'All sensors'):
-                            all_sensor_frames = export_all_sensor_frames_and_annotations(label)
-                            for sensor_name, frames_list in all_sensor_frames.items():
-                                num_frames = len(frames_list)
-                                total_annotations = sum(len(f['annotations']) for f in frames_list)
-                                matching_annotations = sum(
-                                    1
-                                    for f in frames_list
-                                    for ann in f['annotations']
-                                    if getattr(ann, 'category_id', None) in target_classes
-                                )
-                                metrics.append({
-                                    'name': getattr(sample, 'name', sample.uuid),
-                                    'uuid': sample.uuid,
-                                    'labelset': labelset,
-                                    'sensor': sensor_name,
-                                    'num_frames': num_frames,
-                                    'total_annotations': total_annotations,
-                                    'matching_annotations': matching_annotations,
-                                    'labeled_by': labeled_by,
-                                    'reviewed_by': reviewed_by
-                                })
-                            continue
-                        else:
-                            frames_list = export_frames_and_annotations(label)
-                            sensor_val = ''
+        # Show loader/status message while checking dataset type and generating CSV
+        status_ctx = None
+        try:
+            status_ctx = st.status("Checking dataset type...", expanded=True)
+        except AttributeError:
+            st.info("Checking dataset type...")
+        try:
+            target_classes = parse_classes(classes_input)
+            client = init_client(api_key)
+            metrics = []
+            # Update loader after dataset type check
+            if status_ctx is not None:
+                status_ctx.update(label="Dataset type checked. Processing samples...", state="running")
+            for sample in samples_objects:
+                try:
+                    label = client.get_label(sample.uuid)
+                    labelset = getattr(label, 'labelset', '') or ''
+                    labeled_by = getattr(label, 'created_by', '') or ''
+                    reviewed_by = getattr(label, 'reviewed_by', '') or ''
+                    if is_multisensor and sensor_select and sensor_select != 'All sensors':
+                        frames_list = export_sensor_frames_and_annotations(label, sensor_select)
+                        sensor_val = sensor_select
+                        num_frames = len(frames_list)
+                        total_annotations = sum(len(f['annotations']) for f in frames_list)
+                        matching_annotations = sum(
+                            1
+                            for f in frames_list
+                            for ann in f['annotations']
+                            if getattr(ann, 'category_id', None) in target_classes
+                        )
+                    elif is_multisensor and (not sensor_select or sensor_select == 'All sensors'):
+                        all_sensor_frames = export_all_sensor_frames_and_annotations(label)
+                        for sensor_name, frames_list in all_sensor_frames.items():
                             num_frames = len(frames_list)
                             total_annotations = sum(len(f['annotations']) for f in frames_list)
                             matching_annotations = sum(
@@ -170,25 +155,52 @@ if run_button:
                                 for ann in f['annotations']
                                 if getattr(ann, 'category_id', None) in target_classes
                             )
-                        metrics.append({
-                            'name': getattr(sample, 'name', sample.uuid),
-                            'uuid': sample.uuid,
-                            'labelset': labelset,
-                            'sensor': sensor_val if is_multisensor else '',
-                            'num_frames': num_frames,
-                            'total_annotations': total_annotations,
-                            'matching_annotations': matching_annotations,
-                            'labeled_by': labeled_by,
-                            'reviewed_by': reviewed_by
-                        })
-                    except Exception as e:
+                            metrics.append({
+                                'name': getattr(sample, 'name', sample.uuid),
+                                'uuid': sample.uuid,
+                                'labelset': labelset,
+                                'sensor': sensor_name,
+                                'num_frames': num_frames,
+                                'total_annotations': total_annotations,
+                                'matching_annotations': matching_annotations,
+                                'labeled_by': labeled_by,
+                                'reviewed_by': reviewed_by
+                            })
                         continue
-                if not metrics:
-                    st.session_state.error = "No metrics could be generated for the dataset."
-                else:
-                    st.session_state.csv_content = generate_csv(metrics, dataset_identifier)
-            except Exception as e:
-                st.session_state.error = f"An error occurred: {e}"
+                    else:
+                        frames_list = export_frames_and_annotations(label)
+                        sensor_val = ''
+                    num_frames = len(frames_list)
+                    total_annotations = sum(len(f['annotations']) for f in frames_list)
+                    matching_annotations = sum(
+                        1
+                        for f in frames_list
+                        for ann in f['annotations']
+                        if getattr(ann, 'category_id', None) in target_classes
+                    )
+                    metrics.append({
+                        'name': getattr(sample, 'name', sample.uuid),
+                        'uuid': sample.uuid,
+                        'labelset': labelset,
+                        'sensor': sensor_val if is_multisensor else '',
+                        'num_frames': num_frames,
+                        'total_annotations': total_annotations,
+                        'matching_annotations': matching_annotations,
+                        'labeled_by': labeled_by,
+                        'reviewed_by': reviewed_by
+                    })
+                except Exception as e:
+                    continue
+            if not metrics:
+                st.session_state.error = "No metrics could be generated for the dataset."
+            else:
+                st.session_state.csv_content = generate_csv(metrics, dataset_identifier)
+                if status_ctx is not None:
+                    status_ctx.update(label="CSV generated!", state="complete")
+        except Exception as e:
+            st.session_state.error = f"An error occurred: {e}"
+            if status_ctx is not None:
+                status_ctx.update(label="Error occurred.", state="error")
 
 if st.session_state.error:
     st.error(st.session_state.error)
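
The core of this commit is swapping st.spinner("Processing samples...") for an st.status box whose label and state are updated as the run progresses, with an st.info fallback for Streamlit versions that predate st.status (hence the AttributeError guard and the repeated "status_ctx is not None" checks). The sketch below isolates that pattern on its own, as a rough illustration rather than code from this repository: run_with_status and work_fn are hypothetical names standing in for the sample-processing loop above.

import streamlit as st

def run_with_status(work_fn, start_label="Checking dataset type..."):
    """Run work_fn() behind an st.status box, falling back to a static
    st.info message on Streamlit releases that do not expose st.status."""
    status_ctx = None
    try:
        # st.status returns a container whose label/state can be updated later
        status_ctx = st.status(start_label, expanded=True)
    except AttributeError:
        # Older Streamlit: no st.status attribute, show a plain message instead
        st.info(start_label)
    try:
        if status_ctx is not None:
            status_ctx.update(label="Processing samples...", state="running")
        result = work_fn()
        if status_ctx is not None:
            status_ctx.update(label="CSV generated!", state="complete")
        return result
    except Exception as exc:
        if status_ctx is not None:
            status_ctx.update(label="Error occurred.", state="error")
        st.session_state.error = f"An error occurred: {exc}"
        return None

Unlike st.spinner, which disappears as soon as its with-block exits, an st.status container stays on the page and can be flipped to a "complete" or "error" state afterwards, which is why the new code updates it at each stage instead of wrapping the work in a context manager.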