mobile friendly
- app.py +39 -30
- requirements.txt +1 -1
app.py
CHANGED
@@ -24,6 +24,8 @@ from streamlit_cropper import st_cropper
 from streamlit_drawable_canvas import st_canvas
 from streamlit_image_select import image_select
 
+from streamlit_javascript import st_javascript
+
 
 def get_memory_usage():
     process = psutil.Process()
@@ -199,7 +201,7 @@ def prepare_image(image, pad=True, new_size=(1333, 1333)):
     return new_scaled_size, image
 
 # Function to display various options for image annotation
-def display_options(image, score_threshold):
+def display_options(image, score_threshold, is_mobile):
     col1, col2, col3, col4, col5 = st.columns(5)
     with col1:
         write_class = st.toggle("Write Class", value=True)
@@ -226,6 +228,11 @@ def display_options(image, score_threshold):
         score_threshold=score_threshold, write_score=write_score, resize=True, return_image=True, axis=True
     )
 
+    if is_mobile is False:
+        width = 1000
+    else:
+        width = 300
+
     # Display the original and annotated images side by side
     image_comparison(
         img1=annotated_image,
@@ -233,11 +240,11 @@ def display_options(image, score_threshold):
         label1="Annotated Image",
         label2="Original Image",
         starting_position=99,
-        width=
+        width=width,
     )
 
 # Function to perform inference on the uploaded image using the loaded models
-def perform_inference(model_object, model_arrow, image, score_threshold):
+def perform_inference(model_object, model_arrow, image, score_threshold, is_mobile):
     _, uploaded_image = prepare_image(image, pad=False)
 
     img_tensor = F.to_tensor(prepare_image(image.convert('RGB'))[1])
@@ -245,7 +252,8 @@ def perform_inference(model_object, model_arrow, image, score_threshold):
     # Display original image
     if 'image_placeholder' not in st.session_state:
         image_placeholder = st.empty()  # Create an empty placeholder
-
+    if is_mobile is False:
+        image_placeholder.image(uploaded_image, caption='Original Image', width=1000)
 
     # Prediction
     _, st.session_state.prediction = full_prediction(model_object, model_arrow, img_tensor, score_threshold=score_threshold, iou_threshold=0.5, distance_treshold=30)
@@ -267,27 +275,21 @@ def perform_inference(model_object, model_arrow, image, score_threshold):
 def get_image(uploaded_file):
     return Image.open(uploaded_file).convert('RGB')
 
+
+import streamlit as st
+from streamlit_js_eval import streamlit_js_eval
 
 def main():
+    st.set_page_config(layout="wide")
 
-
-    mobile_detect_js = """
-    <script>
-    const isMobile = /Mobi|Android/i.test(navigator.userAgent);
-    document.cookie = `isMobile=${isMobile}; path=/`;
-    </script>
-    """
-    components.html(mobile_detect_js)
-
-    # Check if the user is on a mobile device
-    is_mobile = False
-    if 'isMobile' in st.session_state:
-        is_mobile = st.session_state.isMobile == 'true'
-        st.set_page_config(layout="centered")
-    else:
-        st.set_page_config(layout="wide")
-
+    screen_width = streamlit_js_eval(js_expressions='screen.width', want_output=True, key='SCR')
 
+    if screen_width is not None and screen_width < 800:
+        is_mobile = True
+        print('Mobile version')
+    else:
+        is_mobile = False
+        print('Desktop version')
 
     # Add your company logo banner
     if is_mobile:
@@ -295,6 +297,14 @@ def main():
     else:
         st.image("./images/banner.png", use_column_width=True)
 
+    # Use is_mobile flag in your logic
+    if is_mobile:
+        st.title(f"Welcome on the mobile version of the app")
+    else:
+        st.title(f"Welcome on the desktop version of the app")
+
+
+
     # Sidebar content
     st.sidebar.header("This BPMN AI model recognition is proposed by: \n ELCA in collaboration with EPFL.")
     st.sidebar.subheader("Instructions:")
@@ -313,8 +323,6 @@ def main():
 
     st.sidebar.subheader("You can close this sidebar")
 
-    # Set the title of the app
-    st.title("BPMN recognition by AI demo")
 
     # Display current memory usage
     memory_usage = get_memory_usage()
@@ -358,15 +366,16 @@ def main():
     if uploaded_file is not None:
         with st.spinner('Waiting for image display...'):
             original_image = get_image(uploaded_file)
-            if is_mobile:
-                cropped_image = st_cropper(original_image, realtime_update=True, box_color='#0000FF', should_resize_image=True, default_coords=(30, original_image.size[0]-30, 30, original_image.size[1]-30))
-                st.image(cropped_image, caption="Cropped Image", use_column_width=False, width=500)
-            else:
+            if is_mobile is False:
                 col1, col2 = st.columns(2)
                 with col1:
                     cropped_image = st_cropper(original_image, realtime_update=True, box_color='#0000FF', should_resize_image=True, default_coords=(30, original_image.size[0]-30, 30, original_image.size[1]-30))
                 with col2:
                     st.image(cropped_image, caption="Cropped Image", use_column_width=False, width=500)
+            else:
+                resized_image = original_image
+                st.image(resized_image, caption="Image", use_column_width=False, width=300)
+                cropped_image = original_image
 
         if cropped_image is not None:
             col1, col2, col3 = st.columns(3)
@@ -376,19 +385,19 @@ def main():
            if st.button("Launch Prediction"):
                st.session_state.crop_image = cropped_image
                with st.spinner('Processing...'):
-                    perform_inference(model_object, model_arrow, st.session_state.crop_image, score_threshold)
+                    perform_inference(model_object, model_arrow, st.session_state.crop_image, score_threshold, is_mobile)
                st.balloons()
 
    if 'prediction' in st.session_state and uploaded_file is not None:
        with st.spinner('Waiting for result display...'):
-            display_options(st.session_state.crop_image, score_threshold)
+            display_options(st.session_state.crop_image, score_threshold, is_mobile)
 
        with st.spinner('Waiting for BPMN modeler...'):
            col1, col2 = st.columns(2)
            with col1:
                st.session_state.scale = st.slider("Set scale for XML file", min_value=0.1, max_value=2.0, value=1.0, step=0.1)
                st.session_state.bpmn_xml = create_XML(st.session_state.prediction.copy(), st.session_state.text_mapping, st.session_state.scale)
-            display_bpmn_xml(st.session_state.bpmn_xml)
+            display_bpmn_xml(st.session_state.bpmn_xml, is_mobile=is_mobile)
 
    gc.collect()
requirements.txt
CHANGED
@@ -9,4 +9,4 @@ streamlit-drawable-canvas==0.9.3
 streamlit_image_select
 opencv-python==4.9.0.80
 gdown
-
+streamlit_js_eval
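
Note: the mobile check this change switches to can be exercised on its own. A minimal sketch, assuming only the streamlit_js_eval package added above and the 800 px breakpoint used in the diff:

import streamlit as st
from streamlit_js_eval import streamlit_js_eval

st.set_page_config(layout="wide")

# Evaluate 'screen.width' in the visitor's browser; the call returns None on the
# first script run and the numeric width once the value comes back from the client.
screen_width = streamlit_js_eval(js_expressions='screen.width', want_output=True, key='SCR')

# Anything narrower than 800 px is treated as mobile (threshold taken from the diff).
is_mobile = screen_width is not None and screen_width < 800

if is_mobile:
    st.title("Welcome on the mobile version of the app")
else:
    st.title("Welcome on the desktop version of the app")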