inference space trial
- app.py +124 -0
- lambdas.py +85 -0
- requirements.txt +3 -0
app.py
ADDED
@@ -0,0 +1,124 @@
import gradio as gr
from pyzbar.pyzbar import decode
from lambdas import upload_models, predict
import base64
from io import BytesIO
from PIL import Image

DEBUG = True

prefer_frontal_cam_html = """
<script>
const originalGetUserMedia = navigator.mediaDevices.getUserMedia.bind(navigator.mediaDevices);

navigator.mediaDevices.getUserMedia = (constraints) => {
    if (!constraints.video.facingMode) {
        constraints.video.facingMode = {ideal: "environment"};
    }
    return originalGetUserMedia(constraints);
};
</script>
"""

config = {'possible_shifts': {'No shifts': 0}, 'possible_modes': ["waste"]}
restaurant_id = None
shift_id = None


def login(username, password) -> bool:
    # TODO from username and password get restaurant_id
    config_aux = {'restaurant_id': 3,
                  'restaurant_name': 'Proppos',
                  'mode': 'waste',
                  'possible_modes': ['waste'],
                  'possible_shifts': {'Esmorzar': 1, 'Dinar': 2, 'Sopar': 3},
                  }
    config.update(config_aux)
    return True


def start_app(shift_id, mode):
    try:
        config_aux = {'shift_id': shift_id,
                      'mode': mode}
        config.update(config_aux)
        gr.Info('Loading models')
        status_code, r = upload_models(**config)
        if status_code in (200, 201, 204):
            gr.Info('Models Correctly Loaded. Ready to predict')
        else:
            raise gr.Error(f'Error loading the models: {r}')
        config.update(r)
    except Exception as e:
        raise gr.Error(f'Error Uploading the models. \n {e}')


def predict_app(image, patient_id):
    buffered = BytesIO()
    image.save(buffered, format='JPEG')
    b64image = base64.b64encode(buffered.getvalue()).decode('utf-8')
    status_code, r = predict(b64image=b64image,
                             patient_identifier=patient_id,
                             **config)
    if status_code in (200, 201, 204):
        gr.Info('Prediction Successful')
    else:
        raise gr.Error(f'Error predicting {r}')


# APP

with gr.Blocks(head=prefer_frontal_cam_html) as block:
    with gr.Tab(label='Welcome'):
        gr.Markdown(f'# User: {config.get("restaurant_name", "Proppos")}')


        @gr.render()
        def render_dropdowns():
            shift_dropdown = gr.Dropdown(label='Meal/Comida/Apat',
                                         value=list(config["possible_shifts"].items())[0],
                                         choices=tuple(config["possible_shifts"].items()))
            mode_dropdown = gr.Dropdown(label='Mode',
                                        value=config['possible_modes'][0],
                                        choices=config["possible_modes"])
            start_button = gr.Button(value='START')
            start_button.click(fn=start_app, inputs=[shift_dropdown, mode_dropdown])

    with gr.Tab(label='📷 Capture'):
        # MAIN TAB TO PREDICT
        gr.Markdown(""" 1. Click to Access Webcam
        2.
        """)
        im = gr.Image(sources=['webcam'], streaming=True, mirror_webcam=False, type='pil')
        with gr.Accordion():
            eater_id = gr.Textbox(label='Patient Identification', placeholder='Searching Patient ID')

        current_eater_id = {'value': None}

        @gr.on(inputs=im, outputs=eater_id)
        def search_eater_id(image):
            d = decode(image)
            default_value = None
            current_value = current_eater_id['value'] or default_value
            # pyzbar returns the decoded payload as bytes, so convert it to str for the Textbox.
            new_value = d[0].data.decode('utf-8') if d else default_value
            # If it is really a new value different from the default one, change it.
            final_value = new_value if new_value != default_value else current_value
            current_eater_id['value'] = final_value
            return final_value


        b = gr.Button('PRESS TO PREDICT')
        # predict_app reports its result via gr.Info / gr.Error, so no output component is needed.
        b.click(fn=predict_app, inputs=[im, eater_id])

    with gr.Tab(label='ℹ️ Status'):
        gr.Markdown('Press the button to see the status of the Application and technical information')
        load_status_button = gr.Button('Load Status')
        status_json = gr.Json(label='Status')
        load_status_button.click(fn=lambda: config, outputs=status_json)

    with gr.Tab(label='📄 Documentation'):
        gr.Markdown()

# block.launch(auth=("proppos", "Proppos2019"))
block.launch(show_api=False, auth=login)
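One thing to keep in mind when reading app.py: config, restaurant_id, and shift_id are module-level variables, so every browser session connected to the Space reads and mutates the same state. If per-user selections ever matter, Gradio's gr.State is the usual per-session alternative. A minimal sketch of that pattern (hypothetical, not part of this commit):

import gradio as gr

# Minimal sketch (not part of this commit): keep the per-user configuration in
# gr.State so concurrent sessions do not overwrite each other's selections.
with gr.Blocks() as demo:
    session_config = gr.State({'possible_shifts': {'No shifts': 0}, 'possible_modes': ['waste']})

    shift = gr.Dropdown(label='Meal/Comida/Apat', choices=['No shifts'], value='No shifts')
    start = gr.Button('START')
    status = gr.JSON(label='Session config')

    def start_session(cfg, shift_value):
        cfg = dict(cfg)  # copy so each session mutates only its own dict
        cfg['shift'] = shift_value
        return cfg, cfg

    start.click(fn=start_session, inputs=[session_config, shift], outputs=[session_config, status])

demo.launch()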
lambdas.py
ADDED
@@ -0,0 +1,85 @@
import os
import boto3
import json

ACCESS_ID = os.environ.get('accessKeyId', None) or os.environ.get('AWS_ACCESS_KEY_ID', None)
ACCESS_KEY = os.environ.get('secretAccessKey', None) or os.environ.get('AWS_SECRET_ACCESS_KEY', None)
REGION = os.environ.get('region') or os.environ.get('AWS_REGION', None)

lambda_client = boto3.client('lambda',
                             region_name=REGION,
                             aws_access_key_id=ACCESS_ID,
                             aws_secret_access_key=ACCESS_KEY)


def run_lambda(body, function_name, invocation_type='RequestResponse'):
    response = json.load(lambda_client.invoke(FunctionName=function_name,
                                              InvocationType=invocation_type,
                                              Payload=json.dumps(body))['Payload'])
    result = response['body']
    return response['statusCode'], result if isinstance(result, dict) else json.loads(result)


def upload_models(
        restaurant_id: int,
        mode: str = 'waste',
        shift_id: int = None,
        what_to_load=None,
        *args,
        **kwargs,
):
    """
    :param restaurant_id: int
    :param mode: str
    :param shift_id: int or None
    :param what_to_load: dict of form {'od': bool, 'encoder': bool, 'decoder': bool}
    :return: {"codes": codes,
              "mode": mode,
              "ip_ports": ip_ports,
              "restaurant_id": restaurant_id,
              "availability": availability,
              "models_identifier": identifier,
              "shift": event['shift_id'],
              "references": references,
              "models": models}
    """

    if what_to_load is None:
        what_to_load = {'od': True, 'encoder': True, 'decoder': True}

    body = {
        'mode': mode,
        'restaurant_id': restaurant_id,
        'shift_id': shift_id,
        'what_to_load': what_to_load,
    }

    status_code, r = run_lambda(body=body, function_name='postModels-fastpay-public-stack')

    return status_code, r

def predict(b64image: str,
            ip_ports: dict,
            upload: bool = True,
            patient_identifier: str = None,
            codes: dict = None,
            models_identifier: str = None,
            shift: int = None,
            *args,
            **kwargs,
            ):
    body = {
        "b64image": b64image,
        "ip_ports": ip_ports,
        "upload": upload,
        "patient_identifier": '1',  # None, # patient_identifier
        "codes": codes,
        "models_identifier": models_identifier,
        "shift": None,  # shift,
    }

    status_code, r = run_lambda(body=body, function_name='getPredict-fastpay-public-stack')

    return status_code, r
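Both helpers go through run_lambda and a live AWS Lambda, so a quick way to exercise them without credentials is to stub the module-level client. A rough local smoke test, assuming the Lambda responds with the API-Gateway-style {'statusCode': ..., 'body': ...} payload that run_lambda unpacks (the region value below is arbitrary):

import io
import json
import os
from unittest import mock

# lambdas.py builds its boto3 client at import time, which needs a region.
os.environ.setdefault('AWS_REGION', 'eu-west-1')

import lambdas

# Fake invoke() response mirroring the shape run_lambda expects.
fake_payload = {'statusCode': 200, 'body': {'models': [], 'ip_ports': {}}}
fake_response = {'Payload': io.BytesIO(json.dumps(fake_payload).encode('utf-8'))}

with mock.patch.object(lambdas.lambda_client, 'invoke', return_value=fake_response):
    status, result = lambdas.upload_models(restaurant_id=3, mode='waste', shift_id=1)
    print(status, result)  # -> 200 {'models': [], 'ip_ports': {}}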
requirements.txt
ADDED
@@ -0,0 +1,3 @@
pyzbar
pillow
boto3
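Two notes on dependencies: gradio itself is not listed because a Gradio-SDK Space installs it automatically, and pyzbar only wraps the native zbar library, so a Debian-based Space typically also needs a packages.txt next to requirements.txt, for example:

packages.txt
libzbar0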