unfinity committed
Commit
5714fc1
1 Parent(s): 67fd17e

fastapi deploy

Files changed (2)
  1. .vscode/launch.json +30 -0
  2. api.py +57 -0
.vscode/launch.json ADDED
@@ -0,0 +1,30 @@
+ {
+     // Use IntelliSense to learn about possible attributes.
+     // Hover to view descriptions of existing attributes.
+     // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+     "version": "0.2.0",
+     "configurations": [
+         {
+             "name": "Uvicorn",
+             "type": "debugpy",
+             "request": "launch",
+             "module": "uvicorn",
+             "args": [
+                 "api:app",
+                 "--reload"
+             ],
+             "jinja": true,
+             "justMyCode": false,
+             "env": {
+                 "PYTHONPATH": "${workspaceFolder}:${PYTHONPATH}",
+             }
+         },
+         {
+             "name": "Python Debugger: Current File",
+             "type": "debugpy",
+             "request": "launch",
+             "program": "${file}",
+             "console": "integratedTerminal"
+         }
+     ]
+ }
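For reference, the "Uvicorn" launch configuration above is roughly equivalent to starting the server programmatically; a minimal sketch, assuming uvicorn is installed and api.py is importable from the working directory (the run_dev.py name is hypothetical and not part of this commit):

    # run_dev.py - hypothetical helper, not part of this commit
    import uvicorn

    if __name__ == "__main__":
        # Passing the import string "api:app" (rather than the app object)
        # is required for auto-reloading to work.
        uvicorn.run("api:app", reload=True)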
api.py ADDED
@@ -0,0 +1,57 @@
+ from fastapi import FastAPI, File, UploadFile
+ from fastapi.responses import JSONResponse
+ import io
+ import base64
+ from ultralytics import YOLO
+ import torch
+ from PIL import Image, ImageOps
+
+ import utils
+ from drawing import draw_keypoints
+
+
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+ print('Using device:', device)
+
+ model_pose = YOLO('yolov8l-pose.pt')
+ model_pose.to(device)
+
+ app = FastAPI()
+
+ @app.post("/predict-image")
+ async def predict_image(file: UploadFile = File(...)):
+     contents = await file.read()
+     input_image = Image.open(io.BytesIO(contents)).convert("RGB")
+     input_image = ImageOps.exif_transpose(input_image)
+
+     # predict
+     result = model_pose(input_image)[0]
+     keypoints = utils.get_keypoints(result)
+
+     # draw keypoints
+     output_image = draw_keypoints(input_image, keypoints).convert("RGB")
+
+     # calculate angles
+     lea, rea = utils.get_eye_angles(keypoints)
+     lba, rba = utils.get_elbow_angles(keypoints)
+     angles = {'left_eye_angle': lea, 'right_eye_angle': rea, 'left_elbow_angle': lba, 'right_elbow_angle': rba}
+
+     # encode to base64
+     img_buffer = io.BytesIO()
+     output_image.save(img_buffer, format="JPEG")
+     img_buffer.seek(0)
+     img_base64 = base64.b64encode(img_buffer.getvalue()).decode("utf-8")
+
+     # prepare json response
+     json_data = {
+         "keypoints": keypoints,
+         "angles": angles,
+         "output_image": img_base64
+     }
+
+     return JSONResponse(content=json_data)
+
+
+ if __name__ == "__main__":
+     import uvicorn
+     uvicorn.run(app, host="0.0.0.0", port=8000)
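A minimal client sketch for the /predict-image endpoint added above, assuming the server is running locally on port 8000, the requests package is available, and sample.jpg stands in for any input image (all three are assumptions, not part of this commit):

    # client_example.py - hypothetical usage sketch, not part of this commit
    import base64
    import requests

    # POST an image as multipart/form-data to the endpoint defined in api.py
    with open("sample.jpg", "rb") as f:
        resp = requests.post(
            "http://localhost:8000/predict-image",
            files={"file": ("sample.jpg", f, "image/jpeg")},
        )
    resp.raise_for_status()
    data = resp.json()

    # Eye and elbow angles computed by the server; raw keypoints are in data["keypoints"]
    print(data["angles"])

    # Decode the annotated image, returned as a base64-encoded JPEG
    with open("annotated.jpg", "wb") as out:
        out.write(base64.b64decode(data["output_image"]))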