donghuna committed on
Commit
1ced666
·
verified ·
1 Parent(s): 5d6e7ad

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +46 -33
handler.py CHANGED
@@ -19,38 +19,51 @@ class EndpointHandler:
19
  self.model.eval()
20
 
21
  def __call__(self, data):
22
- # video_path = data["inputs"]["video_path"]
23
- # ftp_password = data["inputs"].get("ftp_password")
24
- # ftp_password = data["inputs"]["ftp_password"]
25
-
26
- inputs = json.loads(data)
27
- video_path = inputs.get("inputs", {}).get("video_path", "")
28
- ftp_password = inputs.get("inputs", {}).get("ftp_password", "")
29
-
30
- processed_frames = read_video(video_path, ftp_password)
31
-
32
-
33
-
34
- # ๋””๋ฒ„๊น…: ์ž…๋ ฅ ๋ฐ์ดํ„ฐ ํ™•์ธ
35
- logger.info(f"Received data: {data}")
36
-
37
- frames = np.array(data['frames'])
38
- frames = torch.tensor(frames).float() # Ensure the data is in the correct format
39
-
40
- # ๋””๋ฒ„๊น…: ํ”„๋ ˆ์ž„ ๋ฐ์ดํ„ฐ ํ™•์ธ
41
- logger.info(f"Frames shape: {frames.shape}")
42
-
43
- # Perform inference
44
- with torch.no_grad():
45
- outputs = self.model(frames.unsqueeze(0)) # Add batch dimension
46
- predictions = torch.softmax(outputs.logits, dim=-1)
47
-
48
- # ๋””๋ฒ„๊น…: ์˜ˆ์ธก ๊ฒฐ๊ณผ ํ™•์ธ
49
- logger.info(f"Predictions: {predictions}")
50
 
51
- predicted_class = torch.argmax(predictions, dim=-1).item()
 
 
 
 
 
 
 
 
 
 
 
 
 
52
 
53
- # ๋””๋ฒ„๊น…: ์˜ˆ์ธก ํด๋ž˜์Šค ํ™•์ธ
54
- logger.info(f"Predicted class: {predicted_class}")
55
-
56
- return {"predicted_class": predicted_class, "predictions": predictions.tolist()}
 
 
 
 
 
 
 
 
19
  self.model.eval()
20
 
21
  def __call__(self, data):
22
+ try:
23
+ # video_path = data["inputs"]["video_path"]
24
+ # ftp_password = data["inputs"].get("ftp_password")
25
+ # ftp_password = data["inputs"]["ftp_password"]
26
+
27
+ inputs = json.loads(data)
28
+ video_path = inputs.get("inputs", {}).get("video_path", "")
29
+ ftp_password = inputs.get("inputs", {}).get("ftp_password", "")
30
+
31
+ processed_frames = read_video(video_path, ftp_password)
32
+
33
+
34
+
35
+ # ๋””๋ฒ„๊น…: ์ž…๋ ฅ ๋ฐ์ดํ„ฐ ํ™•์ธ
36
+ logger.info(f"Received data: {data}")
37
+
38
+ frames = np.array(data['frames'])
39
+ frames = torch.tensor(frames).float() # Ensure the data is in the correct format
40
+
41
+ # ๋””๋ฒ„๊น…: ํ”„๋ ˆ์ž„ ๋ฐ์ดํ„ฐ ํ™•์ธ
42
+ logger.info(f"Frames shape: {frames.shape}")
 
 
 
 
 
 
 
43
 
44
+ # Perform inference
45
+ with torch.no_grad():
46
+ outputs = self.model(frames.unsqueeze(0)) # Add batch dimension
47
+ predictions = torch.softmax(outputs.logits, dim=-1)
48
+
49
+ # ๋””๋ฒ„๊น…: ์˜ˆ์ธก ๊ฒฐ๊ณผ ํ™•์ธ
50
+ logger.info(f"Predictions: {predictions}")
51
+
52
+ predicted_class = torch.argmax(predictions, dim=-1).item()
53
+
54
+ # ๋””๋ฒ„๊น…: ์˜ˆ์ธก ํด๋ž˜์Šค ํ™•์ธ
55
+ logger.info(f"Predicted class: {predicted_class}")
56
+
57
+ return {"predicted_class": predicted_class, "predictions": predictions.tolist()}
58
 
59
+
60
+ except Exception as e:
61
+ error_message = str(e)
62
+ stack_trace = traceback.format_exc()
63
+ logger.error(f"Error: {error_message}")
64
+ logger.error(f"Stack trace: {stack_trace}")
65
+ return json.dumps({
66
+ "status": "error",
67
+ "message": error_message,
68
+ "stack_trace": stack_trace
69
+ }), 500