kumararvindibs committed (verified)
Commit 589bc03 · Parent: 043d595

Update handler.py

Files changed (1):
  1. handler.py (+16, -5)
handler.py

@@ -5,7 +5,7 @@ import torch
 import base64
 import io
 from transformers import BlipForConditionalGeneration, BlipProcessor
-
+import logging
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
 class EndpointHandler():
@@ -15,11 +15,20 @@ class EndpointHandler():
         self.model.eval()
 
     def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
+        logging.debug('------------This is a debug message')
+        logging.info('----------------------This is an info message')
+        logging.warning('--------This is a warning message')
+        logging.error('----------This is an error message')
+        logging.critical('-------------------This is a critical message')
+        print("000000--",type(data))
+        logging.warning('--------This is a warning message')
         print("input data is here------------",data)
         input_data = data.get("inputs", {})
-        print("input data is here-2-----------",input_data)
+        logging.warning('------input_data--This is a warning message', input_data)
+        print("input data is here-2-----------",type(input_data))
         encoded_images = input_data.get("images")
-        print("input encoded_images is here------------",encoded_images)
+        logging.warning('---encoded_images-----This is a warning message',encoded_images)
+        print("input encoded_images is here------------",type(encoded_images))
         if not encoded_images:
             return {"captions": [], "error": "No images provided"}
 
@@ -37,7 +46,7 @@ class EndpointHandler():
             # Non test code
             dataBytesIO = io.BytesIO(byteImg)
             raw_images =[Image.open(dataBytesIO)]
-
+            logging.warning('----raw_images----This is a warning message',raw_images)
             # Check if any images were successfully decoded
             if not raw_images:
                 print("No valid images found.")
@@ -48,13 +57,15 @@ class EndpointHandler():
                 "pixel_values": torch.cat([inp["pixel_values"] for inp in processed_inputs], dim=0).to(device),
                 "max_new_tokens":40
             }
-
+            logging.warning('---processed_inputs-----This is a warning message', processed_inputs)
             with torch.no_grad():
                 out = self.model.generate(**processed_inputs)
 
             captions = self.processor.batch_decode(out, skip_special_tokens=True)
+            logging.warning('----captions----This is a warning message',captions)
             print("caption is here-------",captions)
             return {"captions": captions}
         except Exception as e:
             print(f"Error during processing: {str(e)}")
+            logging.error(f"Error during processing: ----------------{str(e)}")
             return {"captions": [], "error": str(e)}