kumararvindibs committed on
Commit b29140f · verified · 1 Parent(s): 589bc03

Update handler.py

Files changed (1)
  1. handler.py +12 -11
handler.py CHANGED
@@ -7,7 +7,12 @@ import io
 from transformers import BlipForConditionalGeneration, BlipProcessor
 import logging
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
+# Configure logging
+logging.basicConfig(level=logging.DEBUG)
+# Configure logging
+logging.basicConfig(level=logging.ERROR)
+# Configure logging
+logging.basicConfig(level=logging.WARNING)
 class EndpointHandler():
     def __init__(self, path=""):
         self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
@@ -15,19 +20,16 @@ class EndpointHandler():
         self.model.eval()
 
     def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
-        logging.debug('------------This is a debug message')
-        logging.info('----------------------This is an info message')
-        logging.warning('--------This is a warning message')
-        logging.error('----------This is an error message')
-        logging.critical('-------------------This is a critical message')
+        logging.error(f"----------This is an error message")
+        #logging.critical('-------------------This is a critical message')
         print("000000--",type(data))
-        logging.warning('--------This is a warning message')
+        #logging.warning('--------This is a warning message')
         print("input data is here------------",data)
         input_data = data.get("inputs", {})
         logging.warning('------input_data--This is a warning message', input_data)
         print("input data is here-2-----------",type(input_data))
         encoded_images = input_data.get("images")
-        logging.warning('---encoded_images-----This is a warning message',encoded_images)
+        logging.warning(f"---encoded_images-----This is a warning message {str(encoded_images)}")
         print("input encoded_images is here------------",type(encoded_images))
         if not encoded_images:
             return {"captions": [], "error": "No images provided"}
@@ -46,7 +48,7 @@ class EndpointHandler():
             # Non test code
             dataBytesIO = io.BytesIO(byteImg)
             raw_images =[Image.open(dataBytesIO)]
-            logging.warning('----raw_images----This is a warning message',raw_images)
+            logging.warning(f"----raw_images----This is a warning message {str(raw_images)}")
             # Check if any images were successfully decoded
             if not raw_images:
                 print("No valid images found.")
@@ -57,12 +59,11 @@ class EndpointHandler():
                 "pixel_values": torch.cat([inp["pixel_values"] for inp in processed_inputs], dim=0).to(device),
                 "max_new_tokens":40
            }
-            logging.warning('---processed_inputs-----This is a warning message', processed_inputs)
            with torch.no_grad():
                out = self.model.generate(**processed_inputs)
 
            captions = self.processor.batch_decode(out, skip_special_tokens=True)
-            logging.warning(f"----captions----This is a warning message {str(captions)}")
+            logging.warning(f"----captions----This is a warning message {str(captions)}")
            print("caption is here-------",captions)
            return {"captions": captions}
        except Exception as e:
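
The updated handler.py is a custom handler for a Hugging Face Inference Endpoint. Below is a minimal local invocation sketch, not part of the commit: it assumes handler.py is importable from the working directory and that each entry in the "images" field is a base64-encoded image string (the decoding step falls outside the hunks shown above), and the test image path is hypothetical.

# Minimal usage sketch (assumption: "images" carries base64-encoded image strings)
import base64
from handler import EndpointHandler

handler = EndpointHandler(path="")  # loads the BLIP processor and model once

with open("example.jpg", "rb") as f:  # hypothetical local test image
    encoded = base64.b64encode(f.read()).decode("utf-8")

payload = {"inputs": {"images": [encoded]}}
result = handler(payload)  # expected shape: {"captions": ["..."]}
print(result.get("captions"))

One note on the new configuration lines: logging.basicConfig only configures the root logger on its first call, so the later calls with ERROR and WARNING levels have no effect and the handler logs at DEBUG level.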