Commit 20a4aae · Parent(s): 7a5518e

chore: initial commit with read me and handler

Files changed:
- README.md +31 -0
- handler.py +49 -0
- requirements.txt +0 -0
README.md CHANGED
@@ -1,3 +1,34 @@
 ---
+tags:
+- image-to-text
+- image-captioning
+- endpoints-template
 license: apache-2.0
 ---
+
+# Fork of [salesforce/BLIP](https://github.com/salesforce/BLIP) for an `image-captioning` task on 🤗 Inference Endpoints
+
+This repo uses a [custom handler](https://huggingface.co/docs/inference-endpoints/guides/custom_handler) that allows an Inference Endpoint to accept an array of image URLs to be captioned by the BLIP model.
+
+## Expected Payload
+
+```json
+{
+  "inputs": [
+    "https://url.to/image_1.jpg",
+    "https://url.to/image_2.jpg"
+  ]
+}
+```
+
+## Response Payload
+
+```json
+{
+  "captions": [
+    "a caption for the first image",
+    "a caption for the second image"
+  ]
+}
+```
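Once deployed, the endpoint can be called with the payload above. Here is a minimal client sketch; `ENDPOINT_URL` and `HF_TOKEN` are placeholders for your own deployment URL and a Hugging Face access token:

```python
import requests

# Placeholders: substitute your deployed endpoint URL and your HF access token.
ENDPOINT_URL = "https://YOUR-ENDPOINT.endpoints.huggingface.cloud"
HF_TOKEN = "hf_..."

response = requests.post(
    ENDPOINT_URL,
    headers={
        "Authorization": f"Bearer {HF_TOKEN}",
        "Content-Type": "application/json",
    },
    json={
        "inputs": [
            "https://url.to/image_1.jpg",
            "https://url.to/image_2.jpg",
        ]
    },
)
print(response.json())  # e.g. {"captions": ["...", "..."]}
```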
handler.py ADDED
@@ -0,0 +1,49 @@
from io import BytesIO
from typing import Any, Dict

import requests
import torch
from PIL import Image
from transformers import BlipForConditionalGeneration, BlipProcessor

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class EndpointHandler:
    def __init__(self, path=""):
        # Load the BLIP processor and captioning model from the Hub once at startup.
        self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
        self.model = BlipForConditionalGeneration.from_pretrained(
            "Salesforce/blip-image-captioning-base"
        ).to(device)
        self.model.eval()

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Args:
            data (:obj:`dict`):
                Includes the input data (a list of image URLs under "inputs")
                and optional generation parameters under "parameters".
        Return:
            A :obj:`dict` with a single list, e.g.
            {"captions": ["A hugging face at the office"]}, containing:
            - "captions": one generated caption string per input image.
        """
        inputs = data.pop("inputs", data)
        parameters = data.pop("parameters", {})

        # Download and decode each image; convert to RGB so images with an
        # alpha channel or palette mode are handled consistently.
        raw_images = [
            Image.open(BytesIO(requests.get(url).content)).convert("RGB")
            for url in inputs
        ]

        processed_image = self.processor(images=raw_images, return_tensors="pt")
        processed_image["pixel_values"] = processed_image["pixel_values"].to(device)
        # Forward any user-supplied generation parameters to model.generate().
        processed_image = {**processed_image, **parameters}

        with torch.no_grad():
            out = self.model.generate(**processed_image)

        # Postprocess: decode generated token ids into caption strings.
        captions = self.processor.batch_decode(out, skip_special_tokens=True)
        return {"captions": captions}
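Because the handler is a plain Python class, it can be smoke-tested locally before deployment. A sketch follows; the image URL is a placeholder, and `max_new_tokens` is just one example of a `generate()` argument that can be forwarded through `"parameters"`:

```python
from handler import EndpointHandler

# Instantiate the handler the same way the Inference Endpoints runtime would.
handler = EndpointHandler(path=".")

# Placeholder image URL; entries under "parameters" are passed to model.generate().
result = handler({
    "inputs": ["https://url.to/image_1.jpg"],
    "parameters": {"max_new_tokens": 20},
})
print(result)  # {"captions": ["..."]}
```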
requirements.txt ADDED
File without changes (empty file); the handler presumably relies on packages preinstalled in the default Inference Endpoints image (transformers, torch, Pillow, requests), so no extra dependencies are pinned here.