Update app.py
app.py
CHANGED
@@ -4,13 +4,13 @@ import requests
 from transformers import BlipProcessor, BlipForConditionalGeneration

 processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
-model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to("
+model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to("cpu")

 def generate_caption(image_url):
     raw_image = Image.open(requests.get(image_url, stream=True).raw).convert('RGB')

     # Unconditional image captioning
-    inputs = processor(raw_image, return_tensors="pt").to("
+    inputs = processor(raw_image, return_tensors="pt").to("cpu")
     out = model.generate(**inputs)
     caption = processor.decode(out[0], skip_special_tokens=True)

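For context, here is a minimal standalone sketch of the captioning path as it stands after this commit. The imports (requests, PIL) are inferred from the hunk header and the calls shown in the diff, while the return statement, the __main__ block, and the example URL are assumptions for illustration; the Space's UI wiring is omitted.

```python
# Sketch of the captioning path after this change (assumed imports; the
# example URL is a hypothetical placeholder, not part of the Space).
import requests
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to("cpu")

def generate_caption(image_url):
    # Fetch the image over HTTP and normalize it to RGB.
    raw_image = Image.open(requests.get(image_url, stream=True).raw).convert('RGB')

    # Unconditional image captioning: no text prompt; input tensors go to CPU
    # to match the CPU-resident model.
    inputs = processor(raw_image, return_tensors="pt").to("cpu")
    out = model.generate(**inputs)
    caption = processor.decode(out[0], skip_special_tokens=True)
    return caption  # returning the caption is an assumption; the diff ends before this line

if __name__ == "__main__":
    # Hypothetical test image; substitute any reachable image URL.
    print(generate_caption("https://example.com/image.jpg"))
```

Pinning both the model and the processed inputs to "cpu" keeps them on the same device, which model.generate requires.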