Update app.py
app.py CHANGED
@@ -72,11 +72,11 @@ class BLIPImageCaptioning:
 
     def generate_caption(self, image):
         try:
-            model = BlipForConditionalGeneration.from_pretrained(self.blip_model_name)
+            model = BlipForConditionalGeneration.from_pretrained(self.blip_model_name)
             processor = BlipProcessor.from_pretrained(self.blip_model_name)
 
             raw_image = self.preprocess_image(image)
-            inputs = processor(raw_image, return_tensors="pt")
+            inputs = processor(raw_image, return_tensors="pt")
             out = model.generate(**inputs)
             unconditional_caption = processor.decode(out[0], skip_special_tokens=True)
 
@@ -176,7 +176,7 @@ def preprocessing_interface(original_image, brightness_slider, contrast_slider,
         return gr.Error(f"Error in preprocessing: {str(e)}")
 
 def generate_captions(images):
-    blip_model = BlipForConditionalGeneration.from_pretrained(blip_model_name)
+    blip_model = BlipForConditionalGeneration.from_pretrained(blip_model_name)
     blip_processor = BlipProcessor.from_pretrained(blip_model_name)
 
     return [blip_model_instance.generate_caption(image) for image in images]
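For context, the lines touched in this commit follow the standard Hugging Face transformers BLIP captioning flow: load a BlipProcessor and BlipForConditionalGeneration, preprocess the image, call generate, and decode the output. The sketch below is a minimal, self-contained version of that flow, not the Space's actual code; the checkpoint name and image path are assumptions, since the real value of self.blip_model_name is not visible in this diff.

# Minimal BLIP unconditional-captioning sketch.
# Assumptions: "Salesforce/blip-image-captioning-base" stands in for the Space's
# blip_model_name, and "example.jpg" is a hypothetical local image.
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

blip_model_name = "Salesforce/blip-image-captioning-base"  # assumed checkpoint

# Load the processor and model once, outside any per-request function.
processor = BlipProcessor.from_pretrained(blip_model_name)
model = BlipForConditionalGeneration.from_pretrained(blip_model_name)

raw_image = Image.open("example.jpg").convert("RGB")  # hypothetical input image

# Unconditional captioning: no text prompt, only the image.
inputs = processor(raw_image, return_tensors="pt")
out = model.generate(**inputs)
caption = processor.decode(out[0], skip_special_tokens=True)
print(caption)

One design point worth noting about the diffed code: both generate_caption and generate_captions call from_pretrained inside the function body, so the model and processor are re-loaded on every call. Loading them once at startup, as in the sketch above, is the usual way to avoid that overhead in a Gradio Space.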