from transformers import BlipProcessor, BlipForConditionalGeneration
from PIL import Image
import gradio as gr
import torch

# Load BLIP model and processor
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
model.eval()

# Run on GPU when available, otherwise fall back to CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# Inference function: preprocess the image, generate token IDs, decode to text
def generate_caption(image):
    if image is None:
        return "Please upload an image."
    if image.mode != "RGB":
        image = image.convert("RGB")
    # Keep the inputs in the model's dtype (float32 here). Casting them to
    # float16 while the weights stay float32 raises a dtype mismatch at runtime.
    inputs = processor(image, return_tensors="pt").to(device)
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=50)
    caption = processor.decode(output[0], skip_special_tokens=True)
    return caption

# Gradio interface
iface = gr.Interface(
    fn=generate_caption,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="Construction Site Image-to-Text Generator",
    description="Upload a site photo. The model will generate a caption describing the construction scene.",
)

iface.launch()
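
# Optional variant: BLIP also supports *conditional* captioning, where a short
# text prefix steers the generated caption toward the target domain. A minimal
# sketch; the prefix "a construction site photo of" is an illustrative choice,
# not a tuned prompt. To wire this into the UI, define it above iface.launch()
# and pass it as fn= instead of generate_caption.
def generate_conditional_caption(image, prefix="a construction site photo of"):
    if image.mode != "RGB":
        image = image.convert("RGB")
    # BlipProcessor accepts an optional text argument alongside the image
    inputs = processor(image, prefix, return_tensors="pt").to(device)
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=50)
    return processor.decode(output[0], skip_special_tokens=True)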