# Helper functions
import io
import base64
import json
import os
import requests

import gradio as gr

# Image-to-text endpoint for the BLIP captioning model
API_URL = "https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-base"

# Read the Hugging Face API key from the environment instead of hardcoding it
# (the variable name "HF_API_KEY" is illustrative; use whatever your setup defines)
hf_api_key = os.environ["HF_API_KEY"]

def get_completion(inputs, parameters=None, ENDPOINT_URL=API_URL):
    # The Inference API requires a bearer token, so pass the key in the headers
    headers = {
        "Authorization": f"Bearer {hf_api_key}",
        "Content-Type": "application/json",
    }
    data = {"inputs": inputs}
    if parameters is not None:
        data.update({"parameters": parameters})
    response = requests.request(
        "POST", ENDPOINT_URL, headers=headers, data=json.dumps(data)
    )
    return json.loads(response.content.decode("utf-8"))

def image_to_base64_str(pil_image):
    # Serialize the PIL image to PNG bytes, then base64-encode for the JSON payload
    byte_arr = io.BytesIO()
    pil_image.save(byte_arr, format="PNG")
    byte_arr = byte_arr.getvalue()
    return str(base64.b64encode(byte_arr).decode("utf-8"))

def captioner(image):
    base64_image = image_to_base64_str(image)
    result = get_completion(base64_image)
    return result[0]["generated_text"]

demo = gr.Interface(
    fn=captioner,
    inputs=[gr.Image(label="Upload image", type="pil")],
    outputs=[gr.Textbox(label="Caption")],
    title="Image Captioning with BLIP",
    description="Caption any image using the BLIP model",
    allow_flagging="never",
)
demo.launch(inline=False)
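
# Optional sanity check before (or instead of) launching the UI: call captioner()
# directly on a local image. This is a sketch only; "sample.jpg" is a hypothetical
# file path, and PIL is already a Gradio dependency.
# from PIL import Image
# print(captioner(Image.open("sample.jpg")))

# When finished with the app, free the port with demo.close()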