{"node":{"id":"urn:cid:bafkr4id3uklxwf3aubpb6ix46ia73bks3q5dqie7vtwtdyu47gfd4vr7ga","properties":{"dataRegistrationJcs":"urn:cid:baga6yaq6eduvswkrqapz724odc4tszdsrgbfkbmk4fjrjxoorigwbekxg7d4w","registeredBy":"did:key:z6MkutbV1GPZLLquVDwfVrkHcJezwvvCp92RmL1MGdvN4J5P","timestamp":"2024-01-18T14:23:47Z","nodeType":"data"}},"enrichments":{"asset_hub":{"asset_id":158,"asset_name":"BLIP: Bootstrapping Language-Image Pre-training","owning_project":"Salesforce Research","asset_description":"BLIP is a versatile model capable of performing tasks such as Visual Question Answering, Image-Text Retrieval, and Image Captioning. Developed by Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi, it utilizes Vision-Language Pre-training (VLP) to excel in both understanding-based and generation-based tasks. The model's efficacy is showcased through state-of-the-art results in various vision-language tasks.","asset_format":"PyTorch","asset_type":"Model","asset_blob_type":"iroh-collection","source_location_url":"","contact_info":"Refer to the original paper or Salesforce's official channels for contact information.","license":"bsd-3-clause","license_link":"https://opensource.org/license/bsd-3-clause/","registered_date":"2024-01-18T14:27:42.880268Z","last_modified_date":"2024-01-18T14:27:42.880268Z"}}}