import streamlit as st
import torch
import numpy as np
from PIL import Image, ImageEnhance
import io
import requests
from transformers import (
    BlipForConditionalGeneration,
    BlipProcessor,
    VisionEncoderDecoderModel,
    ViTImageProcessor,
    AutoTokenizer,
    CLIPProcessor,
    CLIPModel,
    AutoModelForCausalLM,
    AutoProcessor
)
from deep_translator import GoogleTranslator
from scipy.ndimage import variance
from concurrent.futures import ThreadPoolExecutor

# CONFIGURATION
st.set_page_config(
    page_title="🖼️ AI Image Caption Generator",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Define model configurations
MODEL_CONFIGS = {
    "BLIP": {
        "name": "BLIP",
        "icon": "✏️",
        "description": "BLIP excels at generating detailed and accurate image descriptions using vision-language pre-training.",
        "generate_params": {"max_length": 50, "num_beams": 5, "min_length": 10, "top_p": 0.9, "repetition_penalty": 1.5}
    },
    "ViT-GPT2": {
        "name": "ViT-GPT2",
        "icon": "🔍",
        "description": "ViT-GPT2 combines Vision Transformer with GPT2 for fluent and consistent image captions.",
        "generate_params": {"max_length": 50, "num_beams": 5, "min_length": 10, "repetition_penalty": 1.5}
    },
    "GIT": {
        "name": "GIT-base",
        "icon": "📝",
        "description": "GIT generates contextually relevant captions with a focus on scene understanding.",
        "generate_params": {"max_length": 50, "num_beams": 4, "min_length": 8, "repetition_penalty": 1.5}
    },
    "CLIP": {
        "name": "CLIP",
        "icon": "🎨",
        "description": "CLIP provides comprehensive image analysis with confidence scores across content, scene, and style.",
    }
}

# LOADING FUNCTIONS
@st.cache_resource
def load_blip_model():
    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
    model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
    if torch.cuda.is_available():
        model = model.to("cuda")
    return model, processor

@st.cache_resource
def load_vit_gpt2_model():
    model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
    feature_extractor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
    tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
    if torch.cuda.is_available():
        model = model.to("cuda")
    return model, feature_extractor, tokenizer

@st.cache_resource
def load_git_model():
    processor = AutoProcessor.from_pretrained("microsoft/git-base")
    model = AutoModelForCausalLM.from_pretrained("microsoft/git-base")
    if torch.cuda.is_available():
        model = model.to("cuda")
    return model, processor

@st.cache_resource
def load_clip_model():
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
    model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
    if torch.cuda.is_available():
        model = model.to("cuda")
    return model, processor

# IMAGE PROCESSING
def preprocess_image(image):
    max_size = 1024
    if max(image.size) > max_size:
        ratio = max_size / max(image.size)
        new_size = (int(image.size[0] * ratio), int(image.size[1] * ratio))
        image = image.resize(new_size, Image.LANCZOS)
    enhancer = ImageEnhance.Contrast(image)
    image = enhancer.enhance(1.2)
    img_array = np.array(image.convert('L'))
    if np.mean(img_array) < 100:
        brightness_enhancer = ImageEnhance.Brightness(image)
        image = brightness_enhancer.enhance(1.3)
    return image

def check_image_quality(image):
    if image.width < 200 or image.height < 200:
        return False, "Image is too small for accurate captioning. Consider using a larger image."
    img_array = np.array(image.convert('L'))
    if variance(img_array) < 100:
        return False, "Image may be too blurry for accurate captioning. Consider using a clearer image."
    return True, "Image quality is sufficient for captioning."

# CAPTION GENERATION FUNCTIONS
def generate_caption(image, model_name, models_data):
    if model_name == "BLIP":
        model, processor = models_data[model_name]
        return get_blip_caption(image, model, processor)
    elif model_name == "ViT-GPT2":
        model, feature_extractor, tokenizer = models_data[model_name]
        return get_vit_gpt2_caption(image, model, feature_extractor, tokenizer)
    elif model_name == "GIT":
        model, processor = models_data[model_name]
        return get_git_caption(image, model, processor)
    elif model_name == "CLIP":
        model, processor = models_data[model_name]
        return get_clip_caption(image, model, processor)
    return "Model not supported"

def get_blip_caption(image, model, processor):
    try:
        inputs = processor(image, return_tensors="pt")
        if torch.cuda.is_available():
            inputs = {k: v.to("cuda") for k, v in inputs.items()}
        output = model.generate(**inputs, **MODEL_CONFIGS["BLIP"]["generate_params"])
        caption = processor.decode(output[0], skip_special_tokens=True)
        return caption
    except Exception as e:
        return f"BLIP model error: {str(e)}"

def get_vit_gpt2_caption(image, model, feature_extractor, tokenizer):
    try:
        inputs = feature_extractor(images=image, return_tensors="pt")
        if torch.cuda.is_available():
            inputs = {k: v.to("cuda") for k, v in inputs.items()}
        output = model.generate(**inputs, **MODEL_CONFIGS["ViT-GPT2"]["generate_params"])
        caption = tokenizer.decode(output[0], skip_special_tokens=True)
        return caption
    except Exception as e:
        return f"ViT-GPT2 model error: {str(e)}"

def get_git_caption(image, model, processor):
    try:
        inputs = processor(images=image, return_tensors="pt")
        if torch.cuda.is_available():
            inputs = {k: v.to("cuda") for k, v in inputs.items()}
        output = model.generate(**inputs, **MODEL_CONFIGS["GIT"]["generate_params"])
        caption = processor.decode(output[0], skip_special_tokens=True)
        return caption
    except Exception as e:
        return f"GIT model error: {str(e)}"

CONTENT_CATEGORIES = [
    "a portrait photograph", "a landscape photograph", "a wildlife photograph",
    "an architectural photograph", "a street photograph", "a food photograph",
    "a fashion photograph", "a sports photograph", "a macro photograph",
    "a night photograph", "an aerial photograph", "an underwater photograph",
    "a product photograph", "a documentary photograph", "a travel photograph",
    "a black and white photograph", "an abstract photograph", "a concert photograph",
    "a wedding photograph", "a nature photograph"
]

SCENE_ATTRIBUTES = [
    "indoors", "outdoors", "daytime", "nighttime", "urban", "rural",
    "beach", "mountains", "forest", "desert", "snowy", "rainy",
    "foggy", "sunny", "crowded", "empty", "modern", "vintage",
    "colorful", "minimalist"
]

STYLE_ATTRIBUTES = [
    "professional", "casual", "artistic", "documentary", "aerial view",
    "close-up", "wide-angle", "telephoto", "panoramic", "HDR",
    "long exposure", "shallow depth of field", "silhouette", "motion blur"
]

def get_clip_caption(image, model, processor):
    try:
        content_inputs = processor(text=CONTENT_CATEGORIES, images=image, return_tensors="pt", padding=True)
        if torch.cuda.is_available():
            content_inputs = {k: v.to("cuda") for k, v in content_inputs.items() if torch.is_tensor(v)}
        content_outputs = model(**content_inputs)
        content_probs = content_outputs.logits_per_image.softmax(dim=1)[0]
        top_content_probs, top_content_indices = torch.topk(content_probs, 2)
        scene_inputs = processor(text=SCENE_ATTRIBUTES, images=image, return_tensors="pt", padding=True)
        if torch.cuda.is_available():
            scene_inputs = {k: v.to("cuda") for k, v in scene_inputs.items() if torch.is_tensor(v)}
        scene_outputs = model(**scene_inputs)
        scene_probs = scene_outputs.logits_per_image.softmax(dim=1)[0]
        top_scene_probs, top_scene_indices = torch.topk(scene_probs, 2)

        style_inputs = processor(text=STYLE_ATTRIBUTES, images=image, return_tensors="pt", padding=True)
        if torch.cuda.is_available():
            style_inputs = {k: v.to("cuda") for k, v in style_inputs.items() if torch.is_tensor(v)}
        style_outputs = model(**style_inputs)
        style_probs = style_outputs.logits_per_image.softmax(dim=1)[0]
        top_style_probs, top_style_indices = torch.topk(style_probs, 1)

        primary_content = CONTENT_CATEGORIES[top_content_indices[0].item()].replace("a ", "")
        primary_scene = SCENE_ATTRIBUTES[top_scene_indices[0].item()]
        primary_style = STYLE_ATTRIBUTES[top_style_indices[0].item()]

        secondary_elements = []
        if top_content_probs[1].item() > 0.15:
            secondary_content = CONTENT_CATEGORIES[top_content_indices[1].item()].replace("a ", "")
            secondary_elements.append(f"with elements of {secondary_content}")
        if top_scene_probs[1].item() > 0.15:
            secondary_scene = SCENE_ATTRIBUTES[top_scene_indices[1].item()]
            secondary_elements.append(f"also showing {secondary_scene} characteristics")

        detailed_caption = f"This appears to be {CONTENT_CATEGORIES[top_content_indices[0].item()]} captured in a {primary_scene} setting"
        if secondary_elements:
            detailed_caption += ", " + " ".join(secondary_elements)
        detailed_caption += f". The image has a {primary_style} quality to it."
        detailed_caption += f" (Primary content: {top_content_probs[0].item()*100:.1f}% confidence)"
        return detailed_caption
    except Exception as e:
        return f"CLIP model error: {str(e)}"

# TRANSLATION FUNCTION
def batch_translate(texts, target_lang):
    try:
        translator = GoogleTranslator(source='en', target=target_lang)
        return {key: translator.translate(value) for key, value in texts.items()}
    except Exception as e:
        return {key: f"Translation error: {str(e)}" for key in texts}

# MAIN APPLICATION
def main():
    # Custom CSS for modern dark mode and no shapes under titles
    st.markdown("""
    """, unsafe_allow_html=True)
    st.markdown('