"""Gradio app: caption an uploaded image with two models, then generate a
clothing-design image from the combined captions with Stable Diffusion 3.5."""

import os
import random

import easyocr  # NOTE(review): currently unused (OCR code was commented out); kept for future use
import gradio as gr
import spaces  # Hugging Face Spaces GPU-allocation decorator
import torch
from diffusers import DiffusionPipeline
from huggingface_hub import login
from PIL import Image
from transformers import BlipForConditionalGeneration, BlipProcessor, pipeline

# Authenticate against the Hugging Face Hub; the token must come from the env.
hf_token = os.getenv("HF_AUTH_TOKEN")
if not hf_token:
    raise ValueError("Hugging Face token is not set in the environment variables.")
login(token=hf_token)

device = "cuda" if torch.cuda.is_available() else "cpu"

# Captioning model #1: BLIP large (processor + conditional-generation model).
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained(
    "Salesforce/blip-image-captioning-large"
)
model.to(device)

# Captioning model #2: ViT-GPT2 via the high-level pipeline API.
# BUGFIX: a transformers Pipeline has no .to() method — the original
# `pipe2.to(device)` raised AttributeError. Device is selected at construction.
pipe2 = pipeline(
    "image-to-text",
    model="nlpconnect/vit-gpt2-image-captioning",
    device=0 if device == "cuda" else -1,
)

# Text-to-image model used to render the final clothing design.
pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium")
pipe.to(device)

# Candidate attributes randomly mixed into the generation prompt.
# Hoisted to module level so they are not rebuilt on every call.
FABRICS = ["cotton", "silk", "denim", "linen", "polyester", "wool", "velvet"]
PATTERNS = ["striped", "floral", "geometric", "abstract", "solid", "polka dots"]
TEXTILE_DESIGNS = ["woven texture", "embroidery", "printed fabric", "hand-dyed", "quilting"]


@spaces.GPU(duration=300)
def generate_caption_and_image(image):
    """Caption *image* with BLIP and ViT-GPT2, then generate a clothing design.

    Parameters
    ----------
    image : PIL.Image.Image
        Image uploaded through the Gradio UI.

    Returns
    -------
    tuple[str, PIL.Image.Image]
        The BLIP caption and the diffusion-generated design image.
    """
    img = image.convert("RGB")

    # Randomly pick one attribute per category to vary each generated design.
    selected_fabric = random.choice(FABRICS)
    selected_pattern = random.choice(PATTERNS)
    selected_textile_design = random.choice(TEXTILE_DESIGNS)

    # Caption with BLIP. BUGFIX: feed the RGB-converted `img`, not the raw
    # upload, so non-RGB uploads (e.g. RGBA PNGs) don't break the processor.
    inputs = processor(img, return_tensors="pt", padding=True, truncation=True, max_length=250)
    inputs = {key: val.to(device) for key, val in inputs.items()}
    out = model.generate(**inputs)
    caption = processor.decode(out[0], skip_special_tokens=True)

    # Caption with ViT-GPT2. BUGFIX: the pipeline returns a list of dicts
    # ([{"generated_text": ...}]); extract the string instead of interpolating
    # the raw list repr into the prompt.
    caption2 = pipe2(img)[0]["generated_text"]

    # BUGFIX: the original referenced undefined `caption1` (NameError);
    # the first caption variable is `caption`.
    prompt = f'''Create a highly realistic clothing item 
based on the following descriptions: The design should reflect {caption} and {caption2}, blending both themes into a single, stylish, and modern piece of clothing. Incorporate highly realistic and high-quality textures that exude sophistication, with realistic fabric lighting and fine details. Subtly hint at {selected_fabric}, featuring a {selected_pattern} motif and a {selected_textile_design} style that harmoniously balances the essence of both captions.'''

    # Render the design from the combined prompt.
    generated_image = pipe(prompt).images[0]
    return caption, generated_image


# Gradio UI
iface = gr.Interface(
    fn=generate_caption_and_image,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=[gr.Textbox(label="Generated Caption"), gr.Image(label="Generated Design")],
    live=True,
)
iface.launch(share=True)