import os
import random

import gradio as gr
import spaces  # Hugging Face Spaces GPU decorator
import torch
from diffusers import DiffusionPipeline
from huggingface_hub import login
from qwen_vl_utils import process_vision_info
from transformers import (
    AutoProcessor,
    BlipForConditionalGeneration,
    BlipProcessor,
    Qwen2VLForConditionalGeneration,
)

# Get the Hugging Face token from the environment.
hf_token = os.getenv("HF_AUTH_TOKEN")
if not hf_token:
    raise ValueError("Hugging Face token is not set in the environment variables.")
login(token=hf_token)

# BLIP captioner.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")

# FuseCap captioner.
processor1 = BlipProcessor.from_pretrained("noamrot/FuseCap")
model2 = BlipForConditionalGeneration.from_pretrained("noamrot/FuseCap")

# Stable Diffusion 3.5 renders the final design.
pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium")

# Qwen2-VL OCR model reads any text in the uploaded image.
# (The original assigned this to both `model3` and `model`, clobbering the BLIP model.)
model3 = Qwen2VLForConditionalGeneration.from_pretrained(
    "prithivMLmods/Qwen2-VL-OCR-2B-Instruct", torch_dtype="auto", device_map="auto"
)
processor2 = AutoProcessor.from_pretrained("prithivMLmods/Qwen2-VL-OCR-2B-Instruct")

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe.to(device)
model2.to(device)
model.to(device)


@spaces.GPU(duration=150)
def generate_caption_and_image(image):
    img = image.convert("RGB")

    # OCR step: pass the uploaded image to Qwen2-VL with a text instruction.
    # (The instruction wording is an assumption; the original passed the PIL image
    # in the text field and a fixed demo URL as the image.)
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "image", "image": img},
                {"type": "text", "text": "Extract the text written in this image."},
            ],
        }
    ]
    text = processor2.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    image_inputs, video_inputs = process_vision_info(messages)
    inputs = processor2(
        text=[text],
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    ).to(model3.device)
    generated_ids = model3.generate(**inputs, max_new_tokens=128)
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
    ]
    result = processor2.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )[0]

    # Randomly select a fabric, pattern, and textile design to steer the prompt.
    fabrics = ['cotton', 'silk', 'denim', 'linen', 'polyester', 'wool', 'velvet']
    patterns = ['striped', 'floral', 'geometric', 'abstract', 'solid', 'polka dots']
    textile_designs = ['woven texture', 'embroidery', 'printed fabric', 'hand-dyed', 'quilting']
    selected_fabric = random.choice(fabrics)
    selected_pattern = random.choice(patterns)
    selected_textile_design = random.choice(textile_designs)

    # Caption 2: FuseCap, conditioned on its "a picture of" prefix.
    # (The original called the BLIP processor/model here even though FuseCap was loaded.)
    text = "a picture of "
    inputs = processor1(img, text, return_tensors="pt").to(device)
    out = model2.generate(**inputs, num_beams=3)
    caption2 = processor1.decode(out[0], skip_special_tokens=True)

    # Caption 1: unconditional BLIP caption.
    inputs = processor(img, return_tensors="pt").to(device)
    out = model.generate(**inputs)
    caption1 = processor.decode(out[0], skip_special_tokens=True)
    prompt = (
        f"Create a highly realistic clothing item based on the following descriptions: "
        f"the design should reflect {caption1} and {caption2}, blending both themes into "
        f"a single, stylish, modern piece of clothing. Incorporate highly realistic, "
        f"high-quality textures that exude sophistication, with realistic fabric lighting "
        f"and fine details. Subtly hint at {selected_fabric}, featuring a {selected_pattern} "
        f"motif and a {selected_textile_design} style that harmoniously balances the essence "
        f'of both captions. The text "{result}" should be written on top of it.'
    )

    # Generate the design image from the combined prompt.
    generated_image = pipe(prompt).images[0]
    return prompt, generated_image


# Gradio UI
iface = gr.Interface(
    fn=generate_caption_and_image,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=[gr.Textbox(label="Generated Caption"), gr.Image(label="Generated Design")],
    live=True,
)

iface.launch(share=True)
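
# Local run notes (a sketch; the package list below is an assumption, not pinned by this file):
#   pip install torch transformers diffusers accelerate gradio spaces qwen-vl-utils pillow
#   export HF_AUTH_TOKEN=<your Hugging Face access token>
#   python app.py   # "app.py" is the conventional Spaces entry point; adjust if named differently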