import gradio as gr
import torch
from PIL import Image
from diffusers import StableDiffusionInpaintPipeline
import numpy as np

# Load the Stable Diffusion inpainting model
def load_inpainting_model():
    device = "cuda" if torch.cuda.is_available() else "cpu"
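    # Use half precision on GPU to reduce memory; CPU inference needs float32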
    model = StableDiffusionInpaintPipeline.from_pretrained(
        "runwayml/stable-diffusion-inpainting",
        torch_dtype=torch.float16 if device == "cuda" else torch.float32
    )
    model.to(device)
    return model

# Load the pipeline once at startup so it is not re-initialized on every request
model = load_inpainting_model()

# Function to edit the clothing image based on a text prompt
def edit_clothing_image(prompt, image):
    # Ensure the image is in PIL format (Gradio may also pass a NumPy array)
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)

    # The inpainting checkpoint expects 512x512 RGB inputs
    image = image.convert("RGB").resize((512, 512))

    # The inpainting pipeline requires a mask; a fully white mask lets the
    # model repaint the whole image according to the prompt
    mask = Image.new("L", image.size, 255)

    # Run inpainting to edit the image based on the text prompt
    edited_image = model(prompt=prompt, image=image, mask_image=mask).images[0]
    return edited_image

# Gradio interface
def interface():
    with gr.Blocks() as ui:
        gr.Markdown("# Clothing Image Editing with AI")

        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(label="Editing Prompt", placeholder="e.g., change the color of the t-shirt to red")
                input_image = gr.Image(label="Upload Image", type="pil")
                edit_button = gr.Button("Apply Edit")
            with gr.Column():
                output_image = gr.Image(label="Edited Image")

        edit_button.click(edit_clothing_image, inputs=[prompt, input_image], outputs=output_image)

    return ui

# Launch the interface
ui = interface()
ui.launch()