"""Gradio app: virtual try-on via the Hugging Face Inference API.

Uploads a person image and a garment image, sends both (base64-encoded
PNG) to the Kwai-Kolors/Kolors-Virtual-Try-On model, and displays the
composited result.
"""

import base64
import io
import os

import gradio as gr
import PIL.Image
from huggingface_hub import InferenceClient

# Read the token from the environment instead of hard-coding a secret
# in source control. Falls back to a placeholder so the UI still loads.
HF_TOKEN = os.environ.get("HF_TOKEN", "your_token_here")

# Initialize the Hugging Face client
client = InferenceClient(
    model="Kwai-Kolors/Kolors-Virtual-Try-On",
    token=HF_TOKEN,
)


def _image_to_b64(image):
    """Encode a PIL image as a base64 PNG string (utf-8 text)."""
    buf = io.BytesIO()
    image.save(buf, format='PNG')
    return base64.b64encode(buf.getvalue()).decode('utf-8')


def virtual_try_on(person_image, garment_image):
    """
    Process the virtual try-on request.

    Args:
        person_image: PIL Image of the person (or None if not uploaded)
        garment_image: PIL Image of the garment (or None if not uploaded)

    Returns:
        (result, status): PIL Image of the result (or None on failure)
        and a human-readable status string.
    """
    # Gradio passes None when the user submits without uploading an image;
    # fail fast with a clear message instead of an AttributeError.
    if person_image is None or garment_image is None:
        return None, "Error: both a person image and a garment image are required"

    try:
        person_b64 = _image_to_b64(person_image)
        garment_b64 = _image_to_b64(garment_image)

        # NOTE(review): InferenceClient.post was removed in recent
        # huggingface_hub releases — confirm the installed version still
        # exposes it, or migrate to the current task-specific API.
        response = client.post(
            json={
                "inputs": [
                    {"image": person_b64},
                    {"image": garment_b64},
                ]
            }
        )

        # The API returns raw image bytes on success.
        result_image = PIL.Image.open(io.BytesIO(response))
        return result_image, "Success"
    except Exception as e:
        # Top-level UI boundary: surface the error to the user rather
        # than crashing the Gradio callback.
        return None, f"Error: {str(e)}"


# Create Gradio interface
demo = gr.Interface(
    fn=virtual_try_on,
    inputs=[
        gr.Image(type="pil", label="Person Image"),
        gr.Image(type="pil", label="Garment Image"),
    ],
    outputs=[
        gr.Image(type="pil", label="Result"),
        gr.Text(label="Status"),
    ],
    title="Virtual Try-On API",
    description="Upload a person image and a garment image to see how the garment would look on the person.",
)

if __name__ == "__main__":
    demo.launch()