from transformers import pipeline
from PIL import Image
import numpy as np
from io import BytesIO
import base64

# Initialize the image-segmentation pipeline
segmenter = pipeline("image-segmentation", model="mattmdjaga/segformer_b2_clothes")


def encode_image_to_base64(image):
    """Serialize a PIL image to a base64-encoded PNG string."""
    buffered = BytesIO()
    image.save(buffered, format="PNG")
    return base64.b64encode(buffered.getvalue()).decode("utf-8")


def segment_clothing(img, clothes=("Hat", "Upper-clothes", "Skirt", "Pants", "Dress",
                                   "Belt", "Left-shoe", "Right-shoe", "Scarf")):
    # Run segmentation on the input image
    segments = segmenter(img)

    # Keep only the masks whose labels match the requested clothing types
    mask_list = []
    for s in segments:
        if s["label"] in clothes:
            mask_list.append((s["mask"], s["label"]))

    # Apply each mask as an alpha channel on a copy of the original image,
    # producing one transparent cutout per clothing type
    result_images = []
    for mask, clothing_type in mask_list:
        alpha = Image.fromarray(np.array(mask))  # pipeline masks are PIL "L" images
        cutout = img.copy()                      # copy so the caller's image is not mutated
        cutout.putalpha(alpha)
        result_images.append((clothing_type, encode_image_to_base64(cutout)))
    return result_images
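

# --- Usage sketch (not part of the original file) ---
# A minimal example of calling segment_clothing, assuming a local image;
# "person.jpg" is a hypothetical placeholder path, and the printed lengths
# are only a quick sanity check on the returned base64 payloads.
if __name__ == "__main__":
    image = Image.open("person.jpg").convert("RGB")
    for clothing_type, image_base64 in segment_clothing(image):
        print(f"{clothing_type}: {len(image_base64)} base64 characters")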