from typing import Tuple
from PIL import Image, ImageOps
import numpy as np
import torch
# from ..utils.face_swap import pil2mask, pil2tensor, tensor2pil
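# NOTE: pil2mask / pil2tensor / tensor2pil are used below but the import above is
# commented out; the minimal stand-ins here are assumed equivalents of the helpers
# from ..utils.face_swap so the module works on its own.
def pil2tensor(image):
    # PIL image -> 1xHxWxC float tensor in [0, 1]
    return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
def tensor2pil(image):
    # 1xHxWxC (or HxWxC) float tensor in [0, 1] -> uint8 PIL image
    return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
def pil2mask(image):
    # Grayscale PIL image -> HxW float mask tensor in [0, 1]
    return torch.from_numpy(np.array(image.convert("L")).astype(np.float32) / 255.0)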
def get_latent_size(LATENT, ORIGINAL_VALUES=False) -> Tuple[int, int]:
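    # Latent samples are shaped [batch, channels, height // 8, width // 8];
    # multiply the spatial dims by 8 to report pixel size unless the raw
    # latent dimensions are requested via ORIGINAL_VALUES.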
lc = LATENT.copy()
size = lc["samples"].shape[3], lc["samples"].shape[2]
    if not ORIGINAL_VALUES:
size = size[0] * 8, size[1] * 8
return size
class GetLatentSize:
def __init__(self) -> None:
pass
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"latent": ("LATENT",),
"original": ([False, True],),
}
}
RETURN_TYPES = ("INT", "INT", "TUPLE",)
CATEGORY = 'Vyro/Utils'
FUNCTION = 'get_size'
def get_size(self, latent, original):
size = get_latent_size(latent, original)
return (size[0], size[1], size,)
class MultilineStringNode:
def __init__(self):
pass
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"Text": ("STRING", {
"default": "",
"multiline": True,
}),
}
}
RETURN_TYPES = ("STRING",)
FUNCTION = "get_value"
CATEGORY = "Vyro/Utils"
def get_value(self, Text):
return (Text,)
class WAS_Images_To_RGB:
def __init__(self):
pass
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"images": ("IMAGE",),
},
}
RETURN_TYPES = ("IMAGE",)
RETURN_NAMES = ("images",)
FUNCTION = "images_to_rgb"
CATEGORY = "Vyro/Image"
def images_to_rgb(self, images):
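        # Round-trip each image through PIL to force an RGB conversion, then re-stack the batch.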
tensors = []
for image in images:
tensors.append(pil2tensor(tensor2pil(image).convert("RGB")))
tensors = torch.cat(tensors, dim=0)
return (tensors,)
def smooth_region(image, tolerance):
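    # Blur the mask with a Gaussian of the given sigma ("tolerance"), re-threshold
    # at half the blurred maximum to get a clean binary region, then invert.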
from scipy.ndimage import gaussian_filter
image = image.convert("L")
mask_array = np.array(image)
smoothed_array = gaussian_filter(mask_array, sigma=tolerance)
threshold = np.max(smoothed_array) / 2
smoothed_mask = np.where(smoothed_array >= threshold, 255, 0).astype(np.uint8)
smoothed_image = Image.fromarray(smoothed_mask, mode="L")
return ImageOps.invert(smoothed_image.convert("RGB"))
class WAS_Mask_Smooth_Region:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"masks": ("MASK",),
"sigma": ("FLOAT", {"default":5.0, "min":0.0, "max":128.0, "step":0.1}),
}
}
CATEGORY = "Vyro/Image/Masking"
RETURN_TYPES = ("MASK",)
RETURN_NAMES = ("MASKS",)
FUNCTION = "smooth"
def smooth(self, masks, sigma=128):
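        # Accepts a single mask (<= 3 dims) or a batch (4 dims); each mask is
        # smoothed via the module-level smooth_region helper and returned as a mask tensor.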
if masks is None:
masks = torch.zeros((1, 1, 10, 10)) # Replace 10x10 with the desired dimensions
masks[0, 0, 5, 5] = 1 # One white pixel at the center
if masks.ndim > 3:
regions = []
for mask in masks:
mask_np = np.clip(255. * mask.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)
pil_image = Image.fromarray(mask_np, mode="L")
                region_mask = smooth_region(pil_image, sigma)
region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1)
regions.append(region_tensor)
regions_tensor = torch.cat(regions, dim=0)
return (regions_tensor,)
else:
mask_np = np.clip(255. * masks.cpu().numpy().squeeze(), 0, 255).astype(np.uint8)
pil_image = Image.fromarray(mask_np, mode="L")
region_mask = smooth_region(pil_image, sigma)
region_tensor = pil2mask(region_mask).unsqueeze(0).unsqueeze(1)
return (region_tensor,)
class WAS_Latent_Upscale:
def __init__(self):
pass
@classmethod
def INPUT_TYPES(cls):
return {"required": {"samples": ("LATENT",), "mode": (["area", "bicubic", "bilinear", "nearest"],),
"factor": ("FLOAT", {"default": 2.0, "min": 0.1, "max": 8.0, "step": 0.01}),
"align": (["true", "false"], )}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "latent_upscale"
CATEGORY = "Vyro/Latent/Transform"
    def latent_upscale(self, samples, mode, factor, align):
        s = samples.copy()
        valid_modes = ["area", "bicubic", "bilinear", "nearest"]
        if mode not in valid_modes:
            print(f"Invalid interpolation mode `{mode}` selected. Valid modes are: {', '.join(valid_modes)}")
            return (s,)
        align = align == 'true'
        if not isinstance(factor, float) or factor <= 0:
            print(f"The input `factor` is `{factor}`, but it should be a positive float.")
            return (s,)
        shape = s['samples'].shape
        size = tuple(int(round(dim * factor)) for dim in shape[-2:])
if mode in ['linear', 'bilinear', 'bicubic', 'trilinear']:
s["samples"] = torch.nn.functional.interpolate(
s['samples'], size=size, mode=mode, align_corners=align)
else:
s["samples"] = torch.nn.functional.interpolate(s['samples'], size=size, mode=mode)
return (s,)
TEXT_TYPE = "STRING"
class WAS_Text_Concatenate:
def __init__(self):
pass
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"text_a": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}),
"text_b": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}),
"linebreak_addition": (['false','true'], ),
},
"optional": {
"text_c": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}),
"text_d": (TEXT_TYPE, {"forceInput": (True if TEXT_TYPE == 'STRING' else False)}),
}
}
RETURN_TYPES = (TEXT_TYPE,)
FUNCTION = "text_concatenate"
CATEGORY = "Vyro/Text"
def text_concatenate(self, text_a, text_b, text_c=None, text_d=None, linebreak_addition='false'):
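        # Concatenate the inputs in order, optionally inserting a newline before each appended string.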
return_text = text_a + ("\n" if linebreak_addition == 'true' else '') + text_b
if text_c:
return_text = return_text + ("\n" if linebreak_addition == 'true' else '') + text_c
if text_d:
return_text = return_text + ("\n" if linebreak_addition == 'true' else '') + text_d
return (return_text, )
class WAS_Image_Threshold:
def __init__(self):
pass
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"image": ("IMAGE",),
"threshold": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
},
}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "image_threshold"
CATEGORY = "WAS Suite/Image/Process"
def image_threshold(self, image, threshold=0.5):
return (pil2tensor(self.apply_threshold(tensor2pil(image), threshold)), )
def apply_threshold(self, input_image, threshold=0.5):
# Convert the input image to grayscale
grayscale_image = input_image.convert('L')
# Apply the threshold to the grayscale image
threshold_value = int(threshold * 255)
thresholded_image = grayscale_image.point(
lambda x: 255 if x >= threshold_value else 0, mode='L')
return thresholded_image
class VyroEmptyLatentImage:
def __init__(self, device="cpu"):
self.device = device
@classmethod
    def INPUT_TYPES(cls):
return {"required": { "vyro_params": ("VYRO_PARAMS",),
"batch_size": ("INT", {"default": 1, "min": 1, "max": 4096})}}
RETURN_TYPES = ("LATENT",)
FUNCTION = "generate"
CATEGORY = "latent"
def generate(self, vyro_params, batch_size=1):
height = vyro_params.height if vyro_params.height else 1024
width = vyro_params.width if vyro_params.width else 1024
print(f"LATENT IMAGE SIZE: {height, width}")
latent = torch.zeros([batch_size, 4, height // 8, width // 8])
return ({"samples":latent}, )
NODE_CLASS_MAPPINGS = {
"Get latent size": GetLatentSize,
"Images to RGB": WAS_Images_To_RGB,
"Mask Smooth Region": WAS_Mask_Smooth_Region,
"Latent Upscale by Factor (WAS)": WAS_Latent_Upscale,
"Text box": MultilineStringNode,
"Text Concatenate": WAS_Text_Concatenate,
"Image Threshold": WAS_Image_Threshold,
"Vyro Empty Latent Image": VyroEmptyLatentImage
}