import base64
import io

import numpy as np
import torch
from PIL import Image

from nodes import ImageScale, VAEEncode
from perlin_noise import perlin_noise

from ..utils import VyroParams
from .perlin import perlin_power_fractal_batch
def pil2tensor(image):
    # PIL image -> float32 tensor in ComfyUI's [1, H, W, C] layout, scaled to [0, 1].
    return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
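
# Hedged counterpart (not in the original file): a minimal sketch of the inverse
# conversion, assuming the [1, H, W, C] float layout produced by pil2tensor above.
def tensor2pil(tensor):
    # Clamp to [0, 255] and cast to uint8 before handing the array back to PIL.
    array = np.clip(tensor.squeeze(0).cpu().numpy() * 255.0, 0, 255).astype(np.uint8)
    return Image.fromarray(array)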
class VyroPipeInputV2:
    def __init__(self):
        self.vae_encoder = VAEEncode()
        self.image_scale = ImageScale()
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "user_prompt": ("STRING", {"multiline": True}),
                "mode": (VyroParams.MODE, {"default": VyroParams.MODE[0]}),
                "vae": ("VAE",),
                "init_noise_mode": (["perlin1", "perlin2", "zeros"], {"default": "perlin1"}),
            },
            "optional": {
                "user_neg_prompt": ("STRING", {"multiline": True}),
                "batch_size": ("INT", {"default": 1, "min": 1, "max": 4, "step": 1}),
                "cfg": ("FLOAT", {"default": 7.5, "min": 1.0, "max": 30.0, "step": 0.1}),
                "steps": ("INT", {"default": 20, "min": 10, "max": 150, "step": 1}),
                "width": ("INT", {"default": 1024, "min": 64, "max": 4096, "step": 8}),
                "height": ("INT", {"default": 1024, "min": 64, "max": 4096, "step": 8}),
                "seed": ("INT", {"default": 1, "min": 1, "max": 2**32 - 1}),
                # Base64-encoded init image; an empty string means "start from noise".
                "init_img": ("STRING", {"multiline": True}),
                "denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                "stage1_strength": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}),
                "stage2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
                "efficiency_multiplier": (VyroParams.MULTIPLIER, {"default": VyroParams.MULTIPLIER[0]}),
            },
        }
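
    # Note (added for context): ComfyUI calls INPUT_TYPES() to build the node's UI;
    # "required" entries become mandatory inputs/widgets, while "optional" entries
    # may be left unconnected and fall back to the defaults in prep_input below.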
    RETURN_TYPES = ("VYRO_PARAMS",)
    RETURN_NAMES = ("vyro_params",)
    FUNCTION = "prep_input"
    # OUTPUT_NODE = False
    CATEGORY = "Vyro"
    def blank_image(self, width, height, red, green, blue):
        # Snap dimensions down to multiples of 8 (required by the VAE latent grid).
        width = (width // 8) * 8
        height = (height // 8) * 8
        # Create a solid-color RGB canvas.
        blank = Image.new(mode="RGB", size=(width, height), color=(red, green, blue))
        return pil2tensor(blank)
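
    # Illustrative call (not exercised elsewhere in this file):
    #   black = self.blank_image(512, 512, 0, 0, 0)  # -> tensor of shape [1, 512, 512, 3]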
    def image_to_mask(self, image, channel):
        # Pull a single channel out of a [B, H, W, C] image tensor to use as a mask.
        channels = ["red", "green", "blue", "alpha"]
        mask = image[:, :, :, channels.index(channel)]
        return (mask,)

    def mask_to_image(self, mask):
        # [B, H, W] mask -> [B, H, W, 3] image: view as [B, 1, H, W], move the channel
        # axis last, then broadcast it across the three RGB channels.
        result = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3)
        return (result,)
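
    # Round-trip sketch (illustrative, not part of the original flow): extracting a
    # channel and re-expanding it yields a grayscale visualization of that channel:
    #   (mask,) = self.image_to_mask(image, "red")
    #   (viz,) = self.mask_to_image(mask)  # [B, H, W, 3], all three channels equal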
    def upscale_image(self, image, upscale_method='nearest', width=512, height=512, crop='disabled'):
        # Resize a PIL image; a width or height of 0 means "derive from aspect ratio".
        if width == 0 and height == 0:
            return image
        original_aspect_ratio = image.width / image.height
        pil_methods = {
            'nearest': Image.NEAREST,
            'bilinear': Image.BILINEAR,
            'area': Image.HAMMING,  # closest PIL analogue to area resampling
            'bicubic': Image.BICUBIC,
            'lanczos': Image.LANCZOS,
        }
        if crop == 'center':
            # Resize so the target rectangle is fully covered, then center-crop.
            target_aspect_ratio = width / height
            if original_aspect_ratio > target_aspect_ratio:
                new_height = height
                new_width = int(round(height * original_aspect_ratio))
            else:
                new_width = width
                new_height = int(round(width / original_aspect_ratio))
            resized_image = image.resize((new_width, new_height), pil_methods[upscale_method])
            left = (resized_image.width - width) // 2
            top = (resized_image.height - height) // 2
            resized_image = resized_image.crop((left, top, left + width, top + height))
        else:
            # Note: the aspect ratio is only computed here, so a zero height no longer
            # triggers a division by zero before this branch can fill it in.
            if width == 0:
                width = int(round(height * original_aspect_ratio))
            elif height == 0:
                height = int(round(width / original_aspect_ratio))
            resized_image = image.resize((width, height), pil_methods[upscale_method])
        return resized_image
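
    # Worked example (illustrative): a 1920x1080 source center-cropped to 1024x1024.
    # original_aspect_ratio = 16/9 exceeds the target ratio of 1, so the image is
    # first resized to 1820x1024 (height-fit), then a centered 1024x1024 window
    # is cropped out.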
    def prep_input(self, user_prompt, mode, vae, init_noise_mode, user_neg_prompt='',
                   batch_size=1, cfg=7.5, steps=20, width=1024, height=1024, seed=1,
                   init_img='', denoise=1.0, stage1_strength=0.25, stage2_strength=1.0,
                   efficiency_multiplier=1.0):
        # Work at a reduced resolution; the multiplier is compensated for downstream.
        width = int(width / efficiency_multiplier)
        height = int(height / efficiency_multiplier)
        if init_img == "" or init_img is None or init_img == 'undefined':
            # No init image: build an initial latent of shape [B, 4, H/8, W/8].
            # torch.cuda.manual_seed() returns None, so construct an explicit seeded
            # CUDA generator instead of passing the (nonexistent) return value along.
            generator = torch.Generator(device="cuda").manual_seed(seed)
            if 'perlin2' in init_noise_mode:
                init_img = perlin_noise(grid_shape=(2, 8), out_shape=(width // 8, height // 8),
                                        batch_size=batch_size * 4, generator=generator)
                # Pack the batch_size * 4 noise planes into the latent layout.
                init_img = init_img.reshape(batch_size, 4, height // 8, width // 8)
            elif 'perlin1' in init_noise_mode:
                init_img = perlin_power_fractal_batch(
                    batch_size=batch_size,
                    width=width // 8,
                    height=height // 8,
                    X=0,
                    Y=0,
                    Z=0,
                    frame=0,
                    seed=seed,
                )[0]
                init_img = init_img.reshape(batch_size, 4, height // 8, width // 8)
            else:
                # "zeros": an empty latent, as with a standard txt2img start.
                init_img = torch.zeros([batch_size, 4, height // 8, width // 8])
        else:
            # init_img arrives as a base64 string: decode it, scale/center-crop it to
            # the working resolution, then VAE-encode it into the latent space.
            init_img_bytes = base64.b64decode(init_img)
            img = Image.open(io.BytesIO(init_img_bytes))
            init_img = pil2tensor(img)
            init_img = self.image_scale.upscale(init_img, "nearest-exact", width, height, "center")[0]
            init_img = init_img.squeeze(0)
            init_img = init_img.repeat(batch_size, 1, 1, 1)
            init_img = self.vae_encoder.encode(vae, init_img)[0]['samples']
        params = VyroParams(latents={"samples": init_img}, user_prompt=user_prompt, mode=mode,
                            cfg=cfg, batch_size=batch_size, steps=steps, width=width, height=height,
                            seed=seed, denoise=denoise, user_neg_prompt=user_neg_prompt,
                            stage1_strength=stage1_strength, stage2_strength=stage2_strength,
                            efficiency_multiplier=efficiency_multiplier)
        return (params,)
NODE_CLASS_MAPPINGS = {
    "VyroPipeInputV2": VyroPipeInputV2,
}

# Friendly, human-readable titles for the nodes. The keys must match the keys in
# NODE_CLASS_MAPPINGS, otherwise ComfyUI never applies the display name.
NODE_DISPLAY_NAME_MAPPINGS = {
    "VyroPipeInputV2": "Vyro Pipe Input V2",
}
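
# Hedged usage sketch (not part of the original file): how the node might be
# driven outside a ComfyUI graph, assuming `vae` is a loaded VAE object. With
# init_noise_mode="zeros" and no init_img, the VAE is passed through untouched.
#
#   node = VyroPipeInputV2()
#   (params,) = node.prep_input(
#       user_prompt="a misty forest at dawn",
#       mode=VyroParams.MODE[0],
#       vae=vae,
#       init_noise_mode="zeros",
#       batch_size=1,
#       width=1024,
#       height=1024,
#       seed=42,
#   )
#   # params.latents["samples"] is then a [1, 4, 128, 128] zero latent.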