import os
import sys
import json
import subprocess
import numpy as np
import re
import datetime

from typing import List
from PIL import Image, ExifTags
from PIL.PngImagePlugin import PngInfo
from pathlib import Path

import folder_paths
from .logger import logger
from .image_latent_nodes import *
from .load_video_nodes import LoadVideoUpload, LoadVideoPath
from .load_images_nodes import LoadImagesFromDirectoryUpload, LoadImagesFromDirectoryPath
from .batched_nodes import VAEEncodeBatched, VAEDecodeBatched
from .utils import ffmpeg_path, get_audio, hash_path, validate_path, requeue_workflow, gifski_path

folder_paths.folder_names_and_paths["VHS_video_formats"] = (
    [
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "video_formats"),
    ],
    [".json"]
)
def gen_format_widgets(video_format):
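    """Yield single-element list wrappers around each configurable (list-typed)
    entry of a video format definition. Callers may overwrite item[0]; the new
    value is written back into the format dict when the generator resumes."""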
    for k in video_format:
        if k.endswith("_pass"):
            for i in range(len(video_format[k])):
                if isinstance(video_format[k][i], list):
                    item = [video_format[k][i]]
                    yield item
                    video_format[k][i] = item[0]
        else:
            if isinstance(video_format[k], list):
                item = [video_format[k]]
                yield item
                video_format[k] = item[0]
def get_video_formats():
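    """Build the list of "video/<name>" formats for the format widget, bundling any
    per-format widget definitions and skipping gifski formats when gifski is unavailable."""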
    formats = []
    for format_name in folder_paths.get_filename_list("VHS_video_formats"):
        format_name = format_name[:-5]
        video_format_path = folder_paths.get_full_path("VHS_video_formats", format_name + ".json")
        with open(video_format_path, 'r') as stream:
            video_format = json.load(stream)
        if "gifski_pass" in video_format and gifski_path is None:
            #Skip format
            continue
        widgets = [w[0] for w in gen_format_widgets(video_format)]
        if (len(widgets) > 0):
            formats.append(["video/" + format_name, widgets])
        else:
            formats.append("video/" + format_name)
    return formats
def get_format_widget_defaults(format_name):
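    """Return a {widget_name: default_value} mapping for a format's extra widgets,
    falling back to the first listed option or a type-appropriate zero value."""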
    video_format_path = folder_paths.get_full_path("VHS_video_formats", format_name + ".json")
    with open(video_format_path, 'r') as stream:
        video_format = json.load(stream)
    results = {}
    for w in gen_format_widgets(video_format):
        if len(w[0]) > 2 and 'default' in w[0][2]:
            default = w[0][2]['default']
        else:
            if type(w[0][1]) is list:
                default = w[0][1][0]
            else:
                #NOTE: This doesn't respect max/min, but should be good enough as a fallback to a fallback to a fallback
                default = {"BOOLEAN": False, "INT": 0, "FLOAT": 0, "STRING": ""}[w[0][1]]
        results[w[0][0]] = default
    return results
def apply_format_widgets(format_name, kwargs):
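    """Reload the format json and replace each widget placeholder with the
    corresponding value from kwargs, returning the finalized format dict."""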
    video_format_path = folder_paths.get_full_path("VHS_video_formats", format_name + ".json")
    with open(video_format_path, 'r') as stream:
        video_format = json.load(stream)
    for w in gen_format_widgets(video_format):
        assert(w[0][0] in kwargs)
        w[0] = str(kwargs[w[0][0]])
    return video_format
def tensor_to_int(tensor, bits):
    #TODO: investigate benefit of rounding by adding 0.5 before clip/cast
    tensor = tensor.cpu().numpy() * (2**bits-1)
    return np.clip(tensor, 0, (2**bits-1))
def tensor_to_shorts(tensor):
    return tensor_to_int(tensor, 16).astype(np.uint16)
def tensor_to_bytes(tensor):
    return tensor_to_int(tensor, 8).astype(np.uint8)
def ffmpeg_process(args, video_format, video_metadata, file_path, env):
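    """Generator that pipes raw frames into an ffmpeg subprocess.

    Protocol: prime with send(None), then send() chunks of raw frame bytes,
    and finally send(None) to close stdin and let ffmpeg finish. If encoding
    with embedded metadata fails, the encode is retried without metadata.
    """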
    res = None
    frame_data = yield
    if video_format.get('save_metadata', 'False') != 'False':
        os.makedirs(folder_paths.get_temp_directory(), exist_ok=True)
        metadata = json.dumps(video_metadata)
        metadata_path = os.path.join(folder_paths.get_temp_directory(), "metadata.txt")
        #metadata from file should escape = ; # \ and newline
        metadata = metadata.replace("\\","\\\\")
        metadata = metadata.replace(";","\\;")
        metadata = metadata.replace("#","\\#")
        metadata = metadata.replace("=","\\=")
        metadata = metadata.replace("\n","\\\n")
        metadata = "comment=" + metadata
        with open(metadata_path, "w") as f:
            f.write(";FFMETADATA1\n")
            f.write(metadata)
        m_args = args[:1] + ["-i", metadata_path] + args[1:] + ["-metadata", "creation_time=now"]
        with subprocess.Popen(m_args + [file_path], stderr=subprocess.PIPE,
                              stdin=subprocess.PIPE, env=env) as proc:
            try:
                while frame_data is not None:
                    proc.stdin.write(frame_data)
                    #TODO: skip flush for increased speed
                    proc.stdin.flush()
                    frame_data = yield
                proc.stdin.close()
                res = proc.stderr.read()
            except BrokenPipeError as e:
                err = proc.stderr.read()
                #Check if output file exists. If it does, the re-execution
                #will also fail. This obscures the cause of the error
                #and seems to never occur concurrent to the metadata issue
                if os.path.exists(file_path):
                    raise Exception("An error occurred in the ffmpeg subprocess:\n" \
                            + err.decode("utf-8"))
                #Res was not set
                print(err.decode("utf-8"), end="", file=sys.stderr)
                logger.warn("An error occurred when saving with metadata")
    if res != b'':
        with subprocess.Popen(args + [file_path], stderr=subprocess.PIPE,
                              stdin=subprocess.PIPE, env=env) as proc:
            try:
                while frame_data is not None:
                    proc.stdin.write(frame_data)
                    proc.stdin.flush()
                    frame_data = yield
                proc.stdin.close()
                res = proc.stderr.read()
            except BrokenPipeError as e:
                res = proc.stderr.read()
                raise Exception("An error occurred in the ffmpeg subprocess:\n" \
                        + res.decode("utf-8"))
    if len(res) > 0:
        print(res.decode("utf-8"), end="", file=sys.stderr)
class VideoCombine:
    @classmethod
    def INPUT_TYPES(s):
        #Hide ffmpeg formats if ffmpeg isn't available
        if ffmpeg_path is not None:
            ffmpeg_formats = get_video_formats()
        else:
            ffmpeg_formats = []
        return {
            "required": {
                "images": ("IMAGE",),
                "frame_rate": (
                    "INT",
                    {"default": 8, "min": 1, "step": 1},
                ),
                "loop_count": ("INT", {"default": 0, "min": 0, "max": 100, "step": 1}),
                "filename_prefix": ("STRING", {"default": "AnimateDiff"}),
                "format": (["image/gif", "image/webp"] + ffmpeg_formats,),
                "pingpong": ("BOOLEAN", {"default": False}),
                "save_output": ("BOOLEAN", {"default": True}),
            },
            "optional": {
                "audio": ("VHS_AUDIO",),
                "batch_manager": ("VHS_BatchManager",)
            },
            "hidden": {
                "prompt": "PROMPT",
                "extra_pnginfo": "EXTRA_PNGINFO",
                "unique_id": "UNIQUE_ID"
            },
        }

    RETURN_TYPES = ("VHS_FILENAMES",)
    RETURN_NAMES = ("Filenames",)
    OUTPUT_NODE = True
    CATEGORY = "Video Helper Suite 🎥🅥🅗🅢"
    FUNCTION = "combine_video"
    def combine_video(
        self,
        images,
        frame_rate: int,
        loop_count: int,
        filename_prefix="AnimateDiff",
        format="image/gif",
        pingpong=False,
        save_output=True,
        prompt=None,
        extra_pnginfo=None,
        audio=None,
        unique_id=None,
        manual_format_widgets=None,
        batch_manager=None
    ):
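        """Save the incoming image batch as an animated image (Pillow) or a video
        (ffmpeg), always writing the first frame as a PNG so workflow metadata is
        preserved, and return the written files plus a preview entry for the UI."""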
        # get output information
        output_dir = (
            folder_paths.get_output_directory()
            if save_output
            else folder_paths.get_temp_directory()
        )
        (
            full_output_folder,
            filename,
            _,
            subfolder,
            _,
        ) = folder_paths.get_save_image_path(filename_prefix, output_dir)
        output_files = []

        metadata = PngInfo()
        video_metadata = {}
        if prompt is not None:
            metadata.add_text("prompt", json.dumps(prompt))
            video_metadata["prompt"] = prompt
        if extra_pnginfo is not None:
            for x in extra_pnginfo:
                metadata.add_text(x, json.dumps(extra_pnginfo[x]))
                video_metadata[x] = extra_pnginfo[x]
        metadata.add_text("CreationTime", datetime.datetime.now().isoformat(" ")[:19])

        if batch_manager is not None and unique_id in batch_manager.outputs:
            (counter, output_process) = batch_manager.outputs[unique_id]
        else:
            # comfy counter workaround
            max_counter = 0

            # Loop through the existing files
            matcher = re.compile(rf"{re.escape(filename)}_(\d+)\D*\..+")
            for existing_file in os.listdir(full_output_folder):
                # Check if the file matches the expected format
                match = matcher.fullmatch(existing_file)
                if match:
                    # Extract the numeric portion of the filename
                    file_counter = int(match.group(1))
                    # Update the maximum counter value if necessary
                    if file_counter > max_counter:
                        max_counter = file_counter

            # Increment the counter by 1 to get the next available value
            counter = max_counter + 1
            output_process = None

        # save first frame as png to keep metadata
        file = f"{filename}_{counter:05}.png"
        file_path = os.path.join(full_output_folder, file)
        Image.fromarray(tensor_to_bytes(images[0])).save(
            file_path,
            pnginfo=metadata,
            compress_level=4,
        )
        output_files.append(file_path)
        format_type, format_ext = format.split("/")
        if format_type == "image":
            if batch_manager is not None:
                raise Exception("Pillow('image/') formats are not compatible with batched output")
            image_kwargs = {}
            if format_ext == "gif":
                image_kwargs['disposal'] = 2
            if format_ext == "webp":
                #Save timestamp information
                exif = Image.Exif()
                exif[ExifTags.IFD.Exif] = {36867: datetime.datetime.now().isoformat(" ")[:19]}
                image_kwargs['exif'] = exif
            file = f"{filename}_{counter:05}.{format_ext}"
            file_path = os.path.join(full_output_folder, file)
            images = tensor_to_bytes(images)
            if pingpong:
                images = np.concatenate((images, images[-2:0:-1]))
            frames = [Image.fromarray(f) for f in images]
            # Use pillow directly to save an animated image
            frames[0].save(
                file_path,
                format=format_ext.upper(),
                save_all=True,
                append_images=frames[1:],
                duration=round(1000 / frame_rate),
                loop=loop_count,
                compress_level=4,
                **image_kwargs
            )
            output_files.append(file_path)
        else:
            # Use ffmpeg to save a video
            if ffmpeg_path is None:
                #Should never be reachable
                raise ProcessLookupError("Could not find ffmpeg")

            #Acquire additional format_widget values
            kwargs = None
            if manual_format_widgets is None:
                if prompt is not None:
                    kwargs = prompt[unique_id]['inputs']
                else:
                    manual_format_widgets = {}
            if kwargs is None:
                kwargs = get_format_widget_defaults(format_ext)
                missing = {}
                for k in kwargs.keys():
                    if k in manual_format_widgets:
                        kwargs[k] = manual_format_widgets[k]
                    else:
                        missing[k] = kwargs[k]
                if len(missing) > 0:
                    logger.warn("Extra format values were not provided, the following defaults will be used: " + str(kwargs) + "\nThis is likely due to usage of ComfyUI-to-python. These values can be manually set by supplying a manual_format_widgets argument")

            video_format = apply_format_widgets(format_ext, kwargs)
            if video_format.get('input_color_depth', '8bit') == '16bit':
                images = tensor_to_shorts(images)
                if images.shape[-1] == 4:
                    i_pix_fmt = 'rgba64'
                else:
                    i_pix_fmt = 'rgb48'
            else:
                images = tensor_to_bytes(images)
                if images.shape[-1] == 4:
                    i_pix_fmt = 'rgba'
                else:
                    i_pix_fmt = 'rgb24'
            if pingpong:
                if batch_manager is not None:
                    logger.error("pingpong is incompatible with batched output")
                images = np.concatenate((images, images[-2:0:-1]))
            file = f"{filename}_{counter:05}.{video_format['extension']}"
            file_path = os.path.join(full_output_folder, file)
            dimensions = f"{len(images[0][0])}x{len(images[0])}"
            loop_args = ["-vf", "loop=loop=" + str(loop_count) + ":size=" + str(len(images))]
            bitrate_arg = []
            bitrate = video_format.get('bitrate')
            if bitrate is not None:
                bitrate_arg = ["-b:v", str(bitrate) + "M" if video_format.get('megabit') == 'True' else str(bitrate) + "K"]
            args = [ffmpeg_path, "-v", "error", "-f", "rawvideo", "-pix_fmt", i_pix_fmt,
                    "-s", dimensions, "-r", str(frame_rate), "-i", "-"] \
                    + loop_args + video_format['main_pass'] + bitrate_arg

            env = os.environ.copy()
            if "environment" in video_format:
                env.update(video_format["environment"])

            if output_process is None:
                output_process = ffmpeg_process(args, video_format, video_metadata, file_path, env)
                #Proceed to first yield
                output_process.send(None)
                if batch_manager is not None:
                    batch_manager.outputs[unique_id] = (counter, output_process)

            output_process.send(images.tobytes())
            if batch_manager is not None:
                requeue_workflow((batch_manager.unique_id, not batch_manager.has_closed_inputs))
            if batch_manager is None or batch_manager.has_closed_inputs:
                #Close pipe and wait for termination.
                try:
                    output_process.send(None)
                except StopIteration:
                    pass
                if batch_manager is not None:
                    batch_manager.outputs.pop(unique_id)
                    if len(batch_manager.outputs) == 0:
                        batch_manager.reset()
            else:
                #batch is unfinished
                #TODO: Check if empty output breaks other custom nodes
                return {"ui": {"unfinished_batch": [True]}, "result": ((save_output, []),)}
            output_files.append(file_path)

            if "gifski_pass" in video_format:
                gif_output = f"{filename}_{counter:05}.gif"
                gif_output_path = os.path.join(full_output_folder, gif_output)
                gifski_args = [gifski_path] + video_format["gifski_pass"] \
                        + ["-o", gif_output_path, file_path]
                try:
                    res = subprocess.run(gifski_args, env=env, check=True, capture_output=True)
                except subprocess.CalledProcessError as e:
                    raise Exception("An error occurred in the gifski subprocess:\n" \
                            + e.stderr.decode("utf-8"))
                if res.stderr:
                    print(res.stderr.decode("utf-8"), end="", file=sys.stderr)
                #output format is actually an image and should be correctly marked
                #TODO: Evaluate a more consistent solution for this
                format = "image/gif"
                output_files.append(gif_output_path)
                file = gif_output
            elif audio is not None and audio() is not False:
                # Create audio file if input was provided
                output_file_with_audio = f"{filename}_{counter:05}-audio.{video_format['extension']}"
                output_file_with_audio_path = os.path.join(full_output_folder, output_file_with_audio)
                if "audio_pass" not in video_format:
                    logger.warn("Selected video format does not have explicit audio support")
                    video_format["audio_pass"] = ["-c:a", "libopus"]
                # FFmpeg command with audio re-encoding
                #TODO: expose audio quality options if format widgets makes it in
                #Reconsider forcing apad/shortest
                mux_args = [ffmpeg_path, "-v", "error", "-n", "-i", file_path,
                            "-i", "-", "-c:v", "copy"] \
                            + video_format["audio_pass"] \
                            + ["-af", "apad", "-shortest", output_file_with_audio_path]

                try:
                    res = subprocess.run(mux_args, input=audio(), env=env,
                                         capture_output=True, check=True)
                except subprocess.CalledProcessError as e:
                    raise Exception("An error occurred in the ffmpeg subprocess:\n" \
                            + e.stderr.decode("utf-8"))
                if res.stderr:
                    print(res.stderr.decode("utf-8"), end="", file=sys.stderr)
                output_files.append(output_file_with_audio_path)
                #Return this file with audio to the webui.
                #It will be muted unless opened or saved with right click
                file = output_file_with_audio
        previews = [
            {
                "filename": file,
                "subfolder": subfolder,
                "type": "output" if save_output else "temp",
                "format": format,
            }
        ]
        return {"ui": {"gifs": previews}, "result": ((save_output, output_files),)}
    @classmethod
    def VALIDATE_INPUTS(self, format, **kwargs):
        return True
class LoadAudio:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "audio_file": ("STRING", {"default": "input/", "vhs_path_extensions": ['wav','mp3','ogg','m4a','flac']}),
            },
            "optional" : {"seek_seconds": ("FLOAT", {"default": 0, "min": 0})}
        }

    RETURN_TYPES = ("VHS_AUDIO",)
    RETURN_NAMES = ("audio",)
    CATEGORY = "Video Helper Suite 🎥🅥🅗🅢"
    FUNCTION = "load_audio"
    def load_audio(self, audio_file, seek_seconds):
        if audio_file is None or validate_path(audio_file) != True:
            raise Exception("audio_file is not a valid path: " + audio_file)
        #Eagerly fetch the audio since the user must be using it if the
        #node executes, unlike Load Video
        audio = get_audio(audio_file, start_time=seek_seconds)
        return (lambda : audio,)

    @classmethod
    def IS_CHANGED(s, audio_file, seek_seconds):
        return hash_path(audio_file)

    @classmethod
    def VALIDATE_INPUTS(s, audio_file, **kwargs):
        return validate_path(audio_file, allow_none=True)
class PruneOutputs:
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "filenames": ("VHS_FILENAMES",),
                "options": (["Intermediate", "Intermediate and Utility"],)
            }
        }

    RETURN_TYPES = ()
    OUTPUT_NODE = True
    CATEGORY = "Video Helper Suite 🎥🅥🅗🅢"
    FUNCTION = "prune_outputs"
    def prune_outputs(self, filenames, options):
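        """Delete files recorded in a VHS_FILENAMES value. filenames[1] is the file
        list written by Video Combine, ordered [first-frame PNG, intermediate file(s),
        final output]; "Intermediate" removes the middle entries and
        "Intermediate and Utility" additionally removes the first-frame PNG."""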
        if len(filenames[1]) == 0:
            return ()
        assert(len(filenames[1]) <= 3 and len(filenames[1]) >= 2)
        delete_list = []
        if options in ["Intermediate", "Intermediate and Utility", "All"]:
            delete_list += filenames[1][1:-1]
        if options in ["Intermediate and Utility", "All"]:
            delete_list.append(filenames[1][0])
        if options in ["All"]:
            delete_list.append(filenames[1][-1])

        output_dirs = [os.path.abspath("output"), os.path.abspath("temp")]
        for file in delete_list:
            #Check that path is actually an output directory
            if (os.path.commonpath([output_dirs[0], file]) != output_dirs[0]) \
                    and (os.path.commonpath([output_dirs[1], file]) != output_dirs[1]):
                raise Exception("Tried to prune output from invalid directory: " + file)
            if os.path.exists(file):
                os.remove(file)
        return ()
class BatchManager:
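    """Coordinates batched processing between VHS load and combine nodes.

    Suspended generators are tracked per node id in self.inputs and self.outputs;
    reset() closes both sides and reinitializes the manager once a batched run
    has finished.
    """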
    def __init__(self, frames_per_batch=-1):
        self.frames_per_batch = frames_per_batch
        self.inputs = {}
        self.outputs = {}
        self.unique_id = None
        self.has_closed_inputs = False
    def reset(self):
        self.close_inputs()
        for key in self.outputs:
            if getattr(self.outputs[key][-1], "gi_suspended", False):
                try:
                    self.outputs[key][-1].send(None)
                except StopIteration:
                    pass
        self.__init__(self.frames_per_batch)
    def has_open_inputs(self):
        return len(self.inputs) > 0
    def close_inputs(self):
        for key in self.inputs:
            if getattr(self.inputs[key][-1], "gi_suspended", False):
                try:
                    self.inputs[key][-1].send(1)
                except StopIteration:
                    pass
        self.inputs = {}
    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "frames_per_batch": ("INT", {"default": 16, "min": 1, "max": 128, "step": 1})
            },
            "hidden": {
                "prompt": "PROMPT",
                "unique_id": "UNIQUE_ID"
            },
        }

    RETURN_TYPES = ("VHS_BatchManager",)
    CATEGORY = "Video Helper Suite 🎥🅥🅗🅢"
    FUNCTION = "update_batch"
    def update_batch(self, frames_per_batch, prompt=None, unique_id=None):
        if unique_id is not None and prompt is not None:
            requeue = prompt[unique_id]['inputs'].get('requeue', 0)
        else:
            requeue = 0
        if requeue == 0:
            self.reset()
            self.frames_per_batch = frames_per_batch
            self.unique_id = unique_id
        #onExecuted seems to not be called unless some message is sent
        return (self,)
NODE_CLASS_MAPPINGS = {
    "VHS_VideoCombine": VideoCombine,
    "VHS_LoadVideo": LoadVideoUpload,
    "VHS_LoadVideoPath": LoadVideoPath,
    "VHS_LoadImages": LoadImagesFromDirectoryUpload,
    "VHS_LoadImagesPath": LoadImagesFromDirectoryPath,
    "VHS_LoadAudio": LoadAudio,
    "VHS_PruneOutputs": PruneOutputs,
    "VHS_BatchManager": BatchManager,
    # Latent and Image nodes
    "VHS_SplitLatents": SplitLatents,
    "VHS_SplitImages": SplitImages,
    "VHS_SplitMasks": SplitMasks,
    "VHS_MergeLatents": MergeLatents,
    "VHS_MergeImages": MergeImages,
    "VHS_MergeMasks": MergeMasks,
    "VHS_SelectEveryNthLatent": SelectEveryNthLatent,
    "VHS_SelectEveryNthImage": SelectEveryNthImage,
    "VHS_SelectEveryNthMask": SelectEveryNthMask,
    "VHS_GetLatentCount": GetLatentCount,
    "VHS_GetImageCount": GetImageCount,
    "VHS_GetMaskCount": GetMaskCount,
    "VHS_DuplicateLatents": DuplicateLatents,
    "VHS_DuplicateImages": DuplicateImages,
    "VHS_DuplicateMasks": DuplicateMasks,
    # Batched Nodes
    "VHS_VAEEncodeBatched": VAEEncodeBatched,
    "VHS_VAEDecodeBatched": VAEDecodeBatched,
}
NODE_DISPLAY_NAME_MAPPINGS = {
    "VHS_VideoCombine": "Video Combine 🎥🅥🅗🅢",
    "VHS_LoadVideo": "Load Video (Upload) 🎥🅥🅗🅢",
    "VHS_LoadVideoPath": "Load Video (Path) 🎥🅥🅗🅢",
    "VHS_LoadImages": "Load Images (Upload) 🎥🅥🅗🅢",
    "VHS_LoadImagesPath": "Load Images (Path) 🎥🅥🅗🅢",
    "VHS_LoadAudio": "Load Audio (Path) 🎥🅥🅗🅢",
    "VHS_PruneOutputs": "Prune Outputs 🎥🅥🅗🅢",
    "VHS_BatchManager": "Batch Manager 🎥🅥🅗🅢",
    # Latent and Image nodes
    "VHS_SplitLatents": "Split Latent Batch 🎥🅥🅗🅢",
    "VHS_SplitImages": "Split Image Batch 🎥🅥🅗🅢",
    "VHS_SplitMasks": "Split Mask Batch 🎥🅥🅗🅢",
    "VHS_MergeLatents": "Merge Latent Batches 🎥🅥🅗🅢",
    "VHS_MergeImages": "Merge Image Batches 🎥🅥🅗🅢",
    "VHS_MergeMasks": "Merge Mask Batches 🎥🅥🅗🅢",
    "VHS_SelectEveryNthLatent": "Select Every Nth Latent 🎥🅥🅗🅢",
    "VHS_SelectEveryNthImage": "Select Every Nth Image 🎥🅥🅗🅢",
    "VHS_SelectEveryNthMask": "Select Every Nth Mask 🎥🅥🅗🅢",
    "VHS_GetLatentCount": "Get Latent Count 🎥🅥🅗🅢",
    "VHS_GetImageCount": "Get Image Count 🎥🅥🅗🅢",
    "VHS_GetMaskCount": "Get Mask Count 🎥🅥🅗🅢",
    "VHS_DuplicateLatents": "Duplicate Latent Batch 🎥🅥🅗🅢",
    "VHS_DuplicateImages": "Duplicate Image Batch 🎥🅥🅗🅢",
    "VHS_DuplicateMasks": "Duplicate Mask Batch 🎥🅥🅗🅢",
    # Batched Nodes
    "VHS_VAEEncodeBatched": "VAE Encode Batched 🎥🅥🅗🅢",
    "VHS_VAEDecodeBatched": "VAE Decode Batched 🎥🅥🅗🅢",
}