WARNING: Installing the current version is causing an issue where ComfyUI fails to start."
+ },
+ {
+ "author": "Fannovel16",
+ "title": "ControlNet Preprocessors",
+ "reference": "https://github.com/Fannovel16/comfy_controlnet_preprocessors",
+ "files": [
+ "https://github.com/Fannovel16/comfy_controlnet_preprocessors"
+ ],
+ "install_type": "git-clone",
+ "description": "ControlNet Preprocessors. (To use this extension, you need to download the required model file from Install Models)
NOTE: Please uninstall this custom node and instead install 'ComfyUI's ControlNet Auxiliary Preprocessors' from the default channel. To use nodes belonging to controlnet v1 such as Canny_Edge_Preprocessor, MIDAS_Depth_Map_Preprocessor, Uniformer_SemSegPreprocessor, etc., you need to copy the config.yaml.example file to config.yaml and change skip_v1: True to skip_v1: False.
"
+ },
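The NOTE above ends with a manual config edit. A minimal sketch of that edit in Python, assuming the extension is installed at custom_nodes/comfy_controlnet_preprocessors (the path is an assumption; the file and key names come from the NOTE itself):

```python
# Hedged sketch of the config edit described in the NOTE above.
import shutil
from pathlib import Path

ext_dir = Path("custom_nodes/comfy_controlnet_preprocessors")  # assumed install path
example = ext_dir / "config.yaml.example"
config = ext_dir / "config.yaml"

if not config.exists():
    shutil.copy(example, config)  # copy config.yaml.example -> config.yaml

# Flip skip_v1 so the controlnet v1 nodes (Canny_Edge_Preprocessor, etc.) load.
config.write_text(config.read_text().replace("skip_v1: True", "skip_v1: False"))
```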
+ {
+ "author": "comfyanonymous",
+ "title": "ComfyUI_experiments/sampler_tonemap",
+ "reference": "https://github.com/comfyanonymous/ComfyUI_experiments",
+ "files": [
+ "https://github.com/comfyanonymous/ComfyUI_experiments/raw/master/sampler_tonemap.py"
+ ],
+ "install_type": "copy",
+ "description": "ModelSamplerTonemapNoiseTest a node that makes the sampler use a simple tonemapping algorithm to tonemap the noise. It will let you use higher CFG without breaking the image. To using higher CFG lower the multiplier value. Similar to Dynamic Thresholding extension of A1111. "
+ },
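The description is terse, so here is a minimal sketch of the idea: Reinhard-style tonemapping applied to the magnitude of the guidance noise, with multiplier as the knob described above. This illustrates the technique under our own naming; see sampler_tonemap.py for the actual node.

```python
# Sketch: tonemap the CFG noise magnitude so high cond_scale stops blowing out images.
import torch

def tonemap_noise(cond, uncond, cond_scale, multiplier=1.0):
    noise = cond - uncond                                    # guidance direction
    mag = torch.linalg.vector_norm(noise, dim=1, keepdim=True) + 1e-10
    unit = noise / mag                                       # keep direction, squash magnitude
    mean = mag.mean(dim=(1, 2, 3), keepdim=True)
    std = mag.std(dim=(1, 2, 3), keepdim=True)
    top = (mean + 3.0 * std) * multiplier                    # soft ceiling; lower it for higher CFG
    scaled = mag / top
    tonemapped = scaled / (scaled + 1.0) * top               # Reinhard curve: x / (1 + x)
    return uncond + unit * tonemapped * cond_scale
```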
+ {
+ "author": "comfyanonymous",
+ "title": "ComfyUI_experiments/sampler_rescalecfg",
+ "reference": "https://github.com/comfyanonymous/ComfyUI_experiments",
+ "files": [
+ "https://github.com/comfyanonymous/ComfyUI_experiments/raw/master/sampler_rescalecfg.py"
+ ],
+ "install_type": "copy",
+ "description": "RescaleClassifierFreeGuidance improves the problem of images being degraded by high CFG.To using higher CFG lower the multiplier value. Similar to Dynamic Thresholding extension of A1111. (reference paper)
It is recommended to use the integrated custom nodes in the default channel for update support rather than installing individual nodes.
"
+ },
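The rescale step from the referenced paper ('Common Diffusion Noise Schedules and Sample Steps are Flawed') counteracts high-CFG degradation by matching the standard deviation of the guided prediction to that of the conditional one, then blending with plain CFG. A sketch of that formula, not the node's exact code:

```python
# Sketch of rescaled classifier-free guidance as described in the referenced paper.
import torch

def rescale_cfg(cond, uncond, cond_scale, multiplier=0.7):
    x_cfg = uncond + cond_scale * (cond - uncond)       # plain CFG
    std_cond = cond.std(dim=(1, 2, 3), keepdim=True)
    std_cfg = x_cfg.std(dim=(1, 2, 3), keepdim=True)
    x_rescaled = x_cfg * (std_cond / std_cfg)           # restore conditional statistics
    # The multiplier blends rescaled and plain CFG; lower it to tolerate higher CFG.
    return multiplier * x_rescaled + (1.0 - multiplier) * x_cfg
```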
+ {
+ "author": "comfyanonymous",
+ "title": "ComfyUI_experiments/advanced_model_merging",
+ "reference": "https://github.com/comfyanonymous/ComfyUI_experiments",
+ "files": [
+ "https://github.com/comfyanonymous/ComfyUI_experiments/raw/master/advanced_model_merging.py"
+ ],
+ "install_type": "copy",
+ "description": "This provides a detailed model merge feature based on block weight. ModelMergeBlock, in vanilla ComfyUI, allows for adjusting the ratios of input/middle/output layers, but this node provides ratio adjustments for all blocks within each layer.
It is recommended to use the integrated custom nodes in the default channel for update support rather than installing individual nodes.
"
+ },
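Block-weight merging interpolates every parameter, with the ratio chosen per UNet block instead of one global ratio. A minimal sketch under assumed parameter naming; the block keys below are hypothetical, not the node's actual inputs:

```python
# Sketch of a per-block weighted merge of two state dicts.
import torch

def merge_by_block(sd_a, sd_b, block_ratios, default=0.5):
    merged = {}
    for name, weight_a in sd_a.items():
        ratio = default
        for block, r in block_ratios.items():   # e.g. {"input_blocks.1.": 0.2}
            if block in name:                    # assumed name-matching scheme
                ratio = r
                break
        merged[name] = torch.lerp(weight_a, sd_b[name], ratio)
    return merged
```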
+ {
+ "author": "comfyanonymous",
+ "title": "ComfyUI_experiments/sdxl_model_merging",
+ "reference": "https://github.com/comfyanonymous/ComfyUI_experiments",
+ "files": [
+ "https://github.com/comfyanonymous/ComfyUI_experiments/raw/master/sdxl_model_merging.py"
+ ],
+ "install_type": "copy",
+ "description": "These nodes provide the capability to merge SDXL base models.
It is recommended to use the integrated custom nodes in the default channel for update support rather than installing individual nodes.
It is recommended to use the integrated custom nodes in the default channel for update support rather than installing individual nodes.
"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI-Manager/node_db/legacy/extension-node-map.json b/custom_nodes/ComfyUI-Manager/node_db/legacy/extension-node-map.json
new file mode 100644
index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/node_db/legacy/extension-node-map.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI-Manager/node_db/legacy/model-list.json b/custom_nodes/ComfyUI-Manager/node_db/legacy/model-list.json
new file mode 100644
index 0000000000000000000000000000000000000000..8e3e1dc4858a08aa46190aa53ba320d565206cf4
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/node_db/legacy/model-list.json
@@ -0,0 +1,3 @@
+{
+ "models": []
+}
diff --git a/custom_nodes/ComfyUI-Manager/node_db/new/alter-list.json b/custom_nodes/ComfyUI-Manager/node_db/new/alter-list.json
new file mode 100644
index 0000000000000000000000000000000000000000..072c3bb5e8bd05b6f14f6df25386dc1e1010a137
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/node_db/new/alter-list.json
@@ -0,0 +1,4 @@
+{
+ "items": [
+ ]
+}
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI-Manager/node_db/new/custom-node-list.json b/custom_nodes/ComfyUI-Manager/node_db/new/custom-node-list.json
new file mode 100644
index 0000000000000000000000000000000000000000..1b241afc34f95cc28f89109340630d0c67710abf
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/node_db/new/custom-node-list.json
@@ -0,0 +1,695 @@
+{
+ "custom_nodes": [
+ {
+ "author": "#NOTICE_1.13",
+ "title": "NOTICE: This channel is not the default channel.",
+ "reference": "https://github.com/ltdrdata/ComfyUI-Manager",
+ "files": [],
+ "install_type": "git-clone",
+ "description": "If you see this message, your ComfyUI-Manager is outdated.\nRecent channel provides only the list of the latest nodes. If you want to find the complete node list, please go to the Default channel.\nMaking LoRA has never been easier!"
+ },
+
+
+ {
+ "author": "Kijai",
+ "title": "Animatediff MotionLoRA Trainer",
+ "reference": "https://github.com/kijai/ComfyUI-ADMotionDirector",
+ "files": [
+ "https://github.com/kijai/ComfyUI-ADMotionDirector"
+ ],
+ "install_type": "git-clone",
+ "description": "This is a trainer for AnimateDiff MotionLoRAs, based on the implementation of MotionDirector by ExponentialML."
+ },
+ {
+ "author": "GavChap",
+ "title": "ComfyUI-CascadeResolutions",
+ "reference": "https://github.com/GavChap/ComfyUI-CascadeResolutions",
+ "files": [
+ "https://github.com/GavChap/ComfyUI-CascadeResolutions"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Cascade Resolutions"
+ },
+ {
+ "author": "blepping",
+ "title": "ComfyUI-sonar",
+ "reference": "https://github.com/blepping/ComfyUI-sonar",
+ "files": [
+ "https://github.com/blepping/ComfyUI-sonar"
+ ],
+ "install_type": "git-clone",
+ "description": "A janky implementation of Sonar sampling (momentum-based sampling) for ComfyUI."
+ },
+ {
+ "author": "StartHua",
+ "title": "comfyui_segformer_b2_clothes",
+ "reference": "https://github.com/StartHua/Comfyui_segformer_b2_clothes",
+ "files": [
+ "https://github.com/StartHua/Comfyui_segformer_b2_clothes"
+ ],
+ "install_type": "git-clone",
+ "description": "SegFormer model fine-tuned on ATR dataset for clothes segmentation but can also be used for human segmentation!\nDownload the weight and put it under checkpoints: [a/https://huggingface.co/mattmdjaga/segformer_b2_clothes](https://huggingface.co/mattmdjaga/segformer_b2_clothes)"
+ },
+ {
+ "author": "AshMartian",
+ "title": "Dir Gir",
+ "reference": "https://github.com/AshMartian/ComfyUI-DirGir",
+ "files": [
+ "https://github.com/AshMartian/ComfyUI-DirGir/raw/main/dir_picker.py",
+ "https://github.com/AshMartian/ComfyUI-DirGir/raw/main/dir_loop.py"
+ ],
+ "install_type": "copy",
+ "description": "A collection of ComfyUI directory automation utility nodes. Directory Get It Right adds a GUI directory browser, and smart directory loop/iteration node that supports regex and file extension filtering."
+ },
+ {
+ "author": "ccvv804",
+ "title": "ComfyUI StableCascade using diffusers for Low VRAM",
+ "reference": "https://github.com/ccvv804/ComfyUI-DiffusersStableCascade-LowVRAM",
+ "files": [
+ "https://github.com/ccvv804/ComfyUI-DiffusersStableCascade-LowVRAM"
+ ],
+ "install_type": "git-clone",
+ "description": "Works with RTX 4070ti 12GB.\nSimple quick wrapper for [a/https://huggingface.co/stabilityai/stable-cascade](https://huggingface.co/stabilityai/stable-cascade)\nComfy is going to implement this properly soon, this repo is just for quick testing for the impatient!"
+ },
+ {
+ "author": "yuvraj108c",
+ "title": "ComfyUI-Pronodes",
+ "reference": "https://github.com/yuvraj108c/ComfyUI-Pronodes",
+ "files": [
+ "https://github.com/yuvraj108c/ComfyUI-Pronodes"
+ ],
+ "install_type": "git-clone",
+ "description": "A collection of nice utility nodes for ComfyUI"
+ },
+ {
+ "author": "pkpkTech",
+ "title": "ComfyUI-SaveQueues",
+ "reference": "https://github.com/pkpkTech/ComfyUI-SaveQueues",
+ "files": [
+ "https://github.com/pkpkTech/ComfyUI-SaveQueues"
+ ],
+ "install_type": "git-clone",
+ "description": "Add a button to the menu to save and load the running queue and the pending queues.\nThis is intended to be used when you want to exit ComfyUI with queues still remaining."
+ },
+ {
+ "author": "jordoh",
+ "title": "ComfyUI Deepface",
+ "reference": "https://github.com/jordoh/ComfyUI-Deepface",
+ "files": [
+ "https://github.com/jordoh/ComfyUI-Deepface"
+ ],
+ "install_type": "git-clone",
+ "description": "ComfyUI nodes wrapping the [a/deepface](https://github.com/serengil/deepface) library."
+ },
+ {
+ "author": "kijai",
+ "title": "ComfyUI StableCascade using diffusers",
+ "reference": "https://github.com/kijai/ComfyUI-DiffusersStableCascade",
+ "files": [
+ "https://github.com/kijai/ComfyUI-DiffusersStableCascade"
+ ],
+ "install_type": "git-clone",
+ "description": "Simple quick wrapper for [a/https://huggingface.co/stabilityai/stable-cascade](https://huggingface.co/stabilityai/stable-cascade)\nComfy is going to implement this properly soon, this repo is just for quick testing for the impatient!"
+ },
+ {
+ "author": "Extraltodeus",
+ "title": "ComfyUI-AutomaticCFG",
+ "reference": "https://github.com/Extraltodeus/ComfyUI-AutomaticCFG",
+ "files": [
+ "https://github.com/Extraltodeus/ComfyUI-AutomaticCFG"
+ ],
+ "install_type": "git-clone",
+ "description": "My own version 'from scratch' of a self-rescaling CFG. It isn't much but it's honest work.\nTLDR: set your CFG at 8 to try it. No burned images and artifacts anymore. CFG is also a bit more sensitive because it's a proportion around 8. Low scale like 4 also gives really nice results since your CFG is not the CFG anymore. Also in general even with relatively low settings it seems to improve the quality."
+ },
+ {
+ "author": "Mamaaaamooooo",
+ "title": "Batch Rembg for ComfyUI",
+ "reference": "https://github.com/Mamaaaamooooo/batchImg-rembg-ComfyUI-nodes",
+ "files": [
+ "https://github.com/Mamaaaamooooo/batchImg-rembg-ComfyUI-nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Remove background of plural images."
+ },
+ {
+ "author": "ShmuelRonen",
+ "title": "ComfyUI-SVDResizer",
+ "reference": "https://github.com/ShmuelRonen/ComfyUI-SVDResizer",
+ "files": [
+ "https://github.com/ShmuelRonen/ComfyUI-SVDResizer"
+ ],
+ "install_type": "git-clone",
+ "description": "SVDResizer is a helper for resizing the source image, according to the sizes enabled in Stable Video Diffusion. The rationale behind the possibility of changing the size of the image in steps between the ranges of 576 and 1024, is the use of the greatest common denominator of these two numbers which is 64. SVD is lenient with resizing that adheres to this rule, so the chance of coherent video that is not the standard size of 576X1024 is greater. It is advisable to keep the value 1024 constant and play with the second size to maintain the stability of the result."
+ },
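For the record, the arithmetic behind the sizing rule: stepping in increments of gcd(576, 1024) = 64 produces the candidate sizes the description refers to.

```python
# The sizes implied by the description: multiples of 64 between 576 and 1024.
import math

assert math.gcd(576, 1024) == 64
sizes = list(range(576, 1024 + 1, 64))
print(sizes)  # [576, 640, 704, 768, 832, 896, 960, 1024]
# As advised above, keep one side fixed at 1024 and vary the other:
candidates = [(1024, s) for s in sizes]
```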
+ {
+ "author": "xiaoxiaodesha",
+ "title": "hd-nodes-comfyui",
+ "reference": "https://github.com/xiaoxiaodesha/hd_node",
+ "files": [
+ "https://github.com/xiaoxiaodesha/hd_node"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Combine HDMasks, Cover HDMasks, HD FaceIndex, HD SmoothEdge, HD GetMaskArea, HD Image Levels, HD Ultimate SD Upscale"
+ },
+ {
+ "author": "StartHua",
+ "title": "Comfyui_joytag",
+ "reference": "https://github.com/StartHua/Comfyui_joytag",
+ "files": [
+ "https://github.com/StartHua/Comfyui_joytag"
+ ],
+ "install_type": "git-clone",
+ "description": "JoyTag is a state of the art AI vision model for tagging images, with a focus on sex positivity and inclusivity. It uses the Danbooru tagging schema, but works across a wide range of images, from hand drawn to photographic.\nDownload the weight and put it under checkpoints: [a/https://huggingface.co/fancyfeast/joytag/tree/main](https://huggingface.co/fancyfeast/joytag/tree/main)"
+ },
+ {
+ "author": "redhottensors",
+ "title": "ComfyUI-Prediction",
+ "reference": "https://github.com/redhottensors/ComfyUI-Prediction",
+ "files": [
+ "https://github.com/redhottensors/ComfyUI-Prediction"
+ ],
+ "install_type": "git-clone",
+ "description": "Fully customizable Classifier Free Guidance for ComfyUI."
+ },
+ {
+ "author": "nkchocoai",
+ "title": "ComfyUI-TextOnSegs",
+ "reference": "https://github.com/nkchocoai/ComfyUI-TextOnSegs",
+ "files": [
+ "https://github.com/nkchocoai/ComfyUI-TextOnSegs"
+ ],
+ "install_type": "git-clone",
+ "description": "Add a node for drawing text with CR Draw Text of ComfyUI_Comfyroll_CustomNodes to the area of SEGS detected by Ultralytics Detector of ComfyUI-Impact-Pack."
+ },
+ {
+ "author": "cubiq",
+ "title": "ComfyUI InstantID (Native Support)",
+ "reference": "https://github.com/cubiq/ComfyUI_InstantID",
+ "files": [
+ "https://github.com/cubiq/ComfyUI_InstantID"
+ ],
+ "install_type": "git-clone",
+ "description": "Native [a/InstantID](https://github.com/InstantID/InstantID) support for ComfyUI.\nThis extension differs from the many already available as it doesn't use diffusers but instead implements InstantID natively and it fully integrates with ComfyUI.\nPlease note this still could be considered beta stage, looking forward to your feedback."
+ },
+ {
+ "author": "Franck-Demongin",
+ "title": "NX_PromptStyler",
+ "reference": "https://github.com/Franck-Demongin/NX_PromptStyler",
+ "files": [
+ "https://github.com/Franck-Demongin/NX_PromptStyler"
+ ],
+ "install_type": "git-clone",
+ "description": "A custom node for ComfyUI to create a prompt based on a list of keywords saved in CSV files."
+ },
+ {
+ "author": "Billius-AI",
+ "title": "ComfyUI-Path-Helper",
+ "reference": "https://github.com/Billius-AI/ComfyUI-Path-Helper",
+ "files": [
+ "https://github.com/Billius-AI/ComfyUI-Path-Helper"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Create Project Root, Add Folder, Add Folder Advanced, Add File Name Prefix, Add File Name Prefix Advanced, ShowPath"
+ },
+ {
+ "author": "mbrostami",
+ "title": "ComfyUI-HF",
+ "reference": "https://github.com/mbrostami/ComfyUI-HF",
+ "files": [
+ "https://github.com/mbrostami/ComfyUI-HF"
+ ],
+ "install_type": "git-clone",
+ "description": "ComfyUI Node to work with Hugging Face repositories"
+ },
+ {
+ "author": "digitaljohn",
+ "title": "ComfyUI-ProPost",
+ "reference": "https://github.com/digitaljohn/comfyui-propost",
+ "files": [
+ "https://github.com/digitaljohn/comfyui-propost"
+ ],
+ "install_type": "git-clone",
+ "description": "A set of custom ComfyUI nodes for performing basic post-processing effects including Film Grain and Vignette. These effects can help to take the edge off AI imagery and make them feel more natural."
+ },
+ {
+ "author": "deforum",
+ "title": "Deforum Nodes",
+ "reference": "https://github.com/XmYx/deforum-comfy-nodes",
+ "files": [
+ "https://github.com/XmYx/deforum-comfy-nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Official Deforum animation pipeline tools that provide a unique way to create frame-by-frame generative motion art."
+ },
+ {
+ "author": "adbrasi",
+ "title": "ComfyUI-TrashNodes-DownloadHuggingface",
+ "reference": "https://github.com/adbrasi/ComfyUI-TrashNodes-DownloadHuggingface",
+ "files": [
+ "https://github.com/adbrasi/ComfyUI-TrashNodes-DownloadHuggingface"
+ ],
+ "install_type": "git-clone",
+ "description": "ComfyUI-TrashNodes-DownloadHuggingface is a ComfyUI node designed to facilitate the download of models you have just trained and uploaded to Hugging Face. This node is particularly useful for users who employ Google Colab for training and need to quickly download their models for deployment."
+ },
+ {
+ "author": "DonBaronFactory",
+ "title": "ComfyUI-Cre8it-Nodes",
+ "reference": "https://github.com/DonBaronFactory/ComfyUI-Cre8it-Nodes",
+ "files": [
+ "https://github.com/DonBaronFactory/ComfyUI-Cre8it-Nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:CRE8IT Serial Prompter, CRE8IT Apply Serial Prompter, CRE8IT Image Sizer. A few simple nodes to facilitate working wiht ComfyUI Workflows"
+ },
+ {
+ "author": "dezi-ai",
+ "title": "ComfyUI Animate LCM",
+ "reference": "https://github.com/dezi-ai/ComfyUI-AnimateLCM",
+ "files": [
+ "https://github.com/dezi-ai/ComfyUI-AnimateLCM"
+ ],
+ "install_type": "git-clone",
+ "description": "ComfyUI implementation for [a/AnimateLCM](https://animatelcm.github.io/) [[a/paper](https://arxiv.org/abs/2402.00769)]."
+ },
+ {
+ "author": "kadirnar",
+ "title": "ComfyUI-Transformers",
+ "reference": "https://github.com/kadirnar/ComfyUI-Transformers",
+ "files": [
+ "https://github.com/kadirnar/ComfyUI-Transformers"
+ ],
+ "install_type": "git-clone",
+ "description": "ComfyUI-Transformers is a cutting-edge project combining the power of computer vision and natural language processing to create intuitive and user-friendly interfaces. Our goal is to make technology more accessible and engaging."
+ },
+ {
+ "author": "chaojie",
+ "title": "ComfyUI-DynamiCrafter",
+ "reference": "https://github.com/chaojie/ComfyUI-DynamiCrafter",
+ "files": [
+ "https://github.com/chaojie/ComfyUI-DynamiCrafter"
+ ],
+ "install_type": "git-clone",
+ "description": "Better Dynamic, Higher Resolution, and Stronger Coherence!"
+ },
+ {
+ "author": "bilal-arikan",
+ "title": "ComfyUI_TextAssets",
+ "reference": "https://github.com/bilal-arikan/ComfyUI_TextAssets",
+ "files": [
+ "https://github.com/bilal-arikan/ComfyUI_TextAssets"
+ ],
+ "install_type": "git-clone",
+ "description": "With this node you can upload text files to input folder from your local computer."
+ },
+ {
+ "author": "ZHO-ZHO-ZHO",
+ "title": "ComfyUI SegMoE",
+ "reference": "https://github.com/ZHO-ZHO-ZHO/ComfyUI-SegMoE",
+ "files": [
+ "https://github.com/ZHO-ZHO-ZHO/ComfyUI-SegMoE"
+ ],
+ "install_type": "git-clone",
+ "description": "Unofficial implementation of [a/SegMoE: Segmind Mixture of Diffusion Experts](https://github.com/segmind/segmoe) for ComfyUI"
+ },
+ {
+ "author": "ZHO-ZHO-ZHO",
+ "title": "ComfyUI-SVD-ZHO (WIP)",
+ "reference": "https://github.com/ZHO-ZHO-ZHO/ComfyUI-SVD-ZHO",
+ "files": [
+ "https://github.com/ZHO-ZHO-ZHO/ComfyUI-SVD-ZHO"
+ ],
+ "install_type": "git-clone",
+ "description": "My Workflows + Auxiliary nodes for Stable Video Diffusion (SVD)"
+ },
+ {
+ "author": "MarkoCa1",
+ "title": "ComfyUI_Segment_Mask",
+ "reference": "https://github.com/MarkoCa1/ComfyUI_Segment_Mask",
+ "files": [
+ "https://github.com/MarkoCa1/ComfyUI_Segment_Mask"
+ ],
+ "install_type": "git-clone",
+ "description": "Mask cutout based on Segment Anything."
+ },
+ {
+ "author": "antrobot",
+ "title": "antrobots-comfyUI-nodepack",
+ "reference": "https://github.com/antrobot1234/antrobots-comfyUI-nodepack",
+ "files": [
+ "https://github.com/antrobot1234/antrobots-comfyUI-nodepack"
+ ],
+ "install_type": "git-clone",
+ "description": "A small node pack containing various things I felt like ought to be in base comfy-UI. Currently includes Some image handling nodes to help with inpainting, a version of KSampler (advanced) that allows for denoise, and a node that can swap it's inputs. Remember to make an issue if you experience any bugs or errors!"
+ },
+ {
+ "author": "dfl",
+ "title": "comfyui-clip-with-break",
+ "reference": "https://github.com/dfl/comfyui-clip-with-break",
+ "files": [
+ "https://github.com/dfl/comfyui-clip-with-break"
+ ],
+ "install_type": "git-clone",
+ "description": "Clip text encoder with BREAK formatting like A1111 (uses conditioning concat)"
+ },
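A1111-style BREAK splits the prompt at each BREAK keyword, encodes the chunks separately, and concatenates the resulting conditionings along the token axis. A sketch of that scheme; encode is a hypothetical stand-in for a CLIP text encoder returning a (1, 77, dim) tensor per chunk:

```python
# Sketch of BREAK handling via conditioning concatenation.
import torch

def encode_with_break(encode, prompt):
    chunks = [c.strip() for c in prompt.split("BREAK")]
    embeddings = [encode(c) for c in chunks]   # one 77-token window per chunk
    return torch.cat(embeddings, dim=1)        # concatenate along the token axis
```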
+ {
+ "author": "yffyhk",
+ "title": "comfyui_auto_danbooru",
+ "reference": "https://github.com/yffyhk/comfyui_auto_danbooru",
+ "files": [
+ "https://github.com/yffyhk/comfyui_auto_danbooru"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: Get Danbooru, Tag Encode"
+ },
+ {
+ "author": "Clybius",
+ "title": "ComfyUI Extra Samplers",
+ "reference": "https://github.com/Clybius/ComfyUI-Extra-Samplers",
+ "files": [
+ "https://github.com/Clybius/ComfyUI-Extra-Samplers"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: SamplerCustomNoise, SamplerCustomNoiseDuo, SamplerCustomModelMixtureDuo, SamplerRES_Momentumized, SamplerDPMPP_DualSDE_Momentumized, SamplerCLYB_4M_SDE_Momentumized, SamplerTTM, SamplerLCMCustom\nThis extension provides various custom samplers not offered by the default nodes in ComfyUI."
+ },
+ {
+ "author": "ttulttul",
+ "title": "ComfyUI-Tensor-Operations",
+ "reference": "https://github.com/ttulttul/ComfyUI-Tensor-Operations",
+ "files": [
+ "https://github.com/ttulttul/ComfyUI-Tensor-Operations"
+ ],
+ "install_type": "git-clone",
+ "description": "This repo contains nodes for ComfyUI that implement some helpful operations on tensors, such as normalization."
+ },
+ {
+ "author": "davask",
+ "title": "🐰 MarasIT Nodes",
+ "reference": "https://github.com/davask/ComfyUI-MarasIT-Nodes",
+ "files": [
+ "https://github.com/davask/ComfyUI-MarasIT-Nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "This is a revised version of the Bus node from the [a/Was Node Suite](https://github.com/WASasquatch/was-node-suite-comfyui) to integrate more input/output."
+ },
+ {
+ "author": "chaojie",
+ "title": "ComfyUI-Panda3d",
+ "reference": "https://github.com/chaojie/ComfyUI-Panda3d",
+ "files": [
+ "https://github.com/chaojie/ComfyUI-Panda3d"
+ ],
+ "install_type": "git-clone",
+ "description": "ComfyUI 3d engine"
+ },
+ {
+ "author": "shadowcz007",
+ "title": "Consistency Decoder",
+ "reference": "https://github.com/shadowcz007/comfyui-consistency-decoder",
+ "files": [
+ "https://github.com/shadowcz007/comfyui-consistency-decoder"
+ ],
+ "install_type": "git-clone",
+ "description": "[a/openai Consistency Decoder](https://github.com/openai/consistencydecoder). After downloading the [a/OpenAI VAE model](https://openaipublic.azureedge.net/diff-vae/c9cebd3132dd9c42936d803e33424145a748843c8f716c0814838bdc8a2fe7cb/decoder.pt), place it in the `model/vae` directory for use."
+ },
+ {
+ "author": "pkpk",
+ "title": "ComfyUI-TemporaryLoader",
+ "reference": "https://github.com/pkpkTech/ComfyUI-TemporaryLoader",
+ "files": [
+ "https://github.com/pkpkTech/ComfyUI-TemporaryLoader"
+ ],
+ "install_type": "git-clone",
+ "description": "This is a custom node of ComfyUI that downloads and loads models from the input URL. The model is temporarily downloaded into memory and not saved to storage.\nThis could be useful when trying out models or when using various models on machines with limited storage. Since the model is downloaded into memory, expect higher memory usage than usual."
+ },
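The in-memory behavior described above, sketched with standard-library downloading; this is illustrative, not the node's actual internals:

```python
# Sketch: fetch a checkpoint over HTTP and load it without writing to disk.
import io
import urllib.request
import torch

def load_state_dict_from_url(url):
    with urllib.request.urlopen(url) as resp:
        buffer = io.BytesIO(resp.read())       # the whole model is held in RAM
    return torch.load(buffer, map_location="cpu")
```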
+ {
+ "author": "TemryL",
+ "title": "ComfyS3: Amazon S3 Integration for ComfyUI",
+ "reference": "https://github.com/TemryL/ComfyS3",
+ "files": [
+ "https://github.com/TemryL/ComfyS3"
+ ],
+ "install_type": "git-clone",
+ "description": "ComfyS3 seamlessly integrates with [a/Amazon S3](https://aws.amazon.com/en/s3/) in ComfyUI. This open-source project provides custom nodes for effortless loading and saving of images, videos, and checkpoint models directly from S3 buckets within the ComfyUI graph interface."
+ },
+ {
+ "author": "trumanwong",
+ "title": "ComfyUI-NSFW-Detection",
+ "reference": "https://github.com/trumanwong/ComfyUI-NSFW-Detection",
+ "files": [
+ "https://github.com/trumanwong/ComfyUI-NSFW-Detection"
+ ],
+ "install_type": "git-clone",
+ "description": "An implementation of NSFW Detection for ComfyUI"
+ },
+ {
+ "author": "AIGODLIKE",
+ "title": "AIGODLIKE-ComfyUI-Studio",
+ "reference": "https://github.com/AIGODLIKE/AIGODLIKE-ComfyUI-Studio",
+ "files": [
+ "https://github.com/AIGODLIKE/AIGODLIKE-ComfyUI-Studio"
+ ],
+ "install_type": "git-clone",
+ "description": "Improve the interactive experience of using ComfyUI, such as making the loading of ComfyUI models more intuitive and making it easier to create model thumbnails"
+ },
+ {
+ "author": "Chan-0312",
+ "title": "ComfyUI-IPAnimate",
+ "reference": "https://github.com/Chan-0312/ComfyUI-IPAnimate",
+ "files": [
+ "https://github.com/Chan-0312/ComfyUI-IPAnimate"
+ ],
+ "install_type": "git-clone",
+ "description": "This is a project that generates videos frame by frame based on IPAdapter+ControlNet. Unlike [a/Steerable-motion](https://github.com/banodoco/Steerable-Motion), we do not rely on AnimateDiff. This decision is primarily due to the fact that the videos generated by AnimateDiff are often blurry. Through frame-by-frame control using IPAdapter+ControlNet, we can produce higher definition and more controllable videos."
+ },
+ {
+ "author": "LyazS",
+ "title": "Anime Character Segmentation node for comfyui",
+ "reference": "https://github.com/LyazS/comfyui-anime-seg",
+ "files": [
+ "https://github.com/LyazS/comfyui-anime-seg"
+ ],
+ "install_type": "git-clone",
+ "description": "A Anime Character Segmentation node for comfyui, based on [this hf space](https://huggingface.co/spaces/skytnt/anime-remove-background)."
+ },
+ {
+ "author": "zhongpei",
+ "title": "ComfyUI for InstructIR",
+ "reference": "https://github.com/zhongpei/ComfyUI-InstructIR",
+ "files": [
+ "https://github.com/zhongpei/ComfyUI-InstructIR"
+ ],
+ "install_type": "git-clone",
+ "description": "Enhancing Image Restoration. (ref:[a/InstructIR](https://github.com/mv-lab/InstructIR))"
+ },
+ {
+ "author": "nosiu",
+ "title": "ComfyUI InstantID Faceswapper",
+ "reference": "https://github.com/nosiu/comfyui-instantId-faceswap",
+ "files": [
+ "https://github.com/nosiu/comfyui-instantId-faceswap"
+ ],
+ "install_type": "git-clone",
+ "description": "Implementation of [a/faceswap](https://github.com/nosiu/InstantID-faceswap/tree/main) based on [a/InstantID](https://github.com/InstantID/InstantID) for ComfyUI. Allows usage of [a/LCM Lora](https://huggingface.co/latent-consistency/lcm-lora-sdxl) which can produce good results in only a few generation steps.\nNOTE:Works ONLY with SDXL checkpoints."
+ },
+ {
+ "author": "ricklove",
+ "title": "comfyui-ricklove",
+ "reference": "https://github.com/ricklove/comfyui-ricklove",
+ "files": [
+ "https://github.com/ricklove/comfyui-ricklove"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes: Image Crop and Resize by Mask, Image Uncrop, Image Shadow, Optical Flow (Dip), Warp Image with Flow, Image Threshold (Channels), Finetune Variable, Finetune Analyze, Finetune Analyze Batch, ... Misc ComfyUI nodes by Rick Love"
+ },
+ {
+ "author": "chaojie",
+ "title": "ComfyUI-Pymunk",
+ "reference": "https://github.com/chaojie/ComfyUI-Pymunk",
+ "files": [
+ "https://github.com/chaojie/ComfyUI-Pymunk"
+ ],
+ "install_type": "git-clone",
+ "description": "Pymunk is a easy-to-use pythonic 2d physics library that can be used whenever you need 2d rigid body physics from Python"
+ },
+ {
+ "author": "ZHO-ZHO-ZHO",
+ "title": "ComfyUI-Qwen-VL-API",
+ "reference": "https://github.com/ZHO-ZHO-ZHO/ComfyUI-Qwen-VL-API",
+ "files": [
+ "https://github.com/ZHO-ZHO-ZHO/ComfyUI-Qwen-VL-API"
+ ],
+ "install_type": "git-clone",
+ "description": "QWen-VL-Plus & QWen-VL-Max in ComfyUI"
+ },
+ {
+ "author": "shadowcz007",
+ "title": "comfyui-ultralytics-yolo",
+ "reference": "https://github.com/shadowcz007/comfyui-ultralytics-yolo",
+ "files": [
+ "https://github.com/shadowcz007/comfyui-ultralytics-yolo"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Detect By Label."
+ },
+ {
+ "author": "StartHua",
+ "title": "ComfyUI_Seg_VITON",
+ "reference": "https://github.com/StartHua/ComfyUI_Seg_VITON",
+ "files": [
+ "https://github.com/StartHua/ComfyUI_Seg_VITON"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:segformer_clothes, segformer_agnostic, segformer_remove_bg, stabel_vition. Nodes for model dress up."
+ },
+ {
+ "author": "HaydenReeve",
+ "title": "ComfyUI Better Strings",
+ "reference": "https://github.com/HaydenReeve/ComfyUI-Better-Strings",
+ "files": [
+ "https://github.com/HaydenReeve/ComfyUI-Better-Strings"
+ ],
+ "install_type": "git-clone",
+ "description": "Strings should be easy, and simple. This extension aims to provide a set of nodes that make working with strings in ComfyUI a little bit easier."
+ },
+ {
+ "author": "Loewen-Hob",
+ "title": "Rembg Background Removal Node for ComfyUI",
+ "reference": "https://github.com/Loewen-Hob/rembg-comfyui-node-better",
+ "files": [
+ "https://github.com/Loewen-Hob/rembg-comfyui-node-better"
+ ],
+ "install_type": "git-clone",
+ "description": "This custom node is based on the [a/rembg-comfyui-node](https://github.com/Jcd1230/rembg-comfyui-node) but provides additional functionality to select ONNX models."
+ },
+ {
+ "author": "mape",
+ "title": "mape's ComfyUI Helpers",
+ "reference": "https://github.com/mape/ComfyUI-mape-Helpers",
+ "files": [
+ "https://github.com/mape/ComfyUI-mape-Helpers"
+ ],
+ "install_type": "git-clone",
+ "description": "A project that combines all my qualify of life improvements for ComyUI. For more info visit: [a/https://comfyui.ma.pe/](https://comfyui.ma.pe/)"
+ },
+ {
+ "author": "zhongpei",
+ "title": "Comfyui_image2prompt",
+ "reference": "https://github.com/zhongpei/Comfyui_image2prompt",
+ "files": [
+ "https://github.com/zhongpei/Comfyui_image2prompt"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Image to Text, Loader Image to Text Model."
+ },
+ {
+ "author": "jamal-alkharrat",
+ "title": "ComfyUI_rotate_image",
+ "reference": "https://github.com/jamal-alkharrat/ComfyUI_rotate_image",
+ "files": [
+ "https://github.com/jamal-alkharrat/ComfyUI_rotate_image"
+ ],
+ "install_type": "git-clone",
+ "description": "ComfyUI Custom Node to Rotate Images, Img2Img node."
+ },
+ {
+ "author": "JerryOrbachJr",
+ "title": "ComfyUI-RandomSize",
+ "reference": "https://github.com/JerryOrbachJr/ComfyUI-RandomSize",
+ "files": [
+ "https://github.com/JerryOrbachJr/ComfyUI-RandomSize"
+ ],
+ "install_type": "git-clone",
+ "description": "A ComfyUI custom node that randomly selects a height and width pair from a list in a config file"
+ },
+ {
+ "author": "blepping",
+ "title": "ComfyUI-bleh",
+ "reference": "https://github.com/blepping/ComfyUI-bleh",
+ "files": [
+ "https://github.com/blepping/ComfyUI-bleh"
+ ],
+ "install_type": "git-clone",
+ "description": "Better TAESD previews, BlehHyperTile."
+ },
+ {
+ "author": "yuvraj108c",
+ "title": "ComfyUI Whisper",
+ "reference": "https://github.com/yuvraj108c/ComfyUI-Whisper",
+ "files": [
+ "https://github.com/yuvraj108c/ComfyUI-Whisper"
+ ],
+ "install_type": "git-clone",
+ "description": "Transcribe audio and add subtitles to videos using Whisper in ComfyUI"
+ },
+ {
+ "author": "kijai",
+ "title": "ComfyUI-CCSR",
+ "reference": "https://github.com/kijai/ComfyUI-CCSR",
+ "files": [
+ "https://github.com/kijai/ComfyUI-CCSR"
+ ],
+ "install_type": "git-clone",
+ "description": "ComfyUI- CCSR upscaler node"
+ },
+ {
+ "author": "azure-dragon-ai",
+ "title": "ComfyUI-ClipScore-Nodes",
+ "reference": "https://github.com/azure-dragon-ai/ComfyUI-ClipScore-Nodes",
+ "files": [
+ "https://github.com/azure-dragon-ai/ComfyUI-ClipScore-Nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:ImageScore, Loader, Image Processor, Real Image Processor, Fake Image Processor, Text Processor. ComfyUI Nodes for ClipScore"
+ },
+ {
+ "author": "Hiero207",
+ "title": "ComfyUI-Hiero-Nodes",
+ "reference": "https://github.com/Hiero207/ComfyUI-Hiero-Nodes",
+ "files": [
+ "https://github.com/Hiero207/ComfyUI-Hiero-Nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Post to Discord w/ Webhook"
+ },
+ {
+ "author": "godspede",
+ "title": "ComfyUI Substring",
+ "reference": "https://github.com/godspede/ComfyUI_Substring",
+ "files": [
+ "https://github.com/godspede/ComfyUI_Substring"
+ ],
+ "install_type": "git-clone",
+ "description": "Just a simple substring node that takes text and length as input, and outputs the first length characters."
+ },
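A node this small is also a handy illustration of ComfyUI's standard custom-node shape (INPUT_TYPES, RETURN_TYPES, FUNCTION, NODE_CLASS_MAPPINGS). A sketch with our own class name, not the repo's code:

```python
# Minimal ComfyUI node sketch: output the first `length` characters of `text`.
class SubstringSketch:
    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {
            "text": ("STRING", {"multiline": True, "default": ""}),
            "length": ("INT", {"default": 8, "min": 0, "max": 10000}),
        }}

    RETURN_TYPES = ("STRING",)
    FUNCTION = "run"
    CATEGORY = "utils/text"

    def run(self, text, length):
        return (text[:length],)

NODE_CLASS_MAPPINGS = {"SubstringSketch": SubstringSketch}
```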
+ {
+ "author": "gokayfem",
+ "title": "VLM_nodes",
+ "reference": "https://github.com/gokayfem/ComfyUI_VLM_nodes",
+ "files": [
+ "https://github.com/gokayfem/ComfyUI_VLM_nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:VisionQuestionAnswering Node, PromptGenerate Node"
+ }
+ ]
+}
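Throughout this file, install_type is either 'git-clone' (files lists a repository to clone into custom_nodes/) or 'copy' (files lists individual .py files to download). A sketch of how a consumer of this JSON might dispatch on the field; the Manager's real installer is more involved:

```python
# Sketch of dispatching on install_type in custom-node-list.json.
import json
import subprocess
import urllib.request
from pathlib import Path

nodes = json.load(open("custom-node-list.json"))["custom_nodes"]
dest = Path("custom_nodes")

for entry in nodes:
    if entry["install_type"] == "git-clone":
        for repo in entry["files"]:
            subprocess.run(["git", "clone", repo, str(dest / repo.rsplit("/", 1)[-1])])
    elif entry["install_type"] == "copy":
        for url in entry["files"]:
            urllib.request.urlretrieve(url, dest / url.rsplit("/", 1)[-1])
```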
diff --git a/custom_nodes/ComfyUI-Manager/node_db/new/extension-node-map.json b/custom_nodes/ComfyUI-Manager/node_db/new/extension-node-map.json
new file mode 100644
index 0000000000000000000000000000000000000000..06eb608e9a068fb14f976af858a10ae0dc43bec2
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/node_db/new/extension-node-map.json
@@ -0,0 +1,8424 @@
+{
+ "https://gist.github.com/alkemann/7361b8eb966f29c8238fd323409efb68/raw/f9605be0b38d38d3e3a2988f89248ff557010076/alkemann.py": [
+ [
+ "Int to Text",
+ "Save A1 Image",
+ "Seed With Text"
+ ],
+ {
+ "title_aux": "alkemann nodes"
+ }
+ ],
+ "https://git.mmaker.moe/mmaker/sd-webui-color-enhance": [
+ [
+ "MMakerColorBlend",
+ "MMakerColorEnhance"
+ ],
+ {
+ "title_aux": "Color Enhance"
+ }
+ ],
+ "https://github.com/0xbitches/ComfyUI-LCM": [
+ [
+ "LCM_Sampler",
+ "LCM_Sampler_Advanced",
+ "LCM_img2img_Sampler",
+ "LCM_img2img_Sampler_Advanced"
+ ],
+ {
+ "title_aux": "Latent Consistency Model for ComfyUI"
+ }
+ ],
+ "https://github.com/1shadow1/hayo_comfyui_nodes/raw/main/LZCNodes.py": [
+ [
+ "LoadPILImages",
+ "MergeImages",
+ "make_transparentmask",
+ "tensor_trans_pil",
+ "words_generatee"
+ ],
+ {
+ "title_aux": "Hayo comfyui nodes"
+ }
+ ],
+ "https://github.com/42lux/ComfyUI-safety-checker": [
+ [
+ "Safety Checker"
+ ],
+ {
+ "title_aux": "ComfyUI-safety-checker"
+ }
+ ],
+ "https://github.com/54rt1n/ComfyUI-DareMerge": [
+ [
+ "DM_AdvancedDareModelMerger",
+ "DM_AdvancedModelMerger",
+ "DM_AttentionGradient",
+ "DM_BlockGradient",
+ "DM_BlockModelMerger",
+ "DM_DareClipMerger",
+ "DM_DareModelMergerBlock",
+ "DM_DareModelMergerElement",
+ "DM_DareModelMergerMBW",
+ "DM_GradientEdit",
+ "DM_GradientOperations",
+ "DM_GradientReporting",
+ "DM_InjectNoise",
+ "DM_LoRALoaderTags",
+ "DM_LoRAReporting",
+ "DM_MBWGradient",
+ "DM_MagnitudeMasker",
+ "DM_MaskEdit",
+ "DM_MaskOperations",
+ "DM_MaskReporting",
+ "DM_ModelReporting",
+ "DM_NormalizeModel",
+ "DM_QuadMasker",
+ "DM_ShellGradient",
+ "DM_SimpleMasker"
+ ],
+ {
+ "title_aux": "ComfyUI-DareMerge"
+ }
+ ],
+ "https://github.com/80sVectorz/ComfyUI-Static-Primitives": [
+ [
+ "FloatStaticPrimitive",
+ "IntStaticPrimitive",
+ "StringMlStaticPrimitive",
+ "StringStaticPrimitive"
+ ],
+ {
+ "title_aux": "ComfyUI-Static-Primitives"
+ }
+ ],
+ "https://github.com/AInseven/ComfyUI-fastblend": [
+ [
+ "FillDarkMask",
+ "InterpolateKeyFrame",
+ "MaskListcaptoBatch",
+ "MyOpenPoseNode",
+ "SmoothVideo",
+ "reBatchImage"
+ ],
+ {
+ "title_aux": "ComfyUI-fastblend"
+ }
+ ],
+ "https://github.com/AIrjen/OneButtonPrompt": [
+ [
+ "AutoNegativePrompt",
+ "CreatePromptVariant",
+ "OneButtonPreset",
+ "OneButtonPrompt",
+ "SavePromptToFile"
+ ],
+ {
+ "title_aux": "One Button Prompt"
+ }
+ ],
+ "https://github.com/AbdullahAlfaraj/Comfy-Photoshop-SD": [
+ [
+ "APS_LatentBatch",
+ "APS_Seed",
+ "ContentMaskLatent",
+ "ControlNetScript",
+ "ControlnetUnit",
+ "GaussianLatentImage",
+ "GetConfig",
+ "LoadImageBase64",
+ "LoadImageWithMetaData",
+ "LoadLorasFromPrompt",
+ "MaskExpansion"
+ ],
+ {
+ "title_aux": "Comfy-Photoshop-SD"
+ }
+ ],
+ "https://github.com/AbyssYuan0/ComfyUI_BadgerTools": [
+ [
+ "ApplyMaskToImage-badger",
+ "CropImageByMask-badger",
+ "ExpandImageWithColor-badger",
+ "FindThickLinesFromCanny-badger",
+ "FloatToInt-badger",
+ "FloatToString-badger",
+ "FrameToVideo-badger",
+ "GarbageCollect-badger",
+ "GetColorFromBorder-badger",
+ "GetDirName-badger",
+ "GetUUID-badger",
+ "IdentifyBorderColorToMask-badger",
+ "IdentifyColorToMask-badger",
+ "ImageNormalization-badger",
+ "ImageOverlap-badger",
+ "ImageScaleToSide-badger",
+ "IntToString-badger",
+ "SegmentToMaskByPoint-badger",
+ "StringToFizz-badger",
+ "TextListToString-badger",
+ "TrimTransparentEdges-badger",
+ "VideoCutFromDir-badger",
+ "VideoToFrame-badger",
+ "deleteDir-badger",
+ "findCenterOfMask-badger",
+ "getImageSide-badger",
+ "getParentDir-badger",
+ "mkdir-badger"
+ ],
+ {
+ "title_aux": "ComfyUI_BadgerTools"
+ }
+ ],
+ "https://github.com/Acly/comfyui-inpaint-nodes": [
+ [
+ "INPAINT_ApplyFooocusInpaint",
+ "INPAINT_InpaintWithModel",
+ "INPAINT_LoadFooocusInpaint",
+ "INPAINT_LoadInpaintModel",
+ "INPAINT_MaskedBlur",
+ "INPAINT_MaskedFill",
+ "INPAINT_VAEEncodeInpaintConditioning"
+ ],
+ {
+ "title_aux": "ComfyUI Inpaint Nodes"
+ }
+ ],
+ "https://github.com/Acly/comfyui-tooling-nodes": [
+ [
+ "ETN_ApplyMaskToImage",
+ "ETN_CropImage",
+ "ETN_LoadImageBase64",
+ "ETN_LoadMaskBase64",
+ "ETN_SendImageWebSocket"
+ ],
+ {
+ "title_aux": "ComfyUI Nodes for External Tooling"
+ }
+ ],
+ "https://github.com/Amorano/Jovimetrix": [
+ [],
+ {
+ "author": "amorano",
+ "description": "Webcams, GLSL shader, Media Streaming, Tick animation, Image manipulation,",
+ "nodename_pattern": " \\(jov\\)$",
+ "title": "Jovimetrix",
+ "title_aux": "Jovimetrix Composition Nodes"
+ }
+ ],
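This map pairs each extension URL with [list of node class names, metadata]. Extensions that register nodes dynamically, like Jovimetrix above, leave the list empty and declare a nodename_pattern regex instead. A sketch of a reverse lookup over this schema (the file name and example node name are ours):

```python
# Sketch: resolve a node class name to the extension that provides it.
import json
import re

with open("extension-node-map.json") as f:
    node_map = json.load(f)

def find_extension(node_name):
    for url, (names, meta) in node_map.items():
        if node_name in names:
            return url                                  # explicit node-name listing
        pattern = meta.get("nodename_pattern")
        if pattern and re.search(pattern, node_name):
            return url                                  # regex naming convention
    return None

print(find_extension("Blend Mask (jov)"))  # hypothetical name matching ' \(jov\)$'
```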
+ "https://github.com/ArtBot2023/CharacterFaceSwap": [
+ [
+ "Color Blend",
+ "Crop Face",
+ "Exclude Facial Feature",
+ "Generation Parameter Input",
+ "Generation Parameter Output",
+ "Image Full BBox",
+ "Load BiseNet",
+ "Load RetinaFace",
+ "Mask Contour",
+ "Segment Face",
+ "Uncrop Face"
+ ],
+ {
+ "title_aux": "Character Face Swap"
+ }
+ ],
+ "https://github.com/ArtVentureX/comfyui-animatediff": [
+ [
+ "AnimateDiffCombine",
+ "AnimateDiffLoraLoader",
+ "AnimateDiffModuleLoader",
+ "AnimateDiffSampler",
+ "AnimateDiffSlidingWindowOptions",
+ "ImageSizeAndBatchSize",
+ "LoadVideo"
+ ],
+ {
+ "title_aux": "AnimateDiff"
+ }
+ ],
+ "https://github.com/AustinMroz/ComfyUI-SpliceTools": [
+ [
+ "LogSigmas",
+ "RerangeSigmas",
+ "SpliceDenoised",
+ "SpliceLatents",
+ "TemporalSplice"
+ ],
+ {
+ "title_aux": "SpliceTools"
+ }
+ ],
+ "https://github.com/BadCafeCode/masquerade-nodes-comfyui": [
+ [
+ "Blur",
+ "Change Channel Count",
+ "Combine Masks",
+ "Constant Mask",
+ "Convert Color Space",
+ "Create QR Code",
+ "Create Rect Mask",
+ "Cut By Mask",
+ "Get Image Size",
+ "Image To Mask",
+ "Make Image Batch",
+ "Mask By Text",
+ "Mask Morphology",
+ "Mask To Region",
+ "MasqueradeIncrementer",
+ "Mix Color By Mask",
+ "Mix Images By Mask",
+ "Paste By Mask",
+ "Prune By Mask",
+ "Separate Mask Components",
+ "Unary Image Op",
+ "Unary Mask Op"
+ ],
+ {
+ "title_aux": "Masquerade Nodes"
+ }
+ ],
+ "https://github.com/Beinsezii/bsz-cui-extras": [
+ [
+ "BSZAbsoluteHires",
+ "BSZAspectHires",
+ "BSZColoredLatentImageXL",
+ "BSZCombinedHires",
+ "BSZHueChromaXL",
+ "BSZInjectionKSampler",
+ "BSZLatentDebug",
+ "BSZLatentFill",
+ "BSZLatentGradient",
+ "BSZLatentHSVAImage",
+ "BSZLatentOffsetXL",
+ "BSZLatentRGBAImage",
+ "BSZLatentbuster",
+ "BSZPixelbuster",
+ "BSZPixelbusterHelp",
+ "BSZPrincipledConditioning",
+ "BSZPrincipledSampler",
+ "BSZPrincipledScale",
+ "BSZStrangeResample"
+ ],
+ {
+ "title_aux": "bsz-cui-extras"
+ }
+ ],
+ "https://github.com/BennyKok/comfyui-deploy": [
+ [
+ "ComfyUIDeployExternalCheckpoint",
+ "ComfyUIDeployExternalImage",
+ "ComfyUIDeployExternalImageAlpha",
+ "ComfyUIDeployExternalLora",
+ "ComfyUIDeployExternalNumber",
+ "ComfyUIDeployExternalNumberInt",
+ "ComfyUIDeployExternalText"
+ ],
+ {
+ "author": "BennyKok",
+ "description": "",
+ "nickname": "Comfy Deploy",
+ "title": "comfyui-deploy",
+ "title_aux": "ComfyUI Deploy"
+ }
+ ],
+ "https://github.com/Bikecicle/ComfyUI-Waveform-Extensions/raw/main/EXT_AudioManipulation.py": [
+ [
+ "BatchJoinAudio",
+ "CutAudio",
+ "DuplicateAudio",
+ "JoinAudio",
+ "ResampleAudio",
+ "ReverseAudio",
+ "StretchAudio"
+ ],
+ {
+ "title_aux": "Waveform Extensions"
+ }
+ ],
+ "https://github.com/Billius-AI/ComfyUI-Path-Helper": [
+ [
+ "Add File Name Prefix",
+ "Add File Name Prefix Advanced",
+ "Add Folder",
+ "Add Folder Advanced",
+ "Create Project Root",
+ "Join Variables",
+ "Show Path",
+ "Show String"
+ ],
+ {
+ "title_aux": "ComfyUI-Path-Helper"
+ }
+ ],
+ "https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb": [
+ [
+ "BNK_AddCLIPSDXLParams",
+ "BNK_AddCLIPSDXLRParams",
+ "BNK_CLIPTextEncodeAdvanced",
+ "BNK_CLIPTextEncodeSDXLAdvanced"
+ ],
+ {
+ "title_aux": "Advanced CLIP Text Encode"
+ }
+ ],
+ "https://github.com/BlenderNeko/ComfyUI_Cutoff": [
+ [
+ "BNK_CutoffBasePrompt",
+ "BNK_CutoffRegionsToConditioning",
+ "BNK_CutoffRegionsToConditioning_ADV",
+ "BNK_CutoffSetRegions"
+ ],
+ {
+ "title_aux": "ComfyUI Cutoff"
+ }
+ ],
+ "https://github.com/BlenderNeko/ComfyUI_Noise": [
+ [
+ "BNK_DuplicateBatchIndex",
+ "BNK_GetSigma",
+ "BNK_InjectNoise",
+ "BNK_NoisyLatentImage",
+ "BNK_SlerpLatent",
+ "BNK_Unsampler"
+ ],
+ {
+ "title_aux": "ComfyUI Noise"
+ }
+ ],
+ "https://github.com/BlenderNeko/ComfyUI_SeeCoder": [
+ [
+ "ConcatConditioning",
+ "SEECoderImageEncode"
+ ],
+ {
+ "title_aux": "SeeCoder [WIP]"
+ }
+ ],
+ "https://github.com/BlenderNeko/ComfyUI_TiledKSampler": [
+ [
+ "BNK_TiledKSampler",
+ "BNK_TiledKSamplerAdvanced"
+ ],
+ {
+ "title_aux": "Tiled sampling for ComfyUI"
+ }
+ ],
+ "https://github.com/CYBERLOOM-INC/ComfyUI-nodes-hnmr": [
+ [
+ "CLIPIter",
+ "Dict2Model",
+ "GridImage",
+ "ImageBlend2",
+ "KSamplerOverrided",
+ "KSamplerSetting",
+ "KSamplerXYZ",
+ "LatentToHist",
+ "LatentToImage",
+ "ModelIter",
+ "RandomLatentImage",
+ "SaveStateDict",
+ "SaveText",
+ "StateDictLoader",
+ "StateDictMerger",
+ "StateDictMergerBlockWeighted",
+ "StateDictMergerBlockWeightedMulti",
+ "VAEDecodeBatched",
+ "VAEEncodeBatched",
+ "VAEIter"
+ ],
+ {
+ "title_aux": "ComfyUI-nodes-hnmr"
+ }
+ ],
+ "https://github.com/CaptainGrock/ComfyUIInvisibleWatermark/raw/main/Invisible%20Watermark.py": [
+ [
+ "Apply Invisible Watermark",
+ "Extract Watermark"
+ ],
+ {
+ "title_aux": "ComfyUIInvisibleWatermark"
+ }
+ ],
+ "https://github.com/Chan-0312/ComfyUI-IPAnimate": [
+ [
+ "IPAdapterAnimate"
+ ],
+ {
+ "title_aux": "ComfyUI-IPAnimate"
+ }
+ ],
+ "https://github.com/Chaoses-Ib/ComfyUI_Ib_CustomNodes": [
+ [
+ "ImageToPIL",
+ "LoadImageFromPath",
+ "PILToImage",
+ "PILToMask"
+ ],
+ {
+ "title_aux": "ComfyUI_Ib_CustomNodes"
+ }
+ ],
+ "https://github.com/Clybius/ComfyUI-Extra-Samplers": [
+ [
+ "SamplerCLYB_4M_SDE_Momentumized",
+ "SamplerCustomModelMixtureDuo",
+ "SamplerCustomNoise",
+ "SamplerCustomNoiseDuo",
+ "SamplerDPMPP_DualSDE_Momentumized",
+ "SamplerEulerAncestralDancing_Experimental",
+ "SamplerLCMCustom",
+ "SamplerRES_Momentumized",
+ "SamplerTTM"
+ ],
+ {
+ "title_aux": "ComfyUI Extra Samplers"
+ }
+ ],
+ "https://github.com/Clybius/ComfyUI-Latent-Modifiers": [
+ [
+ "Latent Diffusion Mega Modifier"
+ ],
+ {
+ "title_aux": "ComfyUI-Latent-Modifiers"
+ }
+ ],
+ "https://github.com/CosmicLaca/ComfyUI_Primere_Nodes": [
+ [
+ "PrimereAnyDetailer",
+ "PrimereAnyOutput",
+ "PrimereCKPT",
+ "PrimereCKPTLoader",
+ "PrimereCLIPEncoder",
+ "PrimereClearPrompt",
+ "PrimereDynamicParser",
+ "PrimereEmbedding",
+ "PrimereEmbeddingHandler",
+ "PrimereEmbeddingKeywordMerger",
+ "PrimereEmotionsStyles",
+ "PrimereHypernetwork",
+ "PrimereImageSegments",
+ "PrimereKSampler",
+ "PrimereLCMSelector",
+ "PrimereLORA",
+ "PrimereLYCORIS",
+ "PrimereLatentNoise",
+ "PrimereLoraKeywordMerger",
+ "PrimereLoraStackMerger",
+ "PrimereLycorisKeywordMerger",
+ "PrimereLycorisStackMerger",
+ "PrimereMetaCollector",
+ "PrimereMetaRead",
+ "PrimereMetaSave",
+ "PrimereMidjourneyStyles",
+ "PrimereModelConceptSelector",
+ "PrimereModelKeyword",
+ "PrimereNetworkTagLoader",
+ "PrimerePrompt",
+ "PrimerePromptSwitch",
+ "PrimereRefinerPrompt",
+ "PrimereResolution",
+ "PrimereResolutionMultiplier",
+ "PrimereResolutionMultiplierMPX",
+ "PrimereSamplers",
+ "PrimereSamplersSteps",
+ "PrimereSeed",
+ "PrimereStepsCfg",
+ "PrimereStyleLoader",
+ "PrimereStylePile",
+ "PrimereTextOutput",
+ "PrimereVAE",
+ "PrimereVAELoader",
+ "PrimereVAESelector",
+ "PrimereVisualCKPT",
+ "PrimereVisualEmbedding",
+ "PrimereVisualHypernetwork",
+ "PrimereVisualLORA",
+ "PrimereVisualLYCORIS",
+ "PrimereVisualStyle"
+ ],
+ {
+ "title_aux": "Primere nodes for ComfyUI"
+ }
+ ],
+ "https://github.com/Danand/ComfyUI-ComfyCouple": [
+ [
+ "Attention couple",
+ "Comfy Couple"
+ ],
+ {
+ "author": "Rei D.",
+ "description": "If you want to draw two different characters together without blending their features, so you could try to check out this custom node.",
+ "nickname": "Danand",
+ "title": "Comfy Couple",
+ "title_aux": "ComfyUI-ComfyCouple"
+ }
+ ],
+ "https://github.com/Davemane42/ComfyUI_Dave_CustomNode": [
+ [
+ "ABGRemover",
+ "ConditioningStretch",
+ "ConditioningUpscale",
+ "MultiAreaConditioning",
+ "MultiLatentComposite"
+ ],
+ {
+ "title_aux": "Visual Area Conditioning / Latent composition"
+ }
+ ],
+ "https://github.com/Derfuu/Derfuu_ComfyUI_ModdedNodes": [
+ [
+ "ABSNode_DF",
+ "Absolute value",
+ "Ceil",
+ "CeilNode_DF",
+ "Conditioning area scale by ratio",
+ "ConditioningSetArea with tuples",
+ "ConditioningSetAreaEXT_DF",
+ "ConditioningSetArea_DF",
+ "CosNode_DF",
+ "Cosines",
+ "Divide",
+ "DivideNode_DF",
+ "EmptyLatentImage_DF",
+ "Float",
+ "Float debug print",
+ "Float2Tuple_DF",
+ "FloatDebugPrint_DF",
+ "FloatNode_DF",
+ "Floor",
+ "FloorNode_DF",
+ "Get image size",
+ "Get latent size",
+ "GetImageSize_DF",
+ "GetLatentSize_DF",
+ "Image scale by ratio",
+ "Image scale to side",
+ "ImageScale_Ratio_DF",
+ "ImageScale_Side_DF",
+ "Int debug print",
+ "Int to float",
+ "Int to tuple",
+ "Int2Float_DF",
+ "IntDebugPrint_DF",
+ "Integer",
+ "IntegerNode_DF",
+ "Latent Scale by ratio",
+ "Latent Scale to side",
+ "LatentComposite with tuples",
+ "LatentScale_Ratio_DF",
+ "LatentScale_Side_DF",
+ "MultilineStringNode_DF",
+ "Multiply",
+ "MultiplyNode_DF",
+ "PowNode_DF",
+ "Power",
+ "Random",
+ "RandomFloat_DF",
+ "SinNode_DF",
+ "Sinus",
+ "SqrtNode_DF",
+ "Square root",
+ "String debug print",
+ "StringNode_DF",
+ "Subtract",
+ "SubtractNode_DF",
+ "Sum",
+ "SumNode_DF",
+ "TanNode_DF",
+ "Tangent",
+ "Text",
+ "Text box",
+ "Tuple",
+ "Tuple debug print",
+ "Tuple multiply",
+ "Tuple swap",
+ "Tuple to floats",
+ "Tuple to ints",
+ "Tuple2Float_DF",
+ "TupleDebugPrint_DF",
+ "TupleNode_DF"
+ ],
+ {
+ "title_aux": "Derfuu_ComfyUI_ModdedNodes"
+ }
+ ],
+ "https://github.com/DonBaronFactory/ComfyUI-Cre8it-Nodes": [
+ [
+ "ApplySerialPrompter",
+ "ImageSizer",
+ "SerialPrompter"
+ ],
+ {
+ "author": "CRE8IT GmbH",
+ "description": "This extension offers various nodes.",
+ "nickname": "cre8Nodes",
+ "title": "cr8SerialPrompter",
+ "title_aux": "ComfyUI-Cre8it-Nodes"
+ }
+ ],
+ "https://github.com/Electrofried/ComfyUI-OpenAINode": [
+ [
+ "OpenAINode"
+ ],
+ {
+ "title_aux": "OpenAINode"
+ }
+ ],
+ "https://github.com/EllangoK/ComfyUI-post-processing-nodes": [
+ [
+ "ArithmeticBlend",
+ "AsciiArt",
+ "Blend",
+ "Blur",
+ "CannyEdgeMask",
+ "ChromaticAberration",
+ "ColorCorrect",
+ "ColorTint",
+ "Dissolve",
+ "Dither",
+ "DodgeAndBurn",
+ "FilmGrain",
+ "Glow",
+ "HSVThresholdMask",
+ "KMeansQuantize",
+ "KuwaharaBlur",
+ "Parabolize",
+ "PencilSketch",
+ "PixelSort",
+ "Pixelize",
+ "Quantize",
+ "Sharpen",
+ "SineWave",
+ "Solarize",
+ "Vignette"
+ ],
+ {
+ "title_aux": "ComfyUI-post-processing-nodes"
+ }
+ ],
+ "https://github.com/Extraltodeus/ComfyUI-AutomaticCFG": [
+ [
+ "Automatic CFG",
+ "Automatic CFG channels multipliers"
+ ],
+ {
+ "title_aux": "ComfyUI-AutomaticCFG"
+ }
+ ],
+ "https://github.com/Extraltodeus/LoadLoraWithTags": [
+ [
+ "LoraLoaderTagsQuery"
+ ],
+ {
+ "title_aux": "LoadLoraWithTags"
+ }
+ ],
+ "https://github.com/Extraltodeus/noise_latent_perlinpinpin": [
+ [
+ "NoisyLatentPerlin"
+ ],
+ {
+ "title_aux": "noise latent perlinpinpin"
+ }
+ ],
+ "https://github.com/Extraltodeus/sigmas_tools_and_the_golden_scheduler": [
+ [
+ "Get sigmas as float",
+ "Graph sigmas",
+ "Manual scheduler",
+ "Merge sigmas by average",
+ "Merge sigmas gradually",
+ "Multiply sigmas",
+ "Split and concatenate sigmas",
+ "The Golden Scheduler"
+ ],
+ {
+ "title_aux": "sigmas_tools_and_the_golden_scheduler"
+ }
+ ],
+ "https://github.com/Fannovel16/ComfyUI-Frame-Interpolation": [
+ [
+ "AMT VFI",
+ "CAIN VFI",
+ "EISAI VFI",
+ "FILM VFI",
+ "FLAVR VFI",
+ "GMFSS Fortuna VFI",
+ "IFRNet VFI",
+ "IFUnet VFI",
+ "KSampler Gradually Adding More Denoise (efficient)",
+ "M2M VFI",
+ "Make Interpolation State List",
+ "RIFE VFI",
+ "STMFNet VFI",
+ "Sepconv VFI"
+ ],
+ {
+ "title_aux": "ComfyUI Frame Interpolation"
+ }
+ ],
+ "https://github.com/Fannovel16/ComfyUI-Loopchain": [
+ [
+ "EmptyLatentImageLoop",
+ "FolderToImageStorage",
+ "ImageStorageExportLoop",
+ "ImageStorageImport",
+ "ImageStorageReset",
+ "LatentStorageExportLoop",
+ "LatentStorageImport",
+ "LatentStorageReset"
+ ],
+ {
+ "title_aux": "ComfyUI Loopchain"
+ }
+ ],
+ "https://github.com/Fannovel16/ComfyUI-MotionDiff": [
+ [
+ "EmptyMotionData",
+ "ExportSMPLTo3DSoftware",
+ "MotionCLIPTextEncode",
+ "MotionDataVisualizer",
+ "MotionDiffLoader",
+ "MotionDiffSimpleSampler",
+ "RenderSMPLMesh",
+ "SMPLLoader",
+ "SaveSMPL",
+ "SmplifyMotionData"
+ ],
+ {
+ "title_aux": "ComfyUI MotionDiff"
+ }
+ ],
+ "https://github.com/Fannovel16/ComfyUI-Video-Matting": [
+ [
+ "BRIAAI Matting",
+ "Robust Video Matting"
+ ],
+ {
+ "title_aux": "ComfyUI-Video-Matting"
+ }
+ ],
+ "https://github.com/Fannovel16/comfyui_controlnet_aux": [
+ [
+ "AIO_Preprocessor",
+ "AnimalPosePreprocessor",
+ "AnimeFace_SemSegPreprocessor",
+ "AnimeLineArtPreprocessor",
+ "BAE-NormalMapPreprocessor",
+ "BinaryPreprocessor",
+ "CannyEdgePreprocessor",
+ "ColorPreprocessor",
+ "DWPreprocessor",
+ "DensePosePreprocessor",
+ "DepthAnythingPreprocessor",
+ "DiffusionEdge_Preprocessor",
+ "FacialPartColoringFromPoseKps",
+ "FakeScribblePreprocessor",
+ "HEDPreprocessor",
+ "HintImageEnchance",
+ "ImageGenResolutionFromImage",
+ "ImageGenResolutionFromLatent",
+ "ImageIntensityDetector",
+ "ImageLuminanceDetector",
+ "InpaintPreprocessor",
+ "LeReS-DepthMapPreprocessor",
+ "LineArtPreprocessor",
+ "LineartStandardPreprocessor",
+ "M-LSDPreprocessor",
+ "Manga2Anime_LineArt_Preprocessor",
+ "MaskOptFlow",
+ "MediaPipe-FaceMeshPreprocessor",
+ "MeshGraphormer-DepthMapPreprocessor",
+ "MiDaS-DepthMapPreprocessor",
+ "MiDaS-NormalMapPreprocessor",
+ "OneFormer-ADE20K-SemSegPreprocessor",
+ "OneFormer-COCO-SemSegPreprocessor",
+ "OpenposePreprocessor",
+ "PiDiNetPreprocessor",
+ "PixelPerfectResolution",
+ "SAMPreprocessor",
+ "SavePoseKpsAsJsonFile",
+ "ScribblePreprocessor",
+ "Scribble_XDoG_Preprocessor",
+ "SemSegPreprocessor",
+ "ShufflePreprocessor",
+ "TEEDPreprocessor",
+ "TilePreprocessor",
+ "UniFormer-SemSegPreprocessor",
+ "Unimatch_OptFlowPreprocessor",
+ "Zoe-DepthMapPreprocessor",
+ "Zoe_DepthAnythingPreprocessor"
+ ],
+ {
+ "author": "tstandley",
+ "title_aux": "ComfyUI's ControlNet Auxiliary Preprocessors"
+ }
+ ],
+ "https://github.com/Feidorian/feidorian-ComfyNodes": [
+ [],
+ {
+ "nodename_pattern": "^Feidorian_",
+ "title_aux": "feidorian-ComfyNodes"
+ }
+ ],
+ "https://github.com/Fictiverse/ComfyUI_Fictiverse": [
+ [
+ "Add Noise to Image with Mask",
+ "Color correction",
+ "Displace Image with Depth",
+ "Displace Images with Mask",
+ "Zoom Image with Depth"
+ ],
+ {
+ "title_aux": "ComfyUI Fictiverse Nodes"
+ }
+ ],
+ "https://github.com/FizzleDorf/ComfyUI-AIT": [
+ [
+ "AIT_Unet_Loader",
+ "AIT_VAE_Encode_Loader"
+ ],
+ {
+ "title_aux": "ComfyUI-AIT"
+ }
+ ],
+ "https://github.com/FizzleDorf/ComfyUI_FizzNodes": [
+ [
+ "AbsCosWave",
+ "AbsSinWave",
+ "BatchGLIGENSchedule",
+ "BatchPromptSchedule",
+ "BatchPromptScheduleEncodeSDXL",
+ "BatchPromptScheduleLatentInput",
+ "BatchPromptScheduleNodeFlowEnd",
+ "BatchPromptScheduleSDXLLatentInput",
+ "BatchStringSchedule",
+ "BatchValueSchedule",
+ "BatchValueScheduleLatentInput",
+ "CalculateFrameOffset",
+ "ConcatStringSingle",
+ "CosWave",
+ "FizzFrame",
+ "FizzFrameConcatenate",
+ "ImageBatchFromValueSchedule",
+ "Init FizzFrame",
+ "InvCosWave",
+ "InvSinWave",
+ "Lerp",
+ "PromptSchedule",
+ "PromptScheduleEncodeSDXL",
+ "PromptScheduleNodeFlow",
+ "PromptScheduleNodeFlowEnd",
+ "SawtoothWave",
+ "SinWave",
+ "SquareWave",
+ "StringConcatenate",
+ "StringSchedule",
+ "TriangleWave",
+ "ValueSchedule",
+ "convertKeyframeKeysToBatchKeys"
+ ],
+ {
+ "title_aux": "FizzNodes"
+ }
+ ],
+ "https://github.com/FlyingFireCo/tiled_ksampler": [
+ [
+ "Asymmetric Tiled KSampler",
+ "Circular VAEDecode",
+ "Tiled KSampler"
+ ],
+ {
+ "title_aux": "tiled_ksampler"
+ }
+ ],
+ "https://github.com/Franck-Demongin/NX_PromptStyler": [
+ [
+ "NX_PromptStyler"
+ ],
+ {
+ "title_aux": "NX_PromptStyler"
+ }
+ ],
+ "https://github.com/GMapeSplat/ComfyUI_ezXY": [
+ [
+ "ConcatenateString",
+ "ItemFromDropdown",
+ "IterationDriver",
+ "JoinImages",
+ "LineToConsole",
+ "NumberFromList",
+ "NumbersToList",
+ "PlotImages",
+ "StringFromList",
+ "StringToLabel",
+ "StringsToList",
+ "ezMath",
+ "ezXY_AssemblePlot",
+ "ezXY_Driver"
+ ],
+ {
+ "title_aux": "ezXY scripts and nodes"
+ }
+ ],
+ "https://github.com/GTSuya-Studio/ComfyUI-Gtsuya-Nodes": [
+ [
+ "Danbooru (ID)",
+ "Danbooru (Random)",
+ "Random File From Path",
+ "Replace Strings",
+ "Simple Wildcards",
+ "Simple Wildcards (Dir.)",
+ "Wildcards Nodes"
+ ],
+ {
+ "title_aux": "ComfyUI-GTSuya-Nodes"
+ }
+ ],
+ "https://github.com/GavChap/ComfyUI-CascadeResolutions": [
+ [
+ "CascadeResolutions"
+ ],
+ {
+ "title_aux": "ComfyUI-CascadeResolutions"
+ }
+ ],
+ "https://github.com/Gourieff/comfyui-reactor-node": [
+ [
+ "ReActorFaceSwap",
+ "ReActorLoadFaceModel",
+ "ReActorRestoreFace",
+ "ReActorSaveFaceModel"
+ ],
+ {
+ "title_aux": "ReActor Node for ComfyUI"
+ }
+ ],
+ "https://github.com/HAL41/ComfyUI-aichemy-nodes": [
+ [
+ "aichemyYOLOv8Segmentation"
+ ],
+ {
+ "title_aux": "ComfyUI aichemy nodes"
+ }
+ ],
+ "https://github.com/Hangover3832/ComfyUI-Hangover-Moondream": [
+ [
+ "Moondream Interrogator (NO COMMERCIAL USE)"
+ ],
+ {
+ "title_aux": "ComfyUI-Hangover-Moondream"
+ }
+ ],
+ "https://github.com/Hangover3832/ComfyUI-Hangover-Nodes": [
+ [
+ "Image Scale Bounding Box",
+ "MS kosmos-2 Interrogator",
+ "Make Inpaint Model",
+ "Save Image w/o Metadata"
+ ],
+ {
+ "title_aux": "ComfyUI-Hangover-Nodes"
+ }
+ ],
+ "https://github.com/Haoming02/comfyui-diffusion-cg": [
+ [
+ "Normalization",
+ "NormalizationXL",
+ "Recenter",
+ "Recenter XL"
+ ],
+ {
+ "title_aux": "ComfyUI Diffusion Color Grading"
+ }
+ ],
+ "https://github.com/Haoming02/comfyui-floodgate": [
+ [
+ "FloodGate"
+ ],
+ {
+ "title_aux": "ComfyUI Floodgate"
+ }
+ ],
+ "https://github.com/HaydenReeve/ComfyUI-Better-Strings": [
+ [
+ "BetterString"
+ ],
+ {
+ "title_aux": "ComfyUI Better Strings"
+ }
+ ],
+ "https://github.com/HebelHuber/comfyui-enhanced-save-node": [
+ [
+ "EnhancedSaveNode"
+ ],
+ {
+ "title_aux": "comfyui-enhanced-save-node"
+ }
+ ],
+ "https://github.com/Hiero207/ComfyUI-Hiero-Nodes": [
+ [
+ "Post to Discord w/ Webhook"
+ ],
+ {
+ "author": "Hiero",
+ "description": "Just some nodes that I wanted/needed, so I made them.",
+ "nickname": "HNodes",
+ "title": "Hiero-Nodes",
+ "title_aux": "ComfyUI-Hiero-Nodes"
+ }
+ ],
+ "https://github.com/IDGallagher/ComfyUI-IG-Nodes": [
+ [
+ "IG Analyze SSIM",
+ "IG Cross Fade Images",
+ "IG Explorer",
+ "IG Float",
+ "IG Folder",
+ "IG Int",
+ "IG Load Image",
+ "IG Load Images",
+ "IG Multiply",
+ "IG Path Join",
+ "IG String",
+ "IG ZFill"
+ ],
+ {
+ "author": "IDGallagher",
+ "description": "Custom nodes to aid in the exploration of Latent Space",
+ "nickname": "IG Interpolation Nodes",
+ "title": "IG Interpolation Nodes",
+ "title_aux": "IG Interpolation Nodes"
+ }
+ ],
+ "https://github.com/Inzaniak/comfyui-ranbooru": [
+ [
+ "PromptBackground",
+ "PromptLimit",
+ "PromptMix",
+ "PromptRandomWeight",
+ "PromptRemove",
+ "Ranbooru",
+ "RanbooruURL",
+ "RandomPicturePath"
+ ],
+ {
+ "title_aux": "Ranbooru for ComfyUI"
+ }
+ ],
+ "https://github.com/JPS-GER/ComfyUI_JPS-Nodes": [
+ [
+ "Conditioning Switch (JPS)",
+ "ControlNet Switch (JPS)",
+ "Crop Image Pipe (JPS)",
+ "Crop Image Settings (JPS)",
+ "Crop Image Square (JPS)",
+ "Crop Image TargetSize (JPS)",
+ "CtrlNet CannyEdge Pipe (JPS)",
+ "CtrlNet CannyEdge Settings (JPS)",
+ "CtrlNet MiDaS Pipe (JPS)",
+ "CtrlNet MiDaS Settings (JPS)",
+ "CtrlNet OpenPose Pipe (JPS)",
+ "CtrlNet OpenPose Settings (JPS)",
+ "CtrlNet ZoeDepth Pipe (JPS)",
+ "CtrlNet ZoeDepth Settings (JPS)",
+ "Disable Enable Switch (JPS)",
+ "Enable Disable Switch (JPS)",
+ "Generation TXT IMG Settings (JPS)",
+ "Get Date Time String (JPS)",
+ "Get Image Size (JPS)",
+ "IP Adapter Settings (JPS)",
+ "IP Adapter Settings Pipe (JPS)",
+ "IP Adapter Single Settings (JPS)",
+ "IP Adapter Single Settings Pipe (JPS)",
+ "IPA Switch (JPS)",
+ "Image Switch (JPS)",
+ "ImageToImage Pipe (JPS)",
+ "ImageToImage Settings (JPS)",
+ "Images Masks MultiPipe (JPS)",
+ "Integer Switch (JPS)",
+ "Largest Int (JPS)",
+ "Latent Switch (JPS)",
+ "Lora Loader (JPS)",
+ "Mask Switch (JPS)",
+ "Model Switch (JPS)",
+ "Multiply Float Float (JPS)",
+ "Multiply Int Float (JPS)",
+ "Multiply Int Int (JPS)",
+ "Resolution Multiply (JPS)",
+ "Revision Settings (JPS)",
+ "Revision Settings Pipe (JPS)",
+ "SDXL Basic Settings (JPS)",
+ "SDXL Basic Settings Pipe (JPS)",
+ "SDXL Fundamentals MultiPipe (JPS)",
+ "SDXL Prompt Handling (JPS)",
+ "SDXL Prompt Handling Plus (JPS)",
+ "SDXL Prompt Styler (JPS)",
+ "SDXL Recommended Resolution Calc (JPS)",
+ "SDXL Resolutions (JPS)",
+ "Sampler Scheduler Settings (JPS)",
+ "Save Images Plus (JPS)",
+ "Substract Int Int (JPS)",
+ "Text Concatenate (JPS)",
+ "Text Prompt (JPS)",
+ "VAE Switch (JPS)"
+ ],
+ {
+ "author": "JPS",
+ "description": "Various nodes to handle SDXL Resolutions, SDXL Basic Settings, IP Adapter Settings, Revision Settings, SDXL Prompt Styler, Crop Image to Square, Crop Image to Target Size, Get Date-Time String, Resolution Multiply, Largest Integer, 5-to-1 Switches for Integer, Images, Latents, Conditioning, Model, VAE, ControlNet",
+ "nickname": "JPS Custom Nodes",
+ "title": "JPS Custom Nodes for ComfyUI",
+ "title_aux": "JPS Custom Nodes for ComfyUI"
+ }
+ ],
+ "https://github.com/JaredTherriault/ComfyUI-JNodes": [
+ [
+ "JNodes_AddOrSetMetaDataKey",
+ "JNodes_AnyToString",
+ "JNodes_AppendReversedFrames",
+ "JNodes_BooleanSelectorWithString",
+ "JNodes_CheckpointSelectorWithString",
+ "JNodes_GetOutputDirectory",
+ "JNodes_GetParameterFromList",
+ "JNodes_GetParameterGlobal",
+ "JNodes_GetTempDirectory",
+ "JNodes_ImageFormatSelector",
+ "JNodes_ImageSizeSelector",
+ "JNodes_LoadVideo",
+ "JNodes_LoraExtractor",
+ "JNodes_OutVideoInfo",
+ "JNodes_ParseDynamicPrompts",
+ "JNodes_ParseParametersToGlobalList",
+ "JNodes_ParseWildcards",
+ "JNodes_PromptBuilderSingleSubject",
+ "JNodes_RemoveCommentedText",
+ "JNodes_RemoveMetaDataKey",
+ "JNodes_RemoveParseableDataForInference",
+ "JNodes_SamplerSelectorWithString",
+ "JNodes_SaveImageWithOutput",
+ "JNodes_SaveVideo",
+ "JNodes_SchedulerSelectorWithString",
+ "JNodes_SearchAndReplace",
+ "JNodes_SearchAndReplaceFromFile",
+ "JNodes_SearchAndReplaceFromList",
+ "JNodes_SetNegativePromptInMetaData",
+ "JNodes_SetPositivePromptInMetaData",
+ "JNodes_SplitAndJoin",
+ "JNodes_StringLiteral",
+ "JNodes_SyncedStringLiteral",
+ "JNodes_TokenCounter",
+ "JNodes_TrimAndStrip",
+ "JNodes_UploadVideo",
+ "JNodes_VaeSelectorWithString"
+ ],
+ {
+ "title_aux": "ComfyUI-JNodes"
+ }
+ ],
+ "https://github.com/JcandZero/ComfyUI_GLM4Node": [
+ [
+ "GLM3_turbo_CHAT",
+ "GLM4_CHAT",
+ "GLM4_Vsion_IMGURL"
+ ],
+ {
+ "title_aux": "ComfyUI_GLM4Node"
+ }
+ ],
+ "https://github.com/Jcd1230/rembg-comfyui-node": [
+ [
+ "Image Remove Background (rembg)"
+ ],
+ {
+ "title_aux": "Rembg Background Removal Node for ComfyUI"
+ }
+ ],
+ "https://github.com/JerryOrbachJr/ComfyUI-RandomSize": [
+ [
+ "JOJR_RandomSize"
+ ],
+ {
+ "author": "JerryOrbachJr",
+ "description": "A ComfyUI custom node that randomly selects a height and width pair from a list in a config file",
+ "nickname": "Random Size",
+ "title": "Random Size",
+ "title_aux": "ComfyUI-RandomSize"
+ }
+ ],
+ "https://github.com/Jordach/comfy-plasma": [
+ [
+ "JDC_AutoContrast",
+ "JDC_BlendImages",
+ "JDC_BrownNoise",
+ "JDC_Contrast",
+ "JDC_EqualizeGrey",
+ "JDC_GaussianBlur",
+ "JDC_GreyNoise",
+ "JDC_Greyscale",
+ "JDC_ImageLoader",
+ "JDC_ImageLoaderMeta",
+ "JDC_PinkNoise",
+ "JDC_Plasma",
+ "JDC_PlasmaSampler",
+ "JDC_PowerImage",
+ "JDC_RandNoise",
+ "JDC_ResizeFactor"
+ ],
+ {
+ "title_aux": "comfy-plasma"
+ }
+ ],
+ "https://github.com/Kaharos94/ComfyUI-Saveaswebp": [
+ [
+ "Save_as_webp"
+ ],
+ {
+ "title_aux": "ComfyUI-Saveaswebp"
+ }
+ ],
+ "https://github.com/Kangkang625/ComfyUI-paint-by-example": [
+ [
+ "PaintbyExamplePipeLoader",
+ "PaintbyExampleSampler"
+ ],
+ {
+ "title_aux": "ComfyUI-Paint-by-Example"
+ }
+ ],
+ "https://github.com/Kosinkadink/ComfyUI-Advanced-ControlNet": [
+ [
+ "ACN_AdvancedControlNetApply",
+ "ACN_ControlNetLoaderWithLoraAdvanced",
+ "ACN_DefaultUniversalWeights",
+ "ACN_SparseCtrlIndexMethodNode",
+ "ACN_SparseCtrlLoaderAdvanced",
+ "ACN_SparseCtrlMergedLoaderAdvanced",
+ "ACN_SparseCtrlRGBPreprocessor",
+ "ACN_SparseCtrlSpreadMethodNode",
+ "ControlNetLoaderAdvanced",
+ "CustomControlNetWeights",
+ "CustomT2IAdapterWeights",
+ "DiffControlNetLoaderAdvanced",
+ "LatentKeyframe",
+ "LatentKeyframeBatchedGroup",
+ "LatentKeyframeGroup",
+ "LatentKeyframeTiming",
+ "LoadImagesFromDirectory",
+ "ScaledSoftControlNetWeights",
+ "ScaledSoftMaskedUniversalWeights",
+ "SoftControlNetWeights",
+ "SoftT2IAdapterWeights",
+ "TimestepKeyframe"
+ ],
+ {
+ "title_aux": "ComfyUI-Advanced-ControlNet"
+ }
+ ],
+ "https://github.com/Kosinkadink/ComfyUI-AnimateDiff-Evolved": [
+ [
+ "ADE_AdjustPEFullStretch",
+ "ADE_AdjustPEManual",
+ "ADE_AdjustPESweetspotStretch",
+ "ADE_AnimateDiffCombine",
+ "ADE_AnimateDiffKeyframe",
+ "ADE_AnimateDiffLoRALoader",
+ "ADE_AnimateDiffLoaderGen1",
+ "ADE_AnimateDiffLoaderV1Advanced",
+ "ADE_AnimateDiffLoaderWithContext",
+ "ADE_AnimateDiffModelSettings",
+ "ADE_AnimateDiffModelSettingsAdvancedAttnStrengths",
+ "ADE_AnimateDiffModelSettingsSimple",
+ "ADE_AnimateDiffModelSettings_Release",
+ "ADE_AnimateDiffSamplingSettings",
+ "ADE_AnimateDiffSettings",
+ "ADE_AnimateDiffUniformContextOptions",
+ "ADE_AnimateDiffUnload",
+ "ADE_ApplyAnimateDiffModel",
+ "ADE_ApplyAnimateDiffModelSimple",
+ "ADE_BatchedContextOptions",
+ "ADE_CustomCFG",
+ "ADE_CustomCFGKeyframe",
+ "ADE_EmptyLatentImageLarge",
+ "ADE_IterationOptsDefault",
+ "ADE_IterationOptsFreeInit",
+ "ADE_LoadAnimateDiffModel",
+ "ADE_LoopedUniformContextOptions",
+ "ADE_LoopedUniformViewOptions",
+ "ADE_MaskedLoadLora",
+ "ADE_MultivalDynamic",
+ "ADE_MultivalScaledMask",
+ "ADE_NoiseLayerAdd",
+ "ADE_NoiseLayerAddWeighted",
+ "ADE_NoiseLayerReplace",
+ "ADE_RawSigmaSchedule",
+ "ADE_SigmaSchedule",
+ "ADE_SigmaScheduleSplitAndCombine",
+ "ADE_SigmaScheduleWeightedAverage",
+ "ADE_SigmaScheduleWeightedAverageInterp",
+ "ADE_StandardStaticContextOptions",
+ "ADE_StandardStaticViewOptions",
+ "ADE_StandardUniformContextOptions",
+ "ADE_StandardUniformViewOptions",
+ "ADE_UseEvolvedSampling",
+ "ADE_ViewsOnlyContextOptions",
+ "AnimateDiffLoaderV1",
+ "CheckpointLoaderSimpleWithNoiseSelect"
+ ],
+ {
+ "title_aux": "AnimateDiff Evolved"
+ }
+ ],
+ "https://github.com/Kosinkadink/ComfyUI-VideoHelperSuite": [
+ [
+ "VHS_BatchManager",
+ "VHS_DuplicateImages",
+ "VHS_DuplicateLatents",
+ "VHS_DuplicateMasks",
+ "VHS_GetImageCount",
+ "VHS_GetLatentCount",
+ "VHS_GetMaskCount",
+ "VHS_LoadAudio",
+ "VHS_LoadImages",
+ "VHS_LoadImagesPath",
+ "VHS_LoadVideo",
+ "VHS_LoadVideoPath",
+ "VHS_MergeImages",
+ "VHS_MergeLatents",
+ "VHS_MergeMasks",
+ "VHS_PruneOutputs",
+ "VHS_SelectEveryNthImage",
+ "VHS_SelectEveryNthLatent",
+ "VHS_SelectEveryNthMask",
+ "VHS_SplitImages",
+ "VHS_SplitLatents",
+ "VHS_SplitMasks",
+ "VHS_VAEDecodeBatched",
+ "VHS_VAEEncodeBatched",
+ "VHS_VideoCombine"
+ ],
+ {
+ "title_aux": "ComfyUI-VideoHelperSuite"
+ }
+ ],
+ "https://github.com/LEv145/images-grid-comfy-plugin": [
+ [
+ "GridAnnotation",
+ "ImageCombine",
+ "ImagesGridByColumns",
+ "ImagesGridByRows",
+ "LatentCombine"
+ ],
+ {
+ "title_aux": "ImagesGrid"
+ }
+ ],
+ "https://github.com/LarryJane491/Image-Captioning-in-ComfyUI": [
+ [
+ "LoRA Caption Load",
+ "LoRA Caption Save"
+ ],
+ {
+ "title_aux": "Image-Captioning-in-ComfyUI"
+ }
+ ],
+ "https://github.com/LarryJane491/Lora-Training-in-Comfy": [
+ [
+ "Lora Training in Comfy (Advanced)",
+ "Lora Training in ComfyUI",
+ "Tensorboard Access"
+ ],
+ {
+ "title_aux": "Lora-Training-in-Comfy"
+ }
+ ],
+ "https://github.com/Layer-norm/comfyui-lama-remover": [
+ [
+ "LamaRemover",
+ "LamaRemoverIMG"
+ ],
+ {
+ "title_aux": "Comfyui lama remover"
+ }
+ ],
+ "https://github.com/Lerc/canvas_tab": [
+ [
+ "Canvas_Tab",
+ "Send_To_Editor"
+ ],
+ {
+ "author": "Lerc",
+ "description": "This extension provides a full page image editor with mask support. There are two nodes, one to receive images from the editor and one to send images to the editor.",
+ "nickname": "Canvas Tab",
+ "title": "Canvas Tab",
+ "title_aux": "Canvas Tab"
+ }
+ ],
+ "https://github.com/Limitex/ComfyUI-Calculation": [
+ [
+ "CenterCalculation",
+ "CreateQRCode"
+ ],
+ {
+ "title_aux": "ComfyUI-Calculation"
+ }
+ ],
+ "https://github.com/Limitex/ComfyUI-Diffusers": [
+ [
+ "CreateIntListNode",
+ "DiffusersClipTextEncode",
+ "DiffusersModelMakeup",
+ "DiffusersPipelineLoader",
+ "DiffusersSampler",
+ "DiffusersSchedulerLoader",
+ "DiffusersVaeLoader",
+ "LcmLoraLoader",
+ "StreamDiffusionCreateStream",
+ "StreamDiffusionFastSampler",
+ "StreamDiffusionSampler",
+ "StreamDiffusionWarmup"
+ ],
+ {
+ "title_aux": "ComfyUI-Diffusers"
+ }
+ ],
+ "https://github.com/Loewen-Hob/rembg-comfyui-node-better": [
+ [
+ "Image Remove Background (rembg)"
+ ],
+ {
+ "title_aux": "Rembg Background Removal Node for ComfyUI"
+ }
+ ],
+ "https://github.com/LonicaMewinsky/ComfyUI-MakeFrame": [
+ [
+ "BreakFrames",
+ "BreakGrid",
+ "GetKeyFrames",
+ "MakeGrid",
+ "RandomImageFromDir"
+ ],
+ {
+ "title_aux": "ComfyBreakAnim"
+ }
+ ],
+ "https://github.com/LonicaMewinsky/ComfyUI-RawSaver": [
+ [
+ "SaveTifImage"
+ ],
+ {
+ "title_aux": "ComfyUI-RawSaver"
+ }
+ ],
+ "https://github.com/LyazS/comfyui-anime-seg": [
+ [
+ "Anime Character Seg"
+ ],
+ {
+ "title_aux": "Anime Character Segmentation node for comfyui"
+ }
+ ],
+ "https://github.com/M1kep/ComfyLiterals": [
+ [
+ "Checkpoint",
+ "Float",
+ "Int",
+ "KepStringLiteral",
+ "Lora",
+ "Operation",
+ "String"
+ ],
+ {
+ "title_aux": "ComfyLiterals"
+ }
+ ],
+ "https://github.com/M1kep/ComfyUI-KepOpenAI": [
+ [
+ "KepOpenAI_ImageWithPrompt"
+ ],
+ {
+ "title_aux": "ComfyUI-KepOpenAI"
+ }
+ ],
+ "https://github.com/M1kep/ComfyUI-OtherVAEs": [
+ [
+ "OtherVAE_Taesd"
+ ],
+ {
+ "title_aux": "ComfyUI-OtherVAEs"
+ }
+ ],
+ "https://github.com/M1kep/Comfy_KepKitchenSink": [
+ [
+ "KepRotateImage"
+ ],
+ {
+ "title_aux": "Comfy_KepKitchenSink"
+ }
+ ],
+ "https://github.com/M1kep/Comfy_KepListStuff": [
+ [
+ "Empty Images",
+ "Image Overlay",
+ "ImageListLoader",
+ "Join Float Lists",
+ "Join Image Lists",
+ "KepStringList",
+ "KepStringListFromNewline",
+ "Kep_JoinListAny",
+ "Kep_RepeatList",
+ "Kep_ReverseList",
+ "Kep_VariableImageBuilder",
+ "List Length",
+ "Range(Num Steps) - Float",
+ "Range(Num Steps) - Int",
+ "Range(Step) - Float",
+ "Range(Step) - Int",
+ "Stack Images",
+ "XYAny",
+ "XYImage"
+ ],
+ {
+ "title_aux": "Comfy_KepListStuff"
+ }
+ ],
+ "https://github.com/M1kep/Comfy_KepMatteAnything": [
+ [
+ "MatteAnything_DinoBoxes",
+ "MatteAnything_GenerateVITMatte",
+ "MatteAnything_InitSamPredictor",
+ "MatteAnything_LoadDINO",
+ "MatteAnything_LoadVITMatteModel",
+ "MatteAnything_SAMLoader",
+ "MatteAnything_SAMMaskFromBoxes",
+ "MatteAnything_ToTrimap"
+ ],
+ {
+ "title_aux": "Comfy_KepMatteAnything"
+ }
+ ],
+ "https://github.com/M1kep/KepPromptLang": [
+ [
+ "Build Gif",
+ "Special CLIP Loader"
+ ],
+ {
+ "title_aux": "KepPromptLang"
+ }
+ ],
+ "https://github.com/MNeMoNiCuZ/ComfyUI-mnemic-nodes": [
+ [
+ "Save Text File_mne"
+ ],
+ {
+ "title_aux": "ComfyUI-mnemic-nodes"
+ }
+ ],
+ "https://github.com/Mamaaaamooooo/batchImg-rembg-ComfyUI-nodes": [
+ [
+ "Image Remove Background (rembg)"
+ ],
+ {
+ "title_aux": "Batch Rembg for ComfyUI"
+ }
+ ],
+ "https://github.com/ManglerFTW/ComfyI2I": [
+ [
+ "Color Transfer",
+ "Combine and Paste",
+ "Inpaint Segments",
+ "Mask Ops"
+ ],
+ {
+ "author": "ManglerFTW",
+ "title": "ComfyI2I",
+ "title_aux": "ComfyI2I"
+ }
+ ],
+ "https://github.com/MarkoCa1/ComfyUI_Segment_Mask": [
+ [
+ "AutomaticMask(segment anything)"
+ ],
+ {
+ "title_aux": "ComfyUI_Segment_Mask"
+ }
+ ],
+ "https://github.com/Miosp/ComfyUI-FBCNN": [
+ [
+ "JPEG artifacts removal FBCNN"
+ ],
+ {
+ "title_aux": "ComfyUI-FBCNN"
+ }
+ ],
+ "https://github.com/MitoshiroPJ/comfyui_slothful_attention": [
+ [
+ "NearSightedAttention",
+ "NearSightedAttentionSimple",
+ "NearSightedTile",
+ "SlothfulAttention"
+ ],
+ {
+ "title_aux": "ComfyUI Slothful Attention"
+ }
+ ],
+ "https://github.com/MrForExample/ComfyUI-3D-Pack": [
+ [],
+ {
+ "nodename_pattern": "^\\[Comfy3D\\]",
+ "title_aux": "ComfyUI-3D-Pack"
+ }
+ ],
+ "https://github.com/MrForExample/ComfyUI-AnimateAnyone-Evolved": [
+ [],
+ {
+ "nodename_pattern": "^\\[AnimateAnyone\\]",
+ "title_aux": "ComfyUI-AnimateAnyone-Evolved"
+ }
+ ],
+ "https://github.com/NicholasMcCarthy/ComfyUI_TravelSuite": [
+ [
+ "LatentTravel"
+ ],
+ {
+ "title_aux": "ComfyUI_TravelSuite"
+ }
+ ],
+ "https://github.com/NimaNzrii/comfyui-photoshop": [
+ [
+ "PhotoshopToComfyUI"
+ ],
+ {
+ "title_aux": "comfyui-photoshop"
+ }
+ ],
+ "https://github.com/NimaNzrii/comfyui-popup_preview": [
+ [
+ "PreviewPopup"
+ ],
+ {
+ "title_aux": "comfyui-popup_preview"
+ }
+ ],
+ "https://github.com/Niutonian/ComfyUi-NoodleWebcam": [
+ [
+ "WebcamNode"
+ ],
+ {
+ "title_aux": "ComfyUi-NoodleWebcam"
+ }
+ ],
+ "https://github.com/Nlar/ComfyUI_CartoonSegmentation": [
+ [
+ "AnimeSegmentation",
+ "KenBurnsConfigLoader",
+ "KenBurns_Processor",
+ "LoadImageFilename"
+ ],
+ {
+ "author": "Nels Larsen",
+ "description": "This extension offers a front end to the Cartoon Segmentation Project (https://github.com/CartoonSegmentation/CartoonSegmentation)",
+ "nickname": "CfyCS",
+ "title": "ComfyUI_CartoonSegmentation",
+ "title_aux": "ComfyUI_CartoonSegmentation"
+ }
+ ],
+ "https://github.com/NotHarroweD/Harronode": [
+ [
+ "Harronode"
+ ],
+ {
+ "author": "HarroweD and quadmoon (https://github.com/traugdor)",
+ "description": "This extension to ComfyUI will build a prompt for the Harrlogos LoRA for SDXL.",
+ "nickname": "Harronode",
+ "nodename_pattern": "Harronode",
+ "title": "Harrlogos Prompt Builder Node",
+ "title_aux": "Harronode"
+ }
+ ],
+ "https://github.com/Nourepide/ComfyUI-Allor": [
+ [
+ "AlphaChanelAdd",
+ "AlphaChanelAddByMask",
+ "AlphaChanelAsMask",
+ "AlphaChanelRemove",
+ "AlphaChanelRestore",
+ "ClipClamp",
+ "ClipVisionClamp",
+ "ClipVisionOutputClamp",
+ "ConditioningClamp",
+ "ControlNetClamp",
+ "GligenClamp",
+ "ImageBatchCopy",
+ "ImageBatchFork",
+ "ImageBatchGet",
+ "ImageBatchJoin",
+ "ImageBatchPermute",
+ "ImageBatchRemove",
+ "ImageClamp",
+ "ImageCompositeAbsolute",
+ "ImageCompositeAbsoluteByContainer",
+ "ImageCompositeRelative",
+ "ImageCompositeRelativeByContainer",
+ "ImageContainer",
+ "ImageContainerInheritanceAdd",
+ "ImageContainerInheritanceMax",
+ "ImageContainerInheritanceScale",
+ "ImageContainerInheritanceSum",
+ "ImageDrawArc",
+ "ImageDrawArcByContainer",
+ "ImageDrawChord",
+ "ImageDrawChordByContainer",
+ "ImageDrawEllipse",
+ "ImageDrawEllipseByContainer",
+ "ImageDrawLine",
+ "ImageDrawLineByContainer",
+ "ImageDrawPieslice",
+ "ImageDrawPiesliceByContainer",
+ "ImageDrawPolygon",
+ "ImageDrawRectangle",
+ "ImageDrawRectangleByContainer",
+ "ImageDrawRectangleRounded",
+ "ImageDrawRectangleRoundedByContainer",
+ "ImageEffectsAdjustment",
+ "ImageEffectsGrayscale",
+ "ImageEffectsLensBokeh",
+ "ImageEffectsLensChromaticAberration",
+ "ImageEffectsLensOpticAxis",
+ "ImageEffectsLensVignette",
+ "ImageEffectsLensZoomBurst",
+ "ImageEffectsNegative",
+ "ImageEffectsSepia",
+ "ImageFilterBilateralBlur",
+ "ImageFilterBlur",
+ "ImageFilterBoxBlur",
+ "ImageFilterContour",
+ "ImageFilterDetail",
+ "ImageFilterEdgeEnhance",
+ "ImageFilterEdgeEnhanceMore",
+ "ImageFilterEmboss",
+ "ImageFilterFindEdges",
+ "ImageFilterGaussianBlur",
+ "ImageFilterGaussianBlurAdvanced",
+ "ImageFilterMax",
+ "ImageFilterMedianBlur",
+ "ImageFilterMin",
+ "ImageFilterMode",
+ "ImageFilterRank",
+ "ImageFilterSharpen",
+ "ImageFilterSmooth",
+ "ImageFilterSmoothMore",
+ "ImageFilterStackBlur",
+ "ImageNoiseBeta",
+ "ImageNoiseBinomial",
+ "ImageNoiseBytes",
+ "ImageNoiseGaussian",
+ "ImageSegmentation",
+ "ImageSegmentationCustom",
+ "ImageSegmentationCustomAdvanced",
+ "ImageText",
+ "ImageTextMultiline",
+ "ImageTextMultilineOutlined",
+ "ImageTextOutlined",
+ "ImageTransformCropAbsolute",
+ "ImageTransformCropCorners",
+ "ImageTransformCropRelative",
+ "ImageTransformPaddingAbsolute",
+ "ImageTransformPaddingRelative",
+ "ImageTransformResizeAbsolute",
+ "ImageTransformResizeClip",
+ "ImageTransformResizeRelative",
+ "ImageTransformRotate",
+ "ImageTransformTranspose",
+ "LatentClamp",
+ "MaskClamp",
+ "ModelClamp",
+ "StyleModelClamp",
+ "UpscaleModelClamp",
+ "VaeClamp"
+ ],
+ {
+ "title_aux": "Allor Plugin"
+ }
+ ],
+ "https://github.com/Nuked88/ComfyUI-N-Nodes": [
+ [
+ "CLIPTextEncodeAdvancedNSuite [n-suite]",
+ "DynamicPrompt [n-suite]",
+ "Float Variable [n-suite]",
+ "FrameInterpolator [n-suite]",
+ "GPT Loader Simple [n-suite]",
+ "GPT Sampler [n-suite]",
+ "ImagePadForOutpaintAdvanced [n-suite]",
+ "Integer Variable [n-suite]",
+ "Llava Clip Loader [n-suite]",
+ "LoadFramesFromFolder [n-suite]",
+ "LoadVideo [n-suite]",
+ "SaveVideo [n-suite]",
+ "SetMetadataForSaveVideo [n-suite]",
+ "String Variable [n-suite]"
+ ],
+ {
+ "title_aux": "ComfyUI-N-Nodes"
+ }
+ ],
+ "https://github.com/Off-Live/ComfyUI-off-suite": [
+ [
+ "Apply CLAHE",
+ "Cached Image Load From URL",
+ "Crop Center wigh SEGS",
+ "Crop Center with SEGS",
+ "Dilate Mask for Each Face",
+ "GW Number Formatting",
+ "Image Crop Fit",
+ "Image Resize Fit",
+ "OFF SEGS to Image",
+ "Paste Face Segment to Image",
+ "Query Gender and Age",
+ "SEGS to Face Crop Data",
+ "Safe Mask to Image",
+ "VAE Encode For Inpaint V2",
+ "Watermarking"
+ ],
+ {
+ "title_aux": "ComfyUI-off-suite"
+ }
+ ],
+ "https://github.com/Onierous/QRNG_Node_ComfyUI/raw/main/qrng_node.py": [
+ [
+ "QRNG_Node_CSV"
+ ],
+ {
+ "title_aux": "QRNG_Node_ComfyUI"
+ }
+ ],
+ "https://github.com/PCMonsterx/ComfyUI-CSV-Loader": [
+ [
+ "Load Artists CSV",
+ "Load Artmovements CSV",
+ "Load Characters CSV",
+ "Load Colors CSV",
+ "Load Composition CSV",
+ "Load Lighting CSV",
+ "Load Negative CSV",
+ "Load Positive CSV",
+ "Load Settings CSV",
+ "Load Styles CSV"
+ ],
+ {
+ "title_aux": "ComfyUI-CSV-Loader"
+ }
+ ],
+ "https://github.com/ParmanBabra/ComfyUI-Malefish-Custom-Scripts": [
+ [
+ "CSVPromptsLoader",
+ "CombinePrompt",
+ "MultiLoraLoader",
+ "RandomPrompt"
+ ],
+ {
+ "title_aux": "ComfyUI-Malefish-Custom-Scripts"
+ }
+ ],
+ "https://github.com/Pfaeff/pfaeff-comfyui": [
+ [
+ "AstropulsePixelDetector",
+ "BackgroundRemover",
+ "ImagePadForBetterOutpaint",
+ "Inpainting",
+ "InpaintingPipelineLoader"
+ ],
+ {
+ "title_aux": "pfaeff-comfyui"
+ }
+ ],
+ "https://github.com/QaisMalkawi/ComfyUI-QaisHelper": [
+ [
+ "Bool Binary Operation",
+ "Bool Unary Operation",
+ "Item Debugger",
+ "Item Switch",
+ "Nearest SDXL Resolution",
+ "SDXL Resolution",
+ "Size Swapper"
+ ],
+ {
+ "title_aux": "ComfyUI-Qais-Helper"
+ }
+ ],
+ "https://github.com/RenderRift/ComfyUI-RenderRiftNodes": [
+ [
+ "AnalyseMetadata",
+ "DateIntegerNode",
+ "DisplayMetaOptions",
+ "LoadImageWithMeta",
+ "MetadataOverlayNode",
+ "VideoPathMetaExtraction"
+ ],
+ {
+ "title_aux": "ComfyUI-RenderRiftNodes"
+ }
+ ],
+ "https://github.com/Ryuukeisyou/comfyui_face_parsing": [
+ [
+ "BBoxListItemSelect(FaceParsing)",
+ "BBoxResize(FaceParsing)",
+ "ColorAdjust(FaceParsing)",
+ "FaceBBoxDetect(FaceParsing)",
+ "FaceBBoxDetectorLoader(FaceParsing)",
+ "FaceParse(FaceParsing)",
+ "FaceParsingModelLoader(FaceParsing)",
+ "FaceParsingProcessorLoader(FaceParsing)",
+ "FaceParsingResultsParser(FaceParsing)",
+ "GuidedFilter(FaceParsing)",
+ "ImageCropWithBBox(FaceParsing)",
+ "ImageInsertWithBBox(FaceParsing)",
+ "ImageListSelect(FaceParsing)",
+ "ImagePadWithBBox(FaceParsing)",
+ "ImageResizeCalculator(FaceParsing)",
+ "ImageResizeWithBBox(FaceParsing)",
+ "ImageSize(FaceParsing)",
+ "LatentCropWithBBox(FaceParsing)",
+ "LatentInsertWithBBox(FaceParsing)",
+ "LatentSize(FaceParsing)",
+ "MaskComposite(FaceParsing)",
+ "MaskListComposite(FaceParsing)",
+ "MaskListSelect(FaceParsing)",
+ "MaskToBBox(FaceParsing)",
+ "SkinDetectTraditional(FaceParsing)"
+ ],
+ {
+ "title_aux": "comfyui_face_parsing"
+ }
+ ],
+ "https://github.com/Ryuukeisyou/comfyui_image_io_helpers": [
+ [
+ "ImageLoadAsMaskByPath(ImageIOHelpers)",
+ "ImageLoadByPath(ImageIOHelpers)",
+ "ImageLoadFromBase64(ImageIOHelpers)",
+ "ImageSaveAsBase64(ImageIOHelpers)",
+ "ImageSaveToPath(ImageIOHelpers)"
+ ],
+ {
+ "title_aux": "comfyui_image_io_helpers"
+ }
+ ],
+ "https://github.com/SLAPaper/ComfyUI-Image-Selector": [
+ [
+ "ImageDuplicator",
+ "ImageSelector",
+ "LatentDuplicator",
+ "LatentSelector"
+ ],
+ {
+ "title_aux": "ComfyUI-Image-Selector"
+ }
+ ],
+ "https://github.com/SOELexicon/ComfyUI-LexMSDBNodes": [
+ [
+ "MSSqlSelectNode",
+ "MSSqlTableNode"
+ ],
+ {
+ "title_aux": "LexMSDBNodes"
+ }
+ ],
+ "https://github.com/SOELexicon/ComfyUI-LexTools": [
+ [
+ "AgeClassifierNode",
+ "ArtOrHumanClassifierNode",
+ "DocumentClassificationNode",
+ "FoodCategoryClassifierNode",
+ "ImageAspectPadNode",
+ "ImageCaptioning",
+ "ImageFilterByFloatScoreNode",
+ "ImageFilterByIntScoreNode",
+ "ImageQualityScoreNode",
+ "ImageRankingNode",
+ "ImageScaleToMin",
+ "MD5ImageHashNode",
+ "SamplerPropertiesNode",
+ "ScoreConverterNode",
+ "SeedIncrementerNode",
+ "SegformerNode",
+ "SegformerNodeMasks",
+ "SegformerNodeMergeSegments",
+ "StepCfgIncrementNode"
+ ],
+ {
+ "title_aux": "ComfyUI-LexTools"
+ }
+ ],
+ "https://github.com/SadaleNet/CLIPTextEncodeA1111-ComfyUI/raw/master/custom_nodes/clip_text_encoder_a1111.py": [
+ [
+ "CLIPTextEncodeA1111",
+ "RerouteTextForCLIPTextEncodeA1111"
+ ],
+ {
+ "title_aux": "ComfyUI A1111-like Prompt Custom Node Solution"
+ }
+ ],
+ "https://github.com/Scholar01/ComfyUI-Keyframe": [
+ [
+ "KeyframeApply",
+ "KeyframeInterpolationPart",
+ "KeyframePart"
+ ],
+ {
+ "title_aux": "SComfyUI-Keyframe"
+ }
+ ],
+ "https://github.com/SeargeDP/SeargeSDXL": [
+ [
+ "SeargeAdvancedParameters",
+ "SeargeCheckpointLoader",
+ "SeargeConditionMixing",
+ "SeargeConditioningMuxer2",
+ "SeargeConditioningMuxer5",
+ "SeargeConditioningParameters",
+ "SeargeControlnetAdapterV2",
+ "SeargeControlnetModels",
+ "SeargeCustomAfterUpscaling",
+ "SeargeCustomAfterVaeDecode",
+ "SeargeCustomPromptMode",
+ "SeargeDebugPrinter",
+ "SeargeEnablerInputs",
+ "SeargeFloatConstant",
+ "SeargeFloatMath",
+ "SeargeFloatPair",
+ "SeargeFreeU",
+ "SeargeGenerated1",
+ "SeargeGenerationParameters",
+ "SeargeHighResolution",
+ "SeargeImage2ImageAndInpainting",
+ "SeargeImageAdapterV2",
+ "SeargeImageSave",
+ "SeargeImageSaving",
+ "SeargeInput1",
+ "SeargeInput2",
+ "SeargeInput3",
+ "SeargeInput4",
+ "SeargeInput5",
+ "SeargeInput6",
+ "SeargeInput7",
+ "SeargeIntegerConstant",
+ "SeargeIntegerMath",
+ "SeargeIntegerPair",
+ "SeargeIntegerScaler",
+ "SeargeLatentMuxer3",
+ "SeargeLoraLoader",
+ "SeargeLoras",
+ "SeargeMagicBox",
+ "SeargeModelSelector",
+ "SeargeOperatingMode",
+ "SeargeOutput1",
+ "SeargeOutput2",
+ "SeargeOutput3",
+ "SeargeOutput4",
+ "SeargeOutput5",
+ "SeargeOutput6",
+ "SeargeOutput7",
+ "SeargeParameterProcessor",
+ "SeargePipelineStart",
+ "SeargePipelineTerminator",
+ "SeargePreviewImage",
+ "SeargePromptAdapterV2",
+ "SeargePromptCombiner",
+ "SeargePromptStyles",
+ "SeargePromptText",
+ "SeargeSDXLBasePromptEncoder",
+ "SeargeSDXLImage2ImageSampler",
+ "SeargeSDXLImage2ImageSampler2",
+ "SeargeSDXLPromptEncoder",
+ "SeargeSDXLRefinerPromptEncoder",
+ "SeargeSDXLSampler",
+ "SeargeSDXLSampler2",
+ "SeargeSDXLSamplerV3",
+ "SeargeSamplerAdvanced",
+ "SeargeSamplerInputs",
+ "SeargeSaveFolderInputs",
+ "SeargeSeparator",
+ "SeargeStylePreprocessor",
+ "SeargeTextInputV2",
+ "SeargeUpscaleModelLoader",
+ "SeargeUpscaleModels",
+ "SeargeVAELoader"
+ ],
+ {
+ "title_aux": "SeargeSDXL"
+ }
+ ],
+ "https://github.com/Ser-Hilary/SDXL_sizing/raw/main/conditioning_sizing_for_SDXL.py": [
+ [
+ "get_aspect_from_image",
+ "get_aspect_from_ints",
+ "sizing_node",
+ "sizing_node_basic",
+ "sizing_node_unparsed"
+ ],
+ {
+ "title_aux": "SDXL_sizing"
+ }
+ ],
+ "https://github.com/ShmuelRonen/ComfyUI-SVDResizer": [
+ [
+ "SVDRsizer"
+ ],
+ {
+ "title_aux": "ComfyUI-SVDResizer"
+ }
+ ],
+ "https://github.com/Shraknard/ComfyUI-Remover": [
+ [
+ "Remover"
+ ],
+ {
+ "title_aux": "ComfyUI-Remover"
+ }
+ ],
+ "https://github.com/Siberpone/lazy-pony-prompter": [
+ [
+ "LPP_Deleter",
+ "LPP_Derpibooru",
+ "LPP_E621",
+ "LPP_Loader_Derpibooru",
+ "LPP_Loader_E621",
+ "LPP_Saver"
+ ],
+ {
+ "title_aux": "Lazy Pony Prompter"
+ }
+ ],
+ "https://github.com/Smuzzies/comfyui_chatbox_overlay/raw/main/chatbox_overlay.py": [
+ [
+ "Chatbox Overlay"
+ ],
+ {
+ "title_aux": "Chatbox Overlay node for ComfyUI"
+ }
+ ],
+ "https://github.com/SoftMeng/ComfyUI_Mexx_Poster": [
+ [
+ "ComfyUI_Mexx_Poster"
+ ],
+ {
+ "title_aux": "ComfyUI_Mexx_Poster"
+ }
+ ],
+ "https://github.com/SoftMeng/ComfyUI_Mexx_Styler": [
+ [
+ "MexxSDXLPromptStyler",
+ "MexxSDXLPromptStylerAdvanced"
+ ],
+ {
+ "title_aux": "ComfyUI_Mexx_Styler"
+ }
+ ],
+ "https://github.com/SpaceKendo/ComfyUI-svd_txt2vid": [
+ [
+ "SVD_txt2vid_ConditioningwithLatent"
+ ],
+ {
+ "title_aux": "Text to video for Stable Video Diffusion in ComfyUI"
+ }
+ ],
+ "https://github.com/Stability-AI/stability-ComfyUI-nodes": [
+ [
+ "ColorBlend",
+ "ControlLoraSave",
+ "GetImageSize"
+ ],
+ {
+ "title_aux": "stability-ComfyUI-nodes"
+ }
+ ],
+ "https://github.com/StartHua/ComfyUI_Seg_VITON": [
+ [
+ "segformer_agnostic",
+ "segformer_clothes",
+ "segformer_remove_bg",
+ "stabel_vition"
+ ],
+ {
+ "title_aux": "ComfyUI_Seg_VITON"
+ }
+ ],
+ "https://github.com/StartHua/Comfyui_joytag": [
+ [
+ "CXH_JoyTag"
+ ],
+ {
+ "title_aux": "Comfyui_joytag"
+ }
+ ],
+ "https://github.com/StartHua/Comfyui_segformer_b2_clothes": [
+ [
+ "segformer_b2_clothes"
+ ],
+ {
+ "title_aux": "comfyui_segformer_b2_clothes"
+ }
+ ],
+ "https://github.com/Suzie1/ComfyUI_Comfyroll_CustomNodes": [
+ [
+ "CR 8 Channel In",
+ "CR 8 Channel Out",
+ "CR Apply ControlNet",
+ "CR Apply LoRA Stack",
+ "CR Apply Model Merge",
+ "CR Apply Multi Upscale",
+ "CR Apply Multi-ControlNet",
+ "CR Arabic Text RTL",
+ "CR Aspect Ratio",
+ "CR Aspect Ratio Banners",
+ "CR Aspect Ratio SDXL",
+ "CR Aspect Ratio Social Media",
+ "CR Batch Images From List",
+ "CR Batch Process Switch",
+ "CR Binary Pattern",
+ "CR Binary To Bit List",
+ "CR Bit Schedule",
+ "CR Central Schedule",
+ "CR Checker Pattern",
+ "CR Clamp Value",
+ "CR Clip Input Switch",
+ "CR Color Bars",
+ "CR Color Gradient",
+ "CR Color Panel",
+ "CR Color Tint",
+ "CR Combine Prompt",
+ "CR Combine Schedules",
+ "CR Comic Panel Templates",
+ "CR Composite Text",
+ "CR Conditioning Input Switch",
+ "CR Conditioning Mixer",
+ "CR ControlNet Input Switch",
+ "CR Current Frame",
+ "CR Cycle Images",
+ "CR Cycle Images Simple",
+ "CR Cycle LoRAs",
+ "CR Cycle Models",
+ "CR Cycle Text",
+ "CR Cycle Text Simple",
+ "CR Data Bus In",
+ "CR Data Bus Out",
+ "CR Debatch Frames",
+ "CR Diamond Panel",
+ "CR Draw Perspective Text",
+ "CR Draw Pie",
+ "CR Draw Shape",
+ "CR Draw Text",
+ "CR Encode Scheduled Prompts",
+ "CR Feathered Border",
+ "CR Float Range List",
+ "CR Float To Integer",
+ "CR Float To String",
+ "CR Font File List",
+ "CR Get Parameter From Prompt",
+ "CR Gradient Float",
+ "CR Gradient Integer",
+ "CR Half Drop Panel",
+ "CR Halftone Filter",
+ "CR Halftone Grid",
+ "CR Hires Fix Process Switch",
+ "CR Image Border",
+ "CR Image Grid Panel",
+ "CR Image Input Switch",
+ "CR Image Input Switch (4 way)",
+ "CR Image List",
+ "CR Image List Simple",
+ "CR Image Output",
+ "CR Image Panel",
+ "CR Image Pipe Edit",
+ "CR Image Pipe In",
+ "CR Image Pipe Out",
+ "CR Image Size",
+ "CR Img2Img Process Switch",
+ "CR Increment Float",
+ "CR Increment Integer",
+ "CR Index",
+ "CR Index Increment",
+ "CR Index Multiply",
+ "CR Index Reset",
+ "CR Input Text List",
+ "CR Integer Multiple",
+ "CR Integer Range List",
+ "CR Integer To String",
+ "CR Interpolate Latents",
+ "CR Intertwine Lists",
+ "CR Keyframe List",
+ "CR Latent Batch Size",
+ "CR Latent Input Switch",
+ "CR LoRA List",
+ "CR LoRA Stack",
+ "CR Load Animation Frames",
+ "CR Load Flow Frames",
+ "CR Load GIF As List",
+ "CR Load Image List",
+ "CR Load Image List Plus",
+ "CR Load LoRA",
+ "CR Load Prompt Style",
+ "CR Load Schedule From File",
+ "CR Load Scheduled ControlNets",
+ "CR Load Scheduled LoRAs",
+ "CR Load Scheduled Models",
+ "CR Load Text List",
+ "CR Mask Text",
+ "CR Math Operation",
+ "CR Model Input Switch",
+ "CR Model List",
+ "CR Model Merge Stack",
+ "CR Module Input",
+ "CR Module Output",
+ "CR Module Pipe Loader",
+ "CR Multi Upscale Stack",
+ "CR Multi-ControlNet Stack",
+ "CR Multiline Text",
+ "CR Output Flow Frames",
+ "CR Output Schedule To File",
+ "CR Overlay Text",
+ "CR Overlay Transparent Image",
+ "CR Page Layout",
+ "CR Pipe Switch",
+ "CR Polygons",
+ "CR Prompt List",
+ "CR Prompt List Keyframes",
+ "CR Prompt Scheduler",
+ "CR Prompt Text",
+ "CR Radial Gradient",
+ "CR Random Hex Color",
+ "CR Random LoRA Stack",
+ "CR Random Multiline Colors",
+ "CR Random Multiline Values",
+ "CR Random Panel Codes",
+ "CR Random RGB",
+ "CR Random RGB Gradient",
+ "CR Random Shape Pattern",
+ "CR Random Weight LoRA",
+ "CR Repeater",
+ "CR SD1.5 Aspect Ratio",
+ "CR SDXL Aspect Ratio",
+ "CR SDXL Base Prompt Encoder",
+ "CR SDXL Prompt Mix Presets",
+ "CR SDXL Prompt Mixer",
+ "CR SDXL Style Text",
+ "CR Save Text To File",
+ "CR Schedule Input Switch",
+ "CR Schedule To ScheduleList",
+ "CR Seamless Checker",
+ "CR Seed",
+ "CR Seed to Int",
+ "CR Select Font",
+ "CR Select ISO Size",
+ "CR Select Model",
+ "CR Select Resize Method",
+ "CR Set Switch From String",
+ "CR Set Value On Binary",
+ "CR Set Value On Boolean",
+ "CR Set Value on String",
+ "CR Simple Banner",
+ "CR Simple Binary Pattern",
+ "CR Simple Binary Pattern Simple",
+ "CR Simple Image Compare",
+ "CR Simple List",
+ "CR Simple Meme Template",
+ "CR Simple Prompt List",
+ "CR Simple Prompt List Keyframes",
+ "CR Simple Prompt Scheduler",
+ "CR Simple Schedule",
+ "CR Simple Text Panel",
+ "CR Simple Text Scheduler",
+ "CR Simple Text Watermark",
+ "CR Simple Titles",
+ "CR Simple Value Scheduler",
+ "CR Split String",
+ "CR Starburst Colors",
+ "CR Starburst Lines",
+ "CR String To Boolean",
+ "CR String To Combo",
+ "CR String To Number",
+ "CR Style Bars",
+ "CR Switch Model and CLIP",
+ "CR Text",
+ "CR Text Blacklist",
+ "CR Text Concatenate",
+ "CR Text Cycler",
+ "CR Text Input Switch",
+ "CR Text Input Switch (4 way)",
+ "CR Text Length",
+ "CR Text List",
+ "CR Text List Simple",
+ "CR Text List To String",
+ "CR Text Operation",
+ "CR Text Replace",
+ "CR Text Scheduler",
+ "CR Thumbnail Preview",
+ "CR Trigger",
+ "CR Upscale Image",
+ "CR VAE Decode",
+ "CR VAE Input Switch",
+ "CR Value",
+ "CR Value Cycler",
+ "CR Value Scheduler",
+ "CR Vignette Filter",
+ "CR XY From Folder",
+ "CR XY Index",
+ "CR XY Interpolate",
+ "CR XY List",
+ "CR XY Product",
+ "CR XY Save Grid Image",
+ "CR XYZ Index",
+ "CR_Aspect Ratio For Print"
+ ],
+ {
+ "author": "Suzie1",
+ "description": "175 custom nodes for artists, designers and animators.",
+ "nickname": "Comfyroll Studio",
+ "title": "Comfyroll Studio",
+ "title_aux": "ComfyUI_Comfyroll_CustomNodes"
+ }
+ ],
+ "https://github.com/Sxela/ComfyWarp": [
+ [
+ "ExtractOpticalFlow",
+ "LoadFrame",
+ "LoadFrameFromDataset",
+ "LoadFrameFromFolder",
+ "LoadFramePairFromDataset",
+ "LoadFrameSequence",
+ "MakeFrameDataset",
+ "MixConsistencyMaps",
+ "OffsetNumber",
+ "ResizeToFit",
+ "SaveFrame",
+ "WarpFrame"
+ ],
+ {
+ "title_aux": "ComfyWarp"
+ }
+ ],
+ "https://github.com/TGu-97/ComfyUI-TGu-utils": [
+ [
+ "MPNReroute",
+ "MPNSwitch",
+ "PNSwitch"
+ ],
+ {
+ "title_aux": "TGu Utilities"
+ }
+ ],
+ "https://github.com/THtianhao/ComfyUI-FaceChain": [
+ [
+ "FC CropAndPaste",
+ "FC CropBottom",
+ "FC CropToOrigin",
+ "FC FaceDetectCrop",
+ "FC FaceFusion",
+ "FC FaceSegAndReplace",
+ "FC FaceSegment",
+ "FC MaskOP",
+ "FC RemoveCannyFace",
+ "FC ReplaceByMask",
+ "FC StyleLoraLoad"
+ ],
+ {
+ "title_aux": "ComfyUI-FaceChain"
+ }
+ ],
+ "https://github.com/THtianhao/ComfyUI-Portrait-Maker": [
+ [
+ "PM_BoxCropImage",
+ "PM_ColorTransfer",
+ "PM_ExpandMaskBox",
+ "PM_FaceFusion",
+ "PM_FaceShapMatch",
+ "PM_FaceSkin",
+ "PM_GetImageInfo",
+ "PM_ImageResizeTarget",
+ "PM_ImageScaleShort",
+ "PM_MakeUpTransfer",
+ "PM_MaskDilateErode",
+ "PM_MaskMerge2Image",
+ "PM_PortraitEnhancement",
+ "PM_RatioMerge2Image",
+ "PM_ReplaceBoxImg",
+ "PM_RetinaFace",
+ "PM_Similarity",
+ "PM_SkinRetouching",
+ "PM_SuperColorTransfer",
+ "PM_SuperMakeUpTransfer"
+ ],
+ {
+ "title_aux": "ComfyUI-Portrait-Maker"
+ }
+ ],
+ "https://github.com/TRI3D-LC/tri3d-comfyui-nodes": [
+ [
+ "tri3d-adjust-neck",
+ "tri3d-atr-parse",
+ "tri3d-atr-parse-batch",
+ "tri3d-clipdrop-bgremove-api",
+ "tri3d-dwpose",
+ "tri3d-extract-hand",
+ "tri3d-extract-parts-batch",
+ "tri3d-extract-parts-batch2",
+ "tri3d-extract-parts-mask-batch",
+ "tri3d-face-recognise",
+ "tri3d-float-to-image",
+ "tri3d-fuzzification",
+ "tri3d-image-mask-2-box",
+ "tri3d-image-mask-box-2-image",
+ "tri3d-interaction-canny",
+ "tri3d-load-pose-json",
+ "tri3d-pose-adaption",
+ "tri3d-pose-to-image",
+ "tri3d-position-hands",
+ "tri3d-position-parts-batch",
+ "tri3d-recolor-mask",
+ "tri3d-recolor-mask-LAB_space",
+ "tri3d-recolor-mask-LAB_space_manual",
+ "tri3d-recolor-mask-RGB_space",
+ "tri3d-skin-feathered-padded-mask",
+ "tri3d-swap-pixels"
+ ],
+ {
+ "title_aux": "tri3d-comfyui-nodes"
+ }
+ ],
+ "https://github.com/Taremin/comfyui-prompt-extranetworks": [
+ [
+ "PromptExtraNetworks"
+ ],
+ {
+ "title_aux": "ComfyUI Prompt ExtraNetworks"
+ }
+ ],
+ "https://github.com/Taremin/comfyui-string-tools": [
+ [
+ "StringToolsBalancedChoice",
+ "StringToolsConcat",
+ "StringToolsRandomChoice",
+ "StringToolsString",
+ "StringToolsText"
+ ],
+ {
+ "title_aux": "ComfyUI String Tools"
+ }
+ ],
+ "https://github.com/TeaCrab/ComfyUI-TeaNodes": [
+ [
+ "TC_ColorFill",
+ "TC_EqualizeCLAHE",
+ "TC_ImageResize",
+ "TC_ImageScale",
+ "TC_RandomColorFill",
+ "TC_SizeApproximation"
+ ],
+ {
+ "title_aux": "ComfyUI-TeaNodes"
+ }
+ ],
+ "https://github.com/TemryL/ComfyS3": [
+ [
+ "DownloadFileS3",
+ "LoadImageS3",
+ "SaveImageS3",
+ "SaveVideoFilesS3",
+ "UploadFileS3"
+ ],
+ {
+ "title_aux": "ComfyS3"
+ }
+ ],
+ "https://github.com/TheBarret/ZSuite": [
+ [
+ "ZSuite: Prompter",
+ "ZSuite: RF Noise",
+ "ZSuite: SeedMod"
+ ],
+ {
+ "title_aux": "ZSuite"
+ }
+ ],
+ "https://github.com/TinyTerra/ComfyUI_tinyterraNodes": [
+ [
+ "ttN busIN",
+ "ttN busOUT",
+ "ttN compareInput",
+ "ttN concat",
+ "ttN debugInput",
+ "ttN float",
+ "ttN hiresfixScale",
+ "ttN imageOutput",
+ "ttN imageREMBG",
+ "ttN int",
+ "ttN multiModelMerge",
+ "ttN pipe2BASIC",
+ "ttN pipe2DETAILER",
+ "ttN pipeEDIT",
+ "ttN pipeEncodeConcat",
+ "ttN pipeIN",
+ "ttN pipeKSampler",
+ "ttN pipeKSamplerAdvanced",
+ "ttN pipeKSamplerSDXL",
+ "ttN pipeLoader",
+ "ttN pipeLoaderSDXL",
+ "ttN pipeLoraStack",
+ "ttN pipeOUT",
+ "ttN seed",
+ "ttN seedDebug",
+ "ttN text",
+ "ttN text3BOX_3WAYconcat",
+ "ttN text7BOX_concat",
+ "ttN textDebug",
+ "ttN xyPlot"
+ ],
+ {
+ "author": "tinyterra",
+ "description": "This extension offers various pipe nodes, fullscreen image viewer based on node history, dynamic widgets, interface customization, and more.",
+ "nickname": "ttNodes",
+ "nodename_pattern": "^ttN ",
+ "title": "tinyterraNodes",
+ "title_aux": "tinyterraNodes"
+ }
+ ],
+ "https://github.com/TripleHeadedMonkey/ComfyUI_MileHighStyler": [
+ [
+ "menus"
+ ],
+ {
+ "title_aux": "ComfyUI_MileHighStyler"
+ }
+ ],
+ "https://github.com/Tropfchen/ComfyUI-Embedding_Picker": [
+ [
+ "EmbeddingPicker"
+ ],
+ {
+ "title_aux": "Embedding Picker"
+ }
+ ],
+ "https://github.com/Tropfchen/ComfyUI-yaResolutionSelector": [
+ [
+ "YARS",
+ "YARSAdv"
+ ],
+ {
+ "title_aux": "YARS: Yet Another Resolution Selector"
+ }
+ ],
+ "https://github.com/Trung0246/ComfyUI-0246": [
+ [
+ "0246.Beautify",
+ "0246.BoxRange",
+ "0246.CastReroute",
+ "0246.Cloud",
+ "0246.Convert",
+ "0246.Count",
+ "0246.Highway",
+ "0246.HighwayBatch",
+ "0246.Hold",
+ "0246.Hub",
+ "0246.Junction",
+ "0246.JunctionBatch",
+ "0246.Loop",
+ "0246.Merge",
+ "0246.Meta",
+ "0246.Pick",
+ "0246.RandomInt",
+ "0246.Script",
+ "0246.ScriptNode",
+ "0246.ScriptPile",
+ "0246.ScriptRule",
+ "0246.Stringify",
+ "0246.Switch"
+ ],
+ {
+ "author": "Trung0246",
+ "description": "Random nodes for ComfyUI I made to solve my struggle with ComfyUI (ex: pipe, process). Have varying quality.",
+ "nickname": "ComfyUI-0246",
+ "title": "ComfyUI-0246",
+ "title_aux": "ComfyUI-0246"
+ }
+ ],
+ "https://github.com/Ttl/ComfyUi_NNLatentUpscale": [
+ [
+ "NNLatentUpscale"
+ ],
+ {
+ "title_aux": "ComfyUI Neural network latent upscale custom node"
+ }
+ ],
+ "https://github.com/Umikaze-job/select_folder_path_easy": [
+ [
+ "SelectFolderPathEasy"
+ ],
+ {
+ "title_aux": "select_folder_path_easy"
+ }
+ ],
+ "https://github.com/WASasquatch/ASTERR": [
+ [
+ "ASTERR",
+ "SaveASTERR"
+ ],
+ {
+ "title_aux": "ASTERR"
+ }
+ ],
+ "https://github.com/WASasquatch/ComfyUI_Preset_Merger": [
+ [
+ "Preset_Model_Merge"
+ ],
+ {
+ "title_aux": "ComfyUI Preset Merger"
+ }
+ ],
+ "https://github.com/WASasquatch/FreeU_Advanced": [
+ [
+ "FreeU (Advanced)",
+ "FreeU_V2 (Advanced)"
+ ],
+ {
+ "title_aux": "FreeU_Advanced"
+ }
+ ],
+ "https://github.com/WASasquatch/PPF_Noise_ComfyUI": [
+ [
+ "Blend Latents (PPF Noise)",
+ "Cross-Hatch Power Fractal (PPF Noise)",
+ "Images as Latents (PPF Noise)",
+ "Perlin Power Fractal Latent (PPF Noise)"
+ ],
+ {
+ "title_aux": "PPF_Noise_ComfyUI"
+ }
+ ],
+ "https://github.com/WASasquatch/PowerNoiseSuite": [
+ [
+ "Blend Latents (PPF Noise)",
+ "Cross-Hatch Power Fractal (PPF Noise)",
+ "Cross-Hatch Power Fractal Settings (PPF Noise)",
+ "Images as Latents (PPF Noise)",
+ "Latent Adjustment (PPF Noise)",
+ "Latents to CPU (PPF Noise)",
+ "Linear Cross-Hatch Power Fractal (PPF Noise)",
+ "Perlin Power Fractal Latent (PPF Noise)",
+ "Perlin Power Fractal Settings (PPF Noise)",
+ "Power KSampler Advanced (PPF Noise)",
+ "Power-Law Noise (PPF Noise)"
+ ],
+ {
+ "title_aux": "Power Noise Suite for ComfyUI"
+ }
+ ],
+ "https://github.com/WASasquatch/WAS_Extras": [
+ [
+ "BLVAEEncode",
+ "CLIPTextEncodeList",
+ "CLIPTextEncodeSequence2",
+ "ConditioningBlend",
+ "DebugInput",
+ "KSamplerSeq",
+ "KSamplerSeq2",
+ "VAEEncodeForInpaint (WAS)",
+ "VividSharpen"
+ ],
+ {
+ "title_aux": "WAS_Extras"
+ }
+ ],
+ "https://github.com/WASasquatch/was-node-suite-comfyui": [
+ [
+ "BLIP Analyze Image",
+ "BLIP Model Loader",
+ "Blend Latents",
+ "Boolean To Text",
+ "Bounded Image Blend",
+ "Bounded Image Blend with Mask",
+ "Bounded Image Crop",
+ "Bounded Image Crop with Mask",
+ "Bus Node",
+ "CLIP Input Switch",
+ "CLIP Vision Input Switch",
+ "CLIPSeg Batch Masking",
+ "CLIPSeg Masking",
+ "CLIPSeg Model Loader",
+ "CLIPTextEncode (BlenderNeko Advanced + NSP)",
+ "CLIPTextEncode (NSP)",
+ "Cache Node",
+ "Checkpoint Loader",
+ "Checkpoint Loader (Simple)",
+ "Conditioning Input Switch",
+ "Constant Number",
+ "Control Net Model Input Switch",
+ "Convert Masks to Images",
+ "Create Grid Image",
+ "Create Grid Image from Batch",
+ "Create Morph Image",
+ "Create Morph Image from Path",
+ "Create Video from Path",
+ "Debug Number to Console",
+ "Dictionary to Console",
+ "Diffusers Hub Model Down-Loader",
+ "Diffusers Model Loader",
+ "Export API",
+ "Image Analyze",
+ "Image Aspect Ratio",
+ "Image Batch",
+ "Image Blank",
+ "Image Blend",
+ "Image Blend by Mask",
+ "Image Blending Mode",
+ "Image Bloom Filter",
+ "Image Bounds",
+ "Image Bounds to Console",
+ "Image Canny Filter",
+ "Image Chromatic Aberration",
+ "Image Color Palette",
+ "Image Crop Face",
+ "Image Crop Location",
+ "Image Crop Square Location",
+ "Image Displacement Warp",
+ "Image Dragan Photography Filter",
+ "Image Edge Detection Filter",
+ "Image Film Grain",
+ "Image Filter Adjustments",
+ "Image Flip",
+ "Image Generate Gradient",
+ "Image Gradient Map",
+ "Image High Pass Filter",
+ "Image History Loader",
+ "Image Input Switch",
+ "Image Levels Adjustment",
+ "Image Load",
+ "Image Lucy Sharpen",
+ "Image Median Filter",
+ "Image Mix RGB Channels",
+ "Image Monitor Effects Filter",
+ "Image Nova Filter",
+ "Image Padding",
+ "Image Paste Crop",
+ "Image Paste Crop by Location",
+ "Image Paste Face",
+ "Image Perlin Noise",
+ "Image Perlin Power Fractal",
+ "Image Pixelate",
+ "Image Power Noise",
+ "Image Rembg (Remove Background)",
+ "Image Remove Background (Alpha)",
+ "Image Remove Color",
+ "Image Resize",
+ "Image Rotate",
+ "Image Rotate Hue",
+ "Image SSAO (Ambient Occlusion)",
+ "Image SSDO (Direct Occlusion)",
+ "Image Save",
+ "Image Seamless Texture",
+ "Image Select Channel",
+ "Image Select Color",
+ "Image Shadows and Highlights",
+ "Image Size to Number",
+ "Image Stitch",
+ "Image Style Filter",
+ "Image Threshold",
+ "Image Tiled",
+ "Image Transpose",
+ "Image Voronoi Noise Filter",
+ "Image fDOF Filter",
+ "Image to Latent Mask",
+ "Image to Noise",
+ "Image to Seed",
+ "Images to Linear",
+ "Images to RGB",
+ "Inset Image Bounds",
+ "Integer place counter",
+ "KSampler (WAS)",
+ "KSampler Cycle",
+ "Latent Batch",
+ "Latent Input Switch",
+ "Latent Noise Injection",
+ "Latent Size to Number",
+ "Latent Upscale by Factor (WAS)",
+ "Load Cache",
+ "Load Image Batch",
+ "Load Lora",
+ "Load Text File",
+ "Logic Boolean",
+ "Logic Boolean Primitive",
+ "Logic Comparison AND",
+ "Logic Comparison OR",
+ "Logic Comparison XOR",
+ "Logic NOT",
+ "Lora Input Switch",
+ "Lora Loader",
+ "Mask Arbitrary Region",
+ "Mask Batch",
+ "Mask Batch to Mask",
+ "Mask Ceiling Region",
+ "Mask Crop Dominant Region",
+ "Mask Crop Minority Region",
+ "Mask Crop Region",
+ "Mask Dilate Region",
+ "Mask Dominant Region",
+ "Mask Erode Region",
+ "Mask Fill Holes",
+ "Mask Floor Region",
+ "Mask Gaussian Region",
+ "Mask Invert",
+ "Mask Minority Region",
+ "Mask Paste Region",
+ "Mask Smooth Region",
+ "Mask Threshold Region",
+ "Masks Add",
+ "Masks Combine Batch",
+ "Masks Combine Regions",
+ "Masks Subtract",
+ "MiDaS Depth Approximation",
+ "MiDaS Mask Image",
+ "MiDaS Model Loader",
+ "Model Input Switch",
+ "Number Counter",
+ "Number Input Condition",
+ "Number Input Switch",
+ "Number Multiple Of",
+ "Number Operation",
+ "Number PI",
+ "Number to Float",
+ "Number to Int",
+ "Number to Seed",
+ "Number to String",
+ "Number to Text",
+ "Prompt Multiple Styles Selector",
+ "Prompt Styles Selector",
+ "Random Number",
+ "SAM Image Mask",
+ "SAM Model Loader",
+ "SAM Parameters",
+ "SAM Parameters Combine",
+ "Samples Passthrough (Stat System)",
+ "Save Text File",
+ "Seed",
+ "String to Text",
+ "Tensor Batch to Image",
+ "Text Add Token by Input",
+ "Text Add Tokens",
+ "Text Compare",
+ "Text Concatenate",
+ "Text Contains",
+ "Text Dictionary Convert",
+ "Text Dictionary Get",
+ "Text Dictionary Keys",
+ "Text Dictionary New",
+ "Text Dictionary To Text",
+ "Text Dictionary Update",
+ "Text File History Loader",
+ "Text Find and Replace",
+ "Text Find and Replace Input",
+ "Text Find and Replace by Dictionary",
+ "Text Input Switch",
+ "Text List",
+ "Text List Concatenate",
+ "Text List to Text",
+ "Text Load Line From File",
+ "Text Multiline",
+ "Text Parse A1111 Embeddings",
+ "Text Parse Noodle Soup Prompts",
+ "Text Parse Tokens",
+ "Text Random Line",
+ "Text Random Prompt",
+ "Text Shuffle",
+ "Text String",
+ "Text String Truncate",
+ "Text to Conditioning",
+ "Text to Console",
+ "Text to Number",
+ "Text to String",
+ "True Random.org Number Generator",
+ "Upscale Model Loader",
+ "Upscale Model Switch",
+ "VAE Input Switch",
+ "Video Dump Frames",
+ "Write to GIF",
+ "Write to Video",
+ "unCLIP Checkpoint Loader"
+ ],
+ {
+ "title_aux": "WAS Node Suite"
+ }
+ ],
+ "https://github.com/WebDev9000/WebDev9000-Nodes": [
+ [
+ "IgnoreBraces",
+ "SettingsSwitch"
+ ],
+ {
+ "title_aux": "WebDev9000-Nodes"
+ }
+ ],
+ "https://github.com/YMC-GitHub/ymc-node-suite-comfyui": [
+ [
+ "canvas-util-cal-size",
+ "conditioning-util-input-switch",
+ "cutoff-region-util",
+ "hks-util-cal-denoise-step",
+ "img-util-get-image-size",
+ "img-util-switch-input-image",
+ "io-image-save",
+ "io-text-save",
+ "io-util-file-list-get",
+ "io-util-file-list-get-text",
+ "number-util-random-num",
+ "pipe-util-to-basic-pipe",
+ "region-util-get-by-center-and-size",
+ "region-util-get-by-lt",
+ "region-util-get-crop-location-from-center-size-text",
+ "region-util-get-pad-out-location-by-size",
+ "text-preset-colors",
+ "text-util-join-text",
+ "text-util-loop-text",
+ "text-util-path-list",
+ "text-util-prompt-add-prompt",
+ "text-util-prompt-adv-dup",
+ "text-util-prompt-adv-search",
+ "text-util-prompt-del",
+ "text-util-prompt-dup",
+ "text-util-prompt-join",
+ "text-util-prompt-search",
+ "text-util-prompt-shuffle",
+ "text-util-prompt-std",
+ "text-util-prompt-unweight",
+ "text-util-random-text",
+ "text-util-search-text",
+ "text-util-show-text",
+ "text-util-switch-text",
+ "xyz-util-txt-to-int"
+ ],
+ {
+ "title_aux": "ymc-node-suite-comfyui"
+ }
+ ],
+ "https://github.com/YOUR-WORST-TACO/ComfyUI-TacoNodes": [
+ [
+ "Example",
+ "TacoAnimatedLoader",
+ "TacoGifMaker",
+ "TacoImg2ImgAnimatedLoader",
+ "TacoImg2ImgAnimatedProcessor",
+ "TacoLatent"
+ ],
+ {
+ "title_aux": "ComfyUI-TacoNodes"
+ }
+ ],
+ "https://github.com/YinBailiang/MergeBlockWeighted_fo_ComfyUI": [
+ [
+ "MergeBlockWeighted"
+ ],
+ {
+ "title_aux": "MergeBlockWeighted_fo_ComfyUI"
+ }
+ ],
+ "https://github.com/ZHO-ZHO-ZHO/ComfyUI-ArtGallery": [
+ [
+ "ArtGallery_Zho",
+ "ArtistsImage_Zho",
+ "CamerasImage_Zho",
+ "FilmsImage_Zho",
+ "MovementsImage_Zho",
+ "StylesImage_Zho"
+ ],
+ {
+ "title_aux": "ComfyUI-ArtGallery"
+ }
+ ],
+ "https://github.com/ZHO-ZHO-ZHO/ComfyUI-Gemini": [
+ [
+ "ConcatText_Zho",
+ "DisplayText_Zho",
+ "Gemini_API_Chat_Zho",
+ "Gemini_API_S_Chat_Zho",
+ "Gemini_API_S_Vsion_ImgURL_Zho",
+ "Gemini_API_S_Zho",
+ "Gemini_API_Vsion_ImgURL_Zho",
+ "Gemini_API_Zho"
+ ],
+ {
+ "title_aux": "ComfyUI-Gemini"
+ }
+ ],
+ "https://github.com/ZHO-ZHO-ZHO/ComfyUI-InstantID": [
+ [
+ "IDBaseModelLoader_fromhub",
+ "IDBaseModelLoader_local",
+ "IDControlNetLoader",
+ "IDGenerationNode",
+ "ID_Prompt_Styler",
+ "InsightFaceLoader_Zho",
+ "Ipadapter_instantidLoader"
+ ],
+ {
+ "title_aux": "ComfyUI-InstantID"
+ }
+ ],
+ "https://github.com/ZHO-ZHO-ZHO/ComfyUI-PhotoMaker-ZHO": [
+ [
+ "BaseModel_Loader_fromhub",
+ "BaseModel_Loader_local",
+ "LoRALoader",
+ "NEW_PhotoMaker_Generation",
+ "PhotoMakerAdapter_Loader_fromhub",
+ "PhotoMakerAdapter_Loader_local",
+ "PhotoMaker_Generation",
+ "Prompt_Styler",
+ "Ref_Image_Preprocessing"
+ ],
+ {
+ "title_aux": "ComfyUI PhotoMaker (ZHO)"
+ }
+ ],
+ "https://github.com/ZHO-ZHO-ZHO/ComfyUI-Q-Align": [
+ [
+ "QAlign_Zho"
+ ],
+ {
+ "title_aux": "ComfyUI-Q-Align"
+ }
+ ],
+ "https://github.com/ZHO-ZHO-ZHO/ComfyUI-Qwen-VL-API": [
+ [
+ "QWenVL_API_S_Multi_Zho",
+ "QWenVL_API_S_Zho"
+ ],
+ {
+ "title_aux": "ComfyUI-Qwen-VL-API"
+ }
+ ],
+ "https://github.com/ZHO-ZHO-ZHO/ComfyUI-SVD-ZHO": [
+ [
+ "SVD_Aspect_Ratio_Zho",
+ "SVD_Steps_MotionStrength_Seed_Zho",
+ "SVD_Styler_Zho"
+ ],
+ {
+ "title_aux": "ComfyUI-SVD-ZHO (WIP)"
+ }
+ ],
+ "https://github.com/ZHO-ZHO-ZHO/ComfyUI-SegMoE": [
+ [
+ "SMoE_Generation_Zho",
+ "SMoE_ModelLoader_Zho"
+ ],
+ {
+ "title_aux": "ComfyUI SegMoE"
+ }
+ ],
+ "https://github.com/ZHO-ZHO-ZHO/ComfyUI-Text_Image-Composite": [
+ [
+ "AlphaChanelAddByMask",
+ "ImageCompositeBy_BG_Zho",
+ "ImageCompositeBy_Zho",
+ "ImageComposite_BG_Zho",
+ "ImageComposite_Zho",
+ "RGB_Image_Zho",
+ "Text_Image_Frame_Zho",
+ "Text_Image_Multiline_Zho",
+ "Text_Image_Zho"
+ ],
+ {
+ "title_aux": "ComfyUI-Text_Image-Composite [WIP]"
+ }
+ ],
+ "https://github.com/ZHO-ZHO-ZHO/comfyui-portrait-master-zh-cn": [
+ [
+ "PortraitMaster_\u4e2d\u6587\u7248"
+ ],
+ {
+ "title_aux": "comfyui-portrait-master-zh-cn"
+ }
+ ],
+ "https://github.com/ZaneA/ComfyUI-ImageReward": [
+ [
+ "ImageRewardLoader",
+ "ImageRewardScore"
+ ],
+ {
+ "title_aux": "ImageReward"
+ }
+ ],
+ "https://github.com/Zuellni/ComfyUI-ExLlama": [
+ [
+ "ZuellniExLlamaGenerator",
+ "ZuellniExLlamaLoader",
+ "ZuellniTextPreview",
+ "ZuellniTextReplace"
+ ],
+ {
+ "title_aux": "ComfyUI-ExLlama"
+ }
+ ],
+ "https://github.com/Zuellni/ComfyUI-PickScore-Nodes": [
+ [
+ "ZuellniPickScoreImageProcessor",
+ "ZuellniPickScoreLoader",
+ "ZuellniPickScoreSelector",
+ "ZuellniPickScoreTextProcessor"
+ ],
+ {
+ "title_aux": "ComfyUI PickScore Nodes"
+ }
+ ],
+ "https://github.com/a1lazydog/ComfyUI-AudioScheduler": [
+ [
+ "AmplitudeToGraph",
+ "AmplitudeToNumber",
+ "AudioToAmplitudeGraph",
+ "AudioToFFTs",
+ "BatchAmplitudeSchedule",
+ "ClipAmplitude",
+ "GateNormalizedAmplitude",
+ "LoadAudio",
+ "NormalizeAmplitude",
+ "NormalizedAmplitudeDrivenString",
+ "NormalizedAmplitudeToGraph",
+ "NormalizedAmplitudeToNumber",
+ "TransientAmplitudeBasic"
+ ],
+ {
+ "title_aux": "ComfyUI-AudioScheduler"
+ }
+ ],
+ "https://github.com/abdozmantar/ComfyUI-InstaSwap": [
+ [
+ "InstaSwapFaceSwap",
+ "InstaSwapLoadFaceModel",
+ "InstaSwapSaveFaceModel"
+ ],
+ {
+ "title_aux": "InstaSwap Face Swap Node for ComfyUI"
+ }
+ ],
+ "https://github.com/abyz22/image_control": [
+ [
+ "abyz22_Convertpipe",
+ "abyz22_Editpipe",
+ "abyz22_FirstNonNull",
+ "abyz22_FromBasicPipe_v2",
+ "abyz22_Frompipe",
+ "abyz22_ImpactWildcardEncode",
+ "abyz22_ImpactWildcardEncode_GetPrompt",
+ "abyz22_Ksampler",
+ "abyz22_Padding Image",
+ "abyz22_RemoveControlnet",
+ "abyz22_SaveImage",
+ "abyz22_SetQueue",
+ "abyz22_ToBasicPipe",
+ "abyz22_Topipe",
+ "abyz22_blend_onecolor",
+ "abyz22_blendimages",
+ "abyz22_bypass",
+ "abyz22_drawmask",
+ "abyz22_lamaInpaint",
+ "abyz22_lamaPreprocessor",
+ "abyz22_makecircles",
+ "abyz22_setimageinfo",
+ "abyz22_smallhead"
+ ],
+ {
+ "title_aux": "image_control"
+ }
+ ],
+ "https://github.com/adbrasi/ComfyUI-TrashNodes-DownloadHuggingface": [
+ [
+ "DownloadLinkChecker",
+ "ShowFileNames"
+ ],
+ {
+ "title_aux": "ComfyUI-TrashNodes-DownloadHuggingface"
+ }
+ ],
+ "https://github.com/adieyal/comfyui-dynamicprompts": [
+ [
+ "DPCombinatorialGenerator",
+ "DPFeelingLucky",
+ "DPJinja",
+ "DPMagicPrompt",
+ "DPOutput",
+ "DPRandomGenerator"
+ ],
+ {
+ "title_aux": "DynamicPrompts Custom Nodes"
+ }
+ ],
+ "https://github.com/adriflex/ComfyUI_Blender_Texdiff": [
+ [
+ "ViewportColor",
+ "ViewportDepth"
+ ],
+ {
+ "title_aux": "ComfyUI_Blender_Texdiff"
+ }
+ ],
+ "https://github.com/aegis72/aegisflow_utility_nodes": [
+ [
+ "Add Text To Image",
+ "Aegisflow CLIP Pass",
+ "Aegisflow Conditioning Pass",
+ "Aegisflow Image Pass",
+ "Aegisflow Latent Pass",
+ "Aegisflow Mask Pass",
+ "Aegisflow Model Pass",
+ "Aegisflow Pos/Neg Pass",
+ "Aegisflow SDXL Tuple Pass",
+ "Aegisflow VAE Pass",
+ "Aegisflow controlnet preprocessor bus",
+ "Apply Instagram Filter",
+ "Brightness_Contrast_Ally",
+ "Flatten Colors",
+ "Gaussian Blur_Ally",
+ "GlitchThis Effect",
+ "Hue Rotation",
+ "Image Flip_ally",
+ "Placeholder Tuple",
+ "Swap Color Mode",
+ "aegisflow Multi_Pass",
+ "aegisflow Multi_Pass XL",
+ "af_pipe_in_15",
+ "af_pipe_in_xl",
+ "af_pipe_out_15",
+ "af_pipe_out_xl"
+ ],
+ {
+ "title_aux": "AegisFlow Utility Nodes"
+ }
+ ],
+ "https://github.com/aegis72/comfyui-styles-all": [
+ [
+ "menus"
+ ],
+ {
+ "title_aux": "ComfyUI-styles-all"
+ }
+ ],
+ "https://github.com/ai-liam/comfyui_liam_util": [
+ [
+ "LiamLoadImage"
+ ],
+ {
+ "title_aux": "LiamUtil"
+ }
+ ],
+ "https://github.com/aianimation55/ComfyUI-FatLabels": [
+ [
+ "FatLabels"
+ ],
+ {
+ "title_aux": "Comfy UI FatLabels"
+ }
+ ],
+ "https://github.com/alexopus/ComfyUI-Image-Saver": [
+ [
+ "Cfg Literal (Image Saver)",
+ "Checkpoint Loader with Name (Image Saver)",
+ "Float Literal (Image Saver)",
+ "Image Saver",
+ "Int Literal (Image Saver)",
+ "Sampler Selector (Image Saver)",
+ "Scheduler Selector (Image Saver)",
+ "Seed Generator (Image Saver)",
+ "String Literal (Image Saver)",
+ "Width/Height Literal (Image Saver)"
+ ],
+ {
+ "title_aux": "ComfyUI Image Saver"
+ }
+ ],
+ "https://github.com/alpertunga-bile/prompt-generator-comfyui": [
+ [
+ "Prompt Generator"
+ ],
+ {
+ "title_aux": "prompt-generator"
+ }
+ ],
+ "https://github.com/alsritter/asymmetric-tiling-comfyui": [
+ [
+ "Asymmetric_Tiling_KSampler"
+ ],
+ {
+ "title_aux": "asymmetric-tiling-comfyui"
+ }
+ ],
+ "https://github.com/alt-key-project/comfyui-dream-project": [
+ [
+ "Analyze Palette [Dream]",
+ "Beat Curve [Dream]",
+ "Big Float Switch [Dream]",
+ "Big Image Switch [Dream]",
+ "Big Int Switch [Dream]",
+ "Big Latent Switch [Dream]",
+ "Big Palette Switch [Dream]",
+ "Big Text Switch [Dream]",
+ "Boolean To Float [Dream]",
+ "Boolean To Int [Dream]",
+ "Build Prompt [Dream]",
+ "CSV Curve [Dream]",
+ "CSV Generator [Dream]",
+ "Calculation [Dream]",
+ "Common Frame Dimensions [Dream]",
+ "Compare Palettes [Dream]",
+ "FFMPEG Video Encoder [Dream]",
+ "File Count [Dream]",
+ "Finalize Prompt [Dream]",
+ "Float Input [Dream]",
+ "Float to Log Entry [Dream]",
+ "Frame Count Calculator [Dream]",
+ "Frame Counter (Directory) [Dream]",
+ "Frame Counter (Simple) [Dream]",
+ "Frame Counter Info [Dream]",
+ "Frame Counter Offset [Dream]",
+ "Frame Counter Time Offset [Dream]",
+ "Image Brightness Adjustment [Dream]",
+ "Image Color Shift [Dream]",
+ "Image Contrast Adjustment [Dream]",
+ "Image Motion [Dream]",
+ "Image Sequence Blend [Dream]",
+ "Image Sequence Loader [Dream]",
+ "Image Sequence Saver [Dream]",
+ "Image Sequence Tweening [Dream]",
+ "Int Input [Dream]",
+ "Int to Log Entry [Dream]",
+ "Laboratory [Dream]",
+ "Linear Curve [Dream]",
+ "Log Entry Joiner [Dream]",
+ "Log File [Dream]",
+ "Noise from Area Palettes [Dream]",
+ "Noise from Palette [Dream]",
+ "Palette Color Align [Dream]",
+ "Palette Color Shift [Dream]",
+ "Sample Image Area as Palette [Dream]",
+ "Sample Image as Palette [Dream]",
+ "Saw Curve [Dream]",
+ "Sine Curve [Dream]",
+ "Smooth Event Curve [Dream]",
+ "String Input [Dream]",
+ "String Tokenizer [Dream]",
+ "String to Log Entry [Dream]",
+ "Text Input [Dream]",
+ "Triangle Curve [Dream]",
+ "Triangle Event Curve [Dream]",
+ "WAV Curve [Dream]"
+ ],
+ {
+ "title_aux": "Dream Project Animation Nodes"
+ }
+ ],
+ "https://github.com/alt-key-project/comfyui-dream-video-batches": [
+ [
+ "Blended Transition [DVB]",
+ "Calculation [DVB]",
+ "Create Frame Set [DVB]",
+ "Divide [DVB]",
+ "Fade From Black [DVB]",
+ "Fade To Black [DVB]",
+ "Float Input [DVB]",
+ "For Each Done [DVB]",
+ "For Each Filename [DVB]",
+ "Frame Set Append [DVB]",
+ "Frame Set Frame Dimensions Scaled [DVB]",
+ "Frame Set Index Offset [DVB]",
+ "Frame Set Merger [DVB]",
+ "Frame Set Reindex [DVB]",
+ "Frame Set Repeat [DVB]",
+ "Frame Set Reverse [DVB]",
+ "Frame Set Split Beginning [DVB]",
+ "Frame Set Split End [DVB]",
+ "Frame Set Splitter [DVB]",
+ "Generate Inbetween Frames [DVB]",
+ "Int Input [DVB]",
+ "Linear Camera Pan [DVB]",
+ "Linear Camera Roll [DVB]",
+ "Linear Camera Zoom [DVB]",
+ "Load Image From Path [DVB]",
+ "Multiply [DVB]",
+ "Sine Camera Pan [DVB]",
+ "Sine Camera Roll [DVB]",
+ "Sine Camera Zoom [DVB]",
+ "String Input [DVB]",
+ "Text Input [DVB]",
+ "Trace Memory Allocation [DVB]",
+ "Unwrap Frame Set [DVB]"
+ ],
+ {
+ "title_aux": "Dream Video Batches"
+ }
+ ],
+ "https://github.com/an90ray/ComfyUI_RErouter_CustomNodes": [
+ [
+ "CLIPTextEncode (RE)",
+ "CLIPTextEncodeSDXL (RE)",
+ "CLIPTextEncodeSDXLRefiner (RE)",
+ "Int (RE)",
+ "RErouter <=",
+ "RErouter =>",
+ "String (RE)"
+ ],
+ {
+ "title_aux": "ComfyUI_RErouter_CustomNodes"
+ }
+ ],
+ "https://github.com/andersxa/comfyui-PromptAttention": [
+ [
+ "CLIPAttentionMaskEncode"
+ ],
+ {
+ "title_aux": "CLIP Directional Prompt Attention"
+ }
+ ],
+ "https://github.com/antrobot1234/antrobots-comfyUI-nodepack": [
+ [
+ "composite",
+ "crop",
+ "paste",
+ "preview_mask",
+ "scale"
+ ],
+ {
+ "title_aux": "antrobots ComfyUI Nodepack"
+ }
+ ],
+ "https://github.com/asagi4/ComfyUI-CADS": [
+ [
+ "CADS"
+ ],
+ {
+ "title_aux": "ComfyUI-CADS"
+ }
+ ],
+ "https://github.com/asagi4/comfyui-prompt-control": [
+ [
+ "EditableCLIPEncode",
+ "FilterSchedule",
+ "LoRAScheduler",
+ "PCApplySettings",
+ "PCPromptFromSchedule",
+ "PCScheduleSettings",
+ "PCSplitSampling",
+ "PromptControlSimple",
+ "PromptToSchedule",
+ "ScheduleToCond",
+ "ScheduleToModel"
+ ],
+ {
+ "title_aux": "ComfyUI prompt control"
+ }
+ ],
+ "https://github.com/asagi4/comfyui-utility-nodes": [
+ [
+ "MUForceCacheClear",
+ "MUJinjaRender",
+ "MUSimpleWildcard"
+ ],
+ {
+ "title_aux": "asagi4/comfyui-utility-nodes"
+ }
+ ],
+ "https://github.com/aszc-dev/ComfyUI-CoreMLSuite": [
+ [
+ "Core ML Converter",
+ "Core ML LCM Converter",
+ "Core ML LoRA Loader",
+ "CoreMLModelAdapter",
+ "CoreMLSampler",
+ "CoreMLSamplerAdvanced",
+ "CoreMLUNetLoader"
+ ],
+ {
+ "title_aux": "Core ML Suite for ComfyUI"
+ }
+ ],
+ "https://github.com/avatechai/avatar-graph-comfyui": [
+ [
+ "ApplyMeshTransformAsShapeKey",
+ "B_ENUM",
+ "B_VECTOR3",
+ "B_VECTOR4",
+ "Combine Points",
+ "CreateShapeFlow",
+ "ExportBlendshapes",
+ "ExportGLTF",
+ "Extract Boundary Points",
+ "Image Alpha Mask Merge",
+ "ImageBridge",
+ "LoadImageFromRequest",
+ "LoadImageWithAlpha",
+ "LoadValueFromRequest",
+ "SAM MultiLayer",
+ "Save Image With Workflow"
+ ],
+ {
+ "author": "Avatech Limited",
+ "description": "Include nodes for sam + bpy operation, that allows workflow creations for generative 2d character rig.",
+ "nickname": "Avatar Graph",
+ "title": "Avatar Graph",
+ "title_aux": "avatar-graph-comfyui"
+ }
+ ],
+ "https://github.com/azure-dragon-ai/ComfyUI-ClipScore-Nodes": [
+ [
+ "HaojihuiClipScoreFakeImageProcessor",
+ "HaojihuiClipScoreImageProcessor",
+ "HaojihuiClipScoreImageScore",
+ "HaojihuiClipScoreLoader",
+ "HaojihuiClipScoreRealImageProcessor",
+ "HaojihuiClipScoreTextProcessor"
+ ],
+ {
+ "title_aux": "ComfyUI-ClipScore-Nodes"
+ }
+ ],
+ "https://github.com/badjeff/comfyui_lora_tag_loader": [
+ [
+ "LoraTagLoader"
+ ],
+ {
+ "title_aux": "LoRA Tag Loader for ComfyUI"
+ }
+ ],
+ "https://github.com/banodoco/steerable-motion": [
+ [
+ "BatchCreativeInterpolation"
+ ],
+ {
+ "title_aux": "Steerable Motion"
+ }
+ ],
+ "https://github.com/bash-j/mikey_nodes": [
+ [
+ "AddMetaData",
+ "Batch Crop Image",
+ "Batch Crop Resize Inplace",
+ "Batch Load Images",
+ "Batch Resize Image for SDXL",
+ "Checkpoint Loader Simple Mikey",
+ "CinematicLook",
+ "Empty Latent Ratio Custom SDXL",
+ "Empty Latent Ratio Select SDXL",
+ "EvalFloats",
+ "FaceFixerOpenCV",
+ "FileNamePrefix",
+ "FileNamePrefixDateDirFirst",
+ "Float to String",
+ "HaldCLUT",
+ "Image Caption",
+ "ImageBorder",
+ "ImageOverlay",
+ "ImagePaste",
+ "Int to String",
+ "LMStudioPrompt",
+ "Load Image Based on Number",
+ "LoraSyntaxProcessor",
+ "Mikey Sampler",
+ "Mikey Sampler Base Only",
+ "Mikey Sampler Base Only Advanced",
+ "Mikey Sampler Tiled",
+ "Mikey Sampler Tiled Base Only",
+ "MikeySamplerTiledAdvanced",
+ "MikeySamplerTiledAdvancedBaseOnly",
+ "OobaPrompt",
+ "PresetRatioSelector",
+ "Prompt With SDXL",
+ "Prompt With Style",
+ "Prompt With Style V2",
+ "Prompt With Style V3",
+ "Range Float",
+ "Range Integer",
+ "Ratio Advanced",
+ "Resize Image for SDXL",
+ "Save Image If True",
+ "Save Image With Prompt Data",
+ "Save Images Mikey",
+ "Save Images No Display",
+ "SaveMetaData",
+ "SearchAndReplace",
+ "Seed String",
+ "Style Conditioner",
+ "Style Conditioner Base Only",
+ "Text2InputOr3rdOption",
+ "TextCombinations",
+ "TextCombinations3",
+ "TextConcat",
+ "TextPreserve",
+ "Upscale Tile Calculator",
+ "Wildcard Processor",
+ "WildcardAndLoraSyntaxProcessor",
+ "WildcardOobaPrompt"
+ ],
+ {
+ "title_aux": "Mikey Nodes"
+ }
+ ],
+ "https://github.com/bedovyy/ComfyUI_NAIDGenerator": [
+ [
+ "GenerateNAID",
+ "Img2ImgOptionNAID",
+ "InpaintingOptionNAID",
+ "MaskImageToNAID",
+ "ModelOptionNAID",
+ "PromptToNAID"
+ ],
+ {
+ "title_aux": "ComfyUI_NAIDGenerator"
+ }
+ ],
+ "https://github.com/biegert/ComfyUI-CLIPSeg/raw/main/custom_nodes/clipseg.py": [
+ [
+ "CLIPSeg",
+ "CombineSegMasks"
+ ],
+ {
+ "title_aux": "CLIPSeg"
+ }
+ ],
+ "https://github.com/bilal-arikan/ComfyUI_TextAssets": [
+ [
+ "LoadTextAsset"
+ ],
+ {
+ "title_aux": "ComfyUI_TextAssets"
+ }
+ ],
+ "https://github.com/blepping/ComfyUI-bleh": [
+ [
+ "BlehDeepShrink",
+ "BlehDiscardPenultimateSigma",
+ "BlehForceSeedSampler",
+ "BlehHyperTile",
+ "BlehInsaneChainSampler",
+ "BlehModelPatchConditional"
+ ],
+ {
+ "title_aux": "ComfyUI-bleh"
+ }
+ ],
+ "https://github.com/blepping/ComfyUI-sonar": [
+ [
+ "NoisyLatentLike",
+ "SamplerSonarDPMPPSDE",
+ "SamplerSonarEuler",
+ "SamplerSonarEulerA",
+ "SonarCustomNoise",
+ "SonarGuidanceConfig"
+ ],
+ {
+ "title_aux": "ComfyUI-sonar"
+ }
+ ],
+ "https://github.com/bmad4ever/comfyui_ab_samplercustom": [
+ [
+ "AB SamplerCustom (experimental)"
+ ],
+ {
+ "title_aux": "comfyui_ab_sampler"
+ }
+ ],
+ "https://github.com/bmad4ever/comfyui_bmad_nodes": [
+ [
+ "AdaptiveThresholding",
+ "Add String To Many",
+ "AddAlpha",
+ "AdjustRect",
+ "AnyToAny",
+ "BoundingRect (contours)",
+ "BuildColorRangeAdvanced (hsv)",
+ "BuildColorRangeHSV (hsv)",
+ "CLAHE",
+ "CLIPEncodeMultiple",
+ "CLIPEncodeMultipleAdvanced",
+ "ChameleonMask",
+ "CheckpointLoader (dirty)",
+ "CheckpointLoaderSimple (dirty)",
+ "Color (RGB)",
+ "Color (hexadecimal)",
+ "Color Clip",
+ "Color Clip (advanced)",
+ "Color Clip ADE20k",
+ "ColorDictionary",
+ "ColorDictionary (custom)",
+ "Conditioning (combine multiple)",
+ "Conditioning (combine selective)",
+ "Conditioning Grid (cond)",
+ "Conditioning Grid (string)",
+ "Conditioning Grid (string) Advanced",
+ "Contour To Mask",
+ "Contours",
+ "ControlNetHadamard",
+ "ControlNetHadamard (manual)",
+ "ConvertImg",
+ "CopyMakeBorder",
+ "CreateRequestMetadata",
+ "DistanceTransform",
+ "Draw Contour(s)",
+ "EqualizeHistogram",
+ "ExtendColorList",
+ "ExtendCondList",
+ "ExtendFloatList",
+ "ExtendImageList",
+ "ExtendIntList",
+ "ExtendLatentList",
+ "ExtendMaskList",
+ "ExtendModelList",
+ "ExtendStringList",
+ "FadeMaskEdges",
+ "Filter Contour",
+ "FindComplementaryColor",
+ "FindThreshold",
+ "FlatLatentsIntoSingleGrid",
+ "Framed Mask Grab Cut",
+ "Framed Mask Grab Cut 2",
+ "FromListGet1Color",
+ "FromListGet1Cond",
+ "FromListGet1Float",
+ "FromListGet1Image",
+ "FromListGet1Int",
+ "FromListGet1Latent",
+ "FromListGet1Mask",
+ "FromListGet1Model",
+ "FromListGet1String",
+ "FromListGetColors",
+ "FromListGetConds",
+ "FromListGetFloats",
+ "FromListGetImages",
+ "FromListGetInts",
+ "FromListGetLatents",
+ "FromListGetMasks",
+ "FromListGetModels",
+ "FromListGetStrings",
+ "Get Contour from list",
+ "Get Models",
+ "Get Prompt",
+ "HypernetworkLoader (dirty)",
+ "ImageBatchToList",
+ "InRange (hsv)",
+ "Inpaint",
+ "Input/String to Int Array",
+ "KMeansColor",
+ "Load 64 Encoded Image",
+ "LoraLoader (dirty)",
+ "MaskGrid N KSamplers Advanced",
+ "MaskOuterBlur",
+ "Merge Latent Batch Gridwise",
+ "MonoMerge",
+ "MorphologicOperation",
+ "MorphologicSkeletoning",
+ "NaiveAutoKMeansColor",
+ "OtsuThreshold",
+ "RGB to HSV",
+ "Rect Grab Cut",
+ "Remap",
+ "RemapBarrelDistortion",
+ "RemapFromInsideParabolas",
+ "RemapFromQuadrilateral (homography)",
+ "RemapInsideParabolas",
+ "RemapInsideParabolasAdvanced",
+ "RemapPinch",
+ "RemapReverseBarrelDistortion",
+ "RemapStretch",
+ "RemapToInnerCylinder",
+ "RemapToOuterCylinder",
+ "RemapToQuadrilateral",
+ "RemapWarpPolar",
+ "Repeat Into Grid (image)",
+ "Repeat Into Grid (latent)",
+ "RequestInputs",
+ "SampleColorHSV",
+ "Save Image (api)",
+ "SeamlessClone",
+ "SeamlessClone (simple)",
+ "SetRequestStateToComplete",
+ "String",
+ "String to Float",
+ "String to Integer",
+ "ToColorList",
+ "ToCondList",
+ "ToFloatList",
+ "ToImageList",
+ "ToIntList",
+ "ToLatentList",
+ "ToMaskList",
+ "ToModelList",
+ "ToStringList",
+ "UnGridify (image)",
+ "VAEEncodeBatch"
+ ],
+ {
+ "title_aux": "Bmad Nodes"
+ }
+ ],
+ "https://github.com/bmad4ever/comfyui_lists_cartesian_product": [
+ [
+ "AnyListCartesianProduct"
+ ],
+ {
+ "title_aux": "Lists Cartesian Product"
+ }
+ ],
+ "https://github.com/bradsec/ComfyUI_ResolutionSelector": [
+ [
+ "ResolutionSelector"
+ ],
+ {
+ "title_aux": "ResolutionSelector for ComfyUI"
+ }
+ ],
+ "https://github.com/braintacles/braintacles-comfyui-nodes": [
+ [
+ "CLIPTextEncodeSDXL-Multi-IO",
+ "CLIPTextEncodeSDXL-Pipe",
+ "Empty Latent Image from Aspect-Ratio",
+ "Random Find and Replace",
+ "VAE Decode Pipe",
+ "VAE Decode Tiled Pipe",
+ "VAE Encode Pipe",
+ "VAE Encode Tiled Pipe"
+ ],
+ {
+ "title_aux": "braintacles-nodes"
+ }
+ ],
+ "https://github.com/brianfitzgerald/style_aligned_comfy": [
+ [
+ "StyleAlignedBatchAlign",
+ "StyleAlignedReferenceSampler",
+ "StyleAlignedSampleReferenceLatents"
+ ],
+ {
+ "title_aux": "StyleAligned for ComfyUI"
+ }
+ ],
+ "https://github.com/bronkula/comfyui-fitsize": [
+ [
+ "FS: Crop Image Into Even Pieces",
+ "FS: Fit Image And Resize",
+ "FS: Fit Size From Image",
+ "FS: Fit Size From Int",
+ "FS: Image Region To Mask",
+ "FS: Load Image And Resize To Fit",
+ "FS: Pick Image From Batch",
+ "FS: Pick Image From Batches",
+ "FS: Pick Image From List"
+ ],
+ {
+ "title_aux": "comfyui-fitsize"
+ }
+ ],
+ "https://github.com/bruefire/ComfyUI-SeqImageLoader": [
+ [
+ "VFrame Loader With Mask Editor",
+ "Video Loader With Mask Editor"
+ ],
+ {
+ "title_aux": "ComfyUI Sequential Image Loader"
+ }
+ ],
+ "https://github.com/budihartono/comfyui_otonx_nodes": [
+ [
+ "OTX Integer Multiple Inputs 4",
+ "OTX Integer Multiple Inputs 5",
+ "OTX Integer Multiple Inputs 6",
+ "OTX KSampler Feeder",
+ "OTX Versatile Multiple Inputs 4",
+ "OTX Versatile Multiple Inputs 5",
+ "OTX Versatile Multiple Inputs 6"
+ ],
+ {
+ "title_aux": "Otonx's Custom Nodes"
+ }
+ ],
+ "https://github.com/bvhari/ComfyUI_ImageProcessing": [
+ [
+ "BilateralFilter",
+ "Brightness",
+ "Gamma",
+ "Hue",
+ "Saturation",
+ "SigmoidCorrection",
+ "UnsharpMask"
+ ],
+ {
+ "title_aux": "ImageProcessing"
+ }
+ ],
+ "https://github.com/bvhari/ComfyUI_LatentToRGB": [
+ [
+ "LatentToRGB"
+ ],
+ {
+ "title_aux": "LatentToRGB"
+ }
+ ],
+ "https://github.com/bvhari/ComfyUI_PerpWeight": [
+ [
+ "CLIPTextEncodePerpWeight"
+ ],
+ {
+ "title_aux": "ComfyUI_PerpWeight"
+ }
+ ],
+ "https://github.com/catscandrive/comfyui-imagesubfolders/raw/main/loadImageWithSubfolders.py": [
+ [
+ "LoadImagewithSubfolders"
+ ],
+ {
+ "title_aux": "Image loader with subfolders"
+ }
+ ],
+ "https://github.com/celsojr2013/comfyui_simpletools/raw/main/google_translator.py": [
+ [
+ "GoogleTranslator"
+ ],
+ {
+ "title_aux": "ComfyUI SimpleTools Suit"
+ }
+ ],
+ "https://github.com/ceruleandeep/ComfyUI-LLaVA-Captioner": [
+ [
+ "LlavaCaptioner"
+ ],
+ {
+ "title_aux": "ComfyUI LLaVA Captioner"
+ }
+ ],
+ "https://github.com/chaojie/ComfyUI-DragNUWA": [
+ [
+ "BrushMotion",
+ "CompositeMotionBrush",
+ "CompositeMotionBrushWithoutModel",
+ "DragNUWA Run",
+ "DragNUWA Run MotionBrush",
+ "Get First Image",
+ "Get Last Image",
+ "InstantCameraMotionBrush",
+ "InstantObjectMotionBrush",
+ "Load CheckPoint DragNUWA",
+ "Load MotionBrush From Optical Flow",
+ "Load MotionBrush From Optical Flow Directory",
+ "Load MotionBrush From Optical Flow Without Model",
+ "Load MotionBrush From Tracking Points",
+ "Load MotionBrush From Tracking Points Without Model",
+ "Load Pose KeyPoints",
+ "Loop",
+ "LoopEnd_IMAGE",
+ "LoopStart_IMAGE",
+ "Split Tracking Points"
+ ],
+ {
+ "title_aux": "ComfyUI-DragNUWA"
+ }
+ ],
+ "https://github.com/chaojie/ComfyUI-DynamiCrafter": [
+ [
+ "DynamiCrafter Simple",
+ "DynamiCrafterLoader"
+ ],
+ {
+ "title_aux": "ComfyUI-DynamiCrafter"
+ }
+ ],
+ "https://github.com/chaojie/ComfyUI-I2VGEN-XL": [
+ [
+ "I2VGEN-XL Simple",
+ "Modelscope Pipeline Loader"
+ ],
+ {
+ "title_aux": "ComfyUI-I2VGEN-XL"
+ }
+ ],
+ "https://github.com/chaojie/ComfyUI-LightGlue": [
+ [
+ "LightGlue Loader",
+ "LightGlue Simple",
+ "LightGlue Simple Multi"
+ ],
+ {
+ "title_aux": "ComfyUI-LightGlue"
+ }
+ ],
+ "https://github.com/chaojie/ComfyUI-Moore-AnimateAnyone": [
+ [
+ "Moore-AnimateAnyone Denoising Unet",
+ "Moore-AnimateAnyone Image Encoder",
+ "Moore-AnimateAnyone Pipeline Loader",
+ "Moore-AnimateAnyone Pose Guider",
+ "Moore-AnimateAnyone Reference Unet",
+ "Moore-AnimateAnyone Simple",
+ "Moore-AnimateAnyone VAE"
+ ],
+ {
+ "title_aux": "ComfyUI-Moore-AnimateAnyone"
+ }
+ ],
+ "https://github.com/chaojie/ComfyUI-Motion-Vector-Extractor": [
+ [
+ "Motion Vector Extractor",
+ "VideoCombineThenPath"
+ ],
+ {
+ "title_aux": "ComfyUI-Motion-Vector-Extractor"
+ }
+ ],
+ "https://github.com/chaojie/ComfyUI-MotionCtrl": [
+ [
+ "Load Motion Camera Preset",
+ "Load Motion Traj Preset",
+ "Load Motionctrl Checkpoint",
+ "Motionctrl Cond",
+ "Motionctrl Sample",
+ "Motionctrl Sample Simple",
+ "Select Image Indices"
+ ],
+ {
+ "title_aux": "ComfyUI-MotionCtrl"
+ }
+ ],
+ "https://github.com/chaojie/ComfyUI-MotionCtrl-SVD": [
+ [
+ "Load Motionctrl-SVD Camera Preset",
+ "Load Motionctrl-SVD Checkpoint",
+ "Motionctrl-SVD Sample Simple"
+ ],
+ {
+ "title_aux": "ComfyUI-MotionCtrl-SVD"
+ }
+ ],
+ "https://github.com/chaojie/ComfyUI-Panda3d": [
+ [
+ "Panda3dAmbientLight",
+ "Panda3dAttachNewNode",
+ "Panda3dBase",
+ "Panda3dDirectionalLight",
+ "Panda3dLoadDepthModel",
+ "Panda3dLoadModel",
+ "Panda3dLoadTexture",
+ "Panda3dModelMerge",
+ "Panda3dTest",
+ "Panda3dTextureMerge"
+ ],
+ {
+ "title_aux": "ComfyUI-Panda3d"
+ }
+ ],
+ "https://github.com/chaojie/ComfyUI-Pymunk": [
+ [
+ "PygameRun",
+ "PygameSurface",
+ "PymunkDynamicBox",
+ "PymunkDynamicCircle",
+ "PymunkRun",
+ "PymunkShapeMerge",
+ "PymunkSpace",
+ "PymunkStaticLine"
+ ],
+ {
+ "title_aux": "ComfyUI-Pymunk"
+ }
+ ],
+ "https://github.com/chaojie/ComfyUI-RAFT": [
+ [
+ "Load MotionBrush",
+ "RAFT Run",
+ "Save MotionBrush",
+ "VizMotionBrush"
+ ],
+ {
+ "title_aux": "ComfyUI-RAFT"
+ }
+ ],
+ "https://github.com/chflame163/ComfyUI_LayerStyle": [
+ [
+ "LayerColor: Brightness & Contrast",
+ "LayerColor: ColorAdapter",
+ "LayerColor: Exposure",
+ "LayerColor: Gamma",
+ "LayerColor: HSV",
+ "LayerColor: LAB",
+ "LayerColor: LUT Apply",
+ "LayerColor: RGB",
+ "LayerColor: YUV",
+ "LayerFilter: ChannelShake",
+ "LayerFilter: ColorMap",
+ "LayerFilter: GaussianBlur",
+ "LayerFilter: MotionBlur",
+ "LayerFilter: Sharp & Soft",
+ "LayerFilter: SkinBeauty",
+ "LayerFilter: SoftLight",
+ "LayerFilter: WaterColor",
+ "LayerMask: CreateGradientMask",
+ "LayerMask: MaskBoxDetect",
+ "LayerMask: MaskByDifferent",
+ "LayerMask: MaskEdgeShrink",
+ "LayerMask: MaskEdgeUltraDetail",
+ "LayerMask: MaskGradient",
+ "LayerMask: MaskGrow",
+ "LayerMask: MaskInvert",
+ "LayerMask: MaskMotionBlur",
+ "LayerMask: MaskPreview",
+ "LayerMask: MaskStroke",
+ "LayerMask: PixelSpread",
+ "LayerMask: RemBgUltra",
+ "LayerMask: SegmentAnythingUltra",
+ "LayerStyle: ColorOverlay",
+ "LayerStyle: DropShadow",
+ "LayerStyle: GradientOverlay",
+ "LayerStyle: InnerGlow",
+ "LayerStyle: InnerShadow",
+ "LayerStyle: OuterGlow",
+ "LayerStyle: Stroke",
+ "LayerUtility: ColorImage",
+ "LayerUtility: ColorPicker",
+ "LayerUtility: CropByMask",
+ "LayerUtility: ExtendCanvas",
+ "LayerUtility: GetColorTone",
+ "LayerUtility: GetImageSize",
+ "LayerUtility: GradientImage",
+ "LayerUtility: ImageBlend",
+ "LayerUtility: ImageBlendAdvance",
+ "LayerUtility: ImageChannelMerge",
+ "LayerUtility: ImageChannelSplit",
+ "LayerUtility: ImageMaskScaleAs",
+ "LayerUtility: ImageOpacity",
+ "LayerUtility: ImageScaleRestore",
+ "LayerUtility: ImageShift",
+ "LayerUtility: LayerImageTransform",
+ "LayerUtility: LayerMaskTransform",
+ "LayerUtility: PrintInfo",
+ "LayerUtility: RestoreCropBox",
+ "LayerUtility: TextImage",
+ "LayerUtility: XY to Percent"
+ ],
+ {
+ "title_aux": "ComfyUI Layer Style"
+ }
+ ],
+ "https://github.com/chflame163/ComfyUI_MSSpeech_TTS": [
+ [
+ "Input Trigger",
+ "MicrosoftSpeech_TTS",
+ "Play Sound",
+ "Play Sound (loop)"
+ ],
+ {
+ "title_aux": "ComfyUI_MSSpeech_TTS"
+ }
+ ],
+ "https://github.com/chflame163/ComfyUI_WordCloud": [
+ [
+ "ComfyWordCloud",
+ "LoadTextFile",
+ "RGB_Picker"
+ ],
+ {
+ "title_aux": "ComfyUI_WordCloud"
+ }
+ ],
+ "https://github.com/chibiace/ComfyUI-Chibi-Nodes": [
+ [
+ "ConditionText",
+ "ConditionTextMulti",
+ "ImageAddText",
+ "ImageSimpleResize",
+ "ImageSizeInfo",
+ "ImageTool",
+ "Int2String",
+ "LoadEmbedding",
+ "LoadImageExtended",
+ "Loader",
+ "Prompts",
+ "RandomResolutionLatent",
+ "SaveImages",
+ "SeedGenerator",
+ "SimpleSampler",
+ "TextSplit",
+ "Textbox",
+ "Wildcards"
+ ],
+ {
+ "title_aux": "ComfyUI-Chibi-Nodes"
+ }
+ ],
+ "https://github.com/chrisgoringe/cg-image-picker": [
+ [
+ "Preview Chooser",
+ "Preview Chooser Fabric"
+ ],
+ {
+ "author": "chrisgoringe",
+ "description": "Custom nodes that preview images and pause the workflow to allow the user to select one or more to progress",
+ "nickname": "Image Chooser",
+ "title": "Image Chooser",
+ "title_aux": "Image chooser"
+ }
+ ],
+ "https://github.com/chrisgoringe/cg-noise": [
+ [
+ "Hijack",
+ "KSampler Advanced with Variations",
+ "KSampler with Variations",
+ "UnHijack"
+ ],
+ {
+ "title_aux": "Variation seeds"
+ }
+ ],
+ "https://github.com/chrisgoringe/cg-use-everywhere": [
+ [
+ "Seed Everywhere"
+ ],
+ {
+ "nodename_pattern": "(^(Prompts|Anything) Everywhere|Simple String)",
+ "title_aux": "Use Everywhere (UE Nodes)"
+ }
+ ],
+ "https://github.com/city96/ComfyUI_ColorMod": [
+ [
+ "ColorModEdges",
+ "ColorModPivot",
+ "LoadImageHighPrec",
+ "PreviewImageHighPrec",
+ "SaveImageHighPrec"
+ ],
+ {
+ "title_aux": "ComfyUI_ColorMod"
+ }
+ ],
+ "https://github.com/city96/ComfyUI_DiT": [
+ [
+ "DiTCheckpointLoader",
+ "DiTCheckpointLoaderSimple",
+ "DiTLabelCombine",
+ "DiTLabelSelect",
+ "DiTSampler"
+ ],
+ {
+ "title_aux": "ComfyUI_DiT [WIP]"
+ }
+ ],
+ "https://github.com/city96/ComfyUI_ExtraModels": [
+ [
+ "DiTCondLabelEmpty",
+ "DiTCondLabelSelect",
+ "DitCheckpointLoader",
+ "ExtraVAELoader",
+ "PixArtCheckpointLoader",
+ "PixArtDPMSampler",
+ "PixArtLoraLoader",
+ "PixArtResolutionSelect",
+ "PixArtT5TextEncode",
+ "T5TextEncode",
+ "T5v11Loader"
+ ],
+ {
+ "title_aux": "Extra Models for ComfyUI"
+ }
+ ],
+ "https://github.com/city96/ComfyUI_NetDist": [
+ [
+ "CombineImageBatch",
+ "FetchRemote",
+ "LoadCurrentWorkflowJSON",
+ "LoadDiskWorkflowJSON",
+ "LoadImageUrl",
+ "LoadLatentNumpy",
+ "LoadLatentUrl",
+ "RemoteChainEnd",
+ "RemoteChainStart",
+ "RemoteQueueSimple",
+ "RemoteQueueWorker",
+ "SaveDiskWorkflowJSON",
+ "SaveImageUrl",
+ "SaveLatentNumpy"
+ ],
+ {
+ "title_aux": "ComfyUI_NetDist"
+ }
+ ],
+ "https://github.com/city96/SD-Advanced-Noise": [
+ [
+ "LatentGaussianNoise",
+ "MathEncode"
+ ],
+ {
+ "title_aux": "SD-Advanced-Noise"
+ }
+ ],
+ "https://github.com/city96/SD-Latent-Interposer": [
+ [
+ "LatentInterposer"
+ ],
+ {
+ "title_aux": "Latent-Interposer"
+ }
+ ],
+ "https://github.com/city96/SD-Latent-Upscaler": [
+ [
+ "LatentUpscaler"
+ ],
+ {
+ "title_aux": "SD-Latent-Upscaler"
+ }
+ ],
+ "https://github.com/civitai/comfy-nodes": [
+ [
+ "CivitAI_Checkpoint_Loader",
+ "CivitAI_Lora_Loader"
+ ],
+ {
+ "title_aux": "comfy-nodes"
+ }
+ ],
+ "https://github.com/comfyanonymous/ComfyUI": [
+ [
+ "BasicScheduler",
+ "CLIPLoader",
+ "CLIPMergeSimple",
+ "CLIPSave",
+ "CLIPSetLastLayer",
+ "CLIPTextEncode",
+ "CLIPTextEncodeControlnet",
+ "CLIPTextEncodeSDXL",
+ "CLIPTextEncodeSDXLRefiner",
+ "CLIPVisionEncode",
+ "CLIPVisionLoader",
+ "Canny",
+ "CheckpointLoader",
+ "CheckpointLoaderSimple",
+ "CheckpointSave",
+ "ConditioningAverage",
+ "ConditioningCombine",
+ "ConditioningConcat",
+ "ConditioningSetArea",
+ "ConditioningSetAreaPercentage",
+ "ConditioningSetAreaStrength",
+ "ConditioningSetMask",
+ "ConditioningSetTimestepRange",
+ "ConditioningZeroOut",
+ "ControlNetApply",
+ "ControlNetApplyAdvanced",
+ "ControlNetLoader",
+ "CropMask",
+ "DiffControlNetLoader",
+ "DiffusersLoader",
+ "DualCLIPLoader",
+ "EmptyImage",
+ "EmptyLatentImage",
+ "ExponentialScheduler",
+ "FeatherMask",
+ "FlipSigmas",
+ "FreeU",
+ "FreeU_V2",
+ "GLIGENLoader",
+ "GLIGENTextBoxApply",
+ "GrowMask",
+ "HyperTile",
+ "HypernetworkLoader",
+ "ImageBatch",
+ "ImageBlend",
+ "ImageBlur",
+ "ImageColorToMask",
+ "ImageCompositeMasked",
+ "ImageCrop",
+ "ImageFromBatch",
+ "ImageInvert",
+ "ImageOnlyCheckpointLoader",
+ "ImageOnlyCheckpointSave",
+ "ImagePadForOutpaint",
+ "ImageQuantize",
+ "ImageScale",
+ "ImageScaleBy",
+ "ImageScaleToTotalPixels",
+ "ImageSharpen",
+ "ImageToMask",
+ "ImageUpscaleWithModel",
+ "InpaintModelConditioning",
+ "InvertMask",
+ "JoinImageWithAlpha",
+ "KSampler",
+ "KSamplerAdvanced",
+ "KSamplerSelect",
+ "KarrasScheduler",
+ "LatentAdd",
+ "LatentBatch",
+ "LatentBatchSeedBehavior",
+ "LatentBlend",
+ "LatentComposite",
+ "LatentCompositeMasked",
+ "LatentCrop",
+ "LatentFlip",
+ "LatentFromBatch",
+ "LatentInterpolate",
+ "LatentMultiply",
+ "LatentRotate",
+ "LatentSubtract",
+ "LatentUpscale",
+ "LatentUpscaleBy",
+ "LoadImage",
+ "LoadImageMask",
+ "LoadLatent",
+ "LoraLoader",
+ "LoraLoaderModelOnly",
+ "MaskComposite",
+ "MaskToImage",
+ "ModelMergeAdd",
+ "ModelMergeBlocks",
+ "ModelMergeSimple",
+ "ModelMergeSubtract",
+ "ModelSamplingContinuousEDM",
+ "ModelSamplingDiscrete",
+ "ModelSamplingStableCascade",
+ "PatchModelAddDownscale",
+ "PerpNeg",
+ "PhotoMakerEncode",
+ "PhotoMakerLoader",
+ "PolyexponentialScheduler",
+ "PorterDuffImageComposite",
+ "PreviewImage",
+ "RebatchImages",
+ "RebatchLatents",
+ "RepeatImageBatch",
+ "RepeatLatentBatch",
+ "RescaleCFG",
+ "SDTurboScheduler",
+ "SD_4XUpscale_Conditioning",
+ "SVD_img2vid_Conditioning",
+ "SamplerCustom",
+ "SamplerDPMPP_2M_SDE",
+ "SamplerDPMPP_SDE",
+ "SaveAnimatedPNG",
+ "SaveAnimatedWEBP",
+ "SaveImage",
+ "SaveLatent",
+ "SelfAttentionGuidance",
+ "SetLatentNoiseMask",
+ "SolidMask",
+ "SplitImageWithAlpha",
+ "SplitSigmas",
+ "StableCascade_EmptyLatentImage",
+ "StableCascade_StageB_Conditioning",
+ "StableZero123_Conditioning",
+ "StableZero123_Conditioning_Batched",
+ "StyleModelApply",
+ "StyleModelLoader",
+ "TomePatchModel",
+ "UNETLoader",
+ "UpscaleModelLoader",
+ "VAEDecode",
+ "VAEDecodeTiled",
+ "VAEEncode",
+ "VAEEncodeForInpaint",
+ "VAEEncodeTiled",
+ "VAELoader",
+ "VAESave",
+ "VPScheduler",
+ "VideoLinearCFGGuidance",
+ "unCLIPCheckpointLoader",
+ "unCLIPConditioning"
+ ],
+ {
+ "title_aux": "ComfyUI"
+ }
+ ],
+ "https://github.com/comfyanonymous/ComfyUI_experiments": [
+ [
+ "ModelMergeBlockNumber",
+ "ModelMergeSDXL",
+ "ModelMergeSDXLDetailedTransformers",
+ "ModelMergeSDXLTransformers",
+ "ModelSamplerTonemapNoiseTest",
+ "ReferenceOnlySimple",
+ "RescaleClassifierFreeGuidanceTest",
+ "TonemapNoiseWithRescaleCFG"
+ ],
+ {
+ "title_aux": "ComfyUI_experiments"
+ }
+ ],
+ "https://github.com/concarne000/ConCarneNode": [
+ [
+ "BingImageGrabber",
+ "Zephyr"
+ ],
+ {
+ "title_aux": "ConCarneNode"
+ }
+ ],
+ "https://github.com/coreyryanhanson/ComfyQR": [
+ [
+ "comfy-qr-by-image-size",
+ "comfy-qr-by-module-size",
+ "comfy-qr-by-module-split",
+ "comfy-qr-mask_errors"
+ ],
+ {
+ "title_aux": "ComfyQR"
+ }
+ ],
+ "https://github.com/coreyryanhanson/ComfyQR-scanning-nodes": [
+ [
+ "comfy-qr-read",
+ "comfy-qr-validate"
+ ],
+ {
+ "title_aux": "ComfyQR-scanning-nodes"
+ }
+ ],
+ "https://github.com/cubiq/ComfyUI_IPAdapter_plus": [
+ [
+ "IPAdapterApply",
+ "IPAdapterApplyEncoded",
+ "IPAdapterApplyFaceID",
+ "IPAdapterBatchEmbeds",
+ "IPAdapterEncoder",
+ "IPAdapterLoadEmbeds",
+ "IPAdapterModelLoader",
+ "IPAdapterSaveEmbeds",
+ "IPAdapterTilesMasked",
+ "InsightFaceLoader",
+ "PrepImageForClipVision",
+ "PrepImageForInsightFace"
+ ],
+ {
+ "title_aux": "ComfyUI_IPAdapter_plus"
+ }
+ ],
+ "https://github.com/cubiq/ComfyUI_InstantID": [
+ [
+ "ApplyInstantID",
+ "FaceKeypointsPreprocessor",
+ "InstantIDFaceAnalysis",
+ "InstantIDModelLoader"
+ ],
+ {
+ "title_aux": "ComfyUI InstantID (Native Support)"
+ }
+ ],
+ "https://github.com/cubiq/ComfyUI_SimpleMath": [
+ [
+ "SimpleMath",
+ "SimpleMathDebug"
+ ],
+ {
+ "title_aux": "Simple Math"
+ }
+ ],
+ "https://github.com/cubiq/ComfyUI_essentials": [
+ [
+ "BatchCount+",
+ "CLIPTextEncodeSDXL+",
+ "ConsoleDebug+",
+ "DebugTensorShape+",
+ "DrawText+",
+ "ExtractKeyframes+",
+ "GetImageSize+",
+ "ImageApplyLUT+",
+ "ImageCASharpening+",
+ "ImageCompositeFromMaskBatch+",
+ "ImageCrop+",
+ "ImageDesaturate+",
+ "ImageEnhanceDifference+",
+ "ImageExpandBatch+",
+ "ImageFlip+",
+ "ImageFromBatch+",
+ "ImagePosterize+",
+ "ImageRemoveBackground+",
+ "ImageResize+",
+ "ImageSeamCarving+",
+ "KSamplerVariationsStochastic+",
+ "KSamplerVariationsWithNoise+",
+ "MaskBatch+",
+ "MaskBlur+",
+ "MaskExpandBatch+",
+ "MaskFlip+",
+ "MaskFromBatch+",
+ "MaskFromColor+",
+ "MaskPreview+",
+ "ModelCompile+",
+ "NoiseFromImage~",
+ "RemBGSession+",
+ "RemoveLatentMask+",
+ "SDXLEmptyLatentSizePicker+",
+ "SimpleMath+",
+ "TransitionMask+"
+ ],
+ {
+ "title_aux": "ComfyUI Essentials"
+ }
+ ],
+ "https://github.com/dagthomas/comfyui_dagthomas": [
+ [
+ "CSL",
+ "CSVPromptGenerator",
+ "PromptGenerator"
+ ],
+ {
+ "title_aux": "SDXL Auto Prompter"
+ }
+ ],
+ "https://github.com/daniel-lewis-ab/ComfyUI-Llama": [
+ [
+ "Call LLM Advanced",
+ "Call LLM Basic",
+ "LLM_Create_Completion Advanced",
+ "LLM_Detokenize",
+ "LLM_Embed",
+ "LLM_Eval",
+ "LLM_Load_State",
+ "LLM_Reset",
+ "LLM_Sample",
+ "LLM_Save_State",
+ "LLM_Token_BOS",
+ "LLM_Token_EOS",
+ "LLM_Tokenize",
+ "Load LLM Model Advanced",
+ "Load LLM Model Basic"
+ ],
+ {
+ "title_aux": "ComfyUI-Llama"
+ }
+ ],
+ "https://github.com/daniel-lewis-ab/ComfyUI-TTS": [
+ [
+ "Load_Piper_Model",
+ "Piper_Speak_Text"
+ ],
+ {
+ "title_aux": "ComfyUI-TTS"
+ }
+ ],
+ "https://github.com/darkpixel/darkprompts": [
+ [
+ "DarkCombine",
+ "DarkFaceIndexShuffle",
+ "DarkLoRALoader",
+ "DarkPrompt"
+ ],
+ {
+ "title_aux": "DarkPrompts"
+ }
+ ],
+ "https://github.com/davask/ComfyUI-MarasIT-Nodes": [
+ [
+ "MarasitBusNode",
+ "MarasitBusPipeNode",
+ "MarasitPipeNodeBasic",
+ "MarasitUniversalBusNode"
+ ],
+ {
+ "title_aux": "MarasIT Nodes"
+ }
+ ],
+ "https://github.com/dave-palt/comfyui_DSP_imagehelpers": [
+ [
+ "dsp-imagehelpers-concat"
+ ],
+ {
+ "title_aux": "comfyui_DSP_imagehelpers"
+ }
+ ],
+ "https://github.com/dawangraoming/ComfyUI_ksampler_gpu/raw/main/ksampler_gpu.py": [
+ [
+ "KSamplerAdvancedGPU",
+ "KSamplerGPU"
+ ],
+ {
+ "title_aux": "KSampler GPU"
+ }
+ ],
+ "https://github.com/daxthin/DZ-FaceDetailer": [
+ [
+ "DZ_Face_Detailer"
+ ],
+ {
+ "title_aux": "DZ-FaceDetailer"
+ }
+ ],
+ "https://github.com/deroberon/StableZero123-comfyui": [
+ [
+ "SDZero ImageSplit",
+ "Stablezero123",
+ "Stablezero123WithDepth"
+ ],
+ {
+ "title_aux": "StableZero123-comfyui"
+ }
+ ],
+ "https://github.com/deroberon/demofusion-comfyui": [
+ [
+ "Batch Unsampler",
+ "Demofusion",
+ "Demofusion From Single File",
+ "Iterative Mixing KSampler"
+ ],
+ {
+ "title_aux": "demofusion-comfyui"
+ }
+ ],
+ "https://github.com/dfl/comfyui-clip-with-break": [
+ [
+ "AdvancedCLIPTextEncodeWithBreak",
+ "CLIPTextEncodeWithBreak"
+ ],
+ {
+ "author": "dfl",
+ "description": "CLIP text encoder that does BREAK prompting like A1111",
+ "nickname": "CLIP with BREAK",
+ "title": "CLIP with BREAK syntax",
+ "title_aux": "comfyui-clip-with-break"
+ }
+ ],
+ "https://github.com/digitaljohn/comfyui-propost": [
+ [
+ "ProPostApplyLUT",
+ "ProPostDepthMapBlur",
+ "ProPostFilmGrain",
+ "ProPostRadialBlur",
+ "ProPostVignette"
+ ],
+ {
+ "title_aux": "ComfyUI-ProPost"
+ }
+ ],
+ "https://github.com/dimtoneff/ComfyUI-PixelArt-Detector": [
+ [
+ "PixelArtAddDitherPattern",
+ "PixelArtDetectorConverter",
+ "PixelArtDetectorSave",
+ "PixelArtDetectorToImage",
+ "PixelArtLoadPalettes"
+ ],
+ {
+ "title_aux": "ComfyUI PixelArt Detector"
+ }
+ ],
+ "https://github.com/diontimmer/ComfyUI-Vextra-Nodes": [
+ [
+ "Add Text To Image",
+ "Apply Instagram Filter",
+ "Create Solid Color",
+ "Flatten Colors",
+ "Generate Noise Image",
+ "GlitchThis Effect",
+ "Hue Rotation",
+ "Load Picture Index",
+ "Pixel Sort",
+ "Play Sound At Execution",
+ "Prettify Prompt Using distilgpt2",
+ "Swap Color Mode"
+ ],
+ {
+ "title_aux": "ComfyUI-Vextra-Nodes"
+ }
+ ],
+ "https://github.com/djbielejeski/a-person-mask-generator": [
+ [
+ "APersonMaskGenerator"
+ ],
+ {
+ "title_aux": "a-person-mask-generator"
+ }
+ ],
+ "https://github.com/dmarx/ComfyUI-AudioReactive": [
+ [
+ "OpAbs",
+ "OpBandpass",
+ "OpClamp",
+ "OpHarmonic",
+ "OpModulo",
+ "OpNormalize",
+ "OpNovelty",
+ "OpPercussive",
+ "OpPow",
+ "OpPow2",
+ "OpPredominant_pulse",
+ "OpQuantize",
+ "OpRms",
+ "OpSmoosh",
+ "OpSmooth",
+ "OpSqrt",
+ "OpStretch",
+ "OpSustain",
+ "OpThreshold"
+ ],
+ {
+ "title_aux": "ComfyUI-AudioReactive"
+ }
+ ],
+ "https://github.com/dmarx/ComfyUI-Keyframed": [
+ [
+ "Example",
+ "KfAddCurveToPGroup",
+ "KfAddCurveToPGroupx10",
+ "KfApplyCurveToCond",
+ "KfConditioningAdd",
+ "KfConditioningAddx10",
+ "KfCurveConstant",
+ "KfCurveDraw",
+ "KfCurveFromString",
+ "KfCurveFromYAML",
+ "KfCurveInverse",
+ "KfCurveToAcnLatentKeyframe",
+ "KfCurvesAdd",
+ "KfCurvesAddx10",
+ "KfCurvesDivide",
+ "KfCurvesMultiply",
+ "KfCurvesMultiplyx10",
+ "KfCurvesSubtract",
+ "KfDebug_Clip",
+ "KfDebug_Cond",
+ "KfDebug_Curve",
+ "KfDebug_Float",
+ "KfDebug_Image",
+ "KfDebug_Int",
+ "KfDebug_Latent",
+ "KfDebug_Model",
+ "KfDebug_Passthrough",
+ "KfDebug_Segs",
+ "KfDebug_String",
+ "KfDebug_Vae",
+ "KfDrawSchedule",
+ "KfEvaluateCurveAtT",
+ "KfGetCurveFromPGroup",
+ "KfGetScheduleConditionAtTime",
+ "KfGetScheduleConditionSlice",
+ "KfKeyframedCondition",
+ "KfKeyframedConditionWithText",
+ "KfPGroupCurveAdd",
+ "KfPGroupCurveMultiply",
+ "KfPGroupDraw",
+ "KfPGroupProd",
+ "KfPGroupSum",
+ "KfSetCurveLabel",
+ "KfSetKeyframe",
+ "KfSinusoidalAdjustAmplitude",
+ "KfSinusoidalAdjustFrequency",
+ "KfSinusoidalAdjustPhase",
+ "KfSinusoidalAdjustWavelength",
+ "KfSinusoidalEntangledZeroOneFromFrequencyx2",
+ "KfSinusoidalEntangledZeroOneFromFrequencyx3",
+ "KfSinusoidalEntangledZeroOneFromFrequencyx4",
+ "KfSinusoidalEntangledZeroOneFromFrequencyx5",
+ "KfSinusoidalEntangledZeroOneFromFrequencyx6",
+ "KfSinusoidalEntangledZeroOneFromFrequencyx7",
+ "KfSinusoidalEntangledZeroOneFromFrequencyx8",
+ "KfSinusoidalEntangledZeroOneFromFrequencyx9",
+ "KfSinusoidalEntangledZeroOneFromWavelengthx2",
+ "KfSinusoidalEntangledZeroOneFromWavelengthx3",
+ "KfSinusoidalEntangledZeroOneFromWavelengthx4",
+ "KfSinusoidalEntangledZeroOneFromWavelengthx5",
+ "KfSinusoidalEntangledZeroOneFromWavelengthx6",
+ "KfSinusoidalEntangledZeroOneFromWavelengthx7",
+ "KfSinusoidalEntangledZeroOneFromWavelengthx8",
+ "KfSinusoidalEntangledZeroOneFromWavelengthx9",
+ "KfSinusoidalGetAmplitude",
+ "KfSinusoidalGetFrequency",
+ "KfSinusoidalGetPhase",
+ "KfSinusoidalGetWavelength",
+ "KfSinusoidalWithFrequency",
+ "KfSinusoidalWithWavelength"
+ ],
+ {
+ "title_aux": "ComfyUI-Keyframed"
+ }
+ ],
+ "https://github.com/drago87/ComfyUI_Dragos_Nodes": [
+ [
+ "file_padding",
+ "image_info",
+ "lora_loader",
+ "vae_loader"
+ ],
+ {
+ "title_aux": "ComfyUI_Dragos_Nodes"
+ }
+ ],
+ "https://github.com/drustan-hawk/primitive-types": [
+ [
+ "float",
+ "int",
+ "string",
+ "string_multiline"
+ ],
+ {
+ "title_aux": "primitive-types"
+ }
+ ],
+ "https://github.com/ealkanat/comfyui_easy_padding": [
+ [
+ "comfyui-easy-padding"
+ ],
+ {
+ "title_aux": "ComfyUI Easy Padding"
+ }
+ ],
+ "https://github.com/edenartlab/eden_comfy_pipelines": [
+ [
+ "CLIP_Interrogator",
+ "Eden_Bool",
+ "Eden_Compare",
+ "Eden_DebugPrint",
+ "Eden_Float",
+ "Eden_Int",
+ "Eden_String",
+ "Filepicker",
+ "IMG_blender",
+ "IMG_padder",
+ "IMG_scaler",
+ "IMG_unpadder",
+ "If ANY execute A else B",
+ "LatentTypeConversion",
+ "SaveImageAdvanced",
+ "VAEDecode_to_folder"
+ ],
+ {
+ "title_aux": "eden_comfy_pipelines"
+ }
+ ],
+ "https://github.com/evanspearman/ComfyMath": [
+ [
+ "CM_BoolBinaryOperation",
+ "CM_BoolToInt",
+ "CM_BoolUnaryOperation",
+ "CM_BreakoutVec2",
+ "CM_BreakoutVec3",
+ "CM_BreakoutVec4",
+ "CM_ComposeVec2",
+ "CM_ComposeVec3",
+ "CM_ComposeVec4",
+ "CM_FloatBinaryCondition",
+ "CM_FloatBinaryOperation",
+ "CM_FloatToInt",
+ "CM_FloatToNumber",
+ "CM_FloatUnaryCondition",
+ "CM_FloatUnaryOperation",
+ "CM_IntBinaryCondition",
+ "CM_IntBinaryOperation",
+ "CM_IntToBool",
+ "CM_IntToFloat",
+ "CM_IntToNumber",
+ "CM_IntUnaryCondition",
+ "CM_IntUnaryOperation",
+ "CM_NearestSDXLResolution",
+ "CM_NumberBinaryCondition",
+ "CM_NumberBinaryOperation",
+ "CM_NumberToFloat",
+ "CM_NumberToInt",
+ "CM_NumberUnaryCondition",
+ "CM_NumberUnaryOperation",
+ "CM_SDXLResolution",
+ "CM_Vec2BinaryCondition",
+ "CM_Vec2BinaryOperation",
+ "CM_Vec2ScalarOperation",
+ "CM_Vec2ToScalarBinaryOperation",
+ "CM_Vec2ToScalarUnaryOperation",
+ "CM_Vec2UnaryCondition",
+ "CM_Vec2UnaryOperation",
+ "CM_Vec3BinaryCondition",
+ "CM_Vec3BinaryOperation",
+ "CM_Vec3ScalarOperation",
+ "CM_Vec3ToScalarBinaryOperation",
+ "CM_Vec3ToScalarUnaryOperation",
+ "CM_Vec3UnaryCondition",
+ "CM_Vec3UnaryOperation",
+ "CM_Vec4BinaryCondition",
+ "CM_Vec4BinaryOperation",
+ "CM_Vec4ScalarOperation",
+ "CM_Vec4ToScalarBinaryOperation",
+ "CM_Vec4ToScalarUnaryOperation",
+ "CM_Vec4UnaryCondition",
+ "CM_Vec4UnaryOperation"
+ ],
+ {
+ "title_aux": "ComfyMath"
+ }
+ ],
+ "https://github.com/fearnworks/ComfyUI_FearnworksNodes/raw/main/fw_nodes.py": [
+ [
+ "Count Files in Directory (FW)",
+ "Count Tokens (FW)",
+ "Token Count Ranker(FW)",
+ "Trim To Tokens (FW)"
+ ],
+ {
+ "title_aux": "Fearnworks Custom Nodes"
+ }
+ ],
+ "https://github.com/fexli/fexli-util-node-comfyui": [
+ [
+ "FEBCPrompt",
+ "FEBatchGenStringBCDocker",
+ "FEColor2Image",
+ "FEColorOut",
+ "FEDataInsertor",
+ "FEDataPacker",
+ "FEDataUnpacker",
+ "FEDeepClone",
+ "FEDictPacker",
+ "FEDictUnpacker",
+ "FEEncLoraLoader",
+ "FEExtraInfoAdd",
+ "FEGenStringBCDocker",
+ "FEGenStringGPT",
+ "FEImageNoiseGenerate",
+ "FEImagePadForOutpaint",
+ "FEImagePadForOutpaintByImage",
+ "FEOperatorIf",
+ "FEPythonStrOp",
+ "FERandomLoraSelect",
+ "FERandomPrompt",
+ "FERandomizedColor2Image",
+ "FERandomizedColorOut",
+ "FERerouteWithName",
+ "FESaveEncryptImage",
+ "FETextCombine",
+ "FETextInput"
+ ],
+ {
+ "title_aux": "fexli-util-node-comfyui"
+ }
+ ],
+ "https://github.com/filipemeneses/comfy_pixelization": [
+ [
+ "Pixelization"
+ ],
+ {
+ "title_aux": "Pixelization"
+ }
+ ],
+ "https://github.com/filliptm/ComfyUI_Fill-Nodes": [
+ [
+ "FL_ImageCaptionSaver",
+ "FL_ImageRandomizer"
+ ],
+ {
+ "title_aux": "ComfyUI_Fill-Nodes"
+ }
+ ],
+ "https://github.com/fitCorder/fcSuite/raw/main/fcSuite.py": [
+ [
+ "fcFloat",
+ "fcFloatMatic",
+ "fcHex",
+ "fcInteger"
+ ],
+ {
+ "title_aux": "fcSuite"
+ }
+ ],
+ "https://github.com/florestefano1975/comfyui-portrait-master": [
+ [
+ "PortraitMaster"
+ ],
+ {
+ "title_aux": "comfyui-portrait-master"
+ }
+ ],
+ "https://github.com/florestefano1975/comfyui-prompt-composer": [
+ [
+ "PromptComposerCustomLists",
+ "PromptComposerEffect",
+ "PromptComposerGrouping",
+ "PromptComposerMerge",
+ "PromptComposerStyler",
+ "PromptComposerTextSingle",
+ "promptComposerTextMultiple"
+ ],
+ {
+ "title_aux": "comfyui-prompt-composer"
+ }
+ ],
+ "https://github.com/flowtyone/ComfyUI-Flowty-LDSR": [
+ [
+ "LDSRModelLoader",
+ "LDSRUpscale",
+ "LDSRUpscaler"
+ ],
+ {
+ "title_aux": "ComfyUI-Flowty-LDSR"
+ }
+ ],
+ "https://github.com/flyingshutter/As_ComfyUI_CustomNodes": [
+ [
+ "BatchIndex_AS",
+ "CropImage_AS",
+ "ImageMixMasked_As",
+ "ImageToMask_AS",
+ "Increment_AS",
+ "Int2Any_AS",
+ "LatentAdd_AS",
+ "LatentMixMasked_As",
+ "LatentMix_AS",
+ "LatentToImages_AS",
+ "LoadLatent_AS",
+ "MapRange_AS",
+ "MaskToImage_AS",
+ "Math_AS",
+ "NoiseImage_AS",
+ "Number2Float_AS",
+ "Number2Int_AS",
+ "Number_AS",
+ "SaveLatent_AS",
+ "TextToImage_AS",
+ "TextWildcardList_AS"
+ ],
+ {
+ "title_aux": "As_ComfyUI_CustomNodes"
+ }
+ ],
+ "https://github.com/foxtrot-roger/comfyui-rf-nodes": [
+ [
+ "LogBool",
+ "LogFloat",
+ "LogInt",
+ "LogNumber",
+ "LogString",
+ "LogVec2",
+ "LogVec3",
+ "RF_AtIndexString",
+ "RF_BoolToString",
+ "RF_FloatToString",
+ "RF_IntToString",
+ "RF_JsonStyleLoader",
+ "RF_MergeLines",
+ "RF_NumberToString",
+ "RF_OptionsString",
+ "RF_RangeFloat",
+ "RF_RangeInt",
+ "RF_RangeNumber",
+ "RF_SavePromptInfo",
+ "RF_SplitLines",
+ "RF_TextConcatenate",
+ "RF_TextInput",
+ "RF_TextReplace",
+ "RF_Timestamp",
+ "RF_ToString",
+ "RF_Vec2ToString",
+ "RF_Vec3ToString",
+ "TextLine"
+ ],
+ {
+ "title_aux": "RF Nodes"
+ }
+ ],
+ "https://github.com/gemell1/ComfyUI_GMIC": [
+ [
+ "GmicCliWrapper"
+ ],
+ {
+ "title_aux": "ComfyUI_GMIC"
+ }
+ ],
+ "https://github.com/giriss/comfy-image-saver": [
+ [
+ "Cfg Literal",
+ "Checkpoint Selector",
+ "Int Literal",
+ "Sampler Selector",
+ "Save Image w/Metadata",
+ "Scheduler Selector",
+ "Seed Generator",
+ "String Literal",
+ "Width/Height Literal"
+ ],
+ {
+ "title_aux": "Save Image with Generation Metadata"
+ }
+ ],
+ "https://github.com/glibsonoran/Plush-for-ComfyUI": [
+ [
+ "DalleImage",
+ "Enhancer",
+ "ImgTextSwitch",
+ "Plush-Exif Wrangler",
+ "mulTextSwitch"
+ ],
+ {
+ "title_aux": "Plush-for-ComfyUI"
+ }
+ ],
+ "https://github.com/glifxyz/ComfyUI-GlifNodes": [
+ [
+ "GlifConsistencyDecoder",
+ "GlifPatchConsistencyDecoderTiled",
+ "SDXLAspectRatio"
+ ],
+ {
+ "title_aux": "ComfyUI-GlifNodes"
+ }
+ ],
+ "https://github.com/glowcone/comfyui-base64-to-image": [
+ [
+ "LoadImageFromBase64"
+ ],
+ {
+ "title_aux": "Load Image From Base64 URI"
+ }
+ ],
+ "https://github.com/godspede/ComfyUI_Substring": [
+ [
+ "SubstringTheory"
+ ],
+ {
+ "title_aux": "ComfyUI Substring"
+ }
+ ],
+ "https://github.com/gokayfem/ComfyUI_VLM_nodes": [
+ [
+ "Joytag",
+ "JsonToText",
+ "KeywordExtraction",
+ "LLMLoader",
+ "LLMPromptGenerator",
+ "LLMSampler",
+ "LLava Loader Simple",
+ "LLavaPromptGenerator",
+ "LLavaSamplerAdvanced",
+ "LLavaSamplerSimple",
+ "LlavaClipLoader",
+ "MoonDream",
+ "PromptGenerateAPI",
+ "SimpleText",
+ "Suggester",
+ "ViewText"
+ ],
+ {
+ "title_aux": "VLM_nodes"
+ }
+ ],
+ "https://github.com/guoyk93/yk-node-suite-comfyui": [
+ [
+ "YKImagePadForOutpaint",
+ "YKMaskToImage"
+ ],
+ {
+ "title_aux": "y.k.'s ComfyUI node suite"
+ }
+ ],
+ "https://github.com/hhhzzyang/Comfyui_Lama": [
+ [
+ "LamaApply",
+ "LamaModelLoader",
+ "YamlConfigLoader"
+ ],
+ {
+ "title_aux": "Comfyui-Lama"
+ }
+ ],
+ "https://github.com/hinablue/ComfyUI_3dPoseEditor": [
+ [
+ "Hina.PoseEditor3D"
+ ],
+ {
+ "title_aux": "ComfyUI 3D Pose Editor"
+ }
+ ],
+ "https://github.com/hustille/ComfyUI_Fooocus_KSampler": [
+ [
+ "KSampler With Refiner (Fooocus)"
+ ],
+ {
+ "title_aux": "ComfyUI_Fooocus_KSampler"
+ }
+ ],
+ "https://github.com/hustille/ComfyUI_hus_utils": [
+ [
+ "3way Prompt Styler",
+ "Batch State",
+ "Date Time Format",
+ "Debug Extra",
+ "Fetch widget value",
+ "Text Hash"
+ ],
+ {
+ "title_aux": "hus' utils for ComfyUI"
+ }
+ ],
+ "https://github.com/hylarucoder/ComfyUI-Eagle-PNGInfo": [
+ [
+ "EagleImageNode",
+ "SDXLPromptStyler",
+ "SDXLPromptStylerAdvanced",
+ "SDXLResolutionPresets"
+ ],
+ {
+ "title_aux": "Eagle PNGInfo"
+ }
+ ],
+ "https://github.com/idrirap/ComfyUI-Lora-Auto-Trigger-Words": [
+ [
+ "FusionText",
+ "LoraListNames",
+ "LoraLoaderAdvanced",
+ "LoraLoaderStackedAdvanced",
+ "LoraLoaderStackedVanilla",
+ "LoraLoaderVanilla",
+ "LoraTagsOnly",
+ "Randomizer",
+ "TagsFormater",
+ "TagsSelector",
+ "TextInputBasic"
+ ],
+ {
+ "title_aux": "ComfyUI-Lora-Auto-Trigger-Words"
+ }
+ ],
+ "https://github.com/imb101/ComfyUI-FaceSwap": [
+ [
+ "FaceSwapNode"
+ ],
+ {
+ "title_aux": "FaceSwap"
+ }
+ ],
+ "https://github.com/jags111/ComfyUI_Jags_Audiotools": [
+ [
+ "BatchJoinAudio",
+ "BatchToList",
+ "BitCrushAudioFX",
+ "BulkVariation",
+ "ChorusAudioFX",
+ "ClippingAudioFX",
+ "CompressorAudioFX",
+ "ConcatAudioList",
+ "ConvolutionAudioFX",
+ "CutAudio",
+ "DelayAudioFX",
+ "DistortionAudioFX",
+ "DuplicateAudio",
+ "GainAudioFX",
+ "GenerateAudioSample",
+ "GenerateAudioWave",
+ "GetAudioFromFolderIndex",
+ "GetSingle",
+ "GetStringByIndex",
+ "HighShelfFilter",
+ "HighpassFilter",
+ "ImageToSpectral",
+ "InvertAudioFX",
+ "JoinAudio",
+ "LadderFilter",
+ "LimiterAudioFX",
+ "ListToBatch",
+ "LoadAudioDir",
+ "LoadAudioFile",
+ "LoadAudioModel (DD)",
+ "LoadVST3",
+ "LowShelfFilter",
+ "LowpassFilter",
+ "MP3CompressorAudioFX",
+ "MixAudioTensors",
+ "NoiseGateAudioFX",
+ "OTTAudioFX",
+ "PeakFilter",
+ "PhaserEffectAudioFX",
+ "PitchShiftAudioFX",
+ "PlotSpectrogram",
+ "PreviewAudioFile",
+ "PreviewAudioTensor",
+ "ResampleAudio",
+ "ReverbAudioFX",
+ "ReverseAudio",
+ "SaveAudioTensor",
+ "SequenceVariation",
+ "SliceAudio",
+ "SoundPlayer",
+ "StretchAudio",
+ "samplerate"
+ ],
+ {
+ "author": "jags111",
+ "description": "This extension offers various audio generation tools",
+ "nickname": "Audiotools",
+ "title": "Jags_Audiotools",
+ "title_aux": "ComfyUI_Jags_Audiotools"
+ }
+ ],
+ "https://github.com/jags111/ComfyUI_Jags_VectorMagic": [
+ [
+ "CircularVAEDecode",
+ "JagsCLIPSeg",
+ "JagsClipseg",
+ "JagsCombineMasks",
+ "SVG",
+ "YoloSEGdetectionNode",
+ "YoloSegNode",
+ "color_drop",
+ "my unique name",
+ "xy_Tiling_KSampler"
+ ],
+ {
+ "author": "jags111",
+ "description": "This extension offers various vector manipulation and generation tools",
+ "nickname": "Jags_VectorMagic",
+ "title": "Jags_VectorMagic",
+ "title_aux": "ComfyUI_Jags_VectorMagic"
+ }
+ ],
+ "https://github.com/jags111/efficiency-nodes-comfyui": [
+ [
+ "AnimateDiff Script",
+ "Apply ControlNet Stack",
+ "Control Net Stacker",
+ "Eff. Loader SDXL",
+ "Efficient Loader",
+ "HighRes-Fix Script",
+ "Image Overlay",
+ "Join XY Inputs of Same Type",
+ "KSampler (Efficient)",
+ "KSampler Adv. (Efficient)",
+ "KSampler SDXL (Eff.)",
+ "LatentUpscaler",
+ "LoRA Stack to String converter",
+ "LoRA Stacker",
+ "Manual XY Entry Info",
+ "NNLatentUpscale",
+ "Noise Control Script",
+ "Pack SDXL Tuple",
+ "Tiled Upscaler Script",
+ "Unpack SDXL Tuple",
+ "XY Input: Add/Return Noise",
+ "XY Input: Aesthetic Score",
+ "XY Input: CFG Scale",
+ "XY Input: Checkpoint",
+ "XY Input: Clip Skip",
+ "XY Input: Control Net",
+ "XY Input: Control Net Plot",
+ "XY Input: Denoise",
+ "XY Input: LoRA",
+ "XY Input: LoRA Plot",
+ "XY Input: LoRA Stacks",
+ "XY Input: Manual XY Entry",
+ "XY Input: Prompt S/R",
+ "XY Input: Refiner On/Off",
+ "XY Input: Sampler/Scheduler",
+ "XY Input: Seeds++ Batch",
+ "XY Input: Steps",
+ "XY Input: VAE",
+ "XY Plot"
+ ],
+ {
+ "title_aux": "Efficiency Nodes for ComfyUI Version 2.0+"
+ }
+ ],
+ "https://github.com/jamal-alkharrat/ComfyUI_rotate_image": [
+ [
+ "RotateImage"
+ ],
+ {
+ "title_aux": "ComfyUI_rotate_image"
+ }
+ ],
+ "https://github.com/jamesWalker55/comfyui-various": [
+ [],
+ {
+ "nodename_pattern": "^JW",
+ "title_aux": "Various ComfyUI Nodes by Type"
+ }
+ ],
+ "https://github.com/jesenzhang/ComfyUI_StreamDiffusion": [
+ [
+ "StreamDiffusion_Loader",
+ "StreamDiffusion_Sampler"
+ ],
+ {
+ "title_aux": "ComfyUI_StreamDiffusion"
+ }
+ ],
+ "https://github.com/jitcoder/lora-info": [
+ [
+ "ImageFromURL",
+ "LoraInfo"
+ ],
+ {
+ "title_aux": "LoraInfo"
+ }
+ ],
+ "https://github.com/jjkramhoeft/ComfyUI-Jjk-Nodes": [
+ [
+ "JjkConcat",
+ "JjkShowText",
+ "JjkText",
+ "SDXLRecommendedImageSize"
+ ],
+ {
+ "title_aux": "ComfyUI-Jjk-Nodes"
+ }
+ ],
+ "https://github.com/jojkaart/ComfyUI-sampler-lcm-alternative": [
+ [
+ "LCMScheduler",
+ "SamplerLCMAlternative",
+ "SamplerLCMCycle"
+ ],
+ {
+ "title_aux": "ComfyUI-sampler-lcm-alternative"
+ }
+ ],
+ "https://github.com/jordoh/ComfyUI-Deepface": [
+ [
+ "DeepfaceExtractFaces",
+ "DeepfaceVerify"
+ ],
+ {
+ "title_aux": "ComfyUI Deepface"
+ }
+ ],
+ "https://github.com/jtrue/ComfyUI-JaRue": [
+ [
+ "Text2Image_jru",
+ "YouTube2Prompt_jru"
+ ],
+ {
+ "nodename_pattern": "_jru$",
+ "title_aux": "ComfyUI-JaRue"
+ }
+ ],
+ "https://github.com/ka-puna/comfyui-yanc": [
+ [
+ "YANC.ConcatStrings",
+ "YANC.FormatDatetimeString",
+ "YANC.GetWidgetValueString",
+ "YANC.IntegerCaster",
+ "YANC.MultilineString",
+ "YANC.TruncateString"
+ ],
+ {
+ "title_aux": "comfyui-yanc"
+ }
+ ],
+ "https://github.com/kadirnar/ComfyUI-Transformers": [
+ [
+ "DepthEstimationPipeline",
+ "ImageClassificationPipeline",
+ "ImageSegmentationPipeline",
+ "ObjectDetectionPipeline"
+ ],
+ {
+ "title_aux": "ComfyUI-Transformers"
+ }
+ ],
+ "https://github.com/kenjiqq/qq-nodes-comfyui": [
+ [
+ "Any List",
+ "Axis Pack",
+ "Axis Unpack",
+ "Image Accumulator End",
+ "Image Accumulator Start",
+ "Load Lines From Text File",
+ "Slice List",
+ "Text Splitter",
+ "XY Grid Helper"
+ ],
+ {
+ "title_aux": "qq-nodes-comfyui"
+ }
+ ],
+ "https://github.com/kft334/Knodes": [
+ [
+ "Image(s) To Websocket (Base64)",
+ "ImageOutput",
+ "Load Image (Base64)",
+ "Load Images (Base64)"
+ ],
+ {
+ "title_aux": "Knodes"
+ }
+ ],
+ "https://github.com/kijai/ComfyUI-CCSR": [
+ [
+ "CCSR_Model_Select",
+ "CCSR_Upscale"
+ ],
+ {
+ "title_aux": "ComfyUI-CCSR"
+ }
+ ],
+ "https://github.com/kijai/ComfyUI-DDColor": [
+ [
+ "DDColor_Colorize"
+ ],
+ {
+ "title_aux": "ComfyUI-DDColor"
+ }
+ ],
+ "https://github.com/kijai/ComfyUI-KJNodes": [
+ [
+ "AddLabel",
+ "BatchCLIPSeg",
+ "BatchCropFromMask",
+ "BatchCropFromMaskAdvanced",
+ "BatchUncrop",
+ "BatchUncropAdvanced",
+ "BboxToInt",
+ "ColorMatch",
+ "ColorToMask",
+ "CondPassThrough",
+ "ConditioningMultiCombine",
+ "ConditioningSetMaskAndCombine",
+ "ConditioningSetMaskAndCombine3",
+ "ConditioningSetMaskAndCombine4",
+ "ConditioningSetMaskAndCombine5",
+ "CreateAudioMask",
+ "CreateFadeMask",
+ "CreateFadeMaskAdvanced",
+ "CreateFluidMask",
+ "CreateGradientMask",
+ "CreateMagicMask",
+ "CreateShapeMask",
+ "CreateTextMask",
+ "CreateVoronoiMask",
+ "CrossFadeImages",
+ "DummyLatentOut",
+ "EffnetEncode",
+ "EmptyLatentImagePresets",
+ "FilterZeroMasksAndCorrespondingImages",
+ "FlipSigmasAdjusted",
+ "FloatConstant",
+ "GLIGENTextBoxApplyBatch",
+ "GenerateNoise",
+ "GetImageRangeFromBatch",
+ "GetImagesFromBatchIndexed",
+ "GetLatentsFromBatchIndexed",
+ "GrowMaskWithBlur",
+ "INTConstant",
+ "ImageBatchRepeatInterleaving",
+ "ImageBatchTestPattern",
+ "ImageConcanate",
+ "ImageGrabPIL",
+ "ImageGridComposite2x2",
+ "ImageGridComposite3x3",
+ "ImageTransformByNormalizedAmplitude",
+ "ImageUpscaleWithModelBatched",
+ "InjectNoiseToLatent",
+ "InsertImageBatchByIndexes",
+ "NormalizeLatent",
+ "NormalizedAmplitudeToMask",
+ "OffsetMask",
+ "OffsetMaskByNormalizedAmplitude",
+ "ReferenceOnlySimple3",
+ "ReplaceImagesInBatch",
+ "ResizeMask",
+ "ReverseImageBatch",
+ "RoundMask",
+ "SaveImageWithAlpha",
+ "ScaleBatchPromptSchedule",
+ "SomethingToString",
+ "SoundReactive",
+ "SplitBboxes",
+ "StableZero123_BatchSchedule",
+ "StringConstant",
+ "VRAM_Debug",
+ "WidgetToString"
+ ],
+ {
+ "title_aux": "KJNodes for ComfyUI"
+ }
+ ],
+ "https://github.com/kijai/ComfyUI-Marigold": [
+ [
+ "ColorizeDepthmap",
+ "MarigoldDepthEstimation",
+ "RemapDepth",
+ "SaveImageOpenEXR"
+ ],
+ {
+ "title_aux": "Marigold depth estimation in ComfyUI"
+ }
+ ],
+ "https://github.com/kijai/ComfyUI-SVD": [
+ [
+ "SVDimg2vid"
+ ],
+ {
+ "title_aux": "ComfyUI-SVD"
+ }
+ ],
+ "https://github.com/kinfolk0117/ComfyUI_GradientDeepShrink": [
+ [
+ "GradientPatchModelAddDownscale",
+ "GradientPatchModelAddDownscaleAdvanced"
+ ],
+ {
+ "title_aux": "ComfyUI_GradientDeepShrink"
+ }
+ ],
+ "https://github.com/kinfolk0117/ComfyUI_Pilgram": [
+ [
+ "Pilgram"
+ ],
+ {
+ "title_aux": "ComfyUI_Pilgram"
+ }
+ ],
+ "https://github.com/kinfolk0117/ComfyUI_SimpleTiles": [
+ [
+ "DynamicTileMerge",
+ "DynamicTileSplit",
+ "TileCalc",
+ "TileMerge",
+ "TileSplit"
+ ],
+ {
+ "title_aux": "SimpleTiles"
+ }
+ ],
+ "https://github.com/kinfolk0117/ComfyUI_TiledIPAdapter": [
+ [
+ "TiledIPAdapter"
+ ],
+ {
+ "title_aux": "TiledIPAdapter"
+ }
+ ],
+ "https://github.com/knuknX/ComfyUI-Image-Tools": [
+ [
+ "BatchImagePathLoader",
+ "ImageBgRemoveProcessor",
+ "ImageCheveretoUploader",
+ "ImageStandardResizeProcessor",
+ "JSONMessageNotifyTool",
+ "PreviewJSONNode",
+ "SingleImagePathLoader",
+ "SingleImageUrlLoader"
+ ],
+ {
+ "title_aux": "ComfyUI-Image-Tools"
+ }
+ ],
+ "https://github.com/kohya-ss/ControlNet-LLLite-ComfyUI": [
+ [
+ "LLLiteLoader"
+ ],
+ {
+ "title_aux": "ControlNet-LLLite-ComfyUI"
+ }
+ ],
+ "https://github.com/komojini/ComfyUI_SDXL_DreamBooth_LoRA_CustomNodes": [
+ [
+ "S3 Bucket LoRA",
+ "S3Bucket_Load_LoRA",
+ "XL DreamBooth LoRA",
+ "XLDB_LoRA"
+ ],
+ {
+ "title_aux": "ComfyUI_SDXL_DreamBooth_LoRA_CustomNodes"
+ }
+ ],
+ "https://github.com/komojini/komojini-comfyui-nodes": [
+ [
+ "BatchCreativeInterpolationNodeDynamicSettings",
+ "CachedGetter",
+ "DragNUWAImageCanvas",
+ "FlowBuilder",
+ "FlowBuilder (adv)",
+ "FlowBuilder (advanced)",
+ "FlowBuilder (advanced) Setter",
+ "FlowBuilderSetter",
+ "FlowBuilderSetter (adv)",
+ "Getter",
+ "ImageCropByRatio",
+ "ImageCropByRatioAndResize",
+ "ImageGetter",
+ "ImageMerger",
+ "ImagesCropByRatioAndResizeBatch",
+ "KSamplerAdvancedCacheable",
+ "KSamplerCacheable",
+ "Setter",
+ "UltimateVideoLoader",
+ "UltimateVideoLoader (simple)",
+ "YouTubeVideoLoader"
+ ],
+ {
+ "title_aux": "komojini-comfyui-nodes"
+ }
+ ],
+ "https://github.com/kwaroran/abg-comfyui": [
+ [
+ "Remove Image Background (abg)"
+ ],
+ {
+ "title_aux": "abg-comfyui"
+ }
+ ],
+ "https://github.com/laksjdjf/LCMSampler-ComfyUI": [
+ [
+ "SamplerLCM",
+ "TAESDLoader"
+ ],
+ {
+ "title_aux": "LCMSampler-ComfyUI"
+ }
+ ],
+ "https://github.com/laksjdjf/LoRA-Merger-ComfyUI": [
+ [
+ "LoraLoaderFromWeight",
+ "LoraLoaderWeightOnly",
+ "LoraMerge",
+ "LoraSave"
+ ],
+ {
+ "title_aux": "LoRA-Merger-ComfyUI"
+ }
+ ],
+ "https://github.com/laksjdjf/attention-couple-ComfyUI": [
+ [
+ "Attention couple"
+ ],
+ {
+ "title_aux": "attention-couple-ComfyUI"
+ }
+ ],
+ "https://github.com/laksjdjf/cd-tuner_negpip-ComfyUI": [
+ [
+ "CDTuner",
+ "Negapip",
+ "Negpip"
+ ],
+ {
+ "title_aux": "cd-tuner_negpip-ComfyUI"
+ }
+ ],
+ "https://github.com/laksjdjf/pfg-ComfyUI": [
+ [
+ "PFG"
+ ],
+ {
+ "title_aux": "pfg-ComfyUI"
+ }
+ ],
+ "https://github.com/lilly1987/ComfyUI_node_Lilly": [
+ [
+ "CheckpointLoaderSimpleText",
+ "LoraLoaderText",
+ "LoraLoaderTextRandom",
+ "Random_Sampler",
+ "VAELoaderDecode"
+ ],
+ {
+ "title_aux": "simple wildcard for ComfyUI"
+ }
+ ],
+ "https://github.com/lldacing/comfyui-easyapi-nodes": [
+ [
+ "Base64ToImage",
+ "Base64ToMask",
+ "ImageToBase64",
+ "ImageToBase64Advanced",
+ "LoadImageFromURL",
+ "LoadImageToBase64",
+ "LoadMaskFromURL",
+ "MaskImageToBase64",
+ "MaskToBase64",
+ "MaskToBase64Image",
+ "SamAutoMaskSEGS"
+ ],
+ {
+ "title_aux": "comfyui-easyapi-nodes"
+ }
+ ],
+ "https://github.com/longgui0318/comfyui-mask-util": [
+ [
+ "Mask Region Info",
+ "Mask Selection Of Masks",
+ "Split Masks"
+ ],
+ {
+ "title_aux": "comfyui-mask-util"
+ }
+ ],
+ "https://github.com/lordgasmic/ComfyUI-Wildcards/raw/master/wildcards.py": [
+ [
+ "CLIPTextEncodeWithWildcards"
+ ],
+ {
+ "title_aux": "Wildcards"
+ }
+ ],
+ "https://github.com/lrzjason/ComfyUIJasonNode/raw/main/SDXLMixSampler.py": [
+ [
+ "SDXLMixSampler"
+ ],
+ {
+ "title_aux": "ComfyUIJasonNode"
+ }
+ ],
+ "https://github.com/ltdrdata/ComfyUI-Impact-Pack": [
+ [
+ "AddMask",
+ "BasicPipeToDetailerPipe",
+ "BasicPipeToDetailerPipeSDXL",
+ "BboxDetectorCombined",
+ "BboxDetectorCombined_v2",
+ "BboxDetectorForEach",
+ "BboxDetectorSEGS",
+ "BitwiseAndMask",
+ "BitwiseAndMaskForEach",
+ "CLIPSegDetectorProvider",
+ "CfgScheduleHookProvider",
+ "CombineRegionalPrompts",
+ "CoreMLDetailerHookProvider",
+ "DenoiseScheduleHookProvider",
+ "DenoiseSchedulerDetailerHookProvider",
+ "DetailerForEach",
+ "DetailerForEachDebug",
+ "DetailerForEachDebugPipe",
+ "DetailerForEachPipe",
+ "DetailerForEachPipeForAnimateDiff",
+ "DetailerHookCombine",
+ "DetailerPipeToBasicPipe",
+ "EditBasicPipe",
+ "EditDetailerPipe",
+ "EditDetailerPipeSDXL",
+ "EmptySegs",
+ "FaceDetailer",
+ "FaceDetailerPipe",
+ "FromBasicPipe",
+ "FromBasicPipe_v2",
+ "FromDetailerPipe",
+ "FromDetailerPipeSDXL",
+ "FromDetailerPipe_v2",
+ "ImageListToImageBatch",
+ "ImageMaskSwitch",
+ "ImageReceiver",
+ "ImageSender",
+ "ImpactAssembleSEGS",
+ "ImpactCombineConditionings",
+ "ImpactCompare",
+ "ImpactConcatConditionings",
+ "ImpactConditionalBranch",
+ "ImpactConditionalBranchSelMode",
+ "ImpactConditionalStopIteration",
+ "ImpactControlBridge",
+ "ImpactControlNetApplyAdvancedSEGS",
+ "ImpactControlNetApplySEGS",
+ "ImpactControlNetClearSEGS",
+ "ImpactConvertDataType",
+ "ImpactDecomposeSEGS",
+ "ImpactDilateMask",
+ "ImpactDilateMaskInSEGS",
+ "ImpactDilate_Mask_SEG_ELT",
+ "ImpactDummyInput",
+ "ImpactEdit_SEG_ELT",
+ "ImpactFloat",
+ "ImpactFrom_SEG_ELT",
+ "ImpactGaussianBlurMask",
+ "ImpactGaussianBlurMaskInSEGS",
+ "ImpactHFTransformersClassifierProvider",
+ "ImpactIfNone",
+ "ImpactImageBatchToImageList",
+ "ImpactImageInfo",
+ "ImpactInt",
+ "ImpactInversedSwitch",
+ "ImpactIsNotEmptySEGS",
+ "ImpactKSamplerAdvancedBasicPipe",
+ "ImpactKSamplerBasicPipe",
+ "ImpactLatentInfo",
+ "ImpactLogger",
+ "ImpactLogicalOperators",
+ "ImpactMakeImageBatch",
+ "ImpactMakeImageList",
+ "ImpactMakeTileSEGS",
+ "ImpactMinMax",
+ "ImpactNeg",
+ "ImpactNodeSetMuteState",
+ "ImpactQueueTrigger",
+ "ImpactQueueTriggerCountdown",
+ "ImpactRemoteBoolean",
+ "ImpactRemoteInt",
+ "ImpactSEGSClassify",
+ "ImpactSEGSConcat",
+ "ImpactSEGSLabelFilter",
+ "ImpactSEGSOrderedFilter",
+ "ImpactSEGSPicker",
+ "ImpactSEGSRangeFilter",
+ "ImpactSEGSToMaskBatch",
+ "ImpactSEGSToMaskList",
+ "ImpactScaleBy_BBOX_SEG_ELT",
+ "ImpactSegsAndMask",
+ "ImpactSegsAndMaskForEach",
+ "ImpactSetWidgetValue",
+ "ImpactSimpleDetectorSEGS",
+ "ImpactSimpleDetectorSEGSPipe",
+ "ImpactSimpleDetectorSEGS_for_AD",
+ "ImpactSleep",
+ "ImpactStringSelector",
+ "ImpactSwitch",
+ "ImpactValueReceiver",
+ "ImpactValueSender",
+ "ImpactWildcardEncode",
+ "ImpactWildcardProcessor",
+ "IterativeImageUpscale",
+ "IterativeLatentUpscale",
+ "KSamplerAdvancedProvider",
+ "KSamplerProvider",
+ "LatentPixelScale",
+ "LatentReceiver",
+ "LatentSender",
+ "LatentSwitch",
+ "MMDetDetectorProvider",
+ "MMDetLoader",
+ "MaskDetailerPipe",
+ "MaskListToMaskBatch",
+ "MaskPainter",
+ "MaskToSEGS",
+ "MaskToSEGS_for_AnimateDiff",
+ "MasksToMaskList",
+ "MediaPipeFaceMeshToSEGS",
+ "NoiseInjectionDetailerHookProvider",
+ "NoiseInjectionHookProvider",
+ "ONNXDetectorProvider",
+ "ONNXDetectorSEGS",
+ "PixelKSampleHookCombine",
+ "PixelKSampleUpscalerProvider",
+ "PixelKSampleUpscalerProviderPipe",
+ "PixelTiledKSampleUpscalerProvider",
+ "PixelTiledKSampleUpscalerProviderPipe",
+ "PreviewBridge",
+ "PreviewBridgeLatent",
+ "PreviewDetailerHookProvider",
+ "ReencodeLatent",
+ "ReencodeLatentPipe",
+ "RegionalPrompt",
+ "RegionalSampler",
+ "RegionalSamplerAdvanced",
+ "RemoveImageFromSEGS",
+ "RemoveNoiseMask",
+ "SAMDetectorCombined",
+ "SAMDetectorSegmented",
+ "SAMLoader",
+ "SEGSDetailer",
+ "SEGSDetailerForAnimateDiff",
+ "SEGSLabelFilterDetailerHookProvider",
+ "SEGSOrderedFilterDetailerHookProvider",
+ "SEGSPaste",
+ "SEGSPreview",
+ "SEGSPreviewCNet",
+ "SEGSRangeFilterDetailerHookProvider",
+ "SEGSSwitch",
+ "SEGSToImageList",
+ "SegmDetectorCombined",
+ "SegmDetectorCombined_v2",
+ "SegmDetectorForEach",
+ "SegmDetectorSEGS",
+ "Segs Mask",
+ "Segs Mask ForEach",
+ "SegsMaskCombine",
+ "SegsToCombinedMask",
+ "SetDefaultImageForSEGS",
+ "StepsScheduleHookProvider",
+ "SubtractMask",
+ "SubtractMaskForEach",
+ "TiledKSamplerProvider",
+ "ToBasicPipe",
+ "ToBinaryMask",
+ "ToDetailerPipe",
+ "ToDetailerPipeSDXL",
+ "TwoAdvancedSamplersForMask",
+ "TwoSamplersForMask",
+ "TwoSamplersForMaskUpscalerProvider",
+ "TwoSamplersForMaskUpscalerProviderPipe",
+ "UltralyticsDetectorProvider",
+ "UnsamplerDetailerHookProvider",
+ "UnsamplerHookProvider"
+ ],
+ {
+ "author": "Dr.Lt.Data",
+ "description": "This extension offers various detector nodes and detailer nodes that allow you to configure a workflow that automatically enhances facial details. And provide iterative upscaler.",
+ "nickname": "Impact Pack",
+ "title": "Impact Pack",
+ "title_aux": "ComfyUI Impact Pack"
+ }
+ ],
+ "https://github.com/ltdrdata/ComfyUI-Inspire-Pack": [
+ [
+ "AnimeLineArt_Preprocessor_Provider_for_SEGS //Inspire",
+ "ApplyRegionalIPAdapters //Inspire",
+ "BindImageListPromptList //Inspire",
+ "CLIPTextEncodeWithWeight //Inspire",
+ "CacheBackendData //Inspire",
+ "CacheBackendDataList //Inspire",
+ "CacheBackendDataNumberKey //Inspire",
+ "CacheBackendDataNumberKeyList //Inspire",
+ "Canny_Preprocessor_Provider_for_SEGS //Inspire",
+ "ChangeImageBatchSize //Inspire",
+ "CheckpointLoaderSimpleShared //Inspire",
+ "Color_Preprocessor_Provider_for_SEGS //Inspire",
+ "ConcatConditioningsWithMultiplier //Inspire",
+ "DWPreprocessor_Provider_for_SEGS //Inspire",
+ "FakeScribblePreprocessor_Provider_for_SEGS //Inspire",
+ "FloatRange //Inspire",
+ "FromIPAdapterPipe //Inspire",
+ "GlobalSampler //Inspire",
+ "GlobalSeed //Inspire",
+ "HEDPreprocessor_Provider_for_SEGS //Inspire",
+ "HyperTile //Inspire",
+ "IPAdapterModelHelper //Inspire",
+ "ImageBatchSplitter //Inspire",
+ "InpaintPreprocessor_Provider_for_SEGS //Inspire",
+ "KSampler //Inspire",
+ "KSamplerAdvanced //Inspire",
+ "KSamplerAdvancedPipe //Inspire",
+ "KSamplerAdvancedProgress //Inspire",
+ "KSamplerPipe //Inspire",
+ "KSamplerProgress //Inspire",
+ "LatentBatchSplitter //Inspire",
+ "LeRes_DepthMap_Preprocessor_Provider_for_SEGS //Inspire",
+ "LineArt_Preprocessor_Provider_for_SEGS //Inspire",
+ "ListCounter //Inspire",
+ "LoadImage //Inspire",
+ "LoadImageListFromDir //Inspire",
+ "LoadImagesFromDir //Inspire",
+ "LoadPromptsFromDir //Inspire",
+ "LoadPromptsFromFile //Inspire",
+ "LoadSinglePromptFromFile //Inspire",
+ "LoraBlockInfo //Inspire",
+ "LoraLoaderBlockWeight //Inspire",
+ "MakeBasicPipe //Inspire",
+ "Manga2Anime_LineArt_Preprocessor_Provider_for_SEGS //Inspire",
+ "MediaPipeFaceMeshDetectorProvider //Inspire",
+ "MediaPipe_FaceMesh_Preprocessor_Provider_for_SEGS //Inspire",
+ "MeshGraphormerDepthMapPreprocessorProvider_for_SEGS //Inspire",
+ "MiDaS_DepthMap_Preprocessor_Provider_for_SEGS //Inspire",
+ "OpenPose_Preprocessor_Provider_for_SEGS //Inspire",
+ "PromptBuilder //Inspire",
+ "PromptExtractor //Inspire",
+ "RandomGeneratorForList //Inspire",
+ "RegionalConditioningColorMask //Inspire",
+ "RegionalConditioningSimple //Inspire",
+ "RegionalIPAdapterColorMask //Inspire",
+ "RegionalIPAdapterEncodedColorMask //Inspire",
+ "RegionalIPAdapterEncodedMask //Inspire",
+ "RegionalIPAdapterMask //Inspire",
+ "RegionalPromptColorMask //Inspire",
+ "RegionalPromptSimple //Inspire",
+ "RegionalSeedExplorerColorMask //Inspire",
+ "RegionalSeedExplorerMask //Inspire",
+ "RemoveBackendData //Inspire",
+ "RemoveBackendDataNumberKey //Inspire",
+ "RemoveControlNet //Inspire",
+ "RemoveControlNetFromRegionalPrompts //Inspire",
+ "RetrieveBackendData //Inspire",
+ "RetrieveBackendDataNumberKey //Inspire",
+ "SeedExplorer //Inspire",
+ "ShowCachedInfo //Inspire",
+ "TilePreprocessor_Provider_for_SEGS //Inspire",
+ "ToIPAdapterPipe //Inspire",
+ "UnzipPrompt //Inspire",
+ "WildcardEncode //Inspire",
+ "XY Input: Lora Block Weight //Inspire",
+ "ZipPrompt //Inspire",
+ "Zoe_DepthMap_Preprocessor_Provider_for_SEGS //Inspire"
+ ],
+ {
+ "author": "Dr.Lt.Data",
+ "description": "This extension provides various nodes to support Lora Block Weight and the Impact Pack.",
+ "nickname": "Inspire Pack",
+ "nodename_pattern": "Inspire$",
+ "title": "Inspire Pack",
+ "title_aux": "ComfyUI Inspire Pack"
+ }
+ ],
+ "https://github.com/m-sokes/ComfyUI-Sokes-Nodes": [
+ [
+ "Custom Date Format | sokes \ud83e\uddac",
+ "Latent Switch x9 | sokes \ud83e\uddac"
+ ],
+ {
+ "title_aux": "ComfyUI Sokes Nodes"
+ }
+ ],
+ "https://github.com/m957ymj75urz/ComfyUI-Custom-Nodes/raw/main/clip-text-encode-split/clip_text_encode_split.py": [
+ [
+ "RawText",
+ "RawTextCombine",
+ "RawTextEncode",
+ "RawTextReplace"
+ ],
+ {
+ "title_aux": "m957ymj75urz/ComfyUI-Custom-Nodes"
+ }
+ ],
+ "https://github.com/mape/ComfyUI-mape-Helpers": [
+ [
+ "mape Variable"
+ ],
+ {
+ "author": "mape",
+ "description": "Various QoL improvements like prompt tweaking, variable assignment, image preview, fuzzy search, error reporting, organizing and node navigation.",
+ "nickname": "\ud83d\udfe1 mape's helpers",
+ "title": "mape's helpers",
+ "title_aux": "mape's ComfyUI Helpers"
+ }
+ ],
+ "https://github.com/marhensa/sdxl-recommended-res-calc": [
+ [
+ "RecommendedResCalc"
+ ],
+ {
+ "title_aux": "Recommended Resolution Calculator"
+ }
+ ],
+ "https://github.com/martijnat/comfyui-previewlatent": [
+ [
+ "PreviewLatent",
+ "PreviewLatentAdvanced",
+ "PreviewLatentXL"
+ ],
+ {
+ "title_aux": "comfyui-previewlatent"
+ }
+ ],
+ "https://github.com/massao000/ComfyUI_aspect_ratios": [
+ [
+ "Aspect Ratios Node"
+ ],
+ {
+ "title_aux": "ComfyUI_aspect_ratios"
+ }
+ ],
+ "https://github.com/matan1905/ComfyUI-Serving-Toolkit": [
+ [
+ "DiscordServing",
+ "ServingInputNumber",
+ "ServingInputText",
+ "ServingOutput",
+ "WebSocketServing"
+ ],
+ {
+ "title_aux": "ComfyUI Serving toolkit"
+ }
+ ],
+ "https://github.com/mav-rik/facerestore_cf": [
+ [
+ "CropFace",
+ "FaceRestoreCFWithModel",
+ "FaceRestoreModelLoader"
+ ],
+ {
+ "title_aux": "Facerestore CF (Code Former)"
+ }
+ ],
+ "https://github.com/mbrostami/ComfyUI-HF": [
+ [
+ "GPT2Node"
+ ],
+ {
+ "title_aux": "ComfyUI-HF"
+ }
+ ],
+ "https://github.com/mcmonkeyprojects/sd-dynamic-thresholding": [
+ [
+ "DynamicThresholdingFull",
+ "DynamicThresholdingSimple"
+ ],
+ {
+ "title_aux": "Stable Diffusion Dynamic Thresholding (CFG Scale Fix)"
+ }
+ ],
+ "https://github.com/meap158/ComfyUI-Background-Replacement": [
+ [
+ "BackgroundReplacement",
+ "ImageComposite"
+ ],
+ {
+ "title_aux": "ComfyUI-Background-Replacement"
+ }
+ ],
+ "https://github.com/meap158/ComfyUI-GPU-temperature-protection": [
+ [
+ "GPUTemperatureProtection"
+ ],
+ {
+ "title_aux": "GPU temperature protection"
+ }
+ ],
+ "https://github.com/meap158/ComfyUI-Prompt-Expansion": [
+ [
+ "PromptExpansion"
+ ],
+ {
+ "title_aux": "ComfyUI-Prompt-Expansion"
+ }
+ ],
+ "https://github.com/melMass/comfy_mtb": [
+ [
+ "Animation Builder (mtb)",
+ "Any To String (mtb)",
+ "Batch Float (mtb)",
+ "Batch Float Assemble (mtb)",
+ "Batch Float Fill (mtb)",
+ "Batch Make (mtb)",
+ "Batch Merge (mtb)",
+ "Batch Shake (mtb)",
+ "Batch Shape (mtb)",
+ "Batch Transform (mtb)",
+ "Bbox (mtb)",
+ "Bbox From Mask (mtb)",
+ "Blur (mtb)",
+ "Color Correct (mtb)",
+ "Colored Image (mtb)",
+ "Concat Images (mtb)",
+ "Crop (mtb)",
+ "Debug (mtb)",
+ "Deep Bump (mtb)",
+ "Export With Ffmpeg (mtb)",
+ "Face Swap (mtb)",
+ "Film Interpolation (mtb)",
+ "Fit Number (mtb)",
+ "Float To Number (mtb)",
+ "Get Batch From History (mtb)",
+ "Image Compare (mtb)",
+ "Image Premultiply (mtb)",
+ "Image Remove Background Rembg (mtb)",
+ "Image Resize Factor (mtb)",
+ "Image Tile Offset (mtb)",
+ "Int To Bool (mtb)",
+ "Int To Number (mtb)",
+ "Interpolate Clip Sequential (mtb)",
+ "Latent Lerp (mtb)",
+ "Load Face Analysis Model (mtb)",
+ "Load Face Enhance Model (mtb)",
+ "Load Face Swap Model (mtb)",
+ "Load Film Model (mtb)",
+ "Load Image From Url (mtb)",
+ "Load Image Sequence (mtb)",
+ "Mask To Image (mtb)",
+ "Math Expression (mtb)",
+ "Model Patch Seamless (mtb)",
+ "Pick From Batch (mtb)",
+ "Qr Code (mtb)",
+ "Restore Face (mtb)",
+ "Save Gif (mtb)",
+ "Save Image Grid (mtb)",
+ "Save Image Sequence (mtb)",
+ "Save Tensors (mtb)",
+ "Sharpen (mtb)",
+ "Smart Step (mtb)",
+ "Stack Images (mtb)",
+ "String Replace (mtb)",
+ "Styles Loader (mtb)",
+ "Text To Image (mtb)",
+ "Transform Image (mtb)",
+ "Uncrop (mtb)",
+ "Unsplash Image (mtb)",
+ "Vae Decode (mtb)"
+ ],
+ {
+ "nodename_pattern": "\\(mtb\\)$",
+ "title_aux": "MTB Nodes"
+ }
+ ],
+ "https://github.com/mihaiiancu/ComfyUI_Inpaint": [
+ [
+ "InpaintMediapipe"
+ ],
+ {
+ "title_aux": "mihaiiancu/Inpaint"
+ }
+ ],
+ "https://github.com/mikkel/ComfyUI-text-overlay": [
+ [
+ "Image Text Overlay"
+ ],
+ {
+ "title_aux": "ComfyUI - Text Overlay Plugin"
+ }
+ ],
+ "https://github.com/mikkel/comfyui-mask-boundingbox": [
+ [
+ "Mask Bounding Box"
+ ],
+ {
+ "title_aux": "ComfyUI - Mask Bounding Box"
+ }
+ ],
+ "https://github.com/mlinmg/ComfyUI-LaMA-Preprocessor": [
+ [
+ "LaMaPreprocessor",
+ "lamaPreprocessor"
+ ],
+ {
+ "title_aux": "LaMa Preprocessor [WIP]"
+ }
+ ],
+ "https://github.com/modusCell/ComfyUI-dimension-node-modusCell": [
+ [
+ "DimensionProviderFree modusCell",
+ "DimensionProviderRatio modusCell",
+ "String Concat modusCell"
+ ],
+ {
+ "title_aux": "Preset Dimensions"
+ }
+ ],
+ "https://github.com/mpiquero7164/ComfyUI-SaveImgPrompt": [
+ [
+ "Save IMG Prompt"
+ ],
+ {
+ "title_aux": "SaveImgPrompt"
+ }
+ ],
+ "https://github.com/nagolinc/ComfyUI_FastVAEDecorder_SDXL": [
+ [
+ "FastLatentToImage"
+ ],
+ {
+ "title_aux": "ComfyUI_FastVAEDecorder_SDXL"
+ }
+ ],
+ "https://github.com/natto-maki/ComfyUI-NegiTools": [
+ [
+ "NegiTools_CompositeImages",
+ "NegiTools_DepthEstimationByMarigold",
+ "NegiTools_DetectFaceRotationForInpainting",
+ "NegiTools_ImageProperties",
+ "NegiTools_LatentProperties",
+ "NegiTools_NoiseImageGenerator",
+ "NegiTools_OpenAiDalle3",
+ "NegiTools_OpenAiGpt",
+ "NegiTools_OpenAiGpt4v",
+ "NegiTools_OpenAiTranslate",
+ "NegiTools_OpenPoseToPointList",
+ "NegiTools_PointListToMask",
+ "NegiTools_RandomImageLoader",
+ "NegiTools_SaveImageToDirectory",
+ "NegiTools_SeedGenerator",
+ "NegiTools_StereoImageGenerator",
+ "NegiTools_StringFunction"
+ ],
+ {
+ "title_aux": "ComfyUI-NegiTools"
+ }
+ ],
+ "https://github.com/nicolai256/comfyUI_Nodes_nicolai256/raw/main/yugioh-presets.py": [
+ [
+ "yugioh_Presets"
+ ],
+ {
+ "title_aux": "comfyUI_Nodes_nicolai256"
+ }
+ ],
+ "https://github.com/ningxiaoxiao/comfyui-NDI": [
+ [
+ "NDI_LoadImage",
+ "NDI_SendImage"
+ ],
+ {
+ "title_aux": "comfyui-NDI"
+ }
+ ],
+ "https://github.com/nkchocoai/ComfyUI-PromptUtilities": [
+ [
+ "PromptUtilitiesConstString",
+ "PromptUtilitiesConstStringMultiLine",
+ "PromptUtilitiesFormatString",
+ "PromptUtilitiesJoinStringList",
+ "PromptUtilitiesLoadPreset",
+ "PromptUtilitiesLoadPresetAdvanced",
+ "PromptUtilitiesRandomPreset",
+ "PromptUtilitiesRandomPresetAdvanced"
+ ],
+ {
+ "title_aux": "ComfyUI-PromptUtilities"
+ }
+ ],
+ "https://github.com/nkchocoai/ComfyUI-SizeFromPresets": [
+ [
+ "EmptyLatentImageFromPresetsSD15",
+ "EmptyLatentImageFromPresetsSDXL",
+ "RandomEmptyLatentImageFromPresetsSD15",
+ "RandomEmptyLatentImageFromPresetsSDXL",
+ "RandomSizeFromPresetsSD15",
+ "RandomSizeFromPresetsSDXL",
+ "SizeFromPresetsSD15",
+ "SizeFromPresetsSDXL"
+ ],
+ {
+ "title_aux": "ComfyUI-SizeFromPresets"
+ }
+ ],
+ "https://github.com/nkchocoai/ComfyUI-TextOnSegs": [
+ [
+ "CalcMaxFontSize",
+ "ExtractDominantColor",
+ "GetComplementaryColor",
+ "SegsToRegion",
+ "TextOnSegsFloodFill"
+ ],
+ {
+ "title_aux": "ComfyUI-TextOnSegs"
+ }
+ ],
+ "https://github.com/noembryo/ComfyUI-noEmbryo": [
+ [
+ "PromptTermList1",
+ "PromptTermList2",
+ "PromptTermList3",
+ "PromptTermList4",
+ "PromptTermList5",
+ "PromptTermList6"
+ ],
+ {
+ "author": "noEmbryo",
+ "description": "Some useful nodes for ComfyUI",
+ "nickname": "noEmbryo",
+ "title": "noEmbryo nodes for ComfyUI",
+ "title_aux": "noEmbryo nodes"
+ }
+ ],
+ "https://github.com/nosiu/comfyui-instantId-faceswap": [
+ [
+ "FaceEmbed",
+ "FaceSwapGenerationInpaint",
+ "FaceSwapSetupPipeline",
+ "LCMLora"
+ ],
+ {
+ "title_aux": "ComfyUI InstantID Faceswapper"
+ }
+ ],
+ "https://github.com/noxinias/ComfyUI_NoxinNodes": [
+ [
+ "NoxinChime",
+ "NoxinPromptLoad",
+ "NoxinPromptSave",
+ "NoxinScaledResolution",
+ "NoxinSimpleMath",
+ "NoxinSplitPrompt"
+ ],
+ {
+ "title_aux": "ComfyUI_NoxinNodes"
+ }
+ ],
+ "https://github.com/ntc-ai/ComfyUI-DARE-LoRA-Merge": [
+ [
+ "Apply LoRA",
+ "DARE Merge LoRA Stack",
+ "Save LoRA"
+ ],
+ {
+ "title_aux": "ComfyUI - Apply LoRA Stacker with DARE"
+ }
+ ],
+ "https://github.com/ntdviet/comfyui-ext/raw/main/custom_nodes/gcLatentTunnel/gcLatentTunnel.py": [
+ [
+ "gcLatentTunnel"
+ ],
+ {
+ "title_aux": "ntdviet/comfyui-ext"
+ }
+ ],
+ "https://github.com/omar92/ComfyUI-QualityOfLifeSuit_Omar92": [
+ [
+ "CLIPStringEncode _O",
+ "Chat completion _O",
+ "ChatGPT Simple _O",
+ "ChatGPT _O",
+ "ChatGPT compact _O",
+ "Chat_Completion _O",
+ "Chat_Message _O",
+ "Chat_Message_fromString _O",
+ "Concat Text _O",
+ "ConcatRandomNSP_O",
+ "Debug String _O",
+ "Debug Text _O",
+ "Debug Text route _O",
+ "Edit_image _O",
+ "Equation1param _O",
+ "Equation2params _O",
+ "GetImage_(Width&Height) _O",
+ "GetLatent_(Width&Height) _O",
+ "ImageScaleFactor _O",
+ "ImageScaleFactorSimple _O",
+ "LatentUpscaleFactor _O",
+ "LatentUpscaleFactorSimple _O",
+ "LatentUpscaleMultiply",
+ "Note _O",
+ "RandomNSP _O",
+ "Replace Text _O",
+ "String _O",
+ "Text _O",
+ "Text2Image _O",
+ "Trim Text _O",
+ "VAEDecodeParallel _O",
+ "combine_chat_messages _O",
+ "compine_chat_messages _O",
+ "concat Strings _O",
+ "create image _O",
+ "create_image _O",
+ "debug Completeion _O",
+ "debug messages_O",
+ "float _O",
+ "floatToInt _O",
+ "floatToText _O",
+ "int _O",
+ "intToFloat _O",
+ "load_openAI _O",
+ "replace String _O",
+ "replace String advanced _O",
+ "saveTextToFile _O",
+ "seed _O",
+ "selectLatentFromBatch _O",
+ "string2Image _O",
+ "trim String _O",
+ "variation_image _O"
+ ],
+ {
+ "title_aux": "Quality of life Suit:V2"
+ }
+ ],
+ "https://github.com/ostris/ostris_nodes_comfyui": [
+ [
+ "LLM Pipe Loader - Ostris",
+ "LLM Prompt Upsampling - Ostris",
+ "One Seed - Ostris",
+ "Text Box - Ostris"
+ ],
+ {
+ "nodename_pattern": "- Ostris$",
+ "title_aux": "Ostris Nodes ComfyUI"
+ }
+ ],
+ "https://github.com/ownimage/ComfyUI-ownimage": [
+ [
+ "Caching Image Loader"
+ ],
+ {
+ "title_aux": "ComfyUI-ownimage"
+ }
+ ],
+ "https://github.com/oyvindg/ComfyUI-TrollSuite": [
+ [
+ "BinaryImageMask",
+ "ImagePadding",
+ "LoadLastImage",
+ "RandomMask",
+ "TransparentImage"
+ ],
+ {
+ "title_aux": "ComfyUI-TrollSuite"
+ }
+ ],
+ "https://github.com/palant/extended-saveimage-comfyui": [
+ [
+ "SaveImageExtended"
+ ],
+ {
+ "title_aux": "Extended Save Image for ComfyUI"
+ }
+ ],
+ "https://github.com/palant/image-resize-comfyui": [
+ [
+ "ImageResize"
+ ],
+ {
+ "title_aux": "Image Resize for ComfyUI"
+ }
+ ],
+ "https://github.com/pants007/comfy-pants": [
+ [
+ "CLIPTextEncodeAIO",
+ "Image Make Square"
+ ],
+ {
+ "title_aux": "pants"
+ }
+ ],
+ "https://github.com/paulo-coronado/comfy_clip_blip_node": [
+ [
+ "CLIPTextEncodeBLIP",
+ "CLIPTextEncodeBLIP-2",
+ "Example"
+ ],
+ {
+ "title_aux": "comfy_clip_blip_node"
+ }
+ ],
+ "https://github.com/picturesonpictures/comfy_PoP": [
+ [
+ "AdaptiveCannyDetector_PoP",
+ "AnyAspectRatio",
+ "ConditioningMultiplier_PoP",
+ "ConditioningNormalizer_PoP",
+ "DallE3_PoP",
+ "LoadImageResizer_PoP",
+ "LoraStackLoader10_PoP",
+ "LoraStackLoader_PoP",
+ "VAEDecoderPoP",
+ "VAEEncoderPoP"
+ ],
+ {
+ "title_aux": "comfy_PoP"
+ }
+ ],
+ "https://github.com/pkpkTech/ComfyUI-SaveAVIF": [
+ [
+ "SaveAvif"
+ ],
+ {
+ "title_aux": "ComfyUI-SaveAVIF"
+ }
+ ],
+ "https://github.com/pkpkTech/ComfyUI-TemporaryLoader": [
+ [
+ "LoadTempCheckpoint",
+ "LoadTempLoRA",
+ "LoadTempMultiLoRA"
+ ],
+ {
+ "title_aux": "ComfyUI-TemporaryLoader"
+ }
+ ],
+ "https://github.com/pythongosssss/ComfyUI-Custom-Scripts": [
+ [
+ "CheckpointLoader|pysssss",
+ "ConstrainImageforVideo|pysssss",
+ "ConstrainImage|pysssss",
+ "LoadText|pysssss",
+ "LoraLoader|pysssss",
+ "MathExpression|pysssss",
+ "MultiPrimitive|pysssss",
+ "PlaySound|pysssss",
+ "Repeater|pysssss",
+ "ReroutePrimitive|pysssss",
+ "SaveText|pysssss",
+ "ShowText|pysssss",
+ "StringFunction|pysssss"
+ ],
+ {
+ "title_aux": "pythongosssss/ComfyUI-Custom-Scripts"
+ }
+ ],
+ "https://github.com/pythongosssss/ComfyUI-WD14-Tagger": [
+ [
+ "WD14Tagger|pysssss"
+ ],
+ {
+ "title_aux": "ComfyUI WD 1.4 Tagger"
+ }
+ ],
+ "https://github.com/ramyma/A8R8_ComfyUI_nodes": [
+ [
+ "Base64ImageInput",
+ "Base64ImageOutput"
+ ],
+ {
+ "title_aux": "A8R8 ComfyUI Nodes"
+ }
+ ],
+ "https://github.com/rcfcu2000/zhihuige-nodes-comfyui": [
+ [
+ "Combine ZHGMasks",
+ "Cover ZHGMasks",
+ "From ZHG pip",
+ "GroundingDinoModelLoader (zhihuige)",
+ "GroundingDinoPIPESegment (zhihuige)",
+ "GroundingDinoSAMSegment (zhihuige)",
+ "InvertMask (zhihuige)",
+ "SAMModelLoader (zhihuige)",
+ "To ZHG pip",
+ "ZHG FaceIndex",
+ "ZHG GetMaskArea",
+ "ZHG Image Levels",
+ "ZHG SaveImage",
+ "ZHG SmoothEdge",
+ "ZHG UltimateSDUpscale"
+ ],
+ {
+ "title_aux": "zhihuige-nodes-comfyui"
+ }
+ ],
+ "https://github.com/rcsaquino/comfyui-custom-nodes": [
+ [
+ "BackgroundRemover | rcsaquino",
+ "VAELoader | rcsaquino",
+ "VAEProcessor | rcsaquino"
+ ],
+ {
+ "title_aux": "rcsaquino/comfyui-custom-nodes"
+ }
+ ],
+ "https://github.com/receyuki/comfyui-prompt-reader-node": [
+ [
+ "SDBatchLoader",
+ "SDLoraLoader",
+ "SDLoraSelector",
+ "SDParameterExtractor",
+ "SDParameterGenerator",
+ "SDPromptMerger",
+ "SDPromptReader",
+ "SDPromptSaver",
+ "SDTypeConverter"
+ ],
+ {
+ "author": "receyuki",
+ "description": "ComfyUI node version of the SD Prompt Reader",
+ "nickname": "SD Prompt Reader",
+ "title": "SD Prompt Reader",
+ "title_aux": "comfyui-prompt-reader-node"
+ }
+ ],
+ "https://github.com/redhottensors/ComfyUI-Prediction": [
+ [
+ "AvoidErasePrediction",
+ "CFGPrediction",
+ "CombinePredictions",
+ "ConditionedPrediction",
+ "PerpNegPrediction",
+ "SamplerCustomPrediction",
+ "ScalePrediction",
+ "ScaledGuidancePrediction"
+ ],
+ {
+ "author": "RedHotTensors",
+ "description": "Fully customizable Classifer Free Guidance for ComfyUI",
+ "nickname": "ComfyUI-Prediction",
+ "title": "ComfyUI-Prediction",
+ "title_aux": "ComfyUI-Prediction"
+ }
+ ],
+ "https://github.com/rgthree/rgthree-comfy": [
+ [],
+ {
+ "author": "rgthree",
+ "description": "A bunch of nodes I created that I also find useful.",
+ "nickname": "rgthree",
+ "nodename_pattern": " \\(rgthree\\)$",
+ "title": "Comfy Nodes",
+ "title_aux": "rgthree's ComfyUI Nodes"
+ }
+ ],
+ "https://github.com/richinsley/Comfy-LFO": [
+ [
+ "LFO_Pulse",
+ "LFO_Sawtooth",
+ "LFO_Sine",
+ "LFO_Square",
+ "LFO_Triangle"
+ ],
+ {
+ "title_aux": "Comfy-LFO"
+ }
+ ],
+ "https://github.com/ricklove/comfyui-ricklove": [
+ [
+ "RL_Crop_Resize",
+ "RL_Crop_Resize_Batch",
+ "RL_Depth16",
+ "RL_Finetune_Analyze",
+ "RL_Finetune_Analyze_Batch",
+ "RL_Finetune_Variable",
+ "RL_Image_Shadow",
+ "RL_Image_Threshold_Channels",
+ "RL_Internet_Search",
+ "RL_LoadImageSequence",
+ "RL_Optical_Flow_Dip",
+ "RL_SaveImageSequence",
+ "RL_Uncrop",
+ "RL_Warp_Image",
+ "RL_Zoe_Depth_Map_Preprocessor",
+ "RL_Zoe_Depth_Map_Preprocessor_Raw_Infer",
+ "RL_Zoe_Depth_Map_Preprocessor_Raw_Process"
+ ],
+ {
+ "title_aux": "comfyui-ricklove"
+ }
+ ],
+ "https://github.com/rklaffehn/rk-comfy-nodes": [
+ [
+ "RK_CivitAIAddHashes",
+ "RK_CivitAIMetaChecker"
+ ],
+ {
+ "title_aux": "rk-comfy-nodes"
+ }
+ ],
+ "https://github.com/romeobuilderotti/ComfyUI-PNG-Metadata": [
+ [
+ "SetMetadataAll",
+ "SetMetadataString"
+ ],
+ {
+ "title_aux": "ComfyUI PNG Metadata"
+ }
+ ],
+ "https://github.com/rui40000/RUI-Nodes": [
+ [
+ "ABCondition",
+ "CharacterCount"
+ ],
+ {
+ "title_aux": "RUI-Nodes"
+ }
+ ],
+ "https://github.com/s1dlx/comfy_meh/raw/main/meh.py": [
+ [
+ "MergingExecutionHelper"
+ ],
+ {
+ "title_aux": "comfy_meh"
+ }
+ ],
+ "https://github.com/seanlynch/comfyui-optical-flow": [
+ [
+ "Apply optical flow",
+ "Compute optical flow",
+ "Visualize optical flow"
+ ],
+ {
+ "title_aux": "ComfyUI Optical Flow"
+ }
+ ],
+ "https://github.com/seanlynch/srl-nodes": [
+ [
+ "SRL Conditional Interrrupt",
+ "SRL Eval",
+ "SRL Filter Image List",
+ "SRL Format String"
+ ],
+ {
+ "title_aux": "SRL's nodes"
+ }
+ ],
+ "https://github.com/sergekatzmann/ComfyUI_Nimbus-Pack": [
+ [
+ "ImageResizeAndCropNode",
+ "ImageSquareAdapterNode"
+ ],
+ {
+ "title_aux": "ComfyUI_Nimbus-Pack"
+ }
+ ],
+ "https://github.com/shadowcz007/comfyui-consistency-decoder": [
+ [
+ "VAEDecodeConsistencyDecoder",
+ "VAELoaderConsistencyDecoder"
+ ],
+ {
+ "title_aux": "Consistency Decoder"
+ }
+ ],
+ "https://github.com/shadowcz007/comfyui-mixlab-nodes": [
+ [
+ "3DImage",
+ "AppInfo",
+ "AreaToMask",
+ "CenterImage",
+ "CharacterInText",
+ "ChatGPTOpenAI",
+ "CkptNames_",
+ "Color",
+ "DynamicDelayProcessor",
+ "EmbeddingPrompt",
+ "EnhanceImage",
+ "FaceToMask",
+ "FeatheredMask",
+ "FloatSlider",
+ "FloatingVideo",
+ "Font",
+ "GamePal",
+ "GetImageSize_",
+ "GradientImage",
+ "GridOutput",
+ "ImageColorTransfer",
+ "ImageCropByAlpha",
+ "IntNumber",
+ "JoinWithDelimiter",
+ "LaMaInpainting",
+ "LimitNumber",
+ "LoadImagesFromPath",
+ "LoadImagesFromURL",
+ "LoraNames_",
+ "MergeLayers",
+ "MirroredImage",
+ "MultiplicationNode",
+ "NewLayer",
+ "NoiseImage",
+ "OutlineMask",
+ "PromptImage",
+ "PromptSimplification",
+ "PromptSlide",
+ "RandomPrompt",
+ "ResizeImageMixlab",
+ "SamplerNames_",
+ "SaveImageToLocal",
+ "ScreenShare",
+ "Seed_",
+ "ShowLayer",
+ "ShowTextForGPT",
+ "SmoothMask",
+ "SpeechRecognition",
+ "SpeechSynthesis",
+ "SplitImage",
+ "SplitLongMask",
+ "SvgImage",
+ "SwitchByIndex",
+ "TESTNODE_",
+ "TESTNODE_TOKEN",
+ "TextImage",
+ "TextInput_",
+ "TextToNumber",
+ "TransparentImage",
+ "VAEDecodeConsistencyDecoder",
+ "VAELoaderConsistencyDecoder"
+ ],
+ {
+ "title_aux": "comfyui-mixlab-nodes"
+ }
+ ],
+ "https://github.com/shadowcz007/comfyui-ultralytics-yolo": [
+ [
+ "DetectByLabel"
+ ],
+ {
+ "title_aux": "comfyui-ultralytics-yolo"
+ }
+ ],
+ "https://github.com/shiimizu/ComfyUI-PhotoMaker-Plus": [
+ [
+ "PhotoMakerEncodePlus",
+ "PhotoMakerStyles",
+ "PrepImagesForClipVisionFromPath"
+ ],
+ {
+ "title_aux": "ComfyUI PhotoMaker Plus"
+ }
+ ],
+ "https://github.com/shiimizu/ComfyUI-TiledDiffusion": [
+ [
+ "NoiseInversion",
+ "TiledDiffusion",
+ "VAEDecodeTiled_TiledDiffusion",
+ "VAEEncodeTiled_TiledDiffusion"
+ ],
+ {
+ "title_aux": "Tiled Diffusion & VAE for ComfyUI"
+ }
+ ],
+ "https://github.com/shiimizu/ComfyUI_smZNodes": [
+ [
+ "smZ CLIPTextEncode",
+ "smZ Settings"
+ ],
+ {
+ "title_aux": "smZNodes"
+ }
+ ],
+ "https://github.com/shingo1228/ComfyUI-SDXL-EmptyLatentImage": [
+ [
+ "SDXL Empty Latent Image"
+ ],
+ {
+ "title_aux": "ComfyUI-SDXL-EmptyLatentImage"
+ }
+ ],
+ "https://github.com/shingo1228/ComfyUI-send-eagle-slim": [
+ [
+ "Send Eagle with text",
+ "Send Webp Image to Eagle"
+ ],
+ {
+ "title_aux": "ComfyUI-send-Eagle(slim)"
+ }
+ ],
+ "https://github.com/shockz0rz/ComfyUI_InterpolateEverything": [
+ [
+ "OpenposePreprocessorInterpolate"
+ ],
+ {
+ "title_aux": "InterpolateEverything"
+ }
+ ],
+ "https://github.com/shockz0rz/comfy-easy-grids": [
+ [
+ "FloatToText",
+ "GridFloatList",
+ "GridFloats",
+ "GridIntList",
+ "GridInts",
+ "GridLoras",
+ "GridStringList",
+ "GridStrings",
+ "ImageGridCommander",
+ "IntToText",
+ "SaveImageGrid",
+ "TextConcatenator"
+ ],
+ {
+ "title_aux": "comfy-easy-grids"
+ }
+ ],
+ "https://github.com/siliconflow/onediff_comfy_nodes": [
+ [
+ "CompareModel",
+ "ControlNetGraphLoader",
+ "ControlNetGraphSaver",
+ "ControlNetSpeedup",
+ "ModelGraphLoader",
+ "ModelGraphSaver",
+ "ModelSpeedup",
+ "ModuleDeepCacheSpeedup",
+ "OneDiffCheckpointLoaderSimple",
+ "SVDSpeedup",
+ "ShowImageDiff",
+ "VaeGraphLoader",
+ "VaeGraphSaver",
+ "VaeSpeedup"
+ ],
+ {
+ "title_aux": "OneDiff Nodes"
+ }
+ ],
+ "https://github.com/sipherxyz/comfyui-art-venture": [
+ [
+ "AV_CheckpointMerge",
+ "AV_CheckpointModelsToParametersPipe",
+ "AV_CheckpointSave",
+ "AV_ControlNetEfficientLoader",
+ "AV_ControlNetEfficientLoaderAdvanced",
+ "AV_ControlNetEfficientStacker",
+ "AV_ControlNetEfficientStackerSimple",
+ "AV_ControlNetLoader",
+ "AV_ControlNetPreprocessor",
+ "AV_LoraListLoader",
+ "AV_LoraListStacker",
+ "AV_LoraLoader",
+ "AV_ParametersPipeToCheckpointModels",
+ "AV_ParametersPipeToPrompts",
+ "AV_PromptsToParametersPipe",
+ "AV_SAMLoader",
+ "AV_VAELoader",
+ "AspectRatioSelector",
+ "BLIPCaption",
+ "BLIPLoader",
+ "BooleanPrimitive",
+ "ColorBlend",
+ "ColorCorrect",
+ "DeepDanbooruCaption",
+ "DependenciesEdit",
+ "Fooocus_KSampler",
+ "Fooocus_KSamplerAdvanced",
+ "GetBoolFromJson",
+ "GetFloatFromJson",
+ "GetIntFromJson",
+ "GetObjectFromJson",
+ "GetSAMEmbedding",
+ "GetTextFromJson",
+ "ISNetLoader",
+ "ISNetSegment",
+ "ImageAlphaComposite",
+ "ImageApplyChannel",
+ "ImageExtractChannel",
+ "ImageGaussianBlur",
+ "ImageMuxer",
+ "ImageRepeat",
+ "ImageScaleDown",
+ "ImageScaleDownBy",
+ "ImageScaleDownToSize",
+ "ImageScaleToMegapixels",
+ "LaMaInpaint",
+ "LoadImageAsMaskFromUrl",
+ "LoadImageFromUrl",
+ "LoadJsonFromUrl",
+ "MergeModels",
+ "NumberScaler",
+ "OverlayInpaintedImage",
+ "OverlayInpaintedLatent",
+ "PrepareImageAndMaskForInpaint",
+ "QRCodeGenerator",
+ "RandomFloat",
+ "RandomInt",
+ "SAMEmbeddingToImage",
+ "SDXLAspectRatioSelector",
+ "SDXLPromptStyler",
+ "SeedSelector",
+ "StringToInt",
+ "StringToNumber"
+ ],
+ {
+ "title_aux": "comfyui-art-venture"
+ }
+ ],
+ "https://github.com/skfoo/ComfyUI-Coziness": [
+ [
+ "LoraTextExtractor-b1f83aa2",
+ "MultiLoraLoader-70bf3d77"
+ ],
+ {
+ "title_aux": "ComfyUI-Coziness"
+ }
+ ],
+ "https://github.com/smagnetize/kb-comfyui-nodes": [
+ [
+ "SingleImageDataUrlLoader"
+ ],
+ {
+ "title_aux": "kb-comfyui-nodes"
+ }
+ ],
+ "https://github.com/space-nuko/ComfyUI-Disco-Diffusion": [
+ [
+ "DiscoDiffusion_DiscoDiffusion",
+ "DiscoDiffusion_DiscoDiffusionExtraSettings",
+ "DiscoDiffusion_GuidedDiffusionLoader",
+ "DiscoDiffusion_OpenAICLIPLoader"
+ ],
+ {
+ "title_aux": "Disco Diffusion"
+ }
+ ],
+ "https://github.com/space-nuko/ComfyUI-OpenPose-Editor": [
+ [
+ "Nui.OpenPoseEditor"
+ ],
+ {
+ "title_aux": "OpenPose Editor"
+ }
+ ],
+ "https://github.com/space-nuko/nui-suite": [
+ [
+ "Nui.DynamicPromptsTextGen",
+ "Nui.FeelingLuckyTextGen",
+ "Nui.OutputString"
+ ],
+ {
+ "title_aux": "nui suite"
+ }
+ ],
+ "https://github.com/spacepxl/ComfyUI-HQ-Image-Save": [
+ [
+ "LoadEXR",
+ "LoadLatentEXR",
+ "SaveEXR",
+ "SaveLatentEXR",
+ "SaveTiff"
+ ],
+ {
+ "title_aux": "ComfyUI-HQ-Image-Save"
+ }
+ ],
+ "https://github.com/spacepxl/ComfyUI-Image-Filters": [
+ [
+ "AdainImage",
+ "AdainLatent",
+ "AlphaClean",
+ "AlphaMatte",
+ "BatchAlign",
+ "BatchAverageImage",
+ "BatchAverageUnJittered",
+ "BatchNormalizeImage",
+ "BatchNormalizeLatent",
+ "BlurImageFast",
+ "BlurMaskFast",
+ "ClampOutliers",
+ "ConvertNormals",
+ "DifferenceChecker",
+ "DilateErodeMask",
+ "EnhanceDetail",
+ "ExposureAdjust",
+ "GuidedFilterAlpha",
+ "ImageConstant",
+ "ImageConstantHSV",
+ "JitterImage",
+ "Keyer",
+ "LatentStats",
+ "NormalMapSimple",
+ "OffsetLatentImage",
+ "RemapRange",
+ "Tonemap",
+ "UnJitterImage",
+ "UnTonemap"
+ ],
+ {
+ "title_aux": "ComfyUI-Image-Filters"
+ }
+ ],
+ "https://github.com/spacepxl/ComfyUI-RAVE": [
+ [
+ "ConditioningDebug",
+ "ImageGridCompose",
+ "ImageGridDecompose",
+ "KSamplerRAVE",
+ "LatentGridCompose",
+ "LatentGridDecompose"
+ ],
+ {
+ "title_aux": "ComfyUI-RAVE"
+ }
+ ],
+ "https://github.com/spinagon/ComfyUI-seam-carving": [
+ [
+ "SeamCarving"
+ ],
+ {
+ "title_aux": "ComfyUI-seam-carving"
+ }
+ ],
+ "https://github.com/spinagon/ComfyUI-seamless-tiling": [
+ [
+ "CircularVAEDecode",
+ "MakeCircularVAE",
+ "OffsetImage",
+ "SeamlessTile"
+ ],
+ {
+ "title_aux": "Seamless tiling Node for ComfyUI"
+ }
+ ],
+ "https://github.com/spro/comfyui-mirror": [
+ [
+ "LatentMirror"
+ ],
+ {
+ "title_aux": "Latent Mirror node for ComfyUI"
+ }
+ ],
+ "https://github.com/ssitu/ComfyUI_UltimateSDUpscale": [
+ [
+ "UltimateSDUpscale",
+ "UltimateSDUpscaleNoUpscale"
+ ],
+ {
+ "title_aux": "UltimateSDUpscale"
+ }
+ ],
+ "https://github.com/ssitu/ComfyUI_fabric": [
+ [
+ "FABRICPatchModel",
+ "FABRICPatchModelAdv",
+ "KSamplerAdvFABRICAdv",
+ "KSamplerFABRIC",
+ "KSamplerFABRICAdv"
+ ],
+ {
+ "title_aux": "ComfyUI fabric"
+ }
+ ],
+ "https://github.com/ssitu/ComfyUI_restart_sampling": [
+ [
+ "KRestartSampler",
+ "KRestartSamplerAdv",
+ "KRestartSamplerSimple"
+ ],
+ {
+ "title_aux": "Restart Sampling"
+ }
+ ],
+ "https://github.com/ssitu/ComfyUI_roop": [
+ [
+ "RoopImproved",
+ "roop"
+ ],
+ {
+ "title_aux": "ComfyUI roop"
+ }
+ ],
+ "https://github.com/storyicon/comfyui_segment_anything": [
+ [
+ "GroundingDinoModelLoader (segment anything)",
+ "GroundingDinoSAMSegment (segment anything)",
+ "InvertMask (segment anything)",
+ "IsMaskEmpty",
+ "SAMModelLoader (segment anything)"
+ ],
+ {
+ "title_aux": "segment anything"
+ }
+ ],
+ "https://github.com/strimmlarn/ComfyUI_Strimmlarns_aesthetic_score": [
+ [
+ "AesthetlcScoreSorter",
+ "CalculateAestheticScore",
+ "LoadAesteticModel",
+ "ScoreToNumber"
+ ],
+ {
+ "title_aux": "ComfyUI_Strimmlarns_aesthetic_score"
+ }
+ ],
+ "https://github.com/styler00dollar/ComfyUI-deepcache": [
+ [
+ "DeepCache"
+ ],
+ {
+ "title_aux": "ComfyUI-deepcache"
+ }
+ ],
+ "https://github.com/styler00dollar/ComfyUI-sudo-latent-upscale": [
+ [
+ "SudoLatentUpscale"
+ ],
+ {
+ "title_aux": "ComfyUI-sudo-latent-upscale"
+ }
+ ],
+ "https://github.com/syllebra/bilbox-comfyui": [
+ [
+ "BilboXLut",
+ "BilboXPhotoPrompt",
+ "BilboXVignette"
+ ],
+ {
+ "title_aux": "BilboX's ComfyUI Custom Nodes"
+ }
+ ],
+ "https://github.com/sylym/comfy_vid2vid": [
+ [
+ "CheckpointLoaderSimpleSequence",
+ "DdimInversionSequence",
+ "KSamplerSequence",
+ "LoadImageMaskSequence",
+ "LoadImageSequence",
+ "LoraLoaderSequence",
+ "SetLatentNoiseSequence",
+ "TrainUnetSequence",
+ "VAEEncodeForInpaintSequence"
+ ],
+ {
+ "title_aux": "Vid2vid"
+ }
+ ],
+ "https://github.com/szhublox/ambw_comfyui": [
+ [
+ "Auto Merge Block Weighted",
+ "CLIPMergeSimple",
+ "CheckpointSave",
+ "ModelMergeBlocks",
+ "ModelMergeSimple"
+ ],
+ {
+ "title_aux": "Auto-MBW"
+ }
+ ],
+ "https://github.com/taabata/Comfy_Syrian_Falcon_Nodes/raw/main/SyrianFalconNodes.py": [
+ [
+ "CompositeImage",
+ "KSamplerAlternate",
+ "KSamplerPromptEdit",
+ "KSamplerPromptEditAndAlternate",
+ "LoopBack",
+ "QRGenerate",
+ "WordAsImage"
+ ],
+ {
+ "title_aux": "Syrian Falcon Nodes"
+ }
+ ],
+ "https://github.com/taabata/LCM_Inpaint-Outpaint_Comfy": [
+ [
+ "ComfyNodesToSaveCanvas",
+ "FloatNumber",
+ "FreeU_LCM",
+ "ImageOutputToComfyNodes",
+ "ImageShuffle",
+ "ImageSwitch",
+ "LCMGenerate",
+ "LCMGenerate_ReferenceOnly",
+ "LCMGenerate_SDTurbo",
+ "LCMGenerate_img2img",
+ "LCMGenerate_img2img_IPAdapter",
+ "LCMGenerate_img2img_controlnet",
+ "LCMGenerate_inpaintv2",
+ "LCMGenerate_inpaintv3",
+ "LCMLoader",
+ "LCMLoader_RefInpaint",
+ "LCMLoader_ReferenceOnly",
+ "LCMLoader_SDTurbo",
+ "LCMLoader_controlnet",
+ "LCMLoader_controlnet_inpaint",
+ "LCMLoader_img2img",
+ "LCMLoraLoader_inpaint",
+ "LCMLoraLoader_ipadapter",
+ "LCMLora_inpaint",
+ "LCMLora_ipadapter",
+ "LCMT2IAdapter",
+ "LCM_IPAdapter",
+ "LCM_IPAdapter_inpaint",
+ "LCM_outpaint_prep",
+ "LoadImageNode_LCM",
+ "Loader_SegmindVega",
+ "OutpaintCanvasTool",
+ "SaveImage_Canvas",
+ "SaveImage_LCM",
+ "SaveImage_Puzzle",
+ "SaveImage_PuzzleV2",
+ "SegmindVega",
+ "SettingsSwitch",
+ "stitch"
+ ],
+ {
+ "title_aux": "LCM_Inpaint-Outpaint_Comfy"
+ }
+ ],
+ "https://github.com/talesofai/comfyui-browser": [
+ [
+ "DifyTextGenerator //Browser",
+ "LoadImageByUrl //Browser",
+ "SelectInputs //Browser",
+ "UploadToRemote //Browser",
+ "XyzPlot //Browser"
+ ],
+ {
+ "title_aux": "ComfyUI Browser"
+ }
+ ],
+ "https://github.com/theUpsider/ComfyUI-Logic": [
+ [
+ "Bool",
+ "Compare",
+ "DebugPrint",
+ "Float",
+ "If ANY execute A else B",
+ "Int",
+ "String"
+ ],
+ {
+ "title_aux": "ComfyUI-Logic"
+ }
+ ],
+ "https://github.com/theUpsider/ComfyUI-Styles_CSV_Loader": [
+ [
+ "Load Styles CSV"
+ ],
+ {
+ "title_aux": "Styles CSV Loader Extension for ComfyUI"
+ }
+ ],
+ "https://github.com/thecooltechguy/ComfyUI-MagicAnimate": [
+ [
+ "MagicAnimate",
+ "MagicAnimateModelLoader"
+ ],
+ {
+ "title_aux": "ComfyUI-MagicAnimate"
+ }
+ ],
+ "https://github.com/thecooltechguy/ComfyUI-Stable-Video-Diffusion": [
+ [
+ "SVDDecoder",
+ "SVDModelLoader",
+ "SVDSampler",
+ "SVDSimpleImg2Vid"
+ ],
+ {
+ "title_aux": "ComfyUI Stable Video Diffusion"
+ }
+ ],
+ "https://github.com/thedyze/save-image-extended-comfyui": [
+ [
+ "SaveImageExtended"
+ ],
+ {
+ "title_aux": "Save Image Extended for ComfyUI"
+ }
+ ],
+ "https://github.com/tocubed/ComfyUI-AudioReactor": [
+ [
+ "AudioFrameTransformBeats",
+ "AudioFrameTransformShadertoy",
+ "AudioLoadPath",
+ "Shadertoy"
+ ],
+ {
+ "title_aux": "ComfyUI-AudioReactor"
+ }
+ ],
+ "https://github.com/toyxyz/ComfyUI_toyxyz_test_nodes": [
+ [
+ "CaptureWebcam",
+ "LatentDelay",
+ "LoadWebcamImage",
+ "SaveImagetoPath"
+ ],
+ {
+ "title_aux": "ComfyUI_toyxyz_test_nodes"
+ }
+ ],
+ "https://github.com/trojblue/trNodes": [
+ [
+ "JpgConvertNode",
+ "trColorCorrection",
+ "trLayering",
+ "trRouter",
+ "trRouterLonger"
+ ],
+ {
+ "title_aux": "trNodes"
+ }
+ ],
+ "https://github.com/trumanwong/ComfyUI-NSFW-Detection": [
+ [
+ "NSFWDetection"
+ ],
+ {
+ "title_aux": "ComfyUI-NSFW-Detection"
+ }
+ ],
+ "https://github.com/ttulttul/ComfyUI-Iterative-Mixer": [
+ [
+ "Batch Unsampler",
+ "Iterative Mixing KSampler",
+ "Iterative Mixing KSampler Advanced",
+ "IterativeMixingSampler",
+ "IterativeMixingScheduler",
+ "IterativeMixingSchedulerAdvanced",
+ "Latent Batch Comparison Plot",
+ "Latent Batch Statistics Plot",
+ "MixingMaskGenerator"
+ ],
+ {
+ "title_aux": "ComfyUI Iterative Mixing Nodes"
+ }
+ ],
+ "https://github.com/ttulttul/ComfyUI-Tensor-Operations": [
+ [
+ "Image Match Normalize",
+ "Latent Match Normalize"
+ ],
+ {
+ "title_aux": "ComfyUI-Tensor-Operations"
+ }
+ ],
+ "https://github.com/tudal/Hakkun-ComfyUI-nodes/raw/main/hakkun_nodes.py": [
+ [
+ "Any Converter",
+ "Calculate Upscale",
+ "Image Resize To Height",
+ "Image Resize To Width",
+ "Image size to string",
+ "Load Random Image",
+ "Load Text",
+ "Multi Text Merge",
+ "Prompt Parser",
+ "Random Line",
+ "Random Line 4"
+ ],
+ {
+ "title_aux": "Hakkun-ComfyUI-nodes"
+ }
+ ],
+ "https://github.com/tusharbhutt/Endless-Nodes": [
+ [
+ "ESS Aesthetic Scoring",
+ "ESS Aesthetic Scoring Auto",
+ "ESS Combo Parameterizer",
+ "ESS Combo Parameterizer & Prompts",
+ "ESS Eight Input Random",
+ "ESS Eight Input Text Switch",
+ "ESS Float to Integer",
+ "ESS Float to Number",
+ "ESS Float to String",
+ "ESS Float to X",
+ "ESS Global Envoy",
+ "ESS Image Reward",
+ "ESS Image Reward Auto",
+ "ESS Image Saver with JSON",
+ "ESS Integer to Float",
+ "ESS Integer to Number",
+ "ESS Integer to String",
+ "ESS Integer to X",
+ "ESS Number to Float",
+ "ESS Number to Integer",
+ "ESS Number to String",
+ "ESS Number to X",
+ "ESS Parameterizer",
+ "ESS Parameterizer & Prompts",
+ "ESS Six Float Output",
+ "ESS Six Input Random",
+ "ESS Six Input Text Switch",
+ "ESS Six Integer IO Switch",
+ "ESS Six Integer IO Widget",
+ "ESS String to Float",
+ "ESS String to Integer",
+ "ESS String to Num",
+ "ESS String to X",
+ "\u267e\ufe0f\ud83c\udf0a\u2728 Image Saver with JSON"
+ ],
+ {
+ "author": "BiffMunky",
+ "description": "A small set of nodes I created for various numerical and text inputs. Features image saver with ability to have JSON saved to separate folder, parameter collection nodes, two aesthetic scoring models, switches for text and numbers, and conversion of string to numeric and vice versa.",
+ "nickname": "\u267e\ufe0f\ud83c\udf0a\u2728",
+ "title": "Endless \ufe0f\ud83c\udf0a\u2728 Nodes",
+ "title_aux": "Endless \ufe0f\ud83c\udf0a\u2728 Nodes"
+ }
+ ],
+ "https://github.com/twri/sdxl_prompt_styler": [
+ [
+ "SDXLPromptStyler",
+ "SDXLPromptStylerAdvanced"
+ ],
+ {
+ "title_aux": "SDXL Prompt Styler"
+ }
+ ],
+ "https://github.com/uarefans/ComfyUI-Fans": [
+ [
+ "Fans Prompt Styler Negative",
+ "Fans Prompt Styler Positive",
+ "Fans Styler",
+ "Fans Text Concatenate"
+ ],
+ {
+ "title_aux": "ComfyUI-Fans"
+ }
+ ],
+ "https://github.com/vanillacode314/SimpleWildcardsComfyUI": [
+ [
+ "SimpleConcat",
+ "SimpleWildcard"
+ ],
+ {
+ "author": "VanillaCode314",
+ "description": "A simple wildcard node for ComfyUI. Can also be used a style prompt node.",
+ "nickname": "Simple Wildcard",
+ "title": "Simple Wildcard",
+ "title_aux": "Simple Wildcard"
+ }
+ ],
+ "https://github.com/vienteck/ComfyUI-Chat-GPT-Integration": [
+ [
+ "ChatGptPrompt"
+ ],
+ {
+ "title_aux": "ComfyUI-Chat-GPT-Integration"
+ }
+ ],
+ "https://github.com/violet-chen/comfyui-psd2png": [
+ [
+ "Psd2Png"
+ ],
+ {
+ "title_aux": "comfyui-psd2png"
+ }
+ ],
+ "https://github.com/wallish77/wlsh_nodes": [
+ [
+ "Alternating KSampler (WLSH)",
+ "Build Filename String (WLSH)",
+ "CLIP +/- w/Text Unified (WLSH)",
+ "CLIP Positive-Negative (WLSH)",
+ "CLIP Positive-Negative XL (WLSH)",
+ "CLIP Positive-Negative XL w/Text (WLSH)",
+ "CLIP Positive-Negative w/Text (WLSH)",
+ "Checkpoint Loader w/Name (WLSH)",
+ "Empty Latent by Pixels (WLSH)",
+ "Empty Latent by Ratio (WLSH)",
+ "Empty Latent by Size (WLSH)",
+ "Generate Border Mask (WLSH)",
+ "Grayscale Image (WLSH)",
+ "Image Load with Metadata (WLSH)",
+ "Image Save with Prompt (WLSH)",
+ "Image Save with Prompt File (WLSH)",
+ "Image Save with Prompt/Info (WLSH)",
+ "Image Save with Prompt/Info File (WLSH)",
+ "Image Scale By Factor (WLSH)",
+ "Image Scale by Shortside (WLSH)",
+ "KSamplerAdvanced (WLSH)",
+ "Multiply Integer (WLSH)",
+ "Outpaint to Image (WLSH)",
+ "Prompt Weight (WLSH)",
+ "Quick Resolution Multiply (WLSH)",
+ "Resolutions by Ratio (WLSH)",
+ "SDXL Quick Empty Latent (WLSH)",
+ "SDXL Quick Image Scale (WLSH)",
+ "SDXL Resolutions (WLSH)",
+ "SDXL Steps (WLSH)",
+ "Save Positive Prompt(WLSH)",
+ "Save Prompt (WLSH)",
+ "Save Prompt/Info (WLSH)",
+ "Seed and Int (WLSH)",
+ "Seed to Number (WLSH)",
+ "Simple Pattern Replace (WLSH)",
+ "Simple String Combine (WLSH)",
+ "Time String (WLSH)",
+ "Upscale by Factor with Model (WLSH)",
+ "VAE Encode for Inpaint w/Padding (WLSH)"
+ ],
+ {
+ "title_aux": "wlsh_nodes"
+ }
+ ],
+ "https://github.com/whatbirdisthat/cyberdolphin": [
+ [
+ "\ud83d\udc2c Gradio ChatInterface",
+ "\ud83d\udc2c OpenAI Advanced",
+ "\ud83d\udc2c OpenAI Compatible",
+ "\ud83d\udc2c OpenAI DALL\u00b7E",
+ "\ud83d\udc2c OpenAI Simple"
+ ],
+ {
+ "title_aux": "cyberdolphin"
+ }
+ ],
+ "https://github.com/whmc76/ComfyUI-Openpose-Editor-Plus": [
+ [
+ "CDL.OpenPoseEditorPlus"
+ ],
+ {
+ "title_aux": "ComfyUI-Openpose-Editor-Plus"
+ }
+ ],
+ "https://github.com/wmatson/easy-comfy-nodes": [
+ [
+ "EZAssocDictNode",
+ "EZAssocImgNode",
+ "EZAssocStrNode",
+ "EZEmptyDictNode",
+ "EZHttpPostNode",
+ "EZLoadImgBatchFromUrlsNode",
+ "EZLoadImgFromUrlNode",
+ "EZRemoveImgBackground",
+ "EZS3Uploader",
+ "EZVideoCombiner"
+ ],
+ {
+ "title_aux": "easy-comfy-nodes"
+ }
+ ],
+ "https://github.com/wolfden/ComfyUi_PromptStylers": [
+ [
+ "SDXLPromptStylerAll",
+ "SDXLPromptStylerHorror",
+ "SDXLPromptStylerMisc",
+ "SDXLPromptStylerbyArtist",
+ "SDXLPromptStylerbyCamera",
+ "SDXLPromptStylerbyComposition",
+ "SDXLPromptStylerbyCyberpunkSurrealism",
+ "SDXLPromptStylerbyDepth",
+ "SDXLPromptStylerbyEnvironment",
+ "SDXLPromptStylerbyFantasySetting",
+ "SDXLPromptStylerbyFilter",
+ "SDXLPromptStylerbyFocus",
+ "SDXLPromptStylerbyImpressionism",
+ "SDXLPromptStylerbyLighting",
+ "SDXLPromptStylerbyMileHigh",
+ "SDXLPromptStylerbyMood",
+ "SDXLPromptStylerbyMythicalCreature",
+ "SDXLPromptStylerbyOriginal",
+ "SDXLPromptStylerbyQuantumRealism",
+ "SDXLPromptStylerbySteamPunkRealism",
+ "SDXLPromptStylerbySubject",
+ "SDXLPromptStylerbySurrealism",
+ "SDXLPromptStylerbyTheme",
+ "SDXLPromptStylerbyTimeofDay",
+ "SDXLPromptStylerbyWyvern",
+ "SDXLPromptbyCelticArt",
+ "SDXLPromptbyContemporaryNordicArt",
+ "SDXLPromptbyFashionArt",
+ "SDXLPromptbyGothicRevival",
+ "SDXLPromptbyIrishFolkArt",
+ "SDXLPromptbyRomanticNationalismArt",
+ "SDXLPromptbySportsArt",
+ "SDXLPromptbyStreetArt",
+ "SDXLPromptbyVikingArt",
+ "SDXLPromptbyWildlifeArt"
+ ],
+ {
+ "title_aux": "SDXL Prompt Styler (customized version by wolfden)"
+ }
+ ],
+ "https://github.com/wolfden/ComfyUi_String_Function_Tree": [
+ [
+ "StringFunction"
+ ],
+ {
+ "title_aux": "ComfyUi_String_Function_Tree"
+ }
+ ],
+ "https://github.com/wsippel/comfyui_ws/raw/main/sdxl_utility.py": [
+ [
+ "SDXLResolutionPresets"
+ ],
+ {
+ "title_aux": "SDXLResolutionPresets"
+ }
+ ],
+ "https://github.com/wutipong/ComfyUI-TextUtils": [
+ [
+ "Text Utils - Join N-Elements of String List",
+ "Text Utils - Join String List",
+ "Text Utils - Join Strings",
+ "Text Utils - Split String to List"
+ ],
+ {
+ "title_aux": "ComfyUI-TextUtils"
+ }
+ ],
+ "https://github.com/wwwins/ComfyUI-Simple-Aspect-Ratio": [
+ [
+ "SimpleAspectRatio"
+ ],
+ {
+ "title_aux": "ComfyUI-Simple-Aspect-Ratio"
+ }
+ ],
+ "https://github.com/xXAdonesXx/NodeGPT": [
+ [
+ "AppendAgent",
+ "Assistant",
+ "Chat",
+ "ChatGPT",
+ "CombineInput",
+ "Conditioning",
+ "CostumeAgent_1",
+ "CostumeAgent_2",
+ "CostumeMaster_1",
+ "Critic",
+ "DisplayString",
+ "DisplayTextAsImage",
+ "EVAL",
+ "Engineer",
+ "Executor",
+ "GroupChat",
+ "Image_generation_Conditioning",
+ "LM_Studio",
+ "LoadAPIconfig",
+ "LoadTXT",
+ "MemGPT",
+ "Memory_Excel",
+ "Model_1",
+ "Ollama",
+ "Output2String",
+ "Planner",
+ "Scientist",
+ "TextCombine",
+ "TextGeneration",
+ "TextGenerator",
+ "TextInput",
+ "TextOutput",
+ "UserProxy",
+ "llama-cpp",
+ "llava",
+ "oobaboogaOpenAI"
+ ],
+ {
+ "title_aux": "NodeGPT"
+ }
+ ],
+ "https://github.com/xiaoxiaodesha/hd_node": [
+ [
+ "Combine HDMasks",
+ "Cover HDMasks",
+ "HD FaceIndex",
+ "HD GetMaskArea",
+ "HD Image Levels",
+ "HD SmoothEdge",
+ "HD UltimateSDUpscale"
+ ],
+ {
+ "title_aux": "hd-nodes-comfyui"
+ }
+ ],
+ "https://github.com/yffyhk/comfyui_auto_danbooru": [
+ [
+ "GetDanbooru",
+ "TagEncode"
+ ],
+ {
+ "title_aux": "comfyui_auto_danbooru"
+ }
+ ],
+ "https://github.com/yolain/ComfyUI-Easy-Use": [
+ [
+ "dynamicThresholdingFull",
+ "easy LLLiteLoader",
+ "easy XYInputs: CFG Scale",
+ "easy XYInputs: Checkpoint",
+ "easy XYInputs: ControlNet",
+ "easy XYInputs: Denoise",
+ "easy XYInputs: Lora",
+ "easy XYInputs: ModelMergeBlocks",
+ "easy XYInputs: NegativeCond",
+ "easy XYInputs: NegativeCondList",
+ "easy XYInputs: PositiveCond",
+ "easy XYInputs: PositiveCondList",
+ "easy XYInputs: PromptSR",
+ "easy XYInputs: Sampler/Scheduler",
+ "easy XYInputs: Seeds++ Batch",
+ "easy XYInputs: Steps",
+ "easy XYPlot",
+ "easy XYPlotAdvanced",
+ "easy a1111Loader",
+ "easy boolean",
+ "easy cascadeLoader",
+ "easy cleanGpuUsed",
+ "easy comfyLoader",
+ "easy compare",
+ "easy controlnetLoader",
+ "easy controlnetLoaderADV",
+ "easy convertAnything",
+ "easy detailerFix",
+ "easy float",
+ "easy fooocusInpaintLoader",
+ "easy fullLoader",
+ "easy fullkSampler",
+ "easy globalSeed",
+ "easy hiresFix",
+ "easy if",
+ "easy imageInsetCrop",
+ "easy imagePixelPerfect",
+ "easy imageRemoveBG",
+ "easy imageSave",
+ "easy imageScaleDown",
+ "easy imageScaleDownBy",
+ "easy imageScaleDownToSize",
+ "easy imageSize",
+ "easy imageSizeByLongerSide",
+ "easy imageSizeBySide",
+ "easy imageSwitch",
+ "easy imageToMask",
+ "easy int",
+ "easy isSDXL",
+ "easy joinImageBatch",
+ "easy kSampler",
+ "easy kSamplerDownscaleUnet",
+ "easy kSamplerInpainting",
+ "easy kSamplerSDTurbo",
+ "easy kSamplerTiled",
+ "easy latentCompositeMaskedWithCond",
+ "easy latentNoisy",
+ "easy loraStack",
+ "easy negative",
+ "easy pipeIn",
+ "easy pipeOut",
+ "easy pipeToBasicPipe",
+ "easy portraitMaster",
+ "easy poseEditor",
+ "easy positive",
+ "easy preDetailerFix",
+ "easy preSampling",
+ "easy preSamplingAdvanced",
+ "easy preSamplingCascade",
+ "easy preSamplingDynamicCFG",
+ "easy preSamplingSdTurbo",
+ "easy promptList",
+ "easy rangeFloat",
+ "easy rangeInt",
+ "easy samLoaderPipe",
+ "easy seed",
+ "easy showAnything",
+ "easy showLoaderSettingsNames",
+ "easy showSpentTime",
+ "easy string",
+ "easy stylesSelector",
+ "easy svdLoader",
+ "easy ultralyticsDetectorPipe",
+ "easy unSampler",
+ "easy wildcards",
+ "easy xyAny",
+ "easy zero123Loader"
+ ],
+ {
+ "title_aux": "ComfyUI Easy Use"
+ }
+ ],
+ "https://github.com/yolanother/DTAIComfyImageSubmit": [
+ [
+ "DTSimpleSubmitImage",
+ "DTSubmitImage"
+ ],
+ {
+ "title_aux": "Comfy AI DoubTech.ai Image Sumission Node"
+ }
+ ],
+ "https://github.com/yolanother/DTAIComfyLoaders": [
+ [
+ "DTCLIPLoader",
+ "DTCLIPVisionLoader",
+ "DTCheckpointLoader",
+ "DTCheckpointLoaderSimple",
+ "DTControlNetLoader",
+ "DTDiffControlNetLoader",
+ "DTDiffusersLoader",
+ "DTGLIGENLoader",
+ "DTLoadImage",
+ "DTLoadImageMask",
+ "DTLoadLatent",
+ "DTLoraLoader",
+ "DTLorasLoader",
+ "DTStyleModelLoader",
+ "DTUpscaleModelLoader",
+ "DTVAELoader",
+ "DTunCLIPCheckpointLoader"
+ ],
+ {
+ "title_aux": "Comfy UI Online Loaders"
+ }
+ ],
+ "https://github.com/yolanother/DTAIComfyPromptAgent": [
+ [
+ "DTPromptAgent",
+ "DTPromptAgentString"
+ ],
+ {
+ "title_aux": "Comfy UI Prompt Agent"
+ }
+ ],
+ "https://github.com/yolanother/DTAIComfyQRCodes": [
+ [
+ "QRCode"
+ ],
+ {
+ "title_aux": "Comfy UI QR Codes"
+ }
+ ],
+ "https://github.com/yolanother/DTAIComfyVariables": [
+ [
+ "DTCLIPTextEncode",
+ "DTSingleLineStringVariable",
+ "DTSingleLineStringVariableNoClip",
+ "FloatVariable",
+ "IntVariable",
+ "StringFormat",
+ "StringFormatSingleLine",
+ "StringVariable"
+ ],
+ {
+ "title_aux": "Variables for Comfy UI"
+ }
+ ],
+ "https://github.com/yolanother/DTAIImageToTextNode": [
+ [
+ "DTAIImageToTextNode",
+ "DTAIImageUrlToTextNode"
+ ],
+ {
+ "title_aux": "Image to Text Node"
+ }
+ ],
+ "https://github.com/youyegit/tdxh_node_comfyui": [
+ [
+ "TdxhBoolNumber",
+ "TdxhClipVison",
+ "TdxhControlNetApply",
+ "TdxhControlNetProcessor",
+ "TdxhFloatInput",
+ "TdxhImageToSize",
+ "TdxhImageToSizeAdvanced",
+ "TdxhImg2ImgLatent",
+ "TdxhIntInput",
+ "TdxhLoraLoader",
+ "TdxhOnOrOff",
+ "TdxhReference",
+ "TdxhStringInput",
+ "TdxhStringInputTranslator"
+ ],
+ {
+ "title_aux": "tdxh_node_comfyui"
+ }
+ ],
+ "https://github.com/yuvraj108c/ComfyUI-Pronodes": [
+ [
+ "LoadYoutubeVideoNode"
+ ],
+ {
+ "title_aux": "ComfyUI-Pronodes"
+ }
+ ],
+ "https://github.com/yuvraj108c/ComfyUI-Whisper": [
+ [
+ "Add Subtitles To Background",
+ "Add Subtitles To Frames",
+ "Apply Whisper",
+ "Resize Cropped Subtitles"
+ ],
+ {
+ "title_aux": "ComfyUI Whisper"
+ }
+ ],
+ "https://github.com/zcfrank1st/Comfyui-Toolbox": [
+ [
+ "PreviewJson",
+ "PreviewVideo",
+ "SaveJson",
+ "TestJsonPreview"
+ ],
+ {
+ "title_aux": "Comfyui-Toolbox"
+ }
+ ],
+ "https://github.com/zcfrank1st/Comfyui-Yolov8": [
+ [
+ "Yolov8Detection",
+ "Yolov8Segmentation"
+ ],
+ {
+ "title_aux": "ComfyUI Yolov8"
+ }
+ ],
+ "https://github.com/zcfrank1st/comfyui_visual_anagrams": [
+ [
+ "VisualAnagramsAnimate",
+ "VisualAnagramsSample"
+ ],
+ {
+ "title_aux": "comfyui_visual_anagram"
+ }
+ ],
+ "https://github.com/zer0TF/cute-comfy": [
+ [
+ "Cute.Placeholder"
+ ],
+ {
+ "title_aux": "Cute Comfy"
+ }
+ ],
+ "https://github.com/zfkun/ComfyUI_zfkun": [
+ [
+ "ZFLoadImagePath",
+ "ZFPreviewText",
+ "ZFPreviewTextMultiline",
+ "ZFShareScreen",
+ "ZFTextTranslation"
+ ],
+ {
+ "title_aux": "ComfyUI_zfkun"
+ }
+ ],
+ "https://github.com/zhongpei/ComfyUI-InstructIR": [
+ [
+ "InstructIRProcess",
+ "LoadInstructIRModel"
+ ],
+ {
+ "title_aux": "ComfyUI for InstructIR"
+ }
+ ],
+ "https://github.com/zhongpei/Comfyui_image2prompt": [
+ [
+ "Image2Text",
+ "LoadImage2TextModel"
+ ],
+ {
+ "title_aux": "Comfyui_image2prompt"
+ }
+ ],
+ "https://github.com/zhuanqianfish/ComfyUI-EasyNode": [
+ [
+ "EasyCaptureNode",
+ "EasyVideoOutputNode",
+ "SendImageWebSocket"
+ ],
+ {
+ "title_aux": "EasyCaptureNode for ComfyUI"
+ }
+ ],
+ "https://raw.githubusercontent.com/throttlekitty/SDXLCustomAspectRatio/main/SDXLAspectRatio.py": [
+ [
+ "SDXLAspectRatio"
+ ],
+ {
+ "title_aux": "SDXLCustomAspectRatio"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI-Manager/node_db/new/model-list.json b/custom_nodes/ComfyUI-Manager/node_db/new/model-list.json
new file mode 100644
index 0000000000000000000000000000000000000000..dfabc35751ae67c9bc76b8316bc0b6d8c1313d33
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/node_db/new/model-list.json
@@ -0,0 +1,819 @@
+{
+ "models": [
+ {
+ "name": "stabilityai/Stable Cascade: effnet_encoder.safetensors (VAE)",
+ "type": "VAE",
+ "base": "Stable Cascade",
+ "save_path": "vae/Stable-Cascade",
+ "description": "[81.5MB] Stable Cascade: effnet_encoder.\nVAE encoder for stage_c latent.",
+ "reference": "https://huggingface.co/stabilityai/stable-cascade",
+ "filename": "effnet_encoder.safetensors",
+ "url": "https://huggingface.co/stabilityai/stable-cascade/resolve/main/effnet_encoder.safetensors"
+ },
+ {
+ "name": "stabilityai/Stable Cascade: stage_a.safetensors (VAE)",
+ "type": "VAE",
+ "base": "Stable Cascade",
+ "save_path": "vae/Stable-Cascade",
+ "description": "[73.7MB] Stable Cascade: stage_a",
+ "reference": "https://huggingface.co/stabilityai/stable-cascade",
+ "filename": "stage_a.safetensors",
+ "url": "https://huggingface.co/stabilityai/stable-cascade/resolve/main/stage_a.safetensors"
+ },
+ {
+ "name": "stabilityai/Stable Cascade: stage_b.safetensors (UNET)",
+ "type": "unet",
+ "base": "Stable Cascade",
+ "save_path": "unet/Stable-Cascade",
+ "description": "[6.25GB] Stable Cascade: stage_b",
+ "reference": "https://huggingface.co/stabilityai/stable-cascade",
+ "filename": "stage_b.safetensors",
+ "url": "https://huggingface.co/stabilityai/stable-cascade/resolve/main/stage_b.safetensors"
+ },
+ {
+ "name": "stabilityai/Stable Cascade: stage_b_bf16.safetensors (UNET)",
+ "type": "unet",
+ "base": "Stable Cascade",
+ "save_path": "unet/Stable-Cascade",
+ "description": "[3.13GB] Stable Cascade: stage_b/bf16",
+ "reference": "https://huggingface.co/stabilityai/stable-cascade",
+ "filename": "stage_b_bf16.safetensors",
+ "url": "https://huggingface.co/stabilityai/stable-cascade/resolve/main/stage_b_bf16.safetensors"
+ },
+ {
+ "name": "stabilityai/Stable Cascade: stage_b_lite.safetensors (UNET)",
+ "type": "unet",
+ "base": "Stable Cascade",
+ "save_path": "unet/Stable-Cascade",
+ "description": "[2.8GB] Stable Cascade: stage_b/lite",
+ "reference": "https://huggingface.co/stabilityai/stable-cascade",
+ "filename": "stage_b_lite.safetensors",
+ "url": "https://huggingface.co/stabilityai/stable-cascade/resolve/main/stage_b_lite.safetensors"
+ },
+ {
+ "name": "stabilityai/Stable Cascade: stage_b_lite.safetensors (UNET)",
+ "type": "unet",
+ "base": "Stable Cascade",
+ "save_path": "unet/Stable-Cascade",
+ "description": "[1.4GB] Stable Cascade: stage_b/bf16,lite",
+ "reference": "https://huggingface.co/stabilityai/stable-cascade",
+ "filename": "stage_b_lite_bf16.safetensors",
+ "url": "https://huggingface.co/stabilityai/stable-cascade/resolve/main/stage_b_lite_bf16.safetensors"
+ },
+ {
+ "name": "stabilityai/Stable Cascade: stage_c.safetensors (UNET)",
+ "type": "unet",
+ "base": "Stable Cascade",
+ "save_path": "unet/Stable-Cascade",
+ "description": "[14.4GB] Stable Cascade: stage_c",
+ "reference": "https://huggingface.co/stabilityai/stable-cascade",
+ "filename": "stage_c.safetensors",
+ "url": "https://huggingface.co/stabilityai/stable-cascade/resolve/main/stage_c.safetensors"
+ },
+ {
+ "name": "stabilityai/Stable Cascade: stage_c_bf16.safetensors (UNET)",
+ "type": "unet",
+ "base": "Stable Cascade",
+ "save_path": "unet/Stable-Cascade",
+ "description": "[7.18GB] Stable Cascade: stage_c/bf16",
+ "reference": "https://huggingface.co/stabilityai/stable-cascade",
+ "filename": "stage_c_bf16.safetensors",
+ "url": "https://huggingface.co/stabilityai/stable-cascade/resolve/main/stage_c_bf16.safetensors"
+ },
+ {
+ "name": "stabilityai/Stable Cascade: stage_c_lite.safetensors (UNET)",
+ "type": "unet",
+ "base": "Stable Cascade",
+ "save_path": "unet/Stable-Cascade",
+ "description": "[4.12GB] Stable Cascade: stage_c/lite",
+ "reference": "https://huggingface.co/stabilityai/stable-cascade",
+ "filename": "stage_c_lite.safetensors",
+ "url": "https://huggingface.co/stabilityai/stable-cascade/resolve/main/stage_c_lite.safetensors"
+ },
+ {
+ "name": "stabilityai/Stable Cascade: stage_c_lite.safetensors (UNET)",
+ "type": "unet",
+ "base": "Stable Cascade",
+ "save_path": "unet/Stable-Cascade",
+ "description": "[2.06GB] Stable Cascade: stage_c/bf16,lite",
+ "reference": "https://huggingface.co/stabilityai/stable-cascade",
+ "filename": "stage_c_lite_bf16.safetensors",
+ "url": "https://huggingface.co/stabilityai/stable-cascade/resolve/main/stage_c_lite_bf16.safetensors"
+ },
+ {
+ "name": "stabilityai/Stable Cascade: text_encoder (CLIP)",
+ "type": "clip",
+ "base": "Stable Cascade",
+ "save_path": "clip/Stable-Cascade",
+ "description": "[1.39GB] Stable Cascade: text_encoder",
+ "reference": "https://huggingface.co/stabilityai/stable-cascade",
+ "filename": "model.safetensors",
+ "url": "https://huggingface.co/stabilityai/stable-cascade/resolve/main/text_encoder/model.safetensors"
+ },
+
+ {
+ "name": "1k3d68.onnx",
+ "type": "insightface",
+ "base": "inswapper",
+ "save_path": "insightface/models/antelopev2",
+ "description": "Antelopev2 1k3d68.onnx model for InstantId. (InstantId needs all Antelopev2 models)",
+ "reference": "https://github.com/cubiq/ComfyUI_InstantID#installation",
+ "filename": "1k3d68.onnx",
+ "url": "https://huggingface.co/MonsterMMORPG/tools/resolve/main/1k3d68.onnx"
+ },
+ {
+ "name": "2d106det.onnx",
+ "type": "insightface",
+ "base": "inswapper",
+ "save_path": "insightface/models/antelopev2",
+ "description": "Antelopev2 2d106det.onnx model for InstantId. (InstantId needs all Antelopev2 models)",
+ "reference": "https://github.com/cubiq/ComfyUI_InstantID#installation",
+ "filename": "2d106det.onnx",
+ "url": "https://huggingface.co/MonsterMMORPG/tools/resolve/main/2d106det.onnx"
+ },
+ {
+ "name": "genderage.onnx",
+ "type": "insightface",
+ "base": "inswapper",
+ "save_path": "insightface/models/antelopev2",
+ "description": "Antelopev2 genderage.onnx model for InstantId. (InstantId needs all Antelopev2 models)",
+ "reference": "https://github.com/cubiq/ComfyUI_InstantID#installation",
+ "filename": "genderage.onnx",
+ "url": "https://huggingface.co/MonsterMMORPG/tools/resolve/main/genderage.onnx"
+ },
+ {
+ "name": "glintr100.onnx",
+ "type": "insightface",
+ "base": "inswapper",
+ "save_path": "insightface/models/antelopev2",
+ "description": "Antelopev2 glintr100.onnx model for InstantId. (InstantId needs all Antelopev2 models)",
+ "reference": "https://github.com/cubiq/ComfyUI_InstantID#installation",
+ "filename": "glintr100.onnx",
+ "url": "https://huggingface.co/MonsterMMORPG/tools/resolve/main/glintr100.onnx"
+ },
+ {
+ "name": "scrfd_10g_bnkps.onnx",
+ "type": "insightface",
+ "base": "inswapper",
+ "save_path": "insightface/models/antelopev2",
+ "description": "Antelopev2 scrfd_10g_bnkps.onnx model for InstantId. (InstantId needs all Antelopev2 models)",
+ "reference": "https://github.com/cubiq/ComfyUI_InstantID#installation",
+ "filename": "scrfd_10g_bnkps.onnx",
+ "url": "https://huggingface.co/MonsterMMORPG/tools/resolve/main/scrfd_10g_bnkps.onnx"
+ },
+
+ {
+ "name": "photomaker-v1.bin",
+ "type": "photomaker",
+ "base": "SDXL",
+ "save_path": "photomaker",
+ "description": "PhotoMaker model. This model is compatible with SDXL.",
+ "reference": "https://huggingface.co/TencentARC/PhotoMaker",
+ "filename": "photomaker-v1.bin",
+ "url": "https://huggingface.co/TencentARC/PhotoMaker/resolve/main/photomaker-v1.bin"
+ },
+ {
+ "name": "ip-adapter-faceid_sdxl.bin",
+ "type": "IP-Adapter",
+ "base": "SD1.5",
+ "save_path": "ipadapter",
+ "description": "IP-Adapter-FaceID Model (SDXL) [ipadapter]",
+ "reference": "https://huggingface.co/h94/IP-Adapter-FaceID",
+ "filename": "ip-adapter-faceid_sdxl.bin",
+ "url": "https://huggingface.co/h94/IP-Adapter-FaceID/resolve/main/ip-adapter-faceid_sdxl.bin"
+ },
+ {
+ "name": "ip-adapter-faceid-plusv2_sdxl.bin",
+ "type": "IP-Adapter",
+ "base": "SD1.5",
+ "save_path": "ipadapter",
+ "description": "IP-Adapter-FaceID Plus Model (SDXL) [ipadapter]",
+ "reference": "https://huggingface.co/h94/IP-Adapter-FaceID",
+ "filename": "ip-adapter-faceid-plusv2_sdxl.bin",
+ "url": "https://huggingface.co/h94/IP-Adapter-FaceID/resolve/main/ip-adapter-faceid-plusv2_sdxl.bin"
+ },
+ {
+ "name": "ip-adapter-faceid_sdxl_lora.safetensors",
+ "type": "lora",
+ "base": "SDXL",
+ "save_path": "loras/ipadapter",
+ "description": "IP-Adapter-FaceID LoRA Model (SDXL) [ipadapter]",
+ "reference": "https://huggingface.co/h94/IP-Adapter-FaceID",
+ "filename": "ip-adapter-faceid_sdxl_lora.safetensors",
+ "url": "https://huggingface.co/h94/IP-Adapter-FaceID/resolve/main/ip-adapter-faceid_sdxl_lora.safetensors"
+ },
+ {
+ "name": "ip-adapter-faceid-plusv2_sdxl_lora.safetensors",
+ "type": "lora",
+ "base": "SDXL",
+ "save_path": "loras/ipadapter",
+ "description": "IP-Adapter-FaceID-Plus V2 LoRA Model (SDXL) [ipadapter]",
+ "reference": "https://huggingface.co/h94/IP-Adapter-FaceID",
+ "filename": "ip-adapter-faceid-plusv2_sdxl_lora.safetensors",
+ "url": "https://huggingface.co/h94/IP-Adapter-FaceID/resolve/main/ip-adapter-faceid-plusv2_sdxl_lora.safetensors"
+ },
+
+ {
+ "name": "TencentARC/motionctrl.pth",
+ "type": "checkpoints",
+ "base": "MotionCtrl",
+ "save_path": "checkpoints/motionctrl",
+ "description": "To use the ComfyUI-MotionCtrl extension, downloading this model is required.",
+ "reference": "https://huggingface.co/TencentARC/MotionCtrl",
+ "filename": "motionctrl.pth",
+ "url": "https://huggingface.co/TencentARC/MotionCtrl/resolve/main/motionctrl.pth"
+ },
+ {
+ "name": "ip-adapter-faceid-plusv2_sd15.bin",
+ "type": "IP-Adapter",
+ "base": "SD1.5",
+ "save_path": "ipadapter",
+ "description": "IP-Adapter-FaceID-Plus V2 Model (SD1.5)",
+ "reference": "https://huggingface.co/h94/IP-Adapter-FaceID",
+ "filename": "ip-adapter-faceid-plusv2_sd15.bin",
+ "url": "https://huggingface.co/h94/IP-Adapter-FaceID/resolve/main/ip-adapter-faceid-plusv2_sd15.bin"
+ },
+ {
+ "name": "ip-adapter-faceid-plusv2_sd15_lora.safetensors",
+ "type": "lora",
+ "base": "SD1.5",
+ "save_path": "loras/ipadapter",
+ "description": "IP-Adapter-FaceID-Plus V2 LoRA Model (SD1.5)",
+ "reference": "https://huggingface.co/h94/IP-Adapter-FaceID",
+ "filename": "ip-adapter-faceid-plusv2_sd15_lora.safetensors",
+ "url": "https://huggingface.co/h94/IP-Adapter-FaceID/resolve/main/ip-adapter-faceid-plusv2_sd15_lora.safetensors"
+ },
+ {
+ "name": "ip-adapter-faceid-plus_sd15_lora.safetensors",
+ "type": "lora",
+ "base": "SD1.5",
+ "save_path": "loras/ipadapter",
+ "description": "IP-Adapter-FaceID Plus LoRA Model (SD1.5) [ipadapter]",
+ "reference": "https://huggingface.co/h94/IP-Adapter-FaceID",
+ "filename": "ip-adapter-faceid-plus_sd15_lora.safetensors",
+ "url": "https://huggingface.co/h94/IP-Adapter-FaceID/resolve/main/ip-adapter-faceid-plus_sd15_lora.safetensors"
+ },
+
+ {
+ "name": "ControlNet-HandRefiner-pruned (inpaint-depth-hand; fp16)",
+ "type": "controlnet",
+ "base": "SD1.5",
+ "save_path": "default",
+ "description": "This inpaint-depth controlnet model is specialized for the hand refiner.",
+ "reference": "https://huggingface.co/hr16/ControlNet-HandRefiner-pruned",
+ "filename": "control_sd15_inpaint_depth_hand_fp16.safetensors",
+ "url": "https://huggingface.co/hr16/ControlNet-HandRefiner-pruned/resolve/main/control_sd15_inpaint_depth_hand_fp16.safetensors"
+ },
+ {
+ "name": "stabilityai/stable-diffusion-x4-upscaler",
+ "type": "checkpoints",
+ "base": "upscale",
+ "save_path": "checkpoints/upscale",
+ "description": "[3.53GB] This upscaling model is a latent text-guided diffusion model and should be used with SD_4XUpscale_Conditioning and KSampler.",
+ "reference": "https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler",
+ "filename": "x4-upscaler-ema.safetensors",
+ "url": "https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler/resolve/main/x4-upscaler-ema.safetensors"
+ },
+ {
+            "name": "LDSR (Latent Diffusion Super Resolution)",
+ "type": "upscale",
+ "base": "upscale",
+ "save_path": "upscale_models/ldsr",
+            "description": "LDSR upscale model. This model can be used through the [a/ComfyUI-Flowty-LDSR](https://github.com/flowtyone/ComfyUI-Flowty-LDSR) extension.",
+ "reference": "https://github.com/CompVis/latent-diffusion",
+ "filename": "last.ckpt",
+ "url": "https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1"
+ },
+ {
+ "name": "control_boxdepth_LooseControlfp16 (fp16)",
+ "type": "controlnet",
+ "base": "SD1.5",
+ "save_path": "default",
+ "description": "Loose ControlNet model",
+ "reference": "https://huggingface.co/ioclab/LooseControl_WebUICombine",
+ "filename": "control_boxdepth_LooseControlfp16.safetensors",
+ "url": "https://huggingface.co/ioclab/LooseControl_WebUICombine/resolve/main/control_boxdepth_LooseControlfp16.safetensors"
+ },
+
+ {
+ "name": "ip-adapter-faceid-portrait_sd15.bin",
+ "type": "IP-Adapter",
+ "base": "SD1.5",
+ "save_path": "ipadapter",
+ "description": "IP-Adapter-FaceID Portrait Model (SD1.5) [ipadapter]",
+ "reference": "https://huggingface.co/h94/IP-Adapter-FaceID",
+ "filename": "ip-adapter-faceid-portrait_sd15.bin",
+ "url": "https://huggingface.co/h94/IP-Adapter-FaceID/resolve/main/ip-adapter-faceid-portrait_sd15.bin"
+ },
+ {
+ "name": "ip-adapter-faceid-plus_sd15.bin",
+ "type": "IP-Adapter",
+ "base": "SD1.5",
+ "save_path": "ipadapter",
+ "description": "IP-Adapter-FaceID Plus Model (SD1.5) [ipadapter]",
+ "reference": "https://huggingface.co/h94/IP-Adapter-FaceID",
+ "filename": "ip-adapter-faceid-plus_sd15.bin",
+ "url": "https://huggingface.co/h94/IP-Adapter-FaceID/resolve/main/ip-adapter-faceid-plus_sd15.bin"
+ },
+ {
+ "name": "ip-adapter-faceid_sd15.bin",
+ "type": "IP-Adapter",
+ "base": "SD1.5",
+ "save_path": "ipadapter",
+ "description": "IP-Adapter-FaceID Model (SD1.5)",
+ "reference": "https://huggingface.co/h94/IP-Adapter-FaceID",
+ "filename": "ip-adapter-faceid_sd15.bin",
+ "url": "https://huggingface.co/h94/IP-Adapter-FaceID/resolve/main/ip-adapter-faceid_sd15.bin"
+ },
+ {
+ "name": "ip-adapter-faceid_sd15_lora.safetensors",
+ "type": "lora",
+ "base": "SD1.5",
+ "save_path": "loras/ipadapter",
+ "description": "IP-Adapter-FaceID LoRA Model (SD1.5)",
+ "reference": "https://huggingface.co/h94/IP-Adapter-FaceID",
+ "filename": "ip-adapter-faceid_sd15_lora.safetensors",
+ "url": "https://huggingface.co/h94/IP-Adapter-FaceID/resolve/main/ip-adapter-faceid_sd15_lora.safetensors"
+ },
+
+ {
+ "name": "LongAnimatediff/lt_long_mm_16_64_frames_v1.1.ckpt (ComfyUI-AnimateDiff-Evolved) (Updated path)",
+ "type": "animatediff",
+ "base": "SD1.x",
+ "save_path": "animatediff_models",
+ "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node.",
+ "reference": "https://huggingface.co/Lightricks/LongAnimateDiff",
+ "filename": "lt_long_mm_16_64_frames_v1.1.ckpt",
+ "url": "https://huggingface.co/Lightricks/LongAnimateDiff/resolve/main/lt_long_mm_16_64_frames_v1.1.ckpt"
+ },
+
+ {
+ "name": "animatediff/v3_sd15_sparsectrl_rgb.ckpt (ComfyUI-AnimateDiff-Evolved)",
+ "type": "controlnet",
+ "base": "SD1.x",
+ "save_path": "controlnet/SD1.5/animatediff",
+ "description": "AnimateDiff SparseCtrl RGB ControlNet model",
+ "reference": "https://huggingface.co/guoyww/animatediff",
+ "filename": "v3_sd15_sparsectrl_rgb.ckpt",
+ "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v3_sd15_sparsectrl_rgb.ckpt"
+ },
+ {
+ "name": "animatediff/v3_sd15_sparsectrl_scribble.ckpt",
+ "type": "controlnet",
+ "base": "SD1.x",
+ "save_path": "controlnet/SD1.5/animatediff",
+ "description": "AnimateDiff SparseCtrl Scribble ControlNet model",
+ "reference": "https://huggingface.co/guoyww/animatediff",
+ "filename": "v3_sd15_sparsectrl_scribble.ckpt",
+ "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v3_sd15_sparsectrl_scribble.ckpt"
+ },
+ {
+ "name": "animatediff/v3_sd15_mm.ckpt (ComfyUI-AnimateDiff-Evolved)",
+ "type": "animatediff",
+ "base": "SD1.x",
+ "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/models",
+ "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)",
+ "reference": "https://huggingface.co/guoyww/animatediff",
+ "filename": "v3_sd15_mm.ckpt",
+ "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v3_sd15_mm.ckpt"
+ },
+ {
+ "name": "animatediff/v3_sd15_adapter.ckpt",
+ "type": "lora",
+ "base": "SD1.x",
+ "save_path": "loras/SD1.5/animatediff",
+ "description": "AnimateDiff Adapter LoRA (SD1.5)",
+ "reference": "https://huggingface.co/guoyww/animatediff",
+ "filename": "v3_sd15_adapter.ckpt",
+ "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v3_sd15_adapter.ckpt"
+ },
+
+ {
+ "name": "Segmind-Vega",
+ "type": "checkpoints",
+ "base": "segmind-vega",
+ "save_path": "checkpoints/segmind-vega",
+ "description": "The Segmind-Vega Model is a distilled version of the Stable Diffusion XL (SDXL), offering a remarkable 70% reduction in size and an impressive 100% speedup while retaining high-quality text-to-image generation capabilities.",
+ "reference": "https://huggingface.co/segmind/Segmind-Vega",
+ "filename": "segmind-vega.safetensors",
+ "url": "https://huggingface.co/segmind/Segmind-Vega/resolve/main/segmind-vega.safetensors"
+ },
+ {
+ "name": "Segmind-VegaRT - Latent Consistency Model (LCM) LoRA of Segmind-Vega",
+ "type": "lora",
+ "base": "segmind-vega",
+ "save_path": "loras/segmind-vega",
+            "description": "Segmind-VegaRT is a distilled consistency adapter for Segmind-Vega that reduces the number of inference steps to between 2 and 8.",
+ "reference": "https://huggingface.co/segmind/Segmind-VegaRT",
+ "filename": "pytorch_lora_weights.safetensors",
+ "url": "https://huggingface.co/segmind/Segmind-VegaRT/resolve/main/pytorch_lora_weights.safetensors"
+ },
+
+ {
+ "name": "stabilityai/Stable Zero123",
+ "type": "zero123",
+ "base": "zero123",
+ "save_path": "checkpoints/zero123",
+ "description": "Stable Zero123 is a model for view-conditioned image generation based on [a/Zero123](https://github.com/cvlab-columbia/zero123).",
+ "reference": "https://huggingface.co/stabilityai/stable-zero123",
+ "filename": "stable_zero123.ckpt",
+ "url": "https://huggingface.co/stabilityai/stable-zero123/resolve/main/stable_zero123.ckpt"
+ },
+ {
+ "name": "LongAnimatediff/lt_long_mm_32_frames.ckpt (ComfyUI-AnimateDiff-Evolved) (Updated path)",
+ "type": "animatediff",
+ "base": "SD1.x",
+ "save_path": "animatediff_models",
+ "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node.",
+ "reference": "https://huggingface.co/Lightricks/LongAnimateDiff",
+ "filename": "lt_long_mm_32_frames.ckpt",
+ "url": "https://huggingface.co/Lightricks/LongAnimateDiff/resolve/main/lt_long_mm_32_frames.ckpt"
+ },
+ {
+ "name": "LongAnimatediff/lt_long_mm_16_64_frames.ckpt (ComfyUI-AnimateDiff-Evolved) (Updated path)",
+ "type": "animatediff",
+ "base": "SD1.x",
+ "save_path": "animatediff_models",
+ "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node.",
+ "reference": "https://huggingface.co/Lightricks/LongAnimateDiff",
+ "filename": "lt_long_mm_16_64_frames.ckpt",
+ "url": "https://huggingface.co/Lightricks/LongAnimateDiff/resolve/main/lt_long_mm_16_64_frames.ckpt"
+ },
+ {
+ "name": "ip-adapter_sd15.safetensors",
+ "type": "IP-Adapter",
+ "base": "SD1.5",
+ "save_path": "ipadapter",
+ "description": "You can use this model in the [a/ComfyUI IPAdapter plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus) extension.",
+ "reference": "https://huggingface.co/h94/IP-Adapter",
+ "filename": "ip-adapter_sd15.safetensors",
+ "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter_sd15.safetensors"
+ },
+ {
+ "name": "ip-adapter_sd15_light.safetensors",
+ "type": "IP-Adapter",
+ "base": "SD1.5",
+ "save_path": "ipadapter",
+ "description": "You can use this model in the [a/ComfyUI IPAdapter plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus) extension.",
+ "reference": "https://huggingface.co/h94/IP-Adapter",
+ "filename": "ip-adapter_sd15_light.safetensors",
+ "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter_sd15_light.safetensors"
+ },
+ {
+ "name": "ip-adapter_sd15_vit-G.safetensors",
+ "type": "IP-Adapter",
+ "base": "SD1.5",
+ "save_path": "ipadapter",
+ "description": "You can use this model in the [a/ComfyUI IPAdapter plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus) extension.",
+ "reference": "https://huggingface.co/h94/IP-Adapter",
+ "filename": "ip-adapter_sd15_vit-G.safetensors",
+ "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter_sd15_vit-G.safetensors"
+ },
+ {
+ "name": "ip-adapter-plus_sd15.safetensors",
+ "type": "IP-Adapter",
+ "base": "SD1.5",
+ "save_path": "ipadapter",
+ "description": "You can use this model in the [a/ComfyUI IPAdapter plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus) extension.",
+ "reference": "https://huggingface.co/h94/IP-Adapter",
+ "filename": "ip-adapter-plus_sd15.safetensors",
+ "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-plus_sd15.safetensors"
+ },
+ {
+ "name": "ip-adapter-plus-face_sd15.safetensors",
+ "type": "IP-Adapter",
+ "base": "SD1.5",
+ "save_path": "ipadapter",
+ "description": "You can use this model in the [a/ComfyUI IPAdapter plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus) extension.",
+ "reference": "https://huggingface.co/h94/IP-Adapter",
+ "filename": "ip-adapter-plus-face_sd15.safetensors",
+ "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-plus-face_sd15.safetensors"
+ },
+ {
+ "name": "ip-adapter-full-face_sd15.safetensors",
+ "type": "IP-Adapter",
+ "base": "SD1.5",
+ "save_path": "ipadapter",
+ "description": "You can use this model in the [a/ComfyUI IPAdapter plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus) extension.",
+ "reference": "https://huggingface.co/h94/IP-Adapter",
+ "filename": "ip-adapter-full-face_sd15.safetensors",
+ "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/models/ip-adapter-full-face_sd15.safetensors"
+ },
+ {
+ "name": "ip-adapter_sdxl.safetensors",
+ "type": "IP-Adapter",
+ "base": "SDXL",
+ "save_path": "ipadapter",
+ "description": "You can use this model in the [a/ComfyUI IPAdapter plus](https://github.com/cubiq/ComfyUI_IPAdapter_plus) extension.",
+ "reference": "https://huggingface.co/h94/IP-Adapter",
+ "filename": "ip-adapter_sdxl.safetensors",
+ "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter_sdxl.safetensors"
+ },
+ {
+ "name": "ip-adapter_sdxl_vit-h.safetensors",
+ "type": "IP-Adapter",
+ "base": "SDXL",
+ "save_path": "ipadapter",
+            "description": "This model uses the SD1.5 CLIP vision encoder (ViT-H) even though it is intended for SDXL checkpoints.",
+ "reference": "https://huggingface.co/h94/IP-Adapter",
+ "filename": "ip-adapter_sdxl_vit-h.safetensors",
+ "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter_sdxl_vit-h.safetensors"
+ },
+ {
+ "name": "ip-adapter-plus_sdxl_vit-h.safetensors",
+ "type": "IP-Adapter",
+ "base": "SDXL",
+ "save_path": "ipadapter",
+            "description": "This model uses the SD1.5 CLIP vision encoder (ViT-H) even though it is intended for SDXL checkpoints.",
+ "reference": "https://huggingface.co/h94/IP-Adapter",
+ "filename": "ip-adapter-plus_sdxl_vit-h.safetensors",
+ "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter-plus_sdxl_vit-h.safetensors"
+ },
+ {
+ "name": "ip-adapter-plus-face_sdxl_vit-h.safetensors",
+ "type": "IP-Adapter",
+ "base": "SDXL",
+ "save_path": "ipadapter",
+            "description": "This model uses the SD1.5 CLIP vision encoder (ViT-H) even though it is intended for SDXL checkpoints.",
+ "reference": "https://huggingface.co/h94/IP-Adapter",
+ "filename": "ip-adapter-plus-face_sdxl_vit-h.safetensors",
+ "url": "https://huggingface.co/h94/IP-Adapter/resolve/main/sdxl_models/ip-adapter-plus-face_sdxl_vit-h.safetensors"
+ },
+
+ {
+ "name": "SDXL-Turbo 1.0 (fp16)",
+ "type": "checkpoints",
+ "base": "SDXL",
+ "save_path": "checkpoints/SDXL-TURBO",
+ "description": "[6.9GB] SDXL-Turbo 1.0 fp16",
+ "reference": "https://huggingface.co/stabilityai/sdxl-turbo",
+ "filename": "sd_xl_turbo_1.0_fp16.safetensors",
+ "url": "https://huggingface.co/stabilityai/sdxl-turbo/resolve/main/sd_xl_turbo_1.0_fp16.safetensors"
+ },
+ {
+ "name": "SDXL-Turbo 1.0",
+ "type": "checkpoints",
+ "base": "SDXL",
+ "save_path": "checkpoints/SDXL-TURBO",
+ "description": "[13.9GB] SDXL-Turbo 1.0",
+ "reference": "https://huggingface.co/stabilityai/sdxl-turbo",
+ "filename": "sd_xl_turbo_1.0.safetensors",
+ "url": "https://huggingface.co/stabilityai/sdxl-turbo/resolve/main/sd_xl_turbo_1.0.safetensors"
+ },
+ {
+ "name": "Stable Video Diffusion Image-to-Video",
+ "type": "checkpoints",
+ "base": "SVD",
+ "save_path": "checkpoints/SVD",
+ "description": "Stable Video Diffusion (SVD) Image-to-Video is a diffusion model that takes in a still image as a conditioning frame, and generates a video from it.\nNOTE: 14 frames @ 576x1024",
+ "reference": "https://huggingface.co/stabilityai/stable-video-diffusion-img2vid",
+ "filename": "svd.safetensors",
+ "url": "https://huggingface.co/stabilityai/stable-video-diffusion-img2vid/resolve/main/svd.safetensors"
+ },
+ {
+ "name": "Stable Video Diffusion Image-to-Video (XT)",
+ "type": "checkpoints",
+ "base": "SVD",
+ "save_path": "checkpoints/SVD",
+            "description": "Stable Video Diffusion (SVD) Image-to-Video is a diffusion model that takes in a still image as a conditioning frame, and generates a video from it.\nNOTE: 25 frames @ 576x1024",
+ "reference": "https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt",
+ "filename": "svd_xt.safetensors",
+ "url": "https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt/resolve/main/svd_xt.safetensors"
+ },
+
+ {
+ "name": "animatediff/mm_sdxl_v10_beta.ckpt (ComfyUI-AnimateDiff-Evolved) (Updated path)",
+ "type": "animatediff",
+ "base": "SDXL",
+ "save_path": "animatediff_models",
+ "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node.",
+ "reference": "https://huggingface.co/guoyww/animatediff",
+ "filename": "mm_sdxl_v10_beta.ckpt",
+ "url": "https://huggingface.co/guoyww/animatediff/resolve/main/mm_sdxl_v10_beta.ckpt"
+ },
+ {
+ "name": "animatediff/v2_lora_PanLeft.ckpt (ComfyUI-AnimateDiff-Evolved) (Updated path)",
+ "type": "motion lora",
+ "base": "SD1.x",
+ "save_path": "animatediff_motion_lora",
+ "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node.",
+ "reference": "https://huggingface.co/guoyww/animatediff",
+ "filename": "v2_lora_PanLeft.ckpt",
+ "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_PanLeft.ckpt"
+ },
+ {
+ "name": "animatediff/v2_lora_PanRight.ckpt (ComfyUI-AnimateDiff-Evolved) (Updated path)",
+ "type": "motion lora",
+ "base": "SD1.x",
+ "save_path": "animatediff_motion_lora",
+ "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node.",
+ "reference": "https://huggingface.co/guoyww/animatediff",
+ "filename": "v2_lora_PanRight.ckpt",
+ "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_PanRight.ckpt"
+ },
+ {
+ "name": "animatediff/v2_lora_RollingAnticlockwise.ckpt (ComfyUI-AnimateDiff-Evolved) (Updated path)",
+ "type": "motion lora",
+ "base": "SD1.x",
+ "save_path": "animatediff_motion_lora",
+ "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node.",
+ "reference": "https://huggingface.co/guoyww/animatediff",
+ "filename": "v2_lora_RollingAnticlockwise.ckpt",
+ "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_RollingAnticlockwise.ckpt"
+ },
+ {
+ "name": "animatediff/v2_lora_RollingClockwise.ckpt (ComfyUI-AnimateDiff-Evolved) (Updated path)",
+ "type": "motion lora",
+ "base": "SD1.x",
+ "save_path": "animatediff_motion_lora",
+ "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node.",
+ "reference": "https://huggingface.co/guoyww/animatediff",
+ "filename": "v2_lora_RollingClockwise.ckpt",
+ "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_RollingClockwise.ckpt"
+ },
+ {
+ "name": "animatediff/v2_lora_TiltDown.ckpt (ComfyUI-AnimateDiff-Evolved) (Updated path)",
+ "type": "motion lora",
+ "base": "SD1.x",
+ "save_path": "animatediff_motion_lora",
+ "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node.",
+ "reference": "https://huggingface.co/guoyww/animatediff",
+ "filename": "v2_lora_TiltDown.ckpt",
+ "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_TiltDown.ckpt"
+ },
+ {
+ "name": "animatediff/v2_lora_TiltUp.ckpt (ComfyUI-AnimateDiff-Evolved) (Updated path)",
+ "type": "motion lora",
+ "base": "SD1.x",
+ "save_path": "animatediff_motion_lora",
+ "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node.",
+ "reference": "https://huggingface.co/guoyww/animatediff",
+ "filename": "v2_lora_TiltUp.ckpt",
+ "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_TiltUp.ckpt"
+ },
+ {
+ "name": "animatediff/v2_lora_ZoomIn.ckpt (ComfyUI-AnimateDiff-Evolved) (Updated path)",
+ "type": "motion lora",
+ "base": "SD1.x",
+ "save_path": "animatediff_motion_lora",
+ "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node.",
+ "reference": "https://huggingface.co/guoyww/animatediff",
+ "filename": "v2_lora_ZoomIn.ckpt",
+ "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_ZoomIn.ckpt"
+ },
+ {
+ "name": "animatediff/v2_lora_ZoomOut.ckpt (ComfyUI-AnimateDiff-Evolved) (Updated path)",
+ "type": "motion lora",
+ "base": "SD1.x",
+ "save_path": "animatediff_motion_lora",
+ "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node.",
+ "reference": "https://huggingface.co/guoyww/animatediff",
+ "filename": "v2_lora_ZoomOut.ckpt",
+ "url": "https://huggingface.co/guoyww/animatediff/resolve/main/v2_lora_ZoomOut.ckpt"
+ },
+
+ {
+ "name": "CiaraRowles/TemporalNet1XL (1.0)",
+ "type": "controlnet",
+            "base": "SDXL",
+ "save_path": "controlnet/TemporalNet1XL",
+            "description": "TemporalNet1XL is a retraining of the TemporalNet1 ControlNet for Stable Diffusion XL.",
+ "reference": "https://huggingface.co/CiaraRowles/controlnet-temporalnet-sdxl-1.0",
+ "filename": "diffusion_pytorch_model.safetensors",
+ "url": "https://huggingface.co/CiaraRowles/controlnet-temporalnet-sdxl-1.0/resolve/main/diffusion_pytorch_model.safetensors"
+ },
+
+ {
+ "name": "LCM LoRA SD1.5",
+ "type": "lora",
+ "base": "SD1.5",
+ "save_path": "loras/lcm/SD1.5",
+ "description": "Latent Consistency LoRA for SD1.5",
+ "reference": "https://huggingface.co/latent-consistency/lcm-lora-sdv1-5",
+ "filename": "pytorch_lora_weights.safetensors",
+ "url": "https://huggingface.co/latent-consistency/lcm-lora-sdv1-5/resolve/main/pytorch_lora_weights.safetensors"
+ },
+ {
+ "name": "LCM LoRA SSD-1B",
+ "type": "lora",
+ "base": "SSD-1B",
+ "save_path": "loras/lcm/SSD-1B",
+ "description": "Latent Consistency LoRA for SSD-1B",
+ "reference": "https://huggingface.co/latent-consistency/lcm-lora-ssd-1b",
+ "filename": "pytorch_lora_weights.safetensors",
+ "url": "https://huggingface.co/latent-consistency/lcm-lora-ssd-1b/resolve/main/pytorch_lora_weights.safetensors"
+ },
+ {
+ "name": "LCM LoRA SDXL",
+ "type": "lora",
+            "base": "SDXL",
+ "save_path": "loras/lcm/SDXL",
+ "description": "Latent Consistency LoRA for SDXL",
+ "reference": "https://huggingface.co/latent-consistency/lcm-lora-sdxl",
+ "filename": "pytorch_lora_weights.safetensors",
+ "url": "https://huggingface.co/latent-consistency/lcm-lora-sdxl/resolve/main/pytorch_lora_weights.safetensors"
+ },
+
+ {
+ "name": "face_yolov8m-seg_60.pt (segm)",
+ "type": "Ultralytics",
+ "base": "Ultralytics",
+ "save_path": "ultralytics/segm",
+ "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.",
+ "reference": "https://github.com/hben35096/assets/releases/tag/yolo8",
+ "filename": "face_yolov8m-seg_60.pt",
+ "url": "https://github.com/hben35096/assets/releases/download/yolo8/face_yolov8m-seg_60.pt"
+ },
+ {
+ "name": "face_yolov8n-seg2_60.pt (segm)",
+ "type": "Ultralytics",
+ "base": "Ultralytics",
+ "save_path": "ultralytics/segm",
+ "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.",
+ "reference": "https://github.com/hben35096/assets/releases/tag/yolo8",
+ "filename": "face_yolov8n-seg2_60.pt",
+ "url": "https://github.com/hben35096/assets/releases/download/yolo8/face_yolov8n-seg2_60.pt"
+ },
+ {
+ "name": "hair_yolov8n-seg_60.pt (segm)",
+ "type": "Ultralytics",
+ "base": "Ultralytics",
+ "save_path": "ultralytics/segm",
+ "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.",
+ "reference": "https://github.com/hben35096/assets/releases/tag/yolo8",
+ "filename": "hair_yolov8n-seg_60.pt",
+ "url": "https://github.com/hben35096/assets/releases/download/yolo8/hair_yolov8n-seg_60.pt"
+ },
+ {
+ "name": "skin_yolov8m-seg_400.pt (segm)",
+ "type": "Ultralytics",
+ "base": "Ultralytics",
+ "save_path": "ultralytics/segm",
+ "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.",
+ "reference": "https://github.com/hben35096/assets/releases/tag/yolo8",
+ "filename": "skin_yolov8m-seg_400.pt",
+ "url": "https://github.com/hben35096/assets/releases/download/yolo8/skin_yolov8m-seg_400.pt"
+ },
+ {
+ "name": "skin_yolov8n-seg_400.pt (segm)",
+ "type": "Ultralytics",
+ "base": "Ultralytics",
+ "save_path": "ultralytics/segm",
+ "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.",
+ "reference": "https://github.com/hben35096/assets/releases/tag/yolo8",
+ "filename": "skin_yolov8n-seg_400.pt",
+ "url": "https://github.com/hben35096/assets/releases/download/yolo8/skin_yolov8n-seg_400.pt"
+ },
+ {
+ "name": "skin_yolov8n-seg_800.pt (segm)",
+ "type": "Ultralytics",
+ "base": "Ultralytics",
+ "save_path": "ultralytics/segm",
+ "description": "These are the available models in the UltralyticsDetectorProvider of Impact Pack.",
+ "reference": "https://github.com/hben35096/assets/releases/tag/yolo8",
+ "filename": "skin_yolov8n-seg_800.pt",
+ "url": "https://github.com/hben35096/assets/releases/download/yolo8/skin_yolov8n-seg_800.pt"
+ },
+
+ {
+ "name": "CiaraRowles/temporaldiff-v1-animatediff.ckpt (ComfyUI-AnimateDiff-Evolved) (Updated path)",
+ "type": "animatediff",
+ "base": "SD1.x",
+ "save_path": "animatediff_models",
+ "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node.",
+ "reference": "https://huggingface.co/CiaraRowles/TemporalDiff",
+ "filename": "temporaldiff-v1-animatediff.ckpt",
+ "url": "https://huggingface.co/CiaraRowles/TemporalDiff/resolve/main/temporaldiff-v1-animatediff.ckpt"
+ },
+ {
+ "name": "animatediff/mm_sd_v15_v2.ckpt (ComfyUI-AnimateDiff-Evolved)",
+ "type": "animatediff",
+ "base": "SD1.x",
+ "save_path": "custom_nodes/ComfyUI-AnimateDiff-Evolved/models",
+ "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node. (Note: Requires ComfyUI-Manager V0.24 or above)",
+ "reference": "https://huggingface.co/guoyww/animatediff",
+ "filename": "mm_sd_v15_v2.ckpt",
+ "url": "https://huggingface.co/guoyww/animatediff/resolve/main/mm_sd_v15_v2.ckpt"
+ },
+ {
+ "name": "AD_Stabilized_Motion/mm-Stabilized_high.pth (ComfyUI-AnimateDiff-Evolved) (Updated path)",
+ "type": "animatediff",
+ "base": "SD1.x",
+ "save_path": "animatediff_models",
+ "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node.",
+ "reference": "https://huggingface.co/manshoety/AD_Stabilized_Motion",
+ "filename": "mm-Stabilized_high.pth",
+ "url": "https://huggingface.co/manshoety/AD_Stabilized_Motion/resolve/main/mm-Stabilized_high.pth"
+ },
+ {
+ "name": "AD_Stabilized_Motion/mm-Stabilized_mid.pth (ComfyUI-AnimateDiff-Evolved) (Updated path)",
+ "type": "animatediff",
+ "base": "SD1.x",
+ "save_path": "animatediff_models",
+ "description": "Pressing 'install' directly downloads the model from the Kosinkadink/ComfyUI-AnimateDiff-Evolved extension node.",
+ "reference": "https://huggingface.co/manshoety/AD_Stabilized_Motion",
+ "filename": "mm-Stabilized_mid.pth",
+ "url": "https://huggingface.co/manshoety/AD_Stabilized_Motion/resolve/main/mm-Stabilized_mid.pth"
+ }
+ ]
+}
diff --git a/custom_nodes/ComfyUI-Manager/node_db/tutorial/custom-node-list.json b/custom_nodes/ComfyUI-Manager/node_db/tutorial/custom-node-list.json
new file mode 100644
index 0000000000000000000000000000000000000000..d191d7159a8aa4476bb5a4e46372d1e978ba453c
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/node_db/tutorial/custom-node-list.json
@@ -0,0 +1,124 @@
+{
+ "custom_nodes": [
+ {
+ "author": "Suzie1",
+ "title": "Guide To Making Custom Nodes in ComfyUI",
+ "reference": "https://github.com/Suzie1/ComfyUI_Guide_To_Making_Custom_Nodes",
+ "files": [
+ "https://github.com/Suzie1/ComfyUI_Guide_To_Making_Custom_Nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "There is a small node pack attached to this guide. This includes the init file and 3 nodes associated with the tutorials."
+ },
+ {
+ "author": "dynamixar",
+ "title": "Atluris",
+ "reference": "https://github.com/dynamixar/Atluris",
+ "files": [
+ "https://github.com/dynamixar/Atluris"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Random Line"
+ },
+ {
+ "author": "et118",
+ "title": "ComfyUI-ElGogh-Nodes",
+ "reference": "https://github.com/et118/ComfyUI-ElGogh-Nodes",
+ "files": [
+ "https://github.com/et118/ComfyUI-ElGogh-Nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:ElGogh Positive Prompt, ElGogh NEGATIVE Prompt, ElGogh Empty Latent Image, ElGogh Checkpoint Loader Simple"
+ },
+ {
+ "author": "LarryJane491",
+ "title": "Custom-Node-Base",
+ "reference": "https://github.com/LarryJane491/Custom-Node-Base",
+ "files": [
+ "https://github.com/LarryJane491/Custom-Node-Base"
+ ],
+ "install_type": "git-clone",
+            "description": "This project is an `empty` custom node that already sits in its own folder and serves as a base for building any custom node. Whenever you want to create one, download this repo, put it in custom_nodes, then rename things and fill it with code!"
+ },
+ {
+ "author": "foxtrot-roger",
+ "title": "comfyui-custom-nodes",
+ "reference": "https://github.com/foxtrot-roger/comfyui-custom-nodes",
+ "files": [
+ "https://github.com/foxtrot-roger/comfyui-custom-nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Tutorial nodes"
+ },
+ {
+ "author": "GraftingRayman",
+ "title": "ComfyUI-Trajectory",
+ "reference": "https://github.com/GraftingRayman/ComfyUI-Trajectory",
+ "files": [
+ "https://github.com/GraftingRayman/ComfyUI-Trajectory"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:GR Trajectory"
+ },
+ {
+ "author": "wailovet",
+ "title": "ComfyUI-WW",
+ "reference": "https://github.com/wailovet/ComfyUI-WW",
+ "files": [
+ "https://github.com/wailovet/ComfyUI-WW"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:WW_ImageResize"
+ },
+ {
+ "author": "bmz55",
+ "title": "bmz nodes",
+ "reference": "https://github.com/bmz55/comfyui-bmz-nodes",
+ "files": [
+ "https://github.com/bmz55/comfyui-bmz-nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Load Images From Dir With Name (Inspire - BMZ), Count Images In Dir (BMZ), Get Level Text (BMZ), Get Level Float (BMZ)"
+ },
+ {
+ "author": "azure-dragon-ai",
+ "title": "ComfyUI-HPSv2-Nodes",
+ "reference": "https://github.com/azure-dragon-ai/ComfyUI-HPSv2-Nodes",
+ "files": [
+ "https://github.com/azure-dragon-ai/ComfyUI-HPSv2-Nodes"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Loader, Image Processor, Text Processor, ImageScore"
+ },
+ {
+ "author": "kappa54m",
+            "title": "ComfyUI_Usability",
+ "reference": "https://github.com/kappa54m/ComfyUI_Usability",
+ "files": [
+ "https://github.com/kappa54m/ComfyUI_Usability"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Load Image Dedup"
+ },
+ {
+ "author": "IvanRybakov",
+ "title": "comfyui-node-int-to-string-convertor",
+ "reference": "https://github.com/IvanRybakov/comfyui-node-int-to-string-convertor",
+ "files": [
+ "https://github.com/IvanRybakov/comfyui-node-int-to-string-convertor"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:Int To String Convertor"
+ },
+ {
+ "author": "yowipr",
+ "title": "ComfyUI-Manual",
+ "reference": "https://github.com/yowipr/ComfyUI-Manual",
+ "files": [
+ "https://github.com/yowipr/ComfyUI-Manual"
+ ],
+ "install_type": "git-clone",
+ "description": "Nodes:M_Layer, M_Output"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI-Manager/node_db/tutorial/extension-node-map.json b/custom_nodes/ComfyUI-Manager/node_db/tutorial/extension-node-map.json
new file mode 100644
index 0000000000000000000000000000000000000000..9e26dfeeb6e641a33dae4961196235bdb965b21b
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/node_db/tutorial/extension-node-map.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI-Manager/node_db/tutorial/model-list.json b/custom_nodes/ComfyUI-Manager/node_db/tutorial/model-list.json
new file mode 100644
index 0000000000000000000000000000000000000000..8e3e1dc4858a08aa46190aa53ba320d565206cf4
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/node_db/tutorial/model-list.json
@@ -0,0 +1,3 @@
+{
+ "models": []
+}
diff --git a/custom_nodes/ComfyUI-Manager/node_db/tutorial/scan.sh b/custom_nodes/ComfyUI-Manager/node_db/tutorial/scan.sh
new file mode 100755
index 0000000000000000000000000000000000000000..5d8d8c48b6e3f48dc1491738c1226f574909c05d
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/node_db/tutorial/scan.sh
@@ -0,0 +1,4 @@
+#!/bin/bash
+source ../../../../venv/bin/activate
+rm .tmp/*.py > /dev/null 2>&1
+python ../../scanner.py
diff --git a/custom_nodes/ComfyUI-Manager/notebooks/comfyui_colab_with_manager.ipynb b/custom_nodes/ComfyUI-Manager/notebooks/comfyui_colab_with_manager.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..36bab4fe83f42901cb136c273806537e6b6baa0d
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/notebooks/comfyui_colab_with_manager.ipynb
@@ -0,0 +1,353 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "aaaaaaaaaa"
+ },
+ "source": [
+ "Git clone the repo and install the requirements. (ignore the pip errors about protobuf)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "bbbbbbbbbb"
+ },
+ "outputs": [],
+ "source": [
+ "# #@title Environment Setup\n",
+ "\n",
+ "from pathlib import Path\n",
+ "\n",
+ "OPTIONS = {}\n",
+ "\n",
+ "USE_GOOGLE_DRIVE = True #@param {type:\"boolean\"}\n",
+ "UPDATE_COMFY_UI = True #@param {type:\"boolean\"}\n",
+ "USE_COMFYUI_MANAGER = True #@param {type:\"boolean\"}\n",
+ "INSTALL_CUSTOM_NODES_DEPENDENCIES = True #@param {type:\"boolean\"}\n",
+ "OPTIONS['USE_GOOGLE_DRIVE'] = USE_GOOGLE_DRIVE\n",
+ "OPTIONS['UPDATE_COMFY_UI'] = UPDATE_COMFY_UI\n",
+ "OPTIONS['USE_COMFYUI_MANAGER'] = USE_COMFYUI_MANAGER\n",
+ "OPTIONS['INSTALL_CUSTOM_NODES_DEPENDENCIES'] = INSTALL_CUSTOM_NODES_DEPENDENCIES\n",
+ "\n",
+ "current_dir = !pwd\n",
+ "WORKSPACE = f\"{current_dir[0]}/ComfyUI\"\n",
+ "\n",
+ "if OPTIONS['USE_GOOGLE_DRIVE']:\n",
+ " !echo \"Mounting Google Drive...\"\n",
+ " %cd /\n",
+ "\n",
+ " from google.colab import drive\n",
+ " drive.mount('/content/drive')\n",
+ "\n",
+ " WORKSPACE = \"/content/drive/MyDrive/ComfyUI\"\n",
+ " %cd /content/drive/MyDrive\n",
+ "\n",
+ "![ ! -d $WORKSPACE ] && echo -= Initial setup ComfyUI =- && git clone https://github.com/comfyanonymous/ComfyUI\n",
+ "%cd $WORKSPACE\n",
+ "\n",
+ "if OPTIONS['UPDATE_COMFY_UI']:\n",
+ " !echo -= Updating ComfyUI =-\n",
+ " !git pull\n",
+ "\n",
+ "!echo -= Install dependencies =-\n",
+ "#Remove cu121 as it causes issues in Colab.\n",
+ "#!pip install xformers!=0.0.18 -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cu121 --extra-index-url https://download.pytorch.org/whl/cu118 --extra-index-url https://download.pytorch.org/whl/cu117\n",
+ "!pip3 install accelerate\n",
+ "!pip3 install einops transformers>=4.25.1 safetensors>=0.3.0 aiohttp pyyaml Pillow scipy tqdm psutil\n",
+ "!pip3 install xformers!=0.0.18 torch==2.1.0 torchvision torchaudio --index-url https://download.pytorch.org/whl/cu121\n",
+ "!pip3 install torchsde\n",
+ "\n",
+ "if OPTIONS['USE_COMFYUI_MANAGER']:\n",
+ " %cd custom_nodes\n",
+ " ![ ! -d ComfyUI-Manager ] && echo -= Initial setup ComfyUI-Manager =- && git clone https://github.com/ltdrdata/ComfyUI-Manager\n",
+ " %cd ComfyUI-Manager\n",
+ " !git pull\n",
+ "\n",
+ "%cd $WORKSPACE\n",
+ "\n",
+ "if OPTIONS['INSTALL_CUSTOM_NODES_DEPENDENCIES']:\n",
+ " !pwd\n",
+ " !echo -= Install custom nodes dependencies =-\n",
+ " ![ -f \"custom_nodes/ComfyUI-Manager/scripts/colab-dependencies.py\" ] && python \"custom_nodes/ComfyUI-Manager/scripts/colab-dependencies.py\"\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "cccccccccc"
+ },
+ "source": [
+ "Download some models/checkpoints/vae or custom comfyui nodes (uncomment the commands for the ones you want)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "dddddddddd"
+ },
+ "outputs": [],
+ "source": [
+ "# Checkpoints\n",
+ "\n",
+ "### SDXL\n",
+ "### I recommend these workflow examples: https://comfyanonymous.github.io/ComfyUI_examples/sdxl/\n",
+ "\n",
+ "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_base_1.0.safetensors -P ./models/checkpoints/\n",
+ "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0/resolve/main/sd_xl_refiner_1.0.safetensors -P ./models/checkpoints/\n",
+ "\n",
+ "# SDXL ReVision\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/clip_vision_g/resolve/main/clip_vision_g.safetensors -P ./models/clip_vision/\n",
+ "\n",
+ "# SD1.5\n",
+ "!wget -c https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt -P ./models/checkpoints/\n",
+ "\n",
+ "# SD2\n",
+ "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-2-1-base/resolve/main/v2-1_512-ema-pruned.safetensors -P ./models/checkpoints/\n",
+ "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-2-1/resolve/main/v2-1_768-ema-pruned.safetensors -P ./models/checkpoints/\n",
+ "\n",
+ "# Some SD1.5 anime style\n",
+ "#!wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix2/AbyssOrangeMix2_hard.safetensors -P ./models/checkpoints/\n",
+ "#!wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A1_orangemixs.safetensors -P ./models/checkpoints/\n",
+ "#!wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/Models/AbyssOrangeMix3/AOM3A3_orangemixs.safetensors -P ./models/checkpoints/\n",
+ "#!wget -c https://huggingface.co/Linaqruf/anything-v3.0/resolve/main/anything-v3-fp16-pruned.safetensors -P ./models/checkpoints/\n",
+ "\n",
+ "# Waifu Diffusion 1.5 (anime style SD2.x 768-v)\n",
+ "#!wget -c https://huggingface.co/waifu-diffusion/wd-1-5-beta3/resolve/main/wd-illusion-fp16.safetensors -P ./models/checkpoints/\n",
+ "\n",
+ "\n",
+ "# unCLIP models\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/illuminatiDiffusionV1_v11_unCLIP/resolve/main/illuminatiDiffusionV1_v11-unclip-h-fp16.safetensors -P ./models/checkpoints/\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/wd-1.5-beta2_unCLIP/resolve/main/wd-1-5-beta2-aesthetic-unclip-h-fp16.safetensors -P ./models/checkpoints/\n",
+ "\n",
+ "\n",
+ "# VAE\n",
+ "!wget -c https://huggingface.co/stabilityai/sd-vae-ft-mse-original/resolve/main/vae-ft-mse-840000-ema-pruned.safetensors -P ./models/vae/\n",
+ "#!wget -c https://huggingface.co/WarriorMama777/OrangeMixs/resolve/main/VAEs/orangemix.vae.pt -P ./models/vae/\n",
+ "#!wget -c https://huggingface.co/hakurei/waifu-diffusion-v1-4/resolve/main/vae/kl-f8-anime2.ckpt -P ./models/vae/\n",
+ "\n",
+ "\n",
+ "# Loras\n",
+ "#!wget -c https://civitai.com/api/download/models/10350 -O ./models/loras/theovercomer8sContrastFix_sd21768.safetensors #theovercomer8sContrastFix SD2.x 768-v\n",
+ "#!wget -c https://civitai.com/api/download/models/10638 -O ./models/loras/theovercomer8sContrastFix_sd15.safetensors #theovercomer8sContrastFix SD1.x\n",
+ "#!wget -c https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/resolve/main/sd_xl_offset_example-lora_1.0.safetensors -P ./models/loras/ #SDXL offset noise lora\n",
+ "\n",
+ "\n",
+ "# T2I-Adapter\n",
+ "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_depth_sd14v1.pth -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_seg_sd14v1.pth -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_sketch_sd14v1.pth -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_keypose_sd14v1.pth -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_openpose_sd14v1.pth -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_color_sd14v1.pth -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_canny_sd14v1.pth -P ./models/controlnet/\n",
+ "\n",
+ "# T2I Styles Model\n",
+ "#!wget -c https://huggingface.co/TencentARC/T2I-Adapter/resolve/main/models/t2iadapter_style_sd14v1.pth -P ./models/style_models/\n",
+ "\n",
+ "# CLIPVision model (needed for styles model)\n",
+ "#!wget -c https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/pytorch_model.bin -O ./models/clip_vision/clip_vit14.bin\n",
+ "\n",
+ "\n",
+ "# ControlNet\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11e_sd15_ip2p_fp16.safetensors -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11e_sd15_shuffle_fp16.safetensors -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_canny_fp16.safetensors -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11f1p_sd15_depth_fp16.safetensors -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_inpaint_fp16.safetensors -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_lineart_fp16.safetensors -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_mlsd_fp16.safetensors -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_normalbae_fp16.safetensors -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_openpose_fp16.safetensors -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_scribble_fp16.safetensors -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_seg_fp16.safetensors -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15_softedge_fp16.safetensors -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11p_sd15s2_lineart_anime_fp16.safetensors -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/ControlNet-v1-1_fp16_safetensors/resolve/main/control_v11u_sd15_tile_fp16.safetensors -P ./models/controlnet/\n",
+ "\n",
+ "# ControlNet SDXL\n",
+ "#!wget -c https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-canny-rank256.safetensors -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-depth-rank256.safetensors -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-recolor-rank256.safetensors -P ./models/controlnet/\n",
+ "#!wget -c https://huggingface.co/stabilityai/control-lora/resolve/main/control-LoRAs-rank256/control-lora-sketch-rank256.safetensors -P ./models/controlnet/\n",
+ "\n",
+ "# Controlnet Preprocessor nodes by Fannovel16\n",
+ "#!cd custom_nodes && git clone https://github.com/Fannovel16/comfy_controlnet_preprocessors; cd comfy_controlnet_preprocessors && python install.py\n",
+ "\n",
+ "\n",
+ "# GLIGEN\n",
+ "#!wget -c https://huggingface.co/comfyanonymous/GLIGEN_pruned_safetensors/resolve/main/gligen_sd14_textbox_pruned_fp16.safetensors -P ./models/gligen/\n",
+ "\n",
+ "\n",
+ "# ESRGAN upscale model\n",
+ "#!wget -c https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth -P ./models/upscale_models/\n",
+ "#!wget -c https://huggingface.co/sberbank-ai/Real-ESRGAN/resolve/main/RealESRGAN_x2.pth -P ./models/upscale_models/\n",
+ "#!wget -c https://huggingface.co/sberbank-ai/Real-ESRGAN/resolve/main/RealESRGAN_x4.pth -P ./models/upscale_models/\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "kkkkkkkkkkkkkkk"
+ },
+ "source": [
+ "### Run ComfyUI with cloudflared (Recommended Way)\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "jjjjjjjjjjjjjj"
+ },
+ "outputs": [],
+ "source": [
+ "!wget https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64.deb\n",
+ "!dpkg -i cloudflared-linux-amd64.deb\n",
+ "\n",
+ "import subprocess\n",
+ "import threading\n",
+ "import time\n",
+ "import socket\n",
+ "import urllib.request\n",
+ "\n",
+ "def iframe_thread(port):\n",
+ " while True:\n",
+ " time.sleep(0.5)\n",
+ " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
+ " result = sock.connect_ex(('127.0.0.1', port))\n",
+ " if result == 0:\n",
+ " break\n",
+ " sock.close()\n",
+ " print(\"\\nComfyUI finished loading, trying to launch cloudflared (if it gets stuck here cloudflared is having issues)\\n\")\n",
+ "\n",
+ " p = subprocess.Popen([\"cloudflared\", \"tunnel\", \"--url\", \"http://127.0.0.1:{}\".format(port)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n",
+ " for line in p.stderr:\n",
+ " l = line.decode()\n",
+ " if \"trycloudflare.com \" in l:\n",
+ " print(\"This is the URL to access ComfyUI:\", l[l.find(\"http\"):], end='')\n",
+ " #print(l, end='')\n",
+ "\n",
+ "\n",
+ "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n",
+ "\n",
+ "!python main.py --dont-print-server"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "kkkkkkkkkkkkkk"
+ },
+ "source": [
+ "### Run ComfyUI with localtunnel\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "jjjjjjjjjjjjj"
+ },
+ "outputs": [],
+ "source": [
+ "!npm install -g localtunnel\n",
+ "\n",
+ "import subprocess\n",
+ "import threading\n",
+ "import time\n",
+ "import socket\n",
+ "import urllib.request\n",
+ "\n",
+ "def iframe_thread(port):\n",
+ " while True:\n",
+ " time.sleep(0.5)\n",
+ " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
+ " result = sock.connect_ex(('127.0.0.1', port))\n",
+ " if result == 0:\n",
+ " break\n",
+ " sock.close()\n",
+ " print(\"\\nComfyUI finished loading, trying to launch localtunnel (if it gets stuck here localtunnel is having issues)\\n\")\n",
+ "\n",
+    "  print(\"The password/endpoint IP for localtunnel is:\", urllib.request.urlopen('https://ipv4.icanhazip.com').read().decode('utf8').strip(\"\\n\"))\n",
+ " p = subprocess.Popen([\"lt\", \"--port\", \"{}\".format(port)], stdout=subprocess.PIPE)\n",
+ " for line in p.stdout:\n",
+ " print(line.decode(), end='')\n",
+ "\n",
+ "\n",
+ "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n",
+ "\n",
+ "!python main.py --dont-print-server"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "gggggggggg"
+ },
+ "source": [
+ "### Run ComfyUI with colab iframe (use only in case the previous way with localtunnel doesn't work)\n",
+ "\n",
+    "You should see the UI appear in an iframe. If you get a 403 error, it's your Firefox settings or an extension that's messing things up.\n",
+ "\n",
+ "If you want to open it in another window use the link.\n",
+ "\n",
+ "Note that some UI features like live image previews won't work because the colab iframe blocks websockets."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "hhhhhhhhhh"
+ },
+ "outputs": [],
+ "source": [
+ "import threading\n",
+ "import time\n",
+ "import socket\n",
+ "def iframe_thread(port):\n",
+ " while True:\n",
+ " time.sleep(0.5)\n",
+ " sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n",
+ " result = sock.connect_ex(('127.0.0.1', port))\n",
+ " if result == 0:\n",
+ " break\n",
+ " sock.close()\n",
+ " from google.colab import output\n",
+ " output.serve_kernel_port_as_iframe(port, height=1024)\n",
+    "  print(\"To open it in a window you can use this link:\")\n",
+ " output.serve_kernel_port_as_window(port)\n",
+ "\n",
+ "threading.Thread(target=iframe_thread, daemon=True, args=(8188,)).start()\n",
+ "\n",
+ "!python main.py --dont-print-server"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "provenance": []
+ },
+ "gpuClass": "standard",
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/custom_nodes/ComfyUI-Manager/prestartup_script.py b/custom_nodes/ComfyUI-Manager/prestartup_script.py
new file mode 100644
index 0000000000000000000000000000000000000000..31c445e86560ad87f10e7a95bb129890a28d1589
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/prestartup_script.py
@@ -0,0 +1,525 @@
+import datetime
+import os
+import subprocess
+import sys
+import atexit
+import threading
+import re
+import locale
+import platform
+
+
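+# Make the bundled "glob" directory importable so that cm_global, the shared
+# state module used across ComfyUI-Manager components, can be loaded below.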
+glob_path = os.path.join(os.path.dirname(__file__), "glob")
+sys.path.append(glob_path)
+
+import cm_global
+
+
+message_collapses = []
+import_failed_extensions = set()
+cm_global.variables['cm.on_revision_detected_handler'] = []
+enable_file_logging = True
+
+
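+# Extensions can register predicate functions here; any log message matching
+# a registered predicate is collapsed (suppressed) by the logger below.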
+def register_message_collapse(f):
+ global message_collapses
+ message_collapses.append(f)
+
+
+def is_import_failed_extension(name):
+ global import_failed_extensions
+ return name in import_failed_extensions
+
+
+def check_file_logging():
+ global enable_file_logging
+ try:
+ import configparser
+ config_path = os.path.join(os.path.dirname(__file__), "config.ini")
+ config = configparser.ConfigParser()
+ config.read(config_path)
+ default_conf = config['default']
+
+ if 'file_logging' in default_conf and default_conf['file_logging'].lower() == 'false':
+ enable_file_logging = False
+ except Exception:
+ pass
+
+
+check_file_logging()
+
+
+sys.__comfyui_manager_register_message_collapse = register_message_collapse
+sys.__comfyui_manager_is_import_failed_extension = is_import_failed_extension
+cm_global.register_api('cm.register_message_collapse', register_message_collapse)
+cm_global.register_api('cm.is_import_failed_extension', is_import_failed_extension)
+
+
+comfyui_manager_path = os.path.dirname(__file__)
+custom_nodes_path = os.path.abspath(os.path.join(comfyui_manager_path, ".."))
+startup_script_path = os.path.join(comfyui_manager_path, "startup-scripts")
+restore_snapshot_path = os.path.join(startup_script_path, "restore-snapshot.json")
+git_script_path = os.path.join(comfyui_manager_path, "git_helper.py")
+
+std_log_lock = threading.Lock()
+
+
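+# Fans intercepted terminal writes out to every registered hook, swallowing
+# individual hook failures so a broken hook cannot crash startup logging.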
+class TerminalHook:
+ def __init__(self):
+ self.hooks = {}
+
+ def add_hook(self, k, v):
+ self.hooks[k] = v
+
+ def remove_hook(self, k):
+ if k in self.hooks:
+ del self.hooks[k]
+
+ def write_stderr(self, msg):
+ for v in self.hooks.values():
+ try:
+ v.write_stderr(msg)
+ except Exception:
+ pass
+
+ def write_stdout(self, msg):
+ for v in self.hooks.values():
+ try:
+ v.write_stdout(msg)
+ except Exception:
+ pass
+
+
+terminal_hook = TerminalHook()
+sys.__comfyui_manager_terminal_hook = terminal_hook
+
+
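+# Streams subprocess output line by line. tqdm-style progress lines are
+# re-emitted with a leading '\r' so they update in place on the console.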
+def handle_stream(stream, prefix):
+ stream.reconfigure(encoding=locale.getpreferredencoding(), errors='replace')
+ for msg in stream:
+ if prefix == '[!]' and ('it/s]' in msg or 's/it]' in msg) and ('%|' in msg or 'it [' in msg):
+            if msg.startswith('100%'):
+                print('\r' + msg, end="", file=sys.stderr)
+            else:
+                print('\r' + msg[:-1], end="", file=sys.stderr)
+ else:
+ if prefix == '[!]':
+ print(prefix, msg, end="", file=sys.stderr)
+ else:
+ print(prefix, msg, end="")
+
+
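+# Runs a command and pumps its stdout/stderr through `handler` on separate
+# threads, returning the process exit code.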
+def process_wrap(cmd_str, cwd_path, handler=None):
+ process = subprocess.Popen(cmd_str, cwd=cwd_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, bufsize=1)
+
+ if handler is None:
+ handler = handle_stream
+
+ stdout_thread = threading.Thread(target=handler, args=(process.stdout, ""))
+ stderr_thread = threading.Thread(target=handler, args=(process.stderr, "[!]"))
+
+ stdout_thread.start()
+ stderr_thread.start()
+
+ stdout_thread.join()
+ stderr_thread.join()
+
+ return process.wait()
+
+
+try:
+    # Default postfix; guards against '--port' being passed without a value.
+    postfix = ""
+    if '--port' in sys.argv:
+        port_index = sys.argv.index('--port')
+        if port_index + 1 < len(sys.argv):
+            port = int(sys.argv[port_index + 1])
+            postfix = f"_{port}"
+
+ # Logger setup
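+    # Three log generations are kept: comfyui{postfix}.log, .prev.log, .prev2.log.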
+ if enable_file_logging:
+ if os.path.exists(f"comfyui{postfix}.log"):
+ if os.path.exists(f"comfyui{postfix}.prev.log"):
+ if os.path.exists(f"comfyui{postfix}.prev2.log"):
+ os.remove(f"comfyui{postfix}.prev2.log")
+ os.rename(f"comfyui{postfix}.prev.log", f"comfyui{postfix}.prev2.log")
+ os.rename(f"comfyui{postfix}.log", f"comfyui{postfix}.prev.log")
+
+ log_file = open(f"comfyui{postfix}.log", "w", encoding="utf-8", errors="ignore")
+
+ log_lock = threading.Lock()
+
+ original_stdout = sys.stdout
+ original_stderr = sys.stderr
+
+ if original_stdout.encoding.lower() == 'utf-8':
+ write_stdout = original_stdout.write
+ write_stderr = original_stderr.write
+ else:
+ def wrapper_stdout(msg):
+ original_stdout.write(msg.encode('utf-8').decode(original_stdout.encoding, errors="ignore"))
+
+ def wrapper_stderr(msg):
+ original_stderr.write(msg.encode('utf-8').decode(original_stderr.encoding, errors="ignore"))
+
+ write_stdout = wrapper_stdout
+ write_stderr = wrapper_stderr
+
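+    # Patterns for tqdm progress bars, the "IMPORT FAILED" startup summary,
+    # and the custom_nodes/<name> path used to record the failing extension.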
+ pat_tqdm = r'\d+%.*\[(.*?)\]'
+ pat_import_fail = r'seconds \(IMPORT FAILED\):'
+ pat_custom_node = r'[/\\]custom_nodes[/\\](.*)$'
+
+ is_start_mode = True
+ is_import_fail_mode = False
+
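+    # Proxy stream installed over sys.stdout/sys.stderr: tees every write to
+    # the log file (timestamping line starts) and to the original stream, and
+    # watches startup output to collect import-failed extension names.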
+ class ComfyUIManagerLogger:
+ def __init__(self, is_stdout):
+ self.is_stdout = is_stdout
+ self.encoding = "utf-8"
+ self.last_char = ''
+
+ def fileno(self):
+ try:
+ if self.is_stdout:
+ return original_stdout.fileno()
+ else:
+ return original_stderr.fileno()
+ except AttributeError:
+                # The wrapped stream has no underlying file descriptor.
+                raise ValueError("The object does not have a fileno method")
+
+ def write(self, message):
+ global is_start_mode
+ global is_import_fail_mode
+
+ if any(f(message) for f in message_collapses):
+ return
+
+ if is_start_mode:
+ if is_import_fail_mode:
+ match = re.search(pat_custom_node, message)
+ if match:
+ import_failed_extensions.add(match.group(1))
+ is_import_fail_mode = False
+ else:
+ match = re.search(pat_import_fail, message)
+ if match:
+ is_import_fail_mode = True
+ else:
+ is_import_fail_mode = False
+
+ if 'Starting server' in message:
+ is_start_mode = False
+
+ if not self.is_stdout:
+ match = re.search(pat_tqdm, message)
+ if match:
+ message = re.sub(r'([#|])\d', r'\1▌', message)
+ message = re.sub('#', '█', message)
+ if '100%' in message:
+ self.sync_write(message)
+ else:
+ write_stderr(message)
+ original_stderr.flush()
+ else:
+ self.sync_write(message)
+ else:
+ self.sync_write(message)
+
+ def sync_write(self, message):
+ with log_lock:
+ timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')[:-3]
+ if self.last_char != '\n':
+ log_file.write(message)
+ else:
+ log_file.write(f"[{timestamp}] {message}")
+ log_file.flush()
+ self.last_char = message if message == '' else message[-1]
+
+            with std_log_lock:
+                # Notify terminal hooks on the channel matching this stream.
+                if self.is_stdout:
+                    write_stdout(message)
+                    original_stdout.flush()
+                    terminal_hook.write_stdout(message)
+                else:
+                    write_stderr(message)
+                    original_stderr.flush()
+                    terminal_hook.write_stderr(message)
+
+ def flush(self):
+ log_file.flush()
+
+ with std_log_lock:
+ if self.is_stdout:
+ original_stdout.flush()
+ else:
+ original_stderr.flush()
+
+ def close(self):
+ self.flush()
+
+ def reconfigure(self, *args, **kwargs):
+ pass
+
+ # You can close through sys.stderr.close_log()
+ def close_log(self):
+ sys.stderr = original_stderr
+ sys.stdout = original_stdout
+ log_file.close()
+
+ def close_log():
+ sys.stderr = original_stderr
+ sys.stdout = original_stdout
+ log_file.close()
+
+
+ if enable_file_logging:
+ sys.stdout = ComfyUIManagerLogger(True)
+ sys.stderr = ComfyUIManagerLogger(False)
+
+ atexit.register(close_log)
+ else:
+ sys.stdout.close_log = lambda: None
+
+except Exception as e:
+ print(f"[ComfyUI-Manager] Logging failed: {e}")
+
+
+print("** ComfyUI startup time:", datetime.datetime.now())
+print("** Platform:", platform.system())
+print("** Python version:", sys.version)
+print("** Python executable:", sys.executable)
+
+if enable_file_logging:
+    print("** Log path:", os.path.abspath(f'comfyui{postfix}.log'))
+else:
+ print("** Log path: file logging is disabled")
+
+
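+# config.ini is expected to look like (illustrative):
+#   [default]
+#   bypass_ssl = True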
+def check_bypass_ssl():
+ try:
+ import configparser
+ import ssl
+ config_path = os.path.join(os.path.dirname(__file__), "config.ini")
+ config = configparser.ConfigParser()
+ config.read(config_path)
+ default_conf = config['default']
+
+ if 'bypass_ssl' in default_conf and default_conf['bypass_ssl'].lower() == 'true':
+            print("[ComfyUI-Manager] WARN: Unsafe - SSL verification bypass is enabled. (see ComfyUI-Manager/config.ini)")
+ ssl._create_default_https_context = ssl._create_unverified_context # SSL certificate error fix.
+ except Exception:
+ pass
+
+
+check_bypass_ssl()
+
+
+# Perform install
+processed_install = set()
+script_list_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "startup-scripts", "install-scripts.txt")
+pip_list = None
+
+
+def get_installed_packages():
+ global pip_list
+
+ if pip_list is None:
+ try:
+ result = subprocess.check_output([sys.executable, '-m', 'pip', 'list'], universal_newlines=True)
+ pip_list = set([line.split()[0].lower() for line in result.split('\n') if line.strip()])
+ except subprocess.CalledProcessError as e:
+            print(f"[ComfyUI-Manager] Failed to retrieve the list of installed pip packages: {e}")
+ return set()
+
+ return pip_list
+
+
+def is_installed(name):
+    """Check whether a requirements.txt line refers to an already-installed package.
+
+    Comment lines are treated as installed; any version specifier
+    (e.g. 'torch>=2.0') is stripped before the lookup.
+    """
+    name = name.strip()
+
+ if name.startswith('#'):
+ return True
+
+ pattern = r'([^<>!=]+)([<>!=]=?)'
+ match = re.search(pattern, name)
+
+ if match:
+ name = match.group(1)
+
+ return name.lower() in get_installed_packages()
+
+
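+# If a snapshot file is pending (written by ComfyUI-Manager's snapshot feature),
+# apply it before ComfyUI loads custom nodes, then install the dependencies of
+# every repository the snapshot cloned.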
+if os.path.exists(restore_snapshot_path):
+ try:
+ import json
+
+ cloned_repos = []
+
+ def msg_capture(stream, prefix):
+ stream.reconfigure(encoding=locale.getpreferredencoding(), errors='replace')
+ for msg in stream:
+ if msg.startswith("CLONE: "):
+ cloned_repos.append(msg[7:])
+ if prefix == '[!]':
+ print(prefix, msg, end="", file=sys.stderr)
+ else:
+ print(prefix, msg, end="")
+
+            elif prefix == '[!]' and ('it/s]' in msg or 's/it]' in msg) and ('%|' in msg or 'it [' in msg):
+                # tqdm progress line: redraw in place instead of printing new lines.
+                if msg.startswith('100%'):
+                    print('\r' + msg, end="", file=sys.stderr)
+                else:
+                    print('\r' + msg[:-1], end="", file=sys.stderr)
+ else:
+ if prefix == '[!]':
+ print(prefix, msg, end="", file=sys.stderr)
+ else:
+ print(prefix, msg, end="")
+
+        print("[ComfyUI-Manager] Restoring snapshot.")
+ cmd_str = [sys.executable, git_script_path, '--apply-snapshot', restore_snapshot_path]
+ exit_code = process_wrap(cmd_str, custom_nodes_path, handler=msg_capture)
+
+ with open(restore_snapshot_path, 'r', encoding="UTF-8", errors="ignore") as json_file:
+ info = json.load(json_file)
+ for url in cloned_repos:
+ try:
+ repository_name = url.split("/")[-1].strip()
+ repo_path = os.path.join(custom_nodes_path, repository_name)
+ repo_path = os.path.abspath(repo_path)
+
+ requirements_path = os.path.join(repo_path, 'requirements.txt')
+ install_script_path = os.path.join(repo_path, 'install.py')
+
+ this_exit_code = 0
+
+ if os.path.exists(requirements_path):
+ with open(requirements_path, 'r', encoding="UTF-8", errors="ignore") as file:
+ for line in file:
+ package_name = line.strip()
+ if package_name and not is_installed(package_name):
+ install_cmd = [sys.executable, "-m", "pip", "install", package_name]
+ this_exit_code += process_wrap(install_cmd, repo_path)
+
+ if os.path.exists(install_script_path) and f'{repo_path}/install.py' not in processed_install:
+ processed_install.add(f'{repo_path}/install.py')
+ install_cmd = [sys.executable, install_script_path]
+ print(f">>> {install_cmd} / {repo_path}")
+ this_exit_code += process_wrap(install_cmd, repo_path)
+
+ if this_exit_code != 0:
+                        print(f"[ComfyUI-Manager] Restoring '{repository_name}' failed.")
+
+ except Exception as e:
+ print(e)
+                print(f"[ComfyUI-Manager] Restoring '{repository_name}' failed.")
+
+ if exit_code != 0:
+ print(f"[ComfyUI-Manager] Restore snapshot failed.")
+ else:
+ print(f"[ComfyUI-Manager] Restore snapshot done.")
+
+ except Exception as e:
+ print(e)
+ print(f"[ComfyUI-Manager] Restore snapshot failed.")
+
+ os.remove(restore_snapshot_path)
+
+
+def execute_lazy_install_script(repo_path, executable):
+ global processed_install
+
+ install_script_path = os.path.join(repo_path, "install.py")
+ requirements_path = os.path.join(repo_path, "requirements.txt")
+
+ if os.path.exists(requirements_path):
+ print(f"Install: pip packages for '{repo_path}'")
+ with open(requirements_path, "r") as requirements_file:
+ for line in requirements_file:
+ package_name = line.strip()
+ if package_name and not is_installed(package_name):
+ install_cmd = [executable, "-m", "pip", "install", package_name]
+ process_wrap(install_cmd, repo_path)
+
+ if os.path.exists(install_script_path) and f'{repo_path}/install.py' not in processed_install:
+ processed_install.add(f'{repo_path}/install.py')
+ print(f"Install: install script for '{repo_path}'")
+ install_cmd = [executable, "install.py"]
+ process_wrap(install_cmd, repo_path)
+
+
+# Check if script_list_path exists
+if os.path.exists(script_list_path):
+ print("\n#######################################################################")
+    print("[ComfyUI-Manager] Starting dependency installation/(de)activation for the extensions\n")
+
+ executed = set()
+ # Read each line from the file and convert it to a list using eval
+ with open(script_list_path, 'r', encoding="UTF-8", errors="ignore") as file:
+ for line in file:
+ if line in executed:
+ continue
+
+ executed.add(line)
+
+ try:
+ script = eval(line)
+
+                if script[1].startswith('#') and script[1] != '#FORCE':
+                    # A marker in the command slot is a directive, not a command to run.
+                    if script[1] == "#LAZY-INSTALL-SCRIPT":
+                        execute_lazy_install_script(script[0], script[2])
+
+                elif os.path.exists(script[0]):
+                    if script[1] == "#FORCE":
+                        # '#FORCE' runs the command even if the package is already installed.
+                        del script[1]
+                    else:
+                        if 'pip' in script[1:] and 'install' in script[1:] and is_installed(script[-1]):
+                            continue
+
+ print(f"\n## ComfyUI-Manager: EXECUTE => {script[1:]}")
+ print(f"\n## Execute install/(de)activation script for '{script[0]}'")
+
+ exit_code = process_wrap(script[1:], script[0])
+
+ if exit_code != 0:
+ print(f"install/(de)activation script failed: {script[0]}")
+ else:
+ print(f"\n## ComfyUI-Manager: CANCELED => {script[1:]}")
+
+ except Exception as e:
+ print(f"[ERROR] Failed to execute install/(de)activation script: {line} / {e}")
+
+ # Remove the script_list_path file
+ if os.path.exists(script_list_path):
+ os.remove(script_list_path)
+
+ print("\n[ComfyUI-Manager] Startup script completed.")
+ print("#######################################################################\n")
+
+del processed_install
+del pip_list
+
+
+def check_windows_event_loop_policy():
+ try:
+ import configparser
+ config_path = os.path.join(os.path.dirname(__file__), "config.ini")
+ config = configparser.ConfigParser()
+ config.read(config_path)
+ default_conf = config['default']
+
+ if 'windows_selector_event_loop_policy' in default_conf and default_conf['windows_selector_event_loop_policy'].lower() == 'true':
+ try:
+ import asyncio
+ import asyncio.windows_events
+ asyncio.set_event_loop_policy(asyncio.windows_events.WindowsSelectorEventLoopPolicy())
+ print(f"[ComfyUI-Manager] Windows event loop policy mode enabled")
+ except Exception as e:
+ print(f"[ComfyUI-Manager] WARN: Windows initialization fail: {e}")
+ except Exception:
+ pass
+
+
+if platform.system() == 'Windows':
+ check_windows_event_loop_policy()
diff --git a/custom_nodes/ComfyUI-Manager/requirements.txt b/custom_nodes/ComfyUI-Manager/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2435feff83d12d96797c42d02e55c3224cba3b9d
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/requirements.txt
@@ -0,0 +1,4 @@
+GitPython
+matrix-client==0.4.0
+transformers
+huggingface-hub>0.20
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI-Manager/scan.sh b/custom_nodes/ComfyUI-Manager/scan.sh
new file mode 100755
index 0000000000000000000000000000000000000000..1b3cc3771790ebedf0c538ff3464125d52e8668c
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/scan.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+rm ~/.tmp/default/*.py > /dev/null 2>&1
+python scanner.py ~/.tmp/default
+cp extension-node-map.json node_db/new/.
+
+echo Integrity check
+./check.sh
diff --git a/custom_nodes/ComfyUI-Manager/scanner.py b/custom_nodes/ComfyUI-Manager/scanner.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b8d4de2aa72325c3f5c9d1203cb10bc79bf02fc
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/scanner.py
@@ -0,0 +1,336 @@
+import ast
+import re
+import os
+import sys
+import json
+import concurrent.futures
+
+from git import Repo
+from torchvision.datasets.utils import download_url
+
+builtin_nodes = set()
+
+
+# prepare temp dir
+if len(sys.argv) > 1:
+ temp_dir = sys.argv[1]
+else:
+ temp_dir = os.path.join(os.getcwd(), ".tmp")
+
+if not os.path.exists(temp_dir):
+ os.makedirs(temp_dir)
+
+print(f"TEMP DIR: {temp_dir}")
+
+
+def extract_nodes(code_text):
+    """Collect the literal keys of a top-level 'NODE_CLASS_MAPPINGS = {...}' assignment."""
+    try:
+ parsed_code = ast.parse(code_text)
+
+ assignments = (node for node in parsed_code.body if isinstance(node, ast.Assign))
+
+ for assignment in assignments:
+ if isinstance(assignment.targets[0], ast.Name) and assignment.targets[0].id == 'NODE_CLASS_MAPPINGS':
+ node_class_mappings = assignment.value
+ break
+ else:
+ node_class_mappings = None
+
+ if node_class_mappings:
+ s = set([key.s.strip() for key in node_class_mappings.keys if key is not None])
+ return s
+ else:
+ return set()
+    except Exception:
+        # Unparsable source or a non-literal NODE_CLASS_MAPPINGS definition.
+        return set()
+
+
+# scan
+def scan_in_file(filename, is_builtin=False):
+ global builtin_nodes
+
+ try:
+ with open(filename, encoding='utf-8') as file:
+ code = file.read()
+ except UnicodeDecodeError:
+ with open(filename, encoding='cp949') as file:
+ code = file.read()
+
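+    # NODE_CLASS_MAPPINGS may be declared as a dict literal, filled by item
+    # assignment, or extended via .update(); the patterns below cover each style.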
+ pattern = r"_CLASS_MAPPINGS\s*=\s*{([^}]*)}"
+ regex = re.compile(pattern, re.MULTILINE | re.DOTALL)
+
+ nodes = set()
+ class_dict = {}
+
+ nodes |= extract_nodes(code)
+
+ pattern2 = r'^[^=]*_CLASS_MAPPINGS\["(.*?)"\]'
+ keys = re.findall(pattern2, code)
+ for key in keys:
+ nodes.add(key.strip())
+
+ pattern3 = r'^[^=]*_CLASS_MAPPINGS\[\'(.*?)\'\]'
+ keys = re.findall(pattern3, code)
+ for key in keys:
+ nodes.add(key.strip())
+
+ matches = regex.findall(code)
+ for match in matches:
+ dict_text = match
+
+ key_value_pairs = re.findall(r"\"([^\"]*)\"\s*:\s*([^,\n]*)", dict_text)
+ for key, value in key_value_pairs:
+ class_dict[key.strip()] = value.strip()
+
+ key_value_pairs = re.findall(r"'([^']*)'\s*:\s*([^,\n]*)", dict_text)
+ for key, value in key_value_pairs:
+ class_dict[key.strip()] = value.strip()
+
+ for key, value in class_dict.items():
+ nodes.add(key.strip())
+
+ update_pattern = r"_CLASS_MAPPINGS.update\s*\({([^}]*)}\)"
+ update_match = re.search(update_pattern, code)
+ if update_match:
+ update_dict_text = update_match.group(1)
+ update_key_value_pairs = re.findall(r"\"([^\"]*)\"\s*:\s*([^,\n]*)", update_dict_text)
+ for key, value in update_key_value_pairs:
+ class_dict[key.strip()] = value.strip()
+ nodes.add(key.strip())
+
+ metadata = {}
+ lines = code.strip().split('\n')
+ for line in lines:
+ if line.startswith('@'):
+ if line.startswith("@author:") or line.startswith("@title:") or line.startswith("@nickname:") or line.startswith("@description:"):
+ key, value = line[1:].strip().split(':', 1)
+ metadata[key.strip()] = value.strip()
+
+    if is_builtin:
+        builtin_nodes |= set(nodes)
+    else:
+        # Drop nodes that ComfyUI itself already provides.
+        nodes -= builtin_nodes
+
+ return nodes, metadata
+
+
+def get_py_file_paths(dirname):
+ file_paths = []
+
+ for root, dirs, files in os.walk(dirname):
+ if ".git" in root or "__pycache__" in root:
+ continue
+
+ for file in files:
+ if file.endswith(".py"):
+ file_path = os.path.join(root, file)
+ file_paths.append(file_path)
+
+ return file_paths
+
+
+def get_nodes(target_dir):
+ py_files = []
+ directories = []
+
+ for item in os.listdir(target_dir):
+ if ".git" in item or "__pycache__" in item:
+ continue
+
+ path = os.path.abspath(os.path.join(target_dir, item))
+
+ if os.path.isfile(path) and item.endswith(".py"):
+ py_files.append(path)
+ elif os.path.isdir(path):
+ directories.append(path)
+
+ return py_files, directories
+
+
+def get_git_urls_from_json(json_file):
+ with open(json_file, encoding='utf-8') as file:
+ data = json.load(file)
+
+ custom_nodes = data.get('custom_nodes', [])
+ git_clone_files = []
+ for node in custom_nodes:
+ if node.get('install_type') == 'git-clone':
+ files = node.get('files', [])
+ if files:
+ git_clone_files.append((files[0], node.get('title'), node.get('nodename_pattern')))
+
+    # ComfyUI itself is scanned too, so that its builtin nodes can be excluded.
+    git_clone_files.append(("https://github.com/comfyanonymous/ComfyUI", "ComfyUI", None))
+
+ return git_clone_files
+
+
+def get_py_urls_from_json(json_file):
+ with open(json_file, encoding='utf-8') as file:
+ data = json.load(file)
+
+ custom_nodes = data.get('custom_nodes', [])
+ py_files = []
+ for node in custom_nodes:
+ if node.get('install_type') == 'copy':
+ files = node.get('files', [])
+ if files:
+ py_files.append((files[0], node.get('title'), node.get('nodename_pattern')))
+
+ return py_files
+
+
+def clone_or_pull_git_repository(git_url):
+    # Keep the name consistent with node_info keys: strip only a '.git' suffix.
+    repo_name = os.path.basename(git_url)
+    if repo_name.endswith(".git"):
+        repo_name = repo_name[:-4]
+ repo_dir = os.path.join(temp_dir, repo_name)
+
+ if os.path.exists(repo_dir):
+        try:
+            print(f"Pulling {repo_name}...")
+            repo = Repo(repo_dir)
+            origin = repo.remote(name="origin")
+            origin.pull(rebase=True)
+            repo.git.submodule('update', '--init', '--recursive')
+ except Exception as e:
+ print(f"Pulling {repo_name} failed: {e}")
+ else:
+        try:
+            print(f"Cloning {repo_name}...")
+            Repo.clone_from(git_url, repo_dir, recursive=True)
+ except Exception as e:
+ print(f"Cloning {repo_name} failed: {e}")
+
+
+def update_custom_nodes():
+ if not os.path.exists(temp_dir):
+ os.makedirs(temp_dir)
+
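+    # name -> (url, title, nodename_pattern) for every repository and .py node file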
+ node_info = {}
+
+ git_url_titles = get_git_urls_from_json('custom-node-list.json')
+
+ def process_git_url_title(url, title, node_pattern):
+ name = os.path.basename(url)
+ if name.endswith(".git"):
+ name = name[:-4]
+
+ node_info[name] = (url, title, node_pattern)
+ clone_or_pull_git_repository(url)
+
+ with concurrent.futures.ThreadPoolExecutor(10) as executor:
+ for url, title, node_pattern in git_url_titles:
+ executor.submit(process_git_url_title, url, title, node_pattern)
+
+ py_url_titles_and_pattern = get_py_urls_from_json('custom-node-list.json')
+
+ def download_and_store_info(url_title_and_pattern):
+ url, title, node_pattern = url_title_and_pattern
+ name = os.path.basename(url)
+ if name.endswith(".py"):
+ node_info[name] = (url, title, node_pattern)
+
+        try:
+            download_url(url, temp_dir)
+        except Exception:
+            print(f"[ERROR] Cannot download '{url}'")
+
+ with concurrent.futures.ThreadPoolExecutor(10) as executor:
+ executor.map(download_and_store_info, py_url_titles_and_pattern)
+
+ return node_info
+
+
+def gen_json(node_info):
+ # scan from .py file
+ node_files, node_dirs = get_nodes(temp_dir)
+
+    comfyui_path = os.path.abspath(os.path.join(temp_dir, "ComfyUI"))
+    # Scan ComfyUI first so builtin_nodes is populated before the custom node
+    # repositories are scanned.
+    node_dirs.remove(comfyui_path)
+    node_dirs = [comfyui_path] + node_dirs
+
+ data = {}
+ for dirname in node_dirs:
+ py_files = get_py_file_paths(dirname)
+ metadata = {}
+
+ nodes = set()
+ for py in py_files:
+            nodes_in_file, metadata_in_file = scan_in_file(py, dirname == comfyui_path)
+ nodes.update(nodes_in_file)
+ metadata.update(metadata_in_file)
+
+ dirname = os.path.basename(dirname)
+
+ if len(nodes) > 0 or (dirname in node_info and node_info[dirname][2] is not None):
+ nodes = list(nodes)
+ nodes.sort()
+
+ if dirname in node_info:
+ git_url, title, node_pattern = node_info[dirname]
+ metadata['title_aux'] = title
+ if node_pattern is not None:
+ metadata['nodename_pattern'] = node_pattern
+ data[git_url] = (nodes, metadata)
+ else:
+ print(f"WARN: {dirname} is removed from custom-node-list.json")
+
+    for file in node_files:
+        nodes, metadata = scan_in_file(file)
+        file = os.path.basename(file)
+
+        if len(nodes) > 0 or (file in node_info and node_info[file][2] is not None):
+            nodes = list(nodes)
+            nodes.sort()
+
+ if file in node_info:
+ url, title, node_pattern = node_info[file]
+ metadata['title_aux'] = title
+ if node_pattern is not None:
+ metadata['nodename_pattern'] = node_pattern
+ data[url] = (nodes, metadata)
+ else:
+ print(f"Missing info: {file}")
+
+ # scan from node_list.json file
+ extensions = [name for name in os.listdir(temp_dir) if os.path.isdir(os.path.join(temp_dir, name))]
+
+ for extension in extensions:
+ node_list_json_path = os.path.join(temp_dir, extension, 'node_list.json')
+ if os.path.exists(node_list_json_path):
+ git_url, title, node_pattern = node_info[extension]
+
+ with open(node_list_json_path, 'r', encoding='utf-8') as f:
+ node_list_json = json.load(f)
+
+ metadata_in_url = {}
+ if git_url not in data:
+ nodes = set()
+ else:
+ nodes_in_url, metadata_in_url = data[git_url]
+ nodes = set(nodes_in_url)
+
+ for x, desc in node_list_json.items():
+ nodes.add(x.strip())
+
+ metadata_in_url['title_aux'] = title
+ if node_pattern is not None:
+ metadata_in_url['nodename_pattern'] = node_pattern
+ nodes = list(nodes)
+ nodes.sort()
+ data[git_url] = (nodes, metadata_in_url)
+
+    json_path = "extension-node-map.json"
+ with open(json_path, "w", encoding='utf-8') as file:
+ json.dump(data, file, indent=4, sort_keys=True)
+
+
+print("### ComfyUI Manager Node Scanner ###")
+
+print("\n# Updating extensions\n")
+updated_node_info = update_custom_nodes()
+
+print("\n# Generating 'extension-node-map.json'\n")
+gen_json(updated_node_info)
+
diff --git a/custom_nodes/ComfyUI-Manager/scripts/colab-dependencies.py b/custom_nodes/ComfyUI-Manager/scripts/colab-dependencies.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5a70ed6dd92ba90e8084e07fbb9097fe3096ea5
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/scripts/colab-dependencies.py
@@ -0,0 +1,39 @@
+import os
+import subprocess
+
+
+def get_enabled_subdirectories_with_files(base_directory):
+ subdirs_with_files = []
+ for subdir in os.listdir(base_directory):
+ try:
+ full_path = os.path.join(base_directory, subdir)
+            if os.path.isdir(full_path) and not subdir.endswith(".disabled") and not subdir.startswith('.') and subdir != '__pycache__':
+                requirements_file = os.path.join(full_path, "requirements.txt")
+                install_script = os.path.join(full_path, "install.py")
+
+                if os.path.exists(requirements_file) or os.path.exists(install_script):
+                    print(f"## Install dependencies for '{subdir}'")
+                    subdirs_with_files.append((full_path, requirements_file, install_script))
+        except Exception as e:
+            print(f"Exception while collecting dependencies for '{subdir}':\n{e}")
+
+ return subdirs_with_files
+
+
+def install_requirements(requirements_file_path):
+ if os.path.exists(requirements_file_path):
+ subprocess.run(["pip", "install", "-r", requirements_file_path])
+
+
+def run_install_script(install_script_path):
+ if os.path.exists(install_script_path):
+ subprocess.run(["python", install_script_path])
+
+
+custom_nodes_directory = "custom_nodes"
+subdirs_with_files = get_enabled_subdirectories_with_files(custom_nodes_directory)
+
+
+for subdir, requirements_file, install_script in subdirs_with_files:
+ install_requirements(requirements_file)
+ run_install_script(install_script)
diff --git a/custom_nodes/ComfyUI-Manager/scripts/install-comfyui-venv-linux.sh b/custom_nodes/ComfyUI-Manager/scripts/install-comfyui-venv-linux.sh
new file mode 100755
index 0000000000000000000000000000000000000000..be473dc66f8eeb36c48d409945eb5ae83a030171
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/scripts/install-comfyui-venv-linux.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+git clone https://github.com/comfyanonymous/ComfyUI
+cd ComfyUI/custom_nodes
+git clone https://github.com/ltdrdata/ComfyUI-Manager
+cd ..
+python -m venv venv
+source venv/bin/activate
+python -m pip install -r requirements.txt
+python -m pip install -r custom_nodes/ComfyUI-Manager/requirements.txt
+python -m pip install torchvision
+cd ..
+echo "#!/bin/bash" > run_gpu.sh
+echo "cd ComfyUI" >> run_gpu.sh
+echo "source venv/bin/activate" >> run_gpu.sh
+echo "python main.py --preview-method auto" >> run_gpu.sh
+chmod +x run_gpu.sh
+
+echo "#!/bin/bash" > run_cpu.sh
+echo "cd ComfyUI" >> run_cpu.sh
+echo "source venv/bin/activate" >> run_cpu.sh
+echo "python main.py --preview-method auto --cpu" >> run_cpu.sh
+chmod +x run_cpu.sh
diff --git a/custom_nodes/ComfyUI-Manager/scripts/install-comfyui-venv-win.bat b/custom_nodes/ComfyUI-Manager/scripts/install-comfyui-venv-win.bat
new file mode 100755
index 0000000000000000000000000000000000000000..6bb0e8364b5170530c2a85341ad754764c6788ae
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/scripts/install-comfyui-venv-win.bat
@@ -0,0 +1,20 @@
+git clone https://github.com/comfyanonymous/ComfyUI
+cd ComfyUI/custom_nodes
+git clone https://github.com/ltdrdata/ComfyUI-Manager
+cd ..
+python -m venv venv
+call venv/Scripts/activate
+python -m pip install -r requirements.txt
+python -m pip install -r custom_nodes/ComfyUI-Manager/requirements.txt
+python -m pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu118 xformers
+cd ..
+echo cd ComfyUI > run_gpu.bat
+echo call venv/Scripts/activate >> run_gpu.bat
+echo python main.py >> run_gpu.bat
+
+echo cd ComfyUI > run_cpu.bat
+echo call venv/Scripts/activate >> run_cpu.bat
+echo python main.py --cpu >> run_cpu.bat
diff --git a/custom_nodes/ComfyUI-Manager/scripts/install-manager-for-portable-version.bat b/custom_nodes/ComfyUI-Manager/scripts/install-manager-for-portable-version.bat
new file mode 100644
index 0000000000000000000000000000000000000000..7b067dfd770d197ccd68e760087536552223f260
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/scripts/install-manager-for-portable-version.bat
@@ -0,0 +1,2 @@
+.\python_embeded\python.exe -s -m pip install gitpython
+.\python_embeded\python.exe -c "import git; git.Repo.clone_from('https://github.com/ltdrdata/ComfyUI-Manager', './ComfyUI/custom_nodes/ComfyUI-Manager')"
diff --git a/custom_nodes/ComfyUI-Manager/scripts/update-fix.py b/custom_nodes/ComfyUI-Manager/scripts/update-fix.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2ac10074607544d0b9cdaf4372e43c7f62bb8d0
--- /dev/null
+++ b/custom_nodes/ComfyUI-Manager/scripts/update-fix.py
@@ -0,0 +1,12 @@
+import git
+
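+# Repair a repository stuck on a broken update: rewind the remote-tracking ref
+# to a known commit, then fetch and pull 'main' again.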
+commit_hash = "a361cc1"
+
+repo = git.Repo('.')
+
+if repo.is_dirty():
+ repo.git.stash()
+
+repo.git.update_ref("refs/remotes/origin/main", commit_hash)
+repo.remotes.origin.fetch()
+repo.git.pull("origin", "main")
diff --git a/custom_nodes/ComfyUI-Manager/snapshots/the_snapshot_files_are_located_here b/custom_nodes/ComfyUI-Manager/snapshots/the_snapshot_files_are_located_here
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/LICENSE b/custom_nodes/ComfyUI-VideoHelperSuite/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/README.md b/custom_nodes/ComfyUI-VideoHelperSuite/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..3b21911a54cf6f006186fc351edef71a643f0a9a
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/README.md
@@ -0,0 +1,110 @@
+# ComfyUI-VideoHelperSuite
+Nodes related to video workflows
+
+## I/O Nodes
+### Load Video
+Converts a video file into a series of images
+- video: The video file to be loaded
+- force_rate: Discards or duplicates frames as needed to hit a target frame rate. Disabled by setting to 0. This can be used to quickly match a suggested frame rate like the 8 fps of AnimateDiff.
+- force_size: Allows for quick resizing to a number of suggested sizes. Several options allow you to set only width or height and determine the other from aspect ratio.
+- frame_load_cap: The maximum number of frames which will be returned. This could also be thought of as the maximum batch size.
+- skip_first_frames: How many frames to skip from the start of the video after adjusting for a forced frame rate. By incrementing this number by the frame_load_cap, you can easily process a longer input video in parts (see the sketch below).
+- select_every_nth: Allows for skipping a number of frames without considering the base frame rate or risking frame duplication. Often useful when working with animated gifs
+
+A path variant of the Load Video node exists that allows loading videos from external paths.
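+As a usage sketch of the chunked-processing trick above (`run_workflow` and `total_frames` are hypothetical stand-ins for however you queue prompts):
+```python
+# Hypothetical sketch: process a long video in chunks of 16 frames by
+# stepping skip_first_frames by frame_load_cap on each run.
+frame_load_cap = 16
+for skip_first_frames in range(0, total_frames, frame_load_cap):
+    run_workflow(video, frame_load_cap=frame_load_cap,
+                 skip_first_frames=skip_first_frames)
+```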
+
+
+If [Advanced Previews](#advanced-previews) is enabled in the options menu of the web ui, the preview will reflect the current settings on the node.
+### Load Image Sequence
+Loads all image files from a subfolder. Options are similar to Load Video.
+- image_load_cap: The maximum number of images which will be returned. This could also be thought of as the maximum batch size.
+- skip_first_images: How many images to skip. By incrementing this number by image_load_cap, you can easily divide a long sequence of images into multiple batches.
+- select_every_nth: Allows for skipping a number of images between every returned frame.
+
+A path variant of Load Image Sequence also exists.
+### Video Combine
+Combines a series of images into an output video.
+If the optional audio input is provided, it will also be combined into the output video.
+- frame_rate: How many of the input frames are displayed per second. A higher frame rate means that the output video plays faster and has a shorter duration. This should usually be kept to 8 for AnimateDiff, or matched to the force_rate of a Load Video node.
+- loop_count: How many additional times the video should repeat
+- filename_prefix: The base file name used for output.
+ - You can save output to a subfolder: `subfolder/video`
+ - Like the builtin Save Image node, you can add timestamps. `%date:yyyy-MM-ddThh:mm:ss%` might become 2023-10-31T6:45:25
+- format: The file format to use. Advanced information on configuring or adding additional video formats can be found in the [Video Formats](#video-formats) section.
+- pingpong: Causes the input to be played back in reverse to create a clean loop.
+- save_output: Whether the output should be saved to the output directory or the temp directory.
+
+Returns: a `VHS_FILENAMES` which consists of a boolean indicating if save_output is enabled and a list of the full filepaths of all generated outputs in the order created. Accordingly, `output[1][-1]` will be the most complete output.
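+
+A minimal sketch of consuming this return value from Python (assuming `vhs_filenames` holds the node's VHS_FILENAMES output):
+```python
+# VHS_FILENAMES is a pair: (save_output_enabled, [full paths of generated files])
+save_output_enabled, file_paths = vhs_filenames
+latest_output = file_paths[-1]  # the most complete output, as noted above
+```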
+
+Depending on the format chosen, additional options may become available, including
+- crf: Describes the quality of the output video. A lower number gives a higher quality video and a larger file size, while a higher number gives a lower quality video with a smaller size. Scaling varies by codec, but visually lossless output generally occurs around 20.
+- save_metadata: Includes a copy of the workflow in the output video which can be loaded by dragging and dropping the video, just like with images.
+- pix_fmt: Changes how the pixel data is stored. `yuv420p10le` has higher color quality, but won't work on all devices.
+### Load Audio
+Provides a way to load standalone audio files.
+- seek_seconds: An optional start time for the audio file in seconds.
+
+## Latent/Image Nodes
+A number of utility nodes exist for managing latents. For each, there is an equivalent node which works on images.
+### Split Batch
+Divides the latents into two sets. The first `split_index` latents go to output A and the remainder to output B. If fewer than `split_index` latents are provided as input, all are passed to output A and output B is empty.
+### Merge Batch
+Combines two groups of latents into a single output. The order of the output is the latents in A followed by the latents in B.
+If the input groups are not the same size, the node provides options for rescaling the latents before merging.
+### Select Every Nth
+The first of every `select_every_nth` inputs is passed through and the remainder are discarded.
+### Get Count
+Returns the number of latents or images in the input batch.
+### Duplicate Batch
+Repeats the input batch `multiply_by` times and returns the resulting count.
+
+## Video Previews
+Load Video (Upload), Load Video (Path), Load Images (Upload), Load Images (Path) and Video Combine provide animated previews.
+Nodes with previews provide additional functionality when right-clicked:
+- Open preview
+- Save preview
+- Pause preview: Can improve performance with very large videos
+- Hide preview: Can improve performance, save space
+- Sync preview: Restarts all previews for side-by-side comparisons
+
+### Advanced Previews
+Advanced Previews must be manually enabled by clicking the settings gear next to Queue Prompt and checking the box for VHS Advanced Previews.
+If enabled, videos which are displayed in the ui will be converted with ffmpeg on request. This has several benefits:
+- Previews for Load Video nodes will reflect the settings on the node such as skip_first_frames and frame_load_cap
+ - This makes it easy to select an exact portion of an input video and sync it with outputs
+- It can use substantially less bandwidth if running the server remotely
+- It can greatly improve browser performance by downsizing videos to the in-UI resolution, which is particularly useful with animated gifs
+- It allows for previews of videos that would not normally be playable in the browser.
+- Can be limited to subdirectories of ComfyUI if `VHS_STRICT_PATHS` is set as an environment variable.
+
+This functionality is disabled by default since it comes with several downsides:
+- There is a delay before videos show in the browser. This delay can become quite large if the input video is long
+- The preview videos are lower quality (The original can always be viewed with Right Click -> Open preview)
+
+## Video Formats
+Those familiar with ffmpeg can add json files to the video_formats folder to add new output types to Video Combine.
+Consider the following example for av1-webm:
+```json
+{
+ "main_pass":
+ [
+ "-n", "-c:v", "libsvtav1",
+ "-pix_fmt", "yuv420p10le",
+ "-crf", ["crf","INT", {"default": 23, "min": 0, "max": 100, "step": 1}]
+ ],
+ "audio_pass": ["-c:a", "libopus"],
+ "extension": "webm",
+ "environment": {"SVT_LOG": "1"}
+}
+```
+Most configuration takes place in `main_pass`, which is a list of arguments that are passed to ffmpeg.
+- `"-n"` designates that the command should fail if a file of the same name already exists. This should never happen, but if some bug were to occur, it would ensure other files aren't overwritten.
+- `"-c:v", "libsvtav1"` designates that the video should be encoded with an av1 codec using the new SVT-AV1 encoder. SVT-AV1 is much faster than libaom-av1, but may not exist in older versions of ffmpeg. Alternatively, av1_nvenc could be used for gpu encoding with newer nvidia cards.
+- `"-pix_fmt", "yuv420p10le"` designates the standard pixel format with 10-bit color. It's important that some pixel format be specified to ensure a nonconfigurable input pix_fmt isn't used.
+
+`audio_pass` contains a list of arguments which are passed to ffmpeg when audio is passed into Video Combine.
+
+`extension` designates both the file extension and the container format that is used. If some of the above options are omitted from `main_pass`, it can affect what default options are chosen.
+
+`environment` can optionally be provided to set environment variables during execution. For av1 it's used to reduce the verbosity of logging so that only major errors are displayed.
+
+`input_color_depth` affects the format in which pixels are passed to the ffmpeg subprocess. Current valid options are `8bit` and `16bit`. The latter will produce higher quality output, but is experimental.
+
+Fields can be exposed in the webui as a widget using a format similar to what is used in the creation of custom nodes. In the above example, the argument for `-crf` will be exposed as a format widget in the webui. Format widgets are a list of up to 3 terms:
+- The name of the widget that will be displayed in the web ui
+- Either a primitive such as "INT" or "BOOLEAN", or a list of string options
+- A dictionary of options
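+
+As a rough sketch, such a triple resolves to a default value the same way `get_format_widget_defaults` in this suite's `nodes.py` does:
+```python
+def widget_default(widget):
+    # widget is [name, type-or-options-list, optional dict of constraints]
+    if len(widget) > 2 and "default" in widget[2]:
+        return widget[2]["default"]
+    if isinstance(widget[1], list):
+        return widget[1][0]  # first entry of an options list
+    # fallback per primitive type
+    return {"BOOLEAN": False, "INT": 0, "FLOAT": 0, "STRING": ""}[widget[1]]
+```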
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/__init__.py b/custom_nodes/ComfyUI-VideoHelperSuite/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..cae39593a7307fe8dd8a9055e643fd572fb988e8
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/__init__.py
@@ -0,0 +1,6 @@
+from .videohelpersuite.nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
+import folder_paths
+from .videohelpersuite.server import server
+
+WEB_DIRECTORY = "./web"
+__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"]
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/requirements.txt b/custom_nodes/ComfyUI-VideoHelperSuite/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4fa34aa21b85c4b974e2a2b6891eae5fd6dd4164
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/requirements.txt
@@ -0,0 +1,2 @@
+opencv-python
+imageio-ffmpeg
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/16bit-png.json b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/16bit-png.json
new file mode 100644
index 0000000000000000000000000000000000000000..b768bdbcfe8950cb7bde37d02444f0191ad29f51
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/16bit-png.json
@@ -0,0 +1,9 @@
+{
+ "main_pass":
+ [
+ "-n",
+ "-pix_fmt", "rgba64"
+ ],
+ "input_color_depth": "16bit",
+ "extension": "%03d.png"
+}
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/ProRes.json b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/ProRes.json
new file mode 100644
index 0000000000000000000000000000000000000000..84ff1fe38e9aa610c98b99b5987b34132fd21e5c
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/ProRes.json
@@ -0,0 +1,10 @@
+{
+ "main_pass":
+ [
+ "-n", "-c:v", "prores_ks",
+ "-profile:v","3",
+ "-pix_fmt", "yuv422p10"
+ ],
+ "audio_pass": ["-c:a", "pcm_s16le"],
+ "extension": "mov"
+}
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/av1-webm.json b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/av1-webm.json
new file mode 100644
index 0000000000000000000000000000000000000000..ceb53b4dae01df7a016a8859e2ccc53d9d849038
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/av1-webm.json
@@ -0,0 +1,13 @@
+{
+ "main_pass":
+ [
+ "-n", "-c:v", "libsvtav1",
+ "-pix_fmt", ["pix_fmt", ["yuv420p10le", "yuv420p"]],
+ "-crf", ["crf","INT", {"default": 23, "min": 0, "max": 100, "step": 1}]
+ ],
+ "audio_pass": ["-c:a", "libopus"],
+ "input_color_depth": ["input_color_depth", ["8bit", "16bit"]],
+ "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}],
+ "extension": "webm",
+ "environment": {"SVT_LOG": "1"}
+}
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/gifski.json b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/gifski.json
new file mode 100644
index 0000000000000000000000000000000000000000..27a06ff732718a6fd9af0b727ba495f8df024625
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/gifski.json
@@ -0,0 +1,13 @@
+{
+ "main_pass":
+ [
+ "-n",
+ "-pix_fmt", "yuv420p",
+ "-crf", "20",
+ "-b:v", "0"
+ ],
+ "extension": "webm",
+ "gifski_pass": [
+ "-Q", ["quality","INT", {"default": 90, "min": 1, "max": 100, "step": 1}]
+ ]
+}
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/h264-mp4.json b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/h264-mp4.json
new file mode 100644
index 0000000000000000000000000000000000000000..c860f921c32231996a6fe7355172e65a214b00ad
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/h264-mp4.json
@@ -0,0 +1,11 @@
+{
+ "main_pass":
+ [
+ "-n", "-c:v", "libx264",
+ "-pix_fmt", ["pix_fmt", ["yuv420p", "yuv420p10le"]],
+ "-crf", ["crf","INT", {"default": 19, "min": 0, "max": 100, "step": 1}]
+ ],
+ "audio_pass": ["-c:a", "aac"],
+ "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}],
+ "extension": "mp4"
+}
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/h265-mp4.json b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/h265-mp4.json
new file mode 100644
index 0000000000000000000000000000000000000000..7fe0218b23b2a8bd330b4bb6ee2f96c8007a4cd0
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/h265-mp4.json
@@ -0,0 +1,14 @@
+{
+ "main_pass":
+ [
+ "-n", "-c:v", "libx265",
+ "-vtag", "hvc1",
+ "-pix_fmt", ["pix_fmt", ["yuv420p10le", "yuv420p"]],
+ "-crf", ["crf","INT", {"default": 22, "min": 0, "max": 100, "step": 1}],
+ "-preset", "medium",
+ "-x265-params", "log-level=quiet"
+ ],
+ "audio_pass": ["-c:a", "aac"],
+ "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}],
+ "extension": "mp4"
+}
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/nvenc_h264-mp4.json b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/nvenc_h264-mp4.json
new file mode 100644
index 0000000000000000000000000000000000000000..4253a7c9b81bb9ae30c26ef06fcfcc6b80040044
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/nvenc_h264-mp4.json
@@ -0,0 +1,12 @@
+{
+ "main_pass":
+ [
+ "-n", "-c:v", "h264_nvenc",
+ "-pix_fmt", ["pix_fmt", ["yuv420p", "yuv420p10le"]]
+ ],
+ "audio_pass": ["-c:a", "aac"],
+ "bitrate": ["bitrate","INT", {"default": 10, "min": 1, "max": 999, "step": 1 }],
+ "megabit": ["megabit","BOOLEAN", {"default": true}],
+ "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}],
+ "extension": "mp4"
+}
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/nvenc_hevc-mp4.json b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/nvenc_hevc-mp4.json
new file mode 100644
index 0000000000000000000000000000000000000000..e412ca1cda10e77a0c5d93f0ac1f403593630227
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/nvenc_hevc-mp4.json
@@ -0,0 +1,13 @@
+{
+ "main_pass":
+ [
+ "-n", "-c:v", "hevc_nvenc",
+ "-vtag", "hvc1",
+ "-pix_fmt", ["pix_fmt", ["yuv420p", "yuv420p10le"]]
+ ],
+ "audio_pass": ["-c:a", "aac"],
+ "bitrate": ["bitrate","INT", {"default": 10, "min": 1, "max": 999, "step": 1 }],
+ "megabit": ["megabit","BOOLEAN", {"default": true}],
+ "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}],
+ "extension": "mp4"
+}
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/webm.json b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/webm.json
new file mode 100644
index 0000000000000000000000000000000000000000..66eacb1144704d05680dab38d59d399f966bc723
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/video_formats/webm.json
@@ -0,0 +1,12 @@
+{
+ "main_pass":
+ [
+ "-n",
+ "-pix_fmt", "yuv420p",
+ "-crf", ["crf","INT", {"default": 20, "min": 0, "max": 100, "step": 1}],
+ "-b:v", "0"
+ ],
+ "audio_pass": ["-c:a", "libvorbis"],
+ "save_metadata": ["save_metadata", "BOOLEAN", {"default": true}],
+ "extension": "webm"
+}
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/batched_nodes.py b/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/batched_nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..c627ef913b02bd7c7a92186685d4d1904b63cdf7
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/batched_nodes.py
@@ -0,0 +1,48 @@
+import torch
+from nodes import VAEEncode
+
+
+class VAEDecodeBatched:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "samples": ("LATENT", ),
+ "vae": ("VAE", ),
+ "per_batch": ("INT", {"default": 16, "min": 1})
+ }
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/batched nodes"
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "decode"
+
+ def decode(self, vae, samples, per_batch):
+ decoded = []
+ for start_idx in range(0, samples["samples"].shape[0], per_batch):
+ decoded.append(vae.decode(samples["samples"][start_idx:start_idx+per_batch]))
+ return (torch.cat(decoded, dim=0), )
+
+
+class VAEEncodeBatched:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "pixels": ("IMAGE", ), "vae": ("VAE", ),
+ "per_batch": ("INT", {"default": 16, "min": 1})
+ }
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/batched nodes"
+
+ RETURN_TYPES = ("LATENT",)
+ FUNCTION = "encode"
+
+ def encode(self, vae, pixels, per_batch):
+ t = []
+ for start_idx in range(0, pixels.shape[0], per_batch):
+ sub_pixels = VAEEncode.vae_encode_crop_pixels(pixels[start_idx:start_idx+per_batch])
+ t.append(vae.encode(sub_pixels[:,:,:,:3]))
+ return ({"samples": torch.cat(t, dim=0)}, )
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/image_latent_nodes.py b/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/image_latent_nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..a89a56ad278586235c47cf63affca2bd8d64076e
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/image_latent_nodes.py
@@ -0,0 +1,458 @@
+from torch import Tensor
+import torch
+
+import comfy.utils
+
+from .utils import BIGMIN, BIGMAX
+
+
+class MergeStrategies:
+ MATCH_A = "match A"
+ MATCH_B = "match B"
+ MATCH_SMALLER = "match smaller"
+ MATCH_LARGER = "match larger"
+
+ list_all = [MATCH_A, MATCH_B, MATCH_SMALLER, MATCH_LARGER]
+
+
+class ScaleMethods:
+ NEAREST_EXACT = "nearest-exact"
+ BILINEAR = "bilinear"
+ AREA = "area"
+ BICUBIC = "bicubic"
+ BISLERP = "bislerp"
+
+ list_all = [NEAREST_EXACT, BILINEAR, AREA, BICUBIC, BISLERP]
+
+
+class CropMethods:
+ DISABLED = "disabled"
+ CENTER = "center"
+
+ list_all = [DISABLED, CENTER]
+
+
+class SplitLatents:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "latents": ("LATENT",),
+ "split_index": ("INT", {"default": 0, "step": 1, "min": BIGMIN, "max": BIGMAX}),
+ },
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/latent"
+
+ RETURN_TYPES = ("LATENT", "INT", "LATENT", "INT")
+ RETURN_NAMES = ("LATENT_A", "A_count", "LATENT_B", "B_count")
+ FUNCTION = "split_latents"
+
+ def split_latents(self, latents: dict, split_index: int):
+ latents = latents.copy()
+ group_a = latents["samples"][:split_index]
+ group_b = latents["samples"][split_index:]
+ group_a_latent = {"samples": group_a}
+ group_b_latent = {"samples": group_b}
+ return (group_a_latent, group_a.size(0), group_b_latent, group_b.size(0))
+
+
+class SplitImages:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "images": ("IMAGE",),
+ "split_index": ("INT", {"default": 0, "step": 1, "min": BIGMIN, "max": BIGMAX}),
+ },
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/image"
+
+ RETURN_TYPES = ("IMAGE", "INT", "IMAGE", "INT")
+ RETURN_NAMES = ("IMAGE_A", "A_count", "IMAGE_B", "B_count")
+ FUNCTION = "split_images"
+
+ def split_images(self, images: Tensor, split_index: int):
+ group_a = images[:split_index]
+ group_b = images[split_index:]
+ return (group_a, group_a.size(0), group_b, group_b.size(0))
+
+
+class SplitMasks:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "mask": ("MASK",),
+ "split_index": ("INT", {"default": 0, "step": 1, "min": BIGMIN, "max": BIGMAX}),
+ },
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/mask"
+
+ RETURN_TYPES = ("MASK", "INT", "MASK", "INT")
+ RETURN_NAMES = ("MASK_A", "A_count", "MASK_B", "B_count")
+ FUNCTION = "split_masks"
+
+ def split_masks(self, mask: Tensor, split_index: int):
+ group_a = mask[:split_index]
+ group_b = mask[split_index:]
+ return (group_a, group_a.size(0), group_b, group_b.size(0))
+
+
+class MergeLatents:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "latents_A": ("LATENT",),
+ "latents_B": ("LATENT",),
+ "merge_strategy": (MergeStrategies.list_all,),
+ "scale_method": (ScaleMethods.list_all,),
+ "crop": (CropMethods.list_all,),
+ }
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/latent"
+
+ RETURN_TYPES = ("LATENT", "INT",)
+ RETURN_NAMES = ("LATENT", "count",)
+ FUNCTION = "merge"
+
+ def merge(self, latents_A: dict, latents_B: dict, merge_strategy: str, scale_method: str, crop: str):
+ latents = []
+ latents_A = latents_A.copy()["samples"]
+ latents_B = latents_B.copy()["samples"]
+
+ # if not same dimensions, do scaling
+ if latents_A.shape[3] != latents_B.shape[3] or latents_A.shape[2] != latents_B.shape[2]:
+ A_size = latents_A.shape[3] * latents_A.shape[2]
+ B_size = latents_B.shape[3] * latents_B.shape[2]
+ # determine which to use
+ use_A_as_template = True
+ if merge_strategy == MergeStrategies.MATCH_A:
+ pass
+ elif merge_strategy == MergeStrategies.MATCH_B:
+ use_A_as_template = False
+ elif merge_strategy in (MergeStrategies.MATCH_SMALLER, MergeStrategies.MATCH_LARGER):
+ if A_size <= B_size:
+ use_A_as_template = True if merge_strategy == MergeStrategies.MATCH_SMALLER else False
+ # apply scaling
+ if use_A_as_template:
+ latents_B = comfy.utils.common_upscale(latents_B, latents_A.shape[3], latents_A.shape[2], scale_method, crop)
+ else:
+ latents_A = comfy.utils.common_upscale(latents_A, latents_B.shape[3], latents_B.shape[2], scale_method, crop)
+
+ latents.append(latents_A)
+ latents.append(latents_B)
+
+ merged = {"samples": torch.cat(latents, dim=0)}
+ return (merged, len(merged["samples"]),)
+
+
+class MergeImages:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "images_A": ("IMAGE",),
+ "images_B": ("IMAGE",),
+ "merge_strategy": (MergeStrategies.list_all,),
+ "scale_method": (ScaleMethods.list_all,),
+ "crop": (CropMethods.list_all,),
+ }
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/image"
+
+ RETURN_TYPES = ("IMAGE", "INT",)
+ RETURN_NAMES = ("IMAGE", "count",)
+ FUNCTION = "merge"
+
+ def merge(self, images_A: Tensor, images_B: Tensor, merge_strategy: str, scale_method: str, crop: str):
+ images = []
+ # if not same dimensions, do scaling
+ if images_A.shape[3] != images_B.shape[3] or images_A.shape[2] != images_B.shape[2]:
+ images_A = images_A.movedim(-1,1)
+ images_B = images_B.movedim(-1,1)
+
+ A_size = images_A.shape[3] * images_A.shape[2]
+ B_size = images_B.shape[3] * images_B.shape[2]
+ # determine which to use
+ use_A_as_template = True
+ if merge_strategy == MergeStrategies.MATCH_A:
+ pass
+ elif merge_strategy == MergeStrategies.MATCH_B:
+ use_A_as_template = False
+ elif merge_strategy in (MergeStrategies.MATCH_SMALLER, MergeStrategies.MATCH_LARGER):
+ if A_size <= B_size:
+ use_A_as_template = True if merge_strategy == MergeStrategies.MATCH_SMALLER else False
+ # apply scaling
+ if use_A_as_template:
+ images_B = comfy.utils.common_upscale(images_B, images_A.shape[3], images_A.shape[2], scale_method, crop)
+ else:
+ images_A = comfy.utils.common_upscale(images_A, images_B.shape[3], images_B.shape[2], scale_method, crop)
+ images_A = images_A.movedim(1,-1)
+ images_B = images_B.movedim(1,-1)
+
+ images.append(images_A)
+ images.append(images_B)
+ all_images = torch.cat(images, dim=0)
+ return (all_images, all_images.size(0),)
+
+
+class MergeMasks:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "mask_A": ("MASK",),
+ "mask_B": ("MASK",),
+ "merge_strategy": (MergeStrategies.list_all,),
+ "scale_method": (ScaleMethods.list_all,),
+ "crop": (CropMethods.list_all,),
+ }
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/mask"
+
+ RETURN_TYPES = ("MASK", "INT",)
+ RETURN_NAMES = ("MASK", "count",)
+ FUNCTION = "merge"
+
+ def merge(self, mask_A: Tensor, mask_B: Tensor, merge_strategy: str, scale_method: str, crop: str):
+ masks = []
+ # if not same dimensions, do scaling
+ if mask_A.shape[2] != mask_B.shape[2] or mask_A.shape[1] != mask_B.shape[1]:
+ A_size = mask_A.shape[2] * mask_A.shape[1]
+ B_size = mask_B.shape[2] * mask_B.shape[1]
+ # determine which to use
+ use_A_as_template = True
+ if merge_strategy == MergeStrategies.MATCH_A:
+ pass
+ elif merge_strategy == MergeStrategies.MATCH_B:
+ use_A_as_template = False
+ elif merge_strategy in (MergeStrategies.MATCH_SMALLER, MergeStrategies.MATCH_LARGER):
+ if A_size <= B_size:
+ use_A_as_template = True if merge_strategy == MergeStrategies.MATCH_SMALLER else False
+ # add dimension where image channels would be expected to work with common_upscale
+ mask_A = torch.unsqueeze(mask_A, 1)
+ mask_B = torch.unsqueeze(mask_B, 1)
+ # apply scaling
+ if use_A_as_template:
+ mask_B = comfy.utils.common_upscale(mask_B, mask_A.shape[3], mask_A.shape[2], scale_method, crop)
+ else:
+ mask_A = comfy.utils.common_upscale(mask_A, mask_B.shape[3], mask_B.shape[2], scale_method, crop)
+ # undo dimension increase
+ mask_A = torch.squeeze(mask_A, 1)
+ mask_B = torch.squeeze(mask_B, 1)
+
+ masks.append(mask_A)
+ masks.append(mask_B)
+ all_masks = torch.cat(masks, dim=0)
+ return (all_masks, all_masks.size(0),)
+
+
+class SelectEveryNthLatent:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "latents": ("LATENT",),
+ "select_every_nth": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}),
+ },
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/latent"
+
+ RETURN_TYPES = ("LATENT", "INT",)
+ RETURN_NAMES = ("LATENT", "count",)
+ FUNCTION = "select_latents"
+
+ def select_latents(self, latents: dict, select_every_nth: int):
+ sub_latents = latents.copy()["samples"][0::select_every_nth]
+ return ({"samples": sub_latents}, sub_latents.size(0))
+
+
+class SelectEveryNthImage:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "images": ("IMAGE",),
+ "select_every_nth": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}),
+ },
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/image"
+
+ RETURN_TYPES = ("IMAGE", "INT",)
+ RETURN_NAMES = ("IMAGE", "count",)
+ FUNCTION = "select_images"
+
+ def select_images(self, images: Tensor, select_every_nth: int):
+ sub_images = images[0::select_every_nth]
+ return (sub_images, sub_images.size(0))
+
+
+class SelectEveryNthMask:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "mask": ("MASK",),
+ "select_every_nth": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}),
+ },
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/mask"
+
+ RETURN_TYPES = ("MASK", "INT",)
+ RETURN_NAMES = ("MASK", "count",)
+ FUNCTION = "select_masks"
+
+ def select_masks(self, mask: Tensor, select_every_nth: int):
+ sub_mask = mask[0::select_every_nth]
+ return (sub_mask, sub_mask.size(0))
+
+
+class GetLatentCount:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "latents": ("LATENT",),
+ }
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/latent"
+
+ RETURN_TYPES = ("INT",)
+ RETURN_NAMES = ("count",)
+ FUNCTION = "count_input"
+
+ def count_input(self, latents: dict):
+ return (latents["samples"].size(0),)
+
+
+class GetImageCount:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "images": ("IMAGE",),
+ }
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/image"
+
+ RETURN_TYPES = ("INT",)
+ RETURN_NAMES = ("count",)
+ FUNCTION = "count_input"
+
+ def count_input(self, images: Tensor):
+ return (images.size(0),)
+
+
+class GetMaskCount:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "mask": ("MASK",),
+ }
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/mask"
+
+ RETURN_TYPES = ("INT",)
+ RETURN_NAMES = ("count",)
+ FUNCTION = "count_input"
+
+ def count_input(self, mask: Tensor):
+ return (mask.size(0),)
+
+
+class DuplicateLatents:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "latents": ("LATENT",),
+ "multiply_by": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1})
+ }
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/latent"
+
+ RETURN_TYPES = ("LATENT", "INT",)
+ RETURN_NAMES = ("LATENT", "count",)
+ FUNCTION = "duplicate_input"
+
+ def duplicate_input(self, latents: dict[str, Tensor], multiply_by: int):
+ new_latents = latents.copy()
+ full_latents = []
+ for n in range(0, multiply_by):
+ full_latents.append(new_latents["samples"])
+ new_latents["samples"] = torch.cat(full_latents, dim=0)
+ return (new_latents, new_latents["samples"].size(0),)
+
+
+class DuplicateImages:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "images": ("IMAGE",),
+ "multiply_by": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1})
+ }
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/image"
+
+ RETURN_TYPES = ("IMAGE", "INT",)
+ RETURN_NAMES = ("IMAGE", "count",)
+ FUNCTION = "duplicate_input"
+
+ def duplicate_input(self, images: Tensor, multiply_by: int):
+ full_images = []
+ for n in range(0, multiply_by):
+ full_images.append(images)
+ new_images = torch.cat(full_images, dim=0)
+ return (new_images, new_images.size(0),)
+
+
+class DuplicateMasks:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "mask": ("MASK",),
+ "multiply_by": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1})
+ }
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢/mask"
+
+ RETURN_TYPES = ("MASK", "INT",)
+ RETURN_NAMES = ("MASK", "count",)
+ FUNCTION = "duplicate_input"
+
+ def duplicate_input(self, mask: Tensor, multiply_by: int):
+ full_masks = []
+ for n in range(0, multiply_by):
+ full_masks.append(mask)
+ new_mask = torch.cat(full_masks, dim=0)
+ return (new_mask, new_mask.size(0),)
+
+
+# class SelectLatents:
+# @classmethod
+# def INPUT_TYPES(s):
+# return {
+# "required": {
+# "images": ("IMAGE",),
+# "select_indeces": ("STRING", {"default": ""}),
+# },
+# }
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/load_images_nodes.py b/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/load_images_nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e708171eed47831215f8d5c299b1e698498ab4b
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/load_images_nodes.py
@@ -0,0 +1,157 @@
+import os
+import hashlib
+import numpy as np
+import torch
+from PIL import Image, ImageOps
+
+import folder_paths
+from comfy.k_diffusion.utils import FolderOfImages
+from .logger import logger
+from .utils import BIGMAX, calculate_file_hash, get_sorted_dir_files_from_directory, validate_path
+
+
+def is_changed_load_images(directory: str, image_load_cap: int = 0, skip_first_images: int = 0, select_every_nth: int = 1):
+ if not os.path.isdir(directory):
+ return False
+
+ dir_files = get_sorted_dir_files_from_directory(directory, skip_first_images, select_every_nth, FolderOfImages.IMG_EXTENSIONS)
+ if image_load_cap != 0:
+ dir_files = dir_files[:image_load_cap]
+
+ m = hashlib.sha256()
+ for filepath in dir_files:
+ m.update(calculate_file_hash(filepath).encode()) # strings must be encoded before hashing
+ return m.digest().hex()
+
+
+def validate_load_images(directory: str):
+ if not os.path.isdir(directory):
+ return f"Directory '{directory}' cannot be found."
+ dir_files = os.listdir(directory)
+ if len(dir_files) == 0:
+ return f"No files in directory '{directory}'."
+
+ return True
+
+
+def load_images(directory: str, image_load_cap: int = 0, skip_first_images: int = 0, select_every_nth: int = 1):
+ if not os.path.isdir(directory):
+ raise FileNotFoundError(f"Directory '{directory} cannot be found.")
+
+ dir_files = get_sorted_dir_files_from_directory(directory, skip_first_images, select_every_nth, FolderOfImages.IMG_EXTENSIONS)
+
+ if len(dir_files) == 0:
+ raise FileNotFoundError(f"No files in directory '{directory}'.")
+
+ images = []
+ masks = []
+
+ limit_images = False
+ if image_load_cap > 0:
+ limit_images = True
+ image_count = 0
+ loaded_alpha = False
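+ # Placeholder mask for images without an alpha channel; it is re-created at
+ # the true image size once the first image with alpha is encountered below.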
+ zero_mask = torch.zeros((64,64), dtype=torch.float32, device="cpu")
+
+ for image_path in dir_files:
+ if limit_images and image_count >= image_load_cap:
+ break
+ i = Image.open(image_path)
+ i = ImageOps.exif_transpose(i)
+ image = i.convert("RGB")
+ image = np.array(image).astype(np.float32) / 255.0
+ image = torch.from_numpy(image)[None,]
+ if 'A' in i.getbands():
+ mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
+ mask = 1. - torch.from_numpy(mask)
+ if not loaded_alpha:
+ loaded_alpha = True
+ zero_mask = torch.zeros((len(image[0]),len(image[0][0])), dtype=torch.float32, device="cpu")
+ masks = [zero_mask] * image_count
+ else:
+ mask = zero_mask
+ images.append(image)
+ masks.append(mask)
+ image_count += 1
+
+ if len(images) == 0:
+ raise FileNotFoundError(f"No images could be loaded from directory '{directory}'.")
+
+ return (torch.cat(images, dim=0), torch.stack(masks, dim=0), image_count)
+
+
+class LoadImagesFromDirectoryUpload:
+ @classmethod
+ def INPUT_TYPES(s):
+ input_dir = folder_paths.get_input_directory()
+ directories = []
+ for item in os.listdir(input_dir):
+ if not os.path.isfile(os.path.join(input_dir, item)) and item != "clipspace":
+ directories.append(item)
+ return {
+ "required": {
+ "directory": (directories,),
+ },
+ "optional": {
+ "image_load_cap": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}),
+ "skip_first_images": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}),
+ "select_every_nth": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}),
+ }
+ }
+
+ RETURN_TYPES = ("IMAGE", "MASK", "INT")
+ FUNCTION = "load_images"
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢"
+
+ def load_images(self, directory: str, **kwargs):
+ directory = folder_paths.get_annotated_filepath(directory.strip())
+ return load_images(directory, **kwargs)
+
+ @classmethod
+ def IS_CHANGED(s, directory: str, **kwargs):
+ directory = folder_paths.get_annotated_filepath(directory.strip())
+ return is_changed_load_images(directory, **kwargs)
+
+ @classmethod
+ def VALIDATE_INPUTS(s, directory: str, **kwargs):
+ directory = folder_paths.get_annotated_filepath(directory.strip())
+ return validate_load_images(directory)
+
+
+class LoadImagesFromDirectoryPath:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "directory": ("STRING", {"default": "X://path/to/images", "vhs_path_extensions": []}),
+ },
+ "optional": {
+ "image_load_cap": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}),
+ "skip_first_images": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}),
+ "select_every_nth": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}),
+ }
+ }
+
+ RETURN_TYPES = ("IMAGE", "MASK", "INT")
+ FUNCTION = "load_images"
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢"
+
+ def load_images(self, directory: str, **kwargs):
+ if directory is None or validate_load_images(directory) != True:
+ raise Exception("directory is not valid: " + directory)
+
+ return load_images(directory, **kwargs)
+
+ @classmethod
+ def IS_CHANGED(s, directory: str, **kwargs):
+ if directory is None:
+ return "input"
+ return is_changed_load_images(directory, **kwargs)
+
+ @classmethod
+ def VALIDATE_INPUTS(s, directory: str, **kwargs):
+ if directory is None:
+ return True
+ return validate_load_images(directory)
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/load_video_nodes.py b/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/load_video_nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..1292d915a07b9b5185c07d56ea2e492478ea26c7
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/load_video_nodes.py
@@ -0,0 +1,236 @@
+import os
+import itertools
+import numpy as np
+import torch
+from PIL import Image, ImageOps
+import cv2
+
+import folder_paths
+from comfy.utils import common_upscale
+from .logger import logger
+from .utils import BIGMAX, DIMMAX, calculate_file_hash, get_sorted_dir_files_from_directory, get_audio, lazy_eval, hash_path, validate_path
+
+
+video_extensions = ['webm', 'mp4', 'mkv', 'gif']
+
+
+def is_gif(filename) -> bool:
+ file_parts = filename.split('.')
+ return len(file_parts) > 1 and file_parts[-1] == "gif"
+
+
+def target_size(width, height, force_size, custom_width, custom_height) -> tuple[int, int]:
+ if force_size == "Custom":
+ return (custom_width, custom_height)
+ elif force_size == "Custom Height":
+ force_size = "?x"+str(custom_height)
+ elif force_size == "Custom Width":
+ force_size = str(custom_width)+"x?"
+
+ if force_size != "Disabled":
+ force_size = force_size.split("x")
+ if force_size[0] == "?":
+ width = (width*int(force_size[1]))//height
+ #Limit to a multiple of 8 for latent conversion
+ #((width+4) & ~7 rounds to the nearest multiple of 8)
+ width = int(width)+4 & ~7
+ height = int(force_size[1])
+ elif force_size[1] == "?":
+ height = (height*int(force_size[0]))//width
+ height = int(height)+4 & ~7
+ width = int(force_size[0])
+ else:
+ width = int(force_size[0])
+ height = int(force_size[1])
+ return (width, height)
+
+def cv_frame_generator(video, force_rate, frame_load_cap, skip_first_frames,
+ select_every_nth, batch_manager=None, unique_id=None):
+ try:
+ video_cap = cv2.VideoCapture(video)
+ if not video_cap.isOpened():
+ raise ValueError(f"{video} could not be loaded with cv.")
+ # set video_cap to look at start_index frame
+ total_frame_count = 0
+ total_frames_evaluated = -1
+ frames_added = 0
+ base_frame_time = 1/video_cap.get(cv2.CAP_PROP_FPS)
+ width = video_cap.get(cv2.CAP_PROP_FRAME_WIDTH)
+ height = video_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
+ prev_frame = None
+ if force_rate == 0:
+ target_frame_time = base_frame_time
+ else:
+ target_frame_time = 1/force_rate
+ yield (width, height, target_frame_time)
+ time_offset = target_frame_time - base_frame_time
+ while video_cap.isOpened():
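+ # Frame-rate resampling: grab source frames until one target-rate interval
+ # has elapsed. When the source rate is lower than the target rate, no new
+ # frame is grabbed and retrieve() below re-emits the last grabbed frame,
+ # duplicating it.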
+ if time_offset < target_frame_time:
+ is_returned = video_cap.grab()
+ # if didn't return frame, video has ended
+ if not is_returned:
+ break
+ time_offset += base_frame_time
+ if time_offset < target_frame_time:
+ continue
+ time_offset -= target_frame_time
+ # if not at start_index, skip doing anything with frame
+ total_frame_count += 1
+ if total_frame_count <= skip_first_frames:
+ continue
+ else:
+ total_frames_evaluated += 1
+
+ # if should not be selected, skip doing anything with frame
+ if total_frames_evaluated%select_every_nth != 0:
+ continue
+
+ # opencv loads images in BGR format (yuck), so need to convert to RGB for ComfyUI use
+ # follow up: can videos ever have an alpha channel?
+ # To my testing: No. opencv has no support for alpha
+ unused, frame = video_cap.retrieve()
+ frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+ # convert frame to comfyui's expected format
+ # TODO: frame contains no exif information. Check if opencv2 has already applied
+ frame = np.array(frame, dtype=np.float32) / 255.0
+ if prev_frame is not None:
+ inp = yield prev_frame
+ if inp is not None:
+ #ensure the finally block is called
+ return
+ prev_frame = frame
+ frames_added += 1
+ # if cap exists and we've reached it, stop processing frames
+ if frame_load_cap > 0 and frames_added >= frame_load_cap:
+ break
+ if batch_manager is not None:
+ batch_manager.inputs.pop(unique_id)
+ batch_manager.has_closed_inputs = True
+ if prev_frame is not None:
+ yield prev_frame
+ finally:
+ video_cap.release()
+
+def load_video_cv(video: str, force_rate: int, force_size: str,
+ custom_width: int,custom_height: int, frame_load_cap: int,
+ skip_first_frames: int, select_every_nth: int,
+ batch_manager=None, unique_id=None):
+ if batch_manager is None or unique_id not in batch_manager.inputs:
+ gen = cv_frame_generator(video, force_rate, frame_load_cap, skip_first_frames,
+ select_every_nth, batch_manager, unique_id)
+ (width, height, target_frame_time) = next(gen)
+ width = int(width)
+ height = int(height)
+ if batch_manager is not None:
+ batch_manager.inputs[unique_id] = (gen, width, height, target_frame_time)
+ else:
+ (gen, width, height, target_frame_time) = batch_manager.inputs[unique_id]
+ if batch_manager is not None:
+ gen = itertools.islice(gen, batch_manager.frames_per_batch)
+
+ #Some minor wizardry to eliminate a copy and reduce max memory by a factor of ~2
+ images = torch.from_numpy(np.fromiter(gen, np.dtype((np.float32, (height, width, 3)))))
+ if len(images) == 0:
+ raise RuntimeError("No frames generated")
+ if force_size != "Disabled":
+ new_size = target_size(width, height, force_size, custom_width, custom_height)
+ if new_size[0] != width or new_size[1] != height:
+ s = images.movedim(-1,1)
+ s = common_upscale(s, new_size[0], new_size[1], "lanczos", "center")
+ images = s.movedim(1,-1)
+
+ #Setup lambda for lazy audio capture
+ audio = lambda : get_audio(video, skip_first_frames * target_frame_time,
+ frame_load_cap*target_frame_time*select_every_nth)
+ return (images, len(images), lazy_eval(audio))
+
+
+class LoadVideoUpload:
+ @classmethod
+ def INPUT_TYPES(s):
+ input_dir = folder_paths.get_input_directory()
+ files = []
+ for f in os.listdir(input_dir):
+ if os.path.isfile(os.path.join(input_dir, f)):
+ file_parts = f.split('.')
+ if len(file_parts) > 1 and (file_parts[-1] in video_extensions):
+ files.append(f)
+ return {"required": {
+ "video": (sorted(files),),
+ "force_rate": ("INT", {"default": 0, "min": 0, "max": 60, "step": 1}),
+ "force_size": (["Disabled", "Custom Height", "Custom Width", "Custom", "256x?", "?x256", "256x256", "512x?", "?x512", "512x512"],),
+ "custom_width": ("INT", {"default": 512, "min": 0, "max": DIMMAX, "step": 8}),
+ "custom_height": ("INT", {"default": 512, "min": 0, "max": DIMMAX, "step": 8}),
+ "frame_load_cap": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}),
+ "skip_first_frames": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}),
+ "select_every_nth": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}),
+ },
+ "optional": {
+ "batch_manager": ("VHS_BatchManager",)
+ },
+ "hidden": {
+ "unique_id": "UNIQUE_ID"
+ },
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢"
+
+ RETURN_TYPES = ("IMAGE", "INT", "VHS_AUDIO", )
+ RETURN_NAMES = ("IMAGE", "frame_count", "audio",)
+ FUNCTION = "load_video"
+
+ def load_video(self, **kwargs):
+ kwargs['video'] = folder_paths.get_annotated_filepath(kwargs['video'].strip("\""))
+ return load_video_cv(**kwargs)
+
+ @classmethod
+ def IS_CHANGED(s, video, **kwargs):
+ image_path = folder_paths.get_annotated_filepath(video)
+ return calculate_file_hash(image_path)
+
+ @classmethod
+ def VALIDATE_INPUTS(s, video, force_size, **kwargs):
+ if not folder_paths.exists_annotated_filepath(video):
+ return "Invalid video file: {}".format(video)
+ return True
+
+
+class LoadVideoPath:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "video": ("STRING", {"default": "X://insert/path/here.mp4", "vhs_path_extensions": video_extensions}),
+ "force_rate": ("INT", {"default": 0, "min": 0, "max": 60, "step": 1}),
+ "force_size": (["Disabled", "Custom Height", "Custom Width", "Custom", "256x?", "?x256", "256x256", "512x?", "?x512", "512x512"],),
+ "custom_width": ("INT", {"default": 512, "min": 0, "max": DIMMAX, "step": 8}),
+ "custom_height": ("INT", {"default": 512, "min": 0, "max": DIMMAX, "step": 8}),
+ "frame_load_cap": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}),
+ "skip_first_frames": ("INT", {"default": 0, "min": 0, "max": BIGMAX, "step": 1}),
+ "select_every_nth": ("INT", {"default": 1, "min": 1, "max": BIGMAX, "step": 1}),
+ },
+ "optional": {
+ "batch_manager": ("VHS_BatchManager",)
+ },
+ "hidden": {
+ "unique_id": "UNIQUE_ID"
+ },
+ }
+
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢"
+
+ RETURN_TYPES = ("IMAGE", "INT", "VHS_AUDIO", )
+ RETURN_NAMES = ("IMAGE", "frame_count", "audio",)
+ FUNCTION = "load_video"
+
+ def load_video(self, **kwargs):
+ if kwargs['video'] is None or validate_path(kwargs['video']) != True:
+ raise Exception("video is not a valid path: " + kwargs['video'])
+ return load_video_cv(**kwargs)
+
+ @classmethod
+ def IS_CHANGED(s, video, **kwargs):
+ return hash_path(video)
+
+ @classmethod
+ def VALIDATE_INPUTS(s, video, **kwargs):
+ return validate_path(video, allow_none=True)
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/logger.py b/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/logger.py
new file mode 100755
index 0000000000000000000000000000000000000000..6e7b8d64bda275608ba6bf8ee28d2a2112e3e2be
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/logger.py
@@ -0,0 +1,36 @@
+import sys
+import copy
+import logging
+
+
+class ColoredFormatter(logging.Formatter):
+ COLORS = {
+ "DEBUG": "\033[0;36m", # CYAN
+ "INFO": "\033[0;32m", # GREEN
+ "WARNING": "\033[0;33m", # YELLOW
+ "ERROR": "\033[0;31m", # RED
+ "CRITICAL": "\033[0;37;41m", # WHITE ON RED
+ "RESET": "\033[0m", # RESET COLOR
+ }
+
+ def format(self, record):
+ colored_record = copy.copy(record)
+ levelname = colored_record.levelname
+ seq = self.COLORS.get(levelname, self.COLORS["RESET"])
+ colored_record.levelname = f"{seq}{levelname}{self.COLORS['RESET']}"
+ return super().format(colored_record)
+
+
+# Create a new logger
+logger = logging.getLogger("VideoHelperSuite")
+logger.propagate = False
+
+# Add handler if we don't have one.
+if not logger.handlers:
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setFormatter(ColoredFormatter("[%(name)s] - %(levelname)s - %(message)s"))
+ logger.addHandler(handler)
+
+# Configure logger
+loglevel = logging.INFO
+logger.setLevel(loglevel)
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/nodes.py b/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9b13437c59c0aaaad1c6426254351c8cec85584
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/nodes.py
@@ -0,0 +1,620 @@
+import os
+import sys
+import json
+import subprocess
+import numpy as np
+import re
+import datetime
+from typing import List
+from PIL import Image, ExifTags
+from PIL.PngImagePlugin import PngInfo
+from pathlib import Path
+
+import folder_paths
+from .logger import logger
+from .image_latent_nodes import *
+from .load_video_nodes import LoadVideoUpload, LoadVideoPath
+from .load_images_nodes import LoadImagesFromDirectoryUpload, LoadImagesFromDirectoryPath
+from .batched_nodes import VAEEncodeBatched, VAEDecodeBatched
+from .utils import ffmpeg_path, get_audio, hash_path, validate_path, requeue_workflow, gifski_path
+
+folder_paths.folder_names_and_paths["VHS_video_formats"] = (
+ [
+ os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "video_formats"),
+ ],
+ [".json"]
+)
+
+def gen_format_widgets(video_format):
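+ # Yields each widget definition wrapped in a one-element list; a caller may
+ # assign a replacement to item[0], and the wrapped value is written back into
+ # video_format when the generator resumes, enabling in-place substitution.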
+ for k in video_format:
+ if k.endswith("_pass"):
+ for i in range(len(video_format[k])):
+ if isinstance(video_format[k][i], list):
+ item = [video_format[k][i]]
+ yield item
+ video_format[k][i] = item[0]
+ else:
+ if isinstance(video_format[k], list):
+ item = [video_format[k]]
+ yield item
+ video_format[k] = item[0]
+
+def get_video_formats():
+ formats = []
+ for format_name in folder_paths.get_filename_list("VHS_video_formats"):
+ format_name = format_name[:-5]
+ video_format_path = folder_paths.get_full_path("VHS_video_formats", format_name + ".json")
+ with open(video_format_path, 'r') as stream:
+ video_format = json.load(stream)
+ if "gifski_pass" in video_format and gifski_path is None:
+ #Skip format
+ continue
+ widgets = [w[0] for w in gen_format_widgets(video_format)]
+ if (len(widgets) > 0):
+ formats.append(["video/" + format_name, widgets])
+ else:
+ formats.append("video/" + format_name)
+ return formats
+
+def get_format_widget_defaults(format_name):
+ video_format_path = folder_paths.get_full_path("VHS_video_formats", format_name + ".json")
+ with open(video_format_path, 'r') as stream:
+ video_format = json.load(stream)
+ results = {}
+ for w in gen_format_widgets(video_format):
+ if len(w[0]) > 2 and 'default' in w[0][2]:
+ default = w[0][2]['default']
+ else:
+ if type(w[0][1]) is list:
+ default = w[0][1][0]
+ else:
+ #NOTE: This doesn't respect max/min, but should be good enough as a fallback to a fallback to a fallback
+ default = {"BOOLEAN": False, "INT": 0, "FLOAT": 0, "STRING": ""}[w[0][1]]
+ results[w[0][0]] = default
+ return results
+
+
+def apply_format_widgets(format_name, kwargs):
+ video_format_path = folder_paths.get_full_path("VHS_video_formats", format_name + ".json")
+ with open(video_format_path, 'r') as stream:
+ video_format = json.load(stream)
+ for w in gen_format_widgets(video_format):
+ assert(w[0][0] in kwargs)
+ w[0] = str(kwargs[w[0][0]])
+ return video_format
+
+def tensor_to_int(tensor, bits):
+ #TODO: investigate benefit of rounding by adding 0.5 before clip/cast
+ tensor = tensor.cpu().numpy() * (2**bits-1)
+ return np.clip(tensor, 0, (2**bits-1))
+def tensor_to_shorts(tensor):
+ return tensor_to_int(tensor, 16).astype(np.uint16)
+def tensor_to_bytes(tensor):
+ return tensor_to_int(tensor, 8).astype(np.uint8)
+
+def ffmpeg_process(args, video_format, video_metadata, file_path, env):
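+ # Generator used as a sink: prime it with send(None), stream raw frame bytes
+ # via send(frame_data), then send(None) again to close stdin and finalize.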
+
+ res = None
+ frame_data = yield
+ if video_format.get('save_metadata', 'False') != 'False':
+ os.makedirs(folder_paths.get_temp_directory(), exist_ok=True)
+ metadata = json.dumps(video_metadata)
+ metadata_path = os.path.join(folder_paths.get_temp_directory(), "metadata.txt")
+ #metadata from file should escape = ; # \ and newline
+ metadata = metadata.replace("\\","\\\\")
+ metadata = metadata.replace(";","\\;")
+ metadata = metadata.replace("#","\\#")
+ metadata = metadata.replace("=","\\=")
+ metadata = metadata.replace("\n","\\\n")
+ metadata = "comment=" + metadata
+ with open(metadata_path, "w") as f:
+ f.write(";FFMETADATA1\n")
+ f.write(metadata)
+ m_args = args[:1] + ["-i", metadata_path] + args[1:] + ["-metadata", "creation_time=now"]
+ with subprocess.Popen(m_args + [file_path], stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE, env=env) as proc:
+ try:
+ while frame_data is not None:
+ proc.stdin.write(frame_data)
+ #TODO: skip flush for increased speed
+ proc.stdin.flush()
+ frame_data = yield
+ proc.stdin.close()
+ res = proc.stderr.read()
+ except BrokenPipeError as e:
+ err = proc.stderr.read()
+ #Check if output file exists. If it does, the re-execution
+ #will also fail. This obscures the cause of the error
+ #and seems to never occur concurrent to the metadata issue
+ if os.path.exists(file_path):
+ raise Exception("An error occured in the ffmpeg subprocess:\n" \
+ + err.decode("utf-8"))
+ #Res was not set
+ print(err.decode("utf-8"), end="", file=sys.stderr)
+ logger.warn("An error occurred when saving with metadata")
+ if res != b'':
+ with subprocess.Popen(args + [file_path], stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE, env=env) as proc:
+ try:
+ while frame_data is not None:
+ proc.stdin.write(frame_data)
+ proc.stdin.flush()
+ frame_data = yield
+ proc.stdin.close()
+ res = proc.stderr.read()
+ except BrokenPipeError as e:
+ res = proc.stderr.read()
+ raise Exception("An error occured in the ffmpeg subprocess:\n" \
+ + res.decode("utf-8"))
+ if len(res) > 0:
+ print(res.decode("utf-8"), end="", file=sys.stderr)
+
+class VideoCombine:
+ @classmethod
+ def INPUT_TYPES(s):
+ #Hide ffmpeg formats if ffmpeg isn't available
+ if ffmpeg_path is not None:
+ ffmpeg_formats = get_video_formats()
+ else:
+ ffmpeg_formats = []
+ return {
+ "required": {
+ "images": ("IMAGE",),
+ "frame_rate": (
+ "INT",
+ {"default": 8, "min": 1, "step": 1},
+ ),
+ "loop_count": ("INT", {"default": 0, "min": 0, "max": 100, "step": 1}),
+ "filename_prefix": ("STRING", {"default": "AnimateDiff"}),
+ "format": (["image/gif", "image/webp"] + ffmpeg_formats,),
+ "pingpong": ("BOOLEAN", {"default": False}),
+ "save_output": ("BOOLEAN", {"default": True}),
+ },
+ "optional": {
+ "audio": ("VHS_AUDIO",),
+ "batch_manager": ("VHS_BatchManager",)
+ },
+ "hidden": {
+ "prompt": "PROMPT",
+ "extra_pnginfo": "EXTRA_PNGINFO",
+ "unique_id": "UNIQUE_ID"
+ },
+ }
+
+ RETURN_TYPES = ("VHS_FILENAMES",)
+ RETURN_NAMES = ("Filenames",)
+ OUTPUT_NODE = True
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢"
+ FUNCTION = "combine_video"
+
+ def combine_video(
+ self,
+ images,
+ frame_rate: int,
+ loop_count: int,
+ filename_prefix="AnimateDiff",
+ format="image/gif",
+ pingpong=False,
+ save_output=True,
+ prompt=None,
+ extra_pnginfo=None,
+ audio=None,
+ unique_id=None,
+ manual_format_widgets=None,
+ batch_manager=None
+ ):
+ # get output information
+ output_dir = (
+ folder_paths.get_output_directory()
+ if save_output
+ else folder_paths.get_temp_directory()
+ )
+ (
+ full_output_folder,
+ filename,
+ _,
+ subfolder,
+ _,
+ ) = folder_paths.get_save_image_path(filename_prefix, output_dir)
+ output_files = []
+
+ metadata = PngInfo()
+ video_metadata = {}
+ if prompt is not None:
+ metadata.add_text("prompt", json.dumps(prompt))
+ video_metadata["prompt"] = prompt
+ if extra_pnginfo is not None:
+ for x in extra_pnginfo:
+ metadata.add_text(x, json.dumps(extra_pnginfo[x]))
+ video_metadata[x] = extra_pnginfo[x]
+ metadata.add_text("CreationTime", datetime.datetime.now().isoformat(" ")[:19])
+
+ if batch_manager is not None and unique_id in batch_manager.outputs:
+ (counter, output_process) = batch_manager.outputs[unique_id]
+ else:
+ # comfy counter workaround
+ max_counter = 0
+
+ # Loop through the existing files
+ matcher = re.compile(f"{re.escape(filename)}_(\d+)\D*\..+")
+ for existing_file in os.listdir(full_output_folder):
+ # Check if the file matches the expected format
+ match = matcher.fullmatch(existing_file)
+ if match:
+ # Extract the numeric portion of the filename
+ file_counter = int(match.group(1))
+ # Update the maximum counter value if necessary
+ if file_counter > max_counter:
+ max_counter = file_counter
+
+ # Increment the counter by 1 to get the next available value
+ counter = max_counter + 1
+ output_process = None
+
+ # save first frame as png to keep metadata
+ file = f"{filename}_{counter:05}.png"
+ file_path = os.path.join(full_output_folder, file)
+ Image.fromarray(tensor_to_bytes(images[0])).save(
+ file_path,
+ pnginfo=metadata,
+ compress_level=4,
+ )
+ output_files.append(file_path)
+
+ format_type, format_ext = format.split("/")
+ if format_type == "image":
+ if batch_manager is not None:
+ raise Exception("Pillow('image/') formats are not compatible with batched output")
+ image_kwargs = {}
+ if format_ext == "gif":
+ image_kwargs['disposal'] = 2
+ if format_ext == "webp":
+ #Save timestamp information
+ exif = Image.Exif()
+ exif[ExifTags.IFD.Exif] = {36867: datetime.datetime.now().isoformat(" ")[:19]}
+ image_kwargs['exif'] = exif
+ file = f"{filename}_{counter:05}.{format_ext}"
+ file_path = os.path.join(full_output_folder, file)
+ images = tensor_to_bytes(images)
+ if pingpong:
+ images = np.concatenate((images, images[-2:0:-1]))
+ frames = [Image.fromarray(f) for f in images]
+ # Use pillow directly to save an animated image
+ frames[0].save(
+ file_path,
+ format=format_ext.upper(),
+ save_all=True,
+ append_images=frames[1:],
+ duration=round(1000 / frame_rate),
+ loop=loop_count,
+ compress_level=4,
+ **image_kwargs
+ )
+ output_files.append(file_path)
+ else:
+ # Use ffmpeg to save a video
+ if ffmpeg_path is None:
+ #Should never be reachable
+ raise ProcessLookupError("Could not find ffmpeg")
+
+ #Acquire additional format_widget values
+ kwargs = None
+ if manual_format_widgets is None:
+ if prompt is not None:
+ kwargs = prompt[unique_id]['inputs']
+ else:
+ manual_format_widgets = {}
+ if kwargs is None:
+ kwargs = get_format_widget_defaults(format_ext)
+ missing = {}
+ for k in kwargs.keys():
+ if k in manual_format_widgets:
+ kwargs[k] = manual_format_widgets[k]
+ else:
+ missing[k] = kwargs[k]
+ if len(missing) > 0:
+                logger.warning("Extra format values were not provided; the following defaults will be used: " + str(missing) + "\nThis is likely due to usage of ComfyUI-to-python. These values can be manually set by supplying a manual_format_widgets argument")
+
+ video_format = apply_format_widgets(format_ext, kwargs)
+ if video_format.get('input_color_depth', '8bit') == '16bit':
+ images = tensor_to_shorts(images)
+ if images.shape[-1] == 4:
+ i_pix_fmt = 'rgba64'
+ else:
+ i_pix_fmt = 'rgb48'
+ else:
+ images = tensor_to_bytes(images)
+ if images.shape[-1] == 4:
+ i_pix_fmt = 'rgba'
+ else:
+ i_pix_fmt = 'rgb24'
+ if pingpong:
+ if batch_manager is not None:
+ logger.error("pingpong is incompatible with batched output")
+ images = np.concatenate((images, images[-2:0:-1]))
+ file = f"{filename}_{counter:05}.{video_format['extension']}"
+ file_path = os.path.join(full_output_folder, file)
+ dimensions = f"{len(images[0][0])}x{len(images[0])}"
+ loop_args = ["-vf", "loop=loop=" + str(loop_count)+":size=" + str(len(images))]
+ bitrate_arg = []
+ bitrate = video_format.get('bitrate')
+ if bitrate is not None:
+ bitrate_arg = ["-b:v", str(bitrate) + "M" if video_format.get('megabit') == 'True' else str(bitrate) + "K"]
+ args = [ffmpeg_path, "-v", "error", "-f", "rawvideo", "-pix_fmt", i_pix_fmt,
+ "-s", dimensions, "-r", str(frame_rate), "-i", "-"] \
+ + loop_args + video_format['main_pass'] + bitrate_arg
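+        # Illustrative expansion of the input side (example values; main_pass
+        # and bitrate come from the selected format definition):
+        #   ffmpeg -v error -f rawvideo -pix_fmt rgb24 -s 512x512 -r 8 -i - \
+        #       -vf loop=loop=0:size=16 <main_pass flags> [-b:v 10M]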
+
+ env=os.environ.copy()
+ if "environment" in video_format:
+ env.update(video_format["environment"])
+
+ if output_process is None:
+ output_process = ffmpeg_process(args, video_format, video_metadata, file_path, env)
+ #Proceed to first yield
+ output_process.send(None)
+ if batch_manager is not None:
+ batch_manager.outputs[unique_id] = (counter, output_process)
+
+ output_process.send(images.tobytes())
+ if batch_manager is not None:
+ requeue_workflow((batch_manager.unique_id, not batch_manager.has_closed_inputs))
+ if batch_manager is None or batch_manager.has_closed_inputs:
+ #Close pipe and wait for termination.
+ try:
+ output_process.send(None)
+ except StopIteration:
+ pass
+ if batch_manager is not None:
+ batch_manager.outputs.pop(unique_id)
+ if len(batch_manager.outputs) == 0:
+ batch_manager.reset()
+ else:
+ #batch is unfinished
+ #TODO: Check if empty output breaks other custom nodes
+ return {"ui": {"unfinished_batch": [True]}, "result": ((save_output, []),)}
+
+ output_files.append(file_path)
+
+ if "gifski_pass" in video_format:
+ gif_output = f"{filename}_{counter:05}.gif"
+            gif_output_path = os.path.join(full_output_folder, gif_output)
+ gifski_args = [gifski_path] + video_format["gifski_pass"] \
+ + ["-o", gif_output_path, file_path]
+ try:
+ res = subprocess.run(gifski_args, env=env, check=True, capture_output=True)
+ except subprocess.CalledProcessError as e:
+ raise Exception("An error occured in the gifski subprocess:\n" \
+ + e.stderr.decode("utf-8"))
+ if res.stderr:
+ print(res.stderr.decode("utf-8"), end="", file=sys.stderr)
+ #output format is actually an image and should be correctly marked
+ #TODO: Evaluate a more consistent solution for this
+ format = "image/gif"
+ output_files.append(gif_output_path)
+ file = gif_output
+
+ elif audio is not None and audio() is not False:
+ # Create audio file if input was provided
+ output_file_with_audio = f"{filename}_{counter:05}-audio.{video_format['extension']}"
+ output_file_with_audio_path = os.path.join(full_output_folder, output_file_with_audio)
+ if "audio_pass" not in video_format:
+ logger.warn("Selected video format does not have explicit audio support")
+ video_format["audio_pass"] = ["-c:a", "libopus"]
+
+
+ # FFmpeg command with audio re-encoding
+ #TODO: expose audio quality options if format widgets makes it in
+ #Reconsider forcing apad/shortest
+ mux_args = [ffmpeg_path, "-v", "error", "-n", "-i", file_path,
+ "-i", "-", "-c:v", "copy"] \
+ + video_format["audio_pass"] \
+ + ["-af", "apad", "-shortest", output_file_with_audio_path]
+
+ try:
+ res = subprocess.run(mux_args, input=audio(), env=env,
+ capture_output=True, check=True)
+ except subprocess.CalledProcessError as e:
+ raise Exception("An error occured in the ffmpeg subprocess:\n" \
+ + e.stderr.decode("utf-8"))
+ if res.stderr:
+ print(res.stderr.decode("utf-8"), end="", file=sys.stderr)
+ output_files.append(output_file_with_audio_path)
+ #Return this file with audio to the webui.
+ #It will be muted unless opened or saved with right click
+ file = output_file_with_audio
+
+ previews = [
+ {
+ "filename": file,
+ "subfolder": subfolder,
+ "type": "output" if save_output else "temp",
+ "format": format,
+ }
+ ]
+ return {"ui": {"gifs": previews}, "result": ((save_output, output_files),)}
+
+    @classmethod
+    def VALIDATE_INPUTS(cls, format, **kwargs):
+ return True
+
+class LoadAudio:
+ @classmethod
+ def INPUT_TYPES(s):
+ #Hide ffmpeg formats if ffmpeg isn't available
+ return {
+ "required": {
+ "audio_file": ("STRING", {"default": "input/", "vhs_path_extensions": ['wav','mp3','ogg','m4a','flac']}),
+ },
+ "optional" : {"seek_seconds": ("FLOAT", {"default": 0, "min": 0})}
+ }
+
+ RETURN_TYPES = ("VHS_AUDIO",)
+ RETURN_NAMES = ("audio",)
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢"
+ FUNCTION = "load_audio"
+ def load_audio(self, audio_file, seek_seconds):
+ if audio_file is None or validate_path(audio_file) != True:
+ raise Exception("audio_file is not a valid path: " + audio_file)
+ #Eagerly fetch the audio since the user must be using it if the
+ #node executes, unlike Load Video
+ audio = get_audio(audio_file, start_time=seek_seconds)
+ return (lambda : audio,)
+
+ @classmethod
+ def IS_CHANGED(s, audio_file, seek_seconds):
+ return hash_path(audio_file)
+
+ @classmethod
+ def VALIDATE_INPUTS(s, audio_file, **kwargs):
+ return validate_path(audio_file, allow_none=True)
+
+class PruneOutputs:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "filenames": ("VHS_FILENAMES",),
+ "options": (["Intermediate", "Intermediate and Utility"],)
+ }
+ }
+
+ RETURN_TYPES = ()
+ OUTPUT_NODE = True
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢"
+ FUNCTION = "prune_outputs"
+
+ def prune_outputs(self, filenames, options):
+ if len(filenames[1]) == 0:
+ return ()
+        assert 2 <= len(filenames[1]) <= 3
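+        # Layout assumption (mirrors combine_video's output_files): index 0 is
+        # the utility first-frame PNG, index -1 is the final output, and any
+        # middle entry is an intermediate pass (e.g. gifski's video input).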
+ delete_list = []
+ if options in ["Intermediate", "Intermediate and Utility", "All"]:
+ delete_list += filenames[1][1:-1]
+ if options in ["Intermediate and Utility", "All"]:
+ delete_list.append(filenames[1][0])
+ if options in ["All"]:
+ delete_list.append(filenames[1][-1])
+
+ output_dirs = [os.path.abspath("output"), os.path.abspath("temp")]
+ for file in delete_list:
+ #Check that path is actually an output directory
+ if (os.path.commonpath([output_dirs[0], file]) != output_dirs[0]) \
+ and (os.path.commonpath([output_dirs[1], file]) != output_dirs[1]):
+ raise Exception("Tried to prune output from invalid directory: " + file)
+ if os.path.exists(file):
+ os.remove(file)
+ return ()
+
+class BatchManager:
+ def __init__(self, frames_per_batch=-1):
+ self.frames_per_batch = frames_per_batch
+ self.inputs = {}
+ self.outputs = {}
+ self.unique_id = None
+ self.has_closed_inputs = False
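+    # Batching contract, as inferred from usage elsewhere in this suite: loader
+    # nodes park suspended generators in self.inputs, VideoCombine parks its
+    # (counter, ffmpeg pipe) pair in self.outputs, and update_batch() resets
+    # both unless the hidden 'requeue' counter marks a continuation run.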
+ def reset(self):
+ self.close_inputs()
+ for key in self.outputs:
+ if getattr(self.outputs[key][-1], "gi_suspended", False):
+ try:
+ self.outputs[key][-1].send(None)
+ except StopIteration:
+ pass
+ self.__init__(self.frames_per_batch)
+ def has_open_inputs(self):
+ return len(self.inputs) > 0
+ def close_inputs(self):
+ for key in self.inputs:
+ if getattr(self.inputs[key][-1], "gi_suspended", False):
+ try:
+ self.inputs[key][-1].send(1)
+ except StopIteration:
+ pass
+ self.inputs = {}
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "frames_per_batch": ("INT", {"default": 16, "min": 1, "max": 128, "step": 1})
+ },
+ "hidden": {
+ "prompt": "PROMPT",
+ "unique_id": "UNIQUE_ID"
+ },
+ }
+
+ RETURN_TYPES = ("VHS_BatchManager",)
+ CATEGORY = "Video Helper Suite 🎥🅥🅗🅢"
+ FUNCTION = "update_batch"
+
+ def update_batch(self, frames_per_batch, prompt=None, unique_id=None):
+ if unique_id is not None and prompt is not None:
+ requeue = prompt[unique_id]['inputs'].get('requeue', 0)
+ else:
+ requeue = 0
+ if requeue == 0:
+ self.reset()
+ self.frames_per_batch = frames_per_batch
+ self.unique_id = unique_id
+ #onExecuted seems to not be called unless some message is sent
+ return (self,)
+
+
+NODE_CLASS_MAPPINGS = {
+ "VHS_VideoCombine": VideoCombine,
+ "VHS_LoadVideo": LoadVideoUpload,
+ "VHS_LoadVideoPath": LoadVideoPath,
+ "VHS_LoadImages": LoadImagesFromDirectoryUpload,
+ "VHS_LoadImagesPath": LoadImagesFromDirectoryPath,
+ "VHS_LoadAudio": LoadAudio,
+ "VHS_PruneOutputs": PruneOutputs,
+ "VHS_BatchManager": BatchManager,
+ # Latent and Image nodes
+ "VHS_SplitLatents": SplitLatents,
+ "VHS_SplitImages": SplitImages,
+ "VHS_SplitMasks": SplitMasks,
+ "VHS_MergeLatents": MergeLatents,
+ "VHS_MergeImages": MergeImages,
+ "VHS_MergeMasks": MergeMasks,
+ "VHS_SelectEveryNthLatent": SelectEveryNthLatent,
+ "VHS_SelectEveryNthImage": SelectEveryNthImage,
+ "VHS_SelectEveryNthMask": SelectEveryNthMask,
+ "VHS_GetLatentCount": GetLatentCount,
+ "VHS_GetImageCount": GetImageCount,
+ "VHS_GetMaskCount": GetMaskCount,
+ "VHS_DuplicateLatents": DuplicateLatents,
+ "VHS_DuplicateImages": DuplicateImages,
+ "VHS_DuplicateMasks": DuplicateMasks,
+ # Batched Nodes
+ "VHS_VAEEncodeBatched": VAEEncodeBatched,
+ "VHS_VAEDecodeBatched": VAEDecodeBatched,
+}
+NODE_DISPLAY_NAME_MAPPINGS = {
+ "VHS_VideoCombine": "Video Combine 🎥🅥🅗🅢",
+ "VHS_LoadVideo": "Load Video (Upload) 🎥🅥🅗🅢",
+ "VHS_LoadVideoPath": "Load Video (Path) 🎥🅥🅗🅢",
+ "VHS_LoadImages": "Load Images (Upload) 🎥🅥🅗🅢",
+ "VHS_LoadImagesPath": "Load Images (Path) 🎥🅥🅗🅢",
+ "VHS_LoadAudio": "Load Audio (Path)🎥🅥🅗🅢",
+ "VHS_PruneOutputs": "Prune Outputs 🎥🅥🅗🅢",
+ "VHS_BatchManager": "Batch Manager 🎥🅥🅗🅢",
+ # Latent and Image nodes
+ "VHS_SplitLatents": "Split Latent Batch 🎥🅥🅗🅢",
+ "VHS_SplitImages": "Split Image Batch 🎥🅥🅗🅢",
+ "VHS_SplitMasks": "Split Mask Batch 🎥🅥🅗🅢",
+ "VHS_MergeLatents": "Merge Latent Batches 🎥🅥🅗🅢",
+ "VHS_MergeImages": "Merge Image Batches 🎥🅥🅗🅢",
+ "VHS_MergeMasks": "Merge Mask Batches 🎥🅥🅗🅢",
+ "VHS_SelectEveryNthLatent": "Select Every Nth Latent 🎥🅥🅗🅢",
+ "VHS_SelectEveryNthImage": "Select Every Nth Image 🎥🅥🅗🅢",
+ "VHS_SelectEveryNthMask": "Select Every Nth Mask 🎥🅥🅗🅢",
+ "VHS_GetLatentCount": "Get Latent Count 🎥🅥🅗🅢",
+ "VHS_GetImageCount": "Get Image Count 🎥🅥🅗🅢",
+ "VHS_GetMaskCount": "Get Mask Count 🎥🅥🅗🅢",
+ "VHS_DuplicateLatents": "Duplicate Latent Batch 🎥🅥🅗🅢",
+ "VHS_DuplicateImages": "Duplicate Image Batch 🎥🅥🅗🅢",
+ "VHS_DuplicateMasks": "Duplicate Mask Batch 🎥🅥🅗🅢",
+ # Batched Nodes
+ "VHS_VAEEncodeBatched": "VAE Encode Batched 🎥🅥🅗🅢",
+ "VHS_VAEDecodeBatched": "VAE Decode Batched 🎥🅥🅗🅢",
+}
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/server.py b/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/server.py
new file mode 100755
index 0000000000000000000000000000000000000000..24ef45d0172ae9e5bc601a2c64941d01fc786e51
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/server.py
@@ -0,0 +1,157 @@
+import server
+import folder_paths
+import os
+import time
+import subprocess
+from .utils import is_url, get_sorted_dir_files_from_directory, ffmpeg_path, validate_sequence
+from comfy.k_diffusion.utils import FolderOfImages
+
+web = server.web
+
+def is_safe(path):
+ if "VHS_STRICT_PATHS" not in os.environ:
+ return True
+ basedir = os.path.abspath('.')
+ try:
+ common_path = os.path.commonpath([basedir, path])
+    except ValueError:
+ #Different drive on windows
+ return False
+ return common_path == basedir
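+# Illustrative behavior: with VHS_STRICT_PATHS set and ComfyUI started in
+# /srv/comfy, is_safe('/srv/comfy/output/a.webm') is True while
+# is_safe('/etc/passwd') is False; without the variable, everything passes.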
+
+@server.PromptServer.instance.routes.get("/viewvideo")
+async def view_video(request):
+ query = request.rel_url.query
+ if "filename" not in query:
+ return web.Response(status=404)
+ filename = query["filename"]
+
+ #Path code misformats urls on windows and must be skipped
+ if is_url(filename):
+ file = filename
+ else:
+ filename, output_dir = folder_paths.annotated_filepath(filename)
+
+ type = request.rel_url.query.get("type", "output")
+ if type == "path":
+ #special case for path_based nodes
+ #NOTE: output_dir may be empty, but non-None
+ output_dir, filename = os.path.split(filename)
+ if output_dir is None:
+ output_dir = folder_paths.get_directory_by_type(type)
+
+ if output_dir is None:
+ return web.Response(status=400)
+
+ if not is_safe(output_dir):
+ return web.Response(status=403)
+
+ if "subfolder" in request.rel_url.query:
+ output_dir = os.path.join(output_dir, request.rel_url.query["subfolder"])
+
+ filename = os.path.basename(filename)
+ file = os.path.join(output_dir, filename)
+
+ if query.get('format', 'video') == 'folder':
+ if not os.path.isdir(file):
+ return web.Response(status=404)
+ else:
+ if not os.path.isfile(file) and not validate_sequence(file):
+ return web.Response(status=404)
+
+ if query.get('format', 'video') == "folder":
+        #Check that folder contains some valid image file, get its extension
+        #ffmpeg does not seem to support list globs, so support for mixed extensions seems infeasible
+ os.makedirs(folder_paths.get_temp_directory(), exist_ok=True)
+ concat_file = os.path.join(folder_paths.get_temp_directory(), "image_sequence_preview.txt")
+ skip_first_images = int(query.get('skip_first_images', 0))
+ select_every_nth = int(query.get('select_every_nth', 1))
+ valid_images = get_sorted_dir_files_from_directory(file, skip_first_images, select_every_nth, FolderOfImages.IMG_EXTENSIONS)
+ if len(valid_images) == 0:
+ return web.Response(status=400)
+ with open(concat_file, "w") as f:
+ f.write("ffconcat version 1.0\n")
+ for path in valid_images:
+ f.write("file '" + os.path.abspath(path) + "'\n")
+ f.write("duration 0.125\n")
+ in_args = ["-safe", "0", "-i", concat_file]
+ else:
+ in_args = ["-an", "-i", file]
+
+ args = [ffmpeg_path, "-v", "error"] + in_args
+ vfilters = []
+ if int(query.get('force_rate',0)) != 0:
+ vfilters.append("fps=fps="+query['force_rate'] + ":round=up:start_time=0.001")
+ if int(query.get('skip_first_frames', 0)) > 0:
+ vfilters.append(f"select=gt(n\\,{int(query['skip_first_frames'])-1})")
+ if int(query.get('select_every_nth', 1)) > 1:
+ vfilters.append(f"select=not(mod(n\\,{query['select_every_nth']}))")
+ if query.get('force_size','Disabled') != "Disabled":
+ size = query['force_size'].split('x')
+ if size[0] == '?' or size[1] == '?':
+ size[0] = "-2" if size[0] == '?' else f"'min({size[0]},iw)'"
+ size[1] = "-2" if size[1] == '?' else f"'min({size[1]},ih)'"
+ else:
+ #Aspect ratio is likely changed. A more complex command is required
+ #to crop the output to the new aspect ratio
+ ar = float(size[0])/float(size[1])
+ vfilters.append(f"crop=if(gt({ar}\\,a)\\,iw\\,ih*{ar}):if(gt({ar}\\,a)\\,iw/{ar}\\,ih)")
+ size = ':'.join(size)
+ vfilters.append(f"scale={size}")
+ vfilters.append("setpts=PTS-STARTPTS")
+ if len(vfilters) > 0:
+ args += ["-vf", ",".join(vfilters)]
+ if int(query.get('frame_load_cap', 0)) > 0:
+ args += ["-frames:v", query['frame_load_cap']]
+ #TODO:reconsider adding high frame cap/setting default frame cap on node
+
+ args += ['-c:v', 'libvpx-vp9','-deadline', 'realtime', '-cpu-used', '8', '-f', 'webm', '-']
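+    # Illustrative final command (flags vary with the query parameters):
+    #   ffmpeg -v error -an -i clip.mp4 -vf fps=fps=8,setpts=PTS-STARTPTS \
+    #       -frames:v 64 -c:v libvpx-vp9 -deadline realtime -cpu-used 8 -f webm -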
+
+ try:
+ with subprocess.Popen(args, stdout=subprocess.PIPE) as proc:
+ try:
+ resp = web.StreamResponse()
+ resp.content_type = 'video/webm'
+ resp.headers["Content-Disposition"] = f"filename=\"{filename}\""
+ await resp.prepare(request)
+ while True:
+ bytes_read = proc.stdout.read()
+ if bytes_read is None:
+ #TODO: check for timeout here
+ time.sleep(.1)
+ continue
+ if len(bytes_read) == 0:
+ break
+ await resp.write(bytes_read)
+ except ConnectionResetError as e:
+ #Kill ffmpeg before stdout closes
+ proc.kill()
+ except BrokenPipeError as e:
+ pass
+ return resp
+
+@server.PromptServer.instance.routes.get("/getpath")
+async def get_path(request):
+ query = request.rel_url.query
+ if "path" not in query:
+ return web.Response(status=404)
+ path = os.path.abspath(query["path"])
+
+ if not os.path.exists(path) or not is_safe(path):
+ return web.json_response([])
+
+ #Use get so None is default instead of keyerror
+ valid_extensions = query.get("extensions")
+ valid_items = []
+ for item in os.scandir(path):
+ try:
+ if item.is_dir():
+ valid_items.append(item.name + "/")
+ continue
+ if valid_extensions is None or item.name.split(".")[-1] in valid_extensions:
+ valid_items.append(item.name)
+ except OSError:
+ #Broken symlinks can throw a very unhelpful "Invalid argument"
+ pass
+
+ return web.json_response(valid_items)
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/utils.py b/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a8652044df2016f76c0a4d998d7ef996de500f0
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/videohelpersuite/utils.py
@@ -0,0 +1,207 @@
+import hashlib
+import os
+from typing import Iterable
+import shutil
+import subprocess
+import re
+
+import server
+from .logger import logger
+
+BIGMIN = -(2**53-1)
+BIGMAX = (2**53-1)
+
+DIMMAX = 8192
+
+def ffmpeg_suitability(path):
+ try:
+ version = subprocess.run([path, "-version"], check=True,
+ capture_output=True).stdout.decode("utf-8")
+    except Exception:
+ return 0
+ score = 0
+ #rough layout of the importance of various features
+ simple_criterion = [("libvpx", 20),("264",10), ("265",3),
+ ("svtav1",5),("libopus", 1)]
+ for criterion in simple_criterion:
+ if version.find(criterion[0]) >= 0:
+ score += criterion[1]
+ #obtain rough compile year from copyright information
+ copyright_index = version.find('2000-2')
+ if copyright_index >= 0:
+ copyright_year = version[copyright_index+6:copyright_index+9]
+ if copyright_year.isnumeric():
+ score += int(copyright_year)
+ return score
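+# Illustrative scoring: a build whose banner lists libvpx and 264 with a
+# "2000-2023" copyright line scores 20 + 10 + 23 = 53, so newer and
+# better-equipped builds win when several candidates are compared below.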
+
+
+if "VHS_FORCE_FFMPEG_PATH" in os.environ:
+ ffmpeg_path = os.environ.get("VHS_FORCE_FFMPEG_PATH")
+else:
+ ffmpeg_paths = []
+ try:
+ from imageio_ffmpeg import get_ffmpeg_exe
+ imageio_ffmpeg_path = get_ffmpeg_exe()
+ ffmpeg_paths.append(imageio_ffmpeg_path)
+    except Exception:
+ if "VHS_USE_IMAGEIO_FFMPEG" in os.environ:
+ raise
+ logger.warn("Failed to import imageio_ffmpeg")
+ if "VHS_USE_IMAGEIO_FFMPEG" in os.environ:
+ ffmpeg_path = imageio_ffmpeg_path
+ else:
+ system_ffmpeg = shutil.which("ffmpeg")
+ if system_ffmpeg is not None:
+ ffmpeg_paths.append(system_ffmpeg)
+ if len(ffmpeg_paths) == 0:
+ logger.error("No valid ffmpeg found.")
+ ffmpeg_path = None
+ elif len(ffmpeg_paths) == 1:
+ #Evaluation of suitability isn't required, can take sole option
+ #to reduce startup time
+ ffmpeg_path = ffmpeg_paths[0]
+ else:
+ ffmpeg_path = max(ffmpeg_paths, key=ffmpeg_suitability)
+gifski_path = os.environ.get("VHS_GIFSKI", None)
+if gifski_path is None:
+ gifski_path = os.environ.get("JOV_GIFSKI", None)
+ if gifski_path is None:
+ gifski_path = shutil.which("gifski")
+
+def get_sorted_dir_files_from_directory(directory: str, skip_first_images: int=0, select_every_nth: int=1, extensions: Iterable=None):
+ directory = directory.strip()
+ dir_files = os.listdir(directory)
+ dir_files = sorted(dir_files)
+ dir_files = [os.path.join(directory, x) for x in dir_files]
+ dir_files = list(filter(lambda filepath: os.path.isfile(filepath), dir_files))
+ # filter by extension, if needed
+ if extensions is not None:
+ extensions = list(extensions)
+ new_dir_files = []
+ for filepath in dir_files:
+ ext = "." + filepath.split(".")[-1]
+ if ext.lower() in extensions:
+ new_dir_files.append(filepath)
+ dir_files = new_dir_files
+ # start at skip_first_images
+ dir_files = dir_files[skip_first_images:]
+ dir_files = dir_files[0::select_every_nth]
+ return dir_files
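+# Example with a hypothetical directory of frames 0000.png..0099.png:
+# skip_first_images=10 and select_every_nth=2 yield 0010.png, 0012.png, ...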
+
+
+# modified from https://stackoverflow.com/questions/22058048/hashing-a-file-in-python
+def calculate_file_hash(filename: str, hash_every_n: int = 1):
+ #Larger video files were taking >.5 seconds to hash even when cached,
+ #so instead the modified time from the filesystem is used as a hash
+ h = hashlib.sha256()
+ h.update(filename.encode())
+ h.update(str(os.path.getmtime(filename)).encode())
+ return h.hexdigest()
+
+prompt_queue = server.PromptServer.instance.prompt_queue
+def requeue_workflow_unchecked():
+ """Requeues the current workflow without checking for multiple requeues"""
+ currently_running = prompt_queue.currently_running
+ (_, _, prompt, extra_data, outputs_to_execute) = next(iter(currently_running.values()))
+
+ #Ensure batch_managers are marked stale
+ prompt = prompt.copy()
+ for uid in prompt:
+ if prompt[uid]['class_type'] == 'VHS_BatchManager':
+ prompt[uid]['inputs']['requeue'] = prompt[uid]['inputs'].get('requeue',0)+1
+
+ #execution.py has guards for concurrency, but server doesn't.
+ #TODO: Check that this won't be an issue
+ number = -server.PromptServer.instance.number
+ server.PromptServer.instance.number += 1
+ prompt_id = str(server.uuid.uuid4())
+ prompt_queue.put((number, prompt_id, prompt, extra_data, outputs_to_execute))
+
+requeue_guard = [None, 0, 0, {}]
+def requeue_workflow(requeue_required=(-1,True)):
+    assert len(prompt_queue.currently_running) == 1
+ global requeue_guard
+ (run_number, _, prompt, _, _) = next(iter(prompt_queue.currently_running.values()))
+ if requeue_guard[0] != run_number:
+ #Calculate a count of how many outputs are managed by a batch manager
+ managed_outputs=0
+ for bm_uid in prompt:
+ if prompt[bm_uid]['class_type'] == 'VHS_BatchManager':
+ for output_uid in prompt:
+ if prompt[output_uid]['class_type'] in ["VHS_VideoCombine"]:
+ for inp in prompt[output_uid]['inputs'].values():
+ if inp == [bm_uid, 0]:
+ managed_outputs+=1
+ requeue_guard = [run_number, 0, managed_outputs, {}]
+ requeue_guard[1] = requeue_guard[1]+1
+ requeue_guard[3][requeue_required[0]] = requeue_required[1]
+ if requeue_guard[1] == requeue_guard[2] and max(requeue_guard[3].values()):
+ requeue_workflow_unchecked()
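+# Guard semantics, as read from the code above: each VideoCombine wired to a
+# BatchManager reports once per execution; the workflow is requeued exactly
+# once, after all managed outputs report and at least one still has open inputs.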
+
+def get_audio(file, start_time=0, duration=0):
+ args = [ffmpeg_path, "-v", "error", "-i", file]
+ if start_time > 0:
+ args += ["-ss", str(start_time)]
+ if duration > 0:
+ args += ["-t", str(duration)]
+ try:
+ res = subprocess.run(args + ["-f", "wav", "-"],
+ stdout=subprocess.PIPE, check=True).stdout
+ except subprocess.CalledProcessError as e:
+ logger.warning(f"Failed to extract audio from: {file}")
+ return False
+ return res
+
+
+def lazy_eval(func):
+ class Cache:
+ def __init__(self, func):
+ self.res = None
+ self.func = func
+ def get(self):
+ if self.res is None:
+ self.res = self.func()
+ return self.res
+ cache = Cache(func)
+ return lambda : cache.get()
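+# Minimal usage sketch:
+#   audio = lazy_eval(lambda: get_audio(file))
+#   audio()  # computes and caches on the first call
+#   audio()  # returns the cached result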
+
+
+def is_url(url):
+ return url.split("://")[0] in ["http", "https"]
+
+def validate_sequence(path):
+ #Check if path is a valid ffmpeg sequence that points to at least one file
+ (path, file) = os.path.split(path)
+ if not os.path.isdir(path):
+ return False
+    match = re.search(r'%0?\d*d', file)
+    if not match:
+        return False
+    seq = match.group()
+    if seq == '%d':
+        seq = '\\\\d+'
+    else:
+        seq = '\\\\d{%s}' % seq[1:-1]
+    file_matcher = re.compile(re.sub(r'%0?\d*d', seq, file))
+ for file in os.listdir(path):
+ if file_matcher.fullmatch(file):
+ return True
+ return False
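+# Example: 'frames/img_%04d.png' validates when the directory holds at least
+# one file like 'img_0001.png'; a bare '%d' accepts any run of digits.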
+
+def hash_path(path):
+ if path is None:
+ return "input"
+ if is_url(path):
+ return "url"
+ return calculate_file_hash(path.strip("\""))
+
+
+def validate_path(path, allow_none=False, allow_url=True):
+ if path is None:
+ return allow_none
+ if is_url(path):
+ #Probably not feasible to check if url resolves here
+ return True if allow_url else "URLs are unsupported for this path"
+ if not os.path.isfile(path.strip("\"")):
+ return "Invalid file path: {}".format(path)
+ return True
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/web/js/VHS.core.js b/custom_nodes/ComfyUI-VideoHelperSuite/web/js/VHS.core.js
new file mode 100755
index 0000000000000000000000000000000000000000..522398600c07c38675b7f8860dd1a23346f8cb3b
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/web/js/VHS.core.js
@@ -0,0 +1,1090 @@
+import { app } from '../../../scripts/app.js'
+import { api } from '../../../scripts/api.js'
+import { applyTextReplacements } from "../../../scripts/utils.js";
+
+function chainCallback(object, property, callback) {
+ if (object == undefined) {
+ //This should not happen.
+ console.error("Tried to add callback to non-existant object")
+ return;
+ }
+ if (property in object) {
+ const callback_orig = object[property]
+ object[property] = function () {
+ const r = callback_orig.apply(this, arguments);
+ callback.apply(this, arguments);
+ return r
+ };
+ } else {
+ object[property] = callback;
+ }
+}
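+// Usage sketch: extend a handler without clobbering the original, e.g.
+//   chainCallback(nodeType.prototype, "onNodeCreated", function() {
+//       console.log("created", this.title); // runs after the original handler
+//   });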
+
+function injectHidden(widget) {
+ widget.computeSize = (target_width) => {
+ if (widget.hidden) {
+ return [0, -4];
+ }
+ return [target_width, 20];
+ };
+ widget._type = widget.type
+ Object.defineProperty(widget, "type", {
+ set : function(value) {
+ widget._type = value;
+ },
+ get : function() {
+ if (widget.hidden) {
+ return "hidden";
+ }
+ return widget._type;
+ }
+ });
+}
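+// After injectHidden(widget), setting widget.hidden = true collapses the row
+// (computeSize returns [0, -4]) and the type getter reports "hidden" until
+// the flag is cleared again.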
+
+const convDict = {
+ VHS_LoadImages : ["directory", null, "image_load_cap", "skip_first_images", "select_every_nth"],
+ VHS_LoadImagesPath : ["directory", "image_load_cap", "skip_first_images", "select_every_nth"],
+ VHS_VideoCombine : ["frame_rate", "loop_count", "filename_prefix", "format", "pingpong", "save_image"],
+ VHS_LoadVideo : ["video", "force_rate", "force_size", "frame_load_cap", "skip_first_frames", "select_every_nth"],
+ VHS_LoadVideoPath : ["video", "force_rate", "force_size", "frame_load_cap", "skip_first_frames", "select_every_nth"]
+};
+const renameDict = {VHS_VideoCombine : {save_output : "save_image"}}
+function useKVState(nodeType) {
+ chainCallback(nodeType.prototype, "onNodeCreated", function () {
+ chainCallback(this, "onConfigure", function(info) {
+ if (!this.widgets) {
+ //Node has no widgets, there is nothing to restore
+ return
+ }
+ if (typeof(info.widgets_values) != "object") {
+                //widgets_values is in some unknown format that cannot be processed
+ return
+ }
+ let widgetDict = info.widgets_values
+ if (info.widgets_values.length) {
+ //widgets_values is in the old list format
+ if (this.type in convDict) {
+                    //a conversion list is provided for this node type
+ let convList = convDict[this.type];
+ if(info.widgets_values.length >= convList.length) {
+ //has all required fields
+ widgetDict = {}
+ for (let i = 0; i < convList.length; i++) {
+ if(!convList[i]) {
+ //Element should not be processed (upload button on load image sequence)
+ continue
+ }
+ widgetDict[convList[i]] = info.widgets_values[i];
+ }
+ } else {
+ //widgets_values is missing elements marked as required
+ //let it fall through to failure state
+ }
+ }
+ }
+ if (widgetDict.length == undefined) {
+ for (let w of this.widgets) {
+ if (w.name in widgetDict) {
+ w.value = widgetDict[w.name];
+ } else {
+ //Check for a legacy name that needs migrating
+ if (this.type in renameDict && w.name in renameDict[this.type]) {
+ if (renameDict[this.type][w.name] in widgetDict) {
+ w.value = widgetDict[renameDict[this.type][w.name]]
+ continue
+ }
+ }
+ //attempt to restore default value
+ let inputs = LiteGraph.getNodeType(this.type).nodeData.input;
+ let initialValue = null;
+ if (inputs?.required?.hasOwnProperty(w.name)) {
+ if (inputs.required[w.name][1]?.hasOwnProperty("default")) {
+ initialValue = inputs.required[w.name][1].default;
+ } else if (inputs.required[w.name][0].length) {
+ initialValue = inputs.required[w.name][0][0];
+ }
+ } else if (inputs?.optional?.hasOwnProperty(w.name)) {
+ if (inputs.optional[w.name][1]?.hasOwnProperty("default")) {
+ initialValue = inputs.optional[w.name][1].default;
+ } else if (inputs.optional[w.name][0].length) {
+ initialValue = inputs.optional[w.name][0][0];
+ }
+ }
+ if (initialValue) {
+ w.value = initialValue;
+ }
+ }
+ }
+ } else {
+ //Saved data was not a map made by this method
+ //and a conversion dict for it does not exist
+ //It's likely an array and that has been blindly applied
+ if (info?.widgets_values?.length != this.widgets.length) {
+                    //Widgets could not have been restored properly
+ //Note if multiple node loads fail, only the latest error dialog displays
+ app.ui.dialog.show("Failed to restore node: " + this.title + "\nPlease remove and re-add it.")
+ this.bgcolor = "#C00"
+ }
+ }
+ });
+ chainCallback(this, "onSerialize", function(info) {
+ info.widgets_values = {};
+ if (!this.widgets) {
+ //object has no widgets, there is nothing to store
+ return;
+ }
+ for (let w of this.widgets) {
+ info.widgets_values[w.name] = w.value;
+ }
+ });
+ })
+}
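+// With useKVState applied, widgets_values serializes as a name->value map,
+// e.g. {"frame_rate": 8, "format": "video/h264-mp4"}, while onConfigure still
+// accepts the legacy positional arrays via convDict above.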
+
+function fitHeight(node) {
+ node.setSize([node.size[0], node.computeSize([node.size[0], node.size[1]])[1]])
+ node?.graph?.setDirtyCanvas(true);
+}
+
+async function uploadFile(file) {
+ //TODO: Add uploaded file to cache with Cache.put()?
+ try {
+ // Wrap file in formdata so it includes filename
+ const body = new FormData();
+ const i = file.webkitRelativePath.lastIndexOf('/');
+ const subfolder = file.webkitRelativePath.slice(0,i+1)
+ const new_file = new File([file], file.name, {
+ type: file.type,
+ lastModified: file.lastModified,
+ });
+ body.append("image", new_file);
+ if (i > 0) {
+ body.append("subfolder", subfolder);
+ }
+ const resp = await api.fetchApi("/upload/image", {
+ method: "POST",
+ body,
+ });
+
+ if (resp.status === 200) {
+ return resp.status
+ } else {
+ alert(resp.status + " - " + resp.statusText);
+ }
+ } catch (error) {
+ alert(error);
+ }
+}
+
+function addDateFormatting(nodeType, field, timestamp_widget = false) {
+ chainCallback(nodeType.prototype, "onNodeCreated", function() {
+ const widget = this.widgets.find((w) => w.name === field);
+ widget.serializeValue = () => {
+ return applyTextReplacements(app, widget.value);
+ };
+ });
+}
+function addTimestampWidget(nodeType, nodeData, targetWidget) {
+ const newWidgets = {};
+ for (let key in nodeData.input.required) {
+ if (key == targetWidget) {
+ //TODO: account for duplicate entries?
+ newWidgets["timestamp_directory"] = ["BOOLEAN", {"default": true}]
+ }
+ newWidgets[key] = nodeData.input.required[key];
+ }
+    nodeData.input.required = newWidgets;
+ chainCallback(nodeType.prototype, "onNodeCreated", function () {
+ const directoryWidget = this.widgets.find((w) => w.name === "directory_name");
+ const timestampWidget = this.widgets.find((w) => w.name === "timestamp_directory");
+ directoryWidget.serializeValue = () => {
+ if (timestampWidget.value) {
+ //ignore actual value and return timestamp
+ return formatDate("yyyy-MM-ddThh:mm:ss", new Date());
+ }
+ return directoryWidget.value
+ };
+        timestampWidget._value = timestampWidget.value;
+        Object.defineProperty(timestampWidget, "value", {
+ set : function(value) {
+ this._value = value;
+ directoryWidget.disabled = value;
+ },
+ get : function() {
+ return this._value;
+ }
+ });
+ });
+}
+
+function addCustomSize(nodeType, nodeData, widgetName) {
+ //Add a callback which sets up the actual logic once the node is created
+ chainCallback(nodeType.prototype, "onNodeCreated", function() {
+ const node = this;
+ const sizeOptionWidget = node.widgets.find((w) => w.name === widgetName);
+ const widthWidget = node.widgets.find((w) => w.name === "custom_width");
+ const heightWidget = node.widgets.find((w) => w.name === "custom_height");
+ injectHidden(widthWidget);
+ injectHidden(heightWidget);
+ sizeOptionWidget._value = sizeOptionWidget.value;
+ Object.defineProperty(sizeOptionWidget, "value", {
+ set : function(value) {
+ //TODO: Only modify hidden/reset size when a change occurs
+ if (value == "Custom Width") {
+ widthWidget.hidden = false;
+ heightWidget.hidden = true;
+ } else if (value == "Custom Height") {
+ widthWidget.hidden = true;
+ heightWidget.hidden = false;
+ } else if (value == "Custom") {
+ widthWidget.hidden = false;
+ heightWidget.hidden = false;
+ } else{
+ widthWidget.hidden = true;
+ heightWidget.hidden = true;
+ }
+ node.setSize([node.size[0], node.computeSize([node.size[0], node.size[1]])[1]])
+ this._value = value;
+ },
+ get : function() {
+ return this._value;
+ }
+ });
+ //Ensure proper visibility/size state for initial value
+ sizeOptionWidget.value = sizeOptionWidget._value;
+
+ sizeOptionWidget.serializePreview = function() {
+ if (this.value == "Custom Width") {
+ return widthWidget.value + "x?";
+ } else if (this.value == "Custom Height") {
+ return "?x" + heightWidget.value;
+ } else if (this.value == "Custom") {
+ return widthWidget.value + "x" + heightWidget.value;
+ } else {
+ return this.value;
+ }
+ };
+ });
+}
+function addUploadWidget(nodeType, nodeData, widgetName, type="video") {
+ chainCallback(nodeType.prototype, "onNodeCreated", function() {
+ const pathWidget = this.widgets.find((w) => w.name === widgetName);
+ const fileInput = document.createElement("input");
+ chainCallback(this, "onRemoved", () => {
+ fileInput?.remove();
+ });
+ if (type == "folder") {
+ Object.assign(fileInput, {
+ type: "file",
+ style: "display: none",
+ webkitdirectory: true,
+ onchange: async () => {
+ const directory = fileInput.files[0].webkitRelativePath;
+ const i = directory.lastIndexOf('/');
+ if (i <= 0) {
+ throw "No directory found";
+ }
+ const path = directory.slice(0,directory.lastIndexOf('/'))
+ if (pathWidget.options.values.includes(path)) {
+ alert("A folder of the same name already exists");
+ return;
+ }
+ let successes = 0;
+ for(const file of fileInput.files) {
+ if (await uploadFile(file) == 200) {
+ successes++;
+ } else {
+ //Upload failed, but some prior uploads may have succeeded
+ //Stop future uploads to prevent cascading failures
+ //and only add to list if an upload has succeeded
+ if (successes > 0) {
+ break
+ } else {
+ return;
+ }
+ }
+ }
+ pathWidget.options.values.push(path);
+ pathWidget.value = path;
+ if (pathWidget.callback) {
+ pathWidget.callback(path)
+ }
+ },
+ });
+ } else if (type == "video") {
+ Object.assign(fileInput, {
+ type: "file",
+ accept: "video/webm,video/mp4,video/mkv,image/gif",
+ style: "display: none",
+ onchange: async () => {
+ if (fileInput.files.length) {
+ if (await uploadFile(fileInput.files[0]) != 200) {
+ //upload failed and file can not be added to options
+ return;
+ }
+ const filename = fileInput.files[0].name;
+ pathWidget.options.values.push(filename);
+ pathWidget.value = filename;
+ if (pathWidget.callback) {
+ pathWidget.callback(filename)
+ }
+ }
+ },
+ });
+ } else {
+ throw "Unknown upload type"
+ }
+ document.body.append(fileInput);
+ let uploadWidget = this.addWidget("button", "choose " + type + " to upload", "image", () => {
+ //clear the active click event
+ app.canvas.node_widget = null
+
+ fileInput.click();
+ });
+ uploadWidget.options.serialize = false;
+ });
+}
+
+function addVideoPreview(nodeType) {
+ chainCallback(nodeType.prototype, "onNodeCreated", function() {
+ var element = document.createElement("div");
+ const previewNode = this;
+ var previewWidget = this.addDOMWidget("videopreview", "preview", element, {
+ serialize: false,
+ hideOnZoom: false,
+ getValue() {
+ return element.value;
+ },
+ setValue(v) {
+ element.value = v;
+ },
+ });
+ previewWidget.computeSize = function(width) {
+ if (this.aspectRatio && !this.parentEl.hidden) {
+ let height = (previewNode.size[0]-20)/ this.aspectRatio + 10;
+ if (!(height > 0)) {
+ height = 0;
+ }
+ this.computedHeight = height + 10;
+ return [width, height];
+ }
+ return [width, -4];//no loaded src, widget should not display
+ }
+ element.style['pointer-events'] = "none"
+ previewWidget.value = {hidden: false, paused: false, params: {}}
+ previewWidget.parentEl = document.createElement("div");
+ previewWidget.parentEl.className = "vhs_preview";
+ previewWidget.parentEl.style['width'] = "100%"
+ element.appendChild(previewWidget.parentEl);
+ previewWidget.videoEl = document.createElement("video");
+ previewWidget.videoEl.controls = false;
+ previewWidget.videoEl.loop = true;
+ previewWidget.videoEl.muted = true;
+ previewWidget.videoEl.style['width'] = "100%"
+ previewWidget.videoEl.addEventListener("loadedmetadata", () => {
+
+ previewWidget.aspectRatio = previewWidget.videoEl.videoWidth / previewWidget.videoEl.videoHeight;
+ fitHeight(this);
+ });
+ previewWidget.videoEl.addEventListener("error", () => {
+ //TODO: consider a way to properly notify the user why a preview isn't shown.
+ previewWidget.parentEl.hidden = true;
+ fitHeight(this);
+ });
+
+ previewWidget.imgEl = document.createElement("img");
+ previewWidget.imgEl.style['width'] = "100%"
+ previewWidget.imgEl.hidden = true;
+ previewWidget.imgEl.onload = () => {
+ previewWidget.aspectRatio = previewWidget.imgEl.naturalWidth / previewWidget.imgEl.naturalHeight;
+ fitHeight(this);
+ };
+
+ var timeout = null;
+ this.updateParameters = (params, force_update) => {
+ if (!previewWidget.value.params) {
+                if (typeof previewWidget.value != 'object') {
+ previewWidget.value = {hidden: false, paused: false}
+ }
+ previewWidget.value.params = {}
+ }
+ Object.assign(previewWidget.value.params, params)
+ if (!force_update &&
+ !app.ui.settings.getSettingValue("VHS.AdvancedPreviews", false)) {
+ return;
+ }
+ if (timeout) {
+ clearTimeout(timeout);
+ }
+ if (force_update) {
+ previewWidget.updateSource();
+ } else {
+ timeout = setTimeout(() => previewWidget.updateSource(),100);
+ }
+ };
+ previewWidget.updateSource = function () {
+ if (this.value.params == undefined) {
+ return;
+ }
+ let params = {}
+ Object.assign(params, this.value.params);//shallow copy
+ this.parentEl.hidden = this.value.hidden;
+            if (params.format?.split('/')[0] == 'video' ||
+                (app.ui.settings.getSettingValue("VHS.AdvancedPreviews", false) &&
+                 params.format?.split('/')[1] == 'gif') ||
+                params.format == 'folder') {
+ this.videoEl.autoplay = !this.value.paused && !this.value.hidden;
+ let target_width = 256
+ if (element.style?.width) {
+ //overscale to allow scrolling. Endpoint won't return higher than native
+ target_width = element.style.width.slice(0,-2)*2;
+ }
+ if (!params.force_size || params.force_size.includes("?") || params.force_size == "Disabled") {
+ params.force_size = target_width+"x?"
+ } else {
+ let size = params.force_size.split("x")
+ let ar = parseInt(size[0])/parseInt(size[1])
+ params.force_size = target_width+"x"+(target_width/ar)
+ }
+ if (app.ui.settings.getSettingValue("VHS.AdvancedPreviews", false)) {
+ this.videoEl.src = api.apiURL('/viewvideo?' + new URLSearchParams(params));
+ } else {
+ previewWidget.videoEl.src = api.apiURL('/view?' + new URLSearchParams(params));
+ }
+ this.videoEl.hidden = false;
+ this.imgEl.hidden = true;
+ } else if (params.format?.split('/')[0] == 'image'){
+ //Is animated image
+ this.imgEl.src = api.apiURL('/view?' + new URLSearchParams(params));
+ this.videoEl.hidden = true;
+ this.imgEl.hidden = false;
+ }
+ }
+ previewWidget.parentEl.appendChild(previewWidget.videoEl)
+ previewWidget.parentEl.appendChild(previewWidget.imgEl)
+ });
+}
+function addPreviewOptions(nodeType) {
+ chainCallback(nodeType.prototype, "getExtraMenuOptions", function(_, options) {
+ // The intended way of appending options is returning a list of extra options,
+ // but this isn't used in widgetInputs.js and would require
+ // less generalization of chainCallback
+ let optNew = []
+ const previewWidget = this.widgets.find((w) => w.name === "videopreview");
+
+ let url = null
+ if (previewWidget.videoEl?.hidden == false && previewWidget.videoEl.src) {
+ //Use full quality video
+ url = api.apiURL('/view?' + new URLSearchParams(previewWidget.value.params));
+ } else if (previewWidget.imgEl?.hidden == false && previewWidget.imgEl.src) {
+ url = previewWidget.imgEl.src;
+ url = new URL(url);
+ }
+ if (url) {
+ optNew.push(
+ {
+ content: "Open preview",
+ callback: () => {
+ window.open(url, "_blank")
+ },
+ },
+ {
+ content: "Save preview",
+ callback: () => {
+ const a = document.createElement("a");
+ a.href = url;
+ a.setAttribute("download", new URLSearchParams(previewWidget.value.params).get("filename"));
+ document.body.append(a);
+ a.click();
+ requestAnimationFrame(() => a.remove());
+ },
+ }
+ );
+ }
+ const PauseDesc = (previewWidget.value.paused ? "Resume" : "Pause") + " preview";
+ if(previewWidget.videoEl.hidden == false) {
+ optNew.push({content: PauseDesc, callback: () => {
+ //animated images can't be paused and are more likely to cause performance issues.
+ //changing src to a single keyframe is possible,
+ //For now, the option is disabled if an animated image is being displayed
+ if(previewWidget.value.paused) {
+ previewWidget.videoEl?.play();
+ } else {
+ previewWidget.videoEl?.pause();
+ }
+ previewWidget.value.paused = !previewWidget.value.paused;
+ }});
+ }
+ //TODO: Consider hiding elements if no video preview is available yet.
+ //It would reduce confusion at the cost of functionality
+ //(if a video preview lags the computer, the user should be able to hide in advance)
+ const visDesc = (previewWidget.value.hidden ? "Show" : "Hide") + " preview";
+ optNew.push({content: visDesc, callback: () => {
+ if (!previewWidget.videoEl.hidden && !previewWidget.value.hidden) {
+ previewWidget.videoEl.pause();
+ } else if (previewWidget.value.hidden && !previewWidget.videoEl.hidden && !previewWidget.value.paused) {
+ previewWidget.videoEl.play();
+ }
+ previewWidget.value.hidden = !previewWidget.value.hidden;
+ previewWidget.parentEl.hidden = previewWidget.value.hidden;
+ fitHeight(this);
+
+ }});
+ optNew.push({content: "Sync preview", callback: () => {
+ //TODO: address case where videos have varying length
+ //Consider a system of sync groups which are opt-in?
+ for (let p of document.getElementsByClassName("vhs_preview")) {
+ for (let child of p.children) {
+ if (child.tagName == "VIDEO") {
+ child.currentTime=0;
+ } else if (child.tagName == "IMG") {
+ child.src = child.src;
+ }
+ }
+ }
+ }});
+ if(options.length > 0 && options[0] != null && optNew.length > 0) {
+ optNew.push(null);
+ }
+ options.unshift(...optNew);
+ });
+}
+function addFormatWidgets(nodeType) {
+ function parseFormats(options) {
+ options.fullvalues = options._values;
+ options._values = [];
+ for (let format of options.fullvalues) {
+ if (Array.isArray(format)) {
+ options._values.push(format[0]);
+ } else {
+ options._values.push(format);
+ }
+ }
+ }
+ chainCallback(nodeType.prototype, "onNodeCreated", function() {
+ var formatWidget = null;
+ var formatWidgetIndex = -1;
+ for(let i = 0; i < this.widgets.length; i++) {
+ if (this.widgets[i].name === "format"){
+ formatWidget = this.widgets[i];
+ formatWidgetIndex = i+1;
+ }
+ }
+ let formatWidgetsCount = 0;
+ //Pre-process options to just names
+ formatWidget.options._values = formatWidget.options.values;
+ parseFormats(formatWidget.options);
+ Object.defineProperty(formatWidget.options, "values", {
+ set : (value) => {
+ formatWidget.options._values = value;
+ parseFormats(formatWidget.options);
+ },
+ get : () => {
+ return formatWidget.options._values;
+ }
+ })
+
+ formatWidget._value = formatWidget.value;
+ Object.defineProperty(formatWidget, "value", {
+ set : (value) => {
+ formatWidget._value = value;
+ let newWidgets = [];
+ const fullDef = formatWidget.options.fullvalues.find((w) => Array.isArray(w) ? w[0] === value : w === value);
+ if (!Array.isArray(fullDef)) {
+ formatWidget._value = value;
+ } else {
+ formatWidget._value = fullDef[0];
+ for (let wDef of fullDef[1]) {
+ //create widgets. Heavy borrowed from web/scripts/app.js
+ //default implementation doesn't work since it automatically adds
+ //the widget in the wrong spot.
+ //TODO: consider letting this happen and just removing from list?
+ let w = {};
+ w.name = wDef[0];
+ let inputData = wDef.slice(1);
+ w.type = inputData[0];
+ w.options = inputData[1] ? inputData[1] : {};
+ if (Array.isArray(w.type)) {
+ w.value = w.type[0];
+ w.options.values = w.type;
+ w.type = "combo";
+ }
+ if(inputData[1]?.default) {
+ w.value = inputData[1].default;
+ }
+ if (w.type == "INT") {
+ Object.assign(w.options, {"precision": 0, "step": 10})
+ w.callback = function (v) {
+ const s = this.options.step / 10;
+ this.value = Math.round(v / s) * s;
+ }
+ }
+ const typeTable = {BOOLEAN: "toggle", STRING: "text", INT: "number", FLOAT: "number"};
+ if (w.type in typeTable) {
+ w.type = typeTable[w.type];
+ }
+ newWidgets.push(w);
+ }
+ }
+ this.widgets.splice(formatWidgetIndex, formatWidgetsCount, ...newWidgets);
+ fitHeight(this);
+ formatWidgetsCount = newWidgets.length;
+ },
+ get : () => {
+ return formatWidget._value;
+ }
+ });
+ });
+}
+function addLoadVideoCommon(nodeType, nodeData) {
+ addCustomSize(nodeType, nodeData, "force_size")
+ addVideoPreview(nodeType);
+ addPreviewOptions(nodeType);
+ chainCallback(nodeType.prototype, "onNodeCreated", function() {
+ const pathWidget = this.widgets.find((w) => w.name === "video");
+ const frameCapWidget = this.widgets.find((w) => w.name === 'frame_load_cap');
+ const frameSkipWidget = this.widgets.find((w) => w.name === 'skip_first_frames');
+ const rateWidget = this.widgets.find((w) => w.name === 'force_rate');
+ const skipWidget = this.widgets.find((w) => w.name === 'select_every_nth');
+ const sizeWidget = this.widgets.find((w) => w.name === 'force_size');
+        //widget.callback adds unused arguments which need culling
+ let update = function (value, _, node) {
+ let param = {}
+ param[this.name] = value
+ node.updateParameters(param);
+ }
+ chainCallback(frameCapWidget, "callback", update);
+ chainCallback(frameSkipWidget, "callback", update);
+ chainCallback(rateWidget, "callback", update);
+ chainCallback(skipWidget, "callback", update);
+ let priorSize = sizeWidget.value;
+ let updateSize = function(value, _, node) {
+ if (sizeWidget.value == 'Custom' || priorSize != sizeWidget.value) {
+ node.updateParameters({"force_size": sizeWidget.serializePreview()});
+ }
+ priorSize = sizeWidget.value;
+ }
+ chainCallback(sizeWidget, "callback", updateSize);
+ chainCallback(this.widgets.find((w) => w.name === "custom_width"), "callback", updateSize);
+ chainCallback(this.widgets.find((w) => w.name === "custom_height"), "callback", updateSize);
+
+ //do first load
+ requestAnimationFrame(() => {
+ for (let w of [frameCapWidget, frameSkipWidget, rateWidget, pathWidget, skipWidget]) {
+ w.callback(w.value, null, this);
+ }
+ });
+ });
+}
+function addLoadImagesCommon(nodeType, nodeData) {
+ addVideoPreview(nodeType);
+ addPreviewOptions(nodeType);
+ chainCallback(nodeType.prototype, "onNodeCreated", function() {
+ const pathWidget = this.widgets.find((w) => w.name === "directory");
+ const frameCapWidget = this.widgets.find((w) => w.name === 'image_load_cap');
+ const frameSkipWidget = this.widgets.find((w) => w.name === 'skip_first_images');
+ const skipWidget = this.widgets.find((w) => w.name === 'select_every_nth');
+        //widget.callback adds unused arguments which need culling
+ let update = function (value, _, node) {
+ let param = {}
+ param[this.name] = value
+ node.updateParameters(param);
+ }
+ chainCallback(frameCapWidget, "callback", (value, _, node) => {
+ node.updateParameters({frame_load_cap: value})
+ });
+ chainCallback(frameSkipWidget, "callback", update);
+ chainCallback(skipWidget, "callback", update);
+ //do first load
+ requestAnimationFrame(() => {
+ for (let w of [frameCapWidget, frameSkipWidget, pathWidget, skipWidget]) {
+ w.callback(w.value, null, this);
+ }
+ });
+ });
+}
+
+function path_stem(path) {
+ let i = path.lastIndexOf("/");
+ if (i >= 0) {
+ return [path.slice(0,i+1),path.slice(i+1)];
+ }
+ return ["",path];
+}
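+// Example: path_stem("input/clips/a.mp4") -> ["input/clips/", "a.mp4"];
+// a bare filename yields ["", "a.mp4"].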
+function searchBox(event, [x,y], node) {
+ //Ensure only one dialogue shows at a time
+ if (this.prompt)
+ return;
+ this.prompt = true;
+
+ let pathWidget = this;
+ let dialog = document.createElement("div");
+ dialog.className = "litegraph litesearchbox graphdialog rounded"
+    dialog.innerHTML = "<span class='name'>Path</span> <input autofocus type='text' class='value'/><button>OK</button><div class='helper'></div>"
+ dialog.close = () => {
+ dialog.remove();
+ }
+ document.body.append(dialog);
+ if (app.canvas.ds.scale > 1) {
+ dialog.style.transform = "scale(" + app.canvas.ds.scale + ")";
+ }
+ var name_element = dialog.querySelector(".name");
+ var input = dialog.querySelector(".value");
+ var options_element = dialog.querySelector(".helper");
+ input.value = pathWidget.value;
+
+ var timeout = null;
+ let last_path = null;
+ let extensions = pathWidget.options.extensions
+
+ input.addEventListener("keydown", (e) => {
+ dialog.is_modified = true;
+ if (e.keyCode == 27) {
+ //ESC
+ dialog.close();
+ } else if (e.keyCode == 13 && e.target.localName != "textarea") {
+ pathWidget.value = input.value;
+ if (pathWidget.callback) {
+ pathWidget.callback(pathWidget.value);
+ }
+ dialog.close();
+ } else {
+ if (e.keyCode == 9) {
+ //TAB
+ input.value = last_path + options_element.firstChild.innerText;
+ e.preventDefault();
+ e.stopPropagation();
+ } else if (e.ctrlKey && e.keyCode == 87) {
+ //Ctrl+w
+ //most browsers won't support, but it's good QOL for those that do
+ input.value = path_stem(input.value.slice(0,-1))[0]
+ e.preventDefault();
+ e.stopPropagation();
+ } else if (e.ctrlKey && e.keyCode == 71) {
+ //Ctrl+g
+ //Temporarily disables extension filtering to show all files
+ e.preventDefault();
+ e.stopPropagation();
+ extensions = undefined
+ last_path = null;
+ }
+ if (timeout) {
+ clearTimeout(timeout);
+ }
+ timeout = setTimeout(updateOptions, 10);
+ return;
+ }
+ this.prompt=false;
+ e.preventDefault();
+ e.stopPropagation();
+ });
+
+ var button = dialog.querySelector("button");
+ button.addEventListener("click", (e) => {
+ pathWidget.value = input.value;
+ if (pathWidget.callback) {
+ pathWidget.callback(pathWidget.value);
+ }
+ //unsure why dirty is set here, but not on enter-key above
+ node.graph.setDirtyCanvas(true);
+ dialog.close();
+ this.prompt = false;
+ });
+ var rect = app.canvas.canvas.getBoundingClientRect();
+ var offsetx = -20;
+ var offsety = -20;
+ if (rect) {
+ offsetx -= rect.left;
+ offsety -= rect.top;
+ }
+
+ if (event) {
+ dialog.style.left = event.clientX + offsetx + "px";
+ dialog.style.top = event.clientY + offsety + "px";
+ } else {
+        dialog.style.left = app.canvas.canvas.width * 0.5 + offsetx + "px";
+        dialog.style.top = app.canvas.canvas.height * 0.5 + offsety + "px";
+ }
+ //Search code
+ let options = []
+ function addResult(name, isDir) {
+ let el = document.createElement("div");
+ el.innerText = name;
+ el.className = "litegraph lite-search-item";
+ if (isDir) {
+ el.className += " is-dir";
+ el.addEventListener("click", (e) => {
+ input.value = last_path+name
+ if (timeout) {
+ clearTimeout(timeout);
+ }
+ timeout = setTimeout(updateOptions, 10);
+ });
+ } else {
+ el.addEventListener("click", (e) => {
+ pathWidget.value = last_path+name;
+ if (pathWidget.callback) {
+ pathWidget.callback(pathWidget.value);
+ }
+ dialog.close();
+ pathWidget.prompt = false;
+ });
+ }
+ options_element.appendChild(el);
+ }
+ async function updateOptions() {
+ timeout = null;
+ let [path, remainder] = path_stem(input.value);
+ if (last_path != path) {
+ //fetch options. Must block execution here, so update should be async?
+ let params = {path : path}
+ if (extensions) {
+ params.extensions = extensions
+ }
+ let optionsURL = api.apiURL('getpath?' + new URLSearchParams(params));
+ try {
+ let resp = await fetch(optionsURL);
+ options = await resp.json();
+ } catch(e) {
+ options = []
+ }
+ last_path = path;
+ }
+ options_element.innerHTML = '';
+ //filter options based on remainder
+ for (let option of options) {
+ if (option.startsWith(remainder)) {
+ let isDir = option.endsWith('/')
+ addResult(option, isDir);
+ }
+ }
+ }
+
+ setTimeout(async function() {
+ input.focus();
+ await updateOptions();
+ }, 10);
+
+ return dialog;
+}
+
+app.ui.settings.addSetting({
+ id: "VHS.AdvancedPreviews",
+ name: "🎥🅥🅗🅢 Advanced Previews",
+ type: "boolean",
+ defaultValue: false,
+});
+
+app.registerExtension({
+ name: "VideoHelperSuite.Core",
+ async beforeRegisterNodeDef(nodeType, nodeData, app) {
+ if(nodeData?.name?.startsWith("VHS_")) {
+ useKVState(nodeType);
+ chainCallback(nodeType.prototype, "onNodeCreated", function () {
+ let new_widgets = []
+ if (this.widgets) {
+ for (let w of this.widgets) {
+ let input = this.constructor.nodeData.input
+ let config = input?.required[w.name] ?? input.optional[w.name]
+ if (!config) {
+ continue
+ }
+ if (w?.type == "text" && config[1].vhs_path_extensions) {
+ new_widgets.push(app.widgets.VHSPATH({}, w.name, ["VHSPATH", config[1]]));
+ } else {
+ new_widgets.push(w)
+ }
+ }
+ this.widgets = new_widgets;
+ }
+ });
+ }
+ if (nodeData?.name == "VHS_LoadImages") {
+ addUploadWidget(nodeType, nodeData, "directory", "folder");
+ chainCallback(nodeType.prototype, "onNodeCreated", function() {
+ const pathWidget = this.widgets.find((w) => w.name === "directory");
+ chainCallback(pathWidget, "callback", (value) => {
+ if (!value) {
+ return;
+ }
+ let params = {filename : value, type : "input", format: "folder"};
+ this.updateParameters(params, true);
+ });
+ });
+ addLoadImagesCommon(nodeType, nodeData);
+ } else if (nodeData?.name == "VHS_LoadImagesPath") {
+ addUploadWidget(nodeType, nodeData, "directory", "folder");
+ chainCallback(nodeType.prototype, "onNodeCreated", function() {
+ const pathWidget = this.widgets.find((w) => w.name === "directory");
+ chainCallback(pathWidget, "callback", (value) => {
+ if (!value) {
+ return;
+ }
+ let params = {filename : value, type : "path", format: "folder"};
+ this.updateParameters(params, true);
+ });
+ });
+ addLoadImagesCommon(nodeType, nodeData);
+ } else if (nodeData?.name == "VHS_LoadVideo") {
+ addUploadWidget(nodeType, nodeData, "video");
+ chainCallback(nodeType.prototype, "onNodeCreated", function() {
+ const pathWidget = this.widgets.find((w) => w.name === "video");
+ chainCallback(pathWidget, "callback", (value) => {
+ if (!value) {
+ return;
+ }
+ let parts = ["input", value];
+ let extension_index = parts[1].lastIndexOf(".");
+ let extension = parts[1].slice(extension_index+1);
+ let format = "video"
+ if (["gif", "webp", "avif"].includes(extension)) {
+ format = "image"
+ }
+ format += "/" + extension;
+ let params = {filename : parts[1], type : parts[0], format: format};
+ this.updateParameters(params, true);
+ });
+ });
+ addLoadVideoCommon(nodeType, nodeData);
+ } else if (nodeData?.name =="VHS_LoadVideoPath") {
+ chainCallback(nodeType.prototype, "onNodeCreated", function() {
+ const pathWidget = this.widgets.find((w) => w.name === "video");
+ chainCallback(pathWidget, "callback", (value) => {
+ let extension_index = value.lastIndexOf(".");
+ let extension = value.slice(extension_index+1);
+ let format = "video"
+ if (["gif", "webp", "avif"].includes(extension)) {
+ format = "image"
+ }
+ format += "/" + extension;
+ let params = {filename : value, type: "path", format: format};
+ this.updateParameters(params, true);
+ });
+ });
+ addLoadVideoCommon(nodeType, nodeData);
+ } else if (nodeData?.name == "VHS_VideoCombine") {
+ addDateFormatting(nodeType, "filename_prefix");
+ chainCallback(nodeType.prototype, "onExecuted", function(message) {
+ if (message?.gifs) {
+ this.updateParameters(message.gifs[0], true);
+ }
+ });
+ addVideoPreview(nodeType);
+ addPreviewOptions(nodeType);
+ addFormatWidgets(nodeType);
+
+ //Hide the information passing 'gif' output
+ //TODO: check how this is implemented for save image
+ chainCallback(nodeType.prototype, "onNodeCreated", function() {
+ this._outputs = this.outputs
+ Object.defineProperty(this, "outputs", {
+ set : function(value) {
+ this._outputs = value;
+ requestAnimationFrame(() => {
+ if (app.nodeOutputs[this.id + ""]) {
+ this.updateParameters(app.nodeOutputs[this.id+""].gifs[0], true);
+ }
+ })
+ },
+ get : function() {
+ return this._outputs;
+ }
+ });
+ //Display previews after reload/ loading workflow
+ requestAnimationFrame(() => {this.updateParameters({}, true);});
+ });
+ } else if (nodeData?.name == "VHS_SaveImageSequence") {
+ //Disabled for safety as VHS_SaveImageSequence is not currently merged
+ //addDateFormating(nodeType, "directory_name", timestamp_widget=true);
+ //addTimestampWidget(nodeType, nodeData, "directory_name")
+ } else if (nodeData?.name == "VHS_BatchManager") {
+ chainCallback(nodeType.prototype, "onNodeCreated", function() {
+ this.widgets.push({name: "count", type: "dummy", value: 0,
+ computeSize: () => {return [0,-4]},
+ afterQueued: function() {this.value++;}});
+ });
+ }
+ },
+ async getCustomWidgets() {
+ return {
+ VHSPATH(node, inputName, inputData) {
+ let w = {
+ name : inputName,
+ type : "VHS.PATH",
+ value : "",
+ draw : function(ctx, node, widget_width, y, H) {
+ //Adapted from litegraph.core.js:drawNodeWidgets
+ var show_text = app.canvas.ds.scale > 0.5;
+ var margin = 15;
+ var text_color = LiteGraph.WIDGET_TEXT_COLOR;
+ var secondary_text_color = LiteGraph.WIDGET_SECONDARY_TEXT_COLOR;
+ ctx.textAlign = "left";
+ ctx.strokeStyle = LiteGraph.WIDGET_OUTLINE_COLOR;
+ ctx.fillStyle = LiteGraph.WIDGET_BGCOLOR;
+ ctx.beginPath();
+ if (show_text)
+ ctx.roundRect(margin, y, widget_width - margin * 2, H, [H * 0.5]);
+ else
+ ctx.rect( margin, y, widget_width - margin * 2, H );
+ ctx.fill();
+ if (show_text) {
+ if(!this.disabled)
+ ctx.stroke();
+ ctx.save();
+ ctx.beginPath();
+ ctx.rect(margin, y, widget_width - margin * 2, H);
+ ctx.clip();
+
+ //ctx.stroke();
+ ctx.fillStyle = secondary_text_color;
+ const label = this.label || this.name;
+ if (label != null) {
+ ctx.fillText(label, margin * 2, y + H * 0.7);
+ }
+ ctx.fillStyle = text_color;
+ ctx.textAlign = "right";
+ let disp_text = this.format_path(String(this.value))
+ ctx.fillText(disp_text, widget_width - margin * 2, y + H * 0.7); //30 chars max
+ ctx.restore();
+ }
+ },
+ mouse : searchBox,
+ options : {},
+ format_path : function(path) {
+ //Formats the full path to be under 30 characters
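+ //illustrative (hypothetical path): "/home/user/ComfyUI/input/video.mp4" (34 chars)
+ //is trimmed to "/…/ComfyUI/input/video.mp4" by cutting at the first '/' in the last 28 chars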
+ if (path.length <= 30) {
+ return path;
+ }
+ let filename = path_stem(path)[1]
+ if (filename.length > 28) {
+ //may all fit, but can't squeeze more info
+ return filename.substr(0,30);
+ }
+ //TODO: find solution for windows, path[1] == ':'?
+ let isAbs = path[0] == '/';
+ let partial = path.substr(path.length - (isAbs ? 28:29))
+ let cutoff = partial.indexOf('/');
+ if (cutoff < 0) {
+ //Can occur, but there isn't a nicer way to format
+ return path.substr(path.length-30);
+ }
+ return (isAbs ? '/…':'…') + partial.substr(cutoff);
+
+ }
+ };
+ if (inputData.length > 1) {
+ if (inputData[1].vhs_path_extensions) {
+ w.options.extensions = inputData[1].vhs_path_extensions;
+ }
+ if (inputData[1].default) {
+ w.value = inputData[1].default;
+ }
+ }
+
+ if (!node.widgets) {
+ node.widgets = [];
+ }
+ node.widgets.push(w);
+ return w;
+ }
+ }
+ }
+});
diff --git a/custom_nodes/ComfyUI-VideoHelperSuite/web/js/videoinfo.js b/custom_nodes/ComfyUI-VideoHelperSuite/web/js/videoinfo.js
new file mode 100644
index 0000000000000000000000000000000000000000..1947b47e21319268be0b614e80dc85b0da5b505b
--- /dev/null
+++ b/custom_nodes/ComfyUI-VideoHelperSuite/web/js/videoinfo.js
@@ -0,0 +1,102 @@
+import { app } from '../../../scripts/app.js'
+
+
+function getVideoMetadata(file) {
+ return new Promise((r) => {
+ const reader = new FileReader();
+ reader.onload = (event) => {
+ const videoData = new Uint8Array(event.target.result);
+ const dataView = new DataView(videoData.buffer);
+
+ let decoder = new TextDecoder();
+ // Check for known valid magic strings
+ if (dataView.getUint32(0) == 0x1A45DFA3) {
+ //webm
+ //see http://wiki.webmproject.org/webm-metadata/global-metadata
+ //and https://www.matroska.org/technical/elements.html
+ //contrary to specs, tag seems consistently at start
+ //COMMENT + 0x4487 + packed length?
+ //length 0x8d8 becomes 0x48d8
+ //
+ //description for variable length ints https://github.com/ietf-wg-cellar/ebml-specification/blob/master/specification.markdown
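+ //worked example: a 2-octet length vint whose first bytes are 0x48 0xd8 reads as
+ //0x48d8xxxx via getUint32; clz32 sees 1 leading zero -> n_octets = 2, and the
+ //payload length is (vint >> 16) & ~(1 << 14) = 0x08d8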
+ let offset = 4 + 8; //COMMENT is 7 chars + 1 to realign
+ while(offset < videoData.length-16) {
+ //Check for text tags
+ if (dataView.getUint16(offset) == 0x4487) {
+ //check that name of tag is COMMENT
+ const name = String.fromCharCode(...videoData.slice(offset-7,offset));
+ if (name === "COMMENT") {
+ let vint = dataView.getUint32(offset+2);
+ let n_octets = Math.clz32(vint)+1;
+ if (n_octets < 4) {//250MB sanity cutoff
+ let length = (vint >> (8*(4-n_octets))) & ~(1 << (7*n_octets));
+ const content = decoder.decode(videoData.slice(offset+2+n_octets, offset+2+n_octets+length));
+ const json = JSON.parse(content);
+ r(json);
+ return;
+ }
+ }
+ }
+ offset+=1;
+ }
+ //no embedded workflow found; resolve with nothing so awaiting callers don't hang
+ r();
+ } else if (dataView.getUint32(4) == 0x66747970 && dataView.getUint32(8) == 0x69736F6D) {
+ //mp4
+ //see https://developer.apple.com/documentation/quicktime-file-format
+ //Seems to make no guarantee for alignment
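+ //layout assumed while scanning backwards (sizes in bytes):
+ //...['©cmt':4][size:4]['data':4][type:4][locale:4][payload]
+ //so '©cmt' sits 8 bytes before 'data' and the payload starts 12 bytes after it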
+ let offset = videoData.length-4;
+ while (offset > 16) {//rough safe guess
+ if (dataView.getUint32(offset) == 0x64617461) {//any data tag
+ if (dataView.getUint32(offset - 8) == 0xa9636d74) {//cmt data tag
+ let type = dataView.getUint32(offset+4); //seemingly 1
+ let locale = dataView.getUint32(offset+8); //seemingly 0
+ let size = dataView.getUint32(offset-4) - 4*4;
+ const content = decoder.decode(videoData.slice(offset+12, offset+12+size));
+ const json = JSON.parse(content);
+ r(json);
+ return;
+ }
+ }
+
+ offset-=1;
+ }
+ //no ©cmt atom found; resolve with nothing so awaiting callers don't hang
+ r();
+ } else {
+ console.error("Unknown magic: " + dataView.getUint32(0))
+ r();
+ return;
+ }
+
+ };
+
+ reader.readAsArrayBuffer(file);
+ });
+}
+function isVideoFile(file) {
+ if (file?.name?.endsWith(".webm")) {
+ return true;
+ }
+ if (file?.name?.endsWith(".mp4")) {
+ return true;
+ }
+
+ return false;
+}
+
+let originalHandleFile = app.handleFile;
+app.handleFile = handleFile;
+async function handleFile(file) {
+ if (file?.type?.startsWith("video/") || isVideoFile(file)) {
+ const videoInfo = await getVideoMetadata(file);
+ if (videoInfo) {
+ if (videoInfo.workflow) {
+
+ app.loadGraphData(videoInfo.workflow);
+ }
+ //Potentially check for/parse A1111 metadata here.
+ }
+ } else {
+ return await originalHandleFile.apply(this, arguments);
+ }
+}
+
+//hijack comfy-file-input to allow webm/mp4
+document.getElementById("comfy-file-input").accept += ",video/webm,video/mp4";
diff --git a/custom_nodes/ComfyUI_FizzNodes/BatchFuncs.py b/custom_nodes/ComfyUI_FizzNodes/BatchFuncs.py
new file mode 100644
index 0000000000000000000000000000000000000000..e81993e42ea9b85d871a071af9fb6dd8170ae801
--- /dev/null
+++ b/custom_nodes/ComfyUI_FizzNodes/BatchFuncs.py
@@ -0,0 +1,435 @@
+# These nodes were made using code from the Deforum extension for A1111 webui
+# You can find the project here: https://github.com/deforum-art/sd-webui-deforum
+
+import numexpr
+import torch
+import numpy as np
+import pandas as pd
+import re
+
+from .ScheduleFuncs import *
+
+def prepare_batch_prompt(prompt_series, max_frames, frame_idx, prompt_weight_1=0, prompt_weight_2=0, prompt_weight_3=0,
+ prompt_weight_4=0): # calculate expressions from the text input and return a string
+ max_f = max_frames - 1
+ pattern = r'`.*?`' # set so the expression will be read between two backticks (``)
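+ # e.g. with frame_idx=12 and max_frames=120, "`t/max_f`" becomes "12/119" and
+ # numexpr evaluates it to ~0.1008 before being spliced back into the prompt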
+ regex = re.compile(pattern)
+ prompt_parsed = str(prompt_series)
+
+ for match in regex.finditer(prompt_parsed):
+ matched_string = match.group(0)
+ parsed_string = matched_string.replace('t', f'{frame_idx}').replace("pw_a", f"{prompt_weight_1}").replace("pw_b",
+ f"{prompt_weight_2}").replace("pw_c", f"{prompt_weight_3}").replace("pw_d",
+ f"{prompt_weight_4}").replace("max_f",
+ f"{max_f}").replace('`', '') # replace t, max_f and `` respectively
+ parsed_value = numexpr.evaluate(parsed_string)
+ prompt_parsed = prompt_parsed.replace(matched_string, str(parsed_value))
+ return prompt_parsed.strip()
+
+def batch_split_weighted_subprompts(text, pre_text, app_text):
+ pos = {}
+ neg = {}
+ pre_text = str(pre_text)
+ app_text = str(app_text)
+
+ if "--neg" in pre_text:
+ pre_pos, pre_neg = pre_text.split("--neg")
+ else:
+ pre_pos, pre_neg = pre_text, ""
+
+ if "--neg" in app_text:
+ app_pos, app_neg = app_text.split("--neg")
+ else:
+ app_pos, app_neg = app_text, ""
+
+ for frame, prompt in text.items():
+ negative_prompts = ""
+ positive_prompts = ""
+ prompt_split = prompt.split("--neg")
+
+ if len(prompt_split) > 1:
+ positive_prompts, negative_prompts = prompt_split[0], prompt_split[1]
+ else:
+ positive_prompts = prompt_split[0]
+
+ pos[frame] = ""
+ neg[frame] = ""
+ pos[frame] += (str(pre_pos) + " " + positive_prompts + " " + str(app_pos))
+ neg[frame] += (str(pre_neg) + " " + negative_prompts + " " + str(app_neg))
+ if pos[frame].endswith('0'):
+ pos[frame] = pos[frame][:-1]
+ if neg[frame].endswith('0'):
+ neg[frame] = neg[frame][:-1]
+ return pos, neg
+
+def interpolate_prompt_series(animation_prompts, max_frames, start_frame, pre_text, app_text, prompt_weight_1=[],
+ prompt_weight_2=[], prompt_weight_3=[], prompt_weight_4=[], Is_print = False):
+
+ max_f = max_frames # needed for numexpr even though it doesn't look like it's in use.
+ parsed_animation_prompts = {}
+
+
+ for key, value in animation_prompts.items():
+ if check_is_number(key): # default case 0:(1 + t %5), 30:(5-t%2)
+ parsed_animation_prompts[key] = value
+ else: # math on the left hand side case 0:(1 + t %5), maxKeyframes/2:(5-t%2)
+ parsed_animation_prompts[int(numexpr.evaluate(key))] = value
+
+ sorted_prompts = sorted(parsed_animation_prompts.items(), key=lambda item: int(item[0]))
+
+ # Automatically set the first keyframe to 0 if it's missing
+ if sorted_prompts[0][0] != "0":
+ sorted_prompts.insert(0, ("0", sorted_prompts[0][1]))
+
+ # Automatically set the last keyframe to the maximum number of frames
+ if sorted_prompts[-1][0] != str(max_frames):
+ sorted_prompts.append((str(max_frames), sorted_prompts[-1][1]))
+ # Setup containers for interpolated prompts
+ cur_prompt_series = pd.Series([np.nan for a in range(max_frames)])
+ nxt_prompt_series = pd.Series([np.nan for a in range(max_frames)])
+
+ # simple array for strength values
+ weight_series = [np.nan] * max_frames
+
+ # in case there is only one keyed prompt, set all prompts to that prompt
+ if len(sorted_prompts) == 1:
+ for i in range(0, len(cur_prompt_series)):
+ current_prompt = sorted_prompts[0][1]
+ cur_prompt_series[i] = str(current_prompt)
+ nxt_prompt_series[i] = str(current_prompt)
+
+ # Initialized outside of loop for nan check
+ current_key = 0
+ next_key = 0
+
+ if type(prompt_weight_1) in {int, float}:
+ prompt_weight_1 = tuple([prompt_weight_1] * max_frames)
+
+ if type(prompt_weight_2) in {int, float}:
+ prompt_weight_2 = tuple([prompt_weight_2] * max_frames)
+
+ if type(prompt_weight_3) in {int, float}:
+ prompt_weight_3 = tuple([prompt_weight_3] * max_frames)
+
+ if type(prompt_weight_4) in {int, float}:
+ prompt_weight_4 = tuple([prompt_weight_4] * max_frames)
+
+ # For every keyframe prompt except the last
+ for i in range(0, len(sorted_prompts) - 1):
+ # Get current and next keyframe
+ current_key = int(sorted_prompts[i][0])
+ next_key = int(sorted_prompts[i + 1][0])
+
+ # Ensure there's no weird ordering issues or duplication in the animation prompts
+ # (unlikely because we sort above, and the json parser will strip dupes)
+ if current_key >= next_key:
+ print(
+ f"WARNING: Sequential prompt keyframes {i}:{current_key} and {i + 1}:{next_key} are not monotonously increasing; skipping interpolation.")
+ continue
+
+ # Get current and next keyframes' positive and negative prompts (if any)
+ current_prompt = sorted_prompts[i][1]
+ next_prompt = sorted_prompts[i + 1][1]
+
+ # Calculate how much to shift the weight from current to next prompt at each frame.
+ weight_step = 1 / (next_key - current_key)
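+ # e.g. keyframes at 0 and 12 give weight_step = 1/12; at frame 3 the next
+ # prompt's weight is 0.25 and the current prompt keeps 0.75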
+
+ for f in range(max(current_key, 0), min(next_key, len(cur_prompt_series))):
+ next_weight = weight_step * (f - current_key)
+ current_weight = 1 - next_weight
+
+ # add the appropriate prompts and weights to their respective containers.
+ weight_series[f] = 0.0
+ cur_prompt_series[f] = str(current_prompt)
+ nxt_prompt_series[f] = str(next_prompt)
+
+ weight_series[f] += current_weight
+
+ current_key = next_key
+ next_key = max_frames
+ current_weight = 0.0
+
+ index_offset = 0
+ # Evaluate the current and next prompt's expressions
+ for i in range(start_frame, len(cur_prompt_series)):
+ cur_prompt_series[i] = prepare_batch_prompt(cur_prompt_series[i], max_frames, i, prompt_weight_1[i],
+ prompt_weight_2[i], prompt_weight_3[i], prompt_weight_4[i])
+ nxt_prompt_series[i] = prepare_batch_prompt(nxt_prompt_series[i], max_frames, i, prompt_weight_1[i],
+ prompt_weight_2[i], prompt_weight_3[i], prompt_weight_4[i])
+ if Is_print:
+ # Show the to/from prompts with evaluated expressions for transparency.
+ print("\n", "Max Frames: ", max_frames, "\n", "frame index: ", (start_frame + i), "\n", "Current Prompt: ",
+ cur_prompt_series[i], "\n", "Next Prompt: ", nxt_prompt_series[i], "\n", "Strength : ",
+ weight_series[i], "\n")
+ index_offset = index_offset + 1
+
+
+
+ # Return the per-frame current/next prompt series along with the blend weights;
+ # the caller decides whether to encode one prompt or blend both.
+ return (cur_prompt_series, nxt_prompt_series, weight_series)
+
+
+def BatchPoolAnimConditioning(cur_prompt_series, nxt_prompt_series, weight_series, clip):
+ pooled_out = []
+ cond_out = []
+
+ for i in range(len(cur_prompt_series)):
+ tokens = clip.tokenize(str(cur_prompt_series[i]))
+ cond_to, pooled_to = clip.encode_from_tokens(tokens, return_pooled=True)
+
+ if i < len(nxt_prompt_series):
+ tokens = clip.tokenize(str(nxt_prompt_series[i]))
+ cond_from, pooled_from = clip.encode_from_tokens(tokens, return_pooled=True)
+ else:
+ cond_from, pooled_from = torch.zeros_like(cond_to), torch.zeros_like(pooled_to)
+
+ interpolated_conditioning = addWeighted([[cond_to, {"pooled_output": pooled_to}]],
+ [[cond_from, {"pooled_output": pooled_from}]],
+ weight_series[i])
+
+ interpolated_cond = interpolated_conditioning[0][0]
+ interpolated_pooled = interpolated_conditioning[0][1].get("pooled_output", pooled_from)
+
+ cond_out.append(interpolated_cond)
+ pooled_out.append(interpolated_pooled)
+
+ final_pooled_output = torch.cat(pooled_out, dim=0)
+ final_conditioning = torch.cat(cond_out, dim=0)
+
+ return [[final_conditioning, {"pooled_output": final_pooled_output}]]
+
+
+def BatchGLIGENConditioning(cur_prompt_series, nxt_prompt_series, weight_series, clip):
+ pooled_out = []
+ cond_out = []
+
+ for i in range(len(cur_prompt_series)):
+ tokens = clip.tokenize(str(cur_prompt_series[i]))
+ cond_to, pooled_to = clip.encode_from_tokens(tokens, return_pooled=True)
+
+ tokens = clip.tokenize(str(nxt_prompt_series[i]))
+ cond_from, pooled_from = clip.encode_from_tokens(tokens, return_pooled=True)
+
+ interpolated_conditioning = addWeighted([[cond_to, {"pooled_output": pooled_to}]],
+ [[cond_from, {"pooled_output": pooled_from}]],
+ weight_series[i])
+
+ interpolated_cond = interpolated_conditioning[0][0]
+ interpolated_pooled = interpolated_conditioning[0][1].get("pooled_output", pooled_from)
+
+ pooled_out.append(interpolated_pooled)
+ cond_out.append(interpolated_cond)
+
+ return cond_out, pooled_out
+
+def BatchPoolAnimConditioningSDXL(cur_prompt_series, nxt_prompt_series, weight_series, clip):
+ pooled_out = []
+ cond_out = []
+
+ for i in range(len(cur_prompt_series)):
+ interpolated_conditioning = addWeighted(cur_prompt_series[i],
+ nxt_prompt_series[i],
+ weight_series[i])
+
+ interpolated_cond = interpolated_conditioning[0][0]
+ interpolated_pooled = interpolated_conditioning[0][1].get("pooled_output")
+
+ pooled_out.append(interpolated_pooled)
+ cond_out.append(interpolated_cond)
+
+ final_pooled_output = torch.cat(pooled_out, dim=0)
+ final_conditioning = torch.cat(cond_out, dim=0)
+
+ return [[final_conditioning, {"pooled_output": final_pooled_output}]]
+
+
+def BatchInterpolatePromptsSDXL(animation_promptsG, animation_promptsL, max_frames, clip, app_text_G,
+ app_text_L, pre_text_G, pre_text_L, pw_a, pw_b, pw_c, pw_d, width, height, crop_w,
+ crop_h, target_width, target_height, Is_print = False):
+
+ # parse the conditioning strength and determine in-betweens.
+ # Get prompts sorted by keyframe
+ max_f = max_frames # needed for numexpr even though it doesn't look like it's in use.
+ parsed_animation_promptsG = {}
+ parsed_animation_promptsL = {}
+ for key, value in animation_promptsG.items():
+ if check_is_number(key): # default case 0:(1 + t %5), 30:(5-t%2)
+ parsed_animation_promptsG[key] = value
+ else: # math on the left hand side case 0:(1 + t %5), maxKeyframes/2:(5-t%2)
+ parsed_animation_promptsG[int(numexpr.evaluate(key))] = value
+
+ sorted_prompts_G = sorted(parsed_animation_promptsG.items(), key=lambda item: int(item[0]))
+
+ for key, value in animation_promptsL.items():
+ if check_is_number(key): # default case 0:(1 + t %5), 30:(5-t%2)
+ parsed_animation_promptsL[key] = value
+ else: # math on the left hand side case 0:(1 + t %5), maxKeyframes/2:(5-t%2)
+ parsed_animation_promptsL[int(numexpr.evaluate(key))] = value
+
+ sorted_prompts_L = sorted(parsed_animation_promptsL.items(), key=lambda item: int(item[0]))
+
+ # Setup containers for interpolated prompts
+ cur_prompt_series_G = pd.Series([np.nan for a in range(max_frames)])
+ nxt_prompt_series_G = pd.Series([np.nan for a in range(max_frames)])
+
+ cur_prompt_series_L = pd.Series([np.nan for a in range(max_frames)])
+ nxt_prompt_series_L = pd.Series([np.nan for a in range(max_frames)])
+
+ # simple array for strength values
+ weight_series = [np.nan] * max_frames
+
+ # in case there is only one keyed prompt, set all prompts to that prompt
+ if len(sorted_prompts_G) == 1:
+ for i in range(0, len(cur_prompt_series_G)):
+ current_prompt_G = sorted_prompts_G[0][1]
+ cur_prompt_series_G[i] = str(pre_text_G) + " " + str(current_prompt_G) + " " + str(app_text_G)
+ nxt_prompt_series_G[i] = str(pre_text_G) + " " + str(current_prompt_G) + " " + str(app_text_G)
+
+ if len(sorted_prompts_L) == 1:
+ for i in range(0, len(cur_prompt_series_L)):
+ current_prompt_L = sorted_prompts_L[0][1]
+ cur_prompt_series_L[i] = str(pre_text_L) + " " + str(current_prompt_L) + " " + str(app_text_L)
+ nxt_prompt_series_L[i] = str(pre_text_L) + " " + str(current_prompt_L) + " " + str(app_text_L)
+
+ # Initialized outside of loop for nan check
+ current_key = 0
+ next_key = 0
+
+ # For every keyframe prompt except the last
+ for i in range(0, len(sorted_prompts_G) - 1):
+ # Get current and next keyframe
+ current_key = int(sorted_prompts_G[i][0])
+ next_key = int(sorted_prompts_G[i + 1][0])
+
+ # Ensure there's no weird ordering issues or duplication in the animation prompts
+ # (unlikely because we sort above, and the json parser will strip dupes)
+ if current_key >= next_key:
+ print(
+ f"WARNING: Sequential prompt keyframes {i}:{current_key} and {i + 1}:{next_key} are not monotonously increasing; skipping interpolation.")
+ continue
+
+ # Get current and next keyframes' positive and negative prompts (if any)
+ current_prompt_G = sorted_prompts_G[i][1]
+ next_prompt_G = sorted_prompts_G[i + 1][1]
+
+ # Calculate how much to shift the weight from current to next prompt at each frame.
+ weight_step = 1 / (next_key - current_key)
+
+ for f in range(current_key, next_key):
+ next_weight = weight_step * (f - current_key)
+ current_weight = 1 - next_weight
+
+ # add the appropriate prompts and weights to their respective containers.
+ if f < max_frames:
+ cur_prompt_series_G[f] = ''
+ nxt_prompt_series_G[f] = ''
+ weight_series[f] = 0.0
+
+ cur_prompt_series_G[f] += (str(pre_text_G) + " " + str(current_prompt_G) + " " + str(app_text_G))
+ nxt_prompt_series_G[f] += (str(pre_text_G) + " " + str(next_prompt_G) + " " + str(app_text_G))
+
+ weight_series[f] += current_weight
+
+ current_key = next_key
+ next_key = max_frames
+ current_weight = 0.0
+ # second loop to catch any nan runoff
+ for f in range(current_key, next_key):
+ next_weight = weight_step * (f - current_key)
+
+ # add the appropriate prompts and weights to their respective containers.
+ cur_prompt_series_G[f] = ''
+ nxt_prompt_series_G[f] = ''
+ weight_series[f] = current_weight
+
+ cur_prompt_series_G[f] += (str(pre_text_G) + " " + str(current_prompt_G) + " " + str(app_text_G))
+ nxt_prompt_series_G[f] += (str(pre_text_G) + " " + str(next_prompt_G) + " " + str(app_text_G))
+
+ # Reset outside of loop for nan check
+ current_key = 0
+ next_key = 0
+
+ # For every keyframe prompt except the last
+ for i in range(0, len(sorted_prompts_L) - 1):
+ # Get current and next keyframe
+ current_key = int(sorted_prompts_L[i][0])
+ next_key = int(sorted_prompts_L[i + 1][0])
+
+ # Ensure there's no weird ordering issues or duplication in the animation prompts
+ # (unlikely because we sort above, and the json parser will strip dupes)
+ if current_key >= next_key:
+ print(
+ f"WARNING: Sequential prompt keyframes {i}:{current_key} and {i + 1}:{next_key} are not monotonously increasing; skipping interpolation.")
+ continue
+
+ # Get current and next keyframes' positive and negative prompts (if any)
+ current_prompt_L = sorted_prompts_L[i][1]
+ next_prompt_L = sorted_prompts_L[i + 1][1]
+
+ # Calculate how much to shift the weight from current to next prompt at each frame.
+ weight_step = 1 / (next_key - current_key)
+
+ for f in range(current_key, next_key):
+ next_weight = weight_step * (f - current_key)
+ current_weight = 1 - next_weight
+
+ # add the appropriate prompts and weights to their respective containers.
+ if f < max_frames:
+ cur_prompt_series_L[f] = ''
+ nxt_prompt_series_L[f] = ''
+ weight_series[f] = 0.0
+
+ cur_prompt_series_L[f] += (str(pre_text_L) + " " + str(current_prompt_L) + " " + str(app_text_L))
+ nxt_prompt_series_L[f] += (str(pre_text_L) + " " + str(next_prompt_L) + " " + str(app_text_L))
+
+ weight_series[f] += current_weight
+
+ current_key = next_key
+ next_key = max_frames
+ current_weight = 0.0
+ # second loop to catch any nan runoff
+ for f in range(current_key, next_key):
+ next_weight = weight_step * (f - current_key)
+
+ # add the appropriate prompts and weights to their respective containers.
+ cur_prompt_series_L[f] = ''
+ nxt_prompt_series_L[f] = ''
+ weight_series[f] = current_weight
+
+ cur_prompt_series_L[f] += (str(pre_text_L) + " " + str(current_prompt_L) + " " + str(app_text_L))
+ nxt_prompt_series_L[f] += (str(pre_text_L) + " " + str(next_prompt_L) + " " + str(app_text_L))
+
+ # Evaluate the current and next prompt's expressions
+ for i in range(0, max_frames):
+ cur_prompt_series_G[i] = prepare_batch_prompt(cur_prompt_series_G[i], max_frames, i,
+ pw_a, pw_b, pw_c, pw_d)
+ nxt_prompt_series_G[i] = prepare_batch_prompt(nxt_prompt_series_G[i], max_frames, i,
+ pw_a, pw_b, pw_c, pw_d)
+ cur_prompt_series_L[i] = prepare_batch_prompt(cur_prompt_series_L[i], max_frames, i,
+ pw_a, pw_b, pw_c, pw_d)
+ nxt_prompt_series_L[i] = prepare_batch_prompt(nxt_prompt_series_L[i], max_frames, i,
+ pw_a, pw_b, pw_c, pw_d)
+
+ current_conds = []
+ next_conds = []
+ for i in range(0, max_frames):
+ current_conds.append(SDXLencode(clip, width, height, crop_w, crop_h, target_width, target_height,
+ cur_prompt_series_G[i], cur_prompt_series_L[i]))
+ next_conds.append(SDXLencode(clip, width, height, crop_w, crop_h, target_width, target_height,
+ nxt_prompt_series_G[i], nxt_prompt_series_L[i]))
+
+ if Is_print:
+ # Show the to/from prompts with evaluated expressions for transparency.
+ for i in range(0, max_frames):
+ print("\n", "Max Frames: ", max_frames, "\n", "Current Prompt G: ", cur_prompt_series_G[i],
+ "\n", "Current Prompt L: ", cur_prompt_series_L[i], "\n", "Next Prompt G: ", nxt_prompt_series_G[i],
+ "\n", "Next Prompt L : ", nxt_prompt_series_L[i], "\n", "Current weight: ", weight_series[i], "\n")
+
+ return BatchPoolAnimConditioningSDXL(current_conds, next_conds, weight_series, clip)
diff --git a/custom_nodes/ComfyUI_FizzNodes/FrameNodes.py b/custom_nodes/ComfyUI_FizzNodes/FrameNodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..741426fd2b69912cbbf4fb5508cb8dded540fe33
--- /dev/null
+++ b/custom_nodes/ComfyUI_FizzNodes/FrameNodes.py
@@ -0,0 +1,222 @@
+class StringConcatenate:
+ def __init__(self):
+ pass
+
+ defaultPrompt = """"0" :"",
+ "12" :"",
+ "24" :"",
+ "36" :"",
+ "48" :"",
+ "60" :"",
+ "72" :"",
+ "84" :"",
+ "96" :"",
+ "108" :"",
+ "120" :""
+ """
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "text_a": ("STRING", {"forceInput": True, "multiline": True, "default": ""}),
+ "frame_a": ("INT", {"default": 0}),
+ "text_b": ("STRING", {"forceInput": True, "multiline": True, "default": ""}),
+ "frame_b": ("INT", {"default": 12})
+ },
+ "optional": {
+ "text_c": ("STRING", {"forceInput": True, "multiline": True, "default": ""}),
+ "frame_c": ("INT", {"default": 24}),
+ "text_d": ("STRING", {"forceInput": True, "multiline": True, "default": ""}),
+ "frame_d": ("INT", {"default": 36}),
+ "text_e": ("STRING", {"forceInput": True, "multiline": True, "default": ""}),
+ "frame_e": ("INT", {"default": 48}),
+ "text_f": ("STRING", {"forceInput": True, "multiline": True, "default": ""}),
+ "frame_f": ("INT", {"default": 60}),
+ "text_g": ("STRING", {"forceInput": True, "multiline": True, "default": ""}),
+ "frame_g": ("INT", {"default": 72})
+ }
+ }
+ RETURN_TYPES = ("STRING",)
+ FUNCTION = "frame_concatenate_list"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/FrameNodes"
+
+ def frame_concatenate_list(self, text_a, frame_a, text_b, frame_b, text_c=None, frame_c=None, text_d=None,
+ frame_d=None, text_e=None, frame_e=None, text_f=None, frame_f=None, text_g=None,
+ frame_g=None):
+
+ text_a = text_a.replace('\n', '')
+ text_b = text_b.replace('\n', '')
+ text_c = text_c.replace('\n', '') if text_c is not None else None
+ text_d = text_d.replace('\n', '') if text_d is not None else None
+ text_e = text_e.replace('\n', '') if text_e is not None else None
+ text_f = text_f.replace('\n', '') if text_f is not None else None
+ text_g = text_g.replace('\n', '') if text_g is not None else None
+
+ text_list = f'"{frame_a}": "{text_a}",'
+ text_list += f'"{frame_b}": "{text_b}",'
+
+ if frame_c is not None and text_c is not None:
+ text_list += f'"{frame_c}": "{text_c}",'
+
+ if frame_d is not None and text_d is not None:
+ text_list += f'"{frame_d}": "{text_d}",'
+
+ if frame_e is not None and text_e is not None:
+ text_list += f'"{frame_e}": "{text_e}",'
+
+ if frame_f is not None and text_f is not None:
+ text_list += f'"{frame_f}": "{text_f}",'
+
+ if frame_g is not None and text_g is not None:
+ text_list += f'"{frame_g}": "{text_g}",'
+
+ return (text_list,)
+
+
+class InitNodeFrame:
+ def __init__(self):
+ self.frames = {}
+ self.thisFrame = {}
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "frame": ("INT", {"default": 0, "min": 0}),
+ "positive_text": ("STRING", {"multiline": True}),
+ },
+ "optional": {
+ "negative_text": ("STRING", {"multiline": True}),
+ "general_positive": ("STRING", {"multiline": True}),
+ "general_negative": ("STRING", {"multiline": True}),
+ "previous_frame": ("FIZZFRAME", {"forceInput": True}),
+ "clip": ("CLIP",),
+ }
+ }
+ RETURN_TYPES = ("FIZZFRAME","CONDITIONING","CONDITIONING",)
+ FUNCTION = "create_frame"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/FrameNodes"
+
+ def create_frame(self, frame, positive_text, negative_text=None, general_positive=None, general_negative=None, previous_frame=None, clip=None):
+ new_frame = {
+ "positive_text": positive_text,
+ "negative_text": negative_text,
+ }
+
+ if previous_frame:
+ prev_frame = previous_frame.thisFrame
+ new_frame["general_positive"] = prev_frame["general_positive"]
+ new_frame["general_negative"] = prev_frame["general_negative"]
+ new_frame["clip"] = prev_frame["clip"]
+ self.frames = previous_frame.frames
+
+ if general_positive:
+ new_frame["general_positive"] = general_positive
+
+ if general_negative:
+ new_frame["general_negative"] = general_negative
+
+ new_positive_text = f"{positive_text}, {new_frame['general_positive']}"
+ new_negative_text = f"{negative_text}, {new_frame['general_negative']}"
+
+ if clip:
+ new_frame["clip"] = clip
+
+ pos_tokens = new_frame["clip"].tokenize(new_positive_text)
+ pos_cond, pos_pooled = new_frame["clip"].encode_from_tokens(pos_tokens, return_pooled=True)
+ new_frame["pos_conditioning"] = {"cond": pos_cond, "pooled": pos_pooled}
+
+ neg_tokens = new_frame["clip"].tokenize(new_negative_text)
+ neg_cond, neg_pooled = new_frame["clip"].encode_from_tokens(neg_tokens, return_pooled=True)
+ new_frame["neg_conditioning"] = {"cond": neg_cond, "pooled": neg_pooled}
+
+ self.frames[frame] = new_frame
+ self.thisFrame = new_frame
+
+ return (self, [[pos_cond, {"pooled_output": pos_pooled}]], [[neg_cond, {"pooled_output": neg_pooled}]])
+
+class NodeFrame:
+
+ def __init__(self):
+ self.frames = {}
+ self.thisFrame = {}
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "frame": ("INT", {"default": 0, "min": 0}),
+ "previous_frame": ("FIZZFRAME", {"forceInput": True}),
+ "positive_text": ("STRING", {"multiline": True}),
+ },
+ "optional": {
+ "negative_text": ("STRING", {"multiline": True}),
+ }
+ }
+ RETURN_TYPES = ("FIZZFRAME","CONDITIONING","CONDITIONING",)
+ FUNCTION = "create_frame"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/FrameNodes"
+
+ def create_frame(self, frame, previous_frame, positive_text, negative_text=None):
+ self.frames = previous_frame.frames
+ prev_frame = previous_frame.thisFrame
+
+ new_positive_text = f"{positive_text}, {prev_frame['general_positive']}"
+ new_negative_text = f"{negative_text}, {prev_frame['general_negative']}"
+
+ pos_tokens = prev_frame["clip"].tokenize(new_positive_text)
+ pos_cond, pos_pooled = prev_frame["clip"].encode_from_tokens(pos_tokens, return_pooled=True)
+
+ neg_tokens = prev_frame["clip"].tokenize(new_negative_text)
+ neg_cond, neg_pooled = prev_frame["clip"].encode_from_tokens(neg_tokens, return_pooled=True)
+
+ new_frame = {
+ "positive_text": positive_text,
+ "negative_text": negative_text,
+ "general_positive": prev_frame["general_positive"],
+ "general_negative": prev_frame["general_negative"],
+ "clip": prev_frame["clip"],
+ "pos_conditioning": {"cond": pos_cond, "pooled": pos_pooled},
+ "neg_conditioning": {"cond": neg_cond, "pooled": neg_pooled},
+ }
+ self.thisFrame = new_frame
+ self.frames[frame] = new_frame
+
+ return (self, [[pos_cond, {"pooled_output": pos_pooled}]], [[neg_cond, {"pooled_output": neg_pooled}]])
+
+class FrameConcatenate:
+ def __init__(self):
+ pass
+
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "frame": ("FIZZFRAME", {"forceInput": True})
+ },
+ }
+ RETURN_TYPES = ("STRING",)
+ FUNCTION = "frame_concatenate"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/FrameNodes"
+
+ def frame_concatenate(self, frame):
+ text_list = ""
+ for frame_digit in frame.frames:
+ new_frame = frame.frames[frame_digit]
+ text_list += f'"{frame_digit}": "{new_frame["positive_text"]}'
+ if new_frame.get("general_positive"):
+ text_list += f', {new_frame["general_positive"]}'
+ if new_frame.get("negative_text") or new_frame.get("general_negative"):
+ text_list += f', --neg '
+ if new_frame.get("negative_text"):
+ text_list += f', {new_frame["negative_text"]}'
+ if new_frame.get("general_negative"):
+ text_list += f', {new_frame["general_negative"]}'
+ text_list += f'",\n'
+ text_list = text_list[:-2]
+
+ return (text_list,)
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI_FizzNodes/HelperNodes.py b/custom_nodes/ComfyUI_FizzNodes/HelperNodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..9030346ede313f7ac649566cf21438a921a1fc0b
--- /dev/null
+++ b/custom_nodes/ComfyUI_FizzNodes/HelperNodes.py
@@ -0,0 +1,59 @@
+
+class CalculateFrameOffset:
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "current_frame": ("INT", {"default": 0, "min": 0}),
+ "max_frames": ("INT", {"default": 18, "min": 0}),
+ "num_latent_inputs": ("INT", {"default": 4, "min": 0}),
+ "index": ("INT", {"default": 4, "min": 0}),
+ }
+ }
+ RETURN_TYPES = ("INT", )
+ FUNCTION = "assignFrameNum"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/HelperNodes"
+
+ def assignFrameNum(self, current_frame, max_frames, num_latent_inputs, index):
+ if current_frame == 0:
+ return (index,)
+ else:
+ start_frame = (current_frame - 1) * (num_latent_inputs - 1) + (num_latent_inputs-1)
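+ # e.g. (illustrative) current_frame=2, num_latent_inputs=4, index=1,
+ # max_frames=18: start_frame = 1*3 + 3 = 6, so the node returns (6+1) % 18 = 7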
+ return ((start_frame + index) % max_frames,)
+class ConcatStringSingle:
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "string_a": ("STRING", {"forceInput":True,"default":"","multiline": True}),
+ "string_b": ("STRING", {"forceInput":True,"default":"","multiline": True}),
+ }
+ }
+ RETURN_TYPES = ("STRING", )
+ FUNCTION = "concat"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/HelperNodes"
+
+ def concat(self, string_a, string_b):
+ c = string_a + string_b
+ return (c,)
+
+class convertKeyframeKeysToBatchKeys:
+ @classmethod
+ def INPUT_TYPES(cls):
+ return {
+ "required": {
+ "input": ("INT", {"forceInput": True, "default": 0}),
+ "num_latents": ("INT", {"default": 16}),
+ }
+ }
+
+ RETURN_TYPES = ("INT",)
+ FUNCTION = "concat"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/HelperNodes"
+
+ def concat(self, input, num_latents):
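+ # e.g. (illustrative) keyframe key input=3 with num_latents=16 maps to
+ # batch key 3*16 - 1 = 47, i.e. the last latent index of that keyframe's batch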
+ c = input * num_latents - 1
+ return (c,)
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI_FizzNodes/LICENCE.txt b/custom_nodes/ComfyUI_FizzNodes/LICENCE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d95e1fa9bf81a3ecd46c45912653f0054b8058b8
--- /dev/null
+++ b/custom_nodes/ComfyUI_FizzNodes/LICENCE.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Fizzledorf
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI_FizzNodes/README.md b/custom_nodes/ComfyUI_FizzNodes/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..fc8d1642579980aa8c62de3fb10d02ef2bb725c9
--- /dev/null
+++ b/custom_nodes/ComfyUI_FizzNodes/README.md
@@ -0,0 +1,78 @@
+
+# FizzNodes
+Scheduled prompts, scheduled float/int values, and wave function nodes for animations and utility. Compatible with https://www.framesync.xyz/ and https://www.chigozie.co.uk/keyframe-string-generator/ for audio-synced animations in [ComfyUI](https://github.com/comfyanonymous/ComfyUI).
+
+**Please see the [FizzNodes wiki](https://github.com/FizzleDorf/ComfyUI_FizzNodes/wiki) for instructions on usage of these nodes as well as handy resources you can use in your projects!**
+
+
+## Installation
+
+For the easiest install experience, install the [ComfyUI Manager](https://github.com/ltdrdata/ComfyUI-Manager) and use that to automate the installation process.
+Otherwise, to manually install, simply clone the repo into the custom_nodes directory with this command:
+```
+git clone https://github.com/FizzleDorf/ComfyUI_FizzNodes.git
+```
+and install the requirements using:
+```
+.\python_embed\python.exe -s -m pip install -r requirements.txt
+```
+If you are using a venv, make sure you have it activated before installation and use:
+```
+pip install -r requirements.txt
+```
+
+The nodes can be accessed in the FizzNodes section of the node menu. You can also use the node search to find the nodes you are looking for.
+
+-----
+
+## Examples
+Some examples using the prompt and value schedulers using base comfyui.
+
+### Simple Animation Workflow
+This example showcases making animations with only scheduled prompts. This method only uses 4.7 GB of memory and makes use of deterministic samplers (Euler in this case).
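+
+As a rough illustration of the schedule format these nodes consume (the frame keys and prompt text here are made up), a prompt schedule looks like:
+
+```
+"0" :"a lake in the morning, thick fog",
+"60" :"a lake at sunset, golden light",
+"120" :"a lake at night, starry sky"
+```
+
+Expressions wrapped in backticks (for example `` `0.5+0.5*t/max_f` ``) are evaluated per frame before conditioning.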
+
+
+
+
+
+Drag and drop the image in this link into ComfyUI to load the workflow or save the image and load it using the load button.
+
+[Txt2_Img_Example](https://github.com/FizzleDorf/ComfyUI_FizzNodes/assets/46942135/8899f25e-fbc8-423c-bef2-e7c5a91fb7f4)
+
+
+### Noisy Latent Comp Workflow
+This example showcases the [Noisy Latent Composition](https://comfyanonymous.github.io/ComfyUI_examples/noisy_latent_composition/) workflow. The value schedule node schedules the latent composite node's x position. You can also animate the subject while the composite node is being scheduled! A sample value schedule is shown below.
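+
+For reference, a value schedule (the format the value scheduling nodes consume; the x positions below are made up) looks like:
+
+```
+0:(0),
+60:(128),
+120:(256)
+```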
+
+
+
+
+Drag and drop the image in this link into ComfyUI to load the workflow or save the image and load it using the load button.
+
+[Latent_Comp_Example](https://github.com/FizzleDorf/ComfyUI_FizzNodes/assets/46942135/410fbd99-d06e-489a-b6f5-3b747acd3740)
+
+
+## Helpful tools
+
+Just a list of tools that you may find handy using these nodes.
+
+Link | Description
+--- | ---
+[Desmos Graphing Calculator](https://www.desmos.com/calculator) | Online graphing calculator. Handy for visualizing expressions.
+[Keyframe String Generator](https://www.chigozie.co.uk/keyframe-string-generator/) | Custom keyframe string generator that is compatible with the valueSchedule node.
+[Audio framesync](https://www.framesync.xyz/) | Audio-sync wave functions. Exports keyframes for the valueSchedule node.
+[SD-Parseq](https://github.com/rewbs/sd-parseq) | A powerful scheduling tool for audio sync and easy curve manipulation (my personal fave!)
+-----
+
+## Acknowledgments
+
+**A special thanks to:**
+
+-The developers of [Deforum](https://github.com/deforum-art/sd-webui-deforum) for providing code for these nodes and being overall awesome people!
+
+-Comfyanonymous and the rest of the [ComfyUI](https://github.com/comfyanonymous/ComfyUI/tree/master) contributors for a fantastic UI!
+
+-All the friends I met along the way who motivate me into action!
+
+-And you, the user! I hope you have fun using these nodes and exploring latent space.
diff --git a/custom_nodes/ComfyUI_FizzNodes/ScheduleFuncs.py b/custom_nodes/ComfyUI_FizzNodes/ScheduleFuncs.py
new file mode 100644
index 0000000000000000000000000000000000000000..63af241871887e17d6455fa2ca6788ffafd7c7ed
--- /dev/null
+++ b/custom_nodes/ComfyUI_FizzNodes/ScheduleFuncs.py
@@ -0,0 +1,419 @@
+#These nodes were made using code from the Deforum extension for A1111 webui
+#You can find the project here: https://github.com/deforum-art/sd-webui-deforum
+
+import numexpr
+import torch
+import numpy as np
+import pandas as pd
+import re
+import json
+
+#functions used by PromptSchedule nodes
+
+#Addweighted function from Comfyui
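+#Blends two conditionings linearly: out = strength*cond_to + (1-strength)*cond_from,
+#zero-padding the shorter tensor along the token axis so the shapes match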
+def addWeighted(conditioning_to, conditioning_from, conditioning_to_strength):
+ out = []
+
+ if len(conditioning_from) > 1:
+ print("Warning: ConditioningAverage conditioning_from contains more than 1 cond, only the first one will actually be applied to conditioning_to.")
+
+ cond_from = conditioning_from[0][0]
+ pooled_output_from = conditioning_from[0][1].get("pooled_output", None)
+
+ for i in range(len(conditioning_to)):
+ t1 = conditioning_to[i][0]
+ pooled_output_to = conditioning_to[i][1].get("pooled_output", pooled_output_from)
+
+ max_size = max(t1.shape[1], cond_from.shape[1])
+ t0 = pad_with_zeros(cond_from, max_size)
+ t1 = pad_with_zeros(t1, max_size)
+
+ tw = torch.mul(t1, conditioning_to_strength) + torch.mul(t0, (1.0 - conditioning_to_strength))
+ t_to = conditioning_to[i][1].copy()
+
+ if pooled_output_from is not None and pooled_output_to is not None:
+ # Pad pooled outputs if available
+ pooled_output_to = pad_with_zeros(pooled_output_to, max_size)
+ pooled_output_from = pad_with_zeros(pooled_output_from, max_size)
+ t_to["pooled_output"] = torch.mul(pooled_output_to, conditioning_to_strength) + torch.mul(pooled_output_from, (1.0 - conditioning_to_strength))
+ elif pooled_output_from is not None:
+ t_to["pooled_output"] = pooled_output_from
+
+ n = [tw, t_to]
+ out.append(n)
+
+ return out
+
+def pad_with_zeros(tensor, target_length):
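+    # e.g. a (1, 77, 768) conditioning padded to target_length=154 gains 77
+    # zero rows along dim 1, giving (1, 154, 768)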
+ current_length = tensor.shape[1]
+ if current_length < target_length:
+ padding = torch.zeros(tensor.shape[0], target_length - current_length, tensor.shape[2]).to(tensor.device)
+ tensor = torch.cat([tensor, padding], dim=1)
+ return tensor
+
+def reverseConcatenation(final_conditioning, final_pooled_output, max_frames):
+ # Split the final_conditioning and final_pooled_output tensors into their original components
+ cond_out = torch.split(final_conditioning, max_frames)
+ pooled_out = torch.split(final_pooled_output, max_frames)
+
+ return cond_out, pooled_out
+
+def check_is_number(value):
+ float_pattern = r'^(?=.)([+-]?([0-9]*)(\.([0-9]+))?)$'
+ return re.match(float_pattern, value)
+
+def split_weighted_subprompts(text, frame=0, pre_text='', app_text=''):
+ pre_text = str(pre_text)
+ app_text = str(app_text)
+
+ if "--neg" in pre_text:
+ pre_pos, pre_neg = pre_text.split("--neg")
+ else:
+ pre_pos, pre_neg = pre_text, ""
+
+ if "--neg" in app_text:
+ app_pos, app_neg = app_text.split("--neg")
+ else:
+ app_pos, app_neg = app_text, ""
+
+ # Check if the text is a string; if not, convert it to a string
+ if not isinstance(text, str):
+ text = str(text)
+
+ math_parser = re.compile("(?P<weight>(`[\S\s]*?`))", re.VERBOSE)
+
+ parsed_prompt = re.sub(math_parser, lambda m: str(parse_weight(m, frame)), text)
+
+ negative_prompts = ""
+ positive_prompts = ""
+
+ # Split the parsed prompt into positive and negative parts
+ prompt_split = parsed_prompt.split("--neg")
+ if len(prompt_split) > 1:
+ positive_prompts, negative_prompts = prompt_split[0], prompt_split[1]
+ else:
+ positive_prompts = prompt_split[0]
+
+ pos = {}
+ neg = {}
+ pos[frame] = (str(pre_pos) + " " + str(positive_prompts) + " " + str(app_pos))
+ neg[frame] = (str(pre_neg) + " " + str(negative_prompts) + " " + str(app_neg))
+ if pos[frame].endswith('0'):
+ pos[frame] = pos[frame][:-1]
+ if neg[frame].endswith('0'):
+ neg[frame] = neg[frame][:-1]
+
+ return pos, neg
+
+def parse_weight(match, frame=0, max_frames=0) -> float: #calculate weight steps for in-betweens
+ w_raw = match.group("weight")
+ max_f = max_frames # this line has to be left intact as it's in use by numexpr even though it looks like it doesn't
+ if w_raw is None:
+ return 1
+ if check_is_number(w_raw):
+ return float(w_raw)
+ else:
+ t = frame
+ if len(w_raw) < 3:
+ print('the value between the backticks is too short to be a math expression')
+ return 1
+ return float(numexpr.evaluate(w_raw[1:-1]))
+
+def prepare_prompt(prompt_series, max_frames, frame_idx, prompt_weight_1 = 0, prompt_weight_2 = 0, prompt_weight_3 = 0, prompt_weight_4 = 0): #calculate expressions from the text input and return a string
+ max_f = max_frames - 1
+ pattern = r'`.*?`' #set so the expression will be read between two backticks (``)
+ regex = re.compile(pattern)
+ prompt_parsed = str(prompt_series)
+ for match in regex.finditer(prompt_parsed):
+ matched_string = match.group(0)
+ parsed_string = matched_string.replace('t', f'{frame_idx}').replace("pw_a", f"{prompt_weight_1}").replace("pw_b", f"{prompt_weight_2}").replace("pw_c", f"{prompt_weight_3}").replace("pw_d", f"{prompt_weight_4}").replace("max_f", f"{max_f}").replace('`', '') #replace t, pw_a..pw_d, max_f and the backticks with their values
+ parsed_value = numexpr.evaluate(parsed_string)
+ prompt_parsed = prompt_parsed.replace(matched_string, str(parsed_value))
+ return prompt_parsed.strip()
+
+def interpolate_string(animation_prompts, max_frames, current_frame, pre_text, app_text, prompt_weight_1,
+ prompt_weight_2, prompt_weight_3,
+ prompt_weight_4): # parse the conditioning strength and determine in-betweens.
+ # Get prompts sorted by keyframe
+ max_f = max_frames # needed for numexpr even though it doesn't look like it's in use.
+ parsed_animation_prompts = {}
+ for key, value in animation_prompts.items():
+ if check_is_number(key): # default case 0:(1 + t %5), 30:(5-t%2)
+ parsed_animation_prompts[key] = value
+ else: # math on the left hand side case 0:(1 + t %5), maxKeyframes/2:(5-t%2)
+ parsed_animation_prompts[int(numexpr.evaluate(key))] = value
+
+ sorted_prompts = sorted(parsed_animation_prompts.items(), key=lambda item: int(item[0]))
+
+ # Setup containers for interpolated prompts
+ cur_prompt_series = pd.Series([np.nan for a in range(max_frames)])
+
+ # simple array for strength values
+ weight_series = [np.nan] * max_frames
+
+ # in case there is only one keyed prompt, set all prompts to that prompt
+ if len(sorted_prompts) == 1:
+ for i in range(0, len(cur_prompt_series)):
+ current_prompt = sorted_prompts[0][1]
+ cur_prompt_series[i] = str(pre_text) + " " + str(current_prompt) + " " + str(app_text)
+
+ # Initialized outside of loop for nan check
+ current_key = 0
+ next_key = 0
+
+ # For every keyframe prompt except the last
+ for i in range(0, len(sorted_prompts) - 1):
+ # Get current and next keyframe
+ current_key = int(sorted_prompts[i][0])
+ next_key = int(sorted_prompts[i + 1][0])
+
+ # Ensure there's no weird ordering issues or duplication in the animation prompts
+ # (unlikely because we sort above, and the json parser will strip dupes)
+ if current_key >= next_key:
+ print(
+ f"WARNING: Sequential prompt keyframes {i}:{current_key} and {i + 1}:{next_key} are not monotonously increasing; skipping interpolation.")
+ continue
+
+ # Get current and next keyframes' positive and negative prompts (if any)
+ current_prompt = sorted_prompts[i][1]
+
+ for f in range(current_key, next_key):
+ # add the appropriate prompts and weights to their respective containers.
+ cur_prompt_series[f] = ''
+ weight_series[f] = 0.0
+
+ cur_prompt_series[f] += (str(pre_text) + " " + str(current_prompt) + " " + str(app_text))
+
+ current_key = next_key
+ next_key = max_frames
+ # second loop to catch any nan runoff
+
+ for f in range(current_key, next_key):
+ # add the appropriate prompts and weights to their respective containers.
+ cur_prompt_series[f] = ''
+ cur_prompt_series[f] += (str(pre_text) + " " + str(current_prompt) + " " + str(app_text))
+
+ # Evaluate the current and next prompt's expressions
+ cur_prompt_series[current_frame] = prepare_prompt(cur_prompt_series[current_frame], max_frames, current_frame,
+ prompt_weight_1, prompt_weight_2, prompt_weight_3,
+ prompt_weight_4)
+
+ # Show the to/from prompts with evaluated expressions for transparency.
+ print("\n", "Max Frames: ", max_frames, "\n", "Current Prompt: ", cur_prompt_series[current_frame], "\n")
+
+ # Return the fully evaluated prompt string for the current frame.
+ return (cur_prompt_series[current_frame])
+def PoolAnimConditioning(cur_prompt, nxt_prompt, weight, clip):
+ if str(cur_prompt) == str(nxt_prompt):
+ tokens = clip.tokenize(str(cur_prompt))
+ cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
+ return [[cond, {"pooled_output": pooled}]]
+
+ if weight == 1:
+ tokens = clip.tokenize(str(cur_prompt))
+ cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
+ return [[cond, {"pooled_output": pooled}]]
+
+ if weight == 0:
+ tokens = clip.tokenize(str(nxt_prompt))
+ cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
+ return [[cond, {"pooled_output": pooled}]]
+ else:
+ tokens = clip.tokenize(str(nxt_prompt))
+ cond_from, pooled_from = clip.encode_from_tokens(tokens, return_pooled=True)
+ tokens = clip.tokenize(str(cur_prompt))
+ cond_to, pooled_to = clip.encode_from_tokens(tokens, return_pooled=True)
+ return addWeighted([[cond_to, {"pooled_output": pooled_to}]], [[cond_from, {"pooled_output": pooled_from}]], weight)
+
+def SDXLencode(clip, width, height, crop_w, crop_h, target_width, target_height, text_g, text_l):
+ tokens = clip.tokenize(text_g)
+ tokens["l"] = clip.tokenize(text_l)["l"]
+ if len(tokens["l"]) != len(tokens["g"]):
+ empty = clip.tokenize("")
+ while len(tokens["l"]) < len(tokens["g"]):
+ tokens["l"] += empty["l"]
+ while len(tokens["l"]) > len(tokens["g"]):
+ tokens["g"] += empty["g"]
+ cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
+ return [[cond, {"pooled_output": pooled, "width": width, "height": height, "crop_w": crop_w, "crop_h": crop_h, "target_width": target_width, "target_height": target_height}]]
+
+def interpolate_prompts_SDXL(animation_promptsG, animation_promptsL, max_frames, current_frame, clip, app_text_G, app_text_L, pre_text_G, pre_text_L, pw_a, pw_b, pw_c, pw_d, width, height, crop_w, crop_h, target_width, target_height, print_output): #parse the conditioning strength and determine in-betweens.
+ #Get prompts sorted by keyframe
+ max_f = max_frames #needed for numexpr even though it doesn't look like it's in use.
+ parsed_animation_promptsG = {}
+ parsed_animation_promptsL = {}
+ for key, value in animation_promptsG.items():
+ if check_is_number(key): #default case 0:(1 + t %5), 30:(5-t%2)
+ parsed_animation_promptsG[key] = value
+ else: #math on the left hand side case 0:(1 + t %5), maxKeyframes/2:(5-t%2)
+ parsed_animation_promptsG[int(numexpr.evaluate(key))] = value
+
+ sorted_prompts_G = sorted(parsed_animation_promptsG.items(), key=lambda item: int(item[0]))
+
+ for key, value in animation_promptsL.items():
+ if check_is_number(key): #default case 0:(1 + t %5), 30:(5-t%2)
+ parsed_animation_promptsL[key] = value
+ else: #math on the left hand side case 0:(1 + t %5), maxKeyframes/2:(5-t%2)
+ parsed_animation_promptsL[int(numexpr.evaluate(key))] = value
+
+ sorted_prompts_L = sorted(parsed_animation_promptsL.items(), key=lambda item: int(item[0]))
+
+ #Setup containers for interpolated prompts
+ cur_prompt_series_G = pd.Series([np.nan for a in range(max_frames)])
+ nxt_prompt_series_G = pd.Series([np.nan for a in range(max_frames)])
+
+ cur_prompt_series_L = pd.Series([np.nan for a in range(max_frames)])
+ nxt_prompt_series_L = pd.Series([np.nan for a in range(max_frames)])
+
+ #simple array for strength values
+ weight_series = [np.nan] * max_frames
+
+ #in case there is only one keyed prompt, set all prompts to that prompt
+ if len(sorted_prompts_G) == 1:
+ for i in range(0, len(cur_prompt_series_G)):
+ current_prompt_G = sorted_prompts_G[0][1]
+ cur_prompt_series_G[i] = str(pre_text_G) + " " + str(current_prompt_G) + " " + str(app_text_G)
+ nxt_prompt_series_G[i] = str(pre_text_G) + " " + str(current_prompt_G) + " " + str(app_text_G)
+
+ if len(sorted_prompts_L) == 1:
+ for i in range(0, len(cur_prompt_series_L)):
+ current_prompt_L = sorted_prompts_L[0][1]
+ cur_prompt_series_L[i] = str(pre_text_L) + " " + str(current_prompt_L) + " " + str(app_text_L)
+ nxt_prompt_series_L[i] = str(pre_text_L) + " " + str(current_prompt_L) + " " + str(app_text_L)
+
+
+
+ #Initialized outside of loop for nan check
+ current_key = 0
+ next_key = 0
+
+ # For every keyframe prompt except the last
+ for i in range(0, len(sorted_prompts_G) - 1):
+ # Get current and next keyframe
+ current_key = int(sorted_prompts_G[i][0])
+ next_key = int(sorted_prompts_G[i + 1][0])
+
+ # Ensure there's no weird ordering issues or duplication in the animation prompts
+ # (unlikely because we sort above, and the json parser will strip dupes)
+ if current_key >= next_key:
+ print(f"WARNING: Sequential prompt keyframes {i}:{current_key} and {i + 1}:{next_key} are not monotonously increasing; skipping interpolation.")
+ continue
+
+ # Get current and next keyframes' positive and negative prompts (if any)
+ current_prompt_G = sorted_prompts_G[i][1]
+ next_prompt_G = sorted_prompts_G[i + 1][1]
+
+ # Calculate how much to shift the weight from current to next prompt at each frame.
+ weight_step = 1 / (next_key - current_key)
+
+ for f in range(current_key, next_key):
+ next_weight = weight_step * (f - current_key)
+ current_weight = 1 - next_weight
+
+ #add the appropriate prompts and weights to their respective containers.
+ cur_prompt_series_G[f] = ''
+ nxt_prompt_series_G[f] = ''
+ weight_series[f] = 0.0
+
+ cur_prompt_series_G[f] += (str(pre_text_G) + " " + str(current_prompt_G) + " " + str(app_text_G))
+ nxt_prompt_series_G[f] += (str(pre_text_G) + " " + str(next_prompt_G) + " " + str(app_text_G))
+
+ weight_series[f] += current_weight
+
+ current_key = next_key
+ next_key = max_frames
+ current_weight = 0.0
+ #second loop to catch any nan runoff
+ for f in range(current_key, next_key):
+ next_weight = weight_step * (f - current_key)
+
+ #add the appropriate prompts and weights to their respective containers.
+ cur_prompt_series_G[f] = ''
+ nxt_prompt_series_G[f] = ''
+ weight_series[f] = current_weight
+
+ cur_prompt_series_G[f] += (str(pre_text_G) + " " + str(current_prompt_G) + " " + str(app_text_G))
+ nxt_prompt_series_G[f] += (str(pre_text_G) + " " + str(next_prompt_G) + " " + str(app_text_G))
+
+
+ #Reset outside of loop for nan check
+ current_key = 0
+ next_key = 0
+
+ # For every keyframe prompt except the last
+ for i in range(0, len(sorted_prompts_L) - 1):
+ # Get current and next keyframe
+ current_key = int(sorted_prompts_L[i][0])
+ next_key = int(sorted_prompts_L[i + 1][0])
+
+ # Ensure there's no weird ordering issues or duplication in the animation prompts
+ # (unlikely because we sort above, and the json parser will strip dupes)
+ if current_key >= next_key:
+ print(f"WARNING: Sequential prompt keyframes {i}:{current_key} and {i + 1}:{next_key} are not monotonously increasing; skipping interpolation.")
+ continue
+
+ # Get current and next keyframes' positive and negative prompts (if any)
+ current_prompt_L = sorted_prompts_L[i][1]
+ next_prompt_L = sorted_prompts_L[i + 1][1]
+
+ # Calculate how much to shift the weight from current to next prompt at each frame.
+ weight_step = 1 / (next_key - current_key)
+
+ for f in range(current_key, next_key):
+ next_weight = weight_step * (f - current_key)
+ current_weight = 1 - next_weight
+
+ #add the appropriate prompts and weights to their respective containers.
+ cur_prompt_series_L[f] = ''
+ nxt_prompt_series_L[f] = ''
+ weight_series[f] = 0.0
+
+ cur_prompt_series_L[f] += (str(pre_text_L) + " " + str(current_prompt_L) + " " + str(app_text_L))
+ nxt_prompt_series_L[f] += (str(pre_text_L) + " " + str(next_prompt_L) + " " + str(app_text_L))
+
+ weight_series[f] += current_weight
+
+ current_key = next_key
+ next_key = max_frames
+ current_weight = 0.0
+ #second loop to catch any nan runoff
+ for f in range(current_key, next_key):
+ next_weight = weight_step * (f - current_key)
+
+ #add the appropriate prompts and weights to their respective containers.
+ cur_prompt_series_L[f] = ''
+ nxt_prompt_series_L[f] = ''
+ weight_series[f] = current_weight
+
+ cur_prompt_series_L[f] += (str(pre_text_L) + " " + str(current_prompt_L) + " " + str(app_text_L))
+ nxt_prompt_series_L[f] += (str(pre_text_L) + " " + str(next_prompt_L) + " " + str(app_text_L))
+
+ #Evaluate the current and next prompt's expressions
+ cur_prompt_series_G[current_frame] = prepare_prompt(cur_prompt_series_G[current_frame], max_frames, current_frame, pw_a, pw_b, pw_c, pw_d)
+ nxt_prompt_series_G[current_frame] = prepare_prompt(nxt_prompt_series_G[current_frame], max_frames, current_frame, pw_a, pw_b, pw_c, pw_d)
+ cur_prompt_series_L[current_frame] = prepare_prompt(cur_prompt_series_L[current_frame], max_frames, current_frame, pw_a, pw_b, pw_c, pw_d)
+ nxt_prompt_series_L[current_frame] = prepare_prompt(nxt_prompt_series_L[current_frame], max_frames, current_frame, pw_a, pw_b, pw_c, pw_d)
+ if print_output:
+ #Show the to/from prompts with evaluated expressions for transparency.
+ print("\n", "G_Clip:", "\n", "Max Frames: ", max_frames, "\n", "Current Prompt: ", cur_prompt_series_G[current_frame], "\n", "Next Prompt: ", nxt_prompt_series_G[current_frame], "\n", "Strength : ", weight_series[current_frame], "\n")
+
+ print("\n", "L_Clip:", "\n", "Max Frames: ", max_frames, "\n", "Current Prompt: ", cur_prompt_series_L[current_frame], "\n", "Next Prompt: ", nxt_prompt_series_L[current_frame], "\n", "Strength : ", weight_series[current_frame], "\n")
+
+ #Output methods depending if the prompts are the same or if the current frame is a keyframe.
+ #if it is an in-between frame and the prompts differ, composable diffusion will be performed.
+ current_cond = SDXLencode(clip, width, height, crop_w, crop_h, target_width, target_height, cur_prompt_series_G[current_frame], cur_prompt_series_L[current_frame])
+
+ if str(cur_prompt_series_G[current_frame]) == str(nxt_prompt_series_G[current_frame]) and str(cur_prompt_series_L[current_frame]) == str(nxt_prompt_series_L[current_frame]):
+ return current_cond
+
+ if weight_series[current_frame] == 1:
+ return current_cond
+
+ if weight_series[current_frame] == 0:
+ next_cond = SDXLencode(clip, width, height, crop_w, crop_h, target_width, target_height, nxt_prompt_series_G[current_frame], nxt_prompt_series_L[current_frame])
+ return next_cond
+
+ else:
+ next_cond = SDXLencode(clip, width, height, crop_w, crop_h, target_width, target_height, nxt_prompt_series_G[current_frame], nxt_prompt_series_L[current_frame])
+ return addWeighted(current_cond, next_cond, weight_series[current_frame])
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI_FizzNodes/ScheduledNodes.py b/custom_nodes/ComfyUI_FizzNodes/ScheduledNodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..34ee501fb67be6f3ed909710b77704eaeeb5660a
--- /dev/null
+++ b/custom_nodes/ComfyUI_FizzNodes/ScheduledNodes.py
@@ -0,0 +1,651 @@
+#These nodes were made using code from the Deforum extension for A1111 webui
+#You can find the project here: https://github.com/deforum-art/sd-webui-deforum
+import comfy
+import numexpr
+import torch
+import numpy as np
+import pandas as pd
+import re
+import json
+
+
+from .ScheduleFuncs import (
+ check_is_number, interpolate_prompts_SDXL, PoolAnimConditioning,
+ interpolate_string, addWeighted, reverseConcatenation, split_weighted_subprompts
+)
+from .BatchFuncs import interpolate_prompt_series, BatchPoolAnimConditioning, BatchInterpolatePromptsSDXL, batch_split_weighted_subprompts #, BatchGLIGENConditioning
+from .ValueFuncs import batch_get_inbetweens, batch_parse_key_frames, parse_key_frames, get_inbetweens, sanitize_value
+#Max resolution value for Gligen area calculation.
+MAX_RESOLUTION=8192
+
+defaultPrompt=""""0" :"",
+"12" :"",
+"24" :"",
+"36" :"",
+"48" :"",
+"60" :"",
+"72" :"",
+"84" :"",
+"96" :"",
+"108" :"",
+"120" :""
+"""
+
+defaultValue="""0:(0),
+12:(0),
+24:(0),
+36:(0),
+48:(0),
+60:(0),
+72:(0),
+84:(0),
+96:(0),
+108:(0),
+120:(0)
+"""
+
+#This node parses the user's formatted prompt, sequences the current prompt,
+#next prompt, and conditioning strength, evaluates expressions in the prompts,
+#and then returns the current, next, or blended conditioning for the frame.
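+#
+#Example schedule (hypothetical) for the "text" widget, in the Deforum-style
+#'"frame": "prompt"' format; backtick expressions are evaluated per frame
+#(t = current frame, pw_a..pw_d are the weight inputs):
+#  "0"  : "a photo of a cat",
+#  "60" : "a photo of a dog, (smiling:`0.5 + pw_a`)",
+#  "120": "a photo of a fox"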
+class PromptSchedule:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"text": ("STRING", {"multiline": True, "default":defaultPrompt}),
+ "clip": ("CLIP", ),
+ "max_frames": ("INT", {"default": 120.0, "min": 1.0, "max": 999999.0, "step": 1.0}),
+ "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 999999.0, "step": 1.0,}),
+ "print_output":("BOOLEAN", {"default": False}),},# "forceInput": True}),},
+ "optional": {"pre_text": ("STRING", {"multiline": True,}),# "forceInput": True}),
+ "app_text": ("STRING", {"multiline": True,}),# "forceInput": True}),
+ "pw_a": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}), #"forceInput": True }),
+ "pw_b": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}), #"forceInput": True }),
+ "pw_c": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}), #"forceInput": True }),
+ "pw_d": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}), #"forceInput": True }),
+ }}
+
+ RETURN_TYPES = ("CONDITIONING", "CONDITIONING",)
+ RETURN_NAMES = ("POS", "NEG",)
+ FUNCTION = "animate"
+ CATEGORY = "FizzNodes 📅🅕🅝/ScheduleNodes"
+
+ def animate(self, text, max_frames, print_output, current_frame, clip, pw_a=0, pw_b=0, pw_c=0, pw_d=0, pre_text='', app_text=''):
+ current_frame = current_frame % max_frames
+ inputText = str("{" + text + "}")
+ inputText = re.sub(r',\s*}', '}', inputText)
+ animation_prompts = json.loads(inputText.strip())
+ start_frame = 0
+ pos, neg = batch_split_weighted_subprompts(animation_prompts, pre_text, app_text)
+
+ pos_cur_prompt, pos_nxt_prompt, weight = interpolate_prompt_series(pos, max_frames, start_frame, pre_text, app_text, pw_a,
+ pw_b, pw_c, pw_d, print_output)
+ pc = PoolAnimConditioning(pos_cur_prompt[current_frame], pos_nxt_prompt[current_frame], weight[current_frame], clip)
+
+ neg_cur_prompt, neg_nxt_prompt, weight = interpolate_prompt_series(neg, max_frames, start_frame, pre_text, app_text, pw_a,
+ pw_b, pw_c, pw_d, print_output)
+ nc = PoolAnimConditioning(neg_cur_prompt[current_frame], neg_nxt_prompt[current_frame], weight[current_frame], clip)
+
+ return (pc, nc,)
+
+class BatchPromptSchedule:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"text": ("STRING", {"multiline": True, "default": defaultPrompt}),
+ "clip": ("CLIP",),
+ "max_frames": ("INT", {"default": 120.0, "min": 1.0, "max": 999999.0, "step": 1.0}),
+ "print_output":("BOOLEAN", {"default": False}),},
+ # "forceInput": True}),},
+ "optional": {"pre_text": ("STRING", {"multiline": True}), # "forceInput": True}),
+ "app_text": ("STRING", {"multiline": True}), # "forceInput": True}),
+ "start_frame": ("INT", {"default": 0, "min": 0, "max": 9999, "step": 1, }),
+ "pw_a": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ "pw_b": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ "pw_c": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ "pw_d": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ }}
+
+ RETURN_TYPES = ("CONDITIONING", "CONDITIONING",)
+ RETURN_NAMES = ("POS", "NEG",)
+ FUNCTION = "animate"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/BatchScheduleNodes"
+
+ def animate(self, text, max_frames, print_output, clip, start_frame, pw_a, pw_b, pw_c, pw_d, pre_text='', app_text=''):
+ inputText = str("{" + text + "}")
+ inputText = re.sub(r',\s*}', '}', inputText)
+ max_frames += start_frame
+ animation_prompts = json.loads(inputText.strip())
+ pos, neg = batch_split_weighted_subprompts(animation_prompts, pre_text, app_text)
+
+ pos_cur_prompt, pos_nxt_prompt, weight = interpolate_prompt_series(pos, max_frames, start_frame, pre_text, app_text, pw_a, pw_b, pw_c, pw_d, print_output)
+ pc = BatchPoolAnimConditioning( pos_cur_prompt, pos_nxt_prompt, weight, clip,)
+
+ neg_cur_prompt, neg_nxt_prompt, weight = interpolate_prompt_series(neg, max_frames, start_frame, pre_text, app_text, pw_a, pw_b, pw_c, pw_d, print_output)
+ nc = BatchPoolAnimConditioning(neg_cur_prompt, neg_nxt_prompt, weight, clip, )
+
+ return (pc, nc, )
+
+class BatchPromptScheduleLatentInput:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"text": ("STRING", {"multiline": True, "default": defaultPrompt}),
+ "clip": ("CLIP",),
+ "num_latents": ("LATENT", ),
+ "print_output":("BOOLEAN", {"default": False}),},
+ # "forceInput": True}),},
+ "optional": {"pre_text": ("STRING", {"multiline": True, }), # "forceInput": True}),
+ "app_text": ("STRING", {"multiline": True, }), # "forceInput": True}),
+ "start_frame": ("INT", {"default": 0.0, "min": 0, "max": 9999, "step": 1, }),
+ "pw_a": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ "pw_b": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ "pw_c": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ "pw_d": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ }}
+
+ RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT", )
+ RETURN_NAMES = ("POS", "NEG", "INPUT_LATENTS",)
+ FUNCTION = "animate"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/BatchScheduleNodes"
+
+ def animate(self, text, num_latents, print_output, clip, start_frame, pw_a, pw_b, pw_c, pw_d, pre_text='', app_text=''):
+ max_frames = sum(tensor.size(0) for tensor in num_latents.values())
+ max_frames += start_frame
+ inputText = str("{" + text + "}")
+ inputText = re.sub(r',\s*}', '}', inputText)
+
+ animation_prompts = json.loads(inputText.strip())
+ pos, neg = batch_split_weighted_subprompts(animation_prompts, pre_text, app_text)
+
+ pos_cur_prompt, pos_nxt_prompt, weight = interpolate_prompt_series(pos, max_frames, start_frame, pre_text,
+ app_text, pw_a, pw_b, pw_c, pw_d,
+ print_output)
+ pc = BatchPoolAnimConditioning(pos_cur_prompt, pos_nxt_prompt, weight, clip, )
+
+ neg_cur_prompt, neg_nxt_prompt, weight = interpolate_prompt_series(neg, max_frames, start_frame, pre_text,
+ app_text, pw_a, pw_b, pw_c, pw_d,
+ print_output)
+ nc = BatchPoolAnimConditioning(neg_cur_prompt, neg_nxt_prompt, weight, clip, )
+
+        return (pc, nc, num_latents,)
+
+class StringSchedule:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"text": ("STRING", {"multiline": True, "default": defaultPrompt}),
+ "max_frames": ("INT", {"default": 120.0, "min": 1.0, "max": 999999.0, "step": 1.0}),
+ "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 999999.0, "step": 1.0, })},
+ # "forceInput": True}),},
+ "optional": {"pre_text": ("STRING", {"multiline": True, }), # "forceInput": True}),
+ "app_text": ("STRING", {"multiline": True, }), # "forceInput": True}),
+ "pw_a": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ "pw_b": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ "pw_c": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ "pw_d": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ }}
+
+ RETURN_TYPES = ("STRING",)
+ FUNCTION = "animate"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/ScheduleNodes"
+
+ def animate(self, text, max_frames, current_frame, pw_a=0, pw_b=0, pw_c=0, pw_d=0, pre_text='', app_text=''):
+ current_frame = current_frame % max_frames
+ inputText = str("{" + text + "}")
+ inputText = re.sub(r',\s*}', '}', inputText)
+ animation_prompts = json.loads(inputText.strip())
+ cur_prompt = interpolate_string(animation_prompts, max_frames, current_frame, pre_text,
+ app_text, pw_a, pw_b, pw_c, pw_d)
+ #c = PoolAnimConditioning(cur_prompt, nxt_prompt, weight, clip, )
+ return (cur_prompt,)
+
+class PromptScheduleSDXLRefiner:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "ascore": ("FLOAT", {"default": 6.0, "min": 0.0, "max": 1000.0, "step": 0.01}),
+ "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+ "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+ "text": ("STRING", {"multiline": True, "default":defaultPrompt}), "clip": ("CLIP", ),
+ }}
+ RETURN_TYPES = ("CONDITIONING",)
+ FUNCTION = "encode"
+
+ CATEGORY = "advanced/conditioning"
+
+ def encode(self, clip, ascore, width, height, text):
+ tokens = clip.tokenize(text)
+ cond, pooled = clip.encode_from_tokens(tokens, return_pooled=True)
+ return ([[cond, {"pooled_output": pooled, "aesthetic_score": ascore, "width": width,"height": height}]], )
+
+class BatchStringSchedule:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"text": ("STRING", {"multiline": True, "default": defaultPrompt}),
+ "max_frames": ("INT", {"default": 120.0, "min": 1.0, "max": 999999.0, "step": 1.0}),},
+ # "forceInput": True}),},
+ "optional": {"pre_text": ("STRING", {"multiline": True, }), # "forceInput": True}),
+ "app_text": ("STRING", {"multiline": True, }), # "forceInput": True}),
+ "pw_a": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ "pw_b": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ "pw_c": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ "pw_d": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ }}
+
+ RETURN_TYPES = ("STRING",)
+ FUNCTION = "animate"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/BatchScheduleNodes"
+
+ def animate(self, text, max_frames, pw_a=0, pw_b=0, pw_c=0, pw_d=0, pre_text='', app_text=''):
+ inputText = str("{" + text + "}")
+ inputText = re.sub(r',\s*}', '}', inputText)
+ start_frame = 0
+ animation_prompts = json.loads(inputText.strip())
+ cur_prompt_series, nxt_prompt_series, weight_series = interpolate_prompt_series(animation_prompts, max_frames, start_frame, pre_text,
+ app_text, pw_a, pw_b, pw_c, pw_d)
+ return (cur_prompt_series,)
+
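+#SDXL conditions on two text encoders: text_g feeds CLIP-G and text_l feeds
+#CLIP-L, each with its own pre/app text alongside the size/crop parameters.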
+class BatchPromptScheduleEncodeSDXL:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+ "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+ "crop_w": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
+ "crop_h": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
+ "target_width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+ "target_height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+ "text_g": ("STRING", {"multiline": True, }), "clip": ("CLIP", ),
+ "text_l": ("STRING", {"multiline": True, }), "clip": ("CLIP", ),
+ "max_frames": ("INT", {"default": 120.0, "min": 1.0, "max": 999999.0, "step": 1.0}),
+ "print_output":("BOOLEAN", {"default": False}),},
+ "optional": {"pre_text_G": ("STRING", {"multiline": True, }),# "forceInput": True}),
+ "app_text_G": ("STRING", {"multiline": True, }),# "forceInput": True}),
+ "pre_text_L": ("STRING", {"multiline": True, }),# "forceInput": True}),
+ "app_text_L": ("STRING", {"multiline": True, }),# "forceInput": True}),
+ "pw_a": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}), #"forceInput": True }),
+ "pw_b": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}), #"forceInput": True }),
+ "pw_c": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}), #"forceInput": True }),
+ "pw_d": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}), #"forceInput": True }),
+ }}
+ RETURN_TYPES = ("CONDITIONING",)
+ FUNCTION = "animate"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/BatchScheduleNodes"
+
+ def animate(self, clip, width, height, crop_w, crop_h, target_width, target_height, text_g, text_l, app_text_G, app_text_L, pre_text_G, pre_text_L, max_frames, print_output, pw_a, pw_b, pw_c, pw_d):
+ inputTextG = str("{" + text_g + "}")
+ inputTextL = str("{" + text_l + "}")
+ inputTextG = re.sub(r',\s*}', '}', inputTextG)
+ inputTextL = re.sub(r',\s*}', '}', inputTextL)
+ animation_promptsG = json.loads(inputTextG.strip())
+ animation_promptsL = json.loads(inputTextL.strip())
+ return (BatchInterpolatePromptsSDXL(animation_promptsG, animation_promptsL, max_frames, clip, app_text_G, app_text_L, pre_text_G, pre_text_L, pw_a, pw_b, pw_c, pw_d, width, height, crop_w, crop_h, target_width, target_height, print_output,),)
+
+class BatchPromptScheduleEncodeSDXLLatentInput:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+ "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+ "crop_w": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
+ "crop_h": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
+ "target_width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+ "target_height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+ "text_g": ("STRING", {"multiline": True, }), "clip": ("CLIP", ),
+ "text_l": ("STRING", {"multiline": True, }), "clip": ("CLIP", ),
+ "num_latents": ("LATENT", ),
+ "print_output":("BOOLEAN", {"default": False}),},
+ "optional": {"pre_text_G": ("STRING", {"multiline": True, }),# "forceInput": True}),
+ "app_text_G": ("STRING", {"multiline": True, }),# "forceInput": True}),
+ "pre_text_L": ("STRING", {"multiline": True, }),# "forceInput": True}),
+ "app_text_L": ("STRING", {"multiline": True, }),# "forceInput": True}),
+ "pw_a": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}), #"forceInput": True }),
+ "pw_b": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}), #"forceInput": True }),
+ "pw_c": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}), #"forceInput": True }),
+ "pw_d": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}), #"forceInput": True }),
+ }}
+ RETURN_TYPES = ("CONDITIONING", "LATENT",)
+ FUNCTION = "animate"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/BatchScheduleNodes"
+
+ def animate(self, clip, width, height, crop_w, crop_h, target_width, target_height, text_g, text_l, app_text_G, app_text_L, pre_text_G, pre_text_L, num_latents, print_output, pw_a, pw_b, pw_c, pw_d):
+ max_frames = sum(tensor.size(0) for tensor in num_latents.values())
+ inputTextG = str("{" + text_g + "}")
+ inputTextL = str("{" + text_l + "}")
+ inputTextG = re.sub(r',\s*}', '}', inputTextG)
+ inputTextL = re.sub(r',\s*}', '}', inputTextL)
+ animation_promptsG = json.loads(inputTextG.strip())
+ animation_promptsL = json.loads(inputTextL.strip())
+ return (BatchInterpolatePromptsSDXL(animation_promptsG, animation_promptsL, max_frames, clip, app_text_G, app_text_L, pre_text_G, pre_text_L, pw_a, pw_b, pw_c, pw_d, width, height, crop_w, crop_h, target_width, target_height, print_output, ), num_latents, )
+
+class PromptScheduleEncodeSDXL:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+ "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+ "crop_w": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
+ "crop_h": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
+ "target_width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+ "target_height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
+ "text_g": ("STRING", {"multiline": True, }), "clip": ("CLIP", ),
+ "text_l": ("STRING", {"multiline": True, }), "clip": ("CLIP", ),
+ "max_frames": ("INT", {"default": 120.0, "min": 1.0, "max": 999999.0, "step": 1.0}),
+ "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 999999.0, "step": 1.0}),
+ "print_output":("BOOLEAN", {"default": False})},
+ "optional": {"pre_text_G": ("STRING", {"multiline": True, }),# "forceInput": True}),
+ "app_text_G": ("STRING", {"multiline": True, }),# "forceInput": True}),
+ "pre_text_L": ("STRING", {"multiline": True, }),# "forceInput": True}),
+ "app_text_L": ("STRING", {"multiline": True, }),# "forceInput": True}),
+ "pw_a": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}), #"forceInput": True }),
+ "pw_b": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}), #"forceInput": True }),
+ "pw_c": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}), #"forceInput": True }),
+ "pw_d": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}), #"forceInput": True }),
+ }}
+ RETURN_TYPES = ("CONDITIONING",)
+ FUNCTION = "animate"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/ScheduleNodes"
+
+ def animate(self, clip, width, height, crop_w, crop_h, target_width, target_height, text_g, text_l, app_text_G, app_text_L, pre_text_G, pre_text_L, max_frames, current_frame, print_output, pw_a, pw_b, pw_c, pw_d):
+ current_frame = current_frame % max_frames
+ inputTextG = str("{" + text_g + "}")
+ inputTextL = str("{" + text_l + "}")
+ inputTextG = re.sub(r',\s*}', '}', inputTextG)
+ inputTextL = re.sub(r',\s*}', '}', inputTextL)
+ animation_promptsG = json.loads(inputTextG.strip())
+ animation_promptsL = json.loads(inputTextL.strip())
+ return (interpolate_prompts_SDXL(animation_promptsG, animation_promptsL, max_frames, current_frame, clip, app_text_G, app_text_L, pre_text_G, pre_text_L, pw_a, pw_b, pw_c, pw_d, width, height, crop_w, crop_h, target_width, target_height, print_output,),)
+
+# This node schedules the prompt using separate nodes as the keyframes.
+# The values in the prompt are evaluated in NodeFlowEnd.
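+# Example (hypothetical): a first node with num_frames=24 and max_frames=0
+# returns (24, '"0": "first prompt"'); feeding both outputs into a second node
+# with num_frames=24 returns (48, '"0": "first prompt", "48": "second prompt"'),
+# which PromptScheduleNodeFlowEnd wraps in braces and parses as JSON.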
+class PromptScheduleNodeFlow:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"text": ("STRING", {"multiline": True}),
+ "num_frames": ("INT", {"default": 24.0, "min": 0.0, "max": 9999.0, "step": 1.0}),},
+ "optional": {"in_text": ("STRING", {"multiline": False, }), # "forceInput": True}),
+ "max_frames": ("INT", {"default": 0.0, "min": 0.0, "max": 999999.0, "step": 1.0,})}} # "forceInput": True}),}}
+
+ RETURN_TYPES = ("INT","STRING",)
+ FUNCTION = "addString"
+ CATEGORY = "FizzNodes 📅🅕🅝/ScheduleNodes"
+
+ def addString(self, text, in_text='', max_frames=0, num_frames=0):
+ if in_text:
+ # Remove trailing comma from in_text if it exists
+ in_text = in_text.rstrip(',')
+
+ new_max = num_frames + max_frames
+
+        if max_frames == 0:
+            # First node in a chain: key this prompt at frame 0.
+            new_text = in_text + (', ' if in_text else '') + f'"{max_frames}": "{text}"'
+        else:
+            # Later nodes: key this prompt at the accumulated frame count.
+            new_text = in_text + (', ' if in_text else '') + f'"{new_max}": "{text}"'
+
+        return (new_max, new_text,)
+
+
+#Last node in the Node Flow for evaluating the json produced by the above node.
+class PromptScheduleNodeFlowEnd:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"text": ("STRING", {"multiline": False, "forceInput": True}),
+ "clip": ("CLIP", ),
+ "max_frames": ("INT", {"default": 0.0, "min": 0.0, "max": 999999.0, "step": 1.0,}),
+ "print_output": ("BOOLEAN", {"default": False}),
+ "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 999999.0, "step": 1.0,}),}, #"forceInput": True}),},
+ "optional": {"pre_text": ("STRING", {"multiline": True, }),#"forceInput": True}),
+ "app_text": ("STRING", {"multiline": True, }),#"forceInput": True}),
+ "pw_a": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}),# "forceInput": True}),
+ "pw_b": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}),# "forceInput": True}),
+ "pw_c": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}),# "forceInput": True}),
+ "pw_d": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}),# "forceInput": True}),
+ }}
+ RETURN_TYPES = ("CONDITIONING","CONDITIONING",)
+ RETURN_NAMES = ("POS", "NEG",)
+ FUNCTION = "animate"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/ScheduleNodes"
+
+ def animate(self, text, max_frames, print_output, current_frame, clip, pw_a = 0, pw_b = 0, pw_c = 0, pw_d = 0, pre_text = '', app_text = ''):
+ current_frame = current_frame % max_frames
+        if text[-1] == ",":
+            text = text[:-1]
+        if text[0] == ",":
+            text = text[1:]
+ start_frame = 0
+ inputText = str("{" + text + "}")
+ inputText = re.sub(r',\s*}', '}', inputText)
+ animation_prompts = json.loads(inputText.strip())
+ max_frames += start_frame
+ pos, neg = batch_split_weighted_subprompts(animation_prompts, pre_text, app_text)
+
+ pos_cur_prompt, pos_nxt_prompt, weight = interpolate_prompt_series(pos, max_frames, start_frame, pre_text, app_text, pw_a,
+ pw_b, pw_c, pw_d, print_output)
+ pc = PoolAnimConditioning(pos_cur_prompt[current_frame], pos_nxt_prompt[current_frame], weight[current_frame],
+ clip, )
+
+ neg_cur_prompt, neg_nxt_prompt, weight = interpolate_prompt_series(neg, max_frames, start_frame, pre_text, app_text, pw_a,
+ pw_b, pw_c, pw_d, print_output)
+ nc = PoolAnimConditioning(neg_cur_prompt[current_frame], neg_nxt_prompt[current_frame], weight[current_frame],
+ clip, )
+
+ return (pc, nc,)
+
+class BatchPromptScheduleNodeFlowEnd:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"text": ("STRING", {"multiline": False, "forceInput": True}),
+ "clip": ("CLIP", ),
+ "max_frames": ("INT", {"default": 0.0, "min": 0.0, "max": 999999.0, "step": 1.0,}),
+ "print_output": ("BOOLEAN", {"default": False}),
+ },
+ "optional": {"pre_text": ("STRING", {"multiline": False, }),#"forceInput": True}),
+ "app_text": ("STRING", {"multiline": False, }),#"forceInput": True}),
+ "pw_a": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}),# "forceInput": True}),
+ "pw_b": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}),# "forceInput": True}),
+ "pw_c": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}),# "forceInput": True}),
+ "pw_d": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1,}),# "forceInput": True}),
+ }}
+    RETURN_TYPES = ("CONDITIONING", "CONDITIONING",)
+    RETURN_NAMES = ("POS", "NEG",)
+
+ FUNCTION = "animate"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/BatchScheduleNodes"
+
+    def animate(self, text, max_frames, print_output, clip, start_frame=0, pw_a=0, pw_b=0, pw_c=0, pw_d=0, pre_text='',
+                app_text=''):
+ if text[-1] == ",":
+ text = text[:-1]
+ if text[0] == ",":
+ text = text[:0]
+ inputText = str("{" + text + "}")
+ inputText = re.sub(r',\s*}', '}', inputText)
+ animation_prompts = json.loads(inputText.strip())
+
+ max_frames += start_frame
+
+ pos, neg = batch_split_weighted_subprompts(animation_prompts, pre_text, app_text)
+
+ pos_cur_prompt, pos_nxt_prompt, weight = interpolate_prompt_series(pos, max_frames, start_frame, pre_text, app_text, pw_a,
+ pw_b, pw_c, pw_d, print_output)
+        # Batch conditioning consumes the full series, not a single frame.
+        pc = BatchPoolAnimConditioning(pos_cur_prompt, pos_nxt_prompt, weight, clip, )
+
+ neg_cur_prompt, neg_nxt_prompt, weight = interpolate_prompt_series(neg, max_frames, start_frame, pre_text, app_text, pw_a,
+ pw_b, pw_c, pw_d, print_output)
+        nc = BatchPoolAnimConditioning(neg_cur_prompt, neg_nxt_prompt, weight, clip, )
+
+ return (pc, nc,)
+
+class BatchGLIGENSchedule:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"conditioning_to": ("CONDITIONING",),
+ "clip": ("CLIP",),
+ "gligen_textbox_model": ("GLIGEN",),
+ "text": ("STRING", {"multiline": True, "default":defaultPrompt}),
+ "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
+ "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}),
+ "x": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
+ "y": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}),
+ "max_frames": ("INT", {"default": 120.0, "min": 1.0, "max": 999999.0, "step": 1.0}),
+ "print_output":("BOOLEAN", {"default": False})},
+ # "forceInput": True}),},
+ "optional": {"pre_text": ("STRING", {"multiline": True, }), # "forceInput": True}),
+ "app_text": ("STRING", {"multiline": True, }), # "forceInput": True}),
+ "pw_a": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ "pw_b": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ "pw_c": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ "pw_d": ("FLOAT", {"default": 0.0, "min": -9999.0, "max": 9999.0, "step": 0.1, }),
+ # "forceInput": True }),
+ }}
+
+ RETURN_TYPES = ("CONDITIONING",)
+ FUNCTION = "animate"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/BatchScheduleNodes"
+
+ def animate(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y, max_frames, print_output, pw_a, pw_b, pw_c, pw_d, pre_text='', app_text=''):
+ inputText = str("{" + text + "}")
+ inputText = re.sub(r',\s*}', '}', inputText)
+ animation_prompts = json.loads(inputText.strip())
+
+        cur_series, nxt_series, weight_series = interpolate_prompt_series(animation_prompts, max_frames, 0, pre_text, app_text, pw_a, pw_b, pw_c, pw_d, print_output)
+ out = []
+ for i in range(0, max_frames - 1):
+ # Calculate changes in x and y here, based on your logic
+ x_change = 8
+ y_change = 0
+
+ # Update x and y values
+ x += x_change
+ y += y_change
+            # Encode the scheduled prompt for this frame, not just the pre_text.
+            out.append(self.append(conditioning_to, clip, gligen_textbox_model, cur_series[i], width, height, x, y))
+
+ return (out,)
+
+ def append(self, conditioning_to, clip, gligen_textbox_model, text, width, height, x, y):
+ c = []
+ cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True)
+ for t in range(0, len(conditioning_to)):
+ n = [conditioning_to[t][0], conditioning_to[t][1].copy()]
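+            # GLIGEN position boxes live in latent space, so pixel sizes and
+            # offsets are divided by 8 (the VAE downscale factor).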
+ position_params = [(cond_pooled, height // 8, width // 8, y // 8, x // 8)]
+ prev = []
+ if "gligen" in n[1]:
+ prev = n[1]['gligen'][2]
+
+ n[1]['gligen'] = ("position", gligen_textbox_model, prev + position_params)
+ c.append(n)
+ return c
+
+#This node parses the user's text input into
+#interpolated floats. Expressions can be
+#entered and evaluated.
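+#
+#Example (hypothetical): "0:(0.0), 30:(sin(t/10)), 120:(1.0)" evaluates each
+#bracketed expression at its keyframe (t = frame index, max_f = max_frames - 1)
+#and linearly interpolates the frames in between.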
+class ValueSchedule:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"text": ("STRING", {"multiline": True, "default":defaultValue}),
+ "max_frames": ("INT", {"default": 120.0, "min": 1.0, "max": 999999.0, "step": 1.0}),
+ "current_frame": ("INT", {"default": 0.0, "min": 0.0, "max": 999999.0, "step": 1.0,}),# "forceInput": True}),
+ "print_output": ("BOOLEAN", {"default": False})}}
+ RETURN_TYPES = ("FLOAT", "INT")
+ FUNCTION = "animate"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/ScheduleNodes"
+
+ def animate(self, text, max_frames, current_frame, print_output):
+ current_frame = current_frame % max_frames
+ t = get_inbetweens(parse_key_frames(text, max_frames), max_frames)
+        if print_output is True:
+            print("ValueSchedule: ", t[current_frame], "\n", "current_frame: ", current_frame)
+ return (t[current_frame],int(t[current_frame]),)
+
+class BatchValueSchedule:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"text": ("STRING", {"multiline": True, "default": defaultValue}),
+ "max_frames": ("INT", {"default": 120.0, "min": 1.0, "max": 999999.0, "step": 1.0}),
+ "print_output": ("BOOLEAN", {"default": False})}}
+
+ RETURN_TYPES = ("FLOAT", "INT")
+ FUNCTION = "animate"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/BatchScheduleNodes"
+
+ def animate(self, text, max_frames, print_output):
+ t = batch_get_inbetweens(batch_parse_key_frames(text, max_frames), max_frames)
+ if print_output is True:
+ print("ValueSchedule: ", t)
+ return (t, list(map(int,t)),)
+
+class BatchValueScheduleLatentInput:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"text": ("STRING", {"multiline": True, "default": defaultValue}),
+ "num_latents": ("LATENT", ),
+ "print_output": ("BOOLEAN", {"default": False})}}
+
+ RETURN_TYPES = ("FLOAT", "INT", "LATENT", )
+ FUNCTION = "animate"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/BatchScheduleNodes"
+
+ def animate(self, text, num_latents, print_output):
+ num_elements = sum(tensor.size(0) for tensor in num_latents.values())
+ max_frames = num_elements
+ t = batch_get_inbetweens(batch_parse_key_frames(text, max_frames), max_frames)
+ if print_output is True:
+ print("ValueSchedule: ", t)
+ return (t, list(map(int,t)), num_latents, )
+
+# Expects a Batch Value Schedule list as input and exports an image batch
+# whose frames are picked from the input batch according to the values.
+class ImageBatchFromValueSchedule:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "images": ("IMAGE",),
+ "values": ("FLOAT", { "default": 1.0, "min": -1.0, "max": 1.0, "label": "values" }),
+ }
+ }
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "animate"
+ CATEGORY = "FizzNodes 📅🅕🅝/BatchScheduleNodes"
+
+    def animate(self, images, values):
+        # A bare float (no schedule attached) becomes a one-value list.
+        values = [values] if isinstance(values, float) else values
+        min_value, max_value = min(values), max(values)
+        # Normalize each value to an integer index into the image batch,
+        # guarding against a zero range when all values are equal.
+        value_range = (max_value - min_value) or 1
+        i = [round((x - min_value) / value_range * (images.shape[0] - 1)) for x in values]
+        return (images[i], )
diff --git a/custom_nodes/ComfyUI_FizzNodes/ValueFuncs.py b/custom_nodes/ComfyUI_FizzNodes/ValueFuncs.py
new file mode 100644
index 0000000000000000000000000000000000000000..026cdab2bdb466185c12718b104661760faeeba1
--- /dev/null
+++ b/custom_nodes/ComfyUI_FizzNodes/ValueFuncs.py
@@ -0,0 +1,113 @@
+import numexpr
+import torch
+import numpy as np
+import pandas as pd
+import re
+import json
+
+from .ScheduleFuncs import check_is_number
+
+
+def sanitize_value(value):
+ # Remove single quotes, double quotes, and parentheses
+ value = value.replace("'", "").replace('"', "").replace('(', "").replace(')', "")
+ return value
+
+
+def get_inbetweens(key_frames, max_frames, integer=False, interp_method='Linear', is_single_string=False):
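+    # Build a series that is NaN everywhere except at keyframes, evaluate any
+    # numexpr expressions at those keyframes (t = frame index), then let pandas
+    # interpolate the gaps (falling back from Cubic/Quadratic to Linear when
+    # there are too few keyframes).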
+ key_frame_series = pd.Series([np.nan for a in range(max_frames)])
+ max_f = max_frames - 1 # needed for numexpr even though it doesn't look like it's in use.
+ value_is_number = False
+ for i in range(0, max_frames):
+ if i in key_frames:
+ value = key_frames[i]
+ value_is_number = check_is_number(sanitize_value(value))
+ if value_is_number: # if it's only a number, leave the rest for the default interpolation
+ key_frame_series[i] = sanitize_value(value)
+ if not value_is_number:
+ t = i
+ # workaround for values formatted like 0:("I am test") //used for sampler schedules
+ key_frame_series[i] = numexpr.evaluate(value) if not is_single_string else sanitize_value(value)
+ elif is_single_string: # take previous string value and replicate it
+ key_frame_series[i] = key_frame_series[i - 1]
+ key_frame_series = key_frame_series.astype(float) if not is_single_string else key_frame_series # as string
+
+ if interp_method == 'Cubic' and len(key_frames.items()) <= 3:
+ interp_method = 'Quadratic'
+ if interp_method == 'Quadratic' and len(key_frames.items()) <= 2:
+ interp_method = 'Linear'
+
+ key_frame_series[0] = key_frame_series[key_frame_series.first_valid_index()]
+ key_frame_series[max_frames - 1] = key_frame_series[key_frame_series.last_valid_index()]
+ key_frame_series = key_frame_series.interpolate(method=interp_method.lower(), limit_direction='both')
+
+ if integer:
+ return key_frame_series.astype(int)
+ return key_frame_series
+
+
+def parse_key_frames(string, max_frames):
+    # Because math functions (e.g. sin(t)) can themselves use brackets, each
+    # comma-separated entry is split at the colon: the key (optionally an
+    # expression) becomes the frame number, and the bracketed value after it,
+    # running to the next comma or end of line, is kept verbatim.
+ frames = dict()
+ for match_object in string.split(","):
+ frameParam = match_object.split(":")
+ max_f = max_frames - 1 # needed for numexpr even though it doesn't look like it's in use.
+ frame = int(sanitize_value(frameParam[0])) if check_is_number(
+ sanitize_value(frameParam[0].strip())) else int(numexpr.evaluate(
+ frameParam[0].strip().replace("'", "", 1).replace('"', "", 1)[::-1].replace("'", "", 1).replace('"', "", 1)[::-1]))
+ frames[frame] = frameParam[1].strip()
+ if frames == {} and len(string) != 0:
+ raise RuntimeError('Key Frame string not correctly formatted')
+ return frames
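+# Example (hypothetical): parse_key_frames("0:(0.0), 60:(sin(t/12))", 120)
+# yields {0: '(0.0)', 60: '(sin(t/12))'}; get_inbetweens then evaluates each
+# value at its keyframe and interpolates the frames between them.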
+
+def batch_get_inbetweens(key_frames, max_frames, integer=False, interp_method='Linear', is_single_string=False):
+ key_frame_series = pd.Series([np.nan for a in range(max_frames)])
+ max_f = max_frames - 1 # needed for numexpr even though it doesn't look like it's in use.
+ value_is_number = False
+ for i in range(0, max_frames):
+ if i in key_frames:
+ value = str(key_frames[i]) # Convert to string to ensure it's treated as an expression
+ value_is_number = check_is_number(sanitize_value(value))
+ if value_is_number:
+ key_frame_series[i] = sanitize_value(value)
+ if not value_is_number:
+ t = i
+ # workaround for values formatted like 0:("I am test") //used for sampler schedules
+ key_frame_series[i] = numexpr.evaluate(value) if not is_single_string else sanitize_value(value)
+ elif is_single_string: # take previous string value and replicate it
+ key_frame_series[i] = key_frame_series[i - 1]
+ key_frame_series = key_frame_series.astype(float) if not is_single_string else key_frame_series # as string
+
+ if interp_method == 'Cubic' and len(key_frames.items()) <= 3:
+ interp_method = 'Quadratic'
+ if interp_method == 'Quadratic' and len(key_frames.items()) <= 2:
+ interp_method = 'Linear'
+
+ key_frame_series[0] = key_frame_series[key_frame_series.first_valid_index()]
+ key_frame_series[max_frames - 1] = key_frame_series[key_frame_series.last_valid_index()]
+ key_frame_series = key_frame_series.interpolate(method=interp_method.lower(), limit_direction='both')
+
+ if integer:
+ return key_frame_series.astype(int)
+ return key_frame_series
+
+def batch_parse_key_frames(string, max_frames):
+    # Because math functions (e.g. sin(t)) can themselves use brackets, each
+    # comma-separated entry is split at the colon: the key (optionally an
+    # expression) becomes the frame number, and the bracketed value after it,
+    # running to the next comma or end of line, is kept verbatim.
+ string = re.sub(r',\s*$', '', string)
+ frames = dict()
+ for match_object in string.split(","):
+ frameParam = match_object.split(":")
+ max_f = max_frames - 1 # needed for numexpr even though it doesn't look like it's in use.
+ frame = int(sanitize_value(frameParam[0])) if check_is_number(
+ sanitize_value(frameParam[0].strip())) else int(numexpr.evaluate(
+ frameParam[0].strip().replace("'", "", 1).replace('"', "", 1)[::-1].replace("'", "", 1).replace('"', "",1)[::-1]))
+ frames[frame] = frameParam[1].strip()
+ if frames == {} and len(string) != 0:
+ raise RuntimeError('Key Frame string not correctly formatted')
+ return frames
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI_FizzNodes/WaveNodes.py b/custom_nodes/ComfyUI_FizzNodes/WaveNodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..b79262a14cf1c7483110cc3ec9288d32e6278709
--- /dev/null
+++ b/custom_nodes/ComfyUI_FizzNodes/WaveNodes.py
@@ -0,0 +1,189 @@
+import numpy as np
+
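+# Each node below maps current_frame through a simple periodic (or linear)
+# function and returns the result as both FLOAT and INT, for driving schedule
+# values one frame at a time.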
+class Lerp:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"num_Images": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
+ "current_frame": ("INT", {"default": 1.0, "min": 0.0, "max": 9999, "step": 1.0}),
+ }}
+ RETURN_TYPES = ("FLOAT", "INT",)
+ FUNCTION = "lerp"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/WaveNodes"
+
+ def lerp(self, num_Images, strength, current_frame):
+ step = strength/num_Images
+ output = strength - (step * current_frame)
+ return (output, int(output),)
+
+class SinWave:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"phase": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "amplitude": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.1}),
+ "x_translation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "y_translation": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.05}),
+ "current_frame": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ }}
+ RETURN_TYPES = ("FLOAT","INT",)
+ FUNCTION = "Wave"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/WaveNodes"
+
+ def Wave(self, phase, amplitude, x_translation, y_translation, current_frame):
+ output = (y_translation+(amplitude*(np.sin((2*np.pi*current_frame/phase-x_translation)))))
+ print(output)
+ return (output, int(output),)
+
+class InvSinWave:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"phase": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "amplitude": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.1}),
+ "x_translation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "y_translation": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.05}),
+ "current_frame": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ }}
+ RETURN_TYPES = ("FLOAT", "INT")
+ FUNCTION = "Wave"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/WaveNodes"
+
+ def Wave(self, phase, amplitude, x_translation, y_translation, current_frame):
+ output = (y_translation+(amplitude*-(np.sin(-1*(2*np.pi*current_frame/phase-x_translation)))))
+ print(output)
+ return (output, int(output),)
+
+class CosWave:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"phase": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "amplitude": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.1}),
+ "x_translation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "y_translation": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.05}),
+ "current_frame": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ }}
+ RETURN_TYPES = ("FLOAT", "INT", )
+ FUNCTION = "Wave"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/WaveNodes"
+
+ def Wave(self, phase, amplitude, x_translation, y_translation, current_frame):
+ output = (y_translation+(amplitude*(np.cos((2*np.pi*current_frame/phase-x_translation)))))
+ print(output)
+ return (output, int(output),)
+
+class InvCosWave:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"phase": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "amplitude": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.1}),
+ "x_translation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "y_translation": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.05}),
+ "current_frame": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ }}
+ RETURN_TYPES = ("FLOAT", "INT", )
+ FUNCTION = "Wave"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/WaveNodes"
+
+ def Wave(self, phase, amplitude, x_translation, y_translation, current_frame):
+ output = (y_translation+(amplitude*-(np.cos(-1*(2*np.pi*current_frame/phase-x_translation)))))
+ print(output)
+ return (output, int(output),)
+
+class SquareWave:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"phase": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "amplitude": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.1}),
+ "x_translation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "y_translation": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.05}),
+ "current_frame": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ }}
+ RETURN_TYPES = ("FLOAT", "INT",)
+ FUNCTION = "Wave"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/WaveNodes"
+
+ def Wave(self, phase, amplitude, x_translation, y_translation, current_frame):
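+        # 0**0**(-s) is a branchless square-wave trick: it evaluates to 1 when
+        # s = sin(...) is negative and to 0 when it is positive, toggling the
+        # output between y_translation and y_translation + amplitude.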
+ output = (y_translation+(amplitude*0**0**(0-np.sin((np.pi*current_frame/phase-x_translation)))))
+ print(output)
+ return (output, int(output),)
+
+class SawtoothWave:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"phase": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "step_increment": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.1}),
+ "x_translation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "start_value": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.05}),
+ "current_frame": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ }}
+ RETURN_TYPES = ("FLOAT", "INT", )
+ FUNCTION = "Wave"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/WaveNodes"
+
+ def Wave(self, phase, step_increment, x_translation, start_value, current_frame):
+ output = (start_value+(step_increment*(current_frame%phase)-x_translation))
+ print(output)
+ return (output, int(output),)
+
+class TriangleWave:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"phase": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "amplitude": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.1}),
+ "x_translation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "y_translation": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.05}),
+ "current_frame": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ }}
+ RETURN_TYPES = ("FLOAT", "INT",)
+ FUNCTION = "Wave"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/WaveNodes"
+
+ def Wave(self, phase, amplitude, x_translation, y_translation, current_frame):
+ output = (y_translation+amplitude/np.pi*(np.arcsin(np.sin(2*np.pi/phase*current_frame-x_translation))))
+ print(output)
+ return (output, int(output),)
+
+class AbsCosWave:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"phase": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "amplitude": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.1}),
+ "x_translation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "max_value": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.05}),
+ "current_frame": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ }}
+ RETURN_TYPES = ("FLOAT", "INT")
+ FUNCTION = "Wave"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/WaveNodes"
+
+ def Wave(self, phase, amplitude, x_translation, max_value, current_frame):
+ output = (max_value-(np.abs(np.cos(current_frame/phase))*amplitude))
+ print(output)
+ return (output, int(output),)
+
+class AbsSinWave:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {"phase": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "amplitude": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.1}),
+ "x_translation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ "max_value": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 9999.0, "step": 0.05}),
+ "current_frame": ("INT", {"default": 1.0, "min": 0.0, "max": 9999.0, "step": 1.0}),
+ }}
+ RETURN_TYPES = ("FLOAT", "INT")
+ FUNCTION = "Wave"
+
+ CATEGORY = "FizzNodes 📅🅕🅝/WaveNodes"
+
+ def Wave(self, phase, amplitude, x_translation, max_value, current_frame):
+ output = (max_value-(np.abs(np.sin(current_frame/phase))*amplitude))
+ print(output)
+ return (output, int(output),)
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI_FizzNodes/__init__.py b/custom_nodes/ComfyUI_FizzNodes/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..706b885dc2bdf19e58aee42f45201a99c1577e71
--- /dev/null
+++ b/custom_nodes/ComfyUI_FizzNodes/__init__.py
@@ -0,0 +1,139 @@
+# Made by Davemane42#0042 for ComfyUI
+import os
+import subprocess
+import importlib.util
+import sys
+import filecmp
+import shutil
+
+import __main__
+
+python = sys.executable
+
+
+extensions_folder = os.path.join(os.path.dirname(os.path.realpath(__main__.__file__)),
+                         "web" + os.sep + "extensions" + os.sep + "FizzleDorf")
+javascript_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), "javascript")
+
+if not os.path.exists(extensions_folder):
+    print('Making the "web\\extensions\\FizzleDorf" folder')
+    os.makedirs(extensions_folder)
+
+result = filecmp.dircmp(javascript_folder, extensions_folder)
+
+if result.left_only or result.diff_files:
+ print('Update to javascripts files detected')
+ file_list = list(result.left_only)
+ file_list.extend(x for x in result.diff_files if x not in file_list)
+
+ for file in file_list:
+ print(f'Copying {file} to extensions folder')
+ src_file = os.path.join(javascript_folder, file)
+        dst_file = os.path.join(extensions_folder, file)
+ if os.path.exists(dst_file):
+ os.remove(dst_file)
+ #print("disabled")
+ shutil.copy(src_file, dst_file)
+
+
+def is_installed(package, package_overwrite=None):
+ try:
+ spec = importlib.util.find_spec(package)
+    except ModuleNotFoundError:
+        spec = None
+
+ package = package_overwrite or package
+
+ if spec is None:
+ print(f"Installing {package}...")
+ command = f'"{python}" -m pip install {package}'
+
+ result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=os.environ)
+
+ if result.returncode != 0:
+ print(f"Couldn't install\nCommand: {command}\nError code: {result.returncode}")
+
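+# is_installed("pandas") / is_installed("numexpr") can be called here to pull
+# in the runtime dependencies on demand; they are also listed in
+# requirements.txt.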
+from .WaveNodes import Lerp, SinWave, InvSinWave, CosWave, InvCosWave, SquareWave, SawtoothWave, TriangleWave, AbsCosWave, AbsSinWave
+from .ScheduledNodes import (
+ ValueSchedule, PromptSchedule, PromptScheduleNodeFlow, PromptScheduleNodeFlowEnd, PromptScheduleEncodeSDXL,
+ StringSchedule, BatchPromptSchedule, BatchValueSchedule, BatchPromptScheduleEncodeSDXL, BatchStringSchedule,
+ BatchValueScheduleLatentInput, BatchPromptScheduleEncodeSDXLLatentInput, BatchPromptScheduleLatentInput,
+ ImageBatchFromValueSchedule
+ #, BatchPromptScheduleNodeFlowEnd #, BatchGLIGENSchedule
+)
+from .FrameNodes import FrameConcatenate, InitNodeFrame, NodeFrame, StringConcatenate
+from .HelperNodes import ConcatStringSingle, convertKeyframeKeysToBatchKeys, CalculateFrameOffset
+
+NODE_CLASS_MAPPINGS = {
+ "Lerp": Lerp,
+ "SinWave": SinWave,
+ "InvSinWave": InvSinWave,
+ "CosWave": CosWave,
+ "InvCosWave": InvCosWave,
+ "SquareWave":SquareWave,
+ "SawtoothWave": SawtoothWave,
+ "TriangleWave": TriangleWave,
+ "AbsCosWave": AbsCosWave,
+ "AbsSinWave": AbsSinWave,
+ "PromptSchedule": PromptSchedule,
+ "ValueSchedule": ValueSchedule,
+ "PromptScheduleNodeFlow": PromptScheduleNodeFlow,
+ "PromptScheduleNodeFlowEnd": PromptScheduleNodeFlowEnd,
+ "PromptScheduleEncodeSDXL":PromptScheduleEncodeSDXL,
+ "StringSchedule":StringSchedule,
+ "BatchPromptSchedule": BatchPromptSchedule,
+ "BatchValueSchedule": BatchValueSchedule,
+ "BatchPromptScheduleEncodeSDXL": BatchPromptScheduleEncodeSDXL,
+ "BatchStringSchedule": BatchStringSchedule,
+ "BatchValueScheduleLatentInput": BatchValueScheduleLatentInput,
+ "BatchPromptScheduleSDXLLatentInput":BatchPromptScheduleEncodeSDXLLatentInput,
+ "BatchPromptScheduleLatentInput":BatchPromptScheduleLatentInput,
+ "ImageBatchFromValueSchedule":ImageBatchFromValueSchedule,
+ #"BatchPromptScheduleNodeFlowEnd":BatchPromptScheduleNodeFlowEnd,
+ #"BatchGLIGENSchedule": BatchGLIGENSchedule,
+
+ "StringConcatenate":StringConcatenate,
+ "Init FizzFrame":InitNodeFrame,
+ "FizzFrame":NodeFrame,
+ "FizzFrameConcatenate":FrameConcatenate,
+
+ "ConcatStringSingle": ConcatStringSingle,
+ "convertKeyframeKeysToBatchKeys": convertKeyframeKeysToBatchKeys,
+ "CalculateFrameOffset":CalculateFrameOffset,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+ "Lerp": "Lerp 📅🅕🅝",
+ "SinWave": "SinWave 📅🅕🅝",
+ "InvSinWave": "InvSinWave 📅🅕🅝",
+ "CosWave": "CosWave 📅🅕🅝",
+ "InvCosWave": "InvCosWave 📅🅕🅝",
+ "SquareWave":"SquareWave 📅🅕🅝",
+ "SawtoothWave": "SawtoothWave 📅🅕🅝",
+ "TriangleWave": "TriangleWave 📅🅕🅝",
+ "AbsCosWave": "AbsCosWave 📅🅕🅝",
+ "AbsSinWave": "AbsSinWave 📅🅕🅝",
+ "PromptSchedule": "Prompt Schedule 📅🅕🅝",
+ "ValueSchedule": "Value Schedule 📅🅕🅝",
+ "PromptScheduleNodeFlow": "Prompt Schedule NodeFlow 📅🅕🅝",
+ "PromptScheduleNodeFlowEnd": "Prompt Schedule NodeFlow End 📅🅕🅝",
+ "StringSchedule":"String Schedule 📅🅕🅝",
+ "StringConcatenate":"String Concatenate 📅🅕🅝",
+ "Init FizzFrame":"Init Node Frame 📅🅕🅝",
+ "FizzFrame":"Node Frame 📅🅕🅝",
+ "FizzFrameConcatenate":"Frame Concatenate 📅🅕🅝",
+ "BatchPromptSchedule": "Batch Prompt Schedule 📅🅕🅝",
+ "BatchValueSchedule": "Batch Value Schedule 📅🅕🅝",
+ "PromptScheduleEncodeSDXL": "Prompt Schedule SDXL 📅🅕🅝",
+ "BatchPromptScheduleEncodeSDXL": "Batch Prompt Schedule SDXL 📅🅕🅝",
+ "BatchStringSchedule": "Batch String Schedule 📅🅕🅝",
+ "BatchValueScheduleLatentInput": "Batch Value Schedule (Latent Input) 📅🅕🅝",
+ "BatchPromptScheduleSDXLLatentInput": "Batch Prompt Schedule SDXL (Latent Input) 📅🅕🅝",
+ "BatchPromptScheduleLatentInput": "Batch Prompt Schedule (Latent Input) 📅🅕🅝",
+ "ImageBatchFromValueSchedule":"Image Batch From Value Schedule 📅🅕🅝",
+ "ConcatStringSingle": "Concat String (Single) 📅🅕🅝",
+ "convertKeyframeKeysToBatchKeys":"Keyframe Keys To Batch Keys 📅🅕🅝",
+ "SelectFrameNumber":"Select Frame Number 📅🅕🅝",
+ "CalculateFrameOffset":"Calculate Frame Offset 📅🅕🅝",
+}
+print('\033[34mFizzleDorf Custom Nodes: \033[92mLoaded\033[0m')
diff --git a/custom_nodes/ComfyUI_FizzNodes/javascript/Folder here to satisfy init, eventually I'll have stuff in here..txt b/custom_nodes/ComfyUI_FizzNodes/javascript/Folder here to satisfy init, eventually I'll have stuff in here..txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/custom_nodes/ComfyUI_FizzNodes/requirements.txt b/custom_nodes/ComfyUI_FizzNodes/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7158bb086a7262e9ff6129e3036b195cd8f10de7
--- /dev/null
+++ b/custom_nodes/ComfyUI_FizzNodes/requirements.txt
@@ -0,0 +1,2 @@
+pandas
+numexpr
diff --git a/custom_nodes/ComfyUI_Noise/LICENSE b/custom_nodes/ComfyUI_Noise/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..f288702d2fa16d3cdf0035b15a9fcbc552cd88e7
--- /dev/null
+++ b/custom_nodes/ComfyUI_Noise/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/custom_nodes/ComfyUI_Noise/README.md b/custom_nodes/ComfyUI_Noise/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7402929546b9cc995e7346f96f5ee5724b5ab9e6
--- /dev/null
+++ b/custom_nodes/ComfyUI_Noise/README.md
@@ -0,0 +1,88 @@
+# ComfyUI Noise
+
+This repo contains 6 nodes for [ComfyUI](https://github.com/comfyanonymous/ComfyUI) that allow for more control and flexibility over noise. This enables workflows such as making small variations to a generation, or finding the noise that accompanies a given input image and prompt.
+
+## Nodes
+
+### Noisy Latent Image:
+This node lets you generate noise. You can find it under `latent>noise`, and it has the following settings (a short sketch of the idea follows the list):
+- **source**: where to generate the noise, currently supports GPU and CPU.
+- **seed**: the noise seed.
+- **width**: image width.
+- **height**: image height.
+- **batch_size**: batch size.
+
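+In essence this node boils down to seeded Gaussian noise in latent shape. A minimal sketch of the idea, assuming only `torch` (this is not the node's exact code, and the example values for `seed` and the dimensions are arbitrary):
+
+```python
+import torch
+
+seed, batch_size, width, height = 0, 1, 512, 512
+
+# latents have 4 channels at 1/8th the pixel resolution, so a 512x512 image
+# corresponds to a (1, 4, 64, 64) tensor of standard normal noise
+torch.manual_seed(seed)
+noise = torch.randn((batch_size, 4, height // 8, width // 8), dtype=torch.float32)
+```
+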
+### Duplicate Batch Index:
+The functionality of this node has been moved to core; please use `Latent>Batch>Repeat Latent Batch` and `Latent>Batch>Latent From Batch` instead.
+
+This node lets you duplicate a certain sample in the batch. This can be used to duplicate e.g. encoded images, but also noise generated by the node listed above. You can find this node under `latent` and it has the following settings:
+- **latents**: the latents.
+- **batch_index**: which sample in the latents to duplicate.
+- **batch_size**: the new batch size, (i.e. how many times to duplicate the sample).
+
+### Slerp Latents:
+This node lets you mix two latents together. Both input latents must share the same dimensions, or the node will ignore the mix factor and simply output the top slot. For other things attached to the latents, such as masks, only those of the top slot are passed on. You can find this node under `latent`, and it comes with the following inputs (a sketch of the interpolation follows the list):
+- **latents1**: first batch of latents.
+- **latents2**: second batch of latents. This input is optional.
+- **mask**: determines where in the latents to slerp. This input is optional.
+- **factor**: how much of the second batch of latents should be slerped into the first.
+
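+The interpolation itself is spherical: each sample is normalized, the angle between the two is measured, and the result is interpolated along the arc rather than the straight line. A minimal sketch, mirroring the `slerp` helper in this repo's `nodes.py` (inputs are assumed to be flattened to `(batch, -1)`; the repo version additionally guards against NaNs):
+
+```python
+import torch
+
+def slerp(t, low, high):
+    # normalize each flattened sample and measure the angle between them
+    low_n = low / torch.norm(low, dim=1, keepdim=True)
+    high_n = high / torch.norm(high, dim=1, keepdim=True)
+    omega = torch.acos((low_n * high_n).sum(1))
+    so = torch.sin(omega)
+    # interpolate along the arc between the two samples
+    return (torch.sin((1.0 - t) * omega) / so).unsqueeze(1) * low \
+         + (torch.sin(t * omega) / so).unsqueeze(1) * high
+```
+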
+### Get Sigma:
+This node can be used to calculate the amount of noise a sampler expects when it starts denoising. You can find this node under `latent>noise` and it comes with the following inputs and settings:
+- **model**: The model for which to calculate the sigma.
+- **sampler_name**: the name of the sampler for which to calculate the sigma.
+- **scheduler**: the type of schedule used in the sampler.
+- **steps**: the total number of steps in the schedule.
+- **start_at_step**: the start step of the sampler, i.e. how much noise it expects in the input image.
+- **end_at_step**: the current end step of the previous sampler, i.e. how much noise is already in the image.
+
+Most of the time you'd simply want to keep `start_at_step` at zero and `end_at_step` at `steps`, but if you want to re-inject some noise in between two samplers, e.g. one sampler that denoises from 0 to 15 and a second that denoises from 10 to 20, you'd want a `start_at_step` of 10 and an `end_at_step` of 15. That way the image we get, which is at step 15, can be noised back down to step 10, so the second sampler can bring it to 20. Take note that the advanced KSampler has settings for `add_noise` and `return_with_leftover_noise`, both of which we want disabled when working with these nodes. A short sketch of the underlying calculation follows.
+
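+Under the hood the sigma is simply the difference of the sampler's noise schedule at the two steps. A minimal sketch (see `GetSigma` in `nodes.py` for how the schedule is actually obtained; the node also rescales the result by the model's latent scale factor):
+
+```python
+# sigmas: the sampler's descending noise schedule, one value per step, ending at 0
+def get_sigma(sigmas, start_at_step, end_at_step):
+    # the amount of noise between two points in the schedule is just
+    # the difference of the sigma values at those steps
+    return (sigmas[start_at_step] - sigmas[end_at_step]).item()
+
+# the example above: re-noise a step-15 image back down to step 10
+# strength = get_sigma(sigmas, 10, 15)
+```
+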
+### Inject Noise:
+This node lets you actually inject the noise into an image latent, you can find this node under `latent>noise` and it comes with the following inputs:
+- **latents**: The latents to inject the noise into.
+- **noise**: The noise. This input is optional.
+- **mask**: determines where to inject noise. This input is optional.
+- **strength**: The strength of the noise. Note that we can use the node above to calculate an appropriate strength value for us (see the sketch below).
+
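+Injection itself is just a scaled addition, optionally blended through a mask. A minimal sketch, mirroring `InjectNoise` in `nodes.py`:
+
+```python
+def inject_noise(latents, noise, strength, mask=None):
+    # scaled addition of the noise onto the latents
+    noised = latents + noise * strength
+    # where a mask is given, keep the original latents outside the mask
+    if mask is not None:
+        noised = mask * noised + (1 - mask) * latents
+    return noised
+```
+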
+### Unsampler:
+This node does the reverse of a sampler. It calculates the noise that would generate the image given the model and the prompt. You can find this node under `sampling` and it takes the following inputs and settings:
+- **model**: The model to target.
+- **steps**: number of steps to noise.
+- **end_step**: the step to travel back to.
+- **cfg**: classifier free guidance scale.
+- **sampler_name**: The name of the sampling technique to use.
+- **scheduler**: The type of schedule to use.
+- **normalize**: whether to normalize the noise before output. Useful when passing it on to an Inject Noise node, which expects normalized noise.
+- **positive**: Positive prompt.
+- **negative**: Negative prompt.
+- **latent_image**: The image to renoise.
+
+When trying to reconstruct the target image as faithfully as possible, this works best if both the unsampler and the sampler use a cfg scale close to 1.0 and a similar number of steps. But it is fun and worthwhile to play around with these settings to get a better intuition for the results. This node lets you do similar things to what the A1111 [img2img alternative](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#img2img-alternative-test) script does.
+
+## Examples
+
+Here are some examples that show how to use the nodes above. Workflows for these examples can be found in the `example_workflows` folder.
+
+**generating variations**
+
+![generating variations](examples/example_variation.png)
+
+To create small variations to a given generation we can do the following: we generate the noise of the seed that we're interested in using a `Noisy Latent Image` node, then create an entire batch of these with a `Duplicate Batch Index` node. Note that if we were doing this for img2img we could use this same node to duplicate the image latents. Next we generate some more noise, but this time a batch of noise rather than a single sample. We then slerp this newly created noise into the other one with a `Slerp Latents` node. To figure out the required strength for injecting this noise we use a `Get Sigma` node. And finally we inject the slerped noise into a batch of empty latents with an `Inject Noise` node. Take note that we use an advanced KSampler with the `add_noise` setting disabled.
+
+**"unsampling"**
+
+![unsampling](examples/example_unsample.png)
+
+To get the noise that recreates a certain image, we first load an image. Then we use the `Unsampler` node with a low cfg value. To check if this is working, we then take the resulting noise and feed it back into an advanced KSampler with the `add_noise` setting disabled and a cfg of 1.0.
+
+
+
diff --git a/custom_nodes/ComfyUI_Noise/__init__.py b/custom_nodes/ComfyUI_Noise/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d721463be66961a2f388b3a756760d167ea5d510
--- /dev/null
+++ b/custom_nodes/ComfyUI_Noise/__init__.py
@@ -0,0 +1,3 @@
+from .nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
+
+__all__ = ['NODE_CLASS_MAPPINGS', 'NODE_DISPLAY_NAME_MAPPINGS']
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI_Noise/example_workflows/unsample_example.json b/custom_nodes/ComfyUI_Noise/example_workflows/unsample_example.json
new file mode 100644
index 0000000000000000000000000000000000000000..86ebae968a66c3450636d45465de50d9a628e6ce
--- /dev/null
+++ b/custom_nodes/ComfyUI_Noise/example_workflows/unsample_example.json
@@ -0,0 +1,698 @@
+{
+ "last_node_id": 27,
+ "last_link_id": 66,
+ "nodes": [
+ {
+ "id": 23,
+ "type": "Reroute",
+ "pos": [
+ 228,
+ 840
+ ],
+ "size": [
+ 75,
+ 26
+ ],
+ "flags": {},
+ "order": 5,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "",
+ "type": "*",
+ "link": 50
+ }
+ ],
+ "outputs": [
+ {
+ "name": "",
+ "type": "VAE",
+ "links": [
+ 51,
+ 52
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "showOutputText": false,
+ "horizontal": false
+ }
+ },
+ {
+ "id": 24,
+ "type": "Reroute",
+ "pos": [
+ 400,
+ 740
+ ],
+ "size": [
+ 75,
+ 26
+ ],
+ "flags": {},
+ "order": 2,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "",
+ "type": "*",
+ "link": 53
+ }
+ ],
+ "outputs": [
+ {
+ "name": "",
+ "type": "MODEL",
+ "links": [
+ 54
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "showOutputText": false,
+ "horizontal": false
+ }
+ },
+ {
+ "id": 8,
+ "type": "VAEDecode",
+ "pos": [
+ 970,
+ 640
+ ],
+ "size": {
+ "0": 210,
+ "1": 46
+ },
+ "flags": {},
+ "order": 11,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "samples",
+ "type": "LATENT",
+ "link": 44
+ },
+ {
+ "name": "vae",
+ "type": "VAE",
+ "link": 52
+ }
+ ],
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 9
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "VAEDecode"
+ }
+ },
+ {
+ "id": 9,
+ "type": "SaveImage",
+ "pos": [
+ 1280,
+ 681
+ ],
+ "size": {
+ "0": 367.50909423828125,
+ "1": 383.8414306640625
+ },
+ "flags": {},
+ "order": 12,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "images",
+ "type": "IMAGE",
+ "link": 9
+ }
+ ],
+ "properties": {},
+ "widgets_values": [
+ "ComfyUI"
+ ]
+ },
+ {
+ "id": 7,
+ "type": "CLIPTextEncode",
+ "pos": [
+ -64,
+ 642
+ ],
+ "size": {
+ "0": 425.27801513671875,
+ "1": 180.6060791015625
+ },
+ "flags": {},
+ "order": 4,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "clip",
+ "type": "CLIP",
+ "link": 5
+ }
+ ],
+ "outputs": [
+ {
+ "name": "CONDITIONING",
+ "type": "CONDITIONING",
+ "links": [
+ 56
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "CLIPTextEncode"
+ },
+ "widgets_values": [
+ "text, watermark"
+ ]
+ },
+ {
+ "id": 6,
+ "type": "CLIPTextEncode",
+ "pos": [
+ -68,
+ 432
+ ],
+ "size": {
+ "0": 422.84503173828125,
+ "1": 164.31304931640625
+ },
+ "flags": {},
+ "order": 3,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "clip",
+ "type": "CLIP",
+ "link": 3
+ }
+ ],
+ "outputs": [
+ {
+ "name": "CONDITIONING",
+ "type": "CONDITIONING",
+ "links": [
+ 59
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "CLIPTextEncode"
+ },
+ "widgets_values": [
+ "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,"
+ ]
+ },
+ {
+ "id": 19,
+ "type": "LoadImage",
+ "pos": [
+ -124,
+ 906
+ ],
+ "size": {
+ "0": 434.40911865234375,
+ "1": 440.44140625
+ },
+ "flags": {},
+ "order": 0,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 34
+ ],
+ "slot_index": 0
+ },
+ {
+ "name": "MASK",
+ "type": "MASK",
+ "links": null
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "LoadImage"
+ },
+ "widgets_values": [
+ "example.png",
+ "image"
+ ]
+ },
+ {
+ "id": 12,
+ "type": "KSamplerAdvanced",
+ "pos": [
+ 950,
+ 740
+ ],
+ "size": {
+ "0": 315,
+ "1": 334
+ },
+ "flags": {},
+ "order": 10,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "model",
+ "type": "MODEL",
+ "link": 54
+ },
+ {
+ "name": "positive",
+ "type": "CONDITIONING",
+ "link": 61
+ },
+ {
+ "name": "negative",
+ "type": "CONDITIONING",
+ "link": 58
+ },
+ {
+ "name": "latent_image",
+ "type": "LATENT",
+ "link": 66
+ }
+ ],
+ "outputs": [
+ {
+ "name": "LATENT",
+ "type": "LATENT",
+ "links": [
+ 44
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "KSamplerAdvanced"
+ },
+ "widgets_values": [
+ "disable",
+ 0,
+ "fixed",
+ 25,
+ 1,
+ "dpmpp_2m",
+ "karras",
+ 0,
+ 25,
+ "disable"
+ ]
+ },
+ {
+ "id": 26,
+ "type": "Reroute",
+ "pos": [
+ 450,
+ 670
+ ],
+ "size": [
+ 75,
+ 26
+ ],
+ "flags": {},
+ "order": 6,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "",
+ "type": "*",
+ "link": 59
+ }
+ ],
+ "outputs": [
+ {
+ "name": "",
+ "type": "CONDITIONING",
+ "links": [
+ 61,
+ 62
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "showOutputText": false,
+ "horizontal": false
+ }
+ },
+ {
+ "id": 25,
+ "type": "Reroute",
+ "pos": [
+ 430,
+ 700
+ ],
+ "size": [
+ 75,
+ 26
+ ],
+ "flags": {},
+ "order": 7,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "",
+ "type": "*",
+ "link": 56
+ }
+ ],
+ "outputs": [
+ {
+ "name": "",
+ "type": "CONDITIONING",
+ "links": [
+ 58,
+ 63
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "showOutputText": false,
+ "horizontal": false
+ }
+ },
+ {
+ "id": 20,
+ "type": "VAEEncode",
+ "pos": [
+ 354,
+ 894
+ ],
+ "size": {
+ "0": 210,
+ "1": 46
+ },
+ "flags": {},
+ "order": 8,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "pixels",
+ "type": "IMAGE",
+ "link": 34
+ },
+ {
+ "name": "vae",
+ "type": "VAE",
+ "link": 51
+ }
+ ],
+ "outputs": [
+ {
+ "name": "LATENT",
+ "type": "LATENT",
+ "links": [
+ 64
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "VAEEncode"
+ }
+ },
+ {
+ "id": 4,
+ "type": "CheckpointLoaderSimple",
+ "pos": [
+ -635,
+ 661
+ ],
+ "size": {
+ "0": 315,
+ "1": 98
+ },
+ "flags": {},
+ "order": 1,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "MODEL",
+ "type": "MODEL",
+ "links": [
+ 53,
+ 65
+ ],
+ "slot_index": 0
+ },
+ {
+ "name": "CLIP",
+ "type": "CLIP",
+ "links": [
+ 3,
+ 5
+ ],
+ "slot_index": 1
+ },
+ {
+ "name": "VAE",
+ "type": "VAE",
+ "links": [
+ 50
+ ],
+ "slot_index": 2
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "CheckpointLoaderSimple"
+ },
+ "widgets_values": [
+ "v1-5-pruned-emaonly.safetensors"
+ ]
+ },
+ {
+ "id": 27,
+ "type": "BNK_Unsampler",
+ "pos": [
+ 608,
+ 857
+ ],
+ "size": {
+ "0": 315,
+ "1": 214
+ },
+ "flags": {},
+ "order": 9,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "model",
+ "type": "MODEL",
+ "link": 65
+ },
+ {
+ "name": "positive",
+ "type": "CONDITIONING",
+ "link": 62
+ },
+ {
+ "name": "negative",
+ "type": "CONDITIONING",
+ "link": 63
+ },
+ {
+ "name": "latent_image",
+ "type": "LATENT",
+ "link": 64
+ }
+ ],
+ "outputs": [
+ {
+ "name": "LATENT",
+ "type": "LATENT",
+ "links": [
+ 66
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "BNK_Unsampler"
+ },
+ "widgets_values": [
+ 25,
+ 0,
+ 1,
+ "dpmpp_2m",
+ "karras"
+ ]
+ }
+ ],
+ "links": [
+ [
+ 3,
+ 4,
+ 1,
+ 6,
+ 0,
+ "CLIP"
+ ],
+ [
+ 5,
+ 4,
+ 1,
+ 7,
+ 0,
+ "CLIP"
+ ],
+ [
+ 9,
+ 8,
+ 0,
+ 9,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 34,
+ 19,
+ 0,
+ 20,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 44,
+ 12,
+ 0,
+ 8,
+ 0,
+ "LATENT"
+ ],
+ [
+ 50,
+ 4,
+ 2,
+ 23,
+ 0,
+ "*"
+ ],
+ [
+ 51,
+ 23,
+ 0,
+ 20,
+ 1,
+ "VAE"
+ ],
+ [
+ 52,
+ 23,
+ 0,
+ 8,
+ 1,
+ "VAE"
+ ],
+ [
+ 53,
+ 4,
+ 0,
+ 24,
+ 0,
+ "*"
+ ],
+ [
+ 54,
+ 24,
+ 0,
+ 12,
+ 0,
+ "MODEL"
+ ],
+ [
+ 56,
+ 7,
+ 0,
+ 25,
+ 0,
+ "*"
+ ],
+ [
+ 58,
+ 25,
+ 0,
+ 12,
+ 2,
+ "CONDITIONING"
+ ],
+ [
+ 59,
+ 6,
+ 0,
+ 26,
+ 0,
+ "*"
+ ],
+ [
+ 61,
+ 26,
+ 0,
+ 12,
+ 1,
+ "CONDITIONING"
+ ],
+ [
+ 62,
+ 26,
+ 0,
+ 27,
+ 1,
+ "CONDITIONING"
+ ],
+ [
+ 63,
+ 25,
+ 0,
+ 27,
+ 2,
+ "CONDITIONING"
+ ],
+ [
+ 64,
+ 20,
+ 0,
+ 27,
+ 3,
+ "LATENT"
+ ],
+ [
+ 65,
+ 4,
+ 0,
+ 27,
+ 0,
+ "MODEL"
+ ],
+ [
+ 66,
+ 27,
+ 0,
+ 12,
+ 3,
+ "LATENT"
+ ]
+ ],
+ "groups": [],
+ "config": {},
+ "extra": {},
+ "version": 0.4
+}
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI_Noise/example_workflows/variations_example.json b/custom_nodes/ComfyUI_Noise/example_workflows/variations_example.json
new file mode 100644
index 0000000000000000000000000000000000000000..a9a75e41d34ecaeeb3a2d7f19f1c117a9ff4103d
--- /dev/null
+++ b/custom_nodes/ComfyUI_Noise/example_workflows/variations_example.json
@@ -0,0 +1,868 @@
+{
+ "last_node_id": 39,
+ "last_link_id": 84,
+ "nodes": [
+ {
+ "id": 26,
+ "type": "Reroute",
+ "pos": [
+ 450,
+ 670
+ ],
+ "size": [
+ 75,
+ 26
+ ],
+ "flags": {},
+ "order": 10,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "",
+ "type": "*",
+ "link": 59
+ }
+ ],
+ "outputs": [
+ {
+ "name": "",
+ "type": "CONDITIONING",
+ "links": [
+ 61
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "showOutputText": false,
+ "horizontal": false
+ }
+ },
+ {
+ "id": 25,
+ "type": "Reroute",
+ "pos": [
+ 430,
+ 700
+ ],
+ "size": [
+ 75,
+ 26
+ ],
+ "flags": {},
+ "order": 11,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "",
+ "type": "*",
+ "link": 56
+ }
+ ],
+ "outputs": [
+ {
+ "name": "",
+ "type": "CONDITIONING",
+ "links": [
+ 58
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "showOutputText": false,
+ "horizontal": false
+ }
+ },
+ {
+ "id": 24,
+ "type": "Reroute",
+ "pos": [
+ 400,
+ 740
+ ],
+ "size": [
+ 75,
+ 26
+ ],
+ "flags": {},
+ "order": 4,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "",
+ "type": "*",
+ "link": 53
+ }
+ ],
+ "outputs": [
+ {
+ "name": "",
+ "type": "MODEL",
+ "links": [
+ 54
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "showOutputText": false,
+ "horizontal": false
+ }
+ },
+ {
+ "id": 7,
+ "type": "CLIPTextEncode",
+ "pos": [
+ -64,
+ 642
+ ],
+ "size": {
+ "0": 425.27801513671875,
+ "1": 180.6060791015625
+ },
+ "flags": {},
+ "order": 7,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "clip",
+ "type": "CLIP",
+ "link": 5
+ }
+ ],
+ "outputs": [
+ {
+ "name": "CONDITIONING",
+ "type": "CONDITIONING",
+ "links": [
+ 56
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "CLIPTextEncode"
+ },
+ "widgets_values": [
+ "text, watermark"
+ ]
+ },
+ {
+ "id": 6,
+ "type": "CLIPTextEncode",
+ "pos": [
+ -68,
+ 432
+ ],
+ "size": {
+ "0": 422.84503173828125,
+ "1": 164.31304931640625
+ },
+ "flags": {},
+ "order": 6,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "clip",
+ "type": "CLIP",
+ "link": 3
+ }
+ ],
+ "outputs": [
+ {
+ "name": "CONDITIONING",
+ "type": "CONDITIONING",
+ "links": [
+ 59
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "CLIPTextEncode"
+ },
+ "widgets_values": [
+ "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,"
+ ]
+ },
+ {
+ "id": 12,
+ "type": "KSamplerAdvanced",
+ "pos": [
+ 835,
+ 887
+ ],
+ "size": {
+ "0": 315,
+ "1": 334
+ },
+ "flags": {},
+ "order": 14,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "model",
+ "type": "MODEL",
+ "link": 54
+ },
+ {
+ "name": "positive",
+ "type": "CONDITIONING",
+ "link": 61
+ },
+ {
+ "name": "negative",
+ "type": "CONDITIONING",
+ "link": 58
+ },
+ {
+ "name": "latent_image",
+ "type": "LATENT",
+ "link": 84
+ }
+ ],
+ "outputs": [
+ {
+ "name": "LATENT",
+ "type": "LATENT",
+ "links": [
+ 44
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "KSamplerAdvanced"
+ },
+ "widgets_values": [
+ "disable",
+ 0,
+ "fixed",
+ 25,
+ 8,
+ "dpmpp_2m",
+ "karras",
+ 0,
+ 25,
+ "disable"
+ ]
+ },
+ {
+ "id": 23,
+ "type": "Reroute",
+ "pos": [
+ -230,
+ 1632
+ ],
+ "size": [
+ 75,
+ 26
+ ],
+ "flags": {},
+ "order": 8,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "",
+ "type": "*",
+ "link": 50
+ }
+ ],
+ "outputs": [
+ {
+ "name": "",
+ "type": "VAE",
+ "links": [
+ 52
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "showOutputText": false,
+ "horizontal": false
+ }
+ },
+ {
+ "id": 8,
+ "type": "VAEDecode",
+ "pos": [
+ 1183,
+ 1133
+ ],
+ "size": {
+ "0": 210,
+ "1": 46
+ },
+ "flags": {},
+ "order": 15,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "samples",
+ "type": "LATENT",
+ "link": 44
+ },
+ {
+ "name": "vae",
+ "type": "VAE",
+ "link": 52
+ }
+ ],
+ "outputs": [
+ {
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 9
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "VAEDecode"
+ }
+ },
+ {
+ "id": 9,
+ "type": "SaveImage",
+ "pos": [
+ 771,
+ 1259
+ ],
+ "size": {
+ "0": 494.55535888671875,
+ "1": 524.3897705078125
+ },
+ "flags": {},
+ "order": 16,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "images",
+ "type": "IMAGE",
+ "link": 9
+ }
+ ],
+ "properties": {},
+ "widgets_values": [
+ "ComfyUI"
+ ]
+ },
+ {
+ "id": 4,
+ "type": "CheckpointLoaderSimple",
+ "pos": [
+ -635,
+ 661
+ ],
+ "size": {
+ "0": 315,
+ "1": 98
+ },
+ "flags": {},
+ "order": 0,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "MODEL",
+ "type": "MODEL",
+ "links": [
+ 53,
+ 74
+ ],
+ "slot_index": 0
+ },
+ {
+ "name": "CLIP",
+ "type": "CLIP",
+ "links": [
+ 3,
+ 5
+ ],
+ "slot_index": 1
+ },
+ {
+ "name": "VAE",
+ "type": "VAE",
+ "links": [
+ 50
+ ],
+ "slot_index": 2
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "CheckpointLoaderSimple"
+ },
+ "widgets_values": [
+ "v1-5-pruned-emaonly.safetensors"
+ ]
+ },
+ {
+ "id": 34,
+ "type": "BNK_NoisyLatentImage",
+ "pos": [
+ -216,
+ 980
+ ],
+ "size": {
+ "0": 315,
+ "1": 178
+ },
+ "flags": {},
+ "order": 1,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "LATENT",
+ "type": "LATENT",
+ "links": [
+ 75
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "BNK_NoisyLatentImage"
+ },
+ "widgets_values": [
+ "CPU",
+ 0,
+ "fixed",
+ 512,
+ 512,
+ 1
+ ]
+ },
+ {
+ "id": 35,
+ "type": "BNK_NoisyLatentImage",
+ "pos": [
+ -217,
+ 1197
+ ],
+ "size": {
+ "0": 315,
+ "1": 178
+ },
+ "flags": {},
+ "order": 2,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "LATENT",
+ "type": "LATENT",
+ "links": [
+ 77
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "BNK_NoisyLatentImage"
+ },
+ "widgets_values": [
+ "CPU",
+ 1,
+ "fixed",
+ 512,
+ 512,
+ 4
+ ]
+ },
+ {
+ "id": 37,
+ "type": "BNK_DuplicateBatchIndex",
+ "pos": [
+ 134,
+ 1012
+ ],
+ "size": {
+ "0": 315,
+ "1": 82
+ },
+ "flags": {},
+ "order": 9,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "latents",
+ "type": "LATENT",
+ "link": 75
+ }
+ ],
+ "outputs": [
+ {
+ "name": "LATENT",
+ "type": "LATENT",
+ "links": [
+ 76
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "BNK_DuplicateBatchIndex"
+ },
+ "widgets_values": [
+ 0,
+ 4
+ ]
+ },
+ {
+ "id": 38,
+ "type": "BNK_SlerpLatent",
+ "pos": [
+ 137,
+ 1144
+ ],
+ "size": {
+ "0": 315,
+ "1": 98
+ },
+ "flags": {},
+ "order": 12,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "latents1",
+ "type": "LATENT",
+ "link": 76
+ },
+ {
+ "name": "latents2",
+ "type": "LATENT",
+ "link": 77
+ },
+ {
+ "name": "mask",
+ "type": "MASK",
+ "link": null
+ }
+ ],
+ "outputs": [
+ {
+ "name": "LATENT",
+ "type": "LATENT",
+ "links": [
+ 81
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "BNK_SlerpLatent"
+ },
+ "widgets_values": [
+ 0.05
+ ]
+ },
+ {
+ "id": 39,
+ "type": "BNK_InjectNoise",
+ "pos": [
+ 476,
+ 1131
+ ],
+ "size": [
+ 315,
+ 98
+ ],
+ "flags": {},
+ "order": 13,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "latents",
+ "type": "LATENT",
+ "link": 82
+ },
+ {
+ "name": "noise",
+ "type": "LATENT",
+ "link": 81
+ },
+ {
+ "name": "mask",
+ "type": "MASK",
+ "link": null
+ },
+ {
+ "name": "strength",
+ "type": "FLOAT",
+ "link": 80,
+ "widget": {
+ "name": "strength",
+ "config": [
+ "FLOAT",
+ {
+ "default": 1,
+ "min": 0,
+ "max": 20,
+ "step": 0.01
+ }
+ ]
+ }
+ }
+ ],
+ "outputs": [
+ {
+ "name": "LATENT",
+ "type": "LATENT",
+ "links": [
+ 84
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "BNK_InjectNoise"
+ },
+ "widgets_values": [
+ 1
+ ]
+ },
+ {
+ "id": 33,
+ "type": "EmptyLatentImage",
+ "pos": [
+ 474,
+ 985
+ ],
+ "size": {
+ "0": 315,
+ "1": 106
+ },
+ "flags": {},
+ "order": 3,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "LATENT",
+ "type": "LATENT",
+ "links": [
+ 82
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "EmptyLatentImage"
+ },
+ "widgets_values": [
+ 512,
+ 512,
+ 4
+ ]
+ },
+ {
+ "id": 36,
+ "type": "BNK_GetSigma",
+ "pos": [
+ -221,
+ 1420
+ ],
+ "size": {
+ "0": 315,
+ "1": 154
+ },
+ "flags": {},
+ "order": 5,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "model",
+ "type": "MODEL",
+ "link": 74
+ }
+ ],
+ "outputs": [
+ {
+ "name": "FLOAT",
+ "type": "FLOAT",
+ "links": [
+ 80
+ ],
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "BNK_GetSigma"
+ },
+ "widgets_values": [
+ "dpmpp_2m",
+ "karras",
+ 25,
+ 0,
+ 25
+ ]
+ }
+ ],
+ "links": [
+ [
+ 3,
+ 4,
+ 1,
+ 6,
+ 0,
+ "CLIP"
+ ],
+ [
+ 5,
+ 4,
+ 1,
+ 7,
+ 0,
+ "CLIP"
+ ],
+ [
+ 9,
+ 8,
+ 0,
+ 9,
+ 0,
+ "IMAGE"
+ ],
+ [
+ 44,
+ 12,
+ 0,
+ 8,
+ 0,
+ "LATENT"
+ ],
+ [
+ 50,
+ 4,
+ 2,
+ 23,
+ 0,
+ "*"
+ ],
+ [
+ 52,
+ 23,
+ 0,
+ 8,
+ 1,
+ "VAE"
+ ],
+ [
+ 53,
+ 4,
+ 0,
+ 24,
+ 0,
+ "*"
+ ],
+ [
+ 54,
+ 24,
+ 0,
+ 12,
+ 0,
+ "MODEL"
+ ],
+ [
+ 56,
+ 7,
+ 0,
+ 25,
+ 0,
+ "*"
+ ],
+ [
+ 58,
+ 25,
+ 0,
+ 12,
+ 2,
+ "CONDITIONING"
+ ],
+ [
+ 59,
+ 6,
+ 0,
+ 26,
+ 0,
+ "*"
+ ],
+ [
+ 61,
+ 26,
+ 0,
+ 12,
+ 1,
+ "CONDITIONING"
+ ],
+ [
+ 74,
+ 4,
+ 0,
+ 36,
+ 0,
+ "MODEL"
+ ],
+ [
+ 75,
+ 34,
+ 0,
+ 37,
+ 0,
+ "LATENT"
+ ],
+ [
+ 76,
+ 37,
+ 0,
+ 38,
+ 0,
+ "LATENT"
+ ],
+ [
+ 77,
+ 35,
+ 0,
+ 38,
+ 1,
+ "LATENT"
+ ],
+ [
+ 80,
+ 36,
+ 0,
+ 39,
+ 3,
+ "FLOAT"
+ ],
+ [
+ 81,
+ 38,
+ 0,
+ 39,
+ 1,
+ "LATENT"
+ ],
+ [
+ 82,
+ 33,
+ 0,
+ 39,
+ 0,
+ "LATENT"
+ ],
+ [
+ 84,
+ 39,
+ 0,
+ 12,
+ 3,
+ "LATENT"
+ ]
+ ],
+ "groups": [],
+ "config": {},
+ "extra": {},
+ "version": 0.4
+}
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI_Noise/examples/example_unsample.png b/custom_nodes/ComfyUI_Noise/examples/example_unsample.png
new file mode 100644
index 0000000000000000000000000000000000000000..6296c1d5490484cb7d183ca4974689d23b2bd695
Binary files /dev/null and b/custom_nodes/ComfyUI_Noise/examples/example_unsample.png differ
diff --git a/custom_nodes/ComfyUI_Noise/examples/example_variation.png b/custom_nodes/ComfyUI_Noise/examples/example_variation.png
new file mode 100644
index 0000000000000000000000000000000000000000..44d9a3f5424d9d8db31c090ba031385058cff69b
Binary files /dev/null and b/custom_nodes/ComfyUI_Noise/examples/example_variation.png differ
diff --git a/custom_nodes/ComfyUI_Noise/nodes.py b/custom_nodes/ComfyUI_Noise/nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf412da1eb04d4ba22a5911e75c6b33b1da749dc
--- /dev/null
+++ b/custom_nodes/ComfyUI_Noise/nodes.py
@@ -0,0 +1,265 @@
+import torch
+
+import os
+import sys
+
+sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "comfy"))
+
+import comfy.model_management
+import comfy.samplers
+import comfy.sample
+
+MAX_RESOLUTION=8192
+
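+# resize a mask to the latent's spatial size, broadcast it across the latent
+# channels, and repeat it as needed to cover the whole batch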
+def prepare_mask(mask, shape):
+ mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(shape[2], shape[3]), mode="bilinear")
+ mask = mask.expand((-1,shape[1],-1,-1))
+ if mask.shape[0] < shape[0]:
+ mask = mask.repeat((shape[0] -1) // mask.shape[0] + 1, 1, 1, 1)[:shape[0]]
+ return mask
+
+class NoisyLatentImage:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "source":(["CPU", "GPU"], ),
+ "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+ "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
+ "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
+ "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}),
+ }}
+ RETURN_TYPES = ("LATENT",)
+ FUNCTION = "create_noisy_latents"
+
+ CATEGORY = "latent/noise"
+
+ def create_noisy_latents(self, source, seed, width, height, batch_size):
+ torch.manual_seed(seed)
+ if source == "CPU":
+ device = "cpu"
+ else:
+ device = comfy.model_management.get_torch_device()
+ noise = torch.randn((batch_size, 4, height // 8, width // 8), dtype=torch.float32, device=device).cpu()
+ return ({"samples":noise}, )
+
+class DuplicateBatchIndex:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "latents":("LATENT",),
+ "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
+ "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}),
+ }}
+
+ RETURN_TYPES = ("LATENT",)
+ FUNCTION = "duplicate_index"
+
+ CATEGORY = "latent"
+
+ def duplicate_index(self, latents, batch_index, batch_size):
+ s = latents.copy()
+ batch_index = min(s["samples"].shape[0] - 1, batch_index)
+ target = s["samples"][batch_index:batch_index + 1].clone()
+ target = target.repeat((batch_size,1,1,1))
+ s["samples"] = target
+ return (s,)
+
+# from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475
+def slerp(val, low, high):
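+    # Spherical linear interpolation between noise tensors: val=0 returns low, val=1 returns high, and intermediate values follow the arc between the two directions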
+ dims = low.shape
+
+ #flatten to batches
+ low = low.reshape(dims[0], -1)
+ high = high.reshape(dims[0], -1)
+
+ low_norm = low/torch.norm(low, dim=1, keepdim=True)
+ high_norm = high/torch.norm(high, dim=1, keepdim=True)
+
+    # zero out NaNs produced when normalizing an all-zero vector (division by zero above)
+ low_norm[low_norm != low_norm] = 0.0
+ high_norm[high_norm != high_norm] = 0.0
+
+ omega = torch.acos((low_norm*high_norm).sum(1))
+ so = torch.sin(omega)
+ res = (torch.sin((1.0-val)*omega)/so).unsqueeze(1)*low + (torch.sin(val*omega)/so).unsqueeze(1) * high
+ return res.reshape(dims)
+
+class LatentSlerp:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "latents1":("LATENT",),
+ "factor": ("FLOAT", {"default": .5, "min": 0.0, "max": 1.0, "step": 0.01}),
+ },
+ "optional" :{
+ "latents2":("LATENT",),
+ "mask": ("MASK", ),
+ }}
+
+ RETURN_TYPES = ("LATENT",)
+ FUNCTION = "slerp_latents"
+
+ CATEGORY = "latent"
+
+ def slerp_latents(self, latents1, factor, latents2=None, mask=None):
+ s = latents1.copy()
+ if latents2 is None:
+ return (s,)
+ if latents1["samples"].shape != latents2["samples"].shape:
+            print("Warning: latent shapes in LatentSlerp do not match; ignoring latents2")
+ return (s,)
+ slerped = slerp(factor, latents1["samples"].clone(), latents2["samples"].clone())
+ if mask is not None:
+ mask = prepare_mask(mask, slerped.shape)
+ slerped = mask * slerped + (1-mask) * latents1["samples"]
+ s["samples"] = slerped
+ return (s,)
+
+class GetSigma:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "model": ("MODEL",),
+ "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
+ "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
+ "steps": ("INT", {"default": 10000, "min": 0, "max": 10000}),
+ "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
+ "end_at_step": ("INT", {"default": 10000, "min": 1, "max": 10000}),
+ }}
+
+ RETURN_TYPES = ("FLOAT",)
+ FUNCTION = "calc_sigma"
+
+ CATEGORY = "latent/noise"
+
+ def calc_sigma(self, model, sampler_name, scheduler, steps, start_at_step, end_at_step):
+ device = comfy.model_management.get_torch_device()
+ end_at_step = min(steps, end_at_step)
+ start_at_step = min(start_at_step, end_at_step)
+        comfy.model_management.load_model_gpu(model)
+        real_model = model.model
+ sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=1.0, model_options=model.model_options)
+ sigmas = sampler.sigmas
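+        # The injectable noise level is the drop in sigma between the two steps, rescaled by the latent format's scale factor so it applies directly to latents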
+ sigma = sigmas[start_at_step] - sigmas[end_at_step]
+ sigma /= model.model.latent_format.scale_factor
+ return (sigma.cpu().numpy(),)
+
+class InjectNoise:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required": {
+ "latents":("LATENT",),
+ "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}),
+ },
+ "optional":{
+ "noise": ("LATENT",),
+ "mask": ("MASK", ),
+ }}
+
+ RETURN_TYPES = ("LATENT",)
+ FUNCTION = "inject_noise"
+
+ CATEGORY = "latent/noise"
+
+ def inject_noise(self, latents, strength, noise=None, mask=None):
+ s = latents.copy()
+ if noise is None:
+ return (s,)
+ if latents["samples"].shape != noise["samples"].shape:
+            print("Warning: latent shapes in InjectNoise do not match; ignoring noise")
+ return (s,)
+ noised = s["samples"].clone() + noise["samples"].clone() * strength
+ if mask is not None:
+ mask = prepare_mask(mask, noised.shape)
+ noised = mask * noised + (1-mask) * latents["samples"]
+ s["samples"] = noised
+ return (s,)
+
+class Unsampler:
+ @classmethod
+ def INPUT_TYPES(s):
+ return {"required":
+ {"model": ("MODEL",),
+ "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+ "end_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
+ "cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}),
+ "sampler_name": (comfy.samplers.KSampler.SAMPLERS, ),
+ "scheduler": (comfy.samplers.KSampler.SCHEDULERS, ),
+ "normalize": (["disable", "enable"], ),
+ "positive": ("CONDITIONING", ),
+ "negative": ("CONDITIONING", ),
+ "latent_image": ("LATENT", ),
+ }}
+
+ RETURN_TYPES = ("LATENT",)
+ FUNCTION = "unsampler"
+
+ CATEGORY = "sampling"
+
+ def unsampler(self, model, cfg, sampler_name, steps, end_at_step, scheduler, normalize, positive, negative, latent_image):
+ normalize = normalize == "enable"
+ device = comfy.model_management.get_torch_device()
+ latent = latent_image
+ latent_image = latent["samples"]
+
+ end_at_step = min(end_at_step, steps-1)
+ end_at_step = steps - end_at_step
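+        # end_at_step counts from the end of the schedule because the sigmas are flipped below; the default of 0 therefore unsamples all the way to maximum noise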
+
+ noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
+ noise_mask = None
+ if "noise_mask" in latent:
+ noise_mask = comfy.sample.prepare_mask(latent["noise_mask"], noise.shape, device)
+
+        real_model = model.model
+
+ noise = noise.to(device)
+ latent_image = latent_image.to(device)
+
+ positive = comfy.sample.convert_cond(positive)
+ negative = comfy.sample.convert_cond(negative)
+
+ models, inference_memory = comfy.sample.get_additional_models(positive, negative, model.model_dtype())
+
+ comfy.model_management.load_models_gpu([model] + models, model.memory_required(noise.shape) + inference_memory)
+
+ sampler = comfy.samplers.KSampler(real_model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=1.0, model_options=model.model_options)
+
+        # Flip the schedule so sampling runs from low to high noise, i.e. in reverse; the epsilon avoids a sigma of exactly zero at the start
+        sigmas = sampler.sigmas.flip(0) + 0.0001
+
+ pbar = comfy.utils.ProgressBar(steps)
+ def callback(step, x0, x, total_steps):
+ pbar.update_absolute(step + 1, total_steps)
+
+ samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image, force_full_denoise=False, denoise_mask=noise_mask, sigmas=sigmas, start_step=0, last_step=end_at_step, callback=callback)
+ if normalize:
+            # Not a true normalization: unsampling is not guaranteed to end at the std implied by the schedule
+ samples -= samples.mean()
+ samples /= samples.std()
+ samples = samples.cpu()
+
+ comfy.sample.cleanup_additional_models(models)
+
+ out = latent.copy()
+ out["samples"] = samples
+ return (out, )
+
+NODE_CLASS_MAPPINGS = {
+ "BNK_NoisyLatentImage": NoisyLatentImage,
+ #"BNK_DuplicateBatchIndex": DuplicateBatchIndex,
+ "BNK_SlerpLatent": LatentSlerp,
+ "BNK_GetSigma": GetSigma,
+ "BNK_InjectNoise": InjectNoise,
+ "BNK_Unsampler": Unsampler,
+}
+
+NODE_DISPLAY_NAME_MAPPINGS = {
+ "BNK_NoisyLatentImage": "Noisy Latent Image",
+ #"BNK_DuplicateBatchIndex": "Duplicate Batch Index",
+ "BNK_SlerpLatent": "Slerp Latents",
+ "BNK_GetSigma": "Get Sigma",
+ "BNK_InjectNoise": "Inject Noise",
+ "BNK_Unsampler": "Unsampler",
+}
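+
+# A minimal usage sketch (hypothetical, for illustration only; assumes `model`,
+# `seed`, and `latent` already exist from the surrounding graph). These nodes
+# are normally wired up in the ComfyUI graph, but composed directly they add
+# schedule-matched noise for image variations:
+#
+#   sigma = GetSigma().calc_sigma(model, "euler", "normal", 20, 0, 20)[0]
+#   noise = NoisyLatentImage().create_noisy_latents("CPU", seed, 512, 512, 1)[0]
+#   noised = InjectNoise().inject_noise(latent, float(sigma), noise)[0]
+#   # ...then denoise `noised` with a KSampler at matching settings.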
diff --git a/custom_nodes/ComfyUI_UltimateSDUpscale/LICENSE b/custom_nodes/ComfyUI_UltimateSDUpscale/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..e62ec04cdeece724caeeeeaeb6ae1f6af1bb6b9a
--- /dev/null
+++ b/custom_nodes/ComfyUI_UltimateSDUpscale/LICENSE
@@ -0,0 +1,674 @@
+GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/custom_nodes/ComfyUI_UltimateSDUpscale/README.md b/custom_nodes/ComfyUI_UltimateSDUpscale/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..bf9b379ff0c6d8c648e45ebcdf01200964b646cb
--- /dev/null
+++ b/custom_nodes/ComfyUI_UltimateSDUpscale/README.md
@@ -0,0 +1,34 @@
+# ComfyUI_UltimateSDUpscale
+
+ [ComfyUI](https://github.com/comfyanonymous/ComfyUI) nodes for the [Ultimate Stable Diffusion Upscale script by Coyote-A](https://github.com/Coyote-A/ultimate-upscale-for-automatic1111). This is a wrapper for the script used in the A1111 extension.
+
+## Installation
+
+Run the following command from the command line, starting in ComfyUI/custom_nodes/:
+```
+git clone https://github.com/ssitu/ComfyUI_UltimateSDUpscale --recursive
+```
+
+## Usage
+
+Nodes can be found in the node menu under `image/upscaling`:
+
+|Node|Description|
+| --- | --- |
+| Ultimate SD Upscale | The primary node, which has most of the same inputs as the original extension script. |
+| Ultimate SD Upscale (No Upscale) | Same as the primary node, but without the upscale inputs; it assumes the input image is already upscaled. Use this if you already have an upscaled image or just want to do the tiled sampling. |
+
+---
+
+Details about most of the parameters can be found [here](https://github.com/Coyote-A/ultimate-upscale-for-automatic1111/wiki/FAQ#parameters-descriptions).
+
+Parameters not found in the original repository:
+
+* `upscale_by` The number to multiply the width and height of the image by. If you want to specify an exact width and height, use the "No Upscale" version of the node and perform the upscaling separately (e.g., ImageUpscaleWithModel -> ImageScale -> UltimateSDUpscaleNoUpscale).
+* `force_uniform_tiles` If enabled, tiles that would be cut off by the edges of the image are expanded using the rest of the image, so every tile keeps the size set by `tile_width` and `tile_height` (this matches the A1111 Web UI). If disabled, the minimal size that fits each tile is used instead, which may make sampling faster but can cause artifacts due to irregular tile sizes.
+
+## Examples
+
+#### Using the ControlNet tile model:
+
+
diff --git a/custom_nodes/ComfyUI_UltimateSDUpscale/__init__.py b/custom_nodes/ComfyUI_UltimateSDUpscale/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..359c0a729ce5e2e7db5ba588fd90e1ce2480e2a8
--- /dev/null
+++ b/custom_nodes/ComfyUI_UltimateSDUpscale/__init__.py
@@ -0,0 +1,39 @@
+import sys
+import os
+repo_dir = os.path.dirname(os.path.realpath(__file__))
+sys.path.insert(0, repo_dir)
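+# Snapshot sys.modules so any modules imported during node setup can be rolled back afterwards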
+original_modules = sys.modules.copy()
+
+# Place aside potentially conflicting modules
+modules_used = [
+ "modules",
+ "modules.devices",
+ "modules.images",
+ "modules.processing",
+ "modules.scripts",
+ "modules.shared",
+ "modules.upscaler",
+ "utils",
+]
+original_imported_modules = {}
+for module in modules_used:
+ if module in sys.modules:
+ original_imported_modules[module] = sys.modules.pop(module)
+
+# Proceed with node setup
+from .nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
+__all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS"]
+
+# Clean up imports
+# Remove repo directory from path
+sys.path.remove(repo_dir)
+# Remove any new modules
+modules_to_remove = []
+for module in sys.modules:
+ if module not in original_modules:
+ modules_to_remove.append(module)
+for module in modules_to_remove:
+ del sys.modules[module]
+
+# Restore original modules
+sys.modules.update(original_imported_modules)
diff --git a/custom_nodes/ComfyUI_UltimateSDUpscale/gradio.py b/custom_nodes/ComfyUI_UltimateSDUpscale/gradio.py
new file mode 100644
index 0000000000000000000000000000000000000000..0baca4418b105ad30b5f0084d2bbd72b51d19d20
--- /dev/null
+++ b/custom_nodes/ComfyUI_UltimateSDUpscale/gradio.py
@@ -0,0 +1 @@
+# Stub gradio module so that ultimate-upscale.py can be imported without gradio installed
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI_UltimateSDUpscale/modules/devices.py b/custom_nodes/ComfyUI_UltimateSDUpscale/modules/devices.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e37b88d60d4043b032d895bcd9dae5251fd73c2
--- /dev/null
+++ b/custom_nodes/ComfyUI_UltimateSDUpscale/modules/devices.py
@@ -0,0 +1,2 @@
+def torch_gc():
+ pass
diff --git a/custom_nodes/ComfyUI_UltimateSDUpscale/modules/images.py b/custom_nodes/ComfyUI_UltimateSDUpscale/modules/images.py
new file mode 100644
index 0000000000000000000000000000000000000000..502c819a5cd5d7dad43061020f44be7fd01430e9
--- /dev/null
+++ b/custom_nodes/ComfyUI_UltimateSDUpscale/modules/images.py
@@ -0,0 +1,8 @@
+from PIL import Image
+
+
+def flatten(img, bgcolor):
+ # Replace transparency with bgcolor
+    if img.mode == "RGB":
+ return img
+ return Image.alpha_composite(Image.new("RGBA", img.size, bgcolor), img).convert("RGB")
diff --git a/custom_nodes/ComfyUI_UltimateSDUpscale/modules/processing.py b/custom_nodes/ComfyUI_UltimateSDUpscale/modules/processing.py
new file mode 100644
index 0000000000000000000000000000000000000000..f001f0e63c4a472fdb8774240391bfdf8e17936b
--- /dev/null
+++ b/custom_nodes/ComfyUI_UltimateSDUpscale/modules/processing.py
@@ -0,0 +1,163 @@
+from PIL import Image, ImageFilter
+import torch
+import math
+from nodes import common_ksampler, VAEEncode, VAEDecode, VAEDecodeTiled
+from utils import pil_to_tensor, tensor_to_pil, get_crop_region, expand_crop, crop_cond
+from modules import shared
+
+if not hasattr(Image, 'Resampling'):  # For older versions of Pillow
+ Image.Resampling = Image
+
+
+class StableDiffusionProcessing:
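+    # Minimal stand-in for A1111's processing object: it carries only the fields the USDU script reads, plus the ComfyUI sampling inputs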
+
+ def __init__(self, init_img, model, positive, negative, vae, seed, steps, cfg, sampler_name, scheduler, denoise, upscale_by, uniform_tile_mode, tiled_decode):
+ # Variables used by the USDU script
+ self.init_images = [init_img]
+ self.image_mask = None
+ self.mask_blur = 0
+ self.inpaint_full_res_padding = 0
+ self.width = init_img.width
+ self.height = init_img.height
+
+ # ComfyUI Sampler inputs
+ self.model = model
+ self.positive = positive
+ self.negative = negative
+ self.vae = vae
+ self.seed = seed
+ self.steps = steps
+ self.cfg = cfg
+ self.sampler_name = sampler_name
+ self.scheduler = scheduler
+ self.denoise = denoise
+
+ # Variables used only by this script
+ self.init_size = init_img.width, init_img.height
+ self.upscale_by = upscale_by
+ self.uniform_tile_mode = uniform_tile_mode
+ self.tiled_decode = tiled_decode
+ self.vae_decoder = VAEDecode()
+ self.vae_encoder = VAEEncode()
+ self.vae_decoder_tiled = VAEDecodeTiled()
+
+        # Other A1111 variables required by the USDU script that are currently unused here
+ self.extra_generation_params = {}
+
+
+class Processed:
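+    # Lightweight stand-in for A1111's result object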
+
+ def __init__(self, p: StableDiffusionProcessing, images: list, seed: int, info: str):
+ self.images = images
+ self.seed = seed
+ self.info = info
+
+ def infotext(self, p: StableDiffusionProcessing, index):
+ return None
+
+
+def fix_seed(p: StableDiffusionProcessing):
+ pass
+
+
+def process_images(p: StableDiffusionProcessing) -> Processed:
+ # Where the main image generation happens in A1111
+
+ # Setup
+ image_mask = p.image_mask.convert('L')
+ init_image = p.init_images[0]
+
+ # Locate the white region of the mask outlining the tile and add padding
+ crop_region = get_crop_region(image_mask, p.inpaint_full_res_padding)
+
+ if p.uniform_tile_mode:
+ # Expand the crop region to match the processing size ratio and then resize it to the processing size
+ x1, y1, x2, y2 = crop_region
+ crop_width = x2 - x1
+ crop_height = y2 - y1
+ crop_ratio = crop_width / crop_height
+ p_ratio = p.width / p.height
+ if crop_ratio > p_ratio:
+ target_width = crop_width
+ target_height = round(crop_width / p_ratio)
+ else:
+ target_width = round(crop_height * p_ratio)
+ target_height = crop_height
+ crop_region, _ = expand_crop(crop_region, image_mask.width, image_mask.height, target_width, target_height)
+ tile_size = p.width, p.height
+ else:
+        # Use the minimal size that fits the mask; this minimizes tile size but may produce image sizes the model was not trained on
+ x1, y1, x2, y2 = crop_region
+ crop_width = x2 - x1
+ crop_height = y2 - y1
+ target_width = math.ceil(crop_width / 8) * 8
+ target_height = math.ceil(crop_height / 8) * 8
+ crop_region, tile_size = expand_crop(crop_region, image_mask.width,
+ image_mask.height, target_width, target_height)
+
+ # Blur the mask
+ if p.mask_blur > 0:
+ image_mask = image_mask.filter(ImageFilter.GaussianBlur(p.mask_blur))
+
+ # Crop the images to get the tiles that will be used for generation
+ tiles = [img.crop(crop_region) for img in shared.batch]
+
+ # Assume the same size for all images in the batch
+ initial_tile_size = tiles[0].size
+
+ # Resize if necessary
+ for i, tile in enumerate(tiles):
+ if tile.size != tile_size:
+ tiles[i] = tile.resize(tile_size, Image.Resampling.LANCZOS)
+
+ # Crop conditioning
+ positive_cropped = crop_cond(p.positive, crop_region, p.init_size, init_image.size, tile_size)
+ negative_cropped = crop_cond(p.negative, crop_region, p.init_size, init_image.size, tile_size)
+
+ # Encode the image
+ batched_tiles = torch.cat([pil_to_tensor(tile) for tile in tiles], dim=0)
+ (latent,) = p.vae_encoder.encode(p.vae, batched_tiles)
+
+ # Generate samples
+ (samples,) = common_ksampler(p.model, p.seed, p.steps, p.cfg, p.sampler_name,
+ p.scheduler, positive_cropped, negative_cropped, latent, denoise=p.denoise)
+
+ # Decode the sample
+ if not p.tiled_decode:
+ (decoded,) = p.vae_decoder.decode(p.vae, samples)
+ else:
+ print("[USDU] Using tiled decode")
+ (decoded,) = p.vae_decoder_tiled.decode(p.vae, samples, 512) # Default tile size is 512
+
+ # Convert the sample to a PIL image
+ tiles_sampled = [tensor_to_pil(decoded, i) for i in range(len(decoded))]
+
+ for i, tile_sampled in enumerate(tiles_sampled):
+ init_image = shared.batch[i]
+
+ # Resize back to the original size
+ if tile_sampled.size != initial_tile_size:
+ tile_sampled = tile_sampled.resize(initial_tile_size, Image.Resampling.LANCZOS)
+
+ # Put the tile into position
+ image_tile_only = Image.new('RGBA', init_image.size)
+ image_tile_only.paste(tile_sampled, crop_region[:2])
+
+ # Add the mask as an alpha channel
+ # Must make a copy due to the possibility of an edge becoming black
+ temp = image_tile_only.copy()
+ temp.putalpha(image_mask)
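+        # PIL's paste(im, mask) shorthand: an image passed as the second argument is used as the paste mask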
+ image_tile_only.paste(temp, image_tile_only)
+
+ # Add back the tile to the initial image according to the mask in the alpha channel
+ result = init_image.convert('RGBA')
+ result.alpha_composite(image_tile_only)
+
+ # Convert back to RGB
+ result = result.convert('RGB')
+
+ shared.batch[i] = result
+
+ processed = Processed(p, [shared.batch[0]], p.seed, None)
+ return processed
diff --git a/custom_nodes/ComfyUI_UltimateSDUpscale/modules/scripts.py b/custom_nodes/ComfyUI_UltimateSDUpscale/modules/scripts.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cbd134fc07811ffaad9b9f05033603d32c29c52
--- /dev/null
+++ b/custom_nodes/ComfyUI_UltimateSDUpscale/modules/scripts.py
@@ -0,0 +1,2 @@
+class Script:
+ pass
diff --git a/custom_nodes/ComfyUI_UltimateSDUpscale/modules/shared.py b/custom_nodes/ComfyUI_UltimateSDUpscale/modules/shared.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d4bdd7217e8a66944c469d782de2474589801a9
--- /dev/null
+++ b/custom_nodes/ComfyUI_UltimateSDUpscale/modules/shared.py
@@ -0,0 +1,24 @@
+class Options:
+ img2img_background_color = "#ffffff" # Set to white for now
+
+
+class State:
+ interrupted = False
+
+ def begin(self):
+ pass
+
+ def end(self):
+ pass
+
+
+opts = Options()
+state = State()
+
+# Will only ever hold 1 upscaler
+sd_upscalers = [None]
+# The upscaler usable by ComfyUI nodes
+actual_upscaler = None
+
+# Batch of images to upscale
+batch = None
diff --git a/custom_nodes/ComfyUI_UltimateSDUpscale/modules/upscaler.py b/custom_nodes/ComfyUI_UltimateSDUpscale/modules/upscaler.py
new file mode 100644
index 0000000000000000000000000000000000000000..b05f547fbc9af775ec528c744b59fa872beb7fc5
--- /dev/null
+++ b/custom_nodes/ComfyUI_UltimateSDUpscale/modules/upscaler.py
@@ -0,0 +1,30 @@
+from PIL import Image
+from utils import tensor_to_pil, pil_to_tensor
+from comfy_extras.nodes_upscale_model import ImageUpscaleWithModel
+from modules import shared
+
+if not hasattr(Image, 'Resampling'):  # For older versions of Pillow
+ Image.Resampling = Image
+
+
+class Upscaler:
+
+    def _upscale(self, img: Image.Image, scale):
+        if shared.actual_upscaler is None:
+            return img.resize((round(img.width * scale), round(img.height * scale)), Image.Resampling.NEAREST)
+ tensor = pil_to_tensor(img)
+ image_upscale_node = ImageUpscaleWithModel()
+ (upscaled,) = image_upscale_node.upscale(shared.actual_upscaler, tensor)
+ return tensor_to_pil(upscaled)
+
+    def upscale(self, img: Image.Image, scale, selected_model: str = None):
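+        # Upscales every image in the shared batch; the img parameter is effectively unused because the comprehension iterates shared.batch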
+ shared.batch = [self._upscale(img, scale) for img in shared.batch]
+ return shared.batch[0]
+
+
+class UpscalerData:
+ name = ""
+ data_path = ""
+
+ def __init__(self):
+ self.scaler = Upscaler()
diff --git a/custom_nodes/ComfyUI_UltimateSDUpscale/nodes.py b/custom_nodes/ComfyUI_UltimateSDUpscale/nodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..6dc5292b5845cc030ef25344d6db61e80cfe1219
--- /dev/null
+++ b/custom_nodes/ComfyUI_UltimateSDUpscale/nodes.py
@@ -0,0 +1,191 @@
+# ComfyUI Node for Ultimate SD Upscale by Coyote-A: https://github.com/Coyote-A/ultimate-upscale-for-automatic1111
+
+import torch
+import comfy
+from usdu_patch import usdu
+from utils import tensor_to_pil, pil_to_tensor
+from modules.processing import StableDiffusionProcessing
+import modules.shared as shared
+from modules.upscaler import UpscalerData
+
+MAX_RESOLUTION = 8192
+# The modes available for Ultimate SD Upscale
+MODES = {
+ "Linear": usdu.USDUMode.LINEAR,
+ "Chess": usdu.USDUMode.CHESS,
+ "None": usdu.USDUMode.NONE,
+}
+# The seam fix modes
+SEAM_FIX_MODES = {
+ "None": usdu.USDUSFMode.NONE,
+ "Band Pass": usdu.USDUSFMode.BAND_PASS,
+ "Half Tile": usdu.USDUSFMode.HALF_TILE,
+ "Half Tile + Intersections": usdu.USDUSFMode.HALF_TILE_PLUS_INTERSECTIONS,
+}
+
+
+def USDU_base_inputs():
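+    # Ordered (name, spec) pairs shared by both node variants; kept as a list so entries can be removed or renamed per node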
+ return [
+ ("image", ("IMAGE",)),
+ # Sampling Params
+ ("model", ("MODEL",)),
+ ("positive", ("CONDITIONING",)),
+ ("negative", ("CONDITIONING",)),
+ ("vae", ("VAE",)),
+ ("upscale_by", ("FLOAT", {"default": 2, "min": 0.05, "max": 4, "step": 0.05})),
+ ("seed", ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff})),
+ ("steps", ("INT", {"default": 20, "min": 1, "max": 10000, "step": 1})),
+ ("cfg", ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0})),
+ ("sampler_name", (comfy.samplers.KSampler.SAMPLERS,)),
+ ("scheduler", (comfy.samplers.KSampler.SCHEDULERS,)),
+ ("denoise", ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.01})),
+ # Upscale Params
+ ("upscale_model", ("UPSCALE_MODEL",)),
+ ("mode_type", (list(MODES.keys()),)),
+ ("tile_width", ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8})),
+ ("tile_height", ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8})),
+ ("mask_blur", ("INT", {"default": 8, "min": 0, "max": 64, "step": 1})),
+ ("tile_padding", ("INT", {"default": 32, "min": 0, "max": MAX_RESOLUTION, "step": 8})),
+ # Seam fix params
+ ("seam_fix_mode", (list(SEAM_FIX_MODES.keys()),)),
+ ("seam_fix_denoise", ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01})),
+ ("seam_fix_width", ("INT", {"default": 64, "min": 0, "max": MAX_RESOLUTION, "step": 8})),
+ ("seam_fix_mask_blur", ("INT", {"default": 8, "min": 0, "max": 64, "step": 1})),
+ ("seam_fix_padding", ("INT", {"default": 16, "min": 0, "max": MAX_RESOLUTION, "step": 8})),
+ # Misc
+ ("force_uniform_tiles", ("BOOLEAN", {"default": True})),
+ ("tiled_decode", ("BOOLEAN", {"default": False})),
+ ]
+
+
+def prepare_inputs(required: list, optional: list = None):
+ inputs = {}
+ if required:
+ inputs["required"] = {}
+        for name, input_type in required:
+            inputs["required"][name] = input_type
+    if optional:
+        inputs["optional"] = {}
+        for name, input_type in optional:
+            inputs["optional"][name] = input_type
+ return inputs
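+
+# For illustration (hypothetical inputs, not part of the node definitions):
+#   prepare_inputs([("image", ("IMAGE",))], [("mask", ("MASK",))])
+#   == {"required": {"image": ("IMAGE",)}, "optional": {"mask": ("MASK",)}}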
+
+
+def remove_input(inputs: list, input_name: str):
+ for i, (n, _) in enumerate(inputs):
+ if n == input_name:
+ del inputs[i]
+ break
+
+
+def rename_input(inputs: list, old_name: str, new_name: str):
+ for i, (n, t) in enumerate(inputs):
+ if n == old_name:
+ inputs[i] = (new_name, t)
+ break
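+
+# Example of deriving a reduced input list with the helpers above, as done by
+# UltimateSDUpscaleNoUpscale below:
+#   inputs = USDU_base_inputs()
+#   remove_input(inputs, "upscale_model")
+#   rename_input(inputs, "image", "upscaled_image")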
+
+
+class UltimateSDUpscale:
+ @classmethod
+ def INPUT_TYPES(s):
+ return prepare_inputs(USDU_base_inputs())
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "upscale"
+ CATEGORY = "image/upscaling"
+
+ def upscale(self, image, model, positive, negative, vae, upscale_by, seed,
+ steps, cfg, sampler_name, scheduler, denoise, upscale_model,
+ mode_type, tile_width, tile_height, mask_blur, tile_padding,
+ seam_fix_mode, seam_fix_denoise, seam_fix_mask_blur,
+ seam_fix_width, seam_fix_padding, force_uniform_tiles, tiled_decode):
+ #
+ # Set up A1111 patches
+ #
+
+ # Upscaler
+ # An object that the script works with
+ shared.sd_upscalers[0] = UpscalerData()
+        # Where the actual upscaler is stored; used when the script upscales through the Upscaler in UpscalerData
+ shared.actual_upscaler = upscale_model
+
+ # Set the batch of images
+ shared.batch = [tensor_to_pil(image, i) for i in range(len(image))]
+
+ # Processing
+ sdprocessing = StableDiffusionProcessing(
+ tensor_to_pil(image), model, positive, negative, vae,
+ seed, steps, cfg, sampler_name, scheduler, denoise, upscale_by, force_uniform_tiles, tiled_decode
+ )
+
+ #
+ # Running the script
+ #
+ script = usdu.Script()
+ processed = script.run(p=sdprocessing, _=None, tile_width=tile_width, tile_height=tile_height,
+ mask_blur=mask_blur, padding=tile_padding, seams_fix_width=seam_fix_width,
+ seams_fix_denoise=seam_fix_denoise, seams_fix_padding=seam_fix_padding,
+ upscaler_index=0, save_upscaled_image=False, redraw_mode=MODES[mode_type],
+ save_seams_fix_image=False, seams_fix_mask_blur=seam_fix_mask_blur,
+ seams_fix_type=SEAM_FIX_MODES[seam_fix_mode], target_size_type=2,
+ custom_width=None, custom_height=None, custom_scale=upscale_by)
+
+ # Return the resulting images
+ images = [pil_to_tensor(img) for img in shared.batch]
+ tensor = torch.cat(images, dim=0)
+ return (tensor,)
+
+
+class UltimateSDUpscaleNoUpscale:
+ @classmethod
+ def INPUT_TYPES(s):
+ required = USDU_base_inputs()
+ remove_input(required, "upscale_model")
+ remove_input(required, "upscale_by")
+ rename_input(required, "image", "upscaled_image")
+ return prepare_inputs(required)
+
+ RETURN_TYPES = ("IMAGE",)
+ FUNCTION = "upscale"
+ CATEGORY = "image/upscaling"
+
+ def upscale(self, upscaled_image, model, positive, negative, vae, seed,
+ steps, cfg, sampler_name, scheduler, denoise,
+ mode_type, tile_width, tile_height, mask_blur, tile_padding,
+ seam_fix_mode, seam_fix_denoise, seam_fix_mask_blur,
+ seam_fix_width, seam_fix_padding, force_uniform_tiles, tiled_decode):
+
+ shared.sd_upscalers[0] = UpscalerData()
+ shared.actual_upscaler = None
+ shared.batch = [tensor_to_pil(upscaled_image, i) for i in range(len(upscaled_image))]
+ sdprocessing = StableDiffusionProcessing(
+ tensor_to_pil(upscaled_image), model, positive, negative, vae,
+ seed, steps, cfg, sampler_name, scheduler, denoise, 1, force_uniform_tiles, tiled_decode
+ )
+
+ script = usdu.Script()
+ processed = script.run(p=sdprocessing, _=None, tile_width=tile_width, tile_height=tile_height,
+ mask_blur=mask_blur, padding=tile_padding, seams_fix_width=seam_fix_width,
+ seams_fix_denoise=seam_fix_denoise, seams_fix_padding=seam_fix_padding,
+ upscaler_index=0, save_upscaled_image=False, redraw_mode=MODES[mode_type],
+ save_seams_fix_image=False, seams_fix_mask_blur=seam_fix_mask_blur,
+ seams_fix_type=SEAM_FIX_MODES[seam_fix_mode], target_size_type=2,
+ custom_width=None, custom_height=None, custom_scale=1)
+
+ images = [pil_to_tensor(img) for img in shared.batch]
+ tensor = torch.cat(images, dim=0)
+ return (tensor,)
+
+
+# A dictionary that contains all nodes you want to export with their names
+# NOTE: names should be globally unique
+NODE_CLASS_MAPPINGS = {
+ "UltimateSDUpscale": UltimateSDUpscale,
+ "UltimateSDUpscaleNoUpscale": UltimateSDUpscaleNoUpscale
+}
+
+# A dictionary that contains the friendly/humanly readable titles for the nodes
+NODE_DISPLAY_NAME_MAPPINGS = {
+ "UltimateSDUpscale": "Ultimate SD Upscale",
+ "UltimateSDUpscaleNoUpscale": "Ultimate SD Upscale (No Upscale)"
+}
diff --git a/custom_nodes/ComfyUI_UltimateSDUpscale/repositories/__init__.py b/custom_nodes/ComfyUI_UltimateSDUpscale/repositories/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a30c56508421465568ab2697a638bafb3e9326ca
--- /dev/null
+++ b/custom_nodes/ComfyUI_UltimateSDUpscale/repositories/__init__.py
@@ -0,0 +1,14 @@
+import os
+import sys
+import importlib.util
+
+repositories_path = os.path.dirname(os.path.realpath(__file__))
+
+# Import the script
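+# The script's file name contains a hyphen, so it cannot be imported with a
+# regular import statement; it is loaded manually via importlib instead.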
+script_name = os.path.join("scripts", "ultimate-upscale")
+repo_name = "ultimate_sd_upscale"
+script_path = os.path.join(repositories_path, repo_name, f"{script_name}.py")
+spec = importlib.util.spec_from_file_location(script_name, script_path)
+ultimate_upscale = importlib.util.module_from_spec(spec)
+sys.modules[script_name] = ultimate_upscale
+spec.loader.exec_module(ultimate_upscale)
diff --git a/custom_nodes/ComfyUI_UltimateSDUpscale/repositories/ultimate_sd_upscale/.gitignore b/custom_nodes/ComfyUI_UltimateSDUpscale/repositories/ultimate_sd_upscale/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..600d2d33badf45cc068e01d2e3c837e11c417bc4
--- /dev/null
+++ b/custom_nodes/ComfyUI_UltimateSDUpscale/repositories/ultimate_sd_upscale/.gitignore
@@ -0,0 +1 @@
+.vscode
\ No newline at end of file
diff --git a/custom_nodes/ComfyUI_UltimateSDUpscale/repositories/ultimate_sd_upscale/LICENSE b/custom_nodes/ComfyUI_UltimateSDUpscale/repositories/ultimate_sd_upscale/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..ebfe3f5212b6396c75ee993947fe1ebdd6a91207
--- /dev/null
+++ b/custom_nodes/ComfyUI_UltimateSDUpscale/repositories/ultimate_sd_upscale/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ ultimate-upscale-for-automatic1111
+ Copyright (C) 2023 Mirzam
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ Copyright (C) 2023 Mirzam
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+.
diff --git a/custom_nodes/ComfyUI_UltimateSDUpscale/repositories/ultimate_sd_upscale/README.md b/custom_nodes/ComfyUI_UltimateSDUpscale/repositories/ultimate_sd_upscale/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d158139e43aa9f19a85b013299fd8fd6b03d405f
--- /dev/null
+++ b/custom_nodes/ComfyUI_UltimateSDUpscale/repositories/ultimate_sd_upscale/README.md
@@ -0,0 +1,43 @@
+# Ultimate SD Upscale extension for [AUTOMATIC1111 Stable Diffusion web UI](https://github.com/AUTOMATIC1111/stable-diffusion-webui)
+This extension lets you use a high denoise strength (0.3-0.5) without spawning many artifacts. It works on any video card: with a 512x512 tile size, the image will still converge.
+
+News channel: https://t.me/usdunews
+
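+The script splits the upscaled canvas into a grid of tiles and redraws each
+tile with img2img inpainting. A minimal sketch of the grid arithmetic it uses
+(the sizes below are illustrative):
+
+```python
+import math
+
+# Assumed example: a 2048x2048 target canvas redrawn with 512x512 tiles.
+width, height = 2048, 2048
+tile_width, tile_height = 512, 512
+
+rows = math.ceil(height / tile_height)  # 4
+cols = math.ceil(width / tile_width)    # 4
+print(f"Grid: {rows}x{cols}, tiles: {rows * cols}")  # Grid: 4x4, tiles: 16
+```
+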
+# Instructions
+All instructions can be found on the project's [wiki](https://github.com/Coyote-A/ultimate-upscale-for-automatic1111/wiki).
+
+# Examples
+More examples on the [wiki page](https://github.com/Coyote-A/ultimate-upscale-for-automatic1111/wiki/Examples).
+
+### E1
+Original image
+
+2k upscaled. **Tile size**: 512, **Padding**: 32, **Mask blur**: 16, **Denoise**: 0.4
+
+### E2
+Original image
+
+2k upscaled. **Tile size**: 768, **Padding**: 55, **Mask blur**: 20, **Denoise**: 0.35
+
+4k upscaled. **Tile size**: 768, **Padding**: 55, **Mask blur**: 20, **Denoise**: 0.35
+
+### E3
+Original image
+
+4k upscaled. **Tile size**: 768, **Padding**: 55, **Mask blur**: 20, **Denoise**: 0.4
+
diff --git a/custom_nodes/ComfyUI_UltimateSDUpscale/repositories/ultimate_sd_upscale/scripts/ultimate-upscale.py b/custom_nodes/ComfyUI_UltimateSDUpscale/repositories/ultimate_sd_upscale/scripts/ultimate-upscale.py
new file mode 100644
index 0000000000000000000000000000000000000000..7bb7ae02b4629b9b171e3ff027851910e5f0ea43
--- /dev/null
+++ b/custom_nodes/ComfyUI_UltimateSDUpscale/repositories/ultimate_sd_upscale/scripts/ultimate-upscale.py
@@ -0,0 +1,557 @@
+import math
+import gradio as gr
+from PIL import Image, ImageDraw, ImageOps
+from modules import processing, shared, images, devices, scripts
+from modules.processing import StableDiffusionProcessing
+from modules.processing import Processed
+from modules.shared import opts, state
+from enum import Enum
+
+class USDUMode(Enum):
+ LINEAR = 0
+ CHESS = 1
+ NONE = 2
+
+class USDUSFMode(Enum):
+ NONE = 0
+ BAND_PASS = 1
+ HALF_TILE = 2
+ HALF_TILE_PLUS_INTERSECTIONS = 3
+
+class USDUpscaler:
+
+    def __init__(self, p, image, upscaler_index: int, save_redraw, save_seams_fix, tile_width, tile_height) -> None:
+        self.p: StableDiffusionProcessing = p
+        self.image: Image.Image = image
+ self.scale_factor = math.ceil(max(p.width, p.height) / max(image.width, image.height))
+ self.upscaler = shared.sd_upscalers[upscaler_index]
+ self.redraw = USDURedraw()
+ self.redraw.save = save_redraw
+ self.redraw.tile_width = tile_width if tile_width > 0 else tile_height
+ self.redraw.tile_height = tile_height if tile_height > 0 else tile_width
+ self.seams_fix = USDUSeamsFix()
+ self.seams_fix.save = save_seams_fix
+ self.seams_fix.tile_width = tile_width if tile_width > 0 else tile_height
+ self.seams_fix.tile_height = tile_height if tile_height > 0 else tile_width
+ self.initial_info = None
+ self.rows = math.ceil(self.p.height / self.redraw.tile_height)
+ self.cols = math.ceil(self.p.width / self.redraw.tile_width)
+
+ def get_factor(self, num):
+        # Every branch returns, so no elif chain is needed
+ if num == 1:
+ return 2
+ if num % 4 == 0:
+ return 4
+ if num % 3 == 0:
+ return 3
+ if num % 2 == 0:
+ return 2
+ return 0
+
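+    # Example: a x6 upscale decomposes into the factors [3, 2]; x5 has no
+    # factor in {2, 3, 4}, so scale_factor is bumped until one is found.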
+ def get_factors(self):
+ scales = []
+ current_scale = 1
+ current_scale_factor = self.get_factor(self.scale_factor)
+ while current_scale_factor == 0:
+ self.scale_factor += 1
+ current_scale_factor = self.get_factor(self.scale_factor)
+ while current_scale < self.scale_factor:
+ current_scale_factor = self.get_factor(self.scale_factor // current_scale)
+ scales.append(current_scale_factor)
+ current_scale = current_scale * current_scale_factor
+ if current_scale_factor == 0:
+ break
+ self.scales = enumerate(scales)
+
+ def upscale(self):
+ # Log info
+ print(f"Canva size: {self.p.width}x{self.p.height}")
+ print(f"Image size: {self.image.width}x{self.image.height}")
+ print(f"Scale factor: {self.scale_factor}")
+ # Check upscaler is not empty
+ if self.upscaler.name == "None":
+ self.image = self.image.resize((self.p.width, self.p.height), resample=Image.LANCZOS)
+ return
+ # Get list with scale factors
+ self.get_factors()
+ # Upscaling image over all factors
+ for index, value in self.scales:
+ print(f"Upscaling iteration {index+1} with scale factor {value}")
+ self.image = self.upscaler.scaler.upscale(self.image, value, self.upscaler.data_path)
+ # Resize image to set values
+ self.image = self.image.resize((self.p.width, self.p.height), resample=Image.LANCZOS)
+
+ def setup_redraw(self, redraw_mode, padding, mask_blur):
+ self.redraw.mode = USDUMode(redraw_mode)
+ self.redraw.enabled = self.redraw.mode != USDUMode.NONE
+ self.redraw.padding = padding
+ self.p.mask_blur = mask_blur
+
+ def setup_seams_fix(self, padding, denoise, mask_blur, width, mode):
+ self.seams_fix.padding = padding
+ self.seams_fix.denoise = denoise
+ self.seams_fix.mask_blur = mask_blur
+ self.seams_fix.width = width
+ self.seams_fix.mode = USDUSFMode(mode)
+ self.seams_fix.enabled = self.seams_fix.mode != USDUSFMode.NONE
+
+ def save_image(self):
+ if type(self.p.prompt) != list:
+ images.save_image(self.image, self.p.outpath_samples, "", self.p.seed, self.p.prompt, opts.samples_format, info=self.initial_info, p=self.p)
+ else:
+ images.save_image(self.image, self.p.outpath_samples, "", self.p.seed, self.p.prompt[0], opts.samples_format, info=self.initial_info, p=self.p)
+
+ def calc_jobs_count(self):
+ redraw_job_count = (self.rows * self.cols) if self.redraw.enabled else 0
+ seams_job_count = 0
+ if self.seams_fix.mode == USDUSFMode.BAND_PASS:
+ seams_job_count = self.rows + self.cols - 2
+ elif self.seams_fix.mode == USDUSFMode.HALF_TILE:
+ seams_job_count = self.rows * (self.cols - 1) + (self.rows - 1) * self.cols
+ elif self.seams_fix.mode == USDUSFMode.HALF_TILE_PLUS_INTERSECTIONS:
+ seams_job_count = self.rows * (self.cols - 1) + (self.rows - 1) * self.cols + (self.rows - 1) * (self.cols - 1)
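+        # e.g. a 3x3 grid with redraw enabled and "Half Tile" seams fix
+        # queues 9 redraw jobs + (3*2 + 2*3) = 12 seam jobs = 21 in total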
+
+ state.job_count = redraw_job_count + seams_job_count
+
+ def print_info(self):
+ print(f"Tile size: {self.redraw.tile_width}x{self.redraw.tile_height}")
+ print(f"Tiles amount: {self.rows * self.cols}")
+ print(f"Grid: {self.rows}x{self.cols}")
+ print(f"Redraw enabled: {self.redraw.enabled}")
+ print(f"Seams fix mode: {self.seams_fix.mode.name}")
+
+ def add_extra_info(self):
+ self.p.extra_generation_params["Ultimate SD upscale upscaler"] = self.upscaler.name
+ self.p.extra_generation_params["Ultimate SD upscale tile_width"] = self.redraw.tile_width
+ self.p.extra_generation_params["Ultimate SD upscale tile_height"] = self.redraw.tile_height
+ self.p.extra_generation_params["Ultimate SD upscale mask_blur"] = self.p.mask_blur
+ self.p.extra_generation_params["Ultimate SD upscale padding"] = self.redraw.padding
+
+ def process(self):
+ state.begin()
+ self.calc_jobs_count()
+ self.result_images = []
+ if self.redraw.enabled:
+ self.image = self.redraw.start(self.p, self.image, self.rows, self.cols)
+ self.initial_info = self.redraw.initial_info
+ self.result_images.append(self.image)
+ if self.redraw.save:
+ self.save_image()
+
+ if self.seams_fix.enabled:
+ self.image = self.seams_fix.start(self.p, self.image, self.rows, self.cols)
+ self.initial_info = self.seams_fix.initial_info
+ self.result_images.append(self.image)
+ if self.seams_fix.save:
+ self.save_image()
+ state.end()
+
+class USDURedraw:
+
+ def init_draw(self, p, width, height):
+ p.inpaint_full_res = True
+ p.inpaint_full_res_padding = self.padding
+ p.width = math.ceil((self.tile_width+self.padding) / 64) * 64
+ p.height = math.ceil((self.tile_height+self.padding) / 64) * 64
+ mask = Image.new("L", (width, height), "black")
+ draw = ImageDraw.Draw(mask)
+ return mask, draw
+
+ def calc_rectangle(self, xi, yi):
+ x1 = xi * self.tile_width
+ y1 = yi * self.tile_height
+ x2 = xi * self.tile_width + self.tile_width
+ y2 = yi * self.tile_height + self.tile_height
+
+ return x1, y1, x2, y2
+
+ def linear_process(self, p, image, rows, cols):
+ mask, draw = self.init_draw(p, image.width, image.height)
+ for yi in range(rows):
+ for xi in range(cols):
+ if state.interrupted:
+ break
+ draw.rectangle(self.calc_rectangle(xi, yi), fill="white")
+ p.init_images = [image]
+ p.image_mask = mask
+ processed = processing.process_images(p)
+ draw.rectangle(self.calc_rectangle(xi, yi), fill="black")
+ if (len(processed.images) > 0):
+ image = processed.images[0]
+
+ p.width = image.width
+ p.height = image.height
+ self.initial_info = processed.infotext(p, 0)
+
+ return image
+
+ def chess_process(self, p, image, rows, cols):
+ mask, draw = self.init_draw(p, image.width, image.height)
+ tiles = []
+ # calc tiles colors
+ for yi in range(rows):
+ for xi in range(cols):
+ if state.interrupted:
+ break
+ if xi == 0:
+ tiles.append([])
+ color = xi % 2 == 0
+ if yi > 0 and yi % 2 != 0:
+ color = not color
+ tiles[yi].append(color)
+
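+        # True cells form one colour of a checkerboard, e.g. for a 3x3 grid:
+        # [[T, F, T], [F, T, F], [T, F, T]]. The first pass below redraws the
+        # True tiles (flipping each flag as it goes), the second pass the rest.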
+ for yi in range(len(tiles)):
+ for xi in range(len(tiles[yi])):
+ if state.interrupted:
+ break
+ if not tiles[yi][xi]:
+ tiles[yi][xi] = not tiles[yi][xi]
+ continue
+ tiles[yi][xi] = not tiles[yi][xi]
+ draw.rectangle(self.calc_rectangle(xi, yi), fill="white")
+ p.init_images = [image]
+ p.image_mask = mask
+ processed = processing.process_images(p)
+ draw.rectangle(self.calc_rectangle(xi, yi), fill="black")
+ if (len(processed.images) > 0):
+ image = processed.images[0]
+
+ for yi in range(len(tiles)):
+ for xi in range(len(tiles[yi])):
+ if state.interrupted:
+ break
+ if not tiles[yi][xi]:
+ continue
+ draw.rectangle(self.calc_rectangle(xi, yi), fill="white")
+ p.init_images = [image]
+ p.image_mask = mask
+ processed = processing.process_images(p)
+ draw.rectangle(self.calc_rectangle(xi, yi), fill="black")
+ if (len(processed.images) > 0):
+ image = processed.images[0]
+
+ p.width = image.width
+ p.height = image.height
+ self.initial_info = processed.infotext(p, 0)
+
+ return image
+
+ def start(self, p, image, rows, cols):
+ self.initial_info = None
+ if self.mode == USDUMode.LINEAR:
+ return self.linear_process(p, image, rows, cols)
+ if self.mode == USDUMode.CHESS:
+ return self.chess_process(p, image, rows, cols)
+
+class USDUSeamsFix:
+
+ def init_draw(self, p):
+ self.initial_info = None
+ p.width = math.ceil((self.tile_width+self.padding) / 64) * 64
+ p.height = math.ceil((self.tile_height+self.padding) / 64) * 64
+
+ def half_tile_process(self, p, image, rows, cols):
+
+ self.init_draw(p)
+ processed = None
+
+ gradient = Image.linear_gradient("L")
+ row_gradient = Image.new("L", (self.tile_width, self.tile_height), "black")
+ row_gradient.paste(gradient.resize(
+ (self.tile_width, self.tile_height//2), resample=Image.BICUBIC), (0, 0))
+ row_gradient.paste(gradient.rotate(180).resize(
+ (self.tile_width, self.tile_height//2), resample=Image.BICUBIC),
+ (0, self.tile_height//2))
+ col_gradient = Image.new("L", (self.tile_width, self.tile_height), "black")
+ col_gradient.paste(gradient.rotate(90).resize(
+ (self.tile_width//2, self.tile_height), resample=Image.BICUBIC), (0, 0))
+ col_gradient.paste(gradient.rotate(270).resize(
+ (self.tile_width//2, self.tile_height), resample=Image.BICUBIC), (self.tile_width//2, 0))
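+        # Each gradient is white at the mask centre and black at its edges;
+        # pasted half a tile off-grid, the white band lands on the seam
+        # between tiles, so each redraw blends into its neighbours.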
+
+ p.denoising_strength = self.denoise
+ p.mask_blur = self.mask_blur
+
+ for yi in range(rows-1):
+ for xi in range(cols):
+ if state.interrupted:
+ break
+ p.width = self.tile_width
+ p.height = self.tile_height
+ p.inpaint_full_res = True
+ p.inpaint_full_res_padding = self.padding
+ mask = Image.new("L", (image.width, image.height), "black")
+ mask.paste(row_gradient, (xi*self.tile_width, yi*self.tile_height + self.tile_height//2))
+
+ p.init_images = [image]
+ p.image_mask = mask
+ processed = processing.process_images(p)
+ if (len(processed.images) > 0):
+ image = processed.images[0]
+
+ for yi in range(rows):
+ for xi in range(cols-1):
+ if state.interrupted:
+ break
+ p.width = self.tile_width
+ p.height = self.tile_height
+ p.inpaint_full_res = True
+ p.inpaint_full_res_padding = self.padding
+ mask = Image.new("L", (image.width, image.height), "black")
+ mask.paste(col_gradient, (xi*self.tile_width+self.tile_width//2, yi*self.tile_height))
+
+ p.init_images = [image]
+ p.image_mask = mask
+ processed = processing.process_images(p)
+ if (len(processed.images) > 0):
+ image = processed.images[0]
+
+ p.width = image.width
+ p.height = image.height
+ if processed is not None:
+ self.initial_info = processed.infotext(p, 0)
+
+ return image
+
+ def half_tile_process_corners(self, p, image, rows, cols):
+ fixed_image = self.half_tile_process(p, image, rows, cols)
+ processed = None
+ self.init_draw(p)
+ gradient = Image.radial_gradient("L").resize(
+ (self.tile_width, self.tile_height), resample=Image.BICUBIC)
+ gradient = ImageOps.invert(gradient)
+ p.denoising_strength = self.denoise
+ #p.mask_blur = 0
+ p.mask_blur = self.mask_blur
+
+ for yi in range(rows-1):
+ for xi in range(cols-1):
+ if state.interrupted:
+ break
+ p.width = self.tile_width
+ p.height = self.tile_height
+ p.inpaint_full_res = True
+ p.inpaint_full_res_padding = 0
+ mask = Image.new("L", (fixed_image.width, fixed_image.height), "black")
+ mask.paste(gradient, (xi*self.tile_width + self.tile_width//2,
+ yi*self.tile_height + self.tile_height//2))
+
+ p.init_images = [fixed_image]
+ p.image_mask = mask
+ processed = processing.process_images(p)
+ if (len(processed.images) > 0):
+ fixed_image = processed.images[0]
+
+ p.width = fixed_image.width
+ p.height = fixed_image.height
+ if processed is not None:
+ self.initial_info = processed.infotext(p, 0)
+
+ return fixed_image
+
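+    # Note: start() calls this as (p, image, rows, cols), so here `cols`
+    # receives the row count and `rows` the column count; the loops below are
+    # written against that swapped order.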
+ def band_pass_process(self, p, image, cols, rows):
+
+ self.init_draw(p)
+ processed = None
+
+ p.denoising_strength = self.denoise
+ p.mask_blur = 0
+
+ gradient = Image.linear_gradient("L")
+ mirror_gradient = Image.new("L", (256, 256), "black")
+ mirror_gradient.paste(gradient.resize((256, 128), resample=Image.BICUBIC), (0, 0))
+ mirror_gradient.paste(gradient.rotate(180).resize((256, 128), resample=Image.BICUBIC), (0, 128))
+
+ row_gradient = mirror_gradient.resize((image.width, self.width), resample=Image.BICUBIC)
+ col_gradient = mirror_gradient.rotate(90).resize((self.width, image.height), resample=Image.BICUBIC)
+
+ for xi in range(1, rows):
+ if state.interrupted:
+ break
+ p.width = self.width + self.padding * 2
+ p.height = image.height
+ p.inpaint_full_res = True
+ p.inpaint_full_res_padding = self.padding
+ mask = Image.new("L", (image.width, image.height), "black")
+ mask.paste(col_gradient, (xi * self.tile_width - self.width // 2, 0))
+
+ p.init_images = [image]
+ p.image_mask = mask
+ processed = processing.process_images(p)
+ if (len(processed.images) > 0):
+ image = processed.images[0]
+ for yi in range(1, cols):
+ if state.interrupted:
+ break
+ p.width = image.width
+ p.height = self.width + self.padding * 2
+ p.inpaint_full_res = True
+ p.inpaint_full_res_padding = self.padding
+ mask = Image.new("L", (image.width, image.height), "black")
+ mask.paste(row_gradient, (0, yi * self.tile_height - self.width // 2))
+
+ p.init_images = [image]
+ p.image_mask = mask
+ processed = processing.process_images(p)
+ if (len(processed.images) > 0):
+ image = processed.images[0]
+
+ p.width = image.width
+ p.height = image.height
+ if processed is not None:
+ self.initial_info = processed.infotext(p, 0)
+
+ return image
+
+ def start(self, p, image, rows, cols):
+ if USDUSFMode(self.mode) == USDUSFMode.BAND_PASS:
+ return self.band_pass_process(p, image, rows, cols)
+ elif USDUSFMode(self.mode) == USDUSFMode.HALF_TILE:
+ return self.half_tile_process(p, image, rows, cols)
+ elif USDUSFMode(self.mode) == USDUSFMode.HALF_TILE_PLUS_INTERSECTIONS:
+ return self.half_tile_process_corners(p, image, rows, cols)
+ else:
+ return image
+
+class Script(scripts.Script):
+ def title(self):
+ return "Ultimate SD upscale"
+
+ def show(self, is_img2img):
+ return is_img2img
+
+ def ui(self, is_img2img):
+
+ target_size_types = [
+ "From img2img2 settings",
+ "Custom size",
+ "Scale from image size"
+ ]
+
+ seams_fix_types = [
+ "None",
+ "Band pass",
+ "Half tile offset pass",
+ "Half tile offset pass + intersections"
+ ]
+
+ redrow_modes = [
+ "Linear",
+ "Chess",
+ "None"
+ ]
+
+ info = gr.HTML(
+            "Will upscale the image depending on the selected target size type