"""Regional style-model apply node for ComfyUI (fluxtapoz)."""

import torch


class RegionalStyleModelApplyNode:
    """Applies a style model's image conditioning, optionally appending it to existing conditioning."""

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "style_model": ("STYLE_MODEL",),
                "clip_vision_output": ("CLIP_VISION_OUTPUT",),
                "strength": ("FLOAT", {"default": 1.0, "min": -1000.0, "max": 1000.0, "step": 0.01, "round": 0.01}),
            },
            "optional": {
                "conditioning": ("CONDITIONING",),
            },
        }
    RETURN_TYPES = ("CONDITIONING",)
    FUNCTION = "apply_stylemodel"

    CATEGORY = "fluxtapoz"

    def apply_stylemodel(self, clip_vision_output, style_model, strength, conditioning=None):
        # Project the CLIP vision output into style tokens, merge the leading
        # dims into a single token axis, (B, T, D) -> (1, B*T, D), and scale.
        cond = style_model.get_cond(clip_vision_output).flatten(start_dim=0, end_dim=1).unsqueeze(dim=0) * strength
        c = []
        if conditioning is not None:
            # Append the style tokens to each existing conditioning entry along
            # the token axis, keeping a copy of that entry's extras dict.
            for t in conditioning:
                n = [torch.cat((t[0], cond), dim=1), t[1].copy()]
                c.append(n)
        else:
            # No prior conditioning: emit the style tokens alone. ComfyUI
            # conditioning entries are [tensor, dict], so use an empty dict
            # rather than None, which would break consumers that call .copy()
            # or .get() on the extras.
            c.append([cond, {}])
        return (c,)
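

# A minimal registration sketch, assuming this node is exported from this file
# via ComfyUI's standard NODE_CLASS_MAPPINGS convention; in many node packs the
# mappings live in the package's __init__.py instead. The key and display name
# below are hypothetical choices, not taken from the original source.
NODE_CLASS_MAPPINGS = {
    "RegionalStyleModelApply": RegionalStyleModelApplyNode,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "RegionalStyleModelApply": "Apply Regional Style Model",
}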