response | instruction
---|---
Returns the number of trainable parameters in a module. | def n_params(module):
"""Returns the number of trainable parameters in a module."""
return sum(p.numel() for p in module.parameters()) |
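A quick usage sketch (the layer below is illustrative, not from the source):
import torch.nn as nn
layer = nn.Linear(128, 64)
print(n_params(layer))  # 128 * 64 weights + 64 biases = 8256 trainable parameters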
Downloads a file if it does not exist, optionally checking its SHA-256 hash. | def download_file(path, url, digest=None):
"""Downloads a file if it does not exist, optionally checking its SHA-256 hash."""
path = Path(path)
path.parent.mkdir(parents=True, exist_ok=True)
if not path.exists():
with urllib.request.urlopen(url) as response, open(path, 'wb') as f:
shutil.copyfileobj(response, f)
if digest is not None:
file_digest = hashlib.sha256(open(path, 'rb').read()).hexdigest()
if digest != file_digest:
raise OSError(f'hash of {path} (url: {url}) failed to validate')
return path |
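A hedged usage sketch; the path and URL are illustrative placeholders:
# Downloads once; subsequent calls return the cached path immediately.
weights = download_file('checkpoints/weights.pth', 'https://example.com/weights.pth')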
A context manager that places a model into training mode and restores
the previous mode on exit. | def train_mode(model, mode=True):
"""A context manager that places a model into training mode and restores
the previous mode on exit."""
modes = [module.training for module in model.modules()]
try:
yield model.train(mode)
finally:
for i, module in enumerate(model.modules()):
module.training = modes[i] |
A context manager that places a model into evaluation mode and restores
the previous mode on exit. | def eval_mode(model):
"""A context manager that places a model into evaluation mode and restores
the previous mode on exit."""
return train_mode(model, False) |
Incorporates updated model parameters into an exponential moving averaged
version of a model. It should be called after each optimizer step. | def ema_update(model, averaged_model, decay):
"""Incorporates updated model parameters into an exponential moving averaged
version of a model. It should be called after each optimizer step."""
model_params = dict(model.named_parameters())
averaged_params = dict(averaged_model.named_parameters())
assert model_params.keys() == averaged_params.keys()
for name, param in model_params.items():
averaged_params[name].mul_(decay).add_(param, alpha=1 - decay)
model_buffers = dict(model.named_buffers())
averaged_buffers = dict(averaged_model.named_buffers())
assert model_buffers.keys() == averaged_buffers.keys()
for name, buf in model_buffers.items():
averaged_buffers[name].copy_(buf) |
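A minimal training-loop sketch showing where ema_update fits; model, opt, loader, and compute_loss are hypothetical placeholders, and 0.999 is just a common decay choice:
import copy
averaged = copy.deepcopy(model)  # hypothetical EMA copy of the model
for batch in loader:
    loss = compute_loss(model, batch)  # placeholder loss computation
    opt.zero_grad()
    loss.backward()
    opt.step()
    ema_update(model, averaged, decay=0.999)  # once per optimizer step, as the docstring says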
Draws samples from a lognormal distribution. | def rand_log_normal(shape, loc=0., scale=1., device='cpu', dtype=torch.float32):
"""Draws samples from a lognormal distribution."""
return (torch.randn(shape, device=device, dtype=dtype) * scale + loc).exp() |
Draws samples from an optionally truncated log-logistic distribution. | def rand_log_logistic(shape, loc=0., scale=1., min_value=0., max_value=float('inf'), device='cpu', dtype=torch.float32):
"""Draws samples from an optionally truncated log-logistic distribution."""
min_value = torch.as_tensor(min_value, device=device, dtype=torch.float64)
max_value = torch.as_tensor(max_value, device=device, dtype=torch.float64)
min_cdf = min_value.log().sub(loc).div(scale).sigmoid()
max_cdf = max_value.log().sub(loc).div(scale).sigmoid()
u = torch.rand(shape, device=device, dtype=torch.float64) * (max_cdf - min_cdf) + min_cdf
return u.logit().mul(scale).add(loc).exp().to(dtype) |
Draws samples from a log-uniform distribution. | def rand_log_uniform(shape, min_value, max_value, device='cpu', dtype=torch.float32):
"""Draws samples from a log-uniform distribution."""
min_value = math.log(min_value)
max_value = math.log(max_value)
return (torch.rand(shape, device=device, dtype=dtype) * (max_value - min_value) + min_value).exp() |
Draws samples from a truncated v-diffusion training timestep distribution. | def rand_v_diffusion(shape, sigma_data=1., min_value=0., max_value=float('inf'), device='cpu', dtype=torch.float32):
"""Draws samples from a truncated v-diffusion training timestep distribution."""
min_cdf = math.atan(min_value / sigma_data) * 2 / math.pi
max_cdf = math.atan(max_value / sigma_data) * 2 / math.pi
u = torch.rand(shape, device=device, dtype=dtype) * (max_cdf - min_cdf) + min_cdf
return torch.tan(u * math.pi / 2) * sigma_data |
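These samplers typically draw per-example noise levels for diffusion training; a hedged sketch where all parameter values are illustrative (loc=-1.2, scale=1.2 echoes common EDM-style settings):
sigmas = rand_log_normal([16], loc=-1.2, scale=1.2)                # 16 lognormal noise levels
sigmas_v = rand_v_diffusion([16], sigma_data=0.5, max_value=80.)   # truncated v-diffusion levels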
Draws samples from a split lognormal distribution. | def rand_split_log_normal(shape, loc, scale_1, scale_2, device='cpu', dtype=torch.float32):
"""Draws samples from a split lognormal distribution."""
n = torch.randn(shape, device=device, dtype=dtype).abs()
u = torch.rand(shape, device=device, dtype=dtype)
n_left = n * -scale_1 + loc
n_right = n * scale_2 + loc
ratio = scale_1 / (scale_1 + scale_2)
return torch.where(u < ratio, n_left, n_right).exp() |
A context manager that sets whether TF32 is allowed on cuDNN or matmul. | def tf32_mode(cudnn=None, matmul=None):
"""A context manager that sets whether TF32 is allowed on cuDNN or matmul."""
cudnn_old = torch.backends.cudnn.allow_tf32
matmul_old = torch.backends.cuda.matmul.allow_tf32
try:
if cudnn is not None:
torch.backends.cudnn.allow_tf32 = cudnn
if matmul is not None:
torch.backends.cuda.matmul.allow_tf32 = matmul
yield
finally:
if cudnn is not None:
torch.backends.cudnn.allow_tf32 = cudnn_old
if matmul is not None:
torch.backends.cuda.matmul.allow_tf32 = matmul_old |
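The yield/finally structure suggests this function is wrapped with contextlib.contextmanager in the source; assuming so, a usage sketch (a and b are placeholder tensors):
with tf32_mode(matmul=True):
    out = a @ b  # matmuls inside this block may use TF32 tensor cores on supported GPUs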
https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
Take the mean over all non-batch dimensions. | def mean_flat(tensor):
"""
https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
Take the mean over all non-batch dimensions.
"""
return tensor.mean(dim=list(range(1, len(tensor.shape)))) |
Computes efficient dot-product attention given query, transposed key, and value.
This is the memory-efficient attention algorithm presented in
https://arxiv.org/abs/2112.05682v2, which has O(sqrt(n)) memory requirements.
Args:
query: queries for calculating attention with shape of
`[batch * num_heads, tokens, channels_per_head]`.
key_t: keys for calculating attention with shape of
`[batch * num_heads, channels_per_head, tokens]`.
value: values to be used in attention with shape of
`[batch * num_heads, tokens, channels_per_head]`.
query_chunk_size: int: query chunk size
kv_chunk_size: Optional[int]: key/value chunk size. If None, defaults to sqrt(key_tokens)
kv_chunk_size_min: Optional[int]: key/value minimum chunk size. only considered when kv_chunk_size is None. changes `sqrt(key_tokens)` into `max(sqrt(key_tokens), kv_chunk_size_min)`, to ensure our chunk sizes don't get too small (smaller chunks = more chunks = less concurrent work done).
use_checkpoint: bool: whether to use checkpointing (recommended True for training, False for inference)
Returns:
Output of shape `[batch * num_heads, query_tokens, channels_per_head]`. | def efficient_dot_product_attention(
query: Tensor,
key_t: Tensor,
value: Tensor,
query_chunk_size=1024,
kv_chunk_size: Optional[int] = None,
kv_chunk_size_min: Optional[int] = None,
use_checkpoint=True,
upcast_attention=False,
mask = None,
):
"""Computes efficient dot-product attention given query, transposed key, and value.
This is the memory-efficient attention algorithm presented in
https://arxiv.org/abs/2112.05682v2, which has O(sqrt(n)) memory requirements.
Args:
query: queries for calculating attention with shape of
`[batch * num_heads, tokens, channels_per_head]`.
key_t: keys for calculating attention with shape of
`[batch * num_heads, channels_per_head, tokens]`.
value: values to be used in attention with shape of
`[batch * num_heads, tokens, channels_per_head]`.
query_chunk_size: int: query chunk size
kv_chunk_size: Optional[int]: key/value chunk size. If None, defaults to sqrt(key_tokens)
kv_chunk_size_min: Optional[int]: key/value minimum chunk size. only considered when kv_chunk_size is None. changes `sqrt(key_tokens)` into `max(sqrt(key_tokens), kv_chunk_size_min)`, to ensure our chunk sizes don't get too small (smaller chunks = more chunks = less concurrent work done).
use_checkpoint: bool: whether to use checkpointing (recommended True for training, False for inference)
Returns:
Output of shape `[batch * num_heads, query_tokens, channels_per_head]`.
"""
batch_x_heads, q_tokens, q_channels_per_head = query.shape
_, _, k_tokens = key_t.shape
scale = q_channels_per_head ** -0.5
kv_chunk_size = min(kv_chunk_size or int(math.sqrt(k_tokens)), k_tokens)
if kv_chunk_size_min is not None:
kv_chunk_size = max(kv_chunk_size, kv_chunk_size_min)
if mask is not None and len(mask.shape) == 2:
mask = mask.unsqueeze(0)
def get_query_chunk(chunk_idx: int) -> Tensor:
return dynamic_slice(
query,
(0, chunk_idx, 0),
(batch_x_heads, min(query_chunk_size, q_tokens), q_channels_per_head)
)
def get_mask_chunk(chunk_idx: int) -> Tensor:
if mask is None:
return None
chunk = min(query_chunk_size, q_tokens)
return mask[:,chunk_idx:chunk_idx + chunk]
summarize_chunk: SummarizeChunk = partial(_summarize_chunk, scale=scale, upcast_attention=upcast_attention)
summarize_chunk: SummarizeChunk = partial(checkpoint, summarize_chunk) if use_checkpoint else summarize_chunk
compute_query_chunk_attn: ComputeQueryChunkAttn = partial(
_get_attention_scores_no_kv_chunking,
scale=scale,
upcast_attention=upcast_attention
) if k_tokens <= kv_chunk_size else (
# fast-path for when there's just 1 key-value chunk per query chunk (this is just sliced attention btw)
partial(
_query_chunk_attention,
kv_chunk_size=kv_chunk_size,
summarize_chunk=summarize_chunk,
)
)
if q_tokens <= query_chunk_size:
# fast-path for when there's just 1 query chunk
return compute_query_chunk_attn(
query=query,
key_t=key_t,
value=value,
mask=mask,
)
# TODO: maybe we should use torch.empty_like(query) to allocate storage in-advance,
# and pass slices to be mutated, instead of torch.cat()ing the returned slices
res = torch.cat([
compute_query_chunk_attn(
query=get_query_chunk(i * query_chunk_size),
key_t=key_t,
value=value,
mask=get_mask_chunk(i * query_chunk_size)
) for i in range(math.ceil(q_tokens / query_chunk_size))
], dim=1)
return res |
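A shape-level usage sketch (dimensions are illustrative); note that the keys are passed pre-transposed:
q = torch.randn(8, 4096, 64)    # [batch * heads, tokens, channels_per_head]
k_t = torch.randn(8, 64, 4096)  # [batch * heads, channels_per_head, tokens]
v = torch.randn(8, 4096, 64)
out = efficient_dot_product_attention(q, k_t, v, query_chunk_size=1024)
assert out.shape == (8, 4096, 64)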
Build sinusoidal timestep embeddings. This matches the implementation in
Denoising Diffusion Probabilistic Models (adapted from Fairseq) and in
tensor2tensor, but differs slightly from the description in Section 3.5 of
"Attention Is All You Need". | def get_timestep_embedding(timesteps, embedding_dim):
"""
Build sinusoidal timestep embeddings. This matches the implementation in
Denoising Diffusion Probabilistic Models (adapted from Fairseq) and in
tensor2tensor, but differs slightly from the description in Section 3.5 of
"Attention Is All You Need".
"""
assert len(timesteps.shape) == 1
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = emb.to(device=timesteps.device)
emb = timesteps.float()[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0,1,0,0))
return emb |
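A usage sketch: embedding a batch of four integer timesteps into 128 dimensions (the sin half and cos half are concatenated):
t = torch.randint(0, 1000, (4,))
emb = get_timestep_embedding(t, 128)
assert emb.shape == (4, 128)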
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].
:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
produces the cumulative product of (1-beta) up to that
part of the diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
prevent singularities. | def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].
:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
produces the cumulative product of (1-beta) up to that
part of the diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
prevent singularities.
"""
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return np.array(betas) |
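For example, the cosine schedule from Nichol & Dhariwal's Improved DDPM can be expressed through alpha_bar; the 0.008 offset follows that paper, and this is a sketch rather than the source's own usage:
import math
cosine_alpha_bar = lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
betas = betas_for_alpha_bar(1000, cosine_alpha_bar)
assert betas.shape == (1000,) and betas.max() <= 0.999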
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.
:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing. | def checkpoint(func, inputs, params, flag):
"""
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.
:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
"""
if flag:
args = tuple(inputs) + tuple(params)
return CheckpointFunction.apply(func, len(inputs), *args)
else:
return func(*inputs) |
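A hedged usage sketch; block, x, and emb are hypothetical placeholders, and flag would typically mirror a use_checkpoint config value:
def run_block(x, emb):
    return block(x, emb)  # block is a placeholder nn.Module
out = checkpoint(run_block, (x, emb), list(block.parameters()), flag=True)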
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings. | def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
if not repeat_only:
half = dim // 2
freqs = torch.exp(
-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=timesteps.device) / half
)
args = timesteps[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
if dim % 2:
embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
else:
embedding = repeat(timesteps, 'b -> b d', d=dim)
return embedding |
Zero out the parameters of a module and return it. | def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module |
Scale the parameters of a module and return it. | def scale_module(module, scale):
"""
Scale the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().mul_(scale)
return module |
Take the mean over all non-batch dimensions. | def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(dim=list(range(1, len(tensor.shape)))) |
Create a 1D, 2D, or 3D average pooling module. | def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1d(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2d(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}") |
source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
Compute the KL divergence between two gaussians.
Shapes are automatically broadcasted, so batches can be compared to
scalars, among other use cases. | def normal_kl(mean1, logvar1, mean2, logvar2):
"""
source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
Compute the KL divergence between two gaussians.
Shapes are automatically broadcasted, so batches can be compared to
scalars, among other use cases.
"""
tensor = None
for obj in (mean1, logvar1, mean2, logvar2):
if isinstance(obj, torch.Tensor):
tensor = obj
break
assert tensor is not None, "at least one argument must be a Tensor"
# Force variances to be Tensors. Broadcasting helps convert scalars to
# Tensors, but it does not work for torch.exp().
logvar1, logvar2 = [
x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
for x in (logvar1, logvar2)
]
return 0.5 * (
-1.0
+ logvar2
- logvar1
+ torch.exp(logvar1 - logvar2)
+ ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
) |
Create a 1D, 2D, or 3D convolution module. | def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}") |
Performs log-linear interpolation of a given array of decreasing numbers. | def loglinear_interp(t_steps, num_steps):
"""
Performs log-linear interpolation of a given array of decreasing numbers.
"""
xs = np.linspace(0, 1, len(t_steps))
ys = np.log(t_steps[::-1])
new_xs = np.linspace(0, 1, num_steps)
new_ys = np.interp(new_xs, xs, ys)
interped_ys = np.exp(new_ys)[::-1].copy()
return interped_ys |
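A usage sketch resampling a decreasing sigma schedule (values are illustrative); the endpoints are preserved because the interpolation grid includes 0 and 1:
import numpy as np
t_steps = np.array([14.6, 8.0, 4.0, 2.0, 1.0, 0.5, 0.25, 0.12, 0.06, 0.03])
five_steps = loglinear_interp(t_steps, 5)  # still decreasing, from 14.6 down to 0.03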
Partitions the tokens into src and dst and merges r tokens from src to dst.
Dst tokens are partitioned by choosing one randomly in each (sx, sy) region.
Args:
- metric [B, N, C]: metric to use for similarity
- w: image width in tokens
- h: image height in tokens
- sx: stride in the x dimension for dst, must divide w
- sy: stride in the y dimension for dst, must divide h
- r: number of tokens to remove (by merging)
- no_rand: if true, disable randomness (use top left corner only) | def bipartite_soft_matching_random2d(metric: torch.Tensor,
w: int, h: int, sx: int, sy: int, r: int,
no_rand: bool = False) -> Tuple[Callable, Callable]:
"""
Partitions the tokens into src and dst and merges r tokens from src to dst.
Dst tokens are partitioned by choosing one randomly in each (sx, sy) region.
Args:
- metric [B, N, C]: metric to use for similarity
- w: image width in tokens
- h: image height in tokens
- sx: stride in the x dimension for dst, must divide w
- sy: stride in the y dimension for dst, must divide h
- r: number of tokens to remove (by merging)
- no_rand: if true, disable randomness (use top left corner only)
"""
B, N, _ = metric.shape
if r <= 0 or w == 1 or h == 1:
return do_nothing, do_nothing
gather = mps_gather_workaround if metric.device.type == "mps" else torch.gather
with torch.no_grad():
hsy, wsx = h // sy, w // sx
# For each sy by sx kernel, randomly assign one token to be dst and the rest src
if no_rand:
rand_idx = torch.zeros(hsy, wsx, 1, device=metric.device, dtype=torch.int64)
else:
rand_idx = torch.randint(sy*sx, size=(hsy, wsx, 1), device=metric.device)
# The image might not divide sx and sy, so we need to work on a view of the top left of the idx buffer instead
idx_buffer_view = torch.zeros(hsy, wsx, sy*sx, device=metric.device, dtype=torch.int64)
idx_buffer_view.scatter_(dim=2, index=rand_idx, src=-torch.ones_like(rand_idx, dtype=rand_idx.dtype))
idx_buffer_view = idx_buffer_view.view(hsy, wsx, sy, sx).transpose(1, 2).reshape(hsy * sy, wsx * sx)
# Image is not divisible by sx or sy so we need to move it into a new buffer
if (hsy * sy) < h or (wsx * sx) < w:
idx_buffer = torch.zeros(h, w, device=metric.device, dtype=torch.int64)
idx_buffer[:(hsy * sy), :(wsx * sx)] = idx_buffer_view
else:
idx_buffer = idx_buffer_view
# We set dst tokens to be -1 and src to be 0, so an argsort gives us dst|src indices
rand_idx = idx_buffer.reshape(1, -1, 1).argsort(dim=1)
# We're finished with these
del idx_buffer, idx_buffer_view
# rand_idx is currently dst|src, so split them
num_dst = hsy * wsx
a_idx = rand_idx[:, num_dst:, :] # src
b_idx = rand_idx[:, :num_dst, :] # dst
def split(x):
C = x.shape[-1]
src = gather(x, dim=1, index=a_idx.expand(B, N - num_dst, C))
dst = gather(x, dim=1, index=b_idx.expand(B, num_dst, C))
return src, dst
# Cosine similarity between A and B
metric = metric / metric.norm(dim=-1, keepdim=True)
a, b = split(metric)
scores = a @ b.transpose(-1, -2)
# Can't reduce more than the # tokens in src
r = min(a.shape[1], r)
# Find the most similar greedily
node_max, node_idx = scores.max(dim=-1)
edge_idx = node_max.argsort(dim=-1, descending=True)[..., None]
unm_idx = edge_idx[..., r:, :] # Unmerged Tokens
src_idx = edge_idx[..., :r, :] # Merged Tokens
dst_idx = gather(node_idx[..., None], dim=-2, index=src_idx)
def merge(x: torch.Tensor, mode="mean") -> torch.Tensor:
src, dst = split(x)
n, t1, c = src.shape
unm = gather(src, dim=-2, index=unm_idx.expand(n, t1 - r, c))
src = gather(src, dim=-2, index=src_idx.expand(n, r, c))
dst = dst.scatter_reduce(-2, dst_idx.expand(n, r, c), src, reduce=mode)
return torch.cat([unm, dst], dim=1)
def unmerge(x: torch.Tensor) -> torch.Tensor:
unm_len = unm_idx.shape[1]
unm, dst = x[..., :unm_len, :], x[..., unm_len:, :]
_, _, c = unm.shape
src = gather(dst, dim=-2, index=dst_idx.expand(B, r, c))
# Combine back to the original shape
out = torch.zeros(B, N, c, device=x.device, dtype=x.dtype)
out.scatter_(dim=-2, index=b_idx.expand(B, num_dst, c), src=dst)
out.scatter_(dim=-2, index=gather(a_idx.expand(B, a_idx.shape[1], 1), dim=1, index=unm_idx).expand(B, unm_len, c), src=unm)
out.scatter_(dim=-2, index=gather(a_idx.expand(B, a_idx.shape[1], 1), dim=1, index=src_idx).expand(B, r, c), src=src)
return out
return merge, unmerge |
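A usage sketch for a 32x32 token grid; using the features themselves as the similarity metric is a simplification (ToMe ordinarily uses attention keys), and all shapes are illustrative:
x = torch.randn(2, 32 * 32, 320)                       # [B, N, C]
merge, unmerge = bipartite_soft_matching_random2d(x, w=32, h=32, sx=2, sy=2, r=256)
x_merged = merge(x)             # [2, 768, 320]: 256 src tokens merged into dst
x_restored = unmerge(x_merged)  # back to [2, 1024, 320]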
Conv layer with padding, normalization, activation
mode: CNA --> Conv -> Norm -> Act
NAC --> Norm -> Act --> Conv (Identity Mappings in Deep Residual Networks, ECCV16) | def conv_block(
in_nc: int,
out_nc: int,
kernel_size,
stride=1,
dilation=1,
groups=1,
bias=True,
pad_type="zero",
norm_type: str | None = None,
act_type: str | None = "relu",
mode: ConvMode = "CNA",
c2x2=False,
):
"""
Conv layer with padding, normalization, activation
mode: CNA --> Conv -> Norm -> Act
NAC --> Norm -> Act --> Conv (Identity Mappings in Deep Residual Networks, ECCV16)
"""
if c2x2:
return conv_block_2c2(in_nc, out_nc, act_type=act_type)
assert mode in ("CNA", "NAC", "CNAC"), "Wrong conv mode [{:s}]".format(mode)
padding = get_valid_padding(kernel_size, dilation)
p = pad(pad_type, padding) if pad_type and pad_type != "zero" else None
padding = padding if pad_type == "zero" else 0
c = nn.Conv2d(
in_nc,
out_nc,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
groups=groups,
)
a = act(act_type) if act_type else None
if mode in ("CNA", "CNAC"):
n = norm(norm_type, out_nc) if norm_type else None
return sequential(p, c, n, a)
elif mode == "NAC":
if norm_type is None and act_type is not None:
a = act(act_type, inplace=False)
# Important!
# input----ReLU(inplace)----Conv--+----output
# |________________________|
# inplace ReLU will modify the input, therefore wrong output
n = norm(norm_type, in_nc) if norm_type else None
return sequential(n, a, p, c)
else:
assert False, f"Invalid conv mode {mode}" |
Pixel shuffle layer
(Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional
Neural Network, CVPR17) | def pixelshuffle_block(
in_nc: int,
out_nc: int,
upscale_factor=2,
kernel_size=3,
stride=1,
bias=True,
pad_type="zero",
norm_type: str | None = None,
act_type="relu",
):
"""
Pixel shuffle layer
(Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional
Neural Network, CVPR17)
"""
conv = conv_block(
in_nc,
out_nc * (upscale_factor**2),
kernel_size,
stride,
bias=bias,
pad_type=pad_type,
norm_type=None,
act_type=None,
)
pixel_shuffle = nn.PixelShuffle(upscale_factor)
n = norm(norm_type, out_nc) if norm_type else None
a = act(act_type) if act_type else None
return sequential(conv, pixel_shuffle, n, a) |
Input: Image (B, C, H, W)
Output: Window Partition (B', N, C) | def img2windows(img, H_sp, W_sp):
"""
Input: Image (B, C, H, W)
Output: Window Partition (B', N, C)
"""
B, C, H, W = img.shape
img_reshape = img.view(B, C, H // H_sp, H_sp, W // W_sp, W_sp)
img_perm = (
img_reshape.permute(0, 2, 4, 3, 5, 1).contiguous().reshape(-1, H_sp * W_sp, C)
)
return img_perm |
Input: Window Partition (B', N, C)
Output: Image (B, H, W, C) | def windows2img(img_splits_hw, H_sp, W_sp, H, W):
"""
Input: Window Partition (B', N, C)
Output: Image (B, H, W, C)
"""
B = int(img_splits_hw.shape[0] / (H * W / H_sp / W_sp))
img = img_splits_hw.view(B, H // H_sp, W // W_sp, H_sp, W_sp, -1)
img = img.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return img |
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py | def drop_path(x, drop_prob: float = 0.0, training: bool = False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
From: https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/drop.py
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (
x.ndim - 1
) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output |
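A hedged sketch of where drop_path sits in a residual block's forward pass (self.mlp is a placeholder submodule, 0.1 an illustrative rate):
def forward(self, x):
    return x + drop_path(self.mlp(x), drop_prob=0.1, training=self.training)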
Args:
x: (b, h, w, c)
window_size (int): window size
Returns:
windows: (num_windows*b, window_size, window_size, c) | def window_partition(x, window_size):
"""
Args:
x: (b, h, w, c)
window_size (int): window size
Returns:
windows: (num_windows*b, window_size, window_size, c)
"""
b, h, w, c = x.shape
x = x.view(b, h // window_size, window_size, w // window_size, window_size, c)
windows = (
x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, c)
)
return windows |
Args:
windows: (num_windows*b, window_size, window_size, c)
window_size (int): Window size
h (int): Height of image
w (int): Width of image
Returns:
x: (b, h, w, c) | def window_reverse(windows, window_size, h, w):
"""
Args:
windows: (num_windows*b, window_size, window_size, c)
window_size (int): Window size
h (int): Height of image
w (int): Width of image
Returns:
x: (b, h, w, c)
"""
b = int(windows.shape[0] / (h * w / window_size / window_size))
x = windows.view(
b, h // window_size, w // window_size, window_size, window_size, -1
)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(b, h, w, -1)
return x |
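A round-trip sketch: partition an 8x8 feature map into 4x4 windows and reassemble it (shapes are illustrative):
x = torch.randn(2, 8, 8, 96)               # (b, h, w, c)
win = window_partition(x, window_size=4)   # (2 * 4, 4, 4, 96): 4 windows per image
y = window_reverse(win, window_size=4, h=8, w=8)
assert torch.equal(x, y)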
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C) | def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = (
x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
)
return windows |
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C) | def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(
B, H // window_size, W // window_size, window_size, window_size, -1
)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x |
A simple wrapper for 3x3 convolution with padding.
Args:
inplanes (int): Channel number of inputs.
outplanes (int): Channel number of outputs.
stride (int): Stride in convolution. Default: 1. | def conv3x3(inplanes, outplanes, stride=1):
"""A simple wrapper for 3x3 convolution with padding.
Args:
inplanes (int): Channel number of inputs.
outplanes (int): Channel number of outputs.
stride (int): Stride in convolution. Default: 1.
"""
return nn.Conv2d(
inplanes, outplanes, kernel_size=3, stride=stride, padding=1, bias=False
) |
Calculate mean and std for adaptive_instance_normalization.
Args:
feat (Tensor): 4D tensor.
eps (float): A small value added to the variance to avoid
divide-by-zero. Default: 1e-5. | def calc_mean_std(feat, eps=1e-5):
"""Calculate mean and std for adaptive_instance_normalization.
Args:
feat (Tensor): 4D tensor.
eps (float): A small value added to the variance to avoid
divide-by-zero. Default: 1e-5.
"""
size = feat.size()
assert len(size) == 4, "The input feature should be 4D tensor."
b, c = size[:2]
feat_var = feat.view(b, c, -1).var(dim=2) + eps
feat_std = feat_var.sqrt().view(b, c, 1, 1)
feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1)
return feat_mean, feat_std |
Adaptive instance normalization.
Adjust the reference features to have similar color and illumination to
those in the degraded features.
Args:
content_feat (Tensor): The reference feature.
style_feat (Tensor): The degraded features. | def adaptive_instance_normalization(content_feat, style_feat):
"""Adaptive instance normalization.
Adjust the reference features to have similar color and illumination to
those in the degraded features.
Args:
content_feat (Tensor): The reference feature.
style_feat (Tensor): The degraded features.
"""
size = content_feat.size()
style_mean, style_std = calc_mean_std(style_feat)
content_mean, content_std = calc_mean_std(content_feat)
normalized_feat = (content_feat - content_mean.expand(size)) / content_std.expand(
size
)
return normalized_feat * style_std.expand(size) + style_mean.expand(size) |
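A shape-level usage sketch (both tensors are illustrative):
content = torch.randn(1, 64, 32, 32)  # reference features
style = torch.randn(1, 64, 32, 32)    # degraded features
out = adaptive_instance_normalization(content, style)
assert out.shape == content.shape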
Return an activation function given a string | def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(f"activation should be relu/gelu, not {activation}.") |
Make resampling kernel for UpFirDn.
Args:
k (list[int]): A list indicating the 1D resample kernel magnitude.
Returns:
Tensor: 2D resampled kernel. | def make_resample_kernel(k):
"""Make resampling kernel for UpFirDn.
Args:
k (list[int]): A list indicating the 1D resample kernel magnitude.
Returns:
Tensor: 2D resampled kernel.
"""
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None] # to 2D kernel, outer product
# normalize
k /= k.sum()
return k |
Initialize network weights.
Args:
module_list (list[nn.Module] | nn.Module): Modules to be initialized.
scale (float): Scale initialized weights, especially for residual
blocks. Default: 1.
bias_fill (float): The value to fill bias. Default: 0
kwargs (dict): Other arguments for initialization function. | def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
"""Initialize network weights.
Args:
module_list (list[nn.Module] | nn.Module): Modules to be initialized.
scale (float): Scale initialized weights, especially for residual
blocks. Default: 1.
bias_fill (float): The value to fill bias. Default: 0
kwargs (dict): Other arguments for initialization function.
"""
if not isinstance(module_list, list):
module_list = [module_list]
for module in module_list:
for m in module.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, **kwargs)
m.weight.data *= scale
if m.bias is not None:
m.bias.data.fill_(bias_fill)
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight, **kwargs)
m.weight.data *= scale
if m.bias is not None:
m.bias.data.fill_(bias_fill)
elif isinstance(m, _BatchNorm):
init.constant_(m.weight, 1)
if m.bias is not None:
m.bias.data.fill_(bias_fill) |
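A hedged usage sketch; the 0.1 scale is a common choice for residual blocks, not a value prescribed by the source:
import torch.nn as nn
net = nn.Sequential(nn.Conv2d(3, 64, 3, padding=1), nn.ReLU(), nn.Conv2d(64, 3, 3, padding=1))
default_init_weights(net, scale=0.1)  # Kaiming-normal init, scaled, biases zeroed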
Upsample features according to `upscale_factor`. | def pixelshuffle_block(
in_channels, out_channels, upscale_factor=2, kernel_size=3, bias=False
):
"""
Upsample features according to `upscale_factor`.
"""
padding = kernel_size // 2
conv = nn.Conv2d(
in_channels,
out_channels * (upscale_factor**2),
kernel_size,
padding=padding,  # use the computed kernel_size // 2 padding rather than a hard-coded 1
bias=bias,
)
pixel_shuffle = nn.PixelShuffle(upscale_factor)
return nn.Sequential(*[conv, pixel_shuffle]) |
DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
runs with success, but needs further validation and possibly optimization for lower runtime impact. | def drop_block_2d(
x,
drop_prob: float = 0.1,
block_size: int = 7,
gamma_scale: float = 1.0,
with_noise: bool = False,
inplace: bool = False,
batchwise: bool = False,
):
"""DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
runs with success, but needs further validation and possibly optimization for lower runtime impact.
"""
_, C, H, W = x.shape
total_size = W * H
clipped_block_size = min(block_size, min(W, H))
# seed_drop_rate, the gamma parameter
gamma = (
gamma_scale
* drop_prob
* total_size
/ clipped_block_size**2
/ ((W - block_size + 1) * (H - block_size + 1))
)
# Forces the block to be inside the feature map.
w_i, h_i = torch.meshgrid(
torch.arange(W).to(x.device), torch.arange(H).to(x.device)
)
valid_block = (
(w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)
) & ((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))
valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)
if batchwise:
# one mask for whole batch, quite a bit faster
uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
else:
uniform_noise = torch.rand_like(x)
block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
block_mask = -F.max_pool2d(
-block_mask,
kernel_size=clipped_block_size, # block_size,
stride=1,
padding=clipped_block_size // 2,
)
if with_noise:
normal_noise = (
torch.randn((1, C, H, W), dtype=x.dtype, device=x.device)
if batchwise
else torch.randn_like(x)
)
if inplace:
x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
else:
x = x * block_mask + normal_noise * (1 - block_mask)
else:
normalize_scale = (
block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)
).to(x.dtype)
if inplace:
x.mul_(block_mask * normalize_scale)
else:
x = x * block_mask * normalize_scale
return x |
DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
DropBlock with an experimental gaussian noise option. Simplified from the version above, without concern for the valid
block mask at edges. | def drop_block_fast_2d(
x: torch.Tensor,
drop_prob: float = 0.1,
block_size: int = 7,
gamma_scale: float = 1.0,
with_noise: bool = False,
inplace: bool = False,
):
"""DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
DropBlock with an experimental gaussian noise option. Simplified from the version above, without concern for the valid
block mask at edges.
"""
_, _, H, W = x.shape
total_size = W * H
clipped_block_size = min(block_size, min(W, H))
gamma = (
gamma_scale
* drop_prob
* total_size
/ clipped_block_size**2
/ ((W - block_size + 1) * (H - block_size + 1))
)
block_mask = torch.empty_like(x).bernoulli_(gamma)
block_mask = F.max_pool2d(
block_mask.to(x.dtype),
kernel_size=clipped_block_size,
stride=1,
padding=clipped_block_size // 2,
)
if with_noise:
normal_noise = torch.empty_like(x).normal_()
if inplace:
x.mul_(1.0 - block_mask).add_(normal_noise * block_mask)
else:
x = x * (1.0 - block_mask) + normal_noise * block_mask
else:
block_mask = 1 - block_mask
normalize_scale = (
block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-6)
).to(dtype=x.dtype)
if inplace:
x.mul_(block_mask * normalize_scale)
else:
x = x * block_mask * normalize_scale
return x |
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument. | def drop_path(
x, drop_prob: float = 0.0, training: bool = False, scale_by_keep: bool = True
):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (
x.ndim - 1
) # work with diff dim tensors, not just 2D ConvNets
random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
if keep_prob > 0.0 and scale_by_keep:
random_tensor.div_(keep_prob)
return x * random_tensor |
Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
NOTE: this impl is similar to the PyTorch trunc_normal_, the bounds [a, b] are
applied while sampling the normal with mean/std applied, therefore a, b args
should be adjusted to match the range of mean, std args.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w) | def trunc_normal_(
tensor: torch.Tensor, mean=0.0, std=1.0, a=-2.0, b=2.0
) -> torch.Tensor:
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
NOTE: this impl is similar to the PyTorch trunc_normal_, the bounds [a, b] are
applied while sampling the normal with mean/std applied, therefore a, b args
should be adjusted to match the range of mean, std args.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b) |
Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
and the result is subsequently scaled and shifted by the mean and std args.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w) | def trunc_normal_tf_(
tensor: torch.Tensor, mean=0.0, std=1.0, a=-2.0, b=2.0
) -> torch.Tensor:
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
NOTE: this 'tf' variant behaves closer to Tensorflow / JAX impl where the
bounds [a, b] are applied when sampling the normal distribution with mean=0, std=1.0
and the result is subsequently scaled and shifted by the mean and std args.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
_no_grad_trunc_normal_(tensor, 0, 1.0, a, b)
with torch.no_grad():
tensor.mul_(std).add_(mean)
return tensor |
Create the Tornado web application and register its routes. | def make_app(jwt_validator=None):
"""Create the Tornado web application and register its routes."""
path = pkg_resources.resource_filename("consoleme", "templates")
oss_routes = [
(r"/auth", AuthHandler),
(r"/healthcheck", HealthHandler),
(
r"/static/(.*)",
tornado.web.StaticFileHandler,
dict(path=os.path.join(path, "static")),
),
(
r"/images/(.*)",
tornado.web.StaticFileHandler,
dict(path=os.path.join(path, "images")),
),
(
r"/(favicon.ico)",
tornado.web.StaticFileHandler,
dict(path=path),
),
(r"/api/v1/get_credentials", GetCredentialsHandler),
(r"/api/v1/get_roles", GetRolesHandler),
(r"/api/v2/get_roles", GetRolesMTLSHandler),
(r"/api/v2/get_resource_url", GetResourceURLHandler),
# Used to autocomplete AWS permissions
(r"/api/v1/policyuniverse/autocomplete/?", AutocompleteHandler),
(r"/api/v2/user_profile/?", UserProfileHandler),
(r"/api/v2/self_service_config/?", SelfServiceConfigHandler),
(r"/api/v2/permission_templates/?", PermissionTemplatesHandler),
(r"/api/v1/myheaders/?", ApiHeaderHandler),
(r"/api/v1/policies/typeahead", ApiResourceTypeAheadHandler),
(r"/api/v2/policies/check", CheckPoliciesHandler),
(r"/api/v2/dynamic_config", DynamicConfigApiHandler),
(r"/api/v2/eligible_roles", EligibleRoleHandler),
(r"/api/v2/eligible_roles_page_config", EligibleRolePageConfigHandler),
(r"/api/v2/policies_page_config", PoliciesPageConfigHandler),
(r"/api/v2/requests_page_config", RequestsPageConfigHandler),
(r"/api/v2/generate_policy", GeneratePolicyHandler),
(r"/api/v2/notifications/?", NotificationsHandler),
(r"/api/v2/managed_policies/(\d{12})", ManagedPoliciesForAccountHandler),
(r"/api/v2/managed_policies/(.*)", ManagedPoliciesHandler),
(
r"/api/v2/templated_resource/([a-zA-Z0-9_-]+)/(.*)",
TemplatedResourceDetailHandler,
),
(
r"/api/v2/managed_policies_on_principal/(.*)",
ManagedPoliciesOnPrincipalHandler,
),
(r"/api/v2/login", LoginHandler),
(r"/api/v2/login_configuration", LoginConfigurationHandler),
(r"/api/v2/logout", LogOutHandler),
(
r"/api/v2/typeahead/self_service_resources",
SelfServiceStep1ResourceTypeahead,
),
(r"/api/v2/user", UserManagementHandler),
(r"/api/v2/user_registration", UserRegistrationHandler),
(r"/api/v2/policies", PoliciesHandler),
(r"/api/v2/request", RequestHandler),
(r"/api/v2/requests", RequestsHandler),
(r"/api/v2/requests/([a-zA-Z0-9_-]+)", RequestDetailHandler),
(r"/api/v2/roles/?", RolesHandler),
(r"/api/v2/roles/(\d{12})", AccountRolesHandler),
(r"/api/v2/roles/(\d{12})/(.*)", RoleDetailHandler),
(r"/api/v2/users/(\d{12})/(.*)", UserDetailHandler),
(
r"/api/v2/resources/(\d{12})/(s3|sqs|sns|managed_policy)(?:/([a-z\-1-9]+))?/(.*)",
ResourceDetailHandler,
),
(r"/api/v2/service_control_policies/(.*)", ServiceControlPolicyHandler),
(r"/api/v2/mtls/roles/(\d{12})/(.*)", RoleDetailAppHandler),
(r"/api/v2/clone/role", RoleCloneHandler),
(r"/api/v2/generate_changes/?", GenerateChangesHandler),
(r"/api/v2/typeahead/resources", ResourceTypeAheadHandlerV2),
(r"/api/v2/role_login/(.*)", RoleConsoleLoginHandler),
(r"/myheaders/?", HeaderHandler),
(r"/policies/typeahead/?", ResourceTypeAheadHandler),
(r"/saml/(.*)", SamlHandler),
(
r"/api/v2/challenge_validator/([a-zA-Z0-9_-]+)",
ChallengeValidatorHandler,
),
(r"/noauth/v1/challenge_generator/(.*)", ChallengeGeneratorHandler),
(r"/noauth/v1/challenge_poller/([a-zA-Z0-9_-]+)", ChallengePollerHandler),
(r"/api/v2/audit/roles", AuditRolesHandler),
(r"/api/v2/audit/roles/(\d{12})/(.*)/access", AuditRolesAccessHandler),
(r"/api/v2/.*", V2NotFoundHandler),
(
r"/(.*)",
FrontendHandler,
dict(path=path, default_filename="index.html"),
),
]
# Prioritize internal routes before OSS routes so that OSS routes can be overridden if desired.
internal_route_list = internal_routes.get_internal_routes(
make_jwt_validator, jwt_validator
)
routes = internal_route_list + oss_routes
app = tornado.web.Application(
routes,
debug=config.get("tornado.debug", False),
xsrf_cookies=config.get("tornado.xsrf", True),
xsrf_cookie_kwargs=config.get("tornado.xsrf_cookie_kwargs", {}),
template_path=config.get(
"tornado.template_path", f"{os.path.dirname(consoleme.__file__)}/templates"
),
ui_modules=internal_routes.ui_modules,
)
sentry_dsn = config.get("sentry.dsn")
if sentry_dsn:
sentry_sdk.init(
dsn=sentry_dsn,
integrations=[
TornadoIntegration(),
AioHttpIntegration(),
RedisIntegration(),
],
)
return app |
For each celery task, this will determine the number of seconds since it has last been successful.
Celery tasks should emit redis stats with a deterministic key (in our case, `f"{task}.last_success"`).
report_celery_last_success_metrics should be run periodically to emit metrics on when a task was last successful.
We can then alert when tasks have not run when intended. We should also alert when no metrics are emitted
from this function. | def report_celery_last_success_metrics() -> bool:
"""
For each celery task, this will determine the number of seconds since it has last been successful.
Celery tasks should emit redis stats with a deterministic key (in our case, `f"{task}.last_success"`).
report_celery_last_success_metrics should be run periodically to emit metrics on when a task was last successful.
We can then alert when tasks have not run when intended. We should also alert when no metrics are emitted
from this function.
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
log_data = {"function": function}
current_time = int(time.time())
global schedule
for _, t in schedule.items():
task = t.get("task")
last_success = int(red.get(f"{task}.last_success") or 0)
if last_success == 0:
log_data["message"] = "Last Success Value is 0"
log_data["task_last_success_key"] = f"{task}.last_success"
log.warning(log_data)
stats.gauge(f"{task}.time_since_last_success", current_time - last_success)
red.set(f"{task}.time_since_last_success", current_time - last_success)
red.set(
f"{function}.last_success", int(time.time())
) # Alert if this metric is not seen
stats.count(f"{function}.success")
stats.timer("worker.healthy")
return True |
Report the number of pending tasks to our metrics broker every time a task is published. This metric can be used
for autoscaling workers.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-received
:param sender:
:param headers:
:param body:
:param kwargs:
:return: | def report_number_pending_tasks(**kwargs):
"""
Report the number of pending tasks to our metrics broker every time a task is published. This metric can be used
for autoscaling workers.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-received
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
tags = get_celery_request_tags(**kwargs)
tags.pop("task_id", None)
stats.timer("celery.new_pending_task", tags=tags) |
Report a generic success metric to our metrics broker every time a task finishes correctly.
This metric can be used for autoscaling workers.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-success
:param sender:
:param headers:
:param body:
:param kwargs:
:return: | def report_successful_task(**kwargs):
"""
Report a generic success metric to our metrics broker every time a task finishes correctly.
This metric can be used for autoscaling workers.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-success
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
tags = get_celery_request_tags(**kwargs)
red.set(f"{tags['task_name']}.last_success", int(time.time()))
tags.pop("error", None)
tags.pop("task_id", None)
stats.timer("celery.successful_task", tags=tags) |
Report a generic retry metric to our metrics broker every time a task is retried.
This metric can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-retry
:param sender:
:param headers:
:param body:
:param kwargs:
:return: | def report_task_retry(**kwargs):
"""
Report a generic retry metric to our metrics broker every time a task is retried.
This metric can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-retry
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Retry",
}
# Add traceback if exception info is in the kwargs
einfo = kwargs.get("einfo")
if einfo:
log_data["traceback"] = einfo.traceback
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.retried_task", tags=error_tags) |
Report a generic failure metric to our metrics broker every time a task fails. This is also called when
a task has hit a SoftTimeLimit.
The metric emitted by this function can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-failure
:param sender:
:param headers:
:param body:
:param kwargs:
:return: | def report_failed_task(**kwargs):
"""
Report a generic failure metric to our metrics broker every time a task fails. This is also called when
a task has hit a SoftTimeLimit.
The metric emitted by this function can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-failure
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Failure",
}
# Add traceback if exception info is in the kwargs
einfo = kwargs.get("einfo")
if einfo:
log_data["traceback"] = einfo.traceback
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.failed_task", tags=error_tags) |
Report a generic failure metric to our metrics broker every time a worker receives an unknown task.
The metric emitted by this function can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-unknown
:param sender:
:param headers:
:param body:
:param kwargs:
:return: | def report_unknown_task(**kwargs):
"""
Report a generic failure metric to our metrics broker every time a worker receives an unknown task.
The metric emitted by this function can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-unknown
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Unknown",
}
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.unknown_task", tags=error_tags) |
Report a generic failure metric to our metrics broker every time a task is rejected.
The metric emitted by this function can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-rejected
:param sender:
:param headers:
:param body:
:param kwargs:
:return: | def report_rejected_task(**kwargs):
"""
Report a generic failure metric to our metrics broker every time a task is rejected.
The metric emitted by this function can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-rejected
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Rejected",
}
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.rejected_task", tags=error_tags) |
Report a generic failure metric to our metrics broker every time a task is revoked.
This metric can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-revoked
:param sender:
:param headers:
:param body:
:param kwargs:
:return: | def report_revoked_task(**kwargs):
"""
Report a generic failure metric to our metrics broker every time a task is revoked.
This metric can be used for alerting.
https://docs.celeryproject.org/en/latest/userguide/signals.html#task-revoked
:param sender:
:param headers:
:param body:
:param kwargs:
:return:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Celery Task Revoked",
}
error_tags = get_celery_request_tags(**kwargs)
log_data.update(error_tags)
log.error(log_data)
error_tags.pop("error", None)
error_tags.pop("task_id", None)
stats.timer("celery.revoked_task", tags=error_tags) |
Returns True if an identical task for a given function (and arguments) is already being
run by Celery. | def is_task_already_running(fun, args):
"""
Returns True if an identical task for a given function (and arguments) is already being
run by Celery.
"""
task_id = None
if celery.current_task:
task_id = celery.current_task.request.id
if not task_id:
return False
log.debug(task_id)
active_tasks = app.control.inspect()._request("active")
if not active_tasks:
return False
for _, tasks in active_tasks.items():
for task in tasks:
if task.get("id") == task_id:
continue
if task.get("name") == fun and task.get("args") == args:
return True
return False |
This function will add IAM role data to redis so that policy details can be quickly retrieved by the policies
endpoint.
IAM role data is stored in the `redis_key` redis key by the role's ARN.
Parameters
----------
redis_key : str
The redis key (hash)
role_entry : Dict
The role entry
Example: {'name': 'nameOfRole', 'accountId': '123456789012', 'arn': 'arn:aws:iam::123456789012:role/nameOfRole',
'templated': None, 'ttl': 1562510908, 'policy': '<json_formatted_policy>'} | def _add_role_to_redis(redis_key: str, role_entry: Dict) -> None:
"""
This function will add IAM role data to redis so that policy details can be quickly retrieved by the policies
endpoint.
IAM role data is stored in the `redis_key` redis key by the role's ARN.
Parameters
----------
redis_key : str
The redis key (hash)
role_entry : Dict
The role entry
Example: {'name': 'nameOfRole', 'accountId': '123456789012', 'arn': 'arn:aws:iam::123456789012:role/nameOfRole',
'templated': None, 'ttl': 1562510908, 'policy': '<json_formatted_policy>'}
"""
try:
red.hset(redis_key, str(role_entry["arn"]), str(json.dumps(role_entry)))
except Exception as e: # noqa
stats.count(
"_add_role_to_redis.error",
tags={"redis_key": redis_key, "error": str(e), "role_entry": role_entry},
)
log_data = {
"message": "Error syncing Account's IAM roles to Redis",
"account_id": role_entry["account_id"],
"arn": role_entry["arn"],
"role_entry": role_entry,
}
log.error(log_data, exc_info=True)
raise |
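A minimal read-back sketch for the other side of this cache, assuming the same `red` redis client; the `IAM_ROLE_CACHE` key name is hypothetical and stands in for whatever `redis_key` was used when writing:
```
arn = "arn:aws:iam::123456789012:role/nameOfRole"
raw = red.hget("IAM_ROLE_CACHE", arn)  # hypothetical redis_key
role_entry = json.loads(raw) if raw else None
```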
This function will gather the number of existing IAM Roles and IAM Role quota in all owned AWS accounts. | def get_iam_role_limit() -> dict:
"""
This function will gather the number of existing IAM Roles and IAM Role quota in all owned AWS accounts.
"""
function: str = f"{__name__}.{sys._getframe().f_code.co_name}"
num_accounts = 0
num_roles = 0
if not config.get("celery.get_iam_role_limit.enabled"):
return {}
success_message = "Not running - Inactive region"
if config.region == config.get(
"celery.active_region", config.region
) and config.get("environment") in ["prod", "dev"]:
@sts_conn("iam", client_kwargs=config.get("boto3.client_kwargs", {}))
def _get_delivery_channels(**kwargs) -> list:
"""Gets the delivery channels in the account/region -- calls are wrapped with CloudAux"""
return kwargs.pop("client").get_account_summary(**kwargs)
success_message = "Task successfully completed"
# First, get list of accounts
accounts_d: Dict = async_to_sync(get_account_id_to_name_mapping)()
num_accounts = len(accounts_d.keys())
for account_id, account_name in accounts_d.items():
try:
                iam_summary = _get_iam_account_summary(
account_number=account_id,
assume_role=config.get("policies.role_name"),
region=config.region,
)
num_iam_roles = iam_summary["SummaryMap"]["Roles"]
iam_role_quota = iam_summary["SummaryMap"]["RolesQuota"]
iam_role_quota_ratio = num_iam_roles / iam_role_quota
num_roles += num_iam_roles
log_data = {
"function": function,
"message": "IAM role quota for account",
"num_iam_roles": num_iam_roles,
"iam_role_quota": iam_role_quota,
"iam_role_quota_ratio": iam_role_quota_ratio,
"account_id": account_id,
"account_name": account_name,
}
stats.gauge(
f"{function}.quota_ratio_gauge",
iam_role_quota_ratio,
tags={
"num_iam_roles": num_iam_roles,
"iam_role_quota": iam_role_quota,
"account_id": account_id,
"account_name": account_name,
},
)
log.debug(log_data)
except ClientError as e:
log_data = {
"function": function,
"message": "Error retrieving IAM quota",
"account_id": account_id,
"account_name": account_name,
"error": e,
}
stats.count(f"{function}.error", tags={"account_id": account_id})
log.error(log_data, exc_info=True)
sentry_sdk.capture_exception()
raise
log_data = {
"function": function,
"num_accounts": num_accounts,
"num_roles": num_roles,
"message": success_message,
}
log.debug(log_data)
return log_data |
This task triggers a role cache refresh for any role for which a change was detected. This feature requires an
EventBridge rule monitoring CloudTrail in your accounts for IAM role mutations.
This task will trigger a credential authorization refresh if any changes were detected.
This task should run in all regions to force IAM roles to be refreshed in each region's cache on change.
:return: | def trigger_credential_mapping_refresh_from_role_changes():
"""
    This task triggers a role cache refresh for any role for which a change was detected. This feature requires an
    EventBridge rule monitoring CloudTrail in your accounts for IAM role mutations.
This task will trigger a credential authorization refresh if any changes were detected.
This task should run in all regions to force IAM roles to be refreshed in each region's cache on change.
:return:
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
if not config.get(
"celery.trigger_credential_mapping_refresh_from_role_changes.enabled"
):
return {
"function": function,
"message": "Not running Celery task because it is not enabled.",
}
roles_changed = detect_role_changes_and_update_cache(app)
log_data = {
"function": function,
"message": "Successfully checked role changes",
"num_roles_changed": len(roles_changed),
}
if roles_changed:
# Trigger credential authorization mapping refresh. We don't want credential authorization mapping refreshes
# running in parallel, so the cache_credential_authorization_mapping is protected to prevent parallel runs.
# This task can run in parallel without negative impact.
cache_credential_authorization_mapping.apply_async(countdown=30)
log.debug(log_data)
return log_data |
This task caches access-deny events reported by CloudTrail. This feature requires an
EventBridge rule monitoring CloudTrail in your accounts for access-deny errors.
"""
    This task caches access-deny events reported by CloudTrail. This feature requires an
    EventBridge rule monitoring CloudTrail in your accounts for access-deny errors.
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
if not (
config.region == config.get("celery.active_region", config.region)
or config.get("environment") in ["dev", "test"]
):
return {
"function": function,
"message": "Not running Celery task in inactive region",
}
events = async_to_sync(detect_cloudtrail_denies_and_update_cache)(app)
if events["new_events"] > 0:
# Spawn off a task to cache errors by ARN for the UI
cache_cloudtrail_errors_by_arn.delay()
log_data = {
"function": function,
"message": "Successfully cached cloudtrail denies",
# Total CT denies
"num_cloudtrail_denies": events["num_events"],
# "New" CT messages that we don't already have cached in Dynamo DB. Not a "repeated" error
"num_new_cloudtrail_denies": events["new_events"],
}
log.debug(log_data)
return log_data |
This task is called on demand to asynchronously refresh an AWS IAM role in Redis/DDB | def refresh_iam_role(role_arn):
"""
This task is called on demand to asynchronously refresh an AWS IAM role in Redis/DDB
"""
account_id = role_arn.split(":")[4]
async_to_sync(aws().fetch_iam_role)(
account_id, role_arn, force_refresh=True, run_sync=True
) |
This task caches notifications to be shown to end-users based on their identity or group membership. | def cache_notifications() -> Dict[str, Any]:
"""
This task caches notifications to be shown to end-users based on their identity or group membership.
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
log_data = {"function": function}
result = async_to_sync(cache_notifications_to_redis_s3)()
log_data.update({**result, "message": "Successfully cached notifications"})
log.debug(log_data)
return log_data |
Recursively merge two dictionaries, including nested dicts | def dict_merge(dct: dict, merge_dct: dict):
"""Recursively merge two dictionaries, including nested dicts"""
for k, v in merge_dct.items():
if (
k in dct
and isinstance(dct[k], dict)
and isinstance(merge_dct[k], collections.abc.Mapping)
):
dict_merge(dct[k], merge_dct[k])
else:
# Prefer original dict values over merged dict values if they already exist
            if k not in dct:
dct[k] = merge_dct[k]
return dct |
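A small worked example, assuming `dict_merge` is importable from this module, showing that keys already present in the destination win while missing (including nested) keys are filled in from the merge source:
```
base = {"auth": {"require_mfa": True}, "region": "us-east-1"}
overrides = {"auth": {"require_mfa": False, "groups": ["admin"]}, "timeout": 30}

merged = dict_merge(base, overrides)
# require_mfa keeps its original value; groups and timeout are merged in.
assert merged == {
    "auth": {"require_mfa": True, "groups": ["admin"]},
    "region": "us-east-1",
    "timeout": 30,
}
```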
Initialize the auth plugin. | def init():
"""Initialize the auth plugin."""
return Auth() |
Initialize the AWS plugin. | def init():
"""Initialize the AWS plugin."""
return Aws() |
This task retrieves application information from configuration. You may want to override this function to
utilize your organization's CI/CD pipeline for this information.
:return: | def cache_application_information():
"""
This task retrieves application information from configuration. You may want to override this function to
utilize your organization's CI/CD pipeline for this information.
:return:
"""
apps_to_roles = {}
for k, v in config.get("application_settings", {}).items():
apps_to_roles[k] = v.get("roles", [])
red.set(
config.get("celery.apps_to_roles.redis_key", "APPS_TO_ROLES"),
json.dumps(apps_to_roles, cls=SetEncoder),
) |
This task demonstrates how you can implement your own internal celery tasks to run on schedule or on demand.
:return: | def task_1():
"""
This task demonstrates how you can implement your own internal celery tasks to run on schedule or on demand.
:return:
"""
pass |
Initialize the Celery Tasks plugin. | def init():
"""Initialize the Celery Tasks plugin."""
return internal_schedule |
Initialize the Config plugin. | def init():
"""Initialize the Config plugin."""
return Config() |
Initialize group_mapping plugin. | def init():
"""Initialize group_mapping plugin."""
return GroupMapping() |
Initialize the internal routes plugin. | def init():
"""Initialize the internal routes plugin."""
return InternalRoutes() |
Initialize metrics plugin. | def init():
"""Initialize metrics plugin."""
return Metric |
Initialize Policies plugin. | def init():
"""Initialize Policies plugin."""
return Policies() |
Apply a managed policy to a role.
:param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call)
:param policy_name: Name of managed policy to add to role
:param session_name: Name of session to assume role with. This is an identifier that will be logged in CloudTrail
:return: | def apply_managed_policy_to_role(
role: Dict, policy_name: str, session_name: str
) -> bool:
"""
Apply a managed policy to a role.
:param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call)
:param policy_name: Name of managed policy to add to role
:param session_name: Name of session to assume role with. This is an identifier that will be logged in CloudTrail
:return:
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
log_data = {
"function": function,
"role": role,
"policy_name": policy_name,
"session_name": session_name,
}
account_id = role.get("Arn").split(":")[4]
policy_arn = f"arn:aws:iam::{account_id}:policy/{policy_name}"
client = boto3_cached_conn(
"iam",
account_number=account_id,
assume_role=config.get("policies.role_name"),
session_name=session_name,
retry_max_attempts=2,
client_kwargs=config.get("boto3.client_kwargs", {}),
)
client.attach_role_policy(RoleName=role.get("RoleName"), PolicyArn=policy_arn)
log_data["message"] = "Applied managed policy to role"
log.debug(log_data)
stats.count(
f"{function}.attach_role_policy",
tags={"role": role.get("Arn"), "policy": policy_arn},
)
return True |
Checks a role dictionary and determines whether the role has the specified tag. If `value` is passed,
this function will only return True if the tag's value matches the `value` variable.
:param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call)
:param key: key of the tag
:param value: optional value of the tag
:return: | def role_has_tag(role: Dict, key: str, value: Optional[str] = None) -> bool:
"""
    Checks a role dictionary and determines whether the role has the specified tag. If `value` is passed,
    this function will only return True if the tag's value matches the `value` variable.
:param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call)
:param key: key of the tag
:param value: optional value of the tag
:return:
"""
for tag in role.get("Tags", []):
if tag.get("Key") == key:
if not value or tag.get("Value") == value:
return True
return False |
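A short usage sketch; the role dictionary shape follows the boto3 `Tags` list convention:
```
role = {
    "RoleName": "example-role",
    "Tags": [{"Key": "owner", "Value": "platform-team"}],
}

assert role_has_tag(role, "owner") is True
assert role_has_tag(role, "owner", "platform-team") is True
assert role_has_tag(role, "owner", "another-team") is False
```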
Checks a role dictionary to determine if a managed policy is attached
:param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call)
:param managed_policy_name: the name of the managed policy
:return: | def role_has_managed_policy(role: Dict, managed_policy_name: str) -> bool:
"""
Checks a role dictionary to determine if a managed policy is attached
:param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call)
:param managed_policy_name: the name of the managed policy
:return:
"""
for managed_policy in role.get("AttachedManagedPolicies", []):
if managed_policy.get("PolicyName") == managed_policy_name:
return True
return False |
Checks a role dictionary to determine if it is newer than the specified number of days
:param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call)
:param days: number of days
:return: | def role_newer_than_x_days(role: Dict, days: int) -> bool:
"""
Checks a role dictionary to determine if it is newer than the specified number of days
:param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call)
:param days: number of days
:return:
"""
if isinstance(role.get("CreateDate"), str):
role["CreateDate"] = parse(role.get("CreateDate"))
role_age = datetime.now(tz=pytz.utc) - role.get("CreateDate")
if role_age.days < days:
return True
return False |
Checks a role naively to determine if it is associated with an instance profile.
We only check by name, and not the actual attached instance profiles.
:param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call)
:return: | def is_role_instance_profile(role: Dict) -> bool:
"""
    Checks a role naively to determine if it is associated with an instance profile.
We only check by name, and not the actual attached instance profiles.
:param role: An AWS role dictionary (from a boto3 get_role or get_account_authorization_details call)
:return:
"""
return role.get("RoleName").endswith("InstanceProfile") |
Given an ARN, return the region in the ARN, if it is available. In certain cases like S3 it is not | def get_region_from_arn(arn):
"""Given an ARN, return the region in the ARN, if it is available. In certain cases like S3 it is not"""
result = parse_arn(arn)
# Support S3 buckets with no values under region
if result["region"] is None:
result = ""
else:
result = result["region"]
return result |
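A usage sketch, assuming `parse_arn` splits ARN fields in the standard order; the S3 case exercises the empty-region branch above:
```
# Regional resource: the region field is populated.
assert get_region_from_arn("arn:aws:sqs:us-west-2:123456789012:example-queue") == "us-west-2"
# S3 bucket ARNs carry no region, so an empty string comes back.
assert get_region_from_arn("arn:aws:s3:::example-bucket") == ""
```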
Given an ARN, parse it according to ARN namespacing and return the resource. See
http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more details on ARN namespacing. | def get_resource_from_arn(arn):
"""Given an ARN, parse it according to ARN namespacing and return the resource. See
http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more details on ARN namespacing.
"""
result = parse_arn(arn)
return result["resource"] |
Given an ARN string, return the service | def get_service_from_arn(arn):
"""Given an ARN string, return the service"""
result = parse_arn(arn)
return result["service"] |
This function determines whether ConsoleMe is allowed to sync or otherwise manipulate an IAM role. By default,
ConsoleMe will sync all roles that it can get its grubby little hands on. However, ConsoleMe administrators can tell
ConsoleMe to only sync roles with either 1) Specific ARNs, 2) Specific tag key/value pairs (all configured tags
must exist on the role for ConsoleMe to sync it), or 3) Specific tag keys.
Here's an example configuration for a tag-based restriction:
```
roles:
allowed_tags:
tag1: value1
tag2: value2
```
And another one for an ARN-based restriction:
```
roles:
allowed_arns:
- arn:aws:iam::111111111111:role/role-name-here-1
- arn:aws:iam::111111111111:role/role-name-here-2
- arn:aws:iam::111111111111:role/role-name-here-3
- arn:aws:iam::222222222222:role/role-name-here-1
- arn:aws:iam::333333333333:role/role-name-here-1
```
And another one for a tag-key-based restriction:
```
roles:
  allowed_tag_keys:
    - consoleme-authorized
    - consoleme-authorized-cli-only
```
:param role_arn: The AWS role ARN
:param role_tags: A list of role tag dictionaries
:return: boolean specifying whether ConsoleMe is allowed to sync / access the role | def allowed_to_sync_role(
role_arn: str, role_tags: List[Optional[Dict[str, str]]]
) -> bool:
"""
This function determines whether ConsoleMe is allowed to sync or otherwise manipulate an IAM role. By default,
ConsoleMe will sync all roles that it can get its grubby little hands on. However, ConsoleMe administrators can tell
    ConsoleMe to only sync roles with either 1) Specific ARNs, 2) Specific tag key/value pairs (all configured tags
    must exist on the role for ConsoleMe to sync it), or 3) Specific tag keys.
Here's an example configuration for a tag-based restriction:
```
roles:
allowed_tags:
tag1: value1
tag2: value2
```
And another one for an ARN-based restriction:
```
roles:
allowed_arns:
- arn:aws:iam::111111111111:role/role-name-here-1
- arn:aws:iam::111111111111:role/role-name-here-2
- arn:aws:iam::111111111111:role/role-name-here-3
- arn:aws:iam::222222222222:role/role-name-here-1
- arn:aws:iam::333333333333:role/role-name-here-1
```
    And another one for a tag-key-based restriction:
    ```
    roles:
      allowed_tag_keys:
        - consoleme-authorized
        - consoleme-authorized-cli-only
    ```
    :param role_arn: The AWS role ARN
    :param role_tags: A list of role tag dictionaries
:return: boolean specifying whether ConsoleMe is allowed to sync / access the role
"""
allowed_tags = config.get("roles.allowed_tags", {})
allowed_arns = config.get("roles.allowed_arns", [])
allowed_tag_keys = config.get("roles.allowed_tag_keys", [])
if not allowed_tags and not allowed_arns and not allowed_tag_keys:
return True
if role_arn in allowed_arns:
return True
# Convert list of role tag dicts to an array of tag keys
# ex:
# role_tags = [{'Key': 'consoleme-authorized', 'Value': 'consoleme_admins'},
# {'Key': 'Description', 'Value': 'ConsoleMe OSS Demo Role'}]
# so: actual_tag_keys = ['consoleme-authorized', 'Description']
actual_tag_keys = [d["Key"] for d in role_tags]
# If any allowed tag key exists in the role's actual_tags this condition will pass
if allowed_tag_keys and any(x in allowed_tag_keys for x in actual_tag_keys):
return True
# Convert list of role tag dicts to a single key/value dict of tags
# ex:
# role_tags = [{'Key': 'consoleme-authorized', 'Value': 'consoleme_admins'},
# {'Key': 'Description', 'Value': 'ConsoleMe OSS Demo Role'}]
# so: actual_tags = {'consoleme-authorized': 'consoleme_admins', 'Description': 'ConsoleMe OSS Demo Role'}
actual_tags = {
d["Key"]: d["Value"] for d in role_tags
} # Convert List[Dicts] to 1 Dict
# All configured allowed_tags must exist in the role's actual_tags for this condition to pass
if allowed_tags and allowed_tags.items() <= actual_tags.items():
return True
return False |
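A short usage sketch; the tag list follows the boto3 `Tags` convention and the config values mentioned in the comments are hypothetical:
```
role_tags = [
    {"Key": "consoleme-authorized", "Value": "consoleme_admins"},
    {"Key": "Description", "Value": "ConsoleMe OSS Demo Role"},
]
# With no restrictions configured, this returns True for any role. With
# `roles.allowed_tag_keys: ["consoleme-authorized"]`, it also returns True,
# because the role carries that tag key.
allowed_to_sync_role("arn:aws:iam::111111111111:role/role-name-here-1", role_tags)
```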
If this feature is enabled, it will look at inline role policies and remove expired policies if they have been
designated as temporary. Policies are designated as temporary through a configurable prefix in the policy name,
followed by an underscore and a `YYYYMMDD` expiration dateint (for example: `cm_delete-on_20210905`).
In the future, we may allow specifying temporary policies by `Sid` or other means.
:param role: A single AWS IAM role entry in dictionary format as returned by the `get_account_authorization_details`
call
:param iam_client: A boto3 IAM client for the role's account
:return: bool: Whether policies were removed or not
"""
    If this feature is enabled, it will look at inline role policies and remove expired policies if they have been
    designated as temporary. Policies are designated as temporary through a configurable prefix in the policy name,
    followed by an underscore and a `YYYYMMDD` expiration dateint (for example: `cm_delete-on_20210905`).
    In the future, we may allow specifying temporary policies by `Sid` or other means.
    :param role: A single AWS IAM role entry in dictionary format as returned by the `get_account_authorization_details`
    call
    :param iam_client: A boto3 IAM client for the role's account
    :return: bool: Whether policies were removed or not
"""
function = f"{__name__}.{sys._getframe().f_code.co_name}"
if not config.get("policies.temp_policy_support"):
return False
temp_policy_prefix = config.get("policies.temp_policy_prefix", "cm_delete-on")
if not temp_policy_prefix:
return False
current_dateint = datetime.today().strftime("%Y%m%d")
log_data = {
"function": function,
"temp_policy_prefix": temp_policy_prefix,
"role_arn": role["Arn"],
}
policies_removed = False
for policy in role["RolePolicyList"]:
try:
policy_name = policy["PolicyName"]
if not policy_name.startswith(temp_policy_prefix):
continue
expiration_date = policy_name.replace(temp_policy_prefix, "", 1).split("_")[
1
]
if not current_dateint >= expiration_date:
continue
log.debug(
{
**log_data,
"message": "Deleting temporary policy",
"policy_name": policy_name,
}
)
iam_client.delete_role_policy(
RoleName=role["RoleName"], PolicyName=policy_name
)
policies_removed = True
except Exception as e:
log.error(
{
**log_data,
"message": "Error deleting temporary IAM policy",
"error": str(e),
},
exc_info=True,
)
sentry_sdk.capture_exception()
return policies_removed |
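A minimal sketch of the naming convention the parsing above expects (prefix, underscore, then a `YYYYMMDD` dateint), mirroring the `replace`/`split` logic in the loop:
```
temp_policy_prefix = "cm_delete-on"
policy_name = f"{temp_policy_prefix}_20210905"

expiration_date = policy_name.replace(temp_policy_prefix, "", 1).split("_")[1]
assert expiration_date == "20210905"
# Lexicographic comparison of YYYYMMDD dateints orders the same way as dates.
assert "20991231" >= expiration_date
```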
Identifies the owning user/group of an AWS principal based on one or more trusted and configurable principal tags.
`owner` is used to notify application owners of permission problems with their detected AWS principals or resources
if another identifier (i.e., session name) for a principal doesn't point to a specific user for notification.
:return: owner: str | def get_aws_principal_owner(role_details: Dict[str, Any]) -> Optional[str]:
"""
Identifies the owning user/group of an AWS principal based on one or more trusted and configurable principal tags.
`owner` is used to notify application owners of permission problems with their detected AWS principals or resources
    if another identifier (i.e., session name) for a principal doesn't point to a specific user for notification.
:return: owner: str
"""
owner = None
owner_tag_names = config.get("aws.tags.owner", [])
if not owner_tag_names:
return owner
if isinstance(owner_tag_names, str):
owner_tag_names = [owner_tag_names]
role_tags = role_details.get("Tags")
for owner_tag_name in owner_tag_names:
for role_tag in role_tags:
if role_tag["Key"] == owner_tag_name:
return role_tag["Value"]
return owner |
This function sanitizes the session name typically passed in an assume_role call, to verify that it only
contains valid characters (`[\w+=,.@-]`) and is no longer than the 64-character session name limit.
"""
    This function sanitizes the session name typically passed in an assume_role call, to verify that it only
    contains valid characters (`[\w+=,.@-]`) and is no longer than the 64-character session name limit.
"""
valid_characters_re = re.compile(r"[\w+=,.@-]")
sanitized_session_name = ""
max_length = 64 # Session names have a length limit of 64 characters
for char in unsanitized_session_name:
if len(sanitized_session_name) == max_length:
break
if valid_characters_re.match(char):
sanitized_session_name += char
return sanitized_session_name |
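A quick usage sketch showing invalid characters being dropped and long names truncated to the 64-character limit:
```
assert sanitize_session_name("user@example.com") == "user@example.com"
assert sanitize_session_name("bad name!") == "badname"
assert len(sanitize_session_name("x" * 100)) == 64
```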
Yields successive n-sized chunks from the list `list_`.
`divide_chunks(["a","b","c","d","e"], 3)` yields:
['a', 'b', 'c']
['d', 'e']
"""
    Yields successive n-sized chunks from the list `list_`.
    `divide_chunks(["a","b","c","d","e"], 3)` yields:
    ['a', 'b', 'c']
    ['d', 'e']
"""
for i in range(0, len(list_), n):
yield list_[i : i + n] |
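A short usage sketch, e.g. chunking ARNs to stay under a hypothetical API batch-size limit:
```
arns = [f"arn:aws:iam::123456789012:role/role-{i}" for i in range(7)]
batches = list(divide_chunks(arns, 3))
assert [len(batch) for batch in batches] == [3, 3, 1]
```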
Pass in a list of row dicts to include in a formatted table. This will return the HTML for the table.
:param d:
:return:
html: HTML formatted table | def generate_html(d: List[Dict[str, Union[str, bool]]]) -> str:
"""
    Pass in a list of row dicts to include in a formatted table. This will return the HTML for the table.
:param d:
:return:
html: HTML formatted table
"""
if not d:
return
pd.set_option("display.max_colwidth", -1)
df = pd.DataFrame(d)
html = df.to_html(classes=["ui", "celled", "table"], escape=False, index=False)
return html |
Recursively sorts dictionary keys and dictionary values in alphabetical order | def sort_dict(original):
"""Recursively sorts dictionary keys and dictionary values in alphabetical order"""
if isinstance(original, dict):
res = (
dict()
) # Make a new "ordered" dictionary. No need for Collections in Python 3.7+
for k, v in sorted(original.items()):
res[k] = v
d = res
else:
d = original
for k in d:
if isinstance(d[k], str):
continue
if isinstance(d[k], list) and len(d[k]) > 1 and isinstance(d[k][0], str):
d[k] = sorted(d[k])
if isinstance(d[k], dict):
d[k] = sort_dict(d[k])
if isinstance(d[k], list) and len(d[k]) >= 1 and isinstance(d[k][0], dict):
for i in range(len(d[k])):
d[k][i] = sort_dict(d[k][i])
return d |
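A small worked example: keys are re-ordered alphabetically, and lists of strings are sorted, recursively:
```
original = {"b": ["z", "a"], "a": {"d": 1, "c": 2}}
result = sort_dict(original)
assert list(result.keys()) == ["a", "b"]        # top-level keys re-ordered
assert result["b"] == ["a", "z"]                # string lists sorted
assert list(result["a"].keys()) == ["c", "d"]   # nested dicts handled too
```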
Helper function to unwrap nested JSON in the AWS Config resource configuration. | def un_wrap_json(json_obj: Any) -> Any:
"""Helper function to unwrap nested JSON in the AWS Config resource configuration."""
# pylint: disable=C0103,W0703,R0911
# Is this a field that we can safely return?
if isinstance(json_obj, (type(None), int, bool, float)): # noqa
return json_obj
# Is this a Datetime? Convert it to a string and return it:
if isinstance(json_obj, datetime):
return str(json_obj)
# Is this a Dictionary?
if isinstance(json_obj, dict):
decoded = {}
for k, v in json_obj.items():
decoded[k] = un_wrap_json(v)
# Is this a List?
elif isinstance(json_obj, list):
decoded = []
for x in json_obj:
decoded.append(un_wrap_json(x))
# Yes, try to sort the contents of lists. This is because AWS does not consistently store list ordering for many resource types:
try:
sorted_list = sorted(decoded)
decoded = sorted_list
except Exception: # noqa # nosec # If we can't sort then NBD
pass
else:
# Try to load the JSON string:
try:
# Check if the string starts with a "[" or a "{" (because apparently '123' is a valid JSON)
for check_field in {
"{",
"[",
'"{',
'"[',
}: # Some of the double-wrapping is really ridiculous
if json_obj.startswith(check_field):
decoded = json.loads(json_obj)
# If we loaded this properly, then we need to pass the decoded JSON back in for all the nested stuff:
return un_wrap_json(decoded)
# Check if this string is URL Encoded - if it is, then re-run it through:
decoded = unquote_plus(json_obj)
if decoded != json_obj:
return un_wrap_json(decoded)
return json_obj
# If we didn't get a JSON back (exception), then just return the raw value back:
except Exception: # noqa
return json_obj
return decoded |
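A brief sketch of the double-wrapping this handles, where AWS Config stores a policy document as a JSON string inside a larger structure:
```
wrapped = {
    "policyName": "example",
    "policyDocument": '{"Version": "2012-10-17", "Statement": []}',
}
unwrapped = un_wrap_json(wrapped)
assert unwrapped["policyDocument"]["Version"] == "2012-10-17"
```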
Experimental function to force-push discovered IAM resources into a Git repository's master branch.
Use at your own risk. | def store_iam_resources_in_git(
iam_resources,
account_id,
git_url=config.get("cache_iam_resources_for_account.store_in_git.repo"),
git_message="[Automated] Update IAM Cache",
):
"""
Experimental function to force-push discovered IAM resources into a Git repository's master branch.
Use at your own risk.
"""
accounts_d = async_to_sync(get_account_id_to_name_mapping)()
tempdir = tempfile.mkdtemp()
try:
repo = clone_repo(git_url, tempdir)
repo.config_writer().set_value("user", "name", "ConsoleMe").release()
email = config.get("cache_iam_resources_for_account.store_in_git.email")
if email:
repo.config_writer().set_value("user", "email", email).release()
expected_entries = {
"UserDetailList": {
"category": "iam_users",
"resource_name_key": "UserName",
},
"GroupDetailList": {
"category": "iam_groups",
"resource_name_key": "GroupName",
},
"RoleDetailList": {
"category": "iam_roles",
"resource_name_key": "RoleName",
},
"Policies": {"category": "iam_policies", "resource_name_key": "PolicyName"},
}
for key, settings in expected_entries.items():
category = settings["category"]
for resource in iam_resources[key]:
if key == "RoleDetailList":
resource.pop("RoleLastUsed", None)
resource_name = resource[settings["resource_name_key"]]
yaml = YAML()
yaml.preserve_quotes = True # type: ignore
yaml.indent(mapping=2, sequence=4, offset=2)
account_name = accounts_d.get(account_id, account_id)
if not account_name:
account_name = "unknown"
path_in_repo = os.path.join(
repo.working_dir, f"{account_name}/{category}/{resource_name}.yaml"
)
os.makedirs(Path(path_in_repo).parent.absolute(), exist_ok=True)
should_write = True
to_write = sort_dict(resource)
if os.path.exists(path_in_repo):
with open(path_in_repo, "r") as f:
# Unfortunately at the time of writing, ruamel.yaml loads this into ordered dictionaries.
# We want this to be the same type as `to_write`, so we use the builtin yaml library to load it
existing = builtin_yaml.safe_load(f)
if not DeepDiff(to_write, existing, ignore_order=True):
should_write = False
if should_write:
with open(path_in_repo, "w") as f:
yaml.dump(to_write, f)
repo.git.add("*")
if repo.index.diff("HEAD"):
repo.index.commit(git_message)
origin = repo.remote("origin")
origin.pull()
origin.push("master", force=True)
except Exception: # noqa
sentry_sdk.capture_exception()
finally:
shutil.rmtree(tempdir) |
Given a role name, return what to display on the UI. This cleanly formats per-user roles. | def format_role_name(arn: str, accounts: dict) -> str:
"""Given a role name, return what to display on the UI. This cleanly formats per-user roles."""
role_name = arn.split("role/")[1]
if not accounts:
# Only fall back to ALL_ACCOUNTS if an accounts dict is not supplied
accounts = ALL_ACCOUNTS
if config.get("format_role_name.show_full_arn"):
return arn
elif config.get("format_role_name.show_account_name_role_name"):
account_id = arn.split(":")[4]
account_name = accounts.get(account_id)
if not account_name:
account_name = account_id
return f"{account_name}/{role_name}"
if not role_name.startswith("cm_"):
return role_name
name = accounts.get(arn.split(":")[4])
    # This should NOT happen, but if it does, log it and keep a metric of it:
if not name:
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Can't find account for per-user role",
"role": role_name,
"accounts": accounts,
}
log.error(log_data)
stats.count("index.unknown_account_role", tags={"role": role_name})
return name |
Import a class by a dot-delimited class name.
i.e: import_class("consoleme.default_plugins.plugins.metrics.default_metrics.DefaultMetric")
--> <class 'consoleme.default_plugins.plugins.metrics.default_metrics.DefaultMetric'> | def import_class_by_name(class_full_path: str):
"""
Import a class by a dot-delimited class name.
i.e: import_class("consoleme.default_plugins.plugins.metrics.default_metrics.DefaultMetric")
--> <class 'consoleme.default_plugins.plugins.metrics.default_metrics.DefaultMetric'>
"""
    d = class_full_path.rfind(".")
    class_name = class_full_path[d + 1 :]
    m = __import__(class_full_path[:d], globals(), locals(), [class_name])
return getattr(m, class_name) |
For the given resource and policy statement, return the actions that are
for that resource's service. | def get_actions_for_resource(resource_arn: str, statement: Dict) -> List[str]:
"""For the given resource and policy statement, return the actions that are
for that resource's service.
"""
results: List[str] = []
# Get service from resource
resource_service = get_service_from_arn(resource_arn)
# Get relevant actions from policy doc
actions = statement.get("Action", [])
actions = actions if isinstance(actions, list) else [actions]
for action in actions:
if action == "*":
results.append(action)
else:
if get_service_from_action(action) == resource_service:
if action not in results:
results.append(action)
return results |
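A short usage sketch, assuming `get_service_from_action` returns the service prefix of an IAM action (e.g. `s3` for `s3:GetObject`); the statement shape is standard IAM policy JSON:
```
statement = {
    "Effect": "Allow",
    "Action": ["s3:GetObject", "s3:PutObject", "sqs:SendMessage"],
    "Resource": "*",
}
actions = get_actions_for_resource("arn:aws:s3:::example-bucket", statement)
assert actions == ["s3:GetObject", "s3:PutObject"]
```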
Convert a snake_case string to CamelCase | def to_camel(string):
"""Convert a snake_case string to CamelCase"""
return "".join(word.capitalize() for word in string.split("_")) |
Create an S3 object -- calls wrapped with CloudAux. | def put_object(client=None, **kwargs):
"""Create an S3 object -- calls wrapped with CloudAux."""
return client.put_object(**kwargs) |
Return a complete list of service control policy metadata dicts from the paginated ListPolicies API call
Args:
ca: CloudAux instance | def _list_service_control_policies(ca: CloudAux, **kwargs) -> List[Dict]:
"""Return a complete list of service control policy metadata dicts from the paginated ListPolicies API call
Args:
ca: CloudAux instance
"""
return ca.call(
"organizations.client.list_policies",
Filter="SERVICE_CONTROL_POLICY",
MaxResults=20,
**kwargs
) |
Return a complete list of target metadata dicts from the paginated ListTargetsForPolicy API call
Args:
ca: CloudAux instance
scp_id: service control policy ID | def _list_targets_for_policy(
ca: CloudAux, scp_id: str, **kwargs
) -> List[Dict[str, str]]:
"""Return a complete list of target metadata dicts from the paginated ListTargetsForPolicy API call
Args:
ca: CloudAux instance
scp_id: service control policy ID
"""
return ca.call(
"organizations.client.list_targets_for_policy",
PolicyId=scp_id,
MaxResults=20,
**kwargs
) |