code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def pack_result(human_detection, result, img_h, img_w):
"""Short summary.
Args:
human_detection (np.ndarray): Human detection result.
result (type): The predicted label of each human proposal.
img_h (int): The image height.
img_w (int): The image width.
Returns:
tuple: Tuple of human proposal, label name and label score.
"""
human_detection[:, 0::2] /= img_w
human_detection[:, 1::2] /= img_h
results = []
if result is None:
return None
for prop, res in zip(human_detection, result):
res.sort(key=lambda x: -x[1])
results.append(
(prop.data.cpu().numpy(), [x[0] for x in res], [x[1] for x in res]))
return results | Pack the human detection boxes and per-box action predictions.
Args:
human_detection (np.ndarray): Human detection result.
result (list): The predicted label of each human proposal.
img_h (int): The image height.
img_w (int): The image width.
Returns:
list[tuple]: Tuple of human proposal, label name and label score. | pack_result | python | open-mmlab/mmaction2 | demo/demo_video_structuralize.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_video_structuralize.py | Apache-2.0 |
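For orientation, a small worked example of pack_result with made-up inputs (the box coordinates, labels, and scores below are illustrative; in the demo they come from the detection and recognition models):

```python
import torch

# One proposal box (x1, y1, x2, y2) and two scored action labels (made up).
dets = torch.tensor([[32., 48., 128., 192.]])
preds = [[('stand', 0.9), ('sit', 0.4)]]

packed = pack_result(dets, preds, img_h=256, img_w=256)
# Boxes are normalized by image size; labels come back sorted by score:
# [(array([0.125 , 0.1875, 0.5   , 0.75  ]), ['stand', 'sit'], [0.9, 0.4])]
```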
def dense_timestamps(timestamps, n):
"""Make it nx frames."""
old_frame_interval = (timestamps[1] - timestamps[0])
start = timestamps[0] - old_frame_interval / n * (n - 1) / 2
new_frame_inds = np.arange(
len(timestamps) * n) * old_frame_interval / n + start
return new_frame_inds.astype(np.int64) | Make it nx frames. | main.dense_timestamps | python | open-mmlab/mmaction2 | demo/demo_video_structuralize.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_video_structuralize.py | Apache-2.0 |
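To make the centering arithmetic concrete, a short sketch with illustrative timestamps sampled every 8 frames:

```python
import numpy as np

timestamps = np.array([8, 16, 24, 32])
print(dense_timestamps(timestamps, 4))
# -> [ 5  7  9 11 13 15 17 19 21 23 25 27 29 31 33 35]
# 4x as many indices, spaced at interval/4 and centered on the original grid.
```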
def main():
args = parse_args()
tmp_dir = tempfile.TemporaryDirectory()
frame_paths, original_frames = frame_extract(
args.video, out_dir=tmp_dir.name)
num_frame = len(frame_paths)
h, w, _ = original_frames[0].shape
# Get Human detection results and pose results
human_detections, _ = detection_inference(
args.det_config,
args.det_checkpoint,
frame_paths,
args.det_score_thr,
device=args.device)
pose_datasample = None
if args.use_skeleton_recog or args.use_skeleton_stdet:
pose_results, pose_datasample = pose_inference(
args.pose_config,
args.pose_checkpoint,
frame_paths,
human_detections,
device=args.device)
# resize frames to shortside 256
new_w, new_h = mmcv.rescale_size((w, h), (256, np.inf))
frames = [mmcv.imresize(img, (new_w, new_h)) for img in original_frames]
w_ratio, h_ratio = new_w / w, new_h / h
# Load spatio-temporal detection label_map
stdet_label_map = load_label_map(args.label_map_stdet)
rgb_stdet_config = mmengine.Config.fromfile(args.rgb_stdet_config)
rgb_stdet_config.merge_from_dict(args.cfg_options)
try:
if rgb_stdet_config['data']['train']['custom_classes'] is not None:
stdet_label_map = {
id + 1: stdet_label_map[cls]
for id, cls in enumerate(rgb_stdet_config['data']['train']
['custom_classes'])
}
except KeyError:
pass
action_result = None
if args.use_skeleton_recog:
print('Use skeleton-based recognition')
action_result = skeleton_based_action_recognition(
args, pose_results, h, w)
else:
print('Use rgb-based recognition')
action_result = rgb_based_action_recognition(args)
stdet_preds = None
if args.use_skeleton_stdet:
print('Use skeleton-based SpatioTemporal Action Detection')
clip_len, frame_interval = 30, 1
timestamps, stdet_preds = skeleton_based_stdet(args, stdet_label_map,
human_detections,
pose_results, num_frame,
clip_len,
frame_interval, h, w)
for i in range(len(human_detections)):
det = human_detections[i]
det[:, 0:4:2] *= w_ratio
det[:, 1:4:2] *= h_ratio
human_detections[i] = torch.from_numpy(det[:, :4]).to(args.device)
else:
print('Use rgb-based SpatioTemporal Action Detection')
for i in range(len(human_detections)):
det = human_detections[i]
det[:, 0:4:2] *= w_ratio
det[:, 1:4:2] *= h_ratio
human_detections[i] = torch.from_numpy(det[:, :4]).to(args.device)
timestamps, stdet_preds = rgb_based_stdet(args, frames,
stdet_label_map,
human_detections, w, h,
new_w, new_h, w_ratio,
h_ratio)
stdet_results = []
for timestamp, prediction in zip(timestamps, stdet_preds):
human_detection = human_detections[timestamp - 1]
stdet_results.append(
pack_result(human_detection, prediction, new_h, new_w))
def dense_timestamps(timestamps, n):
"""Make it nx frames."""
old_frame_interval = (timestamps[1] - timestamps[0])
start = timestamps[0] - old_frame_interval / n * (n - 1) / 2
new_frame_inds = np.arange(
len(timestamps) * n) * old_frame_interval / n + start
return new_frame_inds.astype(np.int64)
dense_n = int(args.predict_stepsize / args.output_stepsize)
output_timestamps = dense_timestamps(timestamps, dense_n)
frames = [
cv2.imread(frame_paths[timestamp - 1])
for timestamp in output_timestamps
]
if args.use_skeleton_recog or args.use_skeleton_stdet:
pose_datasample = [
pose_datasample[timestamp - 1] for timestamp in output_timestamps
]
vis_frames = visualize(args, frames, stdet_results, pose_datasample,
action_result)
vid = mpy.ImageSequenceClip(vis_frames, fps=args.output_fps)
vid.write_videofile(args.out_filename)
tmp_dir.cleanup() | Make it nx frames. | main | python | open-mmlab/mmaction2 | demo/demo_video_structuralize.py | https://github.com/open-mmlab/mmaction2/blob/master/demo/demo_video_structuralize.py | Apache-2.0 |
def _sm_start(self, *args, **kwargs):
"""
Start the timer waiting for pain
"""
millisec = random.randint(self._start_min_delay, self._start_max_delay)
self._timer = threading.Timer(millisec / 1000.0, self.event_timeout)
self._timer.start() | Start the timer waiting for pain | _sm_start | python | worstcase/blockade | blockade/chaos.py | https://github.com/worstcase/blockade/blob/master/blockade/chaos.py | Apache-2.0 |
def _sm_to_pain(self, *args, **kwargs):
"""
Start the blockade event
"""
_logger.info("Starting chaos for blockade %s" % self._blockade_name)
self._do_blockade_event()
# start the timer to end the pain
millisec = random.randint(self._run_min_time, self._run_max_time)
self._timer = threading.Timer(millisec / 1000.0, self.event_timeout)
self._timer.start() | Start the blockade event | _sm_to_pain | python | worstcase/blockade | blockade/chaos.py | https://github.com/worstcase/blockade/blob/master/blockade/chaos.py | Apache-2.0 |
def _sm_stop_from_no_pain(self, *args, **kwargs):
"""
Stop chaos when there is no current blockade operation
"""
# Just stop the timer. It is possible that it was too late and the
# timer is about to run
_logger.info("Stopping chaos for blockade %s" % self._blockade_name)
self._timer.cancel() | Stop chaos when there is no current blockade operation | _sm_stop_from_no_pain | python | worstcase/blockade | blockade/chaos.py | https://github.com/worstcase/blockade/blob/master/blockade/chaos.py | Apache-2.0 |
def _sm_relieve_pain(self, *args, **kwargs):
"""
End the blockade event and return to a steady state
"""
_logger.info(
"Ending the degradation for blockade %s" % self._blockade_name)
self._do_reset_all()
# set a timer for the next pain event
millisec = random.randint(self._start_min_delay, self._start_max_delay)
self._timer = threading.Timer(millisec/1000.0, self.event_timeout)
self._timer.start() | End the blockade event and return to a steady state | _sm_relieve_pain | python | worstcase/blockade | blockade/chaos.py | https://github.com/worstcase/blockade/blob/master/blockade/chaos.py | Apache-2.0 |
def _sm_stop_from_pain(self, *args, **kwargs):
"""
Stop chaos while there is a blockade event in progress
"""
_logger.info("Stopping chaos for blockade %s" % self._blockade_name)
self._do_reset_all() | Stop chaos while there is a blockade event in progress | _sm_stop_from_pain | python | worstcase/blockade | blockade/chaos.py | https://github.com/worstcase/blockade/blob/master/blockade/chaos.py | Apache-2.0 |
def _sm_cleanup(self, *args, **kwargs):
"""
Delete all state associated with the chaos session
"""
if self._done_notification_func is not None:
self._done_notification_func()
self._timer.cancel() | Delete all state associated with the chaos session | _sm_cleanup | python | worstcase/blockade | blockade/chaos.py | https://github.com/worstcase/blockade/blob/master/blockade/chaos.py | Apache-2.0 |
def _sm_stale_timer(self, *args, **kwargs):
"""
This is used when a cancel was called right before the timer fired but
after it was too late to cancel the timer.
"""
_logger.debug("Stale timer event. Interesting but ignorable.") | This is used when a cancel was called right before the timer fired but
after it was too late to cancel the timer. | _sm_stale_timer | python | worstcase/blockade | blockade/chaos.py | https://github.com/worstcase/blockade/blob/master/blockade/chaos.py | Apache-2.0 |
def from_dict(name, values):
'''
Convert a dictionary of configuration values
into a sequence of BlockadeContainerConfig instances
'''
# determine the number of instances of this container
count = 1
count_value = values.get('count', 1)
if isinstance(count_value, int):
count = max(count_value, 1)
def with_index(name, idx):
if name and idx:
return '%s_%d' % (name, idx)
return name
def get_instance(n, idx=None):
return BlockadeContainerConfig(
with_index(n, idx),
values['image'],
command=values.get('command'),
links=values.get('links'),
volumes=values.get('volumes'),
publish_ports=values.get('ports'),
expose_ports=values.get('expose'),
environment=values.get('environment'),
hostname=values.get('hostname'),
dns=values.get('dns'),
start_delay=values.get('start_delay', 0),
neutral=values.get('neutral', False),
holy=values.get('holy', False),
container_name=with_index(values.get('container_name'), idx),
cap_add=values.get('cap_add'))
if count == 1:
yield get_instance(name)
else:
for idx in range(1, count+1):
# TODO: configurable name/index format
yield get_instance(name, idx) | Convert a dictionary of configuration values
into a sequence of BlockadeContainerConfig instances | from_dict | python | worstcase/blockade | blockade/config.py | https://github.com/worstcase/blockade/blob/master/blockade/config.py | Apache-2.0 |
def from_dict(values):
'''
Instantiate a BlockadeConfig instance based on
a given dictionary of configuration values
'''
try:
containers = values['containers']
parsed_containers = {}
for name, container_dict in containers.items():
try:
# one config entry might result in many container
# instances (indicated by the 'count' config value)
for cnt in BlockadeContainerConfig.from_dict(name, container_dict):
# check for duplicate 'container_name' definitions
if cnt.container_name:
cname = cnt.container_name
existing = [c for c in parsed_containers.values() if c.container_name == cname]
if existing:
raise BlockadeConfigError("Duplicate 'container_name' definition: %s" % (cname))
parsed_containers[cnt.name] = cnt
except Exception as err:
raise BlockadeConfigError(
"Container '%s' config problem: %s" % (name, err))
network = values.get('network')
if network:
defaults = _DEFAULT_NETWORK_CONFIG.copy()
defaults.update(network)
network = defaults
else:
network = _DEFAULT_NETWORK_CONFIG.copy()
return BlockadeConfig(parsed_containers, network=network)
except KeyError as err:
raise BlockadeConfigError("Config missing value: " + str(err))
except Exception as err:
# TODO log this to some debug stream?
raise BlockadeConfigError("Failed to load config: " + str(err)) | Instantiate a BlockadeConfig instance based on
a given dictionary of configuration values | from_dict | python | worstcase/blockade | blockade/config.py | https://github.com/worstcase/blockade/blob/master/blockade/config.py | Apache-2.0 |
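A minimal sketch of the dict shape this factory expects, mirroring what a parsed blockade.yaml would produce (image names are placeholders, and the .containers attribute name is an assumption based on the constructor call above):

```python
cfg = BlockadeConfig.from_dict({
    'containers': {
        'db':  {'image': 'postgres:13'},
        'app': {'image': 'myapp', 'links': {'db': 'db'}, 'count': 2},
    },
})
# 'count' expands one entry into indexed container names (assumed attribute).
print(sorted(cfg.containers))  # ['app_1', 'app_2', 'db']
```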
def dependency_sorted(containers):
"""Sort a dictionary or list of containers into dependency order
Returns a sequence
"""
if not isinstance(containers, collections.abc.Mapping):
containers = dict((c.name, c) for c in containers)
container_links = dict((name, set(c.links.keys()))
for name, c in containers.items())
sorted_names = _resolve(container_links)
return [containers[name] for name in sorted_names] | Sort a dictionary or list of containers into dependency order
Returns a sequence | dependency_sorted | python | worstcase/blockade | blockade/config.py | https://github.com/worstcase/blockade/blob/master/blockade/config.py | Apache-2.0 |
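For example, a container that links to another sorts after it. A hedged sketch, assuming the config class normalizes a missing links entry to an empty dict (dependency_sorted calls c.links.keys(), so it must):

```python
configs = {
    'db':  {'image': 'postgres'},                      # no dependencies
    'app': {'image': 'myapp', 'links': {'db': 'db'}},  # app depends on db
}
containers = [c for name, v in configs.items()
              for c in BlockadeContainerConfig.from_dict(name, v)]
print([c.name for c in dependency_sorted(containers)])  # ['db', 'app']
```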
def expand_partitions(containers, partitions):
'''
Validate the partitions of containers. If there are any containers
not in any partition, place them in a new partition.
'''
# filter out holy containers that don't belong
# to any partition at all
all_names = frozenset(c.name for c in containers if not c.holy)
holy_names = frozenset(c.name for c in containers if c.holy)
neutral_names = frozenset(c.name for c in containers if c.neutral)
partitions = [frozenset(p) for p in partitions]
unknown = set()
holy = set()
union = set()
for partition in partitions:
unknown.update(partition - all_names - holy_names)
holy.update(partition - all_names)
union.update(partition)
if unknown:
raise BlockadeError('Partitions contain unknown containers: %s' %
list(unknown))
if holy:
raise BlockadeError('Partitions contain holy containers: %s' %
list(holy))
# put any leftover containers in an implicit partition
leftover = all_names.difference(union)
if leftover:
partitions.append(leftover)
# we create an 'implicit' partition for the neutral containers
# in case they are not part of the leftover anyways
if not neutral_names.issubset(leftover):
partitions.append(neutral_names)
return partitions | Validate the partitions of containers. If there are any containers
not in any partition, place them in a new partition. | expand_partitions | python | worstcase/blockade | blockade/core.py | https://github.com/worstcase/blockade/blob/master/blockade/core.py | Apache-2.0 |
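A self-contained sketch of the leftover behavior, using a namedtuple stand-in for the real container objects (only .name, .holy, and .neutral are needed here):

```python
from collections import namedtuple

C = namedtuple('C', 'name holy neutral')
containers = [C('c1', False, False), C('c2', False, False), C('c3', False, False)]

# Only c1 is named, so c2 and c3 land in an implicit leftover partition.
print(expand_partitions(containers, [['c1']]))
# -> [frozenset({'c1'}), frozenset({'c2', 'c3'})]
```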
def get_source_chains(self, blockade_id):
"""Get a map of blockade chains IDs -> list of IPs targeted at them
For figuring out which container is in which partition
"""
result = {}
if not blockade_id:
raise ValueError("invalid blockade_id")
lines = self.get_chain_rules("FORWARD")
for line in lines:
parts = line.split()
if len(parts) < 4:
continue
try:
partition_index = parse_partition_index(blockade_id, parts[0])
except ValueError:
continue # not a rule targeting a blockade chain
source = parts[3]
if source:
result[source] = partition_index
return result | Get a map of source IPs -> blockade partition index,
for figuring out which container is in which partition | get_source_chains | python | worstcase/blockade | blockade/net.py | https://github.com/worstcase/blockade/blob/master/blockade/net.py | Apache-2.0 |
def insert_rule(self, chain, src=None, dest=None, target=None):
"""Insert a new rule in the chain
"""
if not chain:
raise ValueError("Invalid chain")
if not target:
raise ValueError("Invalid target")
if not (src or dest):
raise ValueError("Need src, dest, or both")
args = ["-I", chain]
if src:
args += ["-s", src]
if dest:
args += ["-d", dest]
args += ["-j", target]
self.call(*args) | Insert a new rule in the chain | insert_rule | python | worstcase/blockade | blockade/net.py | https://github.com/worstcase/blockade/blob/master/blockade/net.py | Apache-2.0 |
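The argument list assembles a standard iptables invocation; for instance (values illustrative, netctl standing in for an instance of this wrapper class):

```python
# Builds and runs: iptables -I FORWARD -s 172.17.0.2 -j blockade-abc-p1
netctl.insert_rule('FORWARD', src='172.17.0.2', target='blockade-abc-p1')
```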
def create_chain(self, chain):
"""Create a new chain
"""
if not chain:
raise ValueError("Invalid chain")
self.call("-N", chain) | Create a new chain | create_chain | python | worstcase/blockade | blockade/net.py | https://github.com/worstcase/blockade/blob/master/blockade/net.py | Apache-2.0 |
def clear(self, blockade_id):
"""Remove all iptables rules and chains related to this blockade
"""
# first remove references to our custom chains
self.delete_blockade_rules(blockade_id)
# then remove the chains themselves
self.delete_blockade_chains(blockade_id) | Remove all iptables rules and chains related to this blockade | clear | python | worstcase/blockade | blockade/net.py | https://github.com/worstcase/blockade/blob/master/blockade/net.py | Apache-2.0 |
def cmd_up(opts):
"""Start the containers and link them together
"""
config = load_config(opts.config)
b = get_blockade(config, opts)
containers = b.create(verbose=opts.verbose, force=opts.force)
print_containers(containers, opts.json) | Start the containers and link them together | cmd_up | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def cmd_destroy(opts):
"""Destroy all containers and restore networks
"""
config = load_config(opts.config)
b = get_blockade(config, opts)
b.destroy() | Destroy all containers and restore networks | cmd_destroy | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def cmd_status(opts):
"""Print status of containers and networks
"""
config = load_config(opts.config)
b = get_blockade(config, opts)
containers = b.status()
print_containers(containers, opts.json) | Print status of containers and networks | cmd_status | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def cmd_start(opts):
"""Start some or all containers
"""
__with_containers(opts, Blockade.start) | Start some or all containers | cmd_start | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def cmd_kill(opts):
"""Kill some or all containers
"""
kill_signal = opts.signal if hasattr(opts, 'signal') else "SIGKILL"
__with_containers(opts, Blockade.kill, signal=kill_signal) | Kill some or all containers | cmd_kill | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def cmd_stop(opts):
"""Stop some or all containers
"""
__with_containers(opts, Blockade.stop) | Stop some or all containers | cmd_stop | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def cmd_restart(opts):
"""Restart some or all containers
"""
__with_containers(opts, Blockade.restart) | Restart some or all containers | cmd_restart | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def cmd_flaky(opts):
"""Make the network flaky for some or all containers
"""
__with_containers(opts, Blockade.flaky) | Make the network flaky for some or all containers | cmd_flaky | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def cmd_slow(opts):
"""Make the network slow for some or all containers
"""
__with_containers(opts, Blockade.slow) | Make the network slow for some or all containers | cmd_slow | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def cmd_fast(opts):
"""Restore network speed and reliability for some or all containers
"""
__with_containers(opts, Blockade.fast) | Restore network speed and reliability for some or all containers | cmd_fast | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def cmd_duplicate(opts):
"""Introduce packet duplication into the network of some or all containers
"""
__with_containers(opts, Blockade.duplicate) | Introduce packet duplication into the network of some or all containers | cmd_duplicate | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def cmd_partition(opts):
"""Partition the network between containers
Replaces any existing partitions outright. Any containers NOT specified
in arguments will be globbed into a single implicit partition. For
example if you have three containers: c1, c2, and c3 and you run:
blockade partition c1
The result will be a partition with just c1 and another partition with
c2 and c3.
Alternatively, --random may be specified, and zero or more random
partitions will be generated by blockade.
"""
config = load_config(opts.config)
b = get_blockade(config, opts)
if opts.random:
if opts.partitions:
raise BlockadeError("Either specify individual partitions "
"or --random, but not both")
b.random_partition()
else:
partitions = []
for partition in opts.partitions:
names = []
for name in partition.split(","):
name = name.strip()
if name:
names.append(name)
partitions.append(names)
if not partitions:
raise BlockadeError("Either specify individual partitions "
"or random")
b.partition(partitions) | Partition the network between containers
Replaces any existing partitions outright. Any containers NOT specified
in arguments will be globbed into a single implicit partition. For
example if you have three containers: c1, c2, and c3 and you run:
blockade partition c1
The result will be a partition with just c1 and another partition with
c2 and c3.
Alternatively, --random may be specified, and zero or more random
partitions will be generated by blockade. | cmd_partition | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def cmd_join(opts):
"""Restore full networking between containers
"""
config = load_config(opts.config)
b = get_blockade(config, opts)
b.join() | Restore full networking between containers | cmd_join | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def cmd_logs(opts):
"""Fetch the logs of a container
"""
config = load_config(opts.config)
b = get_blockade(config, opts)
puts(b.logs(opts.container).decode(encoding='UTF-8')) | Fetch the logs of a container | cmd_logs | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def cmd_daemon(opts):
"""Start the Blockade REST API
"""
if opts.data_dir is None:
raise BlockadeError("You must supply a data directory for the daemon")
rest.start(data_dir=opts.data_dir, port=opts.port, debug=opts.debug,
host_exec=get_host_exec()) | Start the Blockade REST API | cmd_daemon | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def cmd_add(opts):
"""Add one or more existing Docker containers to a Blockade group
"""
config = load_config(opts.config)
b = get_blockade(config, opts)
b.add_container(opts.containers) | Add one or more existing Docker containers to a Blockade group | cmd_add | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def cmd_version(opts):
"""Show the Blockade version information
"""
import blockade.version
puts("Blockade " + blockade.version.__version__) | Show the Blockade version information | cmd_version | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def cmd_events(opts):
"""Get the event log for a given blockade
"""
config = load_config(opts.config)
b = get_blockade(config, opts)
if opts.json:
outf = None
_write = puts
if opts.output is not None:
outf = open(opts.output, "w")
_write = outf.write
try:
delim = ""
logs = b.get_audit().read_logs(as_json=False)
_write('{"events": [')
_write(os.linesep)
for l in logs:
_write(delim + l)
delim = "," + os.linesep
_write(os.linesep)
_write(']}')
finally:
if opts.output is not None:
outf.close()
else:
puts(colored.blue(columns(["EVENT", 10],
["TARGET", 16],
["STATUS", 8],
["TIME", 16],
["MESSAGE", 25])))
logs = b.get_audit().read_logs(as_json=True)
for l in logs:
puts(columns([l['event'], 10],
[str([str(t) for t in l['targets']]), 16],
[l['status'], 8],
[str(l['timestamp']), 16],
[l['message'], 25])) | Get the event log for a given blockade | cmd_events | python | worstcase/blockade | blockade/cli.py | https://github.com/worstcase/blockade/blob/master/blockade/cli.py | Apache-2.0 |
def blockade_net_name(self):
'''Generate blockade network name based on the blockade_id'''
return "%s_net" % self._blockade_id | Generate blockade network name based on the blockade_id | blockade_net_name | python | worstcase/blockade | blockade/state.py | https://github.com/worstcase/blockade/blob/master/blockade/state.py | Apache-2.0 |
def containers(self):
'''Dictionary of container information'''
return deepcopy(self._containers) | Dictionary of container information | containers | python | worstcase/blockade | blockade/state.py | https://github.com/worstcase/blockade/blob/master/blockade/state.py | Apache-2.0 |
def container_id(self, name):
'''Try to find the container ID with the specified name'''
container = self._containers.get(name, None)
if container is not None:
return container.get('id', None)
return None | Try to find the container ID with the specified name | container_id | python | worstcase/blockade | blockade/state.py | https://github.com/worstcase/blockade/blob/master/blockade/state.py | Apache-2.0 |
def initialize(self, containers):
'''
Initialize a new state file with the given contents.
This function fails in case the state file already exists.
'''
self._containers = deepcopy(containers)
self.__write(containers, initialize=True) | Initialize a new state file with the given contents.
This function fails in case the state file already exists. | initialize | python | worstcase/blockade | blockade/state.py | https://github.com/worstcase/blockade/blob/master/blockade/state.py | Apache-2.0 |
def exists(self):
'''Checks whether a blockade state file already exists'''
return os.path.isfile(self._state_file) | Checks whether a blockade state file already exists | exists | python | worstcase/blockade | blockade/state.py | https://github.com/worstcase/blockade/blob/master/blockade/state.py | Apache-2.0 |
def update(self, containers):
'''Update the current state file with the specified contents'''
self._containers = deepcopy(containers)
self.__write(containers, initialize=False) | Update the current state file with the specified contents | update | python | worstcase/blockade | blockade/state.py | https://github.com/worstcase/blockade/blob/master/blockade/state.py | Apache-2.0 |
def load(self):
'''Try to load a blockade state file in the current directory'''
try:
with open(self._state_file) as f:
state = yaml.safe_load(f)
self._containers = state['containers']
except (IOError, OSError) as err:
if err.errno == errno.ENOENT:
raise NotInitializedError("No blockade exists in this context")
raise InconsistentStateError("Failed to load Blockade state: "
+ str(err))
except Exception as err:
raise InconsistentStateError("Failed to load Blockade state: "
+ str(err)) | Try to load a blockade state file in the current directory | load | python | worstcase/blockade | blockade/state.py | https://github.com/worstcase/blockade/blob/master/blockade/state.py | Apache-2.0 |
def destroy(self):
'''Try to remove the current state file and directory'''
self._state_delete() | Try to remove the current state file and directory | destroy | python | worstcase/blockade | blockade/state.py | https://github.com/worstcase/blockade/blob/master/blockade/state.py | Apache-2.0 |
def _get_blockade_id_from_cwd(self, cwd=None):
'''Generate a new blockade ID based on the CWD'''
if not cwd:
cwd = os.getcwd()
# this follows a similar pattern as docker-compose uses
parent_dir = os.path.abspath(cwd)
basename = os.path.basename(parent_dir).lower()
blockade_id = re.sub(r"[^a-z0-9]", "", basename)
if not blockade_id: # if we can't get a valid name from CWD, use "default"
blockade_id = "default"
return blockade_id | Generate a new blockade ID based on the CWD | _get_blockade_id_from_cwd | python | worstcase/blockade | blockade/state.py | https://github.com/worstcase/blockade/blob/master/blockade/state.py | Apache-2.0 |
def _assure_dir(self):
'''Make sure the state directory exists'''
try:
os.makedirs(self._state_dir)
except OSError as err:
if err.errno != errno.EEXIST:
raise | Make sure the state directory exists | _assure_dir | python | worstcase/blockade | blockade/state.py | https://github.com/worstcase/blockade/blob/master/blockade/state.py | Apache-2.0 |
def _state_delete(self):
'''Try to delete the state.yml file and the folder .blockade'''
try:
os.remove(self._state_file)
except OSError as err:
if err.errno not in (errno.EPERM, errno.ENOENT):
raise
try:
os.rmdir(self._state_dir)
except OSError as err:
if err.errno not in (errno.ENOTEMPTY, errno.ENOENT):
raise | Try to delete the state.yml file and the folder .blockade | _state_delete | python | worstcase/blockade | blockade/state.py | https://github.com/worstcase/blockade/blob/master/blockade/state.py | Apache-2.0 |
def __base_state(self, containers):
'''
Convert blockade ID and container information into
a state dictionary object.
'''
return dict(blockade_id=self._blockade_id,
containers=containers,
version=self._state_version) | Convert blockade ID and container information into
a state dictionary object. | __base_state | python | worstcase/blockade | blockade/state.py | https://github.com/worstcase/blockade/blob/master/blockade/state.py | Apache-2.0 |
def __write(self, containers, initialize=True):
'''Write the given state information into a file'''
path = self._state_file
self._assure_dir()
try:
flags = os.O_WRONLY | os.O_CREAT
if initialize:
flags |= os.O_EXCL
with os.fdopen(os.open(path, flags), "w") as f:
yaml.safe_dump(self.__base_state(containers), f)
except OSError as err:
if err.errno == errno.EEXIST:
raise AlreadyInitializedError(
"Path %s exists. "
"You may need to destroy a previous blockade." % path)
raise
except Exception:
# clean up our created file
self._state_delete()
raise | Write the given state information into a file | __write | python | worstcase/blockade | blockade/state.py | https://github.com/worstcase/blockade/blob/master/blockade/state.py | Apache-2.0 |
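The os.O_EXCL bit is what makes initialize() safe against a pre-existing state file: creation is atomic, and a second attempt fails with EEXIST. A standalone sketch of the pattern (the path is illustrative):

```python
import errno
import os

flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
try:
    fd = os.open('/tmp/state.yml', flags)  # fails atomically if the file exists
    os.close(fd)
except OSError as err:
    assert err.errno == errno.EEXIST
```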
def test_reuse_container(self):
"""test that containers are reused"""
# no containers should be running
self.assert_no_containers()
# run one process, which should start a container and leave it running
host_exec = self.get_host_exec()
container_hostname_1 = host_exec.run(_GET_CONTAINER_ID_CMD).strip()
self.assertTrue(container_hostname_1)
containers_1 = self.helper.find_created_containers()
self.assertEqual(len(containers_1), 1)
self.assertTrue(containers_1[0].startswith(container_hostname_1))
# run another process, which should reuse the existing container
container_hostname_2 = host_exec.run(_GET_CONTAINER_ID_CMD).strip()
containers_2 = self.helper.find_created_containers()
self.assertEqual(containers_1, containers_2)
self.assertEqual(container_hostname_1, container_hostname_2)
host_exec.close()
self.assert_no_containers()
container_hostname_3 = host_exec.run(_GET_CONTAINER_ID_CMD).strip()
self.assertNotEqual(container_hostname_3, container_hostname_1)
containers_3 = self.helper.find_created_containers()
self.assertEqual(len(containers_3), 1)
self.assertTrue(containers_3[0].startswith(container_hostname_3))
host_exec.close() | test that containers are reused | test_reuse_container | python | worstcase/blockade | blockade/tests/test_host.py | https://github.com/worstcase/blockade/blob/master/blockade/tests/test_host.py | Apache-2.0 |
def test_killed_container(self):
"""test that dead containers are gracefully replaced"""
host_exec = self.get_host_exec()
host_exec.run(["hostname"])
containers_1 = self.helper.find_created_containers()
self.assertEqual(len(containers_1), 1)
self.docker.kill(containers_1[0])
host_exec.run(["hostname"])
containers_2 = self.helper.find_created_containers()
self.assertEqual(len(containers_2), 1)
self.assertNotEqual(containers_1, containers_2)
host_exec.close() | test that dead containers are gracefully replaced | test_killed_container | python | worstcase/blockade | blockade/tests/test_host.py | https://github.com/worstcase/blockade/blob/master/blockade/tests/test_host.py | Apache-2.0 |
def test_removed_container(self):
"""test that missing containers are gracefully replaced"""
host_exec = self.get_host_exec()
host_exec.run(["hostname"])
containers_1 = self.helper.find_created_containers()
self.assertEqual(len(containers_1), 1)
self.docker.kill(containers_1[0])
self.docker.remove_container(containers_1[0])
host_exec.run(["hostname"])
containers_2 = self.helper.find_created_containers()
self.assertEqual(len(containers_2), 1)
self.assertNotEqual(containers_1, containers_2)
host_exec.close() | test that missing containers are gracefully replaced | test_removed_container | python | worstcase/blockade | blockade/tests/test_host.py | https://github.com/worstcase/blockade/blob/master/blockade/tests/test_host.py | Apache-2.0 |
def test_close_removed_container(self):
"""test that close() handles missing containers gracefully"""
host_exec = self.get_host_exec()
host_exec.run(["hostname"])
containers_1 = self.helper.find_created_containers()
self.assertEqual(len(containers_1), 1)
self.docker.kill(containers_1[0])
self.docker.remove_container(containers_1[0])
host_exec.close() | test that close() handles missing containers gracefully | test_close_removed_container | python | worstcase/blockade | blockade/tests/test_host.py | https://github.com/worstcase/blockade/blob/master/blockade/tests/test_host.py | Apache-2.0 |
def test_expired_container(self):
""" test that containers are replaced upon expiration"""
host_exec = self.get_host_exec()
host_exec.run(["hostname"])
containers_1 = self.helper.find_created_containers()
self.assertEqual(len(containers_1), 1)
# ensure that expire time was set approximately correctly
time_left = host_exec._container_expire_time - time.time()
self.assertTrue(0 < time_left < host_exec._container_expire)
# forcibly set expire time to ensure that next call triggers
host_exec._container_expire_time = time.time()
host_exec.run(["hostname"])
containers_2 = self.helper.find_created_containers()
self.assertEqual(len(containers_2), 1)
self.assertNotEqual(containers_1, containers_2)
host_exec.close() | test that containers are replaced upon expiration | test_expired_container | python | worstcase/blockade | blockade/tests/test_host.py | https://github.com/worstcase/blockade/blob/master/blockade/tests/test_host.py | Apache-2.0 |
def test_command_error(self):
"""test that commands with nonzero exit codes result in exceptions
"""
host_exec = self.get_host_exec()
with self.assertRaises(HostExecError) as cm:
host_exec.run(["false"])
_logger.debug(cm.exception)
host_exec.close() | test that commands with nonzero exit codes result in exceptions | test_command_error | python | worstcase/blockade | blockade/tests/test_host.py | https://github.com/worstcase/blockade/blob/master/blockade/tests/test_host.py | Apache-2.0 |
def test_parallel_run(self):
"""test that many commands can be run in parallel"""
host_exec = self.get_host_exec()
event = threading.Event()
error_lock = threading.Lock()
errors = []
def _thread():
try:
assert event.wait(60)
for _ in range(10):
host_exec.run(["hostname"])
except Exception as e:
with error_lock:
errors.append(e)
threads = [threading.Thread(target=_thread) for _ in range(10)]
for t in threads:
t.daemon = True
t.start()
event.set()
for t in threads:
t.join()
self.assertEqual(errors, [])
host_exec.close() | test that many commands can be run in parallel | test_parallel_run | python | worstcase/blockade | blockade/tests/test_host.py | https://github.com/worstcase/blockade/blob/master/blockade/tests/test_host.py | Apache-2.0 |
def test_yml(self):
"""load config from blockade.yml"""
self._writeConfig("blockade.yml")
config = cli.load_config()
self.assertIn("zzz", config.containers) | load config from blockade.yml | test_yml | python | worstcase/blockade | blockade/tests/test_cli.py | https://github.com/worstcase/blockade/blob/master/blockade/tests/test_cli.py | Apache-2.0 |
def test_yaml(self):
"""load config from blockade.yaml"""
self._writeConfig("blockade.yaml")
config = cli.load_config()
self.assertIn("zzz", config.containers) | load config from blockade.yaml | test_yaml | python | worstcase/blockade | blockade/tests/test_cli.py | https://github.com/worstcase/blockade/blob/master/blockade/tests/test_cli.py | Apache-2.0 |
def test_custom(self):
"""load config from custom path"""
self._writeConfig("custom-file.yaml")
config = cli.load_config("./custom-file.yaml")
self.assertIn("zzz", config.containers) | load config from custom path | test_custom | python | worstcase/blockade | blockade/tests/test_cli.py | https://github.com/worstcase/blockade/blob/master/blockade/tests/test_cli.py | Apache-2.0 |
def test_custom_notfound(self):
"""load config from nonexistent custom path"""
with self.assertRaisesRegex(BlockadeError, "^Failed to load config"):
cli.load_config("./custom-file.yaml") | load config from nonexistent custom path | test_custom_notfound | python | worstcase/blockade | blockade/tests/test_cli.py | https://github.com/worstcase/blockade/blob/master/blockade/tests/test_cli.py | Apache-2.0 |
def test_noconfig(self):
"""load default config when no file is present"""
config = cli.load_config()
self.assertEqual(0, len(config.containers)) | load default config when no file is present | test_noconfig | python | worstcase/blockade | blockade/tests/test_cli.py | https://github.com/worstcase/blockade/blob/master/blockade/tests/test_cli.py | Apache-2.0 |
def wait_for_children():
"""Wait for child processes to exit
The testing system launches and terminates child processes, but
doesn't wait for them to actually die. So in a few places we need
this extra call"""
wait(lambda: len(multiprocessing.active_children()) == 0) | Wait for child processes to exit
The testing system launches and terminates child processes, but
doesn't wait for them to actually die. So in a few places we need
this extra call | wait_for_children | python | worstcase/blockade | blockade/tests/test_integration_rest.py | https://github.com/worstcase/blockade/blob/master/blockade/tests/test_integration_rest.py | Apache-2.0 |
def test_action_stop_start_slow(self):
"""Test stop/start actions and the slow network state via the REST API"""
data = '''
{
"partitions": [["c1"], ["c2"]]
}
'''
url = self.url + '/partitions'
result = requests.post(url, headers=self.headers, data=data)
assert result.status_code == 204
self._assert_partition()
self._assert_event('partition')
result = requests.delete(url)
self._assert_join()
data = '''
{
"command": "stop",
"container_names": ["c2"]
}
'''
url = self.url + '/action'
result = requests.post(url, headers=self.headers, data=data)
assert result.status_code == 204
self._assert_container_status('c2', 'DOWN')
time.sleep(90)
data = '''
{
"command": "start",
"container_names": ["c2"]
}
'''
url = self.url + '/action'
result = requests.post(url, headers=self.headers, data=data)
assert result.status_code == 204
self._assert_container_status('c2', 'UP')
data = '''
{
"network_state": "slow",
"container_names": ["c1"]
}
'''
url = self.url + '/network_state'
result = requests.post(url, headers=self.headers, data=data)
assert result.status_code == 204
self._assert_container_network_state('c1', 'SLOW')
url = self.url + '/events'
result = requests.get(url, headers=self.headers, data=data) | Test stop/start actions and the slow network state via the REST API | test_action_stop_start_slow | python | worstcase/blockade | blockade/tests/test_integration_rest.py | https://github.com/worstcase/blockade/blob/master/blockade/tests/test_integration_rest.py | Apache-2.0 |
def enum(enum_type='enum', base_classes=None, methods=None, **attrs):
"""
Generates an enumeration with the given attributes.
"""
# Enumerations cannot be initialized as a new instance
def __init__(instance, *args, **kwargs):
raise RuntimeError('%s types can not be initialized.' % enum_type)
if base_classes is None:
base_classes = ()
if methods is None:
methods = {}
base_classes = base_classes + (object,)
for k, v in methods.items():
methods[k] = classmethod(v)
attrs['enums'] = attrs.copy()
methods.update(attrs)
methods['__init__'] = __init__
return type(to_string(enum_type), base_classes, methods) | Generates an enumeration with the given attributes. | enum | python | ozgur/python-linkedin | linkedin/utils.py | https://github.com/ozgur/python-linkedin/blob/master/linkedin/utils.py | MIT |
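Usage sketch, modeled on how PERMISSIONS is consumed in this library's server module (the permission names here are illustrative):

```python
PERMISSIONS = enum('Permissions',
                   BASIC_PROFILE='r_basicprofile',
                   EMAIL_ADDRESS='r_emailaddress')

print(PERMISSIONS.BASIC_PROFILE)            # 'r_basicprofile'
print(sorted(PERMISSIONS.enums.values()))   # ['r_basicprofile', 'r_emailaddress']
PERMISSIONS()  # raises RuntimeError: Permissions types can not be initialized.
```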
def quick_api(api_key, secret_key, port=8000):
"""
This method helps you get access to the LinkedIn API quickly when using it
from the interpreter.
Notice that this method creates an HTTP server and waits for a request, so it
shouldn't be used in real production code - it's just a helper for debugging.
The usage is basically:
api = quick_api(KEY, SECRET)
After you do that, it will print a URL which you must visit
and allow the access; after you do that, the method will return with the api
object.
"""
auth = LinkedInAuthentication(api_key, secret_key, 'http://localhost:8000/',
PERMISSIONS.enums.values())
app = LinkedInApplication(authentication=auth)
print(auth.authorization_url)
_wait_for_user_to_enter_browser(app, port)
return app | This method helps you get access to the LinkedIn API quickly when using it
from the interpreter.
Notice that this method creates an HTTP server and waits for a request, so it
shouldn't be used in real production code - it's just a helper for debugging.
The usage is basically:
api = quick_api(KEY, SECRET)
After you do that, it will print a URL which you must visit
and allow the access; after you do that, the method will return with the api
object. | quick_api | python | ozgur/python-linkedin | linkedin/server.py | https://github.com/ozgur/python-linkedin/blob/master/linkedin/server.py | MIT |
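The interactive flow then looks like this (keys are placeholders; get_profile is shown as a typical first call and is illustrative only):

```python
app = quick_api('API_KEY', 'SECRET_KEY')
# ...visit the printed URL and authorize; quick_api blocks on the local
# HTTP callback and returns a ready-to-use application object...
app.get_profile()
```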
def reset(self, item: Optional[str] = None) -> None:
"""Reset one or all (if None is received) configuration values.
Parameters
----------
item : str, optional
Configuration item name.
Returns
-------
None
None.
Examples
--------
>>> import cfn_flip
>>> cfn_flip.config.reset("max_col_width") # Reset one specific configuration
>>> cfn_flip.config.reset() # Reset all
"""
if item is None:
for name, conf in _CONFIG_DEFAULTS.items():
delattr(self, f"_{name}")
self._load_config(name=name, conf=conf)
else:
delattr(self, f"_{item}")
self._load_config(name=item, conf=_CONFIG_DEFAULTS[item]) | Reset one or all (if None is received) configuration values.
Parameters
----------
item : str, optional
Configuration item name.
Returns
-------
None
None.
Examples
--------
>>> import cfn_flip
>>> cfn_flip.config.reset("max_col_width") # Reset one specific configuration
>>> cfn_flip.config.reset() # Reset all | reset | python | awslabs/aws-cfn-template-flip | cfn_tools/_config.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/cfn_tools/_config.py | Apache-2.0 |
def apply_configs(function):
"""Decorate some function with configs."""
signature = inspect.signature(function)
args_names = list(signature.parameters.keys())
valid_configs = [x for x in _CONFIG_DEFAULTS if x in args_names]
def wrapper(*args, **kwargs):
received_args = signature.bind_partial(*args, **kwargs).arguments
available_configs = [x for x in valid_configs if (x not in received_args) and (hasattr(config, x) is True)]
missing_args = {x: config[x] for x in available_configs}
final_args = {**received_args, **missing_args}
return function(**final_args)
wrapper.__doc__ = function.__doc__
wrapper.__name__ = function.__name__
wrapper.__signature__ = signature
return wrapper | Decorate some function with configs. | apply_configs | python | awslabs/aws-cfn-template-flip | cfn_tools/_config.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/cfn_tools/_config.py | Apache-2.0 |
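A hedged sketch of the decorator in action, assuming max_col_width is a configured item exposed on the config singleton (the reset() docstring above names it) and that omitted arguments default to None:

```python
@apply_configs
def render(text, max_col_width=None):
    # max_col_width is injected from `config` whenever the caller omits it
    return text[:max_col_width]

print(render('a very long line of yaml'))           # width comes from config
print(render('a very long line', max_col_width=5))  # explicit argument wins
```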
def map_representer(dumper, value):
"""
Map ODict into ODict to prevent sorting
"""
return dumper.represent_mapping(TAG_MAP, value) | Map ODict into ODict to prevent sorting | map_representer | python | awslabs/aws-cfn-template-flip | cfn_tools/yaml_dumper.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/cfn_tools/yaml_dumper.py | Apache-2.0 |
def multi_constructor(loader, tag_suffix, node):
"""
Deal with !Ref style function format
"""
if tag_suffix not in UNCONVERTED_SUFFIXES:
tag_suffix = "{}{}".format(FN_PREFIX, tag_suffix)
constructor = None
if tag_suffix == "Fn::GetAtt":
constructor = construct_getatt
elif isinstance(node, yaml.ScalarNode):
constructor = loader.construct_scalar
elif isinstance(node, yaml.SequenceNode):
constructor = loader.construct_sequence
elif isinstance(node, yaml.MappingNode):
constructor = loader.construct_mapping
else:
raise Exception("Bad tag: !{}".format(tag_suffix))
return ODict((
(tag_suffix, constructor(node)),
)) | Deal with !Ref style function format | multi_constructor | python | awslabs/aws-cfn-template-flip | cfn_tools/yaml_loader.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/cfn_tools/yaml_loader.py | Apache-2.0 |
def construct_getatt(node):
"""
Reconstruct !GetAtt into a list
"""
if isinstance(node.value, six.text_type):
return node.value.split(".", 1)
elif isinstance(node.value, list):
return [s.value for s in node.value]
else:
raise ValueError("Unexpected node type: {}".format(type(node.value))) | Reconstruct !GetAtt into a list | construct_getatt | python | awslabs/aws-cfn-template-flip | cfn_tools/yaml_loader.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/cfn_tools/yaml_loader.py | Apache-2.0 |
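Both node shapes normalize to a list; the scalar case splits on the first dot:

```python
import yaml

scalar = yaml.ScalarNode(tag='!GetAtt', value='Cake.Hole')
print(construct_getatt(scalar))  # ['Cake', 'Hole']
```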
def construct_mapping(self, node, deep=False):
"""
Use ODict for maps
"""
mapping = ODict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping | Use ODict for maps | construct_mapping | python | awslabs/aws-cfn-template-flip | cfn_tools/yaml_loader.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/cfn_tools/yaml_loader.py | Apache-2.0 |
def convert_join(value):
"""
Fix a Join ;)
"""
if not isinstance(value, list) or len(value) != 2:
# Cowardly refuse
return value
sep, parts = value[0], value[1]
if isinstance(parts, six.string_types):
return parts
if not isinstance(parts, list):
# This looks tricky, just return the join as it was
return {
"Fn::Join": value,
}
plain_string = True
args = ODict()
new_parts = []
for part in parts:
part = clean(part)
if isinstance(part, dict):
plain_string = False
if "Ref" in part:
new_parts.append("${{{}}}".format(part["Ref"]))
elif "Fn::GetAtt" in part:
params = part["Fn::GetAtt"]
new_parts.append("${{{}}}".format(".".join(params)))
else:
for key, val in args.items():
# we want to bail if a conditional can evaluate to AWS::NoValue
if isinstance(val, dict):
if "Fn::If" in val and "AWS::NoValue" in str(val["Fn::If"]):
return {
"Fn::Join": value,
}
if val == part:
param_name = key
break
else:
param_name = "Param{}".format(len(args) + 1)
args[param_name] = part
new_parts.append("${{{}}}".format(param_name))
elif isinstance(part, six.string_types):
new_parts.append(part.replace("${", "${!"))
else:
# Doing something weird; refuse
return {
"Fn::Join": value
}
source = sep.join(new_parts)
if plain_string:
return source
if args:
return ODict((
("Fn::Sub", [source, args]),
))
return ODict((
("Fn::Sub", source),
)) | Fix a Join ;) | convert_join | python | awslabs/aws-cfn-template-flip | cfn_clean/__init__.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/cfn_clean/__init__.py | Apache-2.0 |
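Two worked examples of the simplest conversion paths (template fragments are illustrative):

```python
# Plain string parts collapse to a single string:
print(convert_join([' ', ['The', 'cake', 'is', 'a', 'lie']]))
# -> 'The cake is a lie'

# A Ref becomes an inline ${...} inside an Fn::Sub:
print(convert_join(['-', [{'Ref': 'Stage'}, 'bucket']]))
# -> ODict([('Fn::Sub', '${Stage}-bucket')])
```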
def clean(source):
"""
Clean up the source:
* Replace use of Fn::Join with Fn::Sub
* Keep json body for specific resource properties
"""
if isinstance(source, dict):
for key, value in source.items():
if key == "Fn::Join":
return convert_join(value)
else:
source[key] = clean(value)
elif isinstance(source, list):
return [clean(item) for item in source]
return source | Clean up the source:
* Replace use of Fn::Join with Fn::Sub
* Keep json body for specific resource properties | clean | python | awslabs/aws-cfn-template-flip | cfn_clean/__init__.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/cfn_clean/__init__.py | Apache-2.0 |
def cfn_literal_parser(source):
"""
Sanitize the source:
* Keep json body for specific resource properties
"""
if isinstance(source, dict):
for key, value in source.items():
if key == "Type":
for item in UNCONVERTED_KEYS:
if value == item[0]:
# Checking if this resource has "Properties" and the property literal to maintain
# Better check than just try/except KeyError :-)
if source.get("Properties") and source.get("Properties", {}).get(item[1]):
if isinstance(source["Properties"][item[1]], dict) and \
not has_intrinsic_functions(source["Properties"][item[1]].keys()):
source["Properties"][item[1]] = LiteralString(u"{}".format(json.dumps(
source["Properties"][item[1]],
indent=2,
separators=(',', ': '))
))
else:
source[key] = cfn_literal_parser(value)
elif isinstance(source, list):
return [cfn_literal_parser(item) for item in source]
return source | Sanitize the source:
* Keep json body for specific resource properties | cfn_literal_parser | python | awslabs/aws-cfn-template-flip | cfn_clean/__init__.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/cfn_clean/__init__.py | Apache-2.0 |
def test_basic_case():
"""
As simple as it gets
"""
source = {
"Fn::Join": [
" ",
["The", "cake", "is", "a", "lie"],
],
}
expected = "The cake is a lie"
actual = clean(source)
assert expected == actual | As simple as it gets | test_basic_case | python | awslabs/aws-cfn-template-flip | tests/test_clean.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_clean.py | Apache-2.0 |
def test_ref():
"""
Refs should be replaced by ${value}
"""
source = {
"Fn::Join": [
" ",
["The", {"Ref": "Cake"}, "is", "a", "lie"],
],
}
expected = {
"Fn::Sub": "The ${Cake} is a lie",
}
actual = clean(source)
assert expected == actual | Refs should be replaced by ${value} | test_ref | python | awslabs/aws-cfn-template-flip | tests/test_clean.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_clean.py | Apache-2.0 |
def test_get_att():
"""
GetAtt should be replaced by ${Thing.Property}
"""
source = {
"Fn::Join": [
" ",
["The", {"Fn::GetAtt": ["Cake", "Hole"]}, "is", "a", "lie"],
],
}
expected = {
"Fn::Sub": "The ${Cake.Hole} is a lie",
}
actual = clean(source)
assert expected == actual | GetAtt should be replaced by ${Thing.Property} | test_get_att | python | awslabs/aws-cfn-template-flip | tests/test_clean.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_clean.py | Apache-2.0 |
def test_multi_level_get_att():
"""
Multi-level GetAtt should be replaced by ${First.Second.Third}
"""
source = {
"Fn::Join": [
" ",
["The", {"Fn::GetAtt": ["First", "Second", "Third"]}, "is", "a", "lie"],
],
}
expected = {
"Fn::Sub": "The ${First.Second.Third} is a lie",
}
actual = clean(source)
assert expected == actual | Multi-level GetAtt should be replaced by ${First.Second.Third} | test_multi_level_get_att | python | awslabs/aws-cfn-template-flip | tests/test_clean.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_clean.py | Apache-2.0 |
def test_others():
"""
Other intrinsics should be replaced by parameters to Sub
"""
source = {
"Fn::Join": [
" ",
["The", {"Fn::Base64": "Notreallybase64"}, "is", "a", "lie"],
],
}
expected = {
"Fn::Sub": [
"The ${Param1} is a lie",
{
"Param1": {
"Fn::Base64": "Notreallybase64",
},
},
],
}
actual = clean(source)
assert expected == actual | Other intrinsics should be replaced by parameters to Sub | test_others | python | awslabs/aws-cfn-template-flip | tests/test_clean.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_clean.py | Apache-2.0 |
def test_in_array():
"""
Converting Join to Sub should still work when the join is part of a larger array
"""
source = {
"things": [
"Just a string",
{
"Fn::Join": [
" ",
["The", {"Fn::Base64": "Notreallybase64"}, "is", "a", "lie"],
],
},
{
"Another": "thing",
},
],
}
expected = {
"things": [
"Just a string",
{
"Fn::Sub": [
"The ${Param1} is a lie",
{
"Param1": {
"Fn::Base64": "Notreallybase64",
},
},
],
},
{
"Another": "thing",
},
],
}
actual = clean(source)
assert expected == actual | Converting Join to Sub should still work when the join is part of a larger array | test_in_array | python | awslabs/aws-cfn-template-flip | tests/test_clean.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_clean.py | Apache-2.0 |
def test_literals():
"""
Test that existing ${var} in source is respected
"""
source = {
"Fn::Join": [
" ",
["The", "${cake}", "is", "a", "lie"],
],
}
expected = "The ${!cake} is a lie"
actual = clean(source)
assert expected == actual | Test that existing ${var} in source is respected | test_literals | python | awslabs/aws-cfn-template-flip | tests/test_clean.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_clean.py | Apache-2.0 |
def test_nested_join():
"""
Test that a join of joins works correctly
"""
source = {
"Fn::Join": [
" ",
["The", "cake", {
"Fn::Join": [
" ",
["is", "a"],
],
}, "lie"],
],
}
expected = "The cake is a lie"
actual = clean(source)
assert expected == actual | Test that a join of joins works correctly | test_nested_join | python | awslabs/aws-cfn-template-flip | tests/test_clean.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_clean.py | Apache-2.0 |
def test_deep_nested_join():
"""
Test that a join works correctly when inside an intrinsic, inside a join
"""
source = {
"Fn::Join": [
" ",
["The", "cake", "is", "a", {
"Fn::ImportValue": {
"Fn::Join": [
"-",
[{"Ref": "lieStack"}, "lieValue"],
]
},
}],
],
}
expected = {
"Fn::Sub": [
"The cake is a ${Param1}",
{
"Param1": {
"Fn::ImportValue": {
"Fn::Sub": "${lieStack}-lieValue",
},
},
},
]
}
actual = clean(source)
assert expected == actual | Test that a join works correctly when inside an intrinsic, inside a join | test_deep_nested_join | python | awslabs/aws-cfn-template-flip | tests/test_clean.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_clean.py | Apache-2.0 |
def test_gh_63_no_value():
"""
Test that Joins with conditionals that can evaluate to AWS::NoValue
are not converted to Fn::Sub
"""
source = {
"Fn::Join": [
",",
[
{
"Fn::If": [
"Condition1",
"True1",
"Ref: AWS::NoValue"
]
},
{
"Fn::If": [
"Condition2",
"True2",
"False2"
]
}
]
]
}
assert source == clean(source) | Test that Joins with conditionals that can evaluate to AWS::NoValue
are not converted to Fn::Sub | test_gh_63_no_value | python | awslabs/aws-cfn-template-flip | tests/test_clean.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_clean.py | Apache-2.0 |
def test_gh_63_value():
"""
Test that Joins with conditionals that cannot evaluate to AWS::NoValue
are converted to Fn::Sub
"""
source = {
"Fn::Join": [
",",
[
{
"Fn::If": [
"Condition1",
"True1",
"False1"
]
},
{
"Fn::If": [
"Condition2",
"True2",
"False2"
]
}
]
]
}
expected = ODict((
("Fn::Sub", [
"${Param1},${Param2}",
ODict((
("Param1", ODict((
("Fn::If", ["Condition1", "True1", "False1"]),
))),
("Param2", ODict((
("Fn::If", ["Condition2", "True2", "False2"]),
))),
)),
]),
))
actual = clean(source)
assert actual == expected | Test that Joins with conditionals that cannot evaluate to AWS::NoValue
are converted to Fn::Sub | test_gh_63_value | python | awslabs/aws-cfn-template-flip | tests/test_clean.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_clean.py | Apache-2.0 |
def test_misused_join():
"""
Test that we don't break in the case that there is
a Fn::Join with a single element instead of a list.
We'll just return the Join as it was unless it's clearly just a string.
"""
cases = (
{
"Fn::Join": [
" ",
"foo",
],
},
{
"Fn::Join": "Just a string",
},
{
"Fn::Join": [
" ",
{
"Ref": "foo",
},
],
},
{
"Fn::Join": [
"-",
[
{"Ref": "This is fine"},
["But this is unexpected"],
]
],
}
)
expecteds = (
"foo",
"Just a string",
{
"Fn::Join": [
" ",
{
"Ref": "foo",
},
],
},
{
"Fn::Join": [
"-",
[
{"Ref": "This is fine"},
["But this is unexpected"],
]
],
}
)
for i, case in enumerate(cases):
expected = expecteds[i]
actual = clean(case)
assert expected == actual | Test that we don't break in the case that there is
a Fn::Join with a single element instead of a list.
We'll just return the Join as it was unless it's clearly just a string. | test_misused_join | python | awslabs/aws-cfn-template-flip | tests/test_clean.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_clean.py | Apache-2.0 |
def test_yaml_dumper():
"""
The clean dumper should use | format for multi-line strings
"""
source = {
"start": "This is\na multi-line\nstring",
}
actual = yaml.dump(source, Dumper=CleanCfnYamlDumper)
assert actual == """start: |-
  This is
  a multi-line
  string
""" | The clean dumper should use | format for multi-line strings | test_yaml_dumper | python | awslabs/aws-cfn-template-flip | tests/test_clean.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_clean.py | Apache-2.0 |
def test_reused_sub_params():
    """
    Test that params in Joins converted to Subs get reused when possible
    """
    source = {
        "Fn::Join": [
            " ", [
                "The",
                {
                    "Fn::Join": ["-", [{
                        "Ref": "Cake"
                    }, "Lie"]],
                },
                "is",
                {
                    "Fn::Join": ["-", [{
                        "Ref": "Cake"
                    }, "Lie"]],
                },
                "and isn't",
                {
                    "Fn::Join": ["-", [{
                        "Ref": "Pizza"
                    }, "Truth"]],
                },
            ],
        ],
    }
    expected = ODict((
        ("Fn::Sub", [
            "The ${Param1} is ${Param1} and isn't ${Param2}",
            ODict((
                ("Param1", ODict((
                    ("Fn::Sub", "${Cake}-Lie"),
                ))),
                ("Param2", ODict((
                    ("Fn::Sub", "${Pizza}-Truth"),
                ))),
            )),
        ]),
    ))
    assert clean(source) == expected | Test that params in Joins converted to Subs get reused when possible | test_reused_sub_params | python | awslabs/aws-cfn-template-flip | tests/test_clean.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_clean.py | Apache-2.0 |
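The deduplication being asserted is worth calling out: identical sub-expressions collapse to a single generated parameter. A reduced sketch of the same behaviour, again assuming the cfn_clean import used by this module:

from cfn_clean import clean

source = {
    "Fn::Join": [" ", [
        {"Fn::Join": ["-", [{"Ref": "Cake"}, "Lie"]]},
        {"Fn::Join": ["-", [{"Ref": "Cake"}, "Lie"]]},
    ]],
}
# Both parts are the same expression, so the generated Sub string
# references one Param1 twice: "${Param1} ${Param1}", per the test above.
print(clean(source))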
def test_to_json_with_yaml(input_yaml, parsed_json):
    """
    Test that to_json performs correctly
    """
    actual = cfn_flip.to_json(input_yaml)
    assert load_json(actual) == parsed_json | Test that to_json performs correctly | test_to_json_with_yaml | python | awslabs/aws-cfn-template-flip | tests/test_flip.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_flip.py | Apache-2.0 |
def test_to_json_with_json(input_json, parsed_json):
    """
    Test that to_json still works when passed json
    (All json is valid yaml)
    """
    actual = cfn_flip.to_json(input_json)
    assert load_json(actual) == parsed_json | Test that to_json still works when passed json
(All json is valid yaml) | test_to_json_with_json | python | awslabs/aws-cfn-template-flip | tests/test_flip.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_flip.py | Apache-2.0 |
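The parenthetical in this docstring is the whole trick: every JSON document is also valid YAML, so to_json accepts either format. A quick sketch with an illustrative template body:

import cfn_flip

template = '{"Resources": {"Bucket": {"Type": "AWS::S3::Bucket"}}}'
# JSON input passes through the YAML parser and is re-emitted as JSON.
print(cfn_flip.to_json(template))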
def test_to_yaml_with_long_json(input_long_json):
    """
    Test that to_yaml handles long and multi-line strings correctly
    """
    actual = cfn_flip.to_yaml(input_long_json)
    # The result should not parse as json
    with pytest.raises(ValueError):
        load_json(actual)
    parsed_actual = load_yaml(actual)
    assert parsed_actual['TooShort'] == "foo\nbar\nbaz\nquuux"
    assert 'WideText: >-' in actual
    assert 'TooShort: "foo' in actual | Test that to_yaml handles long and multi-line strings correctly | test_to_yaml_with_long_json | python | awslabs/aws-cfn-template-flip | tests/test_flip.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_flip.py | Apache-2.0 |
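These assertions distinguish two scalar styles in the YAML output: a wide single-line value is folded with >-, while a short multi-line value stays a quoted scalar. The input_long_json fixture is not shown in this excerpt, so the stand-in values below are assumptions about its shape:

import json
import cfn_flip

long_json = json.dumps({
    "TooShort": "foo\nbar\nbaz\nquuux",  # short multi-line: expected quoted, per the assertion
    "WideText": "lorem ipsum " * 30,     # one very wide line: expected folded (>-), per the assertion
})
print(cfn_flip.to_yaml(long_json))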
def test_to_yaml_with_json(input_json, parsed_yaml):
    """
    Test that to_yaml performs correctly
    """
    actual = cfn_flip.to_yaml(input_json)
    # The result should not parse as json
    with pytest.raises(ValueError):
        load_json(actual)
    parsed_actual = load_yaml(actual)
    assert parsed_actual == parsed_yaml | Test that to_yaml performs correctly | test_to_yaml_with_json | python | awslabs/aws-cfn-template-flip | tests/test_flip.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_flip.py | Apache-2.0 |
def test_to_yaml_with_yaml(input_yaml, parsed_yaml):
    """
    Test that to_yaml still works when passed yaml
    """
    actual = cfn_flip.to_yaml(input_yaml)
    assert load_yaml(actual) == parsed_yaml | Test that to_yaml still works when passed yaml | test_to_yaml_with_yaml | python | awslabs/aws-cfn-template-flip | tests/test_flip.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_flip.py | Apache-2.0 |
def test_flip_to_json(input_yaml, input_json, parsed_json):
    """
    Test that flip performs correctly transforming from yaml to json
    """
    actual = cfn_flip.flip(input_yaml)
    assert load_json(actual) == parsed_json | Test that flip performs correctly transforming from yaml to json | test_flip_to_json | python | awslabs/aws-cfn-template-flip | tests/test_flip.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_flip.py | Apache-2.0 |
def test_flip_to_yaml(input_json, input_yaml, parsed_yaml):
    """
    Test that flip performs correctly transforming from json to yaml
    """
    actual = cfn_flip.flip(input_json)
    assert actual == input_yaml + "\n"
    # The result should not parse as json
    with pytest.raises(ValueError):
        load_json(actual)
    parsed_actual = load_yaml(actual)
    assert parsed_actual == parsed_yaml | Test that flip performs correctly transforming from json to yaml | test_flip_to_yaml | python | awslabs/aws-cfn-template-flip | tests/test_flip.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_flip.py | Apache-2.0 |
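As the two directions tested here show, flip autodetects the input format. A minimal round-trip sketch with an illustrative template:

import cfn_flip

json_in = '{"Resources": {"Bucket": {"Type": "AWS::S3::Bucket"}}}'
yaml_out = cfn_flip.flip(json_in)    # JSON in, YAML out
json_back = cfn_flip.flip(yaml_out)  # YAML in, JSON out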
def test_flip_to_clean_json(input_yaml, clean_json, parsed_clean_json):
    """
    Test that flip performs correctly transforming from yaml to json
    and the `clean_up` flag is active
    """
    actual = cfn_flip.flip(input_yaml, clean_up=True)
    assert load_json(actual) == parsed_clean_json | Test that flip performs correctly transforming from yaml to json
and the `clean_up` flag is active | test_flip_to_clean_json | python | awslabs/aws-cfn-template-flip | tests/test_flip.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_flip.py | Apache-2.0 |
def test_flip_to_clean_yaml(input_json, clean_yaml, parsed_clean_yaml):
    """
    Test that flip performs correctly transforming from json to yaml
    and the `clean_up` flag is active
    """
    actual = cfn_flip.flip(input_json, clean_up=True)
    assert actual == clean_yaml + "\n"
    # The result should not parse as json
    with pytest.raises(ValueError):
        load_json(actual)
    parsed_actual = load_yaml(actual)
    assert parsed_actual == parsed_clean_yaml | Test that flip performs correctly transforming from json to yaml
and the `clean_up` flag is active | test_flip_to_clean_yaml | python | awslabs/aws-cfn-template-flip | tests/test_flip.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_flip.py | Apache-2.0 |
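The clean_up flag chains the conversion with the clean() pass exercised in test_clean.py above. A sketch; the claim that the join surfaces as a short-form Sub tag in the YAML output reflects cfn-flip's default style and is an assumption here:

import cfn_flip

json_in = '{"Outputs": {"Id": {"Value": {"Fn::Join": ["-", [{"Ref": "A"}, "b"]]}}}}'
# With clean_up=True the join is rewritten before dumping, so the YAML
# output should contain a Sub rather than a Join.
print(cfn_flip.flip(json_in, clean_up=True))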
def test_flip_to_multibyte_json(multibyte_json, parsed_multibyte_yaml):
    """
    Test that a multibyte JSON template converts to YAML correctly
    """
    actual = cfn_flip.to_yaml(multibyte_json)
    assert load_yaml(actual) == parsed_multibyte_yaml | Test that a multibyte JSON template converts to YAML correctly | test_flip_to_multibyte_json | python | awslabs/aws-cfn-template-flip | tests/test_flip.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_flip.py | Apache-2.0 |
def test_flip_to_multibyte_yaml(multibyte_yaml, parsed_multibyte_json):
    """
    Test that a multibyte YAML template converts to JSON correctly
    """
    actual = cfn_flip.to_json(multibyte_yaml)
    assert load_json(actual) == parsed_multibyte_json | Test that a multibyte YAML template converts to JSON correctly | test_flip_to_multibyte_yaml | python | awslabs/aws-cfn-template-flip | tests/test_flip.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_flip.py | Apache-2.0 |
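A round-trip sketch for the multibyte behaviour these two tests cover; the template body is an illustrative stand-in for the fixtures:

import json
import cfn_flip

src = '{"Description": "日本語のテンプレート"}'
back = json.loads(cfn_flip.to_json(cfn_flip.to_yaml(src)))
assert back["Description"] == "日本語のテンプレート"  # content survives both directions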
def test_flip_with_bad_data(fail_message, bad_data):
    """
    Test that flip fails with an error message when passed bad data
    """
    with pytest.raises(JSONDecodeError, match=fail_message):
        cfn_flip.flip(bad_data) | Test that flip fails with an error message when passed bad data | test_flip_with_bad_data | python | awslabs/aws-cfn-template-flip | tests/test_flip.py | https://github.com/awslabs/aws-cfn-template-flip/blob/master/tests/test_flip.py | Apache-2.0 |
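For callers, the failure mode pinned down here suggests a small wrapper. JSONDecodeError subclasses ValueError, so catching the latter is the conservative choice; whether other malformed inputs surface differently is not shown in this excerpt:

import cfn_flip

def safe_flip(text):
    """Return the flipped template, or None if the input cannot be parsed."""
    try:
        return cfn_flip.flip(text)
    except ValueError:  # includes json.JSONDecodeError, per the test above
        return None

print(safe_flip("{ this is not a template"))  # None (assuming the parse failure is a ValueError)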