Creates a new video that looks like a meme, given text and video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param text: the text to be overlaid/used in the meme. Note: if using a very
long string, please add newline characters so that the text remains at a
readable font size
@param font_file: iopath uri to the .ttf font file
@param opacity: the lower the opacity, the more transparent the text
@param text_color: color of the text in RGB values
@param caption_height: the height of the meme caption
@param meme_bg_color: background color of the meme caption in RGB values
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def meme_format(
video_path: str,
output_path: Optional[str] = None,
text: str = "LOL",
font_file: str = utils.MEME_DEFAULT_FONT,
opacity: float = 1.0,
text_color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
caption_height: int = 250,
meme_bg_color: Tuple[int, int, int] = utils.WHITE_RGB_COLOR,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Creates a new video that looks like a meme, given text and video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param text: the text to be overlaid/used in the meme. Note: if using a very
long string, please add newline characters so that the text remains at a
readable font size
@param font_file: iopath uri to the .ttf font file
@param opacity: the lower the opacity, the more transparent the text
@param text_color: color of the text in RGB values
@param caption_height: the height of the meme caption
@param meme_bg_color: background color of the meme caption in RGB values
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
meme_func = functools.partial(
imaugs.meme_format,
text=text,
font_file=font_file,
opacity=opacity,
text_color=text_color,
caption_height=caption_height,
meme_bg_color=meme_bg_color,
)
vdutils.apply_to_each_frame(meme_func, video_path, output_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="meme_format", **func_kwargs
)
return output_path or video_path |
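A minimal usage sketch for the function above. The import path (assumed here to be augly.video) and the file names are illustrative assumptions, not confirmed by this excerpt.

import augly.video as vidaugs  # assumed module exposing meme_format

meta = []  # execution metadata dicts will be appended here
vidaugs.meme_format(
    "input.mp4",                            # placeholder source video
    output_path="meme.mp4",                 # keep the original file intact
    text="WHEN THE BUILD\nFINALLY PASSES",  # newlines keep the font readable
    caption_height=300,
    metadata=meta,
)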
Overlays media onto the video at position (width * x_factor, height * y_factor)
@param video_path: the path to the video to be augmented
@param overlay_path: the path to the media (image or video) that will be
overlaid onto the video
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param overlay_size: size of the overlaid media with respect to the background
video. If set to None, the original size of the overlaid media is used
@param x_factor: specifies where the left side of the overlaid media should be
placed, relative to the video width
@param y_factor: specifies where the top side of the overlaid media should be
placed, relative to the video height
@param use_overlay_audio: if set to True and the media type is a video, the audio
of the overlaid video will be used instead of the main/background video's audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def overlay(
video_path: str,
overlay_path: str,
output_path: Optional[str] = None,
overlay_size: Optional[float] = None,
x_factor: float = 0.0,
y_factor: float = 0.0,
use_overlay_audio: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Overlays media onto the video at position (width * x_factor, height * y_factor)
@param video_path: the path to the video to be augmented
@param overlay_path: the path to the media (image or video) that will be
overlaid onto the video
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param overlay_size: size of the overlaid media with respect to the background
video. If set to None, the original size of the overlaid media is used
@param x_factor: specifies where the left side of the overlaid media should be
placed, relative to the video width
@param y_factor: specifies where the top side of the overlaid media should be
placed, relative to the video height
@param use_overlay_audio: if set to True and the media type is a video, the audio
of the overlaid video will be used instead of the main/background video's audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
local_path = utils.pathmgr.get_local_path(video_path)
overlay_path = utils.pathmgr.get_local_path(overlay_path)
tmp_overlay_path = None
if overlay_size is not None:
assert 0 < overlay_size <= 1, "overlay_size must be a value in the range (0, 1]"
video_info = helpers.get_video_info(local_path)
overlay_h = int(video_info["height"] * overlay_size)
overlay_w = int(video_info["width"] * overlay_size)
_, tmp_overlay_path = tempfile.mkstemp(suffix=os.path.splitext(overlay_path)[1])
if utils.is_image_file(overlay_path):
imaugs.resize(overlay_path, tmp_overlay_path, overlay_w, overlay_h)
else:
resize(overlay_path, tmp_overlay_path, overlay_h, overlay_w)
overlay_aug = af.VideoAugmenterByOverlay(
tmp_overlay_path or overlay_path, x_factor, y_factor, use_overlay_audio
)
overlay_aug.add_augmenter(local_path, output_path)
if tmp_overlay_path:
os.remove(tmp_overlay_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="overlay", **func_kwargs)
return output_path or video_path |
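A usage sketch for overlay, with placeholder paths and an assumed augly.video import. Per the assert in the body, overlay_size must fall in (0, 1]; here the overlay is scaled to 25% of the background video's dimensions and its top-left corner is placed at 60% of the width and 5% of the height.

import augly.video as vidaugs  # assumed module path

vidaugs.overlay(
    "background.mp4",             # placeholder base video
    overlay_path="logo.png",      # image or video to overlay
    output_path="with_logo.mp4",
    overlay_size=0.25,            # must be in (0, 1]
    x_factor=0.6,
    y_factor=0.05,
)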
Overlays dots onto a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param num_dots: the number of dots to add to each frame
@param dot_type: specify if you would like "blur" or "colored"
@param random_movement: whether or not you want the dots to move randomly
across the frame or to move across it in a "linear" way
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def overlay_dots(
video_path: str,
output_path: Optional[str] = None,
num_dots: int = 100,
dot_type: str = "colored",
random_movement: bool = True,
metadata: Optional[List[Dict[str, Any]]] = None,
**kwargs,
) -> str:
"""
Overlays dots onto a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param num_dots: the number of dots to add to each frame
@param dot_type: specify if you would like "blur" or "colored"
@param random_movement: whether or not you want the dots to move randomly
across the frame or to move across it in a "linear" way
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
dots_aug = ac.VideoDistractorByDots(num_dots, dot_type, random_movement)
vdutils.apply_cv2_augmenter(dots_aug, video_path, output_path, **kwargs)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="overlay_dots", **func_kwargs
)
return output_path or video_path |
Overlays an emoji onto each frame of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param emoji_path: iopath uri to the emoji image
@param x_factor: specifies where the left side of the emoji should be placed,
relative to the video width
@param y_factor: specifies where the top side of the emoji should be placed,
relative to the video height
@param opacity: opacity of the emoji image
@param emoji_size: emoji size relative to the height of the video frame
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def overlay_emoji(
video_path: str,
output_path: Optional[str] = None,
emoji_path: str = utils.EMOJI_PATH,
x_factor: float = 0.4,
y_factor: float = 0.4,
opacity: float = 1.0,
emoji_size: float = 0.15,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Overlays an emoji onto each frame of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param emoji_path: iopath uri to the emoji image
@param x_factor: specifies where the left side of the emoji should be placed,
relative to the video width
@param y_factor: specifies where the top side of the emoji should be placed,
relative to the video height
@param opacity: opacity of the emoji image
@param emoji_size: emoji size relative to the height of the video frame
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
local_path = utils.pathmgr.get_local_path(video_path)
utils.validate_video_path(video_path)
video_info = helpers.get_video_info(local_path)
with tempfile.TemporaryDirectory() as tmpdir:
local_emoji_path = utils.pathmgr.get_local_path(emoji_path, cache_dir=tmpdir)
utils.validate_image_path(local_emoji_path)
emoji_output_path = os.path.join(tmpdir, "modified_emoji.png")
imaugs.resize(
local_emoji_path,
output_path=emoji_output_path,
height=int(emoji_size * video_info["height"]),
width=int(emoji_size * video_info["height"]),
)
imaugs.opacity(emoji_output_path, output_path=emoji_output_path, level=opacity)
overlay(
video_path,
emoji_output_path,
output_path,
x_factor=x_factor,
y_factor=y_factor,
)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="overlay_emoji", **func_kwargs
)
return output_path or video_path |
Overlays the video onto a background video, pointed to by background_path.
@param video_path: the path to the video to be augmented
@param background_path: the path to the background video
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param overlay_size: size of the overlaid media with respect to the background
video. If set to None, the original size of the overlaid media is used
@param x_factor: specifies where the left side of the overlaid media should be
placed, relative to the video width
@param y_factor: specifies where the top side of the overlaid media should be
placed, relative to the video height
@param use_background_audio: if set to True and the media type is a video, the
audio of the background video will be used instead of the src video's audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def overlay_onto_background_video(
video_path: str,
background_path: str,
output_path: Optional[str] = None,
overlay_size: Optional[float] = 0.7,
x_factor: float = 0.0,
y_factor: float = 0.0,
use_background_audio: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Overlays the video onto a background video, pointed to by background_path.
@param video_path: the path to the video to be augmented
@param background_path: the path to the background video
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param overlay_size: size of the overlaid media with respect to the background
video. If set to None, the original size of the overlaid media is used
@param x_factor: specifies where the left side of the overlaid media should be
placed, relative to the video width
@param y_factor: specifies where the top side of the overlaid media should be
placed, relative to the video height
@param use_background_audio: if set to True and the media type is a video, the
audio of the background video will be used instead of the src video's audio
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
overlay(
video_path=background_path,
overlay_path=video_path,
output_path=output_path or video_path,
overlay_size=overlay_size,
x_factor=x_factor,
y_factor=y_factor,
use_overlay_audio=not use_background_audio,
)
if metadata is not None:
helpers.get_metadata(
metadata=metadata,
function_name="overlay_onto_background_video",
**func_kwargs,
)
return output_path or video_path |
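A usage sketch (paths and import path are assumptions). Note that the function swaps the roles internally: background_path becomes the base layer, the source video is overlaid onto it (at 70% of the background size by default), and use_overlay_audio is set to the negation of use_background_audio.

import augly.video as vidaugs  # assumed module path

vidaugs.overlay_onto_background_video(
    "src.mp4",                    # video to keep in the foreground
    background_path="bg.mp4",     # video that fills the frame behind it
    output_path="composited.mp4",
    overlay_size=0.5,             # foreground scaled to half the background size
    x_factor=0.25,
    y_factor=0.25,
    use_background_audio=False,   # keep the source video's audio track
)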
Overlays the video onto a screenshot template so it looks like it was
screen-recorded on Instagram
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param template_filepath: iopath uri to the screenshot template
@param template_bboxes_filepath: iopath uri to the file containing the bounding
box for each template
@param max_image_size_pixels: if provided, the template image and/or src video
will be scaled down to avoid an output image with an area greater than this
size (in pixels)
@param crop_src_to_fit: if True, the src image will be cropped if necessary to
fit into the template image. If False, the src image will instead be resized
if necessary
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def overlay_onto_screenshot(
video_path: str,
output_path: Optional[str] = None,
template_filepath: str = utils.TEMPLATE_PATH,
template_bboxes_filepath: str = utils.BBOXES_PATH,
max_image_size_pixels: Optional[int] = None,
crop_src_to_fit: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Overlays the video onto a screenshot template so it looks like it was
screen-recorded on Instagram
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param template_filepath: iopath uri to the screenshot template
@param template_bboxes_filepath: iopath uri to the file containing the bounding
box for each template
@param max_image_size_pixels: if provided, the template image and/or src video
will be scaled down to avoid an output image with an area greater than this
size (in pixels)
@param crop_src_to_fit: if True, the src image will be cropped if necessary to
fit into the template image. If False, the src image will instead be resized
if necessary
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
sc_func = functools.partial(
imaugs.overlay_onto_screenshot,
template_filepath=template_filepath,
template_bboxes_filepath=template_bboxes_filepath,
max_image_size_pixels=max_image_size_pixels,
crop_src_to_fit=crop_src_to_fit,
)
vdutils.apply_to_each_frame(sc_func, video_path, output_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="overlay_onto_screenshot", **func_kwargs
)
return output_path or video_path |
Overlays random shapes onto a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param num_shapes: the number of shapes to add to each frame
@param shape_type: specify if you would like circles or rectangles
@param colors: list of (R, G, B) colors to sample from
@param thickness: specify the thickness desired for the shapes
@param radius: the radius desired for the shapes (used when the shapes are circles)
@param random_movement: whether or not you want the shapes to move randomly
across the frame or to move across it in a "linear" way
@param topleft: specifies the top-left boundary of the shape region. The boundary
values are floats in [0, 1] representing fractions of the frame width/height
@param bottomright: specifies the bottom-right boundary of the shape region. The
boundary values are floats in [0, 1] representing fractions of the frame
width/height
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def overlay_shapes(
video_path: str,
output_path: Optional[str] = None,
num_shapes: int = 1,
shape_type: Optional[str] = None,
colors: Optional[List[Tuple[int, int, int]]] = None,
thickness: Optional[int] = None,
radius: Optional[float] = None,
random_movement: bool = True,
topleft: Optional[Tuple[float, float]] = None,
bottomright: Optional[Tuple[float, float]] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
**kwargs,
) -> str:
"""
Overlays random shapes onto a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param num_shapes: the number of shapes to add to each frame
@param shape_type: specify if you would like circles or rectangles
@param colors: list of (R, G, B) colors to sample from
@param thickness: specify the thickness desired for the shapes
@param radius: the radius desired for the shapes (used when the shapes are circles)
@param random_movement: whether or not you want the shapes to move randomly
across the frame or to move across it in a "linear" way
@param topleft: specifies the top-left boundary of the shape region. The boundary
values are floats in [0, 1] representing fractions of the frame width/height
@param bottomright: specifies the bottom-right boundary of the shape region. The
boundary values are floats in [0, 1] representing fractions of the frame
width/height
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
shapes_aug = ac.VideoDistractorByShapes(
num_shapes,
shape_type,
colors,
thickness,
radius,
random_movement,
topleft,
bottomright,
)
vdutils.apply_cv2_augmenter(shapes_aug, video_path, output_path, **kwargs)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="overlay_shapes", **func_kwargs
)
return output_path or video_path |
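A usage sketch, with hypothetical paths and an assumed import path; the exact strings accepted for shape_type are validated by VideoDistractorByShapes and are not shown in this excerpt.

import augly.video as vidaugs  # assumed module path

vidaugs.overlay_shapes(
    "input.mp4",
    output_path="shapes.mp4",
    num_shapes=3,
    shape_type="circle",                # assumed accepted value ("circles or rectangles")
    colors=[(255, 0, 0), (0, 255, 0)],  # sampled per shape
    thickness=2,
    random_movement=False,              # shapes drift linearly across the frame
    topleft=(0.1, 0.1),                 # restrict shapes to the central region
    bottomright=(0.9, 0.9),
)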
Overlays random text onto a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param text_len: length of string for randomized texts.
@param text_change_nth: change random text every nth frame. None means
using same text for all frames.
@param fonts: list of fonts to sample from. Each font can be a cv2 fontFace,
a PIL ImageFont, or a path to a PIL ImageFont file. Each font is coupled with
a chars file (the second item in the tuple) - a path to a file which contains
the characters associated with the given font. For example, non-western
alphabets have different valid characters than the roman alphabet, and these
must be specified in order to construct random valid text in that font. If the
chars file path is None, the roman alphabet will be used.
@param fontscales: 2-tuple of float (min_scale, max_scale).
@param colors: list of (R, G, B) colors to sample from
@param thickness: specify the thickness desired for the text.
@param random_movement: whether or not you want the text to move randomly
across the frame or to move across it in a "linear" way
@param topleft: specifies the top-left boundary of the text region. The boundary
values are floats in [0, 1] representing fractions of the frame width/height
@param bottomright: specifies the bottom-right boundary of the text region. The
boundary values are floats in [0, 1] representing fractions of the frame
width/height
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def overlay_text(
video_path: str,
output_path: Optional[str] = None,
text_len: int = 10,
text_change_nth: Optional[int] = None,
fonts: Optional[List[Tuple[Any, Optional[str]]]] = None,
fontscales: Optional[Tuple[float, float]] = None,
colors: Optional[List[Tuple[int, int, int]]] = None,
thickness: Optional[int] = None,
random_movement: bool = False,
topleft: Optional[Tuple[float, float]] = None,
bottomright: Optional[Tuple[float, float]] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
**kwargs,
) -> str:
"""
Overlays random text onto a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param text_len: length of string for randomized texts.
@param text_change_nth: change random text every nth frame. None means
using same text for all frames.
@param fonts: list of fonts to sample from. Each font can be a cv2 fontFace,
a PIL ImageFont, or a path to a PIL ImageFont file. Each font is coupled with
a chars file (the second item in the tuple) - a path to a file which contains
the characters associated with the given font. For example, non-western
alphabets have different valid characters than the roman alphabet, and these
must be specified in order to construct random valid text in that font. If the
chars file path is None, the roman alphabet will be used.
@param fontscales: 2-tuple of float (min_scale, max_scale).
@param colors: list of (R, G, B) colors to sample from
@param thickness: specify the thickness desired for the text.
@param random_movement: whether or not you want the text to move randomly
across the frame or to move across it in a "linear" way
@param topleft: specifies the top-left boundary of the text region. The boundary
values are floats in [0, 1] representing fractions of the frame width/height
@param bottomright: specifies the bottom-right boundary of the text region. The
boundary values are floats in [0, 1] representing fractions of the frame
width/height
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
text_aug = ac.VideoDistractorByText(
text_len,
text_change_nth,
fonts,
fontscales,
colors,
thickness,
random_movement,
topleft,
bottomright,
)
vdutils.apply_cv2_augmenter(text_aug, video_path, output_path, **kwargs)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="overlay_text", **func_kwargs
)
return output_path or video_path |
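A usage sketch with placeholder paths and an assumed import path, showing random text confined to the bottom band of the frame and metadata collection.

import augly.video as vidaugs  # assumed module path

meta = []
vidaugs.overlay_text(
    "input.mp4",
    output_path="text.mp4",
    text_len=8,
    text_change_nth=30,         # new random string every 30 frames
    colors=[(255, 255, 255)],
    topleft=(0.0, 0.8),         # keep the text in the bottom band of the frame
    bottomright=(1.0, 1.0),
    metadata=meta,
)
# meta now holds one dict describing the run (function name, durations, fps, ...)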
Pads the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param w_factor: pad right and left with w_factor * frame width
@param h_factor: pad bottom and top with h_factor * frame height
@param color: RGB color of the padded margin
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def pad(
video_path: str,
output_path: Optional[str] = None,
w_factor: float = 0.25,
h_factor: float = 0.25,
color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Pads the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param w_factor: pad right and left with w_factor * frame width
@param h_factor: pad bottom and top with h_factor * frame height
@param color: RGB color of the padded margin
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
pad_aug = af.VideoAugmenterByPadding(w_factor, h_factor, color)
pad_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="pad", **func_kwargs)
return output_path or video_path |
Apply a perspective transform to the video so it looks like it was taken
as a photo from another device (e.g. taking a video from your phone of a
video on a computer). Also has a shake factor to mimic the shakiness of
someone holding a phone.
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param sigma: the standard deviation of the distribution of destination coordinates.
The larger the sigma value, the more intense the transform
@param shake_radius: determines the amount by which to "shake" the video; the larger
the radius, the more intense the shake.
@param seed: if provided, this will set the random seed to ensure consistency
between runs
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def perspective_transform_and_shake(
video_path: str,
output_path: Optional[str] = None,
sigma: float = 50.0,
shake_radius: float = 0.0,
seed: Optional[int] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Apply a perspective transform to the video so it looks like it was taken
as a photo from another device (e.g. taking a video from your phone of a
video on a computer). Also has a shake factor to mimic the shakiness of
someone holding a phone.
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param sigma: the standard deviation of the distribution of destination coordinates.
The larger the sigma value, the more intense the transform
@param shake_radius: determines the amount by which to "shake" the video; the larger
the radius, the more intense the shake.
@param seed: if provided, this will set the random seed to ensure consistency
between runs
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
perspective_func = functools.partial(
imaugs.perspective_transform, sigma=sigma, seed=seed
)
duration = float(helpers.get_video_info(video_path)["duration"])
rng = np.random.RandomState(seed) if seed is not None else np.random
def get_dx_dy(frame_number: int) -> Dict:
u = math.sin(frame_number / duration * math.pi) ** 2
return {
"dx": u * rng.normal(0, shake_radius),
"dy": u * rng.normal(0, shake_radius),
}
vdutils.apply_to_each_frame(perspective_func, video_path, output_path, get_dx_dy)
if metadata is not None:
helpers.get_metadata(
metadata=metadata,
function_name="perspective_transform_and_shake",
**func_kwargs,
)
return output_path or video_path |
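A usage sketch with placeholder paths and an assumed import path. Per the body above, the per-frame shake offsets are drawn from a normal distribution with standard deviation shake_radius and scaled by a sin^2 envelope, so the shake ramps up and back down over the clip; fixing the seed makes both the warp and the shake trajectory reproducible.

import augly.video as vidaugs  # assumed module path

vidaugs.perspective_transform_and_shake(
    "input.mp4",
    output_path="handheld.mp4",
    sigma=30.0,          # milder warp than the default 50.0
    shake_radius=10.0,   # per-frame dx/dy ~ N(0, 10), scaled by the sin^2 envelope
    seed=42,             # reproducible warp and shake across runs
)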
Pixelizes the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param ratio: smaller values result in a more pixelated video, 1.0 indicates
no change, and any value above one doesn't have a noticeable effect
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def pixelization(
video_path: str,
output_path: Optional[str] = None,
ratio: float = 1.0,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Pixelizes the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param ratio: smaller values result in a more pixelated video, 1.0 indicates
no change, and any value above one doesn't have a noticeable effect
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
assert ratio > 0, "Expected 'ratio' to be a positive number"
video_info = helpers.get_video_info(video_path)
width, height = video_info["width"], video_info["height"]
output_path = output_path or video_path
resize(video_path, output_path, height * ratio, width * ratio)
resize(output_path, output_path, height, width)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="pixelization", **func_kwargs
)
return output_path or video_path |
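A usage sketch with a placeholder path and an assumed import path; the effect comes from resizing down by the ratio and back up to the original resolution.

import augly.video as vidaugs  # assumed module path

# Downscale to 10% of the original resolution and back up, producing
# visible blockiness; ratio=1.0 would leave the video effectively unchanged.
vidaugs.pixelization("input.mp4", output_path="pixelated.mp4", ratio=0.1)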
Removes the audio stream from a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def remove_audio(
video_path: str,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Removes the audio stream from a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
remove_audio_aug = af.VideoAugmenterByRemovingAudio()
remove_audio_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="remove_audio", **func_kwargs
)
return output_path or video_path |
Replaces part of the video with frames of the specified color
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param offset_factor: start point of the replacement relative to the video
duration (this parameter is multiplied by the video duration)
@param duration_factor: the length of the replacement relative to the video
duration (this parameter is multiplied by the video duration)
@param color: RGB color of the replaced frames. Default color is black
@param transition: optional transition configuration to apply between the clips
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def replace_with_color_frames(
video_path: str,
output_path: Optional[str] = None,
offset_factor: float = 0.0,
duration_factor: float = 1.0,
color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
transition: Optional[af.TransitionConfig] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Replaces part of the video with frames of the specified color
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param offset_factor: start point of the replacement relative to the video
duration (this parameter is multiplied by the video duration)
@param duration_factor: the length of the replacement relative to the video
duration (this parameter is multiplied by the video duration)
@param color: RGB color of the replaced frames. Default color is black
@param transition: optional transition configuration to apply between the clips
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
utils.validate_video_path(video_path)
assert (
0.0 <= offset_factor <= 1.0 and 0.0 <= duration_factor <= 1.0
), "Both offset & duration factors must be values in the range [0.0, 1.0]"
func_kwargs = {
**helpers.get_func_kwargs(metadata, locals(), video_path),
"function_name": "replace_with_color_frames",
}
video_info = helpers.get_video_info(video_path)
video_duration = float(video_info["duration"])
width, height = video_info["width"], video_info["height"]
offset = video_duration * offset_factor
duration = video_duration * duration_factor
output_path = output_path or video_path
if duration == 0 or offset == video_duration:
if output_path != video_path:
shutil.copy(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, **func_kwargs)
return output_path or video_path
video_paths = []
src_video_path_index = 0 if offset > 0 else 2
with tempfile.TemporaryDirectory() as tmpdir:
color_duration = (
video_duration - offset if offset + duration >= video_duration else duration
)
color_path = os.path.join(tmpdir, "color_frames.mp4")
helpers.create_color_video(color_path, color_duration, height, width, color)
if helpers.has_audio_stream(video_path):
audio_path = os.path.join(tmpdir, "audio.aac")
helpers.extract_audio_to_file(video_path, audio_path)
audio_swap(color_path, audio_path, offset=offset)
if offset_factor == 0 and duration_factor == 1.0:
shutil.copy(color_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, **func_kwargs)
return output_path or video_path
if offset > 0:
before_path = os.path.join(tmpdir, "before.mp4")
trim(video_path, before_path, end=offset)
video_paths.append(before_path)
video_paths.append(color_path)
if offset + duration < video_duration:
after_path = os.path.join(tmpdir, "after.mp4")
trim(video_path, after_path, start=offset + duration)
video_paths.append(after_path)
concat(
video_paths,
output_path,
src_video_path_index=src_video_path_index,
transition=transition,
)
if metadata is not None:
helpers.get_metadata(metadata=metadata, **func_kwargs)
return output_path or video_path |
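A usage sketch with placeholder paths and an assumed import path, with the factors worked through numerically for a 20-second clip.

import augly.video as vidaugs  # assumed module path

# On a 20-second clip, offset_factor=0.25 and duration_factor=0.5 replace
# seconds 5 through 15 with solid black frames; per the body above, the
# original audio is extracted and laid back over the color segment.
vidaugs.replace_with_color_frames(
    "input.mp4",
    output_path="blanked.mp4",
    offset_factor=0.25,
    duration_factor=0.5,
    color=(0, 0, 0),
)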
Resizes a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param height: the height to which the video should be resized. If not specified,
the original video height will be used
@param width: the width to which the video should be resized. If not specified,
the original video width will be used
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def resize(
video_path: str,
output_path: Optional[str] = None,
height: Union[int, str] = "ih",
width: Union[int, str] = "iw",
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Resizes a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param height: the height to which the video should be resized. If not specified,
the original video height will be used
@param width: the width to which the video should be resized. If not specified,
the original video width will be used
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
resize_aug = af.VideoAugmenterByResize(height, width)
resize_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="resize", **func_kwargs)
return output_path or video_path |
Rotates a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param degrees: expression for the angle by which to rotate the input video
clockwise, expressed in degrees (supports negative values as well)
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def rotate(
video_path: str,
output_path: Optional[str] = None,
degrees: float = 15.0,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Rotates a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param degrees: expression for the angle by which to rotate the input video
clockwise, expressed in degrees (supports negative values as well)
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
rotate_aug = af.VideoAugmenterByRotation(degrees)
rotate_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="rotate", **func_kwargs)
return output_path or video_path |
Alters the resolution of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param factor: the ratio by which the video should be downscaled or upscaled
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def scale(
video_path: str,
output_path: Optional[str] = None,
factor: float = 0.5,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Alters the resolution of a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param factor: the ratio by which the video should be downscaled or upscaled
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
scale_aug = af.VideoAugmenterByResolution(factor)
scale_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="scale", **func_kwargs)
return output_path or video_path |
Shifts the original frame position from the center by a vector
(width * x_factor, height * y_factor) and pads the rest with a
colored margin
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param x_factor: the horizontal amount that the video should be shifted,
relative to the width of the video
@param y_factor: the vertical amount that the video should be shifted,
relative to the height of the video
@param color: RGB color of the margin generated by the shift. Default color is black
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def shift(
video_path: str,
output_path: Optional[str] = None,
x_factor: float = 0.0,
y_factor: float = 0.0,
color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Shifts the original frame position from the center by a vector
(width * x_factor, height * y_factor) and pads the rest with a
colored margin
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param x_factor: the horizontal amount that the video should be shifted,
relative to the width of the video
@param y_factor: the vertical amount that the video should be shifted,
relative to the height of the video
@param color: RGB color of the margin generated by the shift. Default color is black
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
utils.validate_video_path(video_path)
video_info = helpers.get_video_info(video_path)
with tempfile.TemporaryDirectory() as tmpdir:
background_path = os.path.join(tmpdir, "background.mp4")
helpers.create_color_video(
background_path,
float(video_info["duration"]),
video_info["height"],
video_info["width"],
color,
)
overlay(
background_path,
video_path,
output_path,
x_factor=x_factor,
y_factor=y_factor,
use_overlay_audio=True,
)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="shift", **func_kwargs)
return output_path or video_path |
Crops the video using the specified offset and duration factors
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param offset_factor: start point of the crop relative to the video duration
(this parameter is multiplied by the video duration)
@param duration_factor: the length of the crop relative to the video duration
(this parameter is multiplied by the video duration)
@param minimum_duration: the minimum duration of a segment selected
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def time_crop(
video_path: str,
output_path: Optional[str] = None,
offset_factor: float = 0.0,
duration_factor: float = 1.0,
minimum_duration: float = 0.0,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Crops the video using the specified offset and duration factors
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param offset_factor: start point of the crop relative to the video duration
(this parameter is multiplied by the video duration)
@param duration_factor: the length of the crop relative to the video duration
(this parameter is multiplied by the video duration)
@param minimum_duration: the minimum duration of a segment selected
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
time_crop_aug = af.VideoAugmenterByTrim(
offset_factor=offset_factor,
duration_factor=duration_factor,
minimum_duration=minimum_duration,
)
time_crop_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="time_crop", **func_kwargs
)
return output_path or video_path |
Removes evenly sized (off) chunks, and concatenates evenly spaced (on)
chunks from the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param start_offset_factor: relative to the video duration; the offset
at which to start taking "on" segments
@param on_factor: relative to the video duration; the amount of time each
"on" video chunk should be
@param off_factor: relative to the "on" duration; the amount of time each
"off" video chunk should be
@param transition: optional transition configuration to apply between the clips
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def time_decimate(
video_path: str,
output_path: Optional[str] = None,
start_offset_factor: float = 0.0,
on_factor: float = 0.2,
off_factor: float = 0.5,
transition: Optional[af.TransitionConfig] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Removes evenly sized (off) chunks, and concatenates evenly spaced (on)
chunks from the video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param start_offset_factor: relative to the video duration; the offset
at which to start taking "on" segments
@param on_factor: relative to the video duration; the amount of time each
"on" video chunk should be
@param off_factor: relative to the "on" duration; the amount of time each
"off" video chunk should be
@param transition: optional transition configuration to apply between the clips
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
assert (
0 <= start_offset_factor < 1
), f"start_offset_factor value {start_offset_factor} must be in the range [0, 1)"
assert 0 < on_factor <= 1, "on_factor must be a value in the range (0, 1]"
assert 0 <= off_factor <= 1, "off_factor must be a value in the range [0, 1]"
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
local_path = utils.pathmgr.get_local_path(video_path)
utils.validate_video_path(local_path)
video_info = helpers.get_video_info(local_path)
_, video_ext = os.path.splitext(local_path)
duration = float(video_info["duration"])
start_offset = duration * start_offset_factor
on_segment = duration * on_factor
off_segment = on_segment * off_factor
subclips = []
n = int((duration - start_offset) / (on_segment + off_segment))
# let a = on_segment and b = off_segment
# subclips: 0->a, a+b -> 2*a + b, 2a+2b -> 3a+2b, .., na+nb -> (n+1)a + nb
with tempfile.TemporaryDirectory() as tmpdir:
for i in range(n):
clip_path = os.path.join(tmpdir, f"{i}{video_ext}")
trim(
video_path,
clip_path,
start=start_offset + i * on_segment + i * off_segment,
end=min(
duration, start_offset + (i + 1) * on_segment + i * off_segment
),
)
subclips.append(clip_path)
# Skip concatenation if only 1 clip.
if n > 1:
concat(
subclips,
output_path,
transition=transition,
)
else:
if output_path is not None:
shutil.copy(subclips[0], output_path)
else:
shutil.copy(subclips[0], local_path)
if metadata is not None:
helpers.get_metadata(
metadata=metadata, function_name="time_decimate", **func_kwargs
)
return output_path or video_path |
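A usage sketch with placeholder paths and an assumed import path, with the segment arithmetic from the body above worked through for a 30-second clip.

import augly.video as vidaugs  # assumed module path

# For a 30-second clip with the values below:
#   on_segment  = 30 * 0.2 = 6 s
#   off_segment = 6 * 0.5  = 3 s
#   n = int(30 / (6 + 3)) = 3 clips: 0-6 s, 9-15 s, 18-24 s -> ~18 s result
vidaugs.time_decimate(
    "input.mp4",
    output_path="decimated.mp4",
    on_factor=0.2,
    off_factor=0.5,
)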
Trims the video using the specified start and end parameters
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param start: starting point in seconds of when the trimmed video should start.
If None, start will be 0
@param end: ending point in seconds of when the trimmed video should end.
If None, the end will be the duration of the video
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def trim(
video_path: str,
output_path: Optional[str] = None,
start: Optional[float] = None,
end: Optional[float] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Trims the video using the specified start and end parameters
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param start: starting point in seconds of when the trimmed video should start.
If None, start will be 0
@param end: ending point in seconds of when the trimmed video should end.
If None, the end will be the duration of the video
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
trim_aug = af.VideoAugmenterByTrim(start=start, end=end)
trim_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="trim", **func_kwargs)
return output_path or video_path |
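A usage sketch with placeholder paths and an assumed import path.

import augly.video as vidaugs  # assumed module path

# Keep only seconds 2.5 through 10.0 of the clip; omitting start or end
# defaults them to the beginning or the full duration respectively.
vidaugs.trim("input.mp4", output_path="clip.mp4", start=2.5, end=10.0)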
Vertically flips a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def vflip(
video_path: str,
output_path: Optional[str] = None,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Vertically flips a video
@param video_path: the path to the video to be augmented
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
vflip_aug = af.VideoAugmenterByVFlip()
vflip_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="vflip", **func_kwargs)
return output_path or video_path |
Vertically stacks two videos
@param video_path: the path to the video that will be stacked on top
@param second_video_path: the path to the video that will be stacked on the bottom
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param use_second_audio: if set to True, the audio of the bottom video will be used
instead of the top's
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video | def vstack(
video_path: str,
second_video_path: str,
output_path: Optional[str] = None,
use_second_audio: bool = False,
metadata: Optional[List[Dict[str, Any]]] = None,
) -> str:
"""
Vertically stacks two videos
@param video_path: the path to the video that will be stacked on top
@param second_video_path: the path to the video that will be stacked on the bottom
@param output_path: the path in which the resulting video will be stored.
If not passed in, the original video file will be overwritten
@param use_second_audio: if set to True, the audio of the bottom video will be used
instead of the top's
@param metadata: if set to be a list, metadata about the function execution
including its name, the source & dest duration, fps, etc. will be appended
to the inputted list. If set to None, no metadata will be appended or returned
@returns: the path to the augmented video
"""
func_kwargs = helpers.get_func_kwargs(metadata, locals(), video_path)
vstack_aug = af.VideoAugmenterByStack(second_video_path, use_second_audio, "vstack")
vstack_aug.add_augmenter(video_path, output_path)
if metadata is not None:
helpers.get_metadata(metadata=metadata, function_name="vstack", **func_kwargs)
return output_path or video_path |
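A minimal usage sketch for the stacking helper above. The file paths are hypothetical placeholders, and the function is assumed to be in scope (e.g. imported from the video augmentations module):

```python
# Hypothetical local file paths -- placeholders, not part of the library.
top_clip = "/tmp/clip_a.mp4"
bottom_clip = "/tmp/clip_b.mp4"

meta = []  # each applied transform appends a metadata dict here
vstack(
    top_clip,
    bottom_clip,
    output_path="/tmp/stacked.mp4",
    use_second_audio=True,  # keep the bottom clip's audio track
    metadata=meta,
)
```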
Returns the metadata that ffprobe reports for the file's audio stream. Of
particular use are the audio encoder ("codec_name"), the sample rate
("sample_rate"), and the length in seconds ("duration")
Accepts as input either an audio or video path. | def get_audio_info(media_path: str) -> Dict[str, Any]:
"""
Returns the metadata that ffprobe reports for the file's audio stream. Of
particular use are the audio encoder ("codec_name"), the sample rate
("sample_rate"), and the length in seconds ("duration")
Accepts as input either an audio or video path.
"""
try:
local_media_path = pathmgr.get_local_path(media_path)
except RuntimeError:
raise FileNotFoundError(f"Provided media path {media_path} does not exist")
probe = ffmpeg.probe(local_media_path, cmd=FFPROBE_PATH)
audio_info = next(
(stream for stream in probe["streams"] if stream["codec_type"] == "audio"),
None,
)
assert (
audio_info is not None
), "Error retrieving audio metadata, please verify that an audio stream exists"
return audio_info |
Returns the metadata that ffprobe reports for the file's video stream. Of
particular use are the FPS ("avg_frame_rate"), the number of raw frames ("nb_frames"),
the height and width of each frame ("height", "width"), and the length in seconds ("duration") | def get_video_info(video_path: str) -> Dict[str, Any]:
"""
Returns the metadata that ffprobe reports for the file's video stream. Of
particular use are the FPS ("avg_frame_rate"), the number of raw frames ("nb_frames"),
the height and width of each frame ("height", "width"), and the length in seconds ("duration")
"""
try:
local_video_path = pathmgr.get_local_path(video_path)
except RuntimeError:
raise FileNotFoundError(f"Provided video path {video_path} does not exist")
probe = ffmpeg.probe(local_video_path, cmd=FFPROBE_PATH)
video_info = next(
(stream for stream in probe["streams"] if stream["codec_type"] == "video"),
None,
)
assert (
video_info is not None
), "Error retrieving video metadata, please verify that the video file exists"
return video_info |
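A small sketch of how the returned ffprobe fields are typically consumed. Note that ffprobe reports most values as strings (e.g. "avg_frame_rate" as a ratio such as "30000/1001"), so the parsing below is an assumption about that string format, and the path is a placeholder:

```python
# Hypothetical path; replace with a real local video.
info = get_video_info("/tmp/example.mp4")

# ffprobe returns strings, so convert before doing arithmetic.
duration_s = float(info["duration"])
num, den = info["avg_frame_rate"].split("/")
fps = float(num) / float(den)
width, height = info["width"], info["height"]
print(f"{width}x{height}, {fps:.2f} fps, {duration_s:.1f}s")
```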
The intensity is calculated as the percentage of the result video
that contains inserted segments. | def insert_in_background_multiple_intensity(
metadata: Dict[str, Any], **kwargs
) -> float:
"""
The intensity is calculated as the percentage of the result video
that contains inserted segments.
"""
dst_duration = metadata["dst_duration"]
starts = metadata["src_segment_starts"]
ends = metadata["src_segment_ends"]
inserted = np.sum(ends - starts)
return (inserted / dst_duration) * 100.0 |
The intensity of replace_with_background is the fraction of the source video duration
that was replaced with background. Because the overall duration of the video is preserved,
the background segments together must be shorter than the source duration so the intensity is never
greater than 100. | def replace_with_background_intensity(metadata: Dict[str, Any], **kwargs) -> float:
"""
The intensity of replace_with_background is the fraction of the source video duration
that was replaced with background. Because the overall duration of the video is preserved,
the background segments together must be shorter than the source duration so the intensity is never
greater than 100.
"""
src_duration = metadata["src_duration"]
total_bg_duration = (
metadata["starting_background_duration"]
+ metadata["ending_background_duration"]
)
return min((total_bg_duration / src_duration) * 100.0, 100.0) |
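A worked example of the formula above, using hand-constructed metadata (the numbers are illustrative only):

```python
# 2s of background prepended + 3s appended, replacing half of a 10s source.
example_metadata = {
    "src_duration": 10.0,
    "starting_background_duration": 2.0,
    "ending_background_duration": 3.0,
}
print(replace_with_background_intensity(example_metadata))  # (5 / 10) * 100 = 50.0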
Computes intensity of any distractor-type transform, which adds some kind
of media (images, emojis, text, dots, logos) on top of the src video within
a specified bounding box. | def distractor_overlay_intensity_helper(
topleft: Optional[Tuple[float, float]],
bottomright: Optional[Tuple[float, float]],
num_overlay_content: int,
**kwargs,
) -> float:
"""
Computes intensity of any distractor-type transform, which adds some kind
of media (images, emojis, text, dots, logos) on top of the src video within
a specified bounding box.
"""
assert topleft is None or all(
0.0 <= t <= 1.0 for t in topleft
), "Topleft must be in the range [0, 1]"
assert bottomright is None or all(
0.0 <= b <= 1.0 for b in bottomright
), "Bottomright must be in the range [0, 1]"
assert (
isinstance(num_overlay_content, int) and num_overlay_content >= 0
), "num_overlay_content must be a nonnegative int"
if topleft is None or bottomright is None:
return 100.0
max_num_overlay_content_val = 100
num_overlay_content_intensity = num_overlay_content / max_num_overlay_content_val
x1, y1 = topleft
x2, y2 = bottomright
distractor_area = (x2 - x1) * (y2 - y1)
return min((distractor_area * num_overlay_content_intensity) * 100.0, 100.0) |
Computes intensity of a transform that consists of temporal cropping or
padding. For these types of transforms the intensity is defined as the
percentage of video time that has been cut out (for cropping) or added
(for padding). When computing the percentage, the denominator should be
the longer of the src & dst durations so the resulting percentage isn't
greater than 100. | def time_crop_or_pad_intensity_helper(metadata: Dict[str, Any]) -> float:
"""
Computes intensity of a transform that consists of temporal cropping or
padding. For these types of transforms the intensity is defined as the
percentage of video time that has been cut out (for cropping) or added
(for padding). When computing the percentage, the denominator should be
the longer of the src & dst durations so the resulting percentage isn't
greater than 100.
"""
dst_duration = metadata["dst_duration"]
src_duration = metadata["src_duration"]
larger_duration = max(src_duration, dst_duration)
return (abs(dst_duration - src_duration) / larger_duration) * 100.0 |
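A worked example: cropping a 10-second source down to 6 seconds removes 4 of the original 10 seconds, so the intensity is 40.

```python
print(time_crop_or_pad_intensity_helper(
    {"src_duration": 10.0, "dst_duration": 6.0}))  # (4 / 10) * 100 = 40.0
```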
Calculates how the given matching pair src_segment & dst_segment change
given a temporal crop starting at crop_start & ending at crop_end. We can
use the same logic here for multiple transforms, by setting the crop_start
& crop_end depending on the transform kwargs.
Doesn't return anything, but appends the new matching segments in the dst
video corresponding to the pair passed in to new_src_segments & new_dst_segments,
if the segment pair still matches in the dst video. If the passed in segment
pair is cropped out as a result of this temporal crop, nothing will be
appended to the lists, since the segment pair doesn't exist in the dst video. | def compute_time_crop_segments(
src_segment: Segment,
dst_segment: Segment,
speed_factor: float,
crop_start: float,
crop_end: float,
new_src_segments: List[Segment],
new_dst_segments: List[Segment],
end_dst_offset: float = 0.0,
) -> None:
"""
Calculates how the given matching pair src_segment & dst_segment change
given a temporal crop starting at crop_start & ending at crop_end. We can
use the same logic here for multiple transforms, by setting the crop_start
& crop_end depending on the transform kwargs.
Doesn't return anything, but appends the new matching segments in the dst
video corresponding to the pair passed in to new_src_segments & new_dst_segments,
if the segment pair still matches in the dst video. If the passed in segment
pair is cropped out as a result of this temporal crop, nothing will be
appended to the lists, since the segment pair doesn't exist in the dst video.
"""
# Crop segment is outside of the initial clip, so this matching segment
# pair no longer exists in the new video.
if crop_start >= dst_segment.end or crop_end <= dst_segment.start:
return
# new_start represents where the matching segment starts in the dst audio
# (if negative, then part of the matching segment is getting cut out, so
# we need to adjust both the src & dst starts).
# Note: if the video was sped up before, we need to take this into account
# (the matching segment that is e.g. 10 seconds of dst audio might
# correspond to 5 seconds of src audio, if it was previously
# slowed down by 0.5x).
new_start = (dst_segment.start - crop_start) * speed_factor
src_start, src_end, src_id = src_segment
if new_start < 0:
# We're cropping the beginning of the matching segment.
src_start = src_segment.start - new_start
new_start = 0
new_end = min(dst_segment.end - crop_start, crop_end - crop_start)
if crop_end < dst_segment.end:
# We're cropping the end of the matching segment.
# Note: if the video was sped up before, we need to take this into
# account (as above).
src_end = src_segment.end - (dst_segment.end - crop_end) * speed_factor
new_src_segments.append(Segment(src_start, src_end, src_id))
new_dst_segments.append(
Segment(new_start + end_dst_offset, new_end + end_dst_offset)
) |
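A worked example of the logic above, assuming `Segment` is the (start, end, src_id) tuple type used throughout these helpers: cropping a 10-second clip to [2, 8] trims both ends of a full-length matching segment.

```python
new_src, new_dst = [], []
compute_time_crop_segments(
    src_segment=Segment(0.0, 10.0, None),
    dst_segment=Segment(0.0, 10.0),
    speed_factor=1.0,
    crop_start=2.0,
    crop_end=8.0,
    new_src_segments=new_src,
    new_dst_segments=new_dst,
)
# The surviving match is src [2.0, 8.0] -> dst [0.0, 6.0].
print(new_src, new_dst)
```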
This function performs the logic of computing the new matching segments based
on the old ones, for the set of transforms that temporally change the video.
Returns the lists of new src segments & dst segments, respectively. | def compute_changed_segments(
name: str,
src_segments: List[Segment],
dst_segments: List[Segment],
src_duration: float,
dst_duration: float,
speed_factor: float,
**kwargs,
) -> Tuple[List[Segment], List[Segment]]:
"""
This function performs the logic of computing the new matching segments based
on the old ones, for the set of transforms that temporally change the video.
Returns the lists of new src segments & dst segments, respectively.
"""
new_src_segments, new_dst_segments = [], []
td = get_transition_duration(kwargs)
for src_segment, dst_segment in zip(src_segments, dst_segments):
if name == "insert_in_background":
# Note: insert_in_background is expected to record these kwargs in its metadata.
offset = kwargs["offset_factor"] * kwargs["background_video_duration"]
transition_before = int(kwargs["transition_before"])
transition_after = int(kwargs["transition_after"])
# The matching segments are just offset in the dst audio by the amount
# of background video inserted before the src video.
new_src_segments.append(
src_segment.delta(
transition_before * td / 2, -transition_after * td / 2
)
)
new_dst_segments.append(
Segment(
dst_segment.start + offset - transition_before * td / 2,
dst_segment.end
+ offset
- transition_before * td
- transition_after * td / 2,
)
)
elif name == "insert_in_background_multiple":
compute_insert_in_background_multiple_segments(
src_segment_starts=kwargs["src_segment_starts"],
src_segment_ends=kwargs["src_segment_ends"],
bkg_insertion_points=kwargs["bkg_insertion_points"],
src_ids=kwargs["src_ids"],
transition_duration=td,
new_src_segments=new_src_segments,
new_dst_segments=new_dst_segments,
)
elif name == "replace_with_background":
clip_start = kwargs["starting_background_duration"]
duration = kwargs["source_duration"]
compute_time_crop_segments(
src_segment,
dst_segment,
speed_factor,
clip_start,
clip_start + duration,
new_src_segments,
new_dst_segments,
end_dst_offset=clip_start,
)
elif name == "change_video_speed":
crt_factor = kwargs["factor"]
global_factor = crt_factor * speed_factor
new_src_segments.append(src_segment)
new_dst_segments.append(
Segment(
dst_segment.start / global_factor,
dst_segment.end / global_factor,
)
)
elif name == "concat":
src_index = kwargs["src_video_path_index"]
num_videos = len(kwargs["video_paths"])
transition_offset_start = td / 2 if src_index > 0 else 0.0
transition_offset_end = td / 2 if src_index < num_videos - 1 else 0.0
new_src_segments.append(
src_segment.delta(transition_offset_start, -transition_offset_end)
)
offset = sum(
float(helpers.get_video_info(vp)["duration"]) - td
for vp in kwargs["video_paths"][: kwargs["src_video_path_index"]]
)
new_dst_segments.append(
Segment(
dst_segment.start + offset + transition_offset_start,
dst_segment.end + offset - transition_offset_end,
)
)
elif name == "loop":
# The existing segments are unchanged.
new_src_segments.append(src_segment)
new_dst_segments.append(dst_segment)
# Each original src segments now additionally matches a segment in
# each loop in the dst video.
for l_idx in range(kwargs["num_loops"]):
new_src_segments.append(src_segment)
new_dst_segments.append(
Segment(
dst_segment.start + (l_idx + 1) * src_duration,
dst_segment.end + (l_idx + 1) * src_duration,
)
)
elif name == "time_crop":
crop_start = kwargs["offset_factor"] * src_duration
crop_end = crop_start + kwargs["duration_factor"] * src_duration
compute_time_crop_segments(
src_segment,
dst_segment,
speed_factor,
crop_start,
crop_end,
new_src_segments,
new_dst_segments,
)
elif name == "time_decimate":
compute_time_decimate_segments(
src_segment,
dst_segment,
src_duration,
speed_factor,
td,
new_src_segments,
new_dst_segments,
**kwargs,
)
elif name == "trim":
crop_start = kwargs["start"] or 0.0
crop_end = kwargs["end"] or src_duration
compute_time_crop_segments(
src_segment,
dst_segment,
speed_factor,
crop_start,
crop_end,
new_src_segments,
new_dst_segments,
)
elif name == "replace_with_color_frames":
# This transform is like the inverse of time_crop/trim, because
# offset & duration denote where the src content is being cropped
# out, instead of which src content is being kept.
offset = kwargs["offset_factor"] * src_duration
duration = kwargs["duration_factor"] * src_duration
compute_time_crop_segments(
src_segment,
dst_segment,
speed_factor,
0.0,
offset,
new_src_segments,
new_dst_segments,
)
compute_time_crop_segments(
src_segment,
dst_segment,
speed_factor,
offset + duration,
dst_duration,
new_src_segments,
new_dst_segments,
)
return new_src_segments, new_dst_segments |
Compute matching pairs of src_segment -> dst_segment, given the kwargs of the
transform, as well as the metadata about previously applied transforms. | def compute_segments(
name: str,
src_duration: float,
dst_duration: float,
metadata: List[Dict[str, Any]],
**kwargs,
) -> Tuple[List[Segment], List[Segment]]:
"""
Compute matching pairs of src_segment -> dst_segment, given the kwargs of the
transform, as well as the metadata about previously applied transforms.
"""
speed_factor = 1.0
src_id = kwargs.get("src_id", None)
if not metadata:
src_segments = [Segment(0.0, src_duration, src_id)]
dst_segments = [Segment(0.0, src_duration)]
else:
src_segments = [
Segment(
segment_dict["start"], segment_dict["end"], segment_dict.get("src_id")
)
for segment_dict in metadata[-1]["src_segments"]
]
dst_segments = [
Segment(segment_dict["start"], segment_dict["end"])
for segment_dict in metadata[-1]["dst_segments"]
]
for meta in metadata:
if meta["name"] in ["change_video_speed"]:
speed_factor *= meta["factor"]
if name in [
"insert_in_background",
"insert_in_background_multiple",
"replace_with_background",
"change_video_speed",
"loop",
"time_crop",
"time_decimate",
"trim",
"replace_with_color_frames",
"concat",
]:
return compute_changed_segments(
name,
src_segments,
dst_segments,
src_duration,
dst_duration,
speed_factor,
**kwargs,
)
else:
return src_segments, dst_segments |
Creates a video with frames of the specified color
@param output_path: the path in which the resulting video will be stored
@param duration: how long the video should be, in seconds
@param height: the desired height of the video to be generated
@param width: the desired width of the video to be generated
@param color: RGB color of the video. Default color is black | def create_color_video(
output_path: str,
duration: float,
height: int,
width: int,
color: Tuple[int, int, int] = utils.DEFAULT_COLOR,
) -> None:
"""
Creates a video with frames of the specified color
@param output_path: the path in which the resulting video will be stored
@param duration: how long the video should be, in seconds
@param height: the desired height of the video to be generated
@param width: the desired width of the video to be generated
@param color: RGB color of the video. Default color is black
"""
utils.validate_output_path(output_path)
assert duration > 0, "Duration of the video must be a positive value"
assert height > 0, "Height of the video must be a positive value"
assert width > 0, "Width of the video must be a positive value"
with tempfile.TemporaryDirectory() as tmpdir:
image_path = os.path.join(tmpdir, "image.png")
color_frame = np.full((height, width, 3), color[::-1], dtype=np.uint8)  # BGR order, uint8 for OpenCV
cv2.imwrite(image_path, color_frame)
create_video_from_image(output_path, image_path, duration) |
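A minimal usage sketch, assuming write access to the hypothetical output path shown:

```python
# Generate a 3-second, 1280x720 solid red clip (color is given in RGB order).
create_color_video(
    "/tmp/red_background.mp4",
    duration=3.0,
    height=720,
    width=1280,
    color=(255, 0, 0),
)
```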
Creates a video with all frames being the image provided
@param output_path: the path in which the resulting video will be stored
@param image_path: the path to the image to use to create the video
@param duration: how long the video should be, in seconds | def create_video_from_image(output_path: str, image_path: str, duration: float) -> None:
"""
Creates a video with all frames being the image provided
@param output_path: the path in which the resulting video will be stored
@param image_path: the path to the image to use to create the video
@param duration: how long the video should be, in seconds
"""
utils.validate_output_path(output_path)
utils.validate_image_path(image_path)
assert duration > 0, "Duration of the video must be a positive value"
im_stream = ffmpeg.input(image_path, stream_loop=-1)
video = im_stream.filter("framerate", utils.DEFAULT_FRAME_RATE).filter(
"pad", **{"width": "ceil(iw/2)*2", "height": "ceil(ih/2)*2"}
)
silent_audio_path = utils.pathmgr.get_local_path(utils.SILENT_AUDIO_PATH)
audio = ffmpeg.input(silent_audio_path, stream_loop=math.ceil(duration)).audio
output = ffmpeg.output(video, audio, output_path, pix_fmt="yuv420p", t=duration)
output.overwrite_output().run(cmd=FFMPEG_PATH) |
Returns a callable version of a function with a given `FunctionId`. | def get_function(function_id: FunctionId,
np_like: NumpyLike) -> Callable[[NDArray], NDArray]:
"""Returns a callable version of a function with a given `FunctionId`."""
if function_id.name == ABS.name:
if function_id.derivative_order == 0:
return np_like.abs
elif function_id.derivative_order == 1:
return np_like.sign
else:
raise NotImplementedError(function_id.derivative_order)
elif function_id.name == EXP.name:
return np_like.exp
elif function_id.name == LOG.name:
k = function_id.derivative_order
if k == 0:
return np_like.log
else:
sign = -1 if k % 2 == 0 else 1
return lambda x: sign * math.factorial(k-1) * np_like.asarray(x)**-k
elif function_id.name == SIGMOID.name:
return functools.partial(_sigmoid_derivative,
function_id.derivative_order, np_like=np_like)
elif function_id.name == SOFTPLUS.name:
return functools.partial(_softplus_derivative,
function_id.derivative_order, np_like=np_like)
elif function_id.name == SWISH.name:
return functools.partial(_swish_derivative,
function_id.derivative_order, np_like=np_like)
else:
raise NotImplementedError(function_id) |
Gets `FunctionData` given `FunctionId`. | def get_function_data(function_id: FunctionId) -> FunctionData:
"""Gets `FunctionData` given `FunctionId`."""
if function_id.name == EXP.name:
return FunctionData((), (), monotonically_increasing=True)
elif function_id.name == LOG.name:
k = function_id.derivative_order
return FunctionData(
# The domain of the log function is (0, infinity). Over this domain,
# none of the derivatives have any local extrema.
(),
(),
monotonically_decreasing=(k%2 == 1),
monotonically_increasing=(k%2 == 0)
)
elif function_id.name == SIGMOID.name:
# Sigmoid is 1st derivative of softplus, so kth derivative of sigmoid is
# (k+1)st derivative of softplus.
return get_function_data(
SOFTPLUS.derivative_id(1 + function_id.derivative_order))
elif function_id in _FUNCTION_DATA:
return _FUNCTION_DATA[function_id]
else:
raise NotImplementedError(function_id) |
Returns the Taylor polynomial coefficients for a given function at `x0`.
Args:
function_id: a `FunctionId`
degree: the degree of the Taylor polynomial whose coefficients we return
x0: the reference point
np_like: a `NumpyLike` backend.
Returns:
a list of `NDArray`s of Taylor polynomial coefficients, of length
`degree+1`. | def get_taylor_polynomial_coefficients(
function_id: FunctionId,
degree: int,
x0: NDArray,
np_like: NumpyLike) -> list[NDArray]:
"""Returns the Taylor polynomial coefficients for a given function at `x0`.
Args:
function_id: a `FunctionId`
degree: the degree of the Taylor polynomial whose coefficients we return
x0: the reference point
np_like: a `NumpyLike` backend.
Returns:
a list of `NDArray`s of Taylor polynomial coefficients, of length
`degree+1`.
"""
coefficients = []
for i in range(degree + 1):
f_deriv = get_function(function_id.derivative_id(i), np_like)
coefficients.append(f_deriv(x0) / math.factorial(i))
return coefficients |
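For example, the degree-3 Taylor coefficients of exp at x0 = 0 are 1/i!. A small sketch, assuming `EXP` is the exponential `FunctionId` constant referenced in `get_function` and NumPy is used as the backend:

```python
import numpy as np

coeffs = get_taylor_polynomial_coefficients(EXP, degree=3, x0=np.array(0.0), np_like=np)
# coeffs is approximately [1.0, 1.0, 0.5, 0.1667], i.e. exp(0) / i! for i = 0..3.
```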
Returns maximum value of `f` over `[x_min, x_max]`. | def maximum_value(f,
x_min: NDArray,
x_max: NDArray,
local_maxima: Sequence[float],
np_like: NumpyLike) -> NDArray:
"""Returns maximum value of `f` over `[x_min, x_max]`."""
if not local_maxima:
return np_like.maximum(f(x_min), f(x_max))
sorted_maxima = list(sorted(local_maxima, key=f, reverse=True))
x = sorted_maxima[0]
return np_like.where(
np_like.logical_and(x_min <= x, x <= x_max),
f(x),
maximum_value(f, x_min, x_max, sorted_maxima[1:], np_like)) |
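A small NumPy sketch of the recursion above: the local maximum only matters for the entries whose interval actually contains it; the rest fall back to the endpoint values.

```python
import numpy as np

f = lambda x: 4.0 - (np.asarray(x) - 1.0) ** 2  # single local maximum at x = 1
x_min = np.array([0.0, 2.0])
x_max = np.array([3.0, 3.0])
print(maximum_value(f, x_min, x_max, local_maxima=[1.0], np_like=np))
# [4. 3.] -- the first interval contains x=1, the second uses max(f(2), f(3)).
```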
Returns minimum value of `f` over `[x_min, x_max]`. | def minimum_value(f,
x_min: NDArray,
x_max: NDArray,
local_minima: Sequence[float],
np_like: NumpyLike) -> NDArray:
"""Returns minimum value of `f` over `[x_min, x_max]`."""
if not local_minima:
return np_like.minimum(f(x_min), f(x_max))
sorted_minima = list(sorted(local_minima, key=f))
x = sorted_minima[0]
return np_like.where(
np_like.logical_and(x_min <= x, x <= x_max),
f(x),
minimum_value(f, x_min, x_max, sorted_minima[1:], np_like)) |
Returns exact range of specified function over `trust_region`. | def get_range(function_id: FunctionId,
trust_region: Interval,
np_like: NumpyLike) -> Interval:
"""Returns exact range of specified function over `trust_region`."""
f = get_function(function_id, np_like)
function_data = get_function_data(function_id)
return _get_range(
f,
trust_region[0],
trust_region[1],
function_data.local_minima,
function_data.local_maxima,
np_like
) |
Returns the (elementwise) derivative of a specified order. | def _sigmoid_derivative(order: int, x: NDArrayLike,
np_like: NumpyLike) -> NDArray:
"""Returns the (elementwise) derivative of a specified order."""
# Note: we could make this work for arbitrary order using autodiff, but we
# don't because this module is backend-agnostic, and we don't have a way to
# do autodiff in a backend-agnostic way.
s = _sigmoid(x, np_like)
sm = _sigmoid(-x, np_like)
if order == 0:
return s
elif order == 1:
return s*sm
elif order == 2:
return s*sm*(1-2*s)
elif order == 3:
return s*sm*((1-2*s)**2 - 2*s*sm)
elif order == 4:
return (s*sm*(1-2*s)*((1-2*s)**2 - 2*s*sm) +
s*sm*(-4*(1-2*s)*s*sm - 2*s*sm*(1-2*s)))
else:
raise NotImplementedError(order) |
Returns a (possibly) lower-degree enclosure of a given TaylorEnclosure. | def enclose_enclosure(
enclosure: TaylorEnclosureLike,
trust_region: IntervalLike,
max_degree: int,
np_like: NumpyLike,
) -> TaylorEnclosure:
"""Returns a (possibly) lower-degree enclosure of a given TaylorEnclosure."""
set_arithmetic = interval_arithmetic.IntervalArithmetic(np_like)
trust_region = set_arithmetic.as_interval(trust_region)
enclosure = as_taylor_enclosure(enclosure, np_like)
orig_degree = len(enclosure) - 1
if orig_degree <= max_degree:
return enclosure
else:
new_final_coefficient = polynomials.eval_taylor_enclosure(
enclosure[max_degree:], trust_region, set_arithmetic.np_like)
return TaylorEnclosure(enclosure[:max_degree] + (new_final_coefficient,)) |
Like expand_dims, but adds n dims rather than just 1. | def expand_multiple_dims(a: NDArray, n: int, axis=None) -> NDArray:
"""Like expand_dims, but adds n dims rather than just 1."""
if axis is None:
axis = a.ndim
colon = slice(None, None, None)
return a[(colon,) * axis + (None,) * n + (...,)] |
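A quick NumPy illustration of the axis handling:

```python
import numpy as np

a = np.ones((2, 3))
print(expand_multiple_dims(a, 2).shape)          # (2, 3, 1, 1) -- appended by default
print(expand_multiple_dims(a, 2, axis=0).shape)  # (1, 1, 2, 3) -- inserted at the front
```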
Apply a function to each NDArray in a TaylorEnclosure. | def map_over_enclosure(
a: TaylorEnclosure,
fun: Callable[[NDArray], NDArray]) -> TaylorEnclosure:
"""Apply a function to each NDArray in a TaylorEnclosure."""
return TaylorEnclosure(
tuple(tuple(map(fun, c)) if isinstance(c, tuple) else fun(c) for c in a)) |
Returns d such that <c, z**i>^exponent == <d, z**(i*exponent)>.
Args:
c: a coefficient
i: a non-negative integer
exponent: a non-negative integer
x_ndim: the number of dimensions in the independent variable
np_like: a Numpy-like backend
Returns:
an NDArray or Interval d, such that
<c, z**i>^exponent == <d, z**(i*exponent)>.
where ** denotes outer product, and ^ denotes elementwise exponentiation. | def _elementwise_term_power_coefficient(
c: Union[NDArrayLike, IntervalLike],
i: int,
exponent: int,
x_ndim: int,
np_like: NumpyLike) -> Union[NDArray, Interval]:
"""Returns d such that <c, z**i>^exponent == <d, z**(i*exponent)>.
Args:
c: a coefficient
i: a non-negative integer
exponent: a non-negative integer
x_ndim: the number of dimensions in the independent variable
np_like: a Numpy-like backend
Returns:
an NDArray or Interval d, such that
<c, z**i>^exponent == <d, z**(i*exponent)>.
where ** denotes outer product, and ^ denotes elementwise exponentiation.
"""
set_arithmetic = interval_arithmetic.IntervalArithmetic(np_like)
batch_dims = set_arithmetic.ndim(c) - i*x_ndim
if batch_dims < 0:
raise ValueError((set_arithmetic.ndim(c), i, x_ndim))
return set_arithmetic.outer_power(c, exponent, batch_dims) |
Returns d such that <c0, z**i> * <c1, z**j> == <d, z**(i+j)>. | def _elementwise_term_product_coefficient(
c0: Union[NDArrayLike, IntervalLike],
c1: Union[NDArrayLike, IntervalLike],
i: int,
j: int,
x_ndim: int,
np_like: NumpyLike) -> Union[NDArray, Interval]:
"""Returns d such that <c0, z**i> * <c1, z**j> == <d, z**(i+j)>."""
def product(u, v):
return _pairwise_batched_multiply(u, v, i*x_ndim, j*x_ndim, np_like)
set_arithmetic = interval_arithmetic.IntervalArithmetic(np_like)
return set_arithmetic.arbitrary_bilinear(c0, c1, product, assume_product=True) |
Batched version of multiply, for use as input to arbitrary_bilinear().
See the docstring for TaylorEnclosureArithmetic.arbitrary_bilinear for
context.
Args:
u: an NDArray of dimension at least p
v: an NDArray of dimension at least q
p: a non-negative integer
q: a non-negative integer
np_like: a NumpyLike back end
Returns:
an NDArray 'output', such that for every pair of tuples
I = (i_1, i_2, ..., i_p) and J = (j_1, j_2, ..., j_q),
output[(...,) + I + J] = u[(...,) + I] * v[(...,) + J] . | def _pairwise_batched_multiply(
u: NDArrayLike,
v: NDArrayLike,
p: int,
q: int,
np_like: NumpyLike) -> NDArray:
"""Batched version of multiply, for use as input to arbitrary_bilinear().
See the docstring for TaylorEnclosureArithmetic.arbitrary_bilinear for
context.
Args:
u: an NDArray of dimension at least p
v: an NDArray of dimension at least q
p: a non-negative integer
q: a non-negative integer
np_like: a NumpyLike back end
Returns:
an NDArray 'output', such that for every pair of tuples
I = (i_1, i_2, ..., i_p) and J = (j_1, j_2, ..., j_q),
output[(...,) + I + J] = u[(...,) + I] * v[(...,) + J] .
"""
u = np_like.asarray(u)
v = np_like.asarray(v)
return expand_multiple_dims(u, q) * expand_multiple_dims(v, p, v.ndim-q) |
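For p = q = 1 and vector inputs this reduces to an ordinary outer product; a tiny NumPy check:

```python
import numpy as np

u = np.array([1.0, 2.0])
v = np.array([10.0, 20.0, 30.0])
out = _pairwise_batched_multiply(u, v, p=1, q=1, np_like=np)
print(out.shape)                          # (2, 3)
print(np.allclose(out, np.outer(u, v)))   # True
```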
Multiplies a and b, broadcasting over leftmost dimensions. | def _left_broadcasting_multiply(a: NDArrayLike, b: NDArrayLike,
np_like: NumpyLike) -> NDArray:
"""Multiplies a and b, broadcasting over leftmost dimensions."""
a = np_like.asarray(a)
b = np_like.asarray(b)
if a.ndim > b.ndim:
raise NotImplementedError()
return expand_multiple_dims(a, b.ndim - a.ndim) * b |
Checks whether a pattern matches a subject, and returns mapping if so.
Args:
pattern: a sequence of `Operations`
subject: a sequence of `Operations`
can_bind: a callable that, given as arguments a pattern
`IntermediateVariable` `u` and a subject `IntermediateVariable` `v`,
determines whether `u` can be mapped to `v`. (This could return `False`,
for example, if `u` represents a constant and `v` represents a different
constant.)
Returns:
A dict `m` representing a match, or `None` if no match was found. If the
return value is not `None`, it maps from pattern `IntermediateVariable` to
subject `IntermediateVariable`, and satisfies:
```python
[e.subs(m) for e in pattern] == subject
``` | def match(
pattern: Sequence[Operation],
subject: Sequence[Operation],
can_bind: Callable[[IntermediateVariable, IntermediateVariable], bool]
) -> Optional[dict[IntermediateVariable, IntermediateVariable]]:
"""Checks whether a pattern matches a subject, and returns mapping if so.
Args:
pattern: a sequence of `Operations`
subject: a sequence of `Operations`
can_bind: a callable that, given as arguments a pattern
`IntermediateVariable` `u` and a subject `IntermediateVariable` `v`,
determines whether `u` can be mapped to `v`. (This could return `False`,
for example, if `u` represents a constant and `v` represents a different
constant.)
Returns:
A dict `m` representing a match, or `None` if no match was found. If the
return value is not `None`, it maps from pattern `IntermediateVariable` to
subject `IntermediateVariable`, and satisfies:
```python
[e.subs(m) for e in pattern] == subject
```
"""
if len(pattern) != len(subject):
return None
vertex_map = {} # from pattern vertex to subject vertex
for p, s in zip(pattern, subject):
if (len(p.inputs) != len(s.inputs) or len(p.outputs) != len(s.outputs)):
return None
if p.data != s.data:
return None
for u, v in itertools.chain(zip(p.inputs, s.inputs),
zip(p.outputs, s.outputs)):
if u in vertex_map:
if vertex_map[u] != v:
return None
else:
if can_bind(u, v):
vertex_map[u] = v
else:
return None
return vertex_map |
Perform a search/replace on a ComputationGraph.
This method greedily replaces occurrences of a given operation sequence
`pattern` with an operation sequence `replacement`.
Args:
pattern: a sequence of `Operations`
replacement: a sequence of `Operations` with which to replace occurrences of
the pattern.
subject: a `ComputationGraph`
can_bind: a `Callable` with the same meaning as the corresponding argument
to `match()`.
Returns:
a `ComputationGraph` with occurrences of `pattern` replaced by
`replacement`. | def replace(
pattern: Sequence[Operation],
replacement: Sequence[Operation],
subject: ComputationGraph,
can_bind: Callable[[IntermediateVariable, IntermediateVariable], bool]
) -> ComputationGraph:
"""Perform a search/replace on a ComputationGraph.
This method greedily replaces occurrences of a given operation sequence
`pattern` with an operation sequence `replacement`.
Args:
pattern: a sequence of `Operations`
replacement: a sequence of `Operations` with which to replace occurrences of
the pattern.
subject: a `ComputationGraph`
can_bind: a `Callable` with the same meaning as the corresponding argument
to `match()`.
Returns:
a `ComputationGraph` with occurrences of `pattern` replaced by
`replacement`.
"""
# Note: this could be made more efficient using the Knuth-Morris-Pratt
# algorithm.
k = len(pattern)
output_operations = []
i = 0
while i < len(subject.operations):
subgraph = subject.operations[i:i+k]
m = match(pattern, subgraph, can_bind)
if m is not None:
output_operations.extend([e.subs(m) for e in replacement])
i += k
else:
output_operations.append(subject.operations[i])
i += 1
return ComputationGraph(
subject.inputs,
subject.outputs,
output_operations,
data=subject.data
) |
Returns NDArray of shape shape(a)*n, with a on diagonal. | def _generalized_diag_ndarray(a: NDArrayLike, n: int,
np_like: NumpyLike) -> NDArray:
"""Returns NDArray of shape shape(a)*n, with a on diagonal."""
if n == 1:
return np_like.asarray(a)
elif n == 2:
a = np_like.asarray(a)
if a.ndim == 1:
return np_like.diag(a)
else:
raise NotImplementedError(a.ndim)
else:
raise NotImplementedError(n) |
Helper for creating an einsum() equation string. | def _stringify(axes: Sequence[int]) -> str:
"""Helper for creating an einsum() equation string."""
offset = ord('a')
return ''.join(chr(i + offset) for i in axes) |
Returns an outer product with batch dimensions.
Args:
a: an NDArray-like object.
b: an NDArray-like object.
batch_dims: number of batch dimensions.
np_like: a Numpy-like backend.
Returns:
an NDArray c such that, for every tuple I that indexes the first
batch_dims elements of a (and b), every tuple J that indexes the last
a.ndim - batch_dims elements of a, and every tuple K that indexes the last
a.ndim - batch_dims elements of b, we have:
c[I + J + K] = a[I + J] * b[I + K] | def _ndarray_outer_product(a: NDArrayLike,
b: NDArrayLike,
batch_dims: int,
np_like: NumpyLike) -> NDArray:
"""Returns an outer product with batch dimensions.
Args:
a: an NDArray-like object.
b: an NDArray-like object.
batch_dims: number of batch dimensions.
np_like: a Numpy-like backend.
Returns:
an NDArray c such that, for every tuple I that indexes the first
batch_dims elements of a (and b), every tuple J that indexes the last
a.ndim - batch_dims elements of a, and every tuple K that indexes the last
a.ndim - batch_dims elements of b, we have:
c[I + J + K] = a[I + J] * b[I + K]
"""
a = np_like.asarray(a)
b = np_like.asarray(b)
if batch_dims == 0:
return np_like.tensordot(a, b, 0)
else:
a_axes = tuple(range(a.ndim))
b_non_batch_axes = tuple(range(a.ndim, a.ndim + b.ndim - batch_dims))
b_axes = tuple(range(batch_dims)) + b_non_batch_axes
output_axes = a_axes + b_non_batch_axes
eq = (_stringify(a_axes) + ',' + _stringify(b_axes) +
'->' + _stringify(output_axes))
return np_like.einsum(eq, a, b) |
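A small NumPy check of the batched case described in the docstring:

```python
import numpy as np

a = np.ones((2, 3))
b = np.arange(8.0).reshape(2, 4)
c = _ndarray_outer_product(a, b, batch_dims=1, np_like=np)
print(c.shape)  # (2, 3, 4); c[i, j, k] == a[i, j] * b[i, k]
```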
Returns the value of a polynomial at a specific point. | def eval_polynomial(
coefficients: Sequence[FooLike],
z: FooLike,
inner_product: Callable[[FooLike, FooLike], Foo],
outer_power: Callable[[FooLike, int], Foo],
add: Callable[[FooLike, FooLike], Foo] = operator.add,
additive_identity: Foo = 0,
multiplicative_identity: Foo = 1) -> Foo:
"""Returns the value of a polynomial at a specific point."""
running_sum = additive_identity
z_to_the_i = multiplicative_identity
for i, coefficient in enumerate(coefficients):
if i > 0:
z_to_the_i = outer_power(z, i)
term = inner_product(coefficient, z_to_the_i)
running_sum = add(running_sum, term)
return running_sum |
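In the simplest instantiation the coefficients and z are plain scalars, the inner product is ordinary multiplication, and the outer power is ordinary exponentiation; a tiny sketch:

```python
import operator

# Evaluate 1 + 2*z + 3*z**2 at z = 0.5.
value = eval_polynomial(
    coefficients=[1.0, 2.0, 3.0],
    z=0.5,
    inner_product=operator.mul,
    outer_power=operator.pow,
)
print(value)  # 1 + 2*0.5 + 3*0.25 = 2.75
```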
Returns value of an ElementwiseTaylorEnclosure at x-x0. | def eval_elementwise_taylor_enclosure(
enclosure: types.ElementwiseTaylorEnclosureLike,
x_minus_x0: Union[types.NDArrayLike, types.IntervalLike],
np_like: types.NumpyLike) -> Union[types.Interval, types.NDArray]:
"""Returns value of an ElementwiseTaylorEnclosure at x-x0."""
set_arithmetic = interval_arithmetic.IntervalArithmetic(np_like)
return eval_polynomial(enclosure,
set_arithmetic.as_interval_or_ndarray(x_minus_x0),
set_arithmetic.multiply,
set_arithmetic.power,
set_arithmetic.add,
np_like.array(0),
np_like.array(1)) |
Returns value of an TaylorEnclosure at x-x0. | def eval_taylor_enclosure(
enclosure: types.TaylorEnclosureLike,
x_minus_x0: Union[types.NDArrayLike, types.IntervalLike],
np_like: types.NumpyLike) -> Union[types.Interval, types.NDArray]:
"""Returns value of an TaylorEnclosure at x-x0."""
set_arithmetic = interval_arithmetic.IntervalArithmetic(np_like)
inner_product = (
lambda a, b: set_arithmetic.tensordot(a, b, set_arithmetic.ndim(b)))
return eval_polynomial(enclosure,
set_arithmetic.as_interval_or_ndarray(x_minus_x0),
inner_product,
set_arithmetic.outer_power,
set_arithmetic.add,
np_like.array(0),
np_like.array(1)) |
Applies an arbitrary bilinear operation to two polynomials.
The arguments a and b give the coefficients of polynomials, defined in terms
of some inner product <x, y> and some exponentiation operator:
P_a(z) = sum_{i=0}^{len(a)-1} <a[i], z**i>.
Similarly, the sequence b represents a polynomial P_b(z).
Args:
a: a polynomial (sequence of coefficients)
b: a polynomial (sequence of coefficients)
add: a function that returns the sum of two polynomial coefficients
additive_identity: an additive identity object
term_product_coefficient: a callable that, given arguments c0, c1, i, j,
returns d such that op(<c0, z**i>, <c1, z**j>) = <d, z**(i+j)>, where op
is the underlying bilinear operation.
Returns:
a polynomial Q (tuple of coefficients), such that for any z,
op(P_a(z), P_b(z)) == Q(z)
where op is the underlying bilinear operation. | def arbitrary_bilinear(
a: Sequence[FooLike],
b: Sequence[FooLike],
add: Callable[[FooLike, FooLike], Foo] = operator.add,
additive_identity: Foo = 0,
term_product_coefficient: Callable[[FooLike, FooLike, int, int], Foo]
= lambda c0, c1, i, j: c0*c1,
) -> tuple[Foo, ...]:
"""Applies an arbitrary bilinear operation to two polynomials.
The arguments a and b give the coefficients of polynomials, defined in terms
of some inner product <x, y> and some exponentiation operator:
P_a(z) = sum_{i=0}^{len(a)-1} <a[i], z**i>.
Similarly, the sequence b represents a polynomial P_b(z).
Args:
a: a polynomial (sequence of coefficients)
b: a polynomial (sequence of coefficients)
add: a function that returns the sum of two polynomial coefficients
additive_identity: an additive identity object
term_product_coefficient: a callable that, given arguments c0, c1, i, j,
returns d such that op(<c0, z**i>, <c1, z**j>) = <d, z**(i+j)>, where op
is the underlying bilinear operation.
Returns:
a polynomial Q (tuple of coefficients), such that for any z,
op(P_a(z), P_b(z)) == Q(z)
where op is the underlying bilinear operation.
"""
# By bilinearity,
# op(sum_i <a[i], z**i>, sum_j <b[j], z**j>)
# == sum_{ij} op(<a[i], z**i>, <b[j], z**j>)
# == sum_{ij} <term_product_coefficient(a[i], b[j], i, j), z**(i+j)>.
output_degree = len(a) + len(b) - 2
output = [additive_identity] * (output_degree + 1)
# If a and b have length n, this takes time O(n^2). If we ever care about
# large n, we could consider implementing an O(n log n) algorithm using
# Fourier transforms.
for i, c0 in enumerate(a):
for j, c1 in enumerate(b):
c = term_product_coefficient(c0, c1, i, j)
output[i+j] = add(output[i+j], c)
return tuple(output) |
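With the default arguments this is just ordinary polynomial multiplication; a quick scalar check:

```python
# (1 + 2z) * (3 + z) == 3 + 7z + 2z**2
print(arbitrary_bilinear([1.0, 2.0], [3.0, 1.0]))  # (3.0, 7.0, 2.0)
```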
Returns the coefficients of a polynomial raised to a power.
The argument a gives the coefficients of a polynomial, defined in terms
of some inner product <x, y> and some exponentiation operator:
P_a(z) = sum_{i=0}^{len(a)-1} <a[i], z**i>.
Let op be some bilinear, associative, and commutative operation. We define:
power(a, 0) == multiplicative_identity
power(a, k) = op(a, power(a, k-1)).
This code uses the functions provided as arguments to efficiently compute
the coefficients of the polynomial power(a, exponent).
When the coefficients of P_a are intervals, this efficient computation
translates into tighter intervals in the returned coefficients.
Args:
a: a polynomial (sequence of coefficients)
exponent: a non-negative integer exponent
add: a function that returns the sum of two polynomial coefficients
additive_identity: an additive identity object
multiplicative_identity: a multiplicative identity object
term_product_coefficient: a callable that, given arguments c0, c1, i, j,
returns d such that op(<c0, z**i>, <c1, z**j>) = <d, z**(i+j)>, where op
is the underlying bilinear operation.
term_power_coefficient: given arguments c, i, and j, returns d such that:
(c * z**i)**j == d * z**(i*j)
scalar_product: a callable that, given as arguments a non-negative integer i
and coefficient c, returns the result of adding c to itself i times.
Returns:
the coefficients of the polynomial P_a, raised to the exponent power. | def integer_power(
a: Sequence[FooLike],
exponent: int,
add: Callable[[FooLike, FooLike], Foo] = operator.add,
additive_identity: Foo = 0,
multiplicative_identity: Foo = 1,
term_product_coefficient: Callable[[FooLike, FooLike, int, int], Foo]
= lambda c0, c1, i, j: c0*c1,
term_power_coefficient: Callable[[FooLike, int, int], Foo]
= lambda c, i, j: c**j,
scalar_product: Callable[[int, FooLike], Foo] = operator.mul
) -> tuple[Foo, ...]:
"""Returns the coefficients of a polynomial raised to a power.
The argument a gives the coefficients of a polynomial, defined in terms
of some inner product <x, y> and some exponentiation operator:
P_a(z) = sum_{i=0}^{len(a)-1} <a[i], z**i>.
Let op be some bilinear, associative, and commutative operation. We define:
power(a, 0) == multiplicative_identity
power(a, k) = op(a, power(a, k-1)).
This code uses the functions provided as arguments to efficiently compute
the coefficients of the polynomial power(a, exponent).
When the coefficients of P_a are intervals, this efficient computation
translates into tighter intervals in the returned coefficients.
Args:
a: a polynomial (sequence of coefficients)
exponent: a non-negative integer exponent
add: a function that returns the sum of two polynomial coefficients
additive_identity: an additive identity object
multiplicative_identity: a multiplicative identity object
term_product_coefficient: a callable that, given arguments c0, c1, i, j,
returns d such that op(<c0, z**i>, <c1, z**j>) = <d, z**(i+j)>, where op
is the underlying bilinear operation.
term_power_coefficient: given arguments c, i, and j, returns d such that:
(c * z**i)**j == d * z**(i*j)
scalar_product: a callable that, given as arguments a non-negative integer i
and coefficient c, returns the result of adding c to itself i times.
Returns:
the coefficients of the polynomial P_a, raised to the exponent power.
"""
if exponent < 0:
raise ValueError(exponent)
elif exponent == 0:
return (multiplicative_identity,)
else:
# To understand what this code is doing, it is helpful to consider the
# special case where `a` is a sequence of floats, and all arguments have
# their default values. Then, we just need to compute the coefficients
# of the scalar polynomial:
#
# (a[0] + a[1]*z**1 + ... + a[k-1])**exponent
#
# where k = len(a).
#
# Using the multinomial theorem, the result is a polynomial whose ith
# coefficient is:
#
# sum_{p in Partitions(i, exponent, k)}
# (exponent choose (p_0, p_1, ..., p_{k-1})) *
# Prod_{j=0}^{k-1} a[j]**p_j
#
# where Partitions(i, exponent, k) is the set of length-k non-negative
# integer tuples whose elements sum to `exponent`, and that furthermore
# satisfy sum_{j=0}^{k-1} j*p_j == i.
#
# The code below uses a generalization of this idea that works for an
# arbitrary commutative and associative bilinear operation (rather than
# just scalar multiplication). In the general version, the product
# series Prod_{j=0}^{k-1} a[j]**p_j is computed via appropriate calls to
# term_product_coefficient(), term_power_coefficient() and scalar_product().
def get_coeff(i: int) -> Foo:
c = additive_identity
for p in _iter_partitions(i, exponent, len(a)):
assert sum(p) == exponent
assert sum(j*p_j for j, p_j in enumerate(p)) == i
running_product = multiplicative_identity
running_product_power = 0
for j, p_j in enumerate(p):
running_product = term_product_coefficient(
running_product,
term_power_coefficient(a[j], j, p_j),
running_product_power,
j*p_j
)
running_product_power += j*p_j
assert running_product_power == i
term = scalar_product(_multinomial_coefficient(p), running_product)
c = add(c, term)
return c
output_degree = (len(a) - 1) * exponent
return tuple(get_coeff(i) for i in range(1 + output_degree)) |
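With the default (scalar) arguments this reproduces ordinary polynomial exponentiation; for example:

```python
# (1 + 2z)**2 == 1 + 4z + 4z**2
print(integer_power([1.0, 2.0], 2))  # (1.0, 4.0, 4.0)
```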
Yields length-k tuples with sum m and sum_{j=1}^k (j-1)*i_j == n. | def _iter_partitions(
n: int, m: int, k: int) -> Iterator[tuple[int, ...]]:
"""Yields length-k tuples with sum m and sum_{j=1}^k (j-1)*i_j == n."""
if n < 0:
raise ValueError(n)
if m < 0:
raise ValueError(m)
if k <= 0:
raise ValueError(k)
if k == 1:
if n == 0:
yield (m,)
else:
for z in range(min(m+1, n // (k-1) + 1)):
for p in _iter_partitions(n - (k-1)*z, m - z, k - 1):
yield p + (z,) |
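For instance, the weighted partitions used for the degree-2 output coefficient of a squared degree-2 polynomial (n = 2, m = 2, k = 3) are:

```python
# Tuples (p_0, p_1, p_2) with p_0 + p_1 + p_2 == 2 and 0*p_0 + 1*p_1 + 2*p_2 == 2.
print(list(_iter_partitions(2, 2, 3)))  # [(0, 2, 0), (1, 0, 1)]
```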
Returns (n choose (ks[0], ks[1], ...)), where n = sum(ks). | def _multinomial_coefficient(ks: Sequence[int]) -> int:
"""Returns (n choose (ks[0], ks[1], ...)), where n = sum(ks)."""
if not ks:
raise ValueError(ks)
elif len(ks) == 1:
return 1
else:
return math.comb(sum(ks), ks[0]) * _multinomial_coefficient(ks[1:]) |
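For example, (4 choose (2, 1, 1)) = 4! / (2! * 1! * 1!) = 12:

```python
print(_multinomial_coefficient([2, 1, 1]))  # comb(4, 2) * comb(2, 1) * comb(1, 1) = 12
```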
Returns ElementwiseTaylorEnclosure for function with given ID.
Args:
function_id: an `elementwise_functions.FunctionId`
x0: reference point
trust_region: trust region over which enclosure is valid
degree: the degree of the returned `ElementwiseTaylorEnclosure`
np_like: a `NumpyLike` backend
Returns:
an `ElementwiseTaylorEnclosure` for the elementwise function specified by
`function_id`. | def get_elementwise_taylor_enclosure(
function_id: elementwise_functions.FunctionId,
x0: NDArray,
trust_region: Interval,
degree: int,
np_like: NumpyLike) -> ElementwiseTaylorEnclosure:
"""Returns ElementwiseTaylorEnclosure for function with given ID.
Args:
function_id: an `elementwise_functions.FunctionId`
x0: reference point
trust_region: trust region over which enclosure is valid
degree: the degree of the returned `ElementwiseTaylorEnclosure`
np_like: a `NumpyLike` backend
Returns:
an `ElementwiseTaylorEnclosure` for the elementwise function specified by
`function_id`.
"""
f = elementwise_functions.get_function(function_id, np_like)
deriv_id = function_id.derivative_id(degree)
deriv_data = elementwise_functions.get_function_data(deriv_id)
taylor_coefficients = functools.partial(
elementwise_functions.get_taylor_polynomial_coefficients,
function_id, x0=x0, np_like=np_like)
if (deriv_data.monotonically_increasing or
deriv_data.monotonically_decreasing):
return sharp_enclosure_monotonic_derivative(
x0, degree, trust_region, f, taylor_coefficients(degree),
deriv_data.monotonically_increasing, np_like)
elif degree == 2 and deriv_data.even_symmetric:
return sharp_quadratic_enclosure_even_symmetric_hessian(
x0, trust_region, f, taylor_coefficients(degree), np_like)
else:
# For indices where the derivative is monotonically decreasing or
# monotonically increasing over the trust region, we return the sharp
# enclosure. For other indices, we fall back to using the enclosure
# based on the range of the derivative.
coeffs = taylor_coefficients(degree)
enclosure_if_decreasing, enclosure_if_increasing = [
sharp_enclosure_monotonic_derivative(
x0, degree, trust_region, f, coeffs, increasing, np_like)
for increasing in [False, True]
]
decreasing, increasing = deriv_data.monotone_over(trust_region, np_like)
deriv_range = elementwise_functions.get_range(deriv_id, trust_region,
np_like)
fallback = bounded_derivative_enclosure(degree, coeffs[:-1], deriv_range)
def endpoint(i: int):
return np_like.where(
decreasing,
enclosure_if_decreasing[-1][i],
np_like.where(
increasing,
enclosure_if_increasing[-1][i],
fallback[-1][i]
)
)
final_interval = (endpoint(0), endpoint(1))
return ElementwiseTaylorEnclosure(
tuple(coeffs[:degree]) + (final_interval,)) |
Returns an ElementwiseTaylorEnclosure for x**exponent in terms of x-x0. | def pow_enclosure(exponent: float,
x0: NDArray,
trust_region: Interval,
degree: int,
np_like: NumpyLike) -> ElementwiseTaylorEnclosure:
"""Returns an ElementwiseTaylorEnclosure for x**exponent in terms of x-x0."""
taylor_coefficients_at_x0 = []
c = 1.
for i in range(degree + 1):
# Note: the next line can sometimes generate bogus RuntimeWarnings when
# using Numpy. This seems to be a bug in Numpy, as even doing
# np.array(2.)**-1 generates the same RuntimeWarning.
taylor_coefficients_at_x0.append(c * x0**(exponent - i) / math.factorial(i))
c *= exponent - i
# Compute sharp enclosures of x**exponent for the case x > 0 (enc_pos below),
# and the case x < 0 (enc_neg below).
#
# In each of these cases, the (degree)th derivative of x**exponent will
# either be monotonically increasing or monotonically decreasing. Compute
# the sharp enclosures for both cases (increasing or decreasing), then use
# the sign of the (degree+1)st derivative to determine which one works for
# the cases x > 0 and x < 0.
# pylint: disable=g-complex-comprehension
enc_decreasing, enc_increasing = [
sharp_enclosure_monotonic_derivative(
x0, degree, trust_region, lambda x: x**exponent,
taylor_coefficients_at_x0, increasing, np_like
)
for increasing in [False, True]
]
enc_pos = (enc_increasing if _pow_kth_deriv_sign(1, exponent, degree+1) >= 0
else enc_decreasing)
if int(exponent) != exponent:
enc_neg = None
else:
enc_neg = (enc_increasing
if _pow_kth_deriv_sign(-1, exponent, degree+1) >= 0
else enc_decreasing)
def interval_endpoint(i):
"""Returns left (i == 0) or right (i == 1) endpoint of interval."""
a, b = trust_region
# For each index i, the interval between a[i] and b[i] either contains zero,
# or contains only positive values, or contains only negative values.
# Compute the endpoints for all three cases, and combine the results using
# np_like.where().
endpoint_if_always_positive = enc_pos[-1][i]
if int(exponent) != exponent:
# If exponent is not an integer, then z**exponent is undefined for z < 0.
# We return the interval (-inf, inf) in this case.
endpoint_if_always_negative = -np_like.inf if i == 0 else np_like.inf
endpoint_if_possibly_zero = endpoint_if_always_negative
elif exponent < 0:
endpoint_if_always_negative = enc_neg[-1][i]
endpoint_if_possibly_zero = -np_like.inf if i == 0 else np_like.inf
else:
endpoint_if_always_negative = enc_neg[-1][i]
endpoint_if_possibly_zero = functools.reduce(
np_like.minimum if i == 0 else np_like.maximum,
[endpoint_if_always_positive, endpoint_if_always_negative]
)
return np_like.where(
a >= 0,
endpoint_if_always_positive,
np_like.where(
b <= 0,
endpoint_if_always_negative,
endpoint_if_possibly_zero
)
)
interval_coefficient = tuple(interval_endpoint(i) for i in [0, 1])
return ElementwiseTaylorEnclosure(
enc_decreasing[:-1] + (interval_coefficient,)) |
Returns sharp degree-k enclosure assuming monotone k-th derivative.
Args:
x0: the center point for the Taylor enclosure
degree: the degree of the enclosure to return
trust_region: the trust region over which to compute an enclosure
sigma: the function for which to compute a sharp polynomial enclosure
taylor_coefficients_at_x0: the first (degree+1) coefficients of the
Taylor series expansion of sigma at x0.
increasing: whether the (degree)th derivative of sigma is increasing
or decreasing
np_like: a NumpyLike backend
Returns:
a sharp ElementwiseTaylorEnclosure for sigma | def sharp_enclosure_monotonic_derivative(
x0: NDArray,
degree: int,
trust_region: Interval,
sigma: Callable[[NDArray], NDArray],
taylor_coefficients_at_x0: Sequence[NDArray],
increasing: bool,
np_like: NumpyLike
) -> ElementwiseTaylorEnclosure:
"""Returns sharp degree-k enclosure assuming monotone k-th derivative.
Args:
x0: the center point for the Taylor enclosure
degree: the degree of the enclosure to return
trust_region: the trust region over which to compute an enclosure
sigma: the function for which to compute a sharp polynomial enclosure
taylor_coefficients_at_x0: the first (degree+1) coefficients of the
Taylor series expansion of sigma at x0.
increasing: whether the (degree)th derivative of sigma is increasing
or decreasing
np_like: a NumpyLike backend
Returns:
a sharp ElementwiseTaylorEnclosure for sigma
"""
if degree < 0:
raise ValueError(degree)
ratio = functools.partial(taylor_remainder_ratio,
x0, degree, sigma,
taylor_coefficients_at_x0,
np_like=np_like)
a, b = trust_region
if increasing:
final_interval = (ratio(a), ratio(b))
else:
final_interval = (ratio(b), ratio(a))
return ElementwiseTaylorEnclosure(
tuple(taylor_coefficients_at_x0[:degree]) + (final_interval,)
) |
Returns sharp quadratic enclosure for function with even-symmetric Hessian.
It's assumed that the Hessian is decreasing at z >= 0.
Args:
x0: the center point for the Taylor enclosure
trust_region: the trust region over which to compute an enclosure
sigma: an elementwise function for which to compute a Taylor enclosure
taylor_coefficients_at_x0: the first two coefficients of the
Taylor series expansion of sigma at x0.
np_like: a Numpy-like back end. | def sharp_quadratic_enclosure_even_symmetric_hessian(
x0: NDArray,
trust_region: Interval,
sigma: Callable[[NDArray], NDArray],
taylor_coefficients_at_x0: Sequence[NDArray],
np_like: NumpyLike
) -> ElementwiseTaylorEnclosure:
"""Returns sharp quadratic enclosure for function with even-symmetric Hessian.
It's assumed that the Hessian is decreasing at z >= 0.
Args:
x0: the center point for the Taylor enclosure
trust_region: the trust region over which to compute an enclosure
sigma: an elementwise function for which to compute a Taylor enclosure
taylor_coefficients_at_x0: the first two coefficients of the
Taylor series expansion of sigma at x0.
np_like: a Numpy-like back end.
"""
ratio = functools.partial(taylor_remainder_ratio,
x0, 2, sigma,
taylor_coefficients_at_x0,
np_like=np_like)
a, b = trust_region
max_ratio = ratio(np_like.clip(-x0, a, b))
min_ratio = np_like.minimum(ratio(a), ratio(b))
final_interval = (min_ratio, max_ratio)
return ElementwiseTaylorEnclosure(
tuple(taylor_coefficients_at_x0[:2]) + (final_interval,)
) |
Returns R_{degree - 1}(x; sigma, x0) / (x - x0)**degree. | def taylor_remainder_ratio(
x0: NDArray,
degree: int,
sigma: Callable[[NDArray], NDArray],
taylor_coefficients_at_x0: Sequence[NDArray],
x: NDArray,
np_like: NumpyLike):
"""Returns R_{degree - 1}(x; sigma, x0) / (x - x0)**degree."""
if len(taylor_coefficients_at_x0) != degree + 1:
raise ValueError(degree, taylor_coefficients_at_x0)
# Let r_k denote the degree k Taylor series remainder.
#
# Letting k = degree, we want to return r_{k-1} / (x-x0)**k, but in a way that
# is numerically stable when x-x0 is small (and that is well-defined when
# x=x0).
#
# We do so using:
# r_{k-1} / (x-x0)**k = (r_k + c_k*(x-x0)**k) / (x-x0)**k
# = c_k + r_k / (x-x0)**k.
r_k = sigma(x) - sum(
c * (x - x0)**i for i, c in enumerate(taylor_coefficients_at_x0))
denom = (x-x0)**degree
return (
taylor_coefficients_at_x0[degree] +
# Return r_k * 1 / denom, capping the magnitude of 1 / denom at 1e12.
# TODO(mstreeter): this results in an enclosure that's not strictly valid
# when denom is very small.
r_k * np_like.sign(denom) / (np_like.maximum(1e-12, np_like.abs(denom)))
) |
Return sign of kth derivative of x**p, for x with given sign. | def _pow_kth_deriv_sign(x_sign: int, p: float, k: int) -> int:
"""Return sign of kth derivative of x**p, for x with given sign."""
# The kth derivative of x**p is p * (p-1) * ... * (p-k+1) * x**(p-k)
# == c * x**(p-k), for c computed below.
c = 1.
for i in range(k):
c *= p - i
sign = lambda z: 1 if z > 0 else (0 if z == 0 else -1)
if x_sign == 1:
return sign(c)
elif x_sign == -1:
if p == int(p):
return sign(c) * (1 if (p - k) % 2 == 0 else -1)
else:
raise ValueError('x**p undefined for non-integer p when x < 0')
else:
raise ValueError(x_sign) |
Returns order `order` derivative of sigmoid at `x`. | def sigmoid_derivative(order: int, x: float) -> float:
"""Returns order `order` derivative of sigmoid at `x`."""
if order == 0:
return sigmoid(x)
elif order == 1:
return sigmoid(x)*sigmoid(-x)
elif order == 2:
s = sigmoid(x)
return s*sigmoid(-x)*(1-2*s)
elif order == 3:
s = sigmoid(x)
sm = sigmoid(-x)
return s*sm*((1-2*s)**2 - 2*s*sm)
elif order == 4:
s = sigmoid(x)
sm = sigmoid(-x)
return (s*sm*(1-2*s)*((1-2*s)**2 - 2*s*sm) +
s*sm*(-4*(1-2*s)*s*sm - 2*s*sm*(1-2*s)))
else:
raise NotImplementedError(order) |
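A quick sanity check at x = 0, assuming `sigmoid` is the standard logistic function this helper calls: sigmoid(0) = 0.5, so the first derivative is 0.5 * 0.5 = 0.25 and the second derivative is 0 (the sigmoid's inflection point).

```python
print(sigmoid_derivative(1, 0.0))  # 0.25
print(sigmoid_derivative(2, 0.0))  # 0.0, since the (1 - 2s) factor vanishes at s = 0.5
```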
Returns list of NumpyLike back ends to test. | def _get_backends() -> list[types.NumpyLike]:
"""Returns list of NumpyLike back ends to test."""
backends = [np]
try:
from jax.config import config as jax_config
import jax.numpy as jnp
backends.append(jnp)
jax_config.update('jax_enable_x64', True)
except ModuleNotFoundError:
pass
try:
import tensorflow.experimental.numpy as tnp
tnp.experimental_enable_numpy_behavior()
backends.append(tnp)
except ModuleNotFoundError:
pass
return backends |
Replace occurrences of a pattern Jaxpr within a subject Jaxpr.
This method returns a `Jaxpr` that is an edited version of `subject`
in which occurrences of `pattern` have been replaced by `replacement`.
Occurrences of `pattern` must be contiguous sequences of equations in
`subject`.
Example usage:
```python
pattern_fun = jnp.exp
replacement_fun = jnp.log
subject_fun = lambda x: 2*jnp.exp(x+1)
to_jaxpr = lambda f: jax.make_jaxpr(f)(0.).jaxpr
replace(
to_jaxpr(pattern_fun),
to_jaxpr(replacement_fun),
to_jaxpr(subject_fun),
) # returns Jaxpr for 2*jnp.log(x+1).
```
Args:
pattern: a `Jaxpr`
replacement: a `Jaxpr`
subject: a `Jaxpr`
Returns:
a `Jaxpr` in which occurrences of `pattern` have been replaced by
`replacement`. | def replace(pattern: jax.core.Jaxpr,
replacement: jax.core.Jaxpr,
subject: jax.core.Jaxpr) -> jax.core.Jaxpr:
"""Replace occurrences of a pattern Jaxpr within a subject Jaxpr.
This method returns a `Jaxpr` that is an edited version of `subject`
in which occurrences of `pattern` have been replaced by `replacement`.
Occurrences of `pattern` must be contiguous sequences of equations in
`subject`.
Example usage:
```python
pattern_fun = jnp.exp
replacement_fun = jnp.log
subject_fun = lambda x: 2*jnp.exp(x+1)
to_jaxpr = lambda f: jax.make_jaxpr(f)(0.).jaxpr
replace(
to_jaxpr(pattern_fun),
to_jaxpr(replacement_fun),
to_jaxpr(subject_fun),
) # returns Jaxpr for 2*jnp.log(x+1).
```
Args:
pattern: a `Jaxpr`
replacement: a `Jaxpr`
subject: a `Jaxpr`
Returns:
a `Jaxpr` in which occurrences of `pattern` have been replaced by
`replacement`.
"""
if len(pattern.invars) != len(replacement.invars):
raise ValueError()
if len(pattern.outvars) != len(replacement.outvars):
raise ValueError()
if pattern.constvars or replacement.constvars:
raise NotImplementedError()
pattern_graph = _jaxpr_to_graph(pattern)
# When creating the replacement graph, offset the variable counts to keep
# them unique.
max_count = max(
v[1] if v[0] else -1 # pytype: disable=unsupported-operands
for v in pattern_graph.intermediate_variables()
)
replacement_graph = _jaxpr_to_graph(replacement, offset=max_count + 1)
# In the replacement graph, replace inputs/outputs with those in the pattern
# graph.
intermediate_variable_map = {}
for u, v in itertools.chain(
zip(replacement_graph.inputs, pattern_graph.inputs),
zip(replacement_graph.outputs, pattern_graph.outputs),
):
intermediate_variable_map[u] = v
replacement_operations = [
e.subs(intermediate_variable_map) for e in replacement_graph.operations
]
hypergraph = graph_editor.replace(
pattern_graph.operations,
replacement_operations,
_jaxpr_to_graph(subject),
_can_bind,
)
return _graph_to_jaxpr(hypergraph) |
Returns a ComputationGraph that represents a Jaxpr.
Args:
jaxpr: a `Jaxpr`
offset: an offset for the indices that appear in intermediate variables.
This can be used to ensure uniqueness.
Returns:
a ComputationGraph that represents `jaxpr`. | def _jaxpr_to_graph(jaxpr: jax.core.Jaxpr,
offset: int = 0) -> graph_editor.ComputationGraph:
"""Returns a ComputationGraph that represents a Jaxpr.
Args:
jaxpr: a `Jaxpr`
offset: an offset for the indices that appear in intermediate variables.
This can be used to ensure uniqueness.
Returns:
a ComputationGraph that represents `jaxpr`.
"""
def get_intermediate_variable(
var_or_literal: Union[jax.core.Var, jax.core.Literal]
) -> _IntermediateVariable:
if isinstance(var_or_literal, jax.core.Var):
var = var_or_literal
return (True, var.count + offset, var.suffix, var.aval)
elif isinstance(var_or_literal, jax.core.Literal):
literal = var_or_literal
return (False, literal.val, literal.aval)
else:
raise NotImplementedError()
operations = []
for eqn in jaxpr.eqns:
data = _EqnData(eqn.primitive, eqn.params)
edge = graph_editor.Operation(
data,
[get_intermediate_variable(v) for v in eqn.invars],
[get_intermediate_variable(v) for v in eqn.outvars]
)
operations.append(edge)
data = [get_intermediate_variable(v) for v in jaxpr.constvars]
return graph_editor.ComputationGraph(
[get_intermediate_variable(v) for v in jaxpr.invars],
[get_intermediate_variable(v) for v in jaxpr.outvars],
operations,
data=data
) |
Returns the Jaxpr represented by a ComputationGraph. | def _graph_to_jaxpr(h: graph_editor.ComputationGraph) -> jax.core.Jaxpr:
"""Returns the Jaxpr represented by a ComputationGraph."""
count_to_var = {}
def vertex_to_var_or_literal(vertex):
if vertex[0]:
_, count, suffix, aval = vertex
if count not in count_to_var:
count_to_var[count] = jax.core.Var(count, suffix, aval)
return count_to_var[count]
else:
_, val, aval = vertex
return jax.core.Literal(val, aval)
eqns = []
for edge in h.operations:
eqn = jax.core.new_jaxpr_eqn(
invars=[vertex_to_var_or_literal(u) for u in edge.inputs],
outvars=[vertex_to_var_or_literal(v) for v in edge.outputs],
primitive=edge.data.primitive,
params=edge.data.params,
effects=set()
)
eqns.append(eqn)
invars = [vertex_to_var_or_literal(v) for v in h.inputs]
outvars = [vertex_to_var_or_literal(v) for v in h.outputs]
constvars = [vertex_to_var_or_literal(v) for v in h.data]
return jax.core.Jaxpr(constvars, invars, outvars, eqns) |
Returns whether two Jaxpr equation params dicts are equivalent. | def _jaxpr_eqn_params_equiv(p0, p1) -> bool:
"""Returns whether two Jaxpr equation params dicts are equivalent."""
if set(p0.keys()) != set(p1.keys()):
return False
for k0, v0 in p0.items():
if k0 in _JAXPR_EQN_PARAMS_TO_IGNORE:
continue
v1 = p1[k0]
if isinstance(v0, jax.core.ClosedJaxpr):
# TODO(mstreeter): this could incorrectly return True if v0.consts and
# v1.consts are different.
if not _same_jaxpr_up_to_variable_renaming(v0.jaxpr, v1.jaxpr,
ignore_shape=True):
return False
elif isinstance(v0, jax.core.Jaxpr):
if not _same_jaxpr_up_to_variable_renaming(v0, v1, ignore_shape=True):
return False
elif isinstance(v0, types.FunctionType):
if not isinstance(v1, types.FunctionType):
return False
elif v0 != v1:
return False
return True |
Return whether two Jaxprs are identical up to variable renaming. | def _same_jaxpr_up_to_variable_renaming(j0: jax.core.Jaxpr,
j1: jax.core.Jaxpr,
ignore_shape: bool = False) -> bool:
"""Return whether to Jaxprs are identical up to variable renaming."""
var_map = {}
def check(v0, v1):
if isinstance(v0, jax.core.Literal):
return (isinstance(v1, jax.core.Literal) and v0.val == v1.val and
v0.aval == v1.aval)
elif isinstance(v0, jax.core.Var):
if v0 not in var_map:
if _match_avals(v0.aval, v1.aval, ignore_shape):
var_map[v0] = v1
return True
else:
return False
else:
return var_map[v0] == v1
else:
raise NotImplementedError(v0)
if (len(j0.constvars) != len(j1.constvars) or
len(j0.invars) != len(j1.invars) or
len(j0.outvars) != len(j1.outvars) or
len(j0.eqns) != len(j1.eqns)):
return False
for v0, v1 in itertools.chain(zip(j0.invars, j1.invars),
zip(j0.constvars, j1.constvars),
zip(j0.outvars, j1.outvars)):
if not check(v0, v1):
return False
for eq0, eq1 in zip(j0.eqns, j1.eqns):
if eq0.primitive != eq1.primitive:
return False
if not _jaxpr_eqn_params_equiv(eq0.params, eq1.params):
return False
if (len(eq0.invars) != len(eq1.invars) or
len(eq0.outvars) != len(eq1.outvars)):
return False
for v0, v1 in zip(eq0.invars, eq1.invars):
if not check(v0, v1):
return False
for v0, v1 in zip(eq0.outvars, eq1.outvars):
if not check(v0, v1):
return False
return True |
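A minimal usage sketch (assuming `jax` and `jax.numpy` are importable, as in the functions above): structurally identical functions should compare equal up to renaming, while functions using different primitives should not.
```python
import jax
import jax.numpy as jnp

to_jaxpr = lambda f: jax.make_jaxpr(f)(0.).jaxpr
# Same computation, different variable names.
assert _same_jaxpr_up_to_variable_renaming(
    to_jaxpr(lambda x: jnp.exp(x) + 1), to_jaxpr(lambda y: jnp.exp(y) + 1))
# Different primitive (log vs. exp).
assert not _same_jaxpr_up_to_variable_renaming(
    to_jaxpr(lambda x: jnp.exp(x) + 1), to_jaxpr(lambda x: jnp.log(x) + 1))
```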
Returns version of f that returns a TaylorBounds object.
Args:
f: a function that takes a jnp.ndarray as input, and returns a jnp.ndarray
max_degree: the maximum degree TaylorEnclosure for the returned function to
return
propagate_trust_regions: if True, trust regions are propagated through the
Jaxpr, rather than being computed from higher-degree enclosures. This
results in tighter bounds at the cost of additional memory.
Returns:
a function that takes as input a jnp.ndarray x0, and a trust region
(min_vals, max_vals), and returns a TaylorBounds object `bound` such that
`bound.coefficients` is a TaylorEnclosure g of degree at most max_degree,
such that:
f(x) in g(x-x0) for all x with min_vals <= x <= max_vals | def taylor_bounds(
f: Callable[[jnp.ndarray], jnp.ndarray],
max_degree: int,
propagate_trust_regions: bool = False,
) -> Callable[[jnp.ndarray, tuple[jnp.ndarray, jnp.ndarray]], TaylorBounds]:
"""Returns version of f that returns a TaylorBounds object.
Args:
f: a function that takes a jnp.ndarray as input, and returns a jnp.ndarray
max_degree: the maximum degree TaylorEnclosure for the returned function to
return
propagate_trust_regions: if True, trust regions are propagated through the
Jaxpr, rather than being computed from higher-degree enclosures. This
results in tighter bounds at the cost of additional memory.
Returns:
a function that takes as input a jnp.ndarray x0, and a trust region
(min_vals, max_vals), and returns a TaylorBounds object `bound` such that
`bound.coefficients` is a TaylorEnclosure g of degree at most max_degree,
such that:
f(x) in g(x-x0) for all x with min_vals <= x <= max_vals
"""
if max_degree < 0:
raise ValueError(max_degree)
if max_degree == 0:
propagate_trust_regions = False # avoid redundant computation
jaxpr_factory = jax.make_jaxpr(f)
def bound_fun(
x0: jnp.ndarray, x_trust_region: types.Interval
) -> TaylorBounds:
trust_region = interval_arithmetic.IntervalArithmetic(jnp).subtract(
x_trust_region, x0)
arithmetic = enclosure_arithmetic.TaylorEnclosureArithmetic(
max_degree, trust_region, jnp)
primitive_to_enclosure_fun = _pushforward_funs(arithmetic)
degree_0_arithmetic = (
enclosure_arithmetic.TaylorEnclosureArithmetic(0, trust_region, jnp))
primitive_to_enclosure_fun0 = _pushforward_funs(degree_0_arithmetic)
closed_jaxpr = jaxpr_factory(x0)
jaxpr = _rewrite_jaxpr(closed_jaxpr.jaxpr)
x0 = jnp.asarray(x0)
if x0.ndim == 0:
identity = jnp.asarray(1.)
elif x0.ndim == 1:
identity = jnp.eye(x0.shape[0])
else:
raise NotImplementedError(x0.ndim)
x0_enclosure = types.TaylorEnclosure(
(x0, identity) if max_degree > 0 else (x_trust_region,))
assert len(closed_jaxpr.consts) == len(jaxpr.constvars)
var_to_intermediate = {
var: _constant_intermediate_enclosure(val)
for var, val in zip(jaxpr.constvars, closed_jaxpr.consts)
}
assert len(jaxpr.invars) == 1
var_to_intermediate[jaxpr.invars[0]] = _IntermediateEnclosure(
enclosure=x0_enclosure,
trust_region=x_trust_region if propagate_trust_regions else None
)
def get_intermediate(
invar: Union[jax.core.Var, jax.core.Literal]) -> _IntermediateEnclosure:
if isinstance(invar, jax.core.Var):
return var_to_intermediate[invar]
else:
assert isinstance(invar, jax.core.Literal)
return _constant_intermediate_enclosure(invar.val)
for eqn in jaxpr.eqns:
invar_intermediates = [get_intermediate(invar) for invar in eqn.invars]
has_non_constant_invars = any(not intermediate.is_constant()
for intermediate in invar_intermediates)
if has_non_constant_invars:
fun = primitive_to_enclosure_fun.get(eqn.primitive)
if fun is None:
raise NotImplementedError(eqn.primitive)
outvar_enclosures = fun(*invar_intermediates, **eqn.params)
if len(eqn.outvars) == 1:
outvar_enclosures = (outvar_enclosures,)
if propagate_trust_regions:
fun0 = primitive_to_enclosure_fun0.get(eqn.primitive)
assert fun0 is not None
assert all(i.trust_region is not None for i in invar_intermediates)
invar_degree_0_intermediates = [
_IntermediateEnclosure(
enclosure=types.TaylorEnclosure((intermediate.trust_region,)))
for intermediate in invar_intermediates
]
outvar_degree_0_enclosures_a = fun0(*invar_degree_0_intermediates,
**eqn.params)
if len(eqn.outvars) == 1:
outvar_degree_0_enclosures_a = [outvar_degree_0_enclosures_a]
assert len(outvar_degree_0_enclosures_a) == len(outvar_enclosures)
outvar_degree_0_enclosures_b = [
enclosure_arithmetic.enclose_enclosure(enclosure, trust_region,
0, jnp)
for enclosure in outvar_enclosures
]
outvar_trust_regions = [
_intersect_intervals(a[0], b[0])
for a, b in zip(outvar_degree_0_enclosures_a,
outvar_degree_0_enclosures_b)
]
for i, (a, b) in enumerate(outvar_trust_regions):
# It should always be the case that the actual value of the ith
# output of a function (y0 below) is inside the associated trust
# region. But this invariant may not hold due to floating
# point roundoff error, so we enforce it here.
#
# TODO(mstreeter): add a test case that fails if we remove this.
y0 = outvar_enclosures[i][0]
outvar_trust_regions[i] = (jnp.minimum(y0, a), jnp.maximum(y0, b)) # pytype: disable=wrong-arg-types
else:
outvar_trust_regions = (None,) * len(outvar_enclosures)
assert all(isinstance(v, tuple) for v in outvar_enclosures), (
eqn.primitive, fun, outvar_enclosures)
outvar_intermediates = tuple(
_IntermediateEnclosure(enclosure=e, trust_region=r)
for r, e in zip(outvar_trust_regions, outvar_enclosures)
)
else:
invar_values = tuple(intermediate.constant_value()
for intermediate in invar_intermediates)
vals = eqn.primitive.bind(*invar_values, **eqn.params)
if len(eqn.outvars) == 1:
vals = (vals,)
outvar_intermediates = [_constant_intermediate_enclosure(v)
for v in vals]
assert len(outvar_intermediates) == len(eqn.outvars), (
eqn.primitive, len(outvar_intermediates), len(eqn.outvars))
for var, intermediate in zip(eqn.outvars, outvar_intermediates):
if var.count == -1:
continue # skip unused output variables
assert var not in var_to_intermediate
assert isinstance(intermediate.enclosure, tuple), (
eqn.primitive, intermediate)
_validate_taylor_enclosure(intermediate.enclosure, x0.shape)
var_to_intermediate[var] = intermediate
assert len(jaxpr.outvars) == 1
output_intermediate = get_intermediate(jaxpr.outvars[0])
return TaylorBounds(f=f, x0=x0, x_trust_region=x_trust_region,
coefficients=output_intermediate.enclosure)
return bound_fun |
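A minimal usage sketch following the docstring above: a degree-2 enclosure of exp around 0 on the trust region [-1, 1]. This assumes the exp primitive has a registered elementwise enclosure, and the printed structure is illustrative.
```python
import jax.numpy as jnp

bound_fun = taylor_bounds(jnp.exp, max_degree=2)
bounds = bound_fun(jnp.asarray(0.), (jnp.asarray(-1.), jnp.asarray(1.)))
# bounds.coefficients is a TaylorEnclosure (c0, c1, (lo, hi)) such that
# exp(x) lies in c0 + c1*x + [lo, hi]*x**2 for all x in [-1, 1].
print(bounds.coefficients)
```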
Register an enclosure-generating function for a user-defined primitive.
Args:
p: a jax.core.Primitive
get_enclosure: an ElementwiseEnclosureGeneratingFunction for p. | def register_elementwise_primitive(
p: jax.core.Primitive,
get_enclosure: ElementwiseEnclosureGeneratingFunction):
"""Register an enclosure-generating function for a user-defined primitive.
Args:
p: a jax.core.Primitive
get_enclosure: an ElementwiseEnclosureGeneratingFunction for p.
"""
_ELEMENTWISE_PRIMITIVE_ENCLOSURES[p] = get_enclosure |
Register enclosure-generating function for elementwise Jax function. | def _register_elementwise_function(
f: Callable[[jnp.ndarray], jnp.ndarray],
get_enclosure: ElementwiseEnclosureGeneratingFunction,
):
"""Register enclosure-generating function for elementwise Jax function."""
name = f'__autobound_{f.__name__}__'
if name in _PRIMITIVE_NAMES:
raise ValueError(f)
_PRIMITIVE_NAMES.add(name)
p = jax.core.Primitive(name)
p.def_abstract_eval(
lambda x: jax.core.ShapedArray(x.shape, x.dtype))
rule = lambda: (jax.make_jaxpr(f)(0.).jaxpr, jax.make_jaxpr(p.bind)(0.).jaxpr)
_JAXPR_REWRITE_RULES.append(rule)
register_elementwise_primitive(p, get_enclosure) |
Rewrite a Jaxpr to make it suitable for use by taylor_bounds(). | def _rewrite_jaxpr(jaxpr: jax.core.Jaxpr) -> jax.core.Jaxpr:
"""Rewrite a Jaxpr to make is suitable for use by taylor_bounds()."""
for rule_generator in _JAXPR_REWRITE_RULES:
pattern, replacement = rule_generator()
jaxpr = jaxpr_editor.replace(pattern, replacement, jaxpr)
return jaxpr |
Enclosure-generating function for jax.lax.broadcast_in_dim. | def _broadcast_in_dim_pushforward_fun(intermediate, shape,
broadcast_dimensions):
"""Enclosure-generating function for jax.lax.broadcast_in_dim."""
enclosure = intermediate.enclosure
x0 = enclosure[0]
if isinstance(x0, tuple):
x0 = enclosure[0][0]
x_shape = (() if len(enclosure) == 1 else enclosure[1].shape[x0.ndim:])
def broadcast_ndarray(a, i):
return jax.lax.broadcast_in_dim(a, shape + i*x_shape, broadcast_dimensions)
def broadcast_ndarray_or_interval(a, i):
if isinstance(a, tuple):
return tuple(broadcast_ndarray(x, i) for x in a)
else:
return broadcast_ndarray(a, i)
return tuple(
broadcast_ndarray_or_interval(coeff, i)
for i, coeff in enumerate(enclosure)
) |
Returns function that implements conv_general_dilated on enclosures. | def _conv_general_dilated_pushforward_fun(arithmetic):
"""Returns function that implements conv_general_dilated on enclosures."""
def fun(lhs_intermediate: _IntermediateEnclosure,
rhs_intermediate: _IntermediateEnclosure,
**params):
def pairwise_batched_bilinear(
a: jnp.ndarray, b: jnp.ndarray, p: int, q: int
) -> jnp.ndarray:
def move_last_n_dims_to_front(x: jnp.ndarray, n: int):
if n == 0:
return x
perm = tuple(range(x.ndim - n, x.ndim)) + tuple(range(x.ndim - n))
transposed = jnp.transpose(x, axes=perm)
return jnp.reshape(transposed, (-1,) + x.shape[:x.ndim-n])
a_reshaped = move_last_n_dims_to_front(a, p)
b_reshaped = move_last_n_dims_to_front(b, q)
c = jax.lax.conv_general_dilated_p.bind(a_reshaped, b_reshaped, **params)
if p == 0 and q == 0:
return c
elif p == 0 or q == 0:
raise NotImplementedError((p, q))
c_perm = tuple(range(2, c.ndim)) + (0, 1)
c_transposed = jnp.transpose(c, axes=c_perm)
return jnp.reshape(c_transposed,
c.shape[2:] + a.shape[a.ndim-p:] + b.shape[b.ndim-q:])
return arithmetic.arbitrary_bilinear(
lhs_intermediate.enclosure,
rhs_intermediate.enclosure,
pairwise_batched_bilinear)
return fun |
Returns function that implements dot_general on enclosures. | def _dot_general_pushforward_fun(arithmetic):
"""Returns function that implements dot_general on enclosures."""
def fun(lhs_intermediate: _IntermediateEnclosure,
rhs_intermediate: _IntermediateEnclosure,
**params):
a_contracting_dims = set(a # pylint: disable=g-complex-comprehension
for t in params['dimension_numbers']
for a in t[0])
def pairwise_batched_bilinear(
a: jnp.ndarray, b: jnp.ndarray, p: int, q: int
) -> jnp.ndarray:
transposed_output = jax.lax.dot_general_p.bind(a, b, **params)
p_start = a.ndim - p - len(a_contracting_dims)
assert p_start >= 0
n = transposed_output.ndim
# Shift axes p_start through p_start+p to the right, so that they start
# at position n-q.
assert p_start + p <= n-q, (p_start, p, q, n)
perm = (
tuple(range(p_start)) +
tuple(range(p_start+p, n-q)) +
tuple(range(p_start, p_start + p)) +
tuple(range(n-q, n))
)
assert len(set(perm)) == n, (p_start, p, q, n, perm)
return jnp.transpose(transposed_output, axes=perm)
return arithmetic.arbitrary_bilinear(
lhs_intermediate.enclosure,
rhs_intermediate.enclosure,
pairwise_batched_bilinear
)
return fun |
Returns dict from primitive to function that inputs/outputs enclosures. | def _pushforward_funs(
arithmetic: enclosure_arithmetic.TaylorEnclosureArithmetic
) -> dict[jax.core.Primitive, PushforwardFunction]:
"""Returns dict from primitive to function that inputs/outputs enclosures."""
def pushforward_integer_pow(intermediate, y: int):
return arithmetic.power(intermediate.enclosure, y)
def pushforward_pow(intermediate_0, intermediate_1):
if not intermediate_1.is_constant():
raise NotImplementedError()
exponent = float(intermediate_1.constant_value())
return arithmetic.power(intermediate_0.enclosure, exponent)
def wrap(f):
def g(*args):
return f(*[intermediate.enclosure for intermediate in args])
return g
primitive_to_enclosure_fun = {
jax.lax.add_p: wrap(arithmetic.add),
jax.lax.div_p: wrap(arithmetic.divide),
jax.lax.integer_pow_p: pushforward_integer_pow,
jax.lax.mul_p: wrap(arithmetic.multiply),
jax.lax.neg_p: wrap(arithmetic.negative),
jax.lax.pow_p: pushforward_pow,
jax.lax.sub_p: wrap(arithmetic.subtract),
# TODO(mstreeter): handle all bilinear primitives in a uniform way.
jax.lax.dot_general_p: _dot_general_pushforward_fun(arithmetic),
jax.lax.conv_general_dilated_p: _conv_general_dilated_pushforward_fun(
arithmetic),
jax.lax.broadcast_in_dim_p: _broadcast_in_dim_pushforward_fun,
}
primitive_to_enclosure_fun.update({
primitive: _elementwise_pushforward_fun(arithmetic, get_enclosure)
for primitive, get_enclosure in _ELEMENTWISE_PRIMITIVE_ENCLOSURES.items()
})
primitive_to_enclosure_fun.update({
primitive: _pass_thru_pushforward_fun(primitive)
for primitive in _PASS_THRU_PRIMITIVES
})
return primitive_to_enclosure_fun |
Unpack an argument's value from the commandline. This is part one of a two
step process in handling commandline arguments. Emits the load-cli-arg
event with service, operation, and parameter names. Example::
load-cli-arg.ec2.describe-instances.foo | def unpack_argument(session, service_name, operation_name, cli_argument, value):
"""
Unpack an argument's value from the commandline. This is part one of a two
step process in handling commandline arguments. Emits the load-cli-arg
event with service, operation, and parameter names. Example::
load-cli-arg.ec2.describe-instances.foo
"""
param_name = getattr(cli_argument, 'name', 'anonymous')
value_override = session.emit_first_non_none_response(
'load-cli-arg.%s.%s.%s' % (service_name,
operation_name,
param_name),
param=cli_argument, value=value, service_name=service_name,
operation_name=operation_name)
if value_override is not None:
value = value_override
return value |
Parses and unpacks the encoded string command line parameter
and returns native Python data structures that can be passed
to the Operation.
:type cli_argument: :class:`awscli.arguments.BaseCLIArgument`
:param cli_argument: The CLI argument object.
:param value: The value of the parameter. This can be a number of
different python types (str, list, etc). This is the value as
it's specified on the command line.
:return: The "unpacked" argument that can be sent to the `Operation`
object in python. | def unpack_cli_arg(cli_argument, value):
"""
Parses and unpacks the encoded string command line parameter
and returns native Python data structures that can be passed
to the Operation.
:type cli_argument: :class:`awscli.arguments.BaseCLIArgument`
:param cli_argument: The CLI argument object.
:param value: The value of the parameter. This can be a number of
different python types (str, list, etc). This is the value as
it's specified on the command line.
:return: The "unpacked" argument that can be sent to the `Operation`
object in python.
"""
return _unpack_cli_arg(cli_argument.argument_model, value,
cli_argument.cli_name) |
Back-port open() that accepts an encoding argument.
In Python 3 this uses the built-in open() and in Python 2 this
uses the io.open() function.
If the file is not being opened in binary mode, then we'll
use locale.getpreferredencoding() to find the preferred
encoding. | def compat_open(filename, mode='r', encoding=None, access_permissions=None):
"""Back-port open() that accepts an encoding argument.
In Python 3 this uses the built-in open() and in Python 2 this
uses the io.open() function.
If the file is not being opened in binary mode, then we'll
use locale.getpreferredencoding() to find the preferred
encoding.
"""
opener = os.open
if access_permissions is not None:
opener = partial(os.open, mode=access_permissions)
if 'b' not in mode:
encoding = locale.getpreferredencoding()
return open(filename, mode, encoding=encoding, opener=opener) |
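A hypothetical usage, writing a file readable only by the current user (the 0o600 permission applies on POSIX; Windows largely ignores it; the filename is illustrative):
```python
import os

with compat_open('example-creds.txt', 'w', access_permissions=0o600) as f:
    f.write('secret')
with compat_open('example-creds.txt', 'r') as f:
    assert f.read() == 'secret'
os.remove('example-creds.txt')
```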
Cygwin's pty's are based on pipes. Therefore, when it interacts with a Win32
program (such as Win32 python), what that program sees is a pipe instead of
a console. This is important because python buffers pipes, and so on a
pty-based terminal, text will not necessarily appear immediately. In most
cases, this isn't a big deal. But when we're doing an interactive prompt,
the result is that the prompts won't display until we fill the buffer. Since
raw_input does not flush the prompt, we need to manually write and flush it.
See https://github.com/mintty/mintty/issues/56 for more details. | def compat_input(prompt):
"""
Cygwin's pty's are based on pipes. Therefore, when it interacts with a Win32
program (such as Win32 python), what that program sees is a pipe instead of
a console. This is important because python buffers pipes, and so on a
pty-based terminal, text will not necessarily appear immediately. In most
cases, this isn't a big deal. But when we're doing an interactive prompt,
the result is that the prompts won't display until we fill the buffer. Since
raw_input does not flush the prompt, we need to manually write and flush it.
See https://github.com/mintty/mintty/issues/56 for more details.
"""
sys.stdout.write(prompt)
sys.stdout.flush()
return raw_input() |
Return a shell-escaped version of the string *s*
Unfortunately `shlex.quote` doesn't support Windows, so this method
provides that functionality. | def compat_shell_quote(s, platform=None):
"""Return a shell-escaped version of the string *s*
Unfortunately `shlex.quote` doesn't support Windows, so this method
provides that functionality.
"""
if platform is None:
platform = sys.platform
if platform == "win32":
return _windows_shell_quote(s)
else:
return shlex_quote(s) |
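Illustrative behaviour on each platform; any non-"win32" value falls through to `shlex_quote`, and the expected outputs in the comments are based on the quoting rules above.
```python
print(compat_shell_quote('file name.txt', platform='linux'))  # 'file name.txt'
print(compat_shell_quote('say "hi"', platform='win32'))       # "say \"hi\""
```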
Return a Windows shell-escaped version of the string *s*
Windows has potentially bizarre rules depending on where you look. When
spawning a process via the Windows C runtime the rules are as follows:
https://docs.microsoft.com/en-us/cpp/cpp/parsing-cpp-command-line-arguments
To summarize the relevant bits:
* Only space and tab are valid delimiters
* Double quotes are the only valid quotes
* Backslash is interpreted literally unless it is part of a chain that
leads up to a double quote. Then the backslashes escape the backslashes,
and if there is an odd number the final backslash escapes the quote.
:param s: A string to escape
:return: An escaped string | def _windows_shell_quote(s):
"""Return a Windows shell-escaped version of the string *s*
Windows has potentially bizarre rules depending on where you look. When
spawning a process via the Windows C runtime the rules are as follows:
https://docs.microsoft.com/en-us/cpp/cpp/parsing-cpp-command-line-arguments
To summarize the relevant bits:
* Only space and tab are valid delimiters
* Double quotes are the only valid quotes
* Backslash is interpreted literally unless it is part of a chain that
leads up to a double quote. Then the backslashes escape the backslashes,
and if there is an odd number the final backslash escapes the quote.
:param s: A string to escape
:return: An escaped string
"""
if not s:
return '""'
buff = []
num_backspaces = 0
for character in s:
if character == '\\':
# We can't simply append backslashes because we don't know if
# they will need to be escaped. Instead we separately keep track
# of how many we've seen.
num_backspaces += 1
elif character == '"':
if num_backspaces > 0:
# The backslashes are part of a chain that lead up to a
# double quote, so they need to be escaped.
buff.append('\\' * (num_backspaces * 2))
num_backspaces = 0
# The double quote also needs to be escaped. The fact that we're
# seeing it at all means that it must have been escaped in the
# original source.
buff.append('\\"')
else:
if num_backspaces > 0:
# The backslashes aren't part of a chain leading up to a
# double quote, so they can be inserted directly without
# being escaped.
buff.append('\\' * num_backspaces)
num_backspaces = 0
buff.append(character)
# There may be some leftover backspaces if they were on the trailing
# end, so they're added back in here.
if num_backspaces > 0:
buff.append('\\' * num_backspaces)
new_s = ''.join(buff)
if ' ' in new_s or '\t' in new_s:
# If there are any spaces or tabs then the string needs to be double
# quoted.
return '"%s"' % new_s
return new_s |
Returns the Popen kwargs for the pager to use, dependent on platform
:rtype: dict
:returns: A dict of Popen keyword arguments representing the paging command to run based on the
platform being used. | def get_popen_kwargs_for_pager_cmd(pager_cmd=None):
"""Returns the default pager to use dependent on platform
:rtype: str
:returns: A string represent the paging command to run based on the
platform being used.
"""
popen_kwargs = {}
if pager_cmd is None:
pager_cmd = default_pager
# Similar to what we do with the help command, we need to specify
# shell as True to make it work in the pager for Windows
if is_windows:
popen_kwargs = {'shell': True}
else:
pager_cmd = shlex.split(pager_cmd)
popen_kwargs['args'] = pager_cmd
return popen_kwargs |
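A hypothetical usage with subprocess, streaming text into the pager chosen for this platform ('less -R' is just an example command and must be installed for this to run):
```python
import subprocess

popen_kwargs = get_popen_kwargs_for_pager_cmd('less -R')
process = subprocess.Popen(stdin=subprocess.PIPE, **popen_kwargs)
process.communicate(input=b'some long output\n')
```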
Ignores user entered signals to avoid process getting killed. | def ignore_user_entered_signals():
"""
Ignores user entered signals to avoid process getting killed.
"""
if is_windows:
signal_list = [signal.SIGINT]
else:
signal_list = [signal.SIGINT, signal.SIGQUIT, signal.SIGTSTP]
actual_signals = []
for user_signal in signal_list:
actual_signals.append(signal.signal(user_signal, signal.SIG_IGN))
try:
yield
finally:
for sig, user_signal in enumerate(signal_list):
signal.signal(user_signal, actual_signals[sig]) |
Return the appropriate HelpRenderer implementation for the
current platform. | def get_renderer():
"""
Return the appropriate HelpRenderer implementation for the
current platform.
"""
if platform.system() == 'Windows':
return WindowsHelpRenderer()
else:
return PosixHelpRenderer() |
Load parameter based on a resource URI.
It is possible to pass parameters to operations by referring
to files or URI's. If such a reference is detected, this
function attempts to retrieve the data from the file or URI
and returns it. If there are any errors or if the ``path``
does not appear to refer to a file or URI, a ``None`` is
returned.
:type path: str
:param path: The resource URI, e.g. file://foo.txt. This value
may also be a non resource URI, in which case ``None`` is returned.
:type cases: dict
:param cases: A dictionary of URI prefixes to function mappings
that a parameter is checked against.
:return: The loaded value associated with the resource URI.
If the provided ``path`` is not a resource URI, then a
value of ``None`` is returned. | def get_paramfile(path, cases):
"""Load parameter based on a resource URI.
It is possible to pass parameters to operations by referring
to files or URI's. If such a reference is detected, this
function attempts to retrieve the data from the file or URI
and returns it. If there are any errors or if the ``path``
does not appear to refer to a file or URI, a ``None`` is
returned.
:type path: str
:param path: The resource URI, e.g. file://foo.txt. This value
may also be a non resource URI, in which case ``None`` is returned.
:type cases: dict
:param cases: A dictionary of URI prefixes to function mappings
that a parameter is checked against.
:return: The loaded value associated with the resource URI.
If the provided ``path`` is not a resource URI, then a
value of ``None`` is returned.
"""
data = None
if isinstance(path, six.string_types):
for prefix, function_spec in cases.items():
if path.startswith(prefix):
function, kwargs = function_spec
data = function(prefix, path, **kwargs)
return data |
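A self-contained sketch with a stand-in loader; a real `cases` mapping would read the file or URI, and the prefix and loader here are purely illustrative.
```python
def _fake_loader(prefix, path):
    # Stand-in for a real loader that would fetch the file/URI contents.
    return 'loaded:' + path[len(prefix):]

cases = {'file://': (_fake_loader, {})}
assert get_paramfile('file://params.json', cases) == 'loaded:params.json'
assert get_paramfile('plain-value', cases) is None
```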
:type plugin_mapping: dict
:param plugin_mapping: A dict of plugin name to import path,
e.g. ``{"pluginName": "package.modulefoo"}``.
:type event_hooks: ``EventHooks``
:param event_hooks: Event hook emitter. If one is not provided,
an emitter will be created and returned. Otherwise, the
passed in ``event_hooks`` will be used to initialize plugins.
:type include_builtins: bool
:param include_builtins: If True, the builtin awscli plugins (specified in
``BUILTIN_PLUGINS``) will be included in the list of plugins to load.
:rtype: HierarchicalEmitter
:return: An event emitter object. | def load_plugins(plugin_mapping, event_hooks=None, include_builtins=True):
"""
:type plugin_mapping: dict
:param plugin_mapping: A dict of plugin name to import path,
e.g. ``{"pluginName": "package.modulefoo"}``.
:type event_hooks: ``EventHooks``
:param event_hooks: Event hook emitter. If one is not provided,
an emitter will be created and returned. Otherwise, the
passed in ``event_hooks`` will be used to initialize plugins.
:type include_builtins: bool
:param include_builtins: If True, the builtin awscli plugins (specified in
``BUILTIN_PLUGINS``) will be included in the list of plugins to load.
:rtype: HierarchicalEmitter
:return: An event emitter object.
"""
if include_builtins:
plugin_mapping.update(BUILTIN_PLUGINS)
modules = _import_plugins(plugin_mapping)
if event_hooks is None:
event_hooks = HierarchicalEmitter()
for name, plugin in zip(plugin_mapping.keys(), modules):
log.debug("Initializing plugin %s: %s", name, plugin)
plugin.awscli_initialize(event_hooks)
return event_hooks |
Center text with specified edge chars.
You can pass in the length of the text as an arg, otherwise it is computed
automatically for you. This can allow you to center a string not based
on its literal length (useful if you're using ANSI codes). | def center_text(text, length=80, left_edge='|', right_edge='|',
text_length=None):
"""Center text with specified edge chars.
You can pass in the length of the text as an arg, otherwise it is computed
automatically for you. This can allow you to center a string not based
on its literal length (useful if you're using ANSI codes).
"""
# postcondition: get_text_length(returned_text) == length
if text_length is None:
text_length = get_text_length(text)
output = []
char_start = (length // 2) - (text_length // 2) - 1
output.append(left_edge + ' ' * char_start + text)
length_so_far = get_text_length(left_edge) + char_start + text_length
right_side_spaces = length - get_text_length(right_edge) - length_so_far
output.append(' ' * right_side_spaces)
output.append(right_edge)
final = ''.join(output)
return final |
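For example, the returned string always has the requested total width when the text contains no ANSI codes:
```python
line = center_text('Hello', length=20)
print(line)  # |       Hello      |
assert len(line) == 20
```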
Left align text. | def align_left(text, length, left_edge='|', right_edge='|', text_length=None,
left_padding=2):
"""Left align text."""
# postcondition: get_text_length(returned_text) == length
if text_length is None:
text_length = get_text_length(text)
computed_length = (
text_length + left_padding +
get_text_length(left_edge) + get_text_length(right_edge))
if length - computed_length >= 0:
padding = left_padding
else:
padding = 0
output = []
length_so_far = 0
output.append(left_edge)
length_so_far += len(left_edge)
output.append(' ' * padding)
length_so_far += padding
output.append(text)
length_so_far += text_length
output.append(' ' * (length - length_so_far - len(right_edge)))
output.append(right_edge)
return ''.join(output) |
Decorator to skip tests that should not be run on windows.
Example usage:
@skip_if_windows("Not valid")
def test_some_non_windows_stuff(self):
self.assertEqual(...) | def skip_if_windows(reason):
"""Decorator to skip tests that should not be run on windows.
Example usage:
@skip_if_windows("Not valid")
def test_some_non_windows_stuff(self):
self.assertEqual(...)
"""
def decorator(func):
return unittest.skipIf(
platform.system() not in ['Darwin', 'Linux'], reason)(func)
return decorator |
This is a cross platform temporary file creation.
tempfile.NamedTemporaryFile on Windows creates a secure temp file
that can't be read by other processes and can't be opened a second time.
For tests, we generally *want* them to be read multiple times.
The test fixture writes the temp file contents, the test reads the
temp file. | def temporary_file(mode):
"""This is a cross platform temporary file creation.
tempfile.NamedTemporaryFile on Windows creates a secure temp file
that can't be read by other processes and can't be opened a second time.
For tests, we generally *want* them to be read multiple times.
The test fixture writes the temp file contents, the test reads the
temp file.
"""
temporary_directory = tempfile.mkdtemp()
basename = 'tmpfile-%s' % str(random_chars(8))
full_filename = os.path.join(temporary_directory, basename)
open(full_filename, 'w').close()
try:
with open(full_filename, mode) as f:
yield f
finally:
shutil.rmtree(temporary_directory) |
Creates a bucket
:returns: the name of the bucket created | def create_bucket(session, name=None, region=None):
"""
Creates a bucket
:returns: the name of the bucket created
"""
if not region:
region = 'us-west-2'
client = session.create_client('s3', region_name=region)
if name:
bucket_name = name
else:
bucket_name = random_bucket_name()
params = {'Bucket': bucket_name, 'ObjectOwnership': 'ObjectWriter'}
if region != 'us-east-1':
params['CreateBucketConfiguration'] = {'LocationConstraint': region}
try:
client.create_bucket(**params)
except ClientError as e:
if e.response['Error'].get('Code') == 'BucketAlreadyOwnedByYou':
# This can happen in the retried request, when the first one
# succeeded on S3 but somehow the response never comes back.
# We still got a bucket ready for test anyway.
pass
else:
raise
return bucket_name |
Returns random hex characters.
Useful for creating resources with random names. | def random_chars(num_chars):
"""Returns random hex characters.
Useful for creating resources with random names.
"""
return binascii.hexlify(os.urandom(int(num_chars / 2))).decode('ascii') |
Generate a random S3 bucket name.
:param prefix: A prefix to use in the bucket name. Useful
for tracking resources. This default value makes it easy
to see which buckets were created from CLI integ tests.
:param num_random: Number of random chars to include in the bucket name.
:returns: The name of a randomly generated bucket name as a string. | def random_bucket_name(prefix='awscli-s3integ-', num_random=15):
"""Generate a random S3 bucket name.
:param prefix: A prefix to use in the bucket name. Useful
for tracking resources. This default value makes it easy
to see which buckets were created from CLI integ tests.
:param num_random: Number of random chars to include in the bucket name.
:returns: The name of a randomly generated bucket name as a string.
"""
return prefix + random_chars(num_random) |
Run an aws command.
This helper function abstracts the differences of running the "aws"
command on different platforms.
If collect_memory is ``True`` then the Result object will have a list
of memory usage taken at 2 second intervals. The memory usage
will be in bytes.
If env_vars is None, this will set the environment variables
to be used by the aws process.
If wait_for_finish is False, then the Process object is returned
to the caller. It is then the caller's responsibility to ensure
proper cleanup. This can be useful if you want to test timeouts
or how the CLI responds to various signals.
:type input_data: string
:param input_data: This string will be communicated to the process through
the stdin of the process. It essentially allows the user to
avoid having to use a file handle to pass information to the process.
Note that this string is not passed on creation of the process, but
rather communicated to the process.
:type input_file: a file handle
:param input_file: This is a file handle that will act as
the stdin of the process immediately on creation. Essentially
any data written to the file will be read from stdin of the
process. This is needed if you plan to stream data into stdin while
collecting memory. | def aws(command, collect_memory=False, env_vars=None,
wait_for_finish=True, input_data=None, input_file=None):
"""Run an aws command.
This helper function abstracts the differences of running the "aws"
command on different platforms.
If collect_memory is ``True`` then the Result object will have a list
of memory usage taken at 2 second intervals. The memory usage
will be in bytes.
If env_vars is None, this will set the environment variables
to be used by the aws process.
If wait_for_finish is False, then the Process object is returned
to the caller. It is then the caller's responsibility to ensure
proper cleanup. This can be useful if you want to test timeouts
or how the CLI responds to various signals.
:type input_data: string
:param input_data: This string will be communicated to the process through
the stdin of the process. It essentially allows the user to
avoid having to use a file handle to pass information to the process.
Note that this string is not passed on creation of the process, but
rather communicated to the process.
:type input_file: a file handle
:param input_file: This is a file handle that will act as
the stdin of the process immediately on creation. Essentially
any data written to the file will be read from stdin of the
process. This is needed if you plan to stream data into stdin while
collecting memory.
"""
if platform.system() == 'Windows':
command = _escape_quotes(command)
if 'AWS_TEST_COMMAND' in os.environ:
aws_command = os.environ['AWS_TEST_COMMAND']
else:
aws_command = 'python %s' % get_aws_cmd()
full_command = '%s %s' % (aws_command, command)
stdout_encoding = get_stdout_encoding()
if isinstance(full_command, six.text_type) and not six.PY3:
full_command = full_command.encode(stdout_encoding)
INTEG_LOG.debug("Running command: %s", full_command)
env = os.environ.copy()
if 'AWS_DEFAULT_REGION' not in env:
env['AWS_DEFAULT_REGION'] = "us-east-1"
if env_vars is not None:
env = env_vars
if input_file is None:
input_file = PIPE
process = Popen(full_command, stdout=PIPE, stderr=PIPE, stdin=input_file,
shell=True, env=env)
if not wait_for_finish:
return process
memory = None
if not collect_memory:
kwargs = {}
if input_data:
kwargs = {'input': input_data}
stdout, stderr = process.communicate(**kwargs)
else:
stdout, stderr, memory = _wait_and_collect_mem(process)
return Result(process.returncode,
stdout.decode(stdout_encoding),
stderr.decode(stdout_encoding),
memory) |
Eat items from an iterator, optionally replacing characters with
a blank and stopping when the end_char has been reached. | def _eat_items(value, iter_parts, part, end_char, replace_char=''):
"""
Eat items from an iterator, optionally replacing characters with
a blank and stopping when the end_char has been reached.
"""
current = part
chunks = [current.replace(replace_char, '')]
while True:
try:
current = six.advance_iterator(iter_parts)
except StopIteration:
raise ValueError(value)
chunks.append(current.replace(replace_char, ''))
if current.endswith(end_char):
break
return ','.join(chunks) |
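A hypothetical example, re-joining a bracketed list that was split on commas (mirroring how a shorthand value might be tokenized):
```python
value = 'a=[1,2,3],b=4'
parts = iter(value.split(','))
first = next(parts)  # 'a=[1'
print(_eat_items(value, parts, first, ']', replace_char='['))  # 'a=1,2,3]'
```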