code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---|
def predict(
self,
prompt: str = Input(description="Input prompt", default="Starry sky slowly rotating."),
image: Path = Input(description="Input image"),
num_inference_steps: int = Input(
description="Number of denoising steps", ge=1, le=500, default=50
),
guidance_scale: float = Input(
description="Scale for classifier-free guidance", ge=1, le=20, default=6
),
num_frames: int = Input(description="Number of frames for the output video", default=49),
seed: int = Input(
description="Random seed. Leave blank to randomize the seed", default=None
),
) -> Path:
"""Run a single prediction on the model"""
if seed is None:
seed = int.from_bytes(os.urandom(2), "big")
print(f"Using seed: {seed}")
img = load_image(image=str(image))
video = self.pipe(
prompt=prompt,
image=img,
num_videos_per_prompt=1,
num_inference_steps=num_inference_steps,
num_frames=num_frames,
guidance_scale=guidance_scale,
generator=torch.Generator(device="cuda").manual_seed(seed),
).frames[0]
out_path = "/tmp/out.mp4"
export_to_video(video, out_path, fps=8)
return Path(out_path)
|
Run a single prediction on the model
|
predict
|
python
|
THUDM/CogVideo
|
tools/replicate/predict_i2v.py
|
https://github.com/THUDM/CogVideo/blob/master/tools/replicate/predict_i2v.py
|
Apache-2.0
|
def setup(self) -> None:
"""Load the model into memory to make running multiple predictions efficient"""
if not os.path.exists(MODEL_CACHE):
download_weights(MODEL_URL, MODEL_CACHE)
# model_id: THUDM/CogVideoX-5b
self.pipe = CogVideoXPipeline.from_pretrained(
MODEL_CACHE,
torch_dtype=torch.bfloat16,
).to("cuda")
self.pipe.enable_model_cpu_offload()
self.pipe.vae.enable_tiling()
|
Load the model into memory to make running multiple predictions efficient
|
setup
|
python
|
THUDM/CogVideo
|
tools/replicate/predict_t2v.py
|
https://github.com/THUDM/CogVideo/blob/master/tools/replicate/predict_t2v.py
|
Apache-2.0
|
def predict(
self,
prompt: str = Input(
description="Input prompt",
default="A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical atmosphere of this unique musical performance.",
),
num_inference_steps: int = Input(
description="Number of denoising steps", ge=1, le=500, default=50
),
guidance_scale: float = Input(
description="Scale for classifier-free guidance", ge=1, le=20, default=6
),
num_frames: int = Input(description="Number of frames for the output video", default=49),
seed: int = Input(
description="Random seed. Leave blank to randomize the seed", default=None
),
) -> Path:
"""Run a single prediction on the model"""
if seed is None:
seed = int.from_bytes(os.urandom(2), "big")
print(f"Using seed: {seed}")
video = self.pipe(
prompt=prompt,
num_videos_per_prompt=1,
num_inference_steps=num_inference_steps,
num_frames=num_frames,
guidance_scale=guidance_scale,
generator=torch.Generator(device="cuda").manual_seed(seed),
).frames[0]
out_path = "/tmp/out.mp4"
export_to_video(video, out_path, fps=8)
return Path(out_path)
|
Run a single prediction on the model
|
predict
|
python
|
THUDM/CogVideo
|
tools/replicate/predict_t2v.py
|
https://github.com/THUDM/CogVideo/blob/master/tools/replicate/predict_t2v.py
|
Apache-2.0
|
def profile() -> None:
"""
Prints top N methods, sorted by time.
Equivalent to:
python -m cProfile -o data/profile.txt main.py -n 100
Options:
time, cumulative, line, name, nfl, calls
-----------
ncalls - for the number of calls.
time/tottime - for the total time spent in the given function
(and excluding time made in calls to sub-functions)
cumulative/cumtime - is the cumulative time spent in this and all subfunctions
(from invocation till exit). This figure is accurate even for recursive functions.
"""
random.seed(0)
command = (
"for _ in trange(10): "
"summary(torchvision.models.resnet152(), (1, 3, 224, 224), verbose=0)"
)
profile_file = "profile.txt"
sort = "time"
cProfile.run(command, filename=profile_file, sort=sort)
stats = pstats.Stats(profile_file)
stats.sort_stats(sort).print_stats(50)
|
Prints top N methods, sorted by time.
Equivalent to:
python -m cProfile -o data/profile.txt main.py -n 100
Options:
time, cumulative, line, name, nfl, calls
-----------
ncalls - for the number of calls.
time/tottime - for the total time spent in the given function
(and excluding time made in calls to sub-functions)
cumulative/cumtime - is the cumulative time spent in this and all subfunctions
(from invocation till exit). This figure is accurate even for recursive functions.
|
profile
|
python
|
TylerYep/torchinfo
|
profiler.py
|
https://github.com/TylerYep/torchinfo/blob/master/profiler.py
|
MIT
|
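The snippet above drives cProfile through a string command; for reference, here is a minimal sketch of the same cProfile/pstats pattern applied to an arbitrary callable (the helper name and arguments are illustrative, not part of the repo):

import cProfile
import pstats


def profile_callable(fn, *args, top_n=20):
    """Profile fn(*args) and print the top_n entries sorted by internal time."""
    profiler = cProfile.Profile()
    profiler.enable()
    fn(*args)
    profiler.disable()
    stats = pstats.Stats(profiler)
    stats.sort_stats("time").print_stats(top_n)


# usage sketch:
# profile_callable(sum, range(10_000))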
def pytest_addoption(parser: pytest.Parser) -> None:
"""This allows us to check for these params in sys.argv."""
parser.addoption("--overwrite", action="store_true", default=False)
parser.addoption("--no-output", action="store_true", default=False)
|
This allows us to check for these params in sys.argv.
|
pytest_addoption
|
python
|
TylerYep/torchinfo
|
tests/conftest.py
|
https://github.com/TylerYep/torchinfo/blob/master/tests/conftest.py
|
MIT
|
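The conftest in this repo reads these flags straight from sys.argv (see verify_output in the next row); for reference, a hedged sketch of the more conventional route through pytest's config object (the fixture name is illustrative, not part of the repo):

import pytest


@pytest.fixture
def overwrite_enabled(request: pytest.FixtureRequest) -> bool:
    # Reads the flag registered in pytest_addoption above.
    return bool(request.config.getoption("--overwrite"))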
def verify_output(capsys: pytest.CaptureFixture[str], filename: str) -> None:
"""
Utility function to ensure output matches file.
If you are writing new tests, run pytest with --overwrite to generate the
new test_output file.
"""
captured, _ = capsys.readouterr()
filepath = Path(filename)
if not captured and not filepath.exists():
return
if "--overwrite" in sys.argv:
filepath.parent.mkdir(exist_ok=True)
filepath.touch(exist_ok=True)
filepath.write_text(captured, encoding="utf-8")
verify_output_str(captured, filename)
|
Utility function to ensure output matches file.
If you are writing new tests, run pytest with --overwrite to generate the
new test_output file.
|
verify_output
|
python
|
TylerYep/torchinfo
|
tests/conftest.py
|
https://github.com/TylerYep/torchinfo/blob/master/tests/conftest.py
|
MIT
|
def assert_sum_column_totals_match(output: str, category: ColumnSettings) -> None:
"""Asserts that column totals match the total from the table summary."""
lines = output.replace("=", "").split("\n\n")
header_row = lines[0].strip()
offset = header_row.find(HEADER_TITLES[category])
if offset == -1:
return
layers = lines[1].split("\n")
calculated_total = float(sum(get_column_value_for_row(line, offset) for line in layers))
results = lines[2].split("\n")
if category == ColumnSettings.NUM_PARAMS:
total_params = results[0].split(":")[1].replace(",", "")
splitted_results = results[0].split('(')
if len(splitted_results) > 1:
units = splitted_results[1][0]
if units == 'T':
calculated_total /= 1e12
elif units == 'G':
calculated_total /= 1e9
elif units == 'M':
calculated_total /= 1e6
elif units == 'k':
calculated_total /= 1e3
assert calculated_total == float(total_params)
elif category == ColumnSettings.MULT_ADDS:
total_mult_adds = results[-1].split(":")[1].replace(",", "")
assert float(
f"{ModelStatistics.to_readable(calculated_total)[1]:0.2f}"
) == float(total_mult_adds)
|
Asserts that column totals match the total from the table summary.
|
assert_sum_column_totals_match
|
python
|
TylerYep/torchinfo
|
tests/conftest.py
|
https://github.com/TylerYep/torchinfo/blob/master/tests/conftest.py
|
MIT
|
def test_edgecase_input_output_model() -> None:
"""
Test the following two if-clauses
from LayerInfo.calculate_size.extract_tensor: 3
(starts counting from 1) as well as the final return.
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = EdgecaseInputOutputModel().to(device)
summary(model, input_data=[{}])
|
Test the following two if-clauses
from LayerInfo.calculate_size.extract_tensor: 3
(starts counting from 1) as well as the final return.
|
test_edgecase_input_output_model
|
python
|
TylerYep/torchinfo
|
tests/torchinfo_test.py
|
https://github.com/TylerYep/torchinfo/blob/master/tests/torchinfo_test.py
|
MIT
|
def set_layer_name_width(
self, summary_list: list[LayerInfo], align_val: int = 5
) -> None:
"""
Set layer name width by taking the longest line length and rounding up to
the nearest multiple of align_val.
"""
max_length = 0
for info in summary_list:
depth_indent = info.depth * align_val + 1
layer_title = info.get_layer_name(self.show_var_name, self.show_depth)
max_length = max(max_length, len(layer_title) + depth_indent)
if max_length >= self.layer_name_width:
self.layer_name_width = math.ceil(max_length / align_val) * align_val
|
Set layer name width by taking the longest line length and rounding up to
the nearest multiple of align_val.
|
set_layer_name_width
|
python
|
TylerYep/torchinfo
|
torchinfo/formatting.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/formatting.py
|
MIT
|
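The width update above is just a ceiling to the next multiple of align_val; a standalone sketch of that arithmetic (the helper name is illustrative):

import math


def round_up_to_multiple(value: int, align_val: int = 5) -> int:
    """Round value up to the nearest multiple of align_val."""
    return math.ceil(value / align_val) * align_val


assert round_up_to_multiple(23) == 25
assert round_up_to_multiple(25) == 25
assert round_up_to_multiple(26, align_val=5) == 30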
def format_row(self, layer_name: str, row_values: dict[ColumnSettings, str]) -> str:
"""Get the string representation of a single layer of the model."""
info_to_use = [row_values.get(row_type, "") for row_type in self.col_names]
new_line = f"{layer_name:<{self.layer_name_width}} "
for info in info_to_use:
new_line += f"{info:<{self.col_width}} "
return new_line.rstrip() + "\n"
|
Get the string representation of a single layer of the model.
|
format_row
|
python
|
TylerYep/torchinfo
|
torchinfo/formatting.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/formatting.py
|
MIT
|
def layer_info_to_row(
self, layer_info: LayerInfo, reached_max_depth: bool, total_params: int
) -> str:
"""Convert layer_info to string representation of a row."""
values_for_row = {
ColumnSettings.KERNEL_SIZE: self.str_(layer_info.kernel_size),
ColumnSettings.GROUPS: self.str_(layer_info.groups),
ColumnSettings.INPUT_SIZE: self.str_(layer_info.input_size),
ColumnSettings.OUTPUT_SIZE: self.str_(layer_info.output_size),
ColumnSettings.NUM_PARAMS: layer_info.num_params_to_str(reached_max_depth),
ColumnSettings.PARAMS_PERCENT: layer_info.params_percent(
total_params, reached_max_depth
),
ColumnSettings.MULT_ADDS: layer_info.macs_to_str(reached_max_depth),
ColumnSettings.TRAINABLE: self.str_(layer_info.trainable),
}
start_str = self.get_start_str(layer_info.depth)
layer_name = layer_info.get_layer_name(self.show_var_name, self.show_depth)
new_line = self.format_row(f"{start_str}{layer_name}", values_for_row)
if self.verbose == Verbosity.VERBOSE:
for inner_name, inner_layer_info in layer_info.inner_layers.items():
prefix = self.get_start_str(layer_info.depth + 1)
new_line += self.format_row(f"{prefix}{inner_name}", inner_layer_info)
return new_line
|
Convert layer_info to string representation of a row.
|
layer_info_to_row
|
python
|
TylerYep/torchinfo
|
torchinfo/formatting.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/formatting.py
|
MIT
|
def layers_to_str(self, summary_list: list[LayerInfo], total_params: int) -> str:
"""
Print each layer of the model using only current layer info.
Container modules are already dealt with in add_missing_container_layers.
"""
new_str = ""
for layer_info in summary_list:
if layer_info.depth > self.max_depth or (
self.hide_recursive_layers and layer_info.is_recursive
):
continue
reached_max_depth = layer_info.depth == self.max_depth
new_str += self.layer_info_to_row(
layer_info, reached_max_depth, total_params
)
return new_str
|
Print each layer of the model using only current layer info.
Container modules are already dealt with in add_missing_container_layers.
|
layers_to_str
|
python
|
TylerYep/torchinfo
|
torchinfo/formatting.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/formatting.py
|
MIT
|
def trainable(self) -> str:
"""
Checks if the module is trainable. Returns:
"True", if all the parameters are trainable (`requires_grad=True`)
"False" if none of the parameters are trainable.
"Partial" if some weights are trainable, but not all.
"--" if no module has no parameters, like Dropout.
"""
if self.num_params == 0:
return "--"
if self.trainable_params == 0:
return "False"
if self.num_params == self.trainable_params:
return "True"
if self.num_params > self.trainable_params:
return "Partial"
raise RuntimeError("Unreachable trainable calculation.")
|
Checks if the module is trainable. Returns:
"True", if all the parameters are trainable (`requires_grad=True`)
"False" if none of the parameters are trainable.
"Partial" if some weights are trainable, but not all.
"--" if no module has no parameters, like Dropout.
|
trainable
|
python
|
TylerYep/torchinfo
|
torchinfo/layer_info.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/layer_info.py
|
MIT
|
def calculate_size(
inputs: DETECTED_INPUT_OUTPUT_TYPES | None, batch_dim: int | None
) -> tuple[list[int], int]:
"""
Set input_size or output_size using the model's inputs.
Returns the corrected shape of `inputs` and the size of
a single element in bytes.
"""
if inputs is None:
size, elem_bytes = [], 0
# pack_padded_seq and pad_packed_seq store feature into data attribute
elif (
isinstance(inputs, (list, tuple))
and inputs
and hasattr(inputs[0], "data")
and hasattr(inputs[0].data, "size")
):
size = list(inputs[0].data.size())
elem_bytes = inputs[0].data.element_size()
if batch_dim is not None:
size = size[:batch_dim] + [1] + size[batch_dim + 1 :]
elif isinstance(inputs, dict):
output = list(inputs.values())[-1]
size, elem_bytes = nested_list_size(output)
if batch_dim is not None:
size = [size[:batch_dim] + [1] + size[batch_dim + 1 :]]
elif isinstance(inputs, torch.Tensor):
size = list(inputs.size())
elem_bytes = inputs.element_size()
elif isinstance(inputs, np.ndarray): # type: ignore[unreachable]
inputs_ = torch.from_numpy(inputs) # type: ignore[unreachable]
size, elem_bytes = list(inputs_.size()), inputs_.element_size()
elif isinstance(inputs, (list, tuple)):
size, elem_bytes = nested_list_size(inputs)
if batch_dim is not None and batch_dim < len(size):
size[batch_dim] = 1
else:
raise TypeError(
"Model contains a layer with an unsupported input or output type: "
f"{inputs}, type: {type(inputs)}"
)
return size, elem_bytes
|
Set input_size or output_size using the model's inputs.
Returns the corrected shape of `inputs` and the size of
a single element in bytes.
|
calculate_size
|
python
|
TylerYep/torchinfo
|
torchinfo/layer_info.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/layer_info.py
|
MIT
|
def get_param_count(
module: nn.Module, name: str, param: torch.Tensor
) -> tuple[int, str]:
"""
Get count of number of params, accounting for mask.
Masked models save parameters with the suffix "_orig" added.
They have a buffer ending with "_mask" which has only 0s and 1s.
If a mask exists, the sum of 1s in mask is number of params.
"""
if name.endswith("_orig"):
without_suffix = name[:-5]
pruned_weights = rgetattr(module, f"{without_suffix}_mask")
if pruned_weights is not None:
parameter_count = int(torch.sum(pruned_weights))
return parameter_count, without_suffix
return param.nelement(), name
|
Get count of number of params, accounting for mask.
Masked models save parameters with the suffix "_orig" added.
They have a buffer ending with "_mask" which has only 0s and 1s.
If a mask exists, the sum of 1s in mask is number of params.
|
get_param_count
|
python
|
TylerYep/torchinfo
|
torchinfo/layer_info.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/layer_info.py
|
MIT
|
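The "_orig"/"_mask" convention that get_param_count checks for comes from torch.nn.utils.prune, which reparametrizes a pruned module; a minimal sketch of the attributes pruning leaves behind (the layer shape and pruning amount are arbitrary):

import torch
from torch import nn
from torch.nn.utils import prune

layer = nn.Linear(4, 4)
prune.l1_unstructured(layer, name="weight", amount=0.5)

# After pruning, the parameter is stored as "weight_orig" and a 0/1 buffer
# "weight_mask" records which entries survive; the effective parameter count
# is the number of ones in the mask, which is what get_param_count sums.
print(dict(layer.named_parameters()).keys())   # includes 'weight_orig'
print(dict(layer.named_buffers()).keys())      # includes 'weight_mask'
print(int(torch.sum(layer.weight_mask)))       # 8 of 16 weights remain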
def calculate_macs(self) -> None:
"""
Set MACs using the module's parameters and layer's output size, which is
used for computing number of operations for Conv layers.
Please note: Returned MACs is the number of MACs for the full tensor,
i.e., taking the batch-dimension into account.
"""
for name, param in self.module.named_parameters():
cur_params, name = self.get_param_count(self.module, name, param)
if name in ("weight", "bias"):
# ignore C when calculating Mult-Adds in ConvNd
if "Conv" in self.class_name:
self.macs += int(
cur_params * prod(self.output_size[:1] + self.output_size[2:])
)
elif "Linear" in self.class_name:
self.macs += int(cur_params * prod(self.output_size[:-1]))
else:
self.macs += self.output_size[0] * cur_params
# RNN modules have inner weights such as weight_ih_l0
elif "weight" in name or "bias" in name:
self.macs += prod(self.output_size[:2]) * cur_params
|
Set MACs using the module's parameters and layer's output size, which is
used for computing number of operations for Conv layers.
Please note: Returned MACs is the number of MACs for the full tensor,
i.e., taking the batch-dimension into account.
|
calculate_macs
|
python
|
TylerYep/torchinfo
|
torchinfo/layer_info.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/layer_info.py
|
MIT
|
def check_recursive(self, layer_ids: set[int]) -> None:
"""
If the current module is already-used, mark as (recursive).
Must check before adding line to the summary.
"""
if self.layer_id in layer_ids:
self.is_recursive = True
|
If the current module is already-used, mark as (recursive).
Must check before adding line to the summary.
|
check_recursive
|
python
|
TylerYep/torchinfo
|
torchinfo/layer_info.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/layer_info.py
|
MIT
|
def leftover_params(self) -> int:
"""
Leftover params are the number of params this current layer has that are not
included in the child num_param counts.
"""
return self.num_params - sum(
child.num_params if child.is_leaf_layer else child.leftover_params()
for child in self.children
if not child.is_recursive
)
|
Leftover params are the number of params this current layer has that are not
included in the child num_param counts.
|
leftover_params
|
python
|
TylerYep/torchinfo
|
torchinfo/layer_info.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/layer_info.py
|
MIT
|
def rgetattr(module: nn.Module, attr: str) -> torch.Tensor | None:
"""Get the tensor submodule called attr from module."""
for attr_i in attr.split("."):
if not hasattr(module, attr_i):
return None
module = getattr(module, attr_i)
assert isinstance(module, torch.Tensor) # type: ignore[unreachable]
return module # type: ignore[unreachable]
|
Get the tensor submodule called attr from module.
|
rgetattr
|
python
|
TylerYep/torchinfo
|
torchinfo/layer_info.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/layer_info.py
|
MIT
|
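A usage sketch of the rgetattr helper defined above, resolving a dotted path on a toy module (the import path is an assumption that matches the file shown above; the module structure is arbitrary):

import torch
from torch import nn

from torchinfo.layer_info import rgetattr  # assumed import path

model = nn.Sequential(nn.Linear(3, 2), nn.ReLU())

# Dotted lookup: equivalent to model._modules["0"].weight here.
weight = rgetattr(model, "0.weight")
assert isinstance(weight, torch.Tensor)

# Missing attributes return None instead of raising.
assert rgetattr(model, "0.missing") is None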
def get_children_layers(summary_list: list[LayerInfo], index: int) -> list[LayerInfo]:
"""Fetches all of the children of a given layer."""
num_children = 0
for layer in summary_list[index + 1 :]:
if layer.depth <= summary_list[index].depth:
break
num_children += 1
return summary_list[index + 1 : index + 1 + num_children]
|
Fetches all of the children of a given layer.
|
get_children_layers
|
python
|
TylerYep/torchinfo
|
torchinfo/layer_info.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/layer_info.py
|
MIT
|
def to_readable(num: float, units: Units = Units.AUTO) -> tuple[Units, float]:
"""Converts a number to millions, billions, or trillions."""
if units == Units.AUTO:
if num >= 1e12:
return Units.TERABYTES, num / 1e12
if num >= 1e9:
return Units.GIGABYTES, num / 1e9
if num >= 1e6:
return Units.MEGABYTES, num / 1e6
if num >= 1e3:
return Units.KILOBYTES, num / 1e3
return Units.NONE, num
return units, num / CONVERSION_FACTORS[units]
|
Converts a number to millions, billions, or trillions.
|
to_readable
|
python
|
TylerYep/torchinfo
|
torchinfo/model_statistics.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/model_statistics.py
|
MIT
|
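A quick sketch of how the AUTO branch above behaves, assuming ModelStatistics.to_readable is importable as shown (the conftest earlier in this table calls it the same way; expected values follow directly from the thresholds in the code):

from torchinfo.model_statistics import ModelStatistics

units, value = ModelStatistics.to_readable(3_500_000)
print(units, value)   # (Units.MEGABYTES, 3.5) given the thresholds above

units, value = ModelStatistics.to_readable(1.2e10)
print(units, value)   # (Units.GIGABYTES, 12.0)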
def process_input(
input_data: INPUT_DATA_TYPE | None,
input_size: INPUT_SIZE_TYPE | None,
batch_dim: int | None,
device: torch.device | None,
dtypes: list[torch.dtype] | None = None,
) -> tuple[CORRECTED_INPUT_DATA_TYPE, Any]:
"""Reads sample input data to get the input size."""
x = None
correct_input_size = []
if input_data is not None:
correct_input_size = get_input_data_sizes(input_data)
x = set_device(input_data, device)
if isinstance(x, (torch.Tensor, np.ndarray)):
x = [x]
if input_size is not None:
assert device is not None
if dtypes is None:
dtypes = [torch.float] * len(input_size)
correct_input_size = get_correct_input_sizes(input_size)
x = get_input_tensor(correct_input_size, batch_dim, dtypes, device)
return x, correct_input_size
|
Reads sample input data to get the input size.
|
process_input
|
python
|
TylerYep/torchinfo
|
torchinfo/torchinfo.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/torchinfo.py
|
MIT
|
def forward_pass(
model: nn.Module,
x: CORRECTED_INPUT_DATA_TYPE,
batch_dim: int | None,
cache_forward_pass: bool,
device: torch.device | None,
mode: Mode,
**kwargs: Any,
) -> list[LayerInfo]:
"""Perform a forward pass on the model using forward hooks."""
global _cached_forward_pass
model_name = model.__class__.__name__
if cache_forward_pass and model_name in _cached_forward_pass:
return _cached_forward_pass[model_name]
summary_list, _, hooks = apply_hooks(model_name, model, x, batch_dim)
if x is None:
set_children_layers(summary_list)
return summary_list
kwargs = set_device(kwargs, device)
saved_model_mode = model.training
try:
if mode == Mode.TRAIN:
model.train()
elif mode == Mode.EVAL:
model.eval()
elif mode != Mode.SAME:
raise RuntimeError(
f"Specified model mode ({list(Mode)}) not recognized: {mode}"
)
with torch.no_grad():
model = model if device is None else model.to(device)
if isinstance(x, (list, tuple)):
_ = model(*x, **kwargs)
elif isinstance(x, dict):
_ = model(**x, **kwargs)
else:
# Should not reach this point, since process_input_data ensures
# x is either a list, tuple, or dict
raise ValueError("Unknown input type")
except Exception as e:
executed_layers = [layer for layer in summary_list if layer.executed]
raise RuntimeError(
"Failed to run torchinfo. See above stack traces for more details. "
f"Executed layers up to: {executed_layers}"
) from e
finally:
if hooks:
for pre_hook, hook in hooks.values():
pre_hook.remove()
hook.remove()
model.train(saved_model_mode)
add_missing_container_layers(summary_list)
set_children_layers(summary_list)
_cached_forward_pass[model_name] = summary_list
return summary_list
|
Perform a forward pass on the model using forward hooks.
|
forward_pass
|
python
|
TylerYep/torchinfo
|
torchinfo/torchinfo.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/torchinfo.py
|
MIT
|
def set_children_layers(summary_list: list[LayerInfo]) -> None:
"""Populates the children and depth_index fields of all LayerInfo."""
idx: dict[int, int] = {}
for i, layer in enumerate(summary_list):
idx[layer.depth] = idx.get(layer.depth, 0) + 1
layer.depth_index = idx[layer.depth]
layer.children = get_children_layers(summary_list, i)
|
Populates the children and depth_index fields of all LayerInfo.
|
set_children_layers
|
python
|
TylerYep/torchinfo
|
torchinfo/torchinfo.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/torchinfo.py
|
MIT
|
def add_missing_container_layers(summary_list: list[LayerInfo]) -> None:
"""Finds container modules not in the currently listed hierarchy."""
layer_ids = {layer.layer_id for layer in summary_list}
current_hierarchy: dict[int, LayerInfo] = {}
for idx, layer_info in enumerate(summary_list):
# to keep track index of current layer
# after inserting new layers
rel_idx = 0
# create full hierarchy of current layer
hierarchy = {}
parent = layer_info.parent_info
while parent is not None and parent.depth > 0:
hierarchy[parent.depth] = parent
parent = parent.parent_info
# show hierarchy if it is not there already
for d in range(1, layer_info.depth):
if (
d not in current_hierarchy
or current_hierarchy[d].module is not hierarchy[d].module
) and hierarchy[d] is not summary_list[idx + rel_idx - 1]:
hierarchy[d].calculate_num_params()
hierarchy[d].check_recursive(layer_ids)
summary_list.insert(idx + rel_idx, hierarchy[d])
layer_ids.add(hierarchy[d].layer_id)
current_hierarchy[d] = hierarchy[d]
rel_idx += 1
current_hierarchy[layer_info.depth] = layer_info
# remove deeper hierarchy
d = layer_info.depth + 1
while d in current_hierarchy:
current_hierarchy.pop(d)
d += 1
|
Finds container modules not in the currently listed hierarchy.
|
add_missing_container_layers
|
python
|
TylerYep/torchinfo
|
torchinfo/torchinfo.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/torchinfo.py
|
MIT
|
def validate_user_params(
input_data: INPUT_DATA_TYPE | None,
input_size: INPUT_SIZE_TYPE | None,
col_names: tuple[ColumnSettings, ...],
col_width: int,
device: torch.device | None,
dtypes: list[torch.dtype] | None,
verbose: int,
) -> None:
"""Raise exceptions if the user's input is invalid."""
if col_width <= 0:
raise ValueError(f"Column width must be greater than 0: col_width={col_width}")
if verbose not in (0, 1, 2):
raise ValueError(
"Verbose must be either 0 (quiet), 1 (default), or 2 (verbose)."
)
both_input_specified = input_data is not None and input_size is not None
if both_input_specified:
raise RuntimeError("Only one of (input_data, input_size) should be specified.")
neither_input_specified = input_data is None and input_size is None
not_allowed = set(col_names) & REQUIRES_INPUT
if neither_input_specified and not_allowed:
raise ValueError(
"You must pass input_data or input_size in order "
f"to use columns: {not_allowed}"
)
if dtypes is not None and any(
dtype in (torch.float16, torch.bfloat16) for dtype in dtypes
):
if input_size is not None:
warnings.warn(
"Half precision is not supported with input_size parameter, and may "
"output incorrect results. Try passing input_data directly.",
stacklevel=2,
)
if device is not None and device.type == "cpu":
warnings.warn(
"Half precision is not supported on cpu. Set the `device` field or "
"pass `input_data` using the correct device.",
stacklevel=2,
)
|
Raise exceptions if the user's input is invalid.
|
validate_user_params
|
python
|
TylerYep/torchinfo
|
torchinfo/torchinfo.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/torchinfo.py
|
MIT
|
def traverse_input_data(
data: Any, action_fn: Callable[..., Any], aggregate_fn: Callable[..., Any]
) -> Any:
"""
Traverses any type of nested input data. On a tensor, returns the action given by
action_fn, and afterwards aggregates the results using aggregate_fn.
"""
if isinstance(data, torch.Tensor):
result = action_fn(data)
elif isinstance(data, np.ndarray):
result = action_fn(torch.from_numpy(data))
# If the result of action_fn is a torch.Tensor, then action_fn was meant for
# torch.Tensors only (like calling .to(...)) -> Ignore.
if isinstance(result, torch.Tensor):
result = data
# Recursively apply to collection items
elif isinstance(data, Mapping):
aggregate = aggregate_fn(data)
result = aggregate(
{
k: traverse_input_data(v, action_fn, aggregate_fn)
for k, v in data.items()
}
)
elif isinstance(data, tuple) and hasattr(data, "_fields"): # Named tuple
aggregate = aggregate_fn(data)
result = aggregate(
*(traverse_input_data(d, action_fn, aggregate_fn) for d in data)
)
elif isinstance(data, Iterable) and not isinstance(data, str):
aggregate = aggregate_fn(data)
result = aggregate(
[traverse_input_data(d, action_fn, aggregate_fn) for d in data]
)
else:
# Data is neither a tensor nor a collection
result = data
return result
|
Traverses any type of nested input data. On a tensor, returns the action given by
action_fn, and afterwards aggregates the results using aggregate_fn.
|
traverse_input_data
|
python
|
TylerYep/torchinfo
|
torchinfo/torchinfo.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/torchinfo.py
|
MIT
|
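A usage sketch of traverse_input_data as defined above, collecting tensor shapes from a nested structure (the import path is an assumption matching the file shown above; the sample data is arbitrary):

import torch

from torchinfo.torchinfo import traverse_input_data  # assumed import path

data = {
    "image": torch.zeros(1, 3, 8, 8),
    "targets": [torch.zeros(1, 10), torch.zeros(1, 2)],
}

# action_fn runs on each tensor; aggregate_fn(data) returns the constructor
# used to rebuild the surrounding container (dict, list, ...).
shapes = traverse_input_data(
    data,
    action_fn=lambda t: tuple(t.size()),
    aggregate_fn=type,
)
print(shapes)  # {'image': (1, 3, 8, 8), 'targets': [(1, 10), (1, 2)]}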
def set_device(data: Any, device: torch.device | None) -> Any:
"""Sets device for all input types and collections of input types."""
return (
data
if device is None
else traverse_input_data(
data,
action_fn=lambda data: data.to(device, non_blocking=True),
aggregate_fn=type,
)
)
|
Sets device for all input types and collections of input types.
|
set_device
|
python
|
TylerYep/torchinfo
|
torchinfo/torchinfo.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/torchinfo.py
|
MIT
|
def get_device(
model: nn.Module, input_data: INPUT_DATA_TYPE | None
) -> torch.device | None:
"""
If input_data is given, the device should not be changed
(to allow for multi-device models, etc.)
Otherwise gets device of first parameter of model and returns it if it is on cuda,
otherwise returns cuda if available or cpu if not.
"""
if input_data is None:
try:
model_parameter = next(model.parameters())
except StopIteration:
model_parameter = None
if model_parameter is not None and model_parameter.is_cuda:
return model_parameter.device
return torch.device("cuda" if torch.cuda.is_available() else "cpu")
return None
|
If input_data is given, the device should not be changed
(to allow for multi-device models, etc.)
Otherwise gets device of first parameter of model and returns it if it is on cuda,
otherwise returns cuda if available or cpu if not.
|
get_device
|
python
|
TylerYep/torchinfo
|
torchinfo/torchinfo.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/torchinfo.py
|
MIT
|
def get_input_data_sizes(data: Any) -> Any:
"""
Converts input data to an equivalent data structure of torch.Sizes
instead of tensors.
"""
return traverse_input_data(
data, action_fn=lambda data: data.size(), aggregate_fn=type
)
|
Converts input data to an equivalent data structure of torch.Sizes
instead of tensors.
|
get_input_data_sizes
|
python
|
TylerYep/torchinfo
|
torchinfo/torchinfo.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/torchinfo.py
|
MIT
|
def get_total_memory_used(data: CORRECTED_INPUT_DATA_TYPE) -> int:
"""Calculates the total memory of all tensors stored in data."""
result = traverse_input_data(
data,
action_fn=lambda data: sys.getsizeof(
data.untyped_storage()
if hasattr(data, "untyped_storage")
else data.storage()
),
aggregate_fn=(
# We don't need the dictionary keys in this case
lambda data: (lambda d: sum(d.values()))
if isinstance(data, Mapping)
else sum
),
)
return cast(int, result)
|
Calculates the total memory of all tensors stored in data.
|
get_total_memory_used
|
python
|
TylerYep/torchinfo
|
torchinfo/torchinfo.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/torchinfo.py
|
MIT
|
def get_input_tensor(
input_size: CORRECTED_INPUT_SIZE_TYPE,
batch_dim: int | None,
dtypes: list[torch.dtype],
device: torch.device,
) -> list[torch.Tensor]:
"""Get input_tensor with batch size 1 for use in model.forward()"""
x = []
for size, dtype in zip(input_size, dtypes):
input_tensor = torch.rand(*size)
if batch_dim is not None:
input_tensor = input_tensor.unsqueeze(dim=batch_dim)
x.append(input_tensor.to(device).type(dtype))
return x
|
Get input_tensor with batch size 1 for use in model.forward()
|
get_input_tensor
|
python
|
TylerYep/torchinfo
|
torchinfo/torchinfo.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/torchinfo.py
|
MIT
|
def get_correct_input_sizes(input_size: INPUT_SIZE_TYPE) -> CORRECTED_INPUT_SIZE_TYPE:
"""
Convert input_size to the correct form, which is a list of tuples.
Also handles multiple inputs to the network.
"""
if not isinstance(input_size, (list, tuple)):
raise TypeError(
"Input_size is not a recognized type. Please ensure input_size is valid.\n"
"For multiple inputs to the network, ensure input_size is a list of tuple "
"sizes. If you are having trouble here, please submit a GitHub issue."
)
if not input_size or any(size <= 0 for size in flatten(input_size)):
raise ValueError("Input_data is invalid, or negative size found in input_data.")
if isinstance(input_size, list) and isinstance(input_size[0], int):
return [tuple(input_size)]
if isinstance(input_size, list):
return input_size
if isinstance(input_size, tuple) and isinstance(input_size[0], tuple):
return list(input_size)
return [input_size]
|
Convert input_size to the correct form, which is a list of tuples.
Also handles multiple inputs to the network.
|
get_correct_input_sizes
|
python
|
TylerYep/torchinfo
|
torchinfo/torchinfo.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/torchinfo.py
|
MIT
|
def pre_hook(module: nn.Module, inputs: Any) -> None:
"""Create a LayerInfo object to aggregate layer information."""
del inputs
info = LayerInfo(var_name, module, curr_depth, parent_info)
info.calculate_num_params()
info.check_recursive(layer_ids)
summary_list.append(info)
layer_ids.add(info.layer_id)
global_layer_info[info.layer_id] = info
|
Create a LayerInfo object to aggregate layer information.
|
pre_hook
|
python
|
TylerYep/torchinfo
|
torchinfo/torchinfo.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/torchinfo.py
|
MIT
|
def apply_hooks(
model_name: str,
module: nn.Module,
input_data: CORRECTED_INPUT_DATA_TYPE,
batch_dim: int | None,
) -> tuple[
list[LayerInfo],
dict[int, LayerInfo],
dict[int, tuple[RemovableHandle, RemovableHandle]],
]:
"""
If input_data is provided, recursively adds hooks to all layers of the model.
Else, fills summary_list with layer info without computing a
forward pass through the network.
"""
summary_list: list[LayerInfo] = []
layer_ids: set[int] = set() # Used to optimize is_recursive()
global_layer_info: dict[int, LayerInfo] = {}
hooks: dict[int, tuple[RemovableHandle, RemovableHandle]] = {}
stack: list[tuple[str, nn.Module, int, LayerInfo | None]] = [
(model_name, module, 0, None)
]
while stack:
var_name, module, curr_depth, parent_info = stack.pop()
module_id = id(module)
# Fallback is used if the layer's pre-hook is never called, for example in
# ModuleLists or Sequentials.
global_layer_info[module_id] = LayerInfo(
var_name, module, curr_depth, parent_info
)
pre_hook = construct_pre_hook(
global_layer_info,
summary_list,
layer_ids,
var_name,
curr_depth,
parent_info,
)
if input_data is None or isinstance(module, WRAPPER_MODULES):
pre_hook(module, None)
else:
# Register the hook using the last layer that uses this module.
if module_id in hooks:
for hook in hooks[module_id]:
hook.remove()
hooks[module_id] = (
module.register_forward_pre_hook(pre_hook),
module.register_forward_hook(
construct_hook(global_layer_info, batch_dim)
),
)
# Replaces the equivalent recursive call by appending all of the
# module's children to the stack in the order they are encountered.
# Note: module.named_modules(remove_duplicate=False) doesn't work for
# some unknown reason (infinite recursion)
stack += [
(name, mod, curr_depth + 1, global_layer_info[module_id])
for name, mod in reversed(module._modules.items())
if mod is not None
]
return summary_list, global_layer_info, hooks
|
If input_data is provided, recursively adds hooks to all layers of the model.
Else, fills summary_list with layer info without computing a
forward pass through the network.
|
apply_hooks
|
python
|
TylerYep/torchinfo
|
torchinfo/torchinfo.py
|
https://github.com/TylerYep/torchinfo/blob/master/torchinfo/torchinfo.py
|
MIT
|
def db_credentials(c):
"""Encode db credentials (for github actions)"""
path = str(Path("~", ".auth", "postgres-ploomber.json").expanduser())
creds = Path(path).read_text()
print(base64.b64encode(creds.encode()).decode())
|
Encode db credentials (for github actions)
|
db_credentials
|
python
|
ploomber/ploomber
|
tasks.py
|
https://github.com/ploomber/ploomber/blob/master/tasks.py
|
Apache-2.0
|
def fit(product, upstream):
"""Train a model and save it (pickle format)"""
clf = DecisionTreeClassifier()
df = pd.read_csv(str(upstream["join"]))
X = df.drop("target", axis="columns")
y = df["target"]
clf.fit(X, y)
with open(str(product), "wb") as f:
pickle.dump(clf, f)
|
Train a model and save it (pickle format)
|
fit
|
python
|
ploomber/ploomber
|
doc/examples/InMemoryDAG.py
|
https://github.com/ploomber/ploomber/blob/master/doc/examples/InMemoryDAG.py
|
Apache-2.0
|
def serializer(df, product):
"""Save all data frames as CSVs"""
out = str(product)
# make sure the parent folder exists
Path(out).parent.mkdir(parents=True, exist_ok=True)
df.to_csv(out, index=False)
|
Save all data frames as CSVs
|
serializer
|
python
|
ploomber/ploomber
|
doc/examples/InMemoryDAG.py
|
https://github.com/ploomber/ploomber/blob/master/doc/examples/InMemoryDAG.py
|
Apache-2.0
|
def add_features(dag):
"""
Given a DAG, adds feature engineering tasks. The DAG must have a task "get"
that returns the input data.
"""
get_task = dag["get"]
output = Path("output")
# instantiate tasks
a_feature_task = PythonCallable(
a_feature,
File(output / "a_feature.csv"),
dag,
serializer=serializer,
unserializer=unserializer,
)
another_task = PythonCallable(
another,
File(output / "another.csv"),
dag,
serializer=serializer,
unserializer=unserializer,
)
join_task = PythonCallable(
join,
File(output / "join.csv"),
dag,
serializer=serializer,
unserializer=unserializer,
)
# establish dependencies
get_task >> a_feature_task
get_task >> another_task
(get_task + a_feature_task + another_task) >> join_task
return dag
|
Given a DAG, adds feature engineering tasks. The DAG must have a task "get"
that returns the input data.
|
add_features
|
python
|
ploomber/ploomber
|
doc/examples/InMemoryDAG.py
|
https://github.com/ploomber/ploomber/blob/master/doc/examples/InMemoryDAG.py
|
Apache-2.0
|
def make_predict():
"""Instantiate a prediction DAG using a previously trained model"""
dag_pred = DAG()
# this special function adds a task with name "get" that will just forward
# whatever value we pass when calling .build(). You can pass a function
# in the "preprocessor" argument to perform arbitrary logic like parsing
# or validation
input_data_passer(dag=dag_pred, name="get", preprocessor=validate_input_data)
# we re-use the same code that we used for training!
add_features(dag_pred)
# load model generated by the training graph
with open(Path("output", "model.pickle"), "rb") as f:
model = pickle.load(f)
# add the final task, this special function just executes whatever
# function we pass as the first argument, we can pass arbitrary parameters
# using "params"
predict_task = in_memory_callable(
predict, dag=dag_pred, name="predict", params=dict(model=model)
)
# predict after joining features
dag_pred["join"] >> predict_task
# convert our batch-processing pipeline to a in-memory one and return
return InMemoryDAG(dag_pred)
|
Instantiate a prediction DAG using a previously trained model
|
make_predict
|
python
|
ploomber/ploomber
|
doc/examples/InMemoryDAG.py
|
https://github.com/ploomber/ploomber/blob/master/doc/examples/InMemoryDAG.py
|
Apache-2.0
|
def diff_strings(a, b):
"""Compute the diff between two strings"""
d = Differ()
if a is None and b is None:
return "[Both a and b are None]"
out = ""
if a is None:
out += "[a is None]\n"
elif b is None:
out += "[a is None]\n"
a = "" if a is None else a
b = "" if b is None else b
result = d.compare(a.splitlines(keepends=True), b.splitlines(keepends=True))
out += "".join(result)
return out
|
Compute the diff between two strings
|
diff_strings
|
python
|
ploomber/ploomber
|
src/ploomber/codediffer.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/codediffer.py
|
Apache-2.0
|
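The helper above is a thin wrapper over difflib.Differ; a minimal standalone sketch of the same comparison for reference (sample strings are arbitrary):

from difflib import Differ

a = "SELECT *\nFROM table\n"
b = "SELECT id\nFROM table\n"

# Removed lines are prefixed with '-', added lines with '+', unchanged lines
# with two spaces; Differ may also emit '?' hint lines for intraline changes.
diff = "".join(
    Differ().compare(a.splitlines(keepends=True), b.splitlines(keepends=True))
)
print(diff)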
def is_different(self, a, b, a_params, b_params, extension=None):
"""
Compares code and params to determine if it's changed. Ignores top-keys
in a_params or b_params if they're not JSON serializable.
Parameters
----------
a : str
Code to compare
b : str
Code to compare
a_params : dict
Params passed to a
b_params : dict
Params passed to b
extension : str, default=None
Code extension. Used to normalize code to prevent changes such
as whitespace from triggering false positives. Normalization is only
available for .py and .sql; other languages are compared as is
Returns
-------
result : bool
True if code is different (different code or params),
False if they are the same (same code and params)
diff : str
A diff view of the differences
Notes
-----
Params comparison is ignored if either a_params or b_params is None
"""
# TODO: this can be more efficient. ie only compare source code
# if params are the same and only get diff if result is True
normalizer = self._get_normalizer(extension)
a_norm = normalizer(a)
b_norm = normalizer(b)
if a_params is None or b_params is None:
outdated_params = False
else:
a_params_ = remove_non_serializable_top_keys(a_params)
b_params_ = remove_non_serializable_top_keys(b_params)
outdated_params = a_params_ != b_params_
result = outdated_params or (a_norm != b_norm)
# TODO: improve diff view, also show a params diff view. probably
# we need to normalize them first (maybe using pprint?) then take
# the diff
diff = self.get_diff(a_norm, b_norm, normalize=False)
return result, diff
|
Compares code and params to determine if it's changed. Ignores top-keys
in a_params or b_params if they're not JSON serializable.
Parameters
----------
a : str
Code to compare
b : str
Code to compare
a_params : dict
Params passed to a
b_params : dict
Params passed to b
extension : str, default=None
Code extension. Used to normalize code to prevent changes such
as whitespace from triggering false positives. Normalization is only
available for .py and .sql; other languages are compared as is
Returns
-------
result : bool
True if code is different (different code or params),
False if they are the same (same code and params)
diff : str
A diff view of the differences
Notes
-----
Params comparison is ignored if either a_params or b_params is None
|
is_different
|
python
|
ploomber/ploomber
|
src/ploomber/codediffer.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/codediffer.py
|
Apache-2.0
|
def _get_normalizer(self, extension):
"""Get the normalizer function for a given extension"""
if extension in self.NORMALIZERS:
return self.NORMALIZERS[extension]
else:
return normalize_null
|
Get the normalizer function for a given extension
|
_get_normalizer
|
python
|
ploomber/ploomber
|
src/ploomber/codediffer.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/codediffer.py
|
Apache-2.0
|
def find_entry_point_type(entry_point):
"""
Step 1: If ENTRY_POINT is not defined and no value is passed, a default
value is used (pipeline.yaml for CLI, recursive lookup for Jupyter client).
If ENTRY_POINT is defined, it overrides the default value; a value passed
explicitly takes precedence over both. Once the value is determined:
Step 2: If the value is a valid directory, the DAG is loaded from that
directory; if it's a file, it's loaded from that file (spec); otherwise,
it's interpreted as a dotted path
"""
type_ = try_to_find_entry_point_type(entry_point)
if type_:
return type_
else:
if Path(entry_point).suffix in {".yaml", ".yml"}:
raise ValueError(
"Could not determine the entry point type from value: "
f"{entry_point!r}. The file does not exist."
)
else:
raise ValueError(
"Could not determine the entry point type from value: "
f"{entry_point!r}. Expected "
"an existing file with extension .yaml or .yml, "
"existing directory, glob-like pattern "
"(i.e., *.py) or dotted path "
"(i.e., module.sub_module.factory_function)."
)
|
Step 1: If ENTRY_POINT is not defined and no value is passed, a default
value is used (pipeline.yaml for CLI, recursive lookup for Jupyter client).
If ENTRY_POINT is defined, it overrides the default value; a value passed
explicitly takes precedence over both. Once the value is determined:
Step 2: If the value is a valid directory, the DAG is loaded from that
directory; if it's a file, it's loaded from that file (spec); otherwise,
it's interpreted as a dotted path
|
find_entry_point_type
|
python
|
ploomber/ploomber
|
src/ploomber/entrypoint.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/entrypoint.py
|
Apache-2.0
|
def _to_str(self, name=None, file=None, writer_kwargs=None, show_summary=True):
"""
Return the string representation of the collected messages
Parameters
----------
name
Title to show at the end
file
Text stream to use. If None, uses a temporary StringIO object
writer_kwargs
Extra keyword arguments passed to the terminal writer
"""
writer_kwargs = writer_kwargs or dict()
if file is None:
sio = StringIO()
else:
sio = file
sio.write("\n")
self.tw = TerminalWriter(file=sio)
if name:
self.tw.sep("=", title=name, **writer_kwargs)
else:
self.tw.sep("=", **writer_kwargs)
for msg in self.messages:
self.tw.sep("-", title=msg.header, **writer_kwargs)
sub_header = msg.sub_header
if sub_header:
self.tw.sep("-", title=sub_header, **writer_kwargs)
self.tw._write_source(msg.message.splitlines(), lexer="pytb")
if show_summary:
n = len(self)
t = "task" if n == 1 else "tasks"
self.tw.sep("=", title=f"Summary ({n} {t})", **writer_kwargs)
for msg in self.messages:
# TODO: include original exception type and error message in
# summary
self.tw.write(f"{msg.header}\n")
if name:
self.tw.sep("=", title=name, **writer_kwargs)
else:
self.tw.sep("=", **writer_kwargs)
sio.seek(0)
out = sio.read()
if file is None:
sio.close()
return out
|
Return the string representation of the collected messages
Parameters
----------
name
Title to show at the end
file
Text stream to use. If None, uses a temporary StringIO object
writer_kwargs
Extra keyword arguments passed to the terminal writer
|
_to_str
|
python
|
ploomber/ploomber
|
src/ploomber/messagecollector.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/messagecollector.py
|
Apache-2.0
|
def _run_command(path, command):
"""Safely run command in certain path"""
if not Path(path).is_dir():
raise ValueError("{} is not a directory".format(path))
out = subprocess.check_output(
shlex.split(command), cwd=str(path), stderr=subprocess.PIPE
)
s = out.decode("utf-8")
# remove trailing \n
if s[-1:] == "\n":
s = s[:-1]
return s
|
Safely run command in certain path
|
_run_command
|
python
|
ploomber/ploomber
|
src/ploomber/repo.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/repo.py
|
Apache-2.0
|
def is_repo(path):
"""Check if the path is in a git repo"""
if path is None:
return False
if not shutil.which("git"):
return False
out = subprocess.run(
["git", "-C", str(path), "rev-parse"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
repo_exists = out.returncode == 0
if repo_exists:
try:
# edge case: if the repo doesn't have any commits, the following
# will fail. we require a repo with at least one commit for git
# to work
git_hash(path)
except subprocess.CalledProcessError:
return False
else:
return True
|
Check if the path is in a git repo
|
is_repo
|
python
|
ploomber/ploomber
|
src/ploomber/repo.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/repo.py
|
Apache-2.0
|
def data_preprocessing(self, values):
"""Create a build report from several tasks"""
# in case the pipeline has no tasks...
elapsed = values.get("Elapsed (s)", [])
total = sum(elapsed)
def compute_pct(elapsed, total):
if not elapsed:
return 0
else:
return 100 * elapsed / total
values["Percentage"] = [compute_pct(r, total) for r in elapsed]
return values
|
Create a build report from several tasks
|
data_preprocessing
|
python
|
ploomber/ploomber
|
src/ploomber/table.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/table.py
|
Apache-2.0
|
def rows2columns(rows):
"""Convert [{key: value}, {key: value2}] to [{key: [value, value2]}]"""
if not len(rows):
return {}
cols_combinations = set(tuple(sorted(row.columns)) for row in rows)
if len(cols_combinations) > 1:
raise KeyError(
"All rows should have the same columns, got: "
"{}".format(cols_combinations)
)
columns = rows[0].columns
return {col: [row[col] for row in rows] for col in columns}
|
Convert [{key: value}, {key: value2}] to [{key: [value, value2]}]
|
rows2columns
|
python
|
ploomber/ploomber
|
src/ploomber/table.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/table.py
|
Apache-2.0
|
def wrap_table_dict(table_dict, column_width, exclude):
"""Wraps a columns to take at most column_width characters
Parameters
----------
column_width : int, 'auto' or None
Width per column. Splits evenly if 'auto', does not wrap if None
exclude : list
Exclude columns from wrapping (show them in a single line)
"""
exclude = exclude or []
if column_width is None:
return table_dict
if column_width == "auto":
column_width = calculate_wrapping(
table_dict,
do_not_wrap=exclude,
width_total=shutil.get_terminal_size().columns,
)
# NOTE: the output of this algorithm may return a table that does not use
# between 0 and {column - 1} characters. We could always take all the
# space available if we refactor and do not keep column_width fixed for
# all columns
wrapper = TextWrapper(
width=column_width, break_long_words=True, break_on_hyphens=True
)
return apply_wrapping(table_dict, wrapper, exclude=exclude)
|
Wraps columns to take at most column_width characters
Parameters
----------
column_width : int, 'auto' or None
Width per column. Splits evenly if 'auto', does not wrap if None
exclude : list
Exclude columns from wrapping (show them in a single line)
|
wrap_table_dict
|
python
|
ploomber/ploomber
|
src/ploomber/table.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/table.py
|
Apache-2.0
|
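For reference, the TextWrapper configuration used above behaves like this standalone sketch (the sample text is arbitrary):

from textwrap import TextWrapper

wrapper = TextWrapper(width=20, break_long_words=True, break_on_hyphens=True)
print(wrapper.fill("a fairly long cell value that should wrap to twenty characters"))
# a fairly long cell
# value that should
# wrap to twenty
# characters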
def separator_width(header_length, max_value_length):
"""
Calculates the width of the '---' line that separates header from content
"""
n_value_extra = header_length - max_value_length
if n_value_extra >= -2:
return header_length + 2
else:
return max_value_length
|
Calculates the width of the '---' line that separates header from content
|
separator_width
|
python
|
ploomber/ploomber
|
src/ploomber/table.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/table.py
|
Apache-2.0
|
def width_required_for_column(header, values):
"""
Spaced needed to display column in a single line, accounts for the two
extra characters that the tabulate package adds to the header when the
content is too short
"""
values_max = -1 if not values else max(len(str(v)) for v in values)
return max(values_max, separator_width(len(header), values_max))
|
Space needed to display a column on a single line; accounts for the two
extra characters that the tabulate package adds to the header when the
content is too short
|
width_required_for_column
|
python
|
ploomber/ploomber
|
src/ploomber/table.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/table.py
|
Apache-2.0
|
def calculate_wrapping(table_dict, do_not_wrap, width_total):
"""
Determines the column width by keeping some columns unwrapped (show all
rows, including the header in a single line) and distributing the
remaining space evenly. Accounts for the between-column spacing.
"""
# space required to display a given column on a single column
width_required = {
header: width_required_for_column(header, values)
for header, values in table_dict.items()
}
# TODO: pass set(table_dict) instead of table_dict
column_width = _calculate_wrapping(
table_dict, do_not_wrap, width_total, width_required
)
return column_width
|
Determines the column width by keeping some columns unwrapped (show all
rows, including the header in a single line) and distributing the
remaining space evenly. Accounts for the between-column spacing.
|
calculate_wrapping
|
python
|
ploomber/ploomber
|
src/ploomber/table.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/table.py
|
Apache-2.0
|
def equal_column_width(n_cols, width_total):
"""
Max column width if splitting width_total equally among n_cols. Note
that before computing the column width, a quantity is subtracted to
account for the spacing required between columns
"""
if not n_cols:
raise ValueError("n_cols must be >0")
offset = (n_cols - 1) * _BETWEEN_COLUMN_WIDTH
width_remaining = width_total - offset
width_column = int(width_remaining / n_cols)
# degenerate case: not even a single space to display. Return width of
# 1 but show a warning, since the table will be illegible
if width_column < 1:
warn(
f"Not enough space to display {n_cols} columns with "
f"a width of {width_total}. Using a column width of 1"
)
return 1
return width_column
|
Max column width if splitting width_total equally among n_cols. Note
that before computing the column width, a quantity is subtracted to
account for the spacing required between columns
|
equal_column_width
|
python
|
ploomber/ploomber
|
src/ploomber/table.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/table.py
|
Apache-2.0
|
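A worked example of the arithmetic in equal_column_width, assuming a between-column spacing of 2 (the real value of _BETWEEN_COLUMN_WIDTH lives in ploomber's table module, so this number is an assumption):

# Hypothetical numbers: 4 columns in an 80-character terminal with a
# between-column spacing of 2.
n_cols = 4
width_total = 80
between_column_width = 2  # assumed value of _BETWEEN_COLUMN_WIDTH

offset = (n_cols - 1) * between_column_width   # 6 characters of spacing
width_remaining = width_total - offset         # 74
width_column = width_remaining // n_cols       # 18 characters per column
print(offset, width_remaining, width_column)   # 6 74 18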
def apply_wrapping(table_dict, wrapper, exclude=None):
"""
Wrap text using a wrapper, excluding columns in exclude
"""
exclude = exclude or []
return dict(
apply_wrapping_to_column(header, values, exclude, wrapper)
for header, values in table_dict.items()
)
|
Wrap text using a wrapper, excluding columns in exclude
|
apply_wrapping
|
python
|
ploomber/ploomber
|
src/ploomber/table.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/table.py
|
Apache-2.0
|
def wrap_elementwise(value, wrapper):
"""Apply wrap if str (elementwise if iterable of str)"""
if isinstance(value, Iterable) and not isinstance(value, str):
return [wrapper.fill(str(v)) for v in value]
else:
return wrapper.fill(str(value))
|
Apply wrap if str (elementwise if iterable of str)
|
wrap_elementwise
|
python
|
ploomber/ploomber
|
src/ploomber/table.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/table.py
|
Apache-2.0
|
def assert_no_extra_attributes_in_class(abstract_class, concrete_class, allowed=None):
"""
Ploomber makes heavy use of abstract classes to provide a uniform API for
tasks, products, metadata, etc. When defining abstract classes, the
interpreter refuses to instantiate an object where the concrete class
misses implementation for abstract methods. However, it does not complain
if the concrete class implements *extra methods*
This has been problematic to remove old code. As we simplify the API,
sometimes concrete classes become outdated. For example, before creating
Metadata, all metadata logic was embedded in the Product objects, when
we made the change, we removed some abstract methods from the Product class
but it took a long time to realize that we should've removed these methods
from the MetaProduct class as well. We use this function to alert us
when there are things we can remove.
The other case also happens: we add functionality to concrete classes but
we do not do it in the abstract class; when this happens we have to decide
whether to add them to the abstract class (recommended) or make an
exception. In that case, those new methods should be named with a double
leading underscore to be ignored by this check and to prevent polluting the
public interface. Single leading underscore methods are checked to allow
abstract classes to define their own private API, which is also important
for consistency, even if the end user does not use these methods.
Convention:
- no leading underscore: public API
- one leading underscore: private Ploomber API. Not meant to be used by
end-users but can be used by developers
- two leading underscores: private class API. Not meant to be used
outside the implementation of the class itself. Abstract classes
should not define these; these are intended to carry logic
specific to concrete classes
NOTE: maybe a better alternative to allowed is to create an abstract
class that adds new abstract methods
"""
allowed = allowed or set()
# allow "private" methods
preffixes = [
"_{}__".format(class_.__name__) for class_ in concrete_class.__bases__
] + ["__", "_", "_{}__".format(concrete_class.__name__)]
extra_attrs = {
attr
for attr in set(dir(concrete_class)) - set(dir(abstract_class))
if not any(attr.startswith(p) for p in preffixes)
} - allowed
if extra_attrs:
raise ValueError(
"The following methods/attributes in {} "
"are not part of the {} interface: {}".format(
concrete_class.__name__, abstract_class.__name__, extra_attrs
)
)
|
Ploomber makes heavy use of abstract classes to provide a uniform API for
tasks, products, metadata, etc. When defining abstract classes, the
interpreter refuses to instantiate an object where the concrete class
misses implementation for abstract methods. However, it does not complain
if the concrete class implements *extra methods*
This has been problematic to remove old code. As we simplify the API,
sometimes concrete classes become outdated. For example, before creating
Metadata, all metadata logic was embedded in the Product objects, when
we made the change, we removed some abstract methods from the Product class
but it took a long time to realize that we should've removed these methods
from the MetaProduct class as well. We use this function to alert us
when there are things we can remove.
The other case also happens: we add functionality to concrete classes but
we do not do it in the abstract class; when this happens we have to decide
whether to add them to the abstract class (recommended) or make an
exception. In that case, those new methods should be named with a double
leading underscore to be ignored by this check and to prevent polluting the
public interface. Single leading underscore methods are checked to allow
abstract classes to define their own private API, which is also important
for consistency, even if the end user does not use these methods.
Convention:
- no leading underscore: public API
- one leading underscore: private Ploomber API. Not meant to be used by
end-users but can be used by developers
- two leading underscores: private class API. Not meant to be used
outside the implementation of the class itself. Abstract classes
should not define these, these are intended to carry logic
specific to concrete classes
NOTE: maybe a better alternative to allowed is to create an abstract
class that adds new abstract methods
|
assert_no_extra_attributes_in_class
|
python
|
ploomber/ploomber
|
src/ploomber/_testing_utils.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/_testing_utils.py
|
Apache-2.0
|
def _delete_git_repo(path):
"""
If on Windows, we need to change permissions to delete the repo
"""
path_to_repo = Path(path, ".git")
if os.name == "nt" and path_to_repo.exists():
for root, dirs, files in os.walk(path_to_repo):
for dir_ in dirs:
os.chmod(Path(root, dir_), stat.S_IRWXU)
for file_ in files:
os.chmod(Path(root, file_), stat.S_IRWXU)
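A small hypothetical sketch of how a helper like this is used before removing a cloned repository; the directory layout below is made up and the call assumes _delete_git_repo is in scope.

import shutil
import tempfile
from pathlib import Path

repo = Path(tempfile.mkdtemp(), "cloned-project")
(repo / ".git" / "objects").mkdir(parents=True)

_delete_git_repo(repo)  # relaxes permissions on Windows, a no-op elsewhere
shutil.rmtree(repo)     # now safe to delete on every platform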
|
If on Windows, we need to change permissions to delete the repo
|
_delete_git_repo
|
python
|
ploomber/ploomber
|
src/ploomber/cli/examples.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/examples.py
|
Apache-2.0
|
def main(use_lock, create_env=None, use_venv=False):
"""
Install project, automatically detecting if it's a conda-based or pip-based
project.
Parameters
----------
use_lock : bool
If True, uses requirements.lock.txt/environment.lock.yml and
requirements.dev.lock.txt/environment.dev.lock.yml files. If False
uses regular files and creates the lock ones after installing
dependencies. If None, it uses lock files if they exist, if they don't
it uses regular files
create_env : bool, default=None
If True, creates a new environment, if False, it installs in the
current environment. If None, it creates a new environment if there
isn't one already active
use_venv : bool, default=False
Force to use Python's venv module, ignoring conda if installed
"""
USE_CONDA = shutil.which("conda") and not use_venv
ENV_YML_EXISTS = Path(_ENV_YML).exists()
ENV_LOCK_YML_EXISTS = Path(_ENV_LOCK_YML).exists()
REQS_TXT_EXISTS = Path(_REQS_TXT).exists()
REQS_LOCK_TXT_EXISTS = Path(_REQS_LOCK_TXT).exists()
if use_lock is None:
if USE_CONDA:
use_lock = ENV_LOCK_YML_EXISTS
else:
use_lock = REQS_LOCK_TXT_EXISTS
if use_lock and not ENV_LOCK_YML_EXISTS and not REQS_LOCK_TXT_EXISTS:
raise BaseException(
"Expected an environment.lock.yaml "
"(conda) or requirements.lock.txt (pip) in the current "
"directory. Add one of them and try again.",
type_="no_lock",
)
elif not use_lock and not ENV_YML_EXISTS and not REQS_TXT_EXISTS:
raise BaseException(
"Expected an environment.yaml (conda)"
" or requirements.txt (pip) in the current directory."
" Add one of them and try again.",
type_="no_env_requirements",
)
elif (
not USE_CONDA and use_lock and ENV_LOCK_YML_EXISTS and not REQS_LOCK_TXT_EXISTS
):
raise BaseException(
"Found env environment.lock.yaml "
"but conda is not installed. Install conda or add a "
"requirements.lock.txt to use pip instead",
type_="no_conda",
)
elif not USE_CONDA and not use_lock and ENV_YML_EXISTS and not REQS_TXT_EXISTS:
raise BaseException(
"Found environment.yaml but conda is not installed."
" Install conda or add a requirements.txt to use pip instead",
type_="no_conda2",
)
elif USE_CONDA and use_lock and ENV_LOCK_YML_EXISTS:
# TODO: emit warnings if unused requirements.txt?
main_conda(
use_lock=True,
create_env=(
create_env if create_env is not None else _should_create_conda_env()
),
)
elif USE_CONDA and not use_lock and ENV_YML_EXISTS:
# TODO: emit warnings if unused requirements.txt?
main_conda(
use_lock=False,
create_env=(
create_env if create_env is not None else _should_create_conda_env()
),
)
else:
# TODO: emit warnings if unused environment.yml?
main_pip(
use_lock=use_lock,
create_env=create_env if create_env is not None else not _in_virtualenv(),
)
|
Install project, automatically detecting if it's a conda-based or pip-based
project.
Parameters
----------
use_lock : bool
If True, uses requirements.lock.txt/environment.lock.yml and
requirements.dev.lock.txt/environment.dev.lock.yml files. If False
uses regular files and creates the lock ones after installing
dependencies. If None, it uses lock files if they exist, if they don't
it uses regular files
create_env : bool, default=None
If True, creates a new environment, if False, it installs in the
current environment. If None, it creates a new environment if there
isn't one already active
use_venv : bool, default=False
Force to use Python's venv module, ignoring conda if installed
|
main
|
python
|
ploomber/ploomber
|
src/ploomber/cli/install.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/install.py
|
Apache-2.0
|
def _get_base_prefix_compat():
"""
This function will find the pip virtualenv with different python versions.
Get base/real prefix, or sys.prefix if there is none.
"""
return (
getattr(sys, "base_prefix", None)
or sys.prefix
or getattr(sys, "real_prefix", None)
)
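A brief sketch of how such a helper is commonly used to detect a virtual environment, assuming _get_base_prefix_compat above is in scope (it lives in ploomber.cli.install per this record); the in_virtualenv wrapper is illustrative and not necessarily ploomber's exact check.

import sys


def in_virtualenv():
    # inside a venv/virtualenv, sys.prefix differs from the base prefix
    return _get_base_prefix_compat() != sys.prefix


print(in_virtualenv())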
|
This function will find the pip virtualenv with different python versions.
Get base/real prefix, or sys.prefix if there is none.
|
_get_base_prefix_compat
|
python
|
ploomber/ploomber
|
src/ploomber/cli/install.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/install.py
|
Apache-2.0
|
def main_pip(use_lock, create_env=True):
"""
Install pip-based project (uses venv), looks for requirements.txt files
Parameters
----------
start_time : datetime
The initial runtime of the function.
use_lock : bool
If True, uses requirements.txt and requirements.dev.lock.txt files
create_env : bool
If True, it uses the venv module to create a new virtual environment,
then installs the dependencies, otherwise it installs the dependencies
in the current environment
"""
reqs_txt = _REQS_LOCK_TXT if use_lock else _REQS_TXT
reqs_dev_txt = "requirements.dev.lock.txt" if use_lock else "requirements.dev.txt"
cmdr = Commander()
# TODO: modify readme to add how to activate env? probably also in conda
name = Path(".").resolve().name
try:
_run_pip_commands(cmdr, create_env, name, reqs_dev_txt, reqs_txt, use_lock)
except Exception as e:
cmd = f"pip install --requirement {reqs_txt}"
raise BaseException(
"Failed to setup your environment. " f"Invoke pip manually.\n{cmd}\n\n"
) from e
|
Install pip-based project (uses venv), looks for requirements.txt files
Parameters
----------
start_time : datetime
The initial runtime of the function.
use_lock : bool
If True, uses requirements.txt and requirements.dev.lock.txt files
create_env : bool
If True, it uses the venv module to create a new virtual environment,
then installs the dependencies, otherwise it installs the dependencies
in the current environment
|
main_pip
|
python
|
ploomber/ploomber
|
src/ploomber/cli/install.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/install.py
|
Apache-2.0
|
def main_conda(use_lock, create_env=True):
"""
Install conda-based project, looks for environment.yml files
Parameters
----------
use_lock : bool
If True, uses environment.lock.yml and environment.dev.lock.yml files
create_env : bool
If True, it uses the venv module to create a new virtual environment,
then installs the dependencies, otherwise it installs the dependencies
in the current environment
"""
env_yml = _ENV_LOCK_YML if use_lock else _ENV_YML
# TODO: ensure ploomber-scaffold includes dependency file (including
# lock files in MANIFEST.in
cmdr = Commander()
# TODO: provide helpful error messages on each command
if create_env:
with open(env_yml) as f:
env_name = yaml.safe_load(f)["name"]
current_env = _current_conda_env_name()
if env_name == current_env:
err = (
f"{env_yml} will create an environment "
f"named {env_name!r}, which is the current active "
"environment. Activate a different one and try "
"again: conda activate base"
)
raise BaseException(err)
else:
env_name = _current_conda_env_name()
# get current installed envs
conda = shutil.which("conda")
mamba = shutil.which("mamba")
# if already installed and running on windows, ask to delete first,
# otherwise it might lead to an intermittent error (permission denied
# on vcruntime140.dll)
if os.name == "nt" and create_env:
envs = cmdr.run(conda, "env", "list", "--json", capture_output=True)
already_installed = any(
[
env
for env in json.loads(envs)["envs"]
# only check in the envs folder, ignore envs in other locations
if "envs" in env and env_name in env
]
)
if already_installed:
err = (
f"Environment {env_name!r} already exists, "
f"delete it and try again "
f"(conda env remove --name {env_name})"
)
raise BaseException(err)
pkg_manager = mamba if mamba else conda
try:
_run_conda_commands(
cmdr, pkg_manager, create_env, env_yml, env_name, use_lock, conda
)
except Exception as e:
if create_env:
cmd = f"conda env create --file {env_yml} {conda_compat.force_or_yes}"
else:
cmd = f"conda env update --file {env_yml} --name {env_name}"
raise BaseException(
"Failed to setup your environment. " f"Invoke conda manually.\n{cmd}\n\n"
) from e
|
Install conda-based project, looks for environment.yml files
Parameters
----------
use_lock : bool
If True, uses environment.lock.yml and environment.dev.lock.yml files
create_env : bool
If True, it uses the venv module to create a new virtual environment,
then installs the dependencies, otherwise it installs the dependencies
in the current environment
|
main_conda
|
python
|
ploomber/ploomber
|
src/ploomber/cli/install.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/install.py
|
Apache-2.0
|
def _is_conda():
"""
The function will tell if the code is running in a conda env
"""
conda_path = Path(sys.prefix, "conda-meta")
return (
conda_path.exists()
or os.environ.get("CONDA_PREFIX", False)
or os.environ.get("CONDA_DEFAULT_ENV", False)
)
|
The function will tell if the code is running in a conda env
|
_is_conda
|
python
|
ploomber/ploomber
|
src/ploomber/cli/install.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/install.py
|
Apache-2.0
|
def _locate_pip_inside_conda(env_name):
"""
Locates pip inside the conda env with a given name
"""
pip = _path_to_pip_in_env_with_name(shutil.which("conda"), env_name)
# this might happen if the environment does not contain python/pip
if not Path(pip).exists():
err = (
f"Could not locate pip in environment {env_name!r}, make sure "
"it is included in your environment.yml and try again"
)
raise BaseException(err)
return pip
|
Locates pip inside the conda env with a given name
|
_locate_pip_inside_conda
|
python
|
ploomber/ploomber
|
src/ploomber/cli/install.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/install.py
|
Apache-2.0
|
def _pip_install(cmdr, pip, lock, requirements=_REQS_TXT):
"""Install and freeze requirements
Parameters
----------
cmdr
Commander instance
pip
Path to pip binary
lock
If true, locks dependencies and stores them in a requirements.lock.txt
"""
cmdr.run(
pip,
"install",
"--requirement",
requirements,
description="Installing dependencies",
)
if lock:
pip_lock = cmdr.run(
pip,
"freeze",
"--exclude-editable",
description="Locking dependencies",
capture_output=True,
)
check_mixed_envs(pip_lock)
name = Path(requirements).stem
Path(f"{name}.lock.txt").write_text(pip_lock)
|
Install and freeze requirements
Parameters
----------
cmdr
Commander instance
pip
Path to pip binary
lock
If true, locks dependencies and stores them in a requirements.lock.txt
|
_pip_install
|
python
|
ploomber/ploomber
|
src/ploomber/cli/install.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/install.py
|
Apache-2.0
|
def cli_endpoint(fn):
"""
Decorator for command line endpoints that execute dags or tasks. It runs
the decorated function, captures exception (if any), sends a colored
traceback to standard error and exits with code 1.
Notes
-----
This will hide the traceback when raising subclasses of
ploomber.exceptions.BaseException. To display the traceback, set the
PLOOMBER_DEBUG variable to true. To start a post-mortem session, set
PLOOMBER_POST_MORTEM to true.
Functions decorated with this must be called with keyword arguments
Call some_endpoint(catch_exception=False) to disable this behavior (e.g.
for testing)
"""
@wraps(fn)
def wrapper(catch_exception=True, **kwargs):
if os.environ.get("PLOOMBER_DEBUG"):
catch_exception = False
if catch_exception:
try:
fn(**kwargs)
# these already color output
except (DAGBuildError, DAGRenderError) as e:
error = str(e)
color = False
# for base exceptions (we raise this), we display the message
# in red (no traceback since it's irrelevant for the user)
except BaseException as e:
click.secho(e.get_message(), file=sys.stderr, fg="red")
sys.exit(1)
# this means it's an unknown error (either a bug in ploomber or
# an error in the user's code). we display the full traceback,
# but still hide irrelevant tracebacks (i.e. if a nested exception
# is raised where some of the exceptions are TaskBuildError)
except Exception as e:
error = _format.exception(e)
color = True
else:
error = None
if error:
if color:
tw = TerminalWriter(file=sys.stderr)
tw._write_source(error.splitlines())
else:
print(error, file=sys.stderr)
sys.exit(1)
else:
if os.environ.get("PLOOMBER_POST_MORTEM"):
try:
fn(**kwargs)
except Exception:
_, _, tb = sys.exc_info()
pdb.post_mortem(tb)
else:
fn(**kwargs)
return wrapper
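A hypothetical endpoint decorated with cli_endpoint; the status function and its argument are made up, but the keyword-only call style and the catch_exception=False escape hatch follow the docstring above.

@cli_endpoint
def status(entry_point=None):
    print(f"checking {entry_point!r}")


# endpoints decorated this way must be called with keyword arguments
status(entry_point="pipeline.yaml")

# in tests, let exceptions propagate instead of exiting with code 1
status(catch_exception=False, entry_point="pipeline.yaml")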
|
Decorator for command line endpoints that execute dags or tasks. It runs
the decorated function, captures exception (if any), sends a colored
traceback to standard error and exits with code 1.
Notes
-----
This will hide the traceback when raising subclasses of
ploomber.exceptions.BaseException. To display the traceback, set the
PLOOMBER_DEBUG variable to true. To start a post-mortem session, set
PLOOMBER_POST_MORTEM to true.
Functions decorated with this must be called with keyword arguments
Call some_endpoint(catch_exception=False) to disable this behavior (e.g.
for testing)
|
cli_endpoint
|
python
|
ploomber/ploomber
|
src/ploomber/cli/io.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/io.py
|
Apache-2.0
|
def command_endpoint(fn):
"""
Decorator for command line endpoints that only parse dags or tasks but do
not execute them. If it fails, it prints the error message to stderr, then
exits with code 1.
"""
@wraps(fn)
def wrapper(**kwargs):
try:
fn(**kwargs)
# echo error message when it's a subclass Exception
except BaseException as e:
click.secho(e.get_message(), file=sys.stderr, fg="red")
sys.exit(1)
# show the full traceback if it's not a subclass Exception
except Exception as e:
error = _format.exception(e) # get the traceback
if error:
tw = TerminalWriter(
file=sys.stderr
) # write to terminal all the traceback
tw._write_source(error.splitlines())
else:
print(f"Error: {e}", file=sys.stderr)
sys.exit(1)
return wrapper
|
Decorator for command line endpoints that only parse dags or tasks but do
not execute them. If it fails, it prints the error message to stderr, then
exits with code 1.
|
command_endpoint
|
python
|
ploomber/ploomber
|
src/ploomber/cli/io.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/io.py
|
Apache-2.0
|
def _call_in_source(dag, method_name, message, kwargs=None, verbose=True):
"""
Execute method on each task.source in dag, passing kwargs
"""
kwargs = kwargs or {}
files = []
results = []
for task in dag.values():
ok_to_inject_task = True
if "priority" in kwargs:
ok_to_inject_task = task.name in kwargs["priority"]
if ok_to_inject_task:
try:
method = getattr(task.source, method_name)
except AttributeError:
pass
else:
results.append(method(**kwargs))
files.append(str(task.source._path))
files_ = "\n".join((f" {f}" for f in files))
if verbose:
click.echo(f"{message}:\n{files_}")
return results
|
Execute method on each task.source in dag, passing kwargs
|
_call_in_source
|
python
|
ploomber/ploomber
|
src/ploomber/cli/nb.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/nb.py
|
Apache-2.0
|
def _install_hook(path_to_hook, content, entry_point):
"""
Install a git hook script at the given path
"""
if path_to_hook.exists():
raise RuntimeError(
"hook already exists "
f'at {path_to_hook}. Run: "ploomber nb -u" to uninstall the '
"existing hook and try again"
)
path_to_hook.write_text(content.format(entry_point=entry_point))
# make the file executable
path_to_hook.chmod(path_to_hook.stat().st_mode | stat.S_IEXEC)
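A sketch of installing a hook with the helper above; the hook body and entry point value are made up, the target is a temporary folder rather than a real .git/hooks directory, and _install_hook is assumed to be in scope.

import tempfile
from pathlib import Path

hooks = Path(tempfile.mkdtemp(), "hooks")
hooks.mkdir()

content = "#!/bin/sh\nploomber nb --inject --entry-point {entry_point}\n"
_install_hook(hooks / "post-checkout", content, "pipeline.yaml")
print((hooks / "post-checkout").read_text())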
|
Install a git hook script at the given path
|
_install_hook
|
python
|
ploomber/ploomber
|
src/ploomber/cli/nb.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/nb.py
|
Apache-2.0
|
def _delete_hook(path):
"""Delete a git hook at the given path"""
if path.exists():
if path.is_file():
path.unlink()
else:
# in the remote case that it's a directory
shutil.rmtree(path)
click.echo(f"Deleted hook located at {path}")
|
Delete a git hook at the given path
|
_delete_hook
|
python
|
ploomber/ploomber
|
src/ploomber/cli/nb.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/nb.py
|
Apache-2.0
|
def _py_with_single_click_enable():
"""
Writes ~/.jupyter/labconfig/default_setting_overrides.json to enable
opening .py files as notebooks with a single click. If the section already
exists, it overrides its value
"""
parent = Path("~/.jupyter", "labconfig").expanduser()
path = parent / "default_setting_overrides.json"
if path.exists():
target = json.loads(path.read_text())
else:
target = {}
recursive_update(target, json.loads(_jupyterlab_default_settings_overrides))
click.echo(f"Overriding JupyterLab defaults at: {str(path)}")
parent.mkdir(exist_ok=True, parents=True)
path.write_text(json.dumps(target))
click.secho(
"Done. You can now open .py and other formats in JupyterLab "
"with a single click. You may need to reload JupyterLab",
fg="green",
)
|
Writes ~/.jupyter/labconfig/default_setting_overrides.json to enable
opening .py files as notebooks with a single click. If the section already
exists, it overrides its value
|
_py_with_single_click_enable
|
python
|
ploomber/ploomber
|
src/ploomber/cli/nb.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/nb.py
|
Apache-2.0
|
def _py_with_single_click_disable():
"""
Opens ~/.jupyter/labconfig/default_setting_overrides.json and deletes
the value in
['@jupyterlab/docmanager-extension:plugin']['defaultViewers'], if any
"""
parent = Path("~/.jupyter", "labconfig")
target = (parent / "default_setting_overrides.json").expanduser()
if target.exists():
content = json.loads(target.read_text())
key1 = "@jupyterlab/docmanager-extension:plugin"
key2 = "defaultViewers"
if content.get(key1, {}).get(key2):
del content[key1][key2]
if key1 in content and not content.get(key1):
del content[key1]
Path(target).write_text(json.dumps(content))
click.secho(
"Done. Disabled opening .py files and other formats in JupyterLab "
"with a single click. You may need to reload JupyterLab",
fg="yellow",
)
|
Opens ~/.jupyter/labconfig/default_setting_overrides.json and deletes
the value in
['@jupyterlab/docmanager-extension:plugin']['defaultViewers'], if any
|
_py_with_single_click_disable
|
python
|
ploomber/ploomber
|
src/ploomber/cli/nb.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/nb.py
|
Apache-2.0
|
def parse_entry_point_value(self):
"""
Returns the entry_point value pased without calling parse_args(),
this is required to find env params to show, if we call parse_args()
the CLI stops there and shows available params
"""
index = None
try:
index = sys.argv.index("--entry-point")
except ValueError:
pass
try:
index = sys.argv.index("-e")
except ValueError:
pass
# no --entry-point/-e arg passed, use default
if index is None:
if self.DEFAULT_ENTRY_POINT is None:
self.error(
"Unable to find a pipeline. "
"Use --entry-point/-e to pass a "
"entry point's location or "
"place it in a standard location.\n\n"
"Otherwise check if your pipeline have "
".yml as extension, "
"change it to .yaml instead.\n\n"
"Need help? https://ploomber.io/community"
)
return self.DEFAULT_ENTRY_POINT
else:
try:
return sys.argv[index + 1]
except IndexError:
pass
# replicate the original message emitted by argparse
action = self._option_string_actions["-e"]
options = "/".join(action.option_strings)
self.error(f"argument {options}: expected one argument")
|
Returns the entry_point value passed without calling parse_args(),
this is required to find env params to show, if we call parse_args()
the CLI stops there and shows available params
|
parse_entry_point_value
|
python
|
ploomber/ploomber
|
src/ploomber/cli/parsers.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/parsers.py
|
Apache-2.0
|
def add_argument(self, *args, **kwargs):
"""
Add a CLI argument. If called after the context manager, it is
considered part of the dynamic API, if called within the context
manager, the arg is considered part of the static API. If it's
called outside a context manager, and no static API has been set,
it raises an error
"""
if not self.finished_static_api:
if not self.in_context and self.finished_init:
raise RuntimeError(
"Cannot add arguments until the static " "API has been declared"
)
else:
# running inside the context manager
self.static_args.extend([process_arg(arg) for arg in args])
# outside context manager
return super().add_argument(*args, **kwargs)
|
Add a CLI argument. If called after the context manager, it is
considered part of the dynamic API, if called within the context
manager, the arg is considered part of the static API. If it's
called outside a context manager, and no static API has been set,
it raises an error
|
add_argument
|
python
|
ploomber/ploomber
|
src/ploomber/cli/parsers.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/parsers.py
|
Apache-2.0
|
def add_mutually_exclusive_group(self, **kwargs):
"""
Add a mutually exclusive group. It returns a custom class that
correctly stores the arguments in the static or dynamic API
"""
group = CustomMutuallyExclusiveGroup(self, **kwargs)
self._mutually_exclusive_groups.append(group)
return group
|
Add a mutually exclusive group. It returns a custom class that
correctly stores the arguments in the static or dynamic API
|
add_mutually_exclusive_group
|
python
|
ploomber/ploomber
|
src/ploomber/cli/parsers.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/parsers.py
|
Apache-2.0
|
def process_factory_dotted_path(self, dotted_path):
"""Parse a factory entry point, returns initialized dag and parsed args"""
entry = load_dotted_path(str(dotted_path), raise_=True)
# add args using the function's signature
required, _ = _add_args_from_callable(self, entry)
# if entry point was decorated with @with_env, add arguments
# to replace declared variables in env.yaml
if hasattr(entry, "_env_dict"):
_add_cli_args_from_env_dict_keys(self, entry._env_dict)
args = self.parse_args()
_configure_logger(args)
# extract required (by using function signature) params from the cli
# args
kwargs = {key: getattr(args, key) for key in required}
# env and function defaults replaced
replaced = _env_keys_to_override(args, self.static_args)
# TODO: add a way of test this by the parameters it will use to
# call the function, have an aux function to get those then another
# to execute, test using the first one
dag = entry(**{**kwargs, **replaced})
return dag, args
|
Parse a factory entry point, returns initialized dag and parsed args
|
process_factory_dotted_path
|
python
|
ploomber/ploomber
|
src/ploomber/cli/parsers.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/parsers.py
|
Apache-2.0
|
def load_from_entry_point_arg(self):
"""
Parses an entry point, adding arguments by extracting them from
the env.
Returns a dag and the parsed args
"""
entry_point = EntryPoint(self.parse_entry_point_value())
dag, args = load_dag_from_entry_point_and_parser(entry_point, self, sys.argv)
return dag, args
|
Parses an entry point, adding arguments by extracting them from
the env.
Returns a dag and the parsed args
|
load_from_entry_point_arg
|
python
|
ploomber/ploomber
|
src/ploomber/cli/parsers.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/parsers.py
|
Apache-2.0
|
def _parse_doc(callable_):
"""
Convert a numpydoc docstring to a dictionary with parameter descriptions and a summary
"""
doc = callable_.__doc__
# no docstring
if doc is None:
return {"params": {}, "summary": None}
# try to import numpydoc; if we can't find it, just return the first line
try:
docscrape = importlib.import_module("numpydoc.docscrape")
except ModuleNotFoundError:
return {"params": {}, "summary": _first_non_empty_line(doc)}
doc_parsed = docscrape.NumpyDocString(doc)
parameters = {
p.name: {"desc": " ".join(p.desc), "type": p.type}
for p in doc_parsed["Parameters"]
}
# docscrape returns one element per line
summary = "Docstring: {}".format("\n".join(doc_parsed["Summary"]))
return {"params": parameters, "summary": summary}
|
Convert a numpydoc docstring to a dictionary with parameter descriptions and a summary
|
_parse_doc
|
python
|
ploomber/ploomber
|
src/ploomber/cli/parsers.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/parsers.py
|
Apache-2.0
|
def _env_keys_to_override(args, static_args):
"""
Returns a dictionary with all extra cli parameters passed, all these must
be parameters that are part of the env or params (with no defaults) if
entry point is a factory function
"""
return {
name: getattr(args, name)
for name in dir(args)
if not name.startswith("_")
if getattr(args, name) is not None
if name not in static_args
}
|
Returns a dictionary with all extra cli parameters passed, all these must
be parameters that are part of the env or params (with no defaults) if
entry point is a factory function
|
_env_keys_to_override
|
python
|
ploomber/ploomber
|
src/ploomber/cli/parsers.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/parsers.py
|
Apache-2.0
|
def _add_cli_args_from_env_dict_keys(parser, env_dict):
"""
Add one parameter to the args parser by taking a look at all values
defined in an env dict object
"""
# flatten keys from the env dictionary. e.g. from {'a': {'b': 1}} is
# converted to {'a--b': 1}. This allows us to add cli args such as --a--b
# to modify any key in the env. Note that we use double hyphens to have
# an unambiguous section separator. Environments are commonly loaded from
# YAML files. Keys in such files might contain hyphens/underscores, we
# allow users to have those characters but double hyphens/underscores are
# not permitted as they'd conflict with the CLI generation logic
flat_env_dict = _flatten_dict(env_dict._data)
for arg, val in flat_env_dict.items():
# do not add default keys like {{cwd}}, {{here}}
if arg not in env_dict.default_keys:
parser.add_argument("--env--" + arg, help="Default: {}".format(val))
|
Add one parameter to the args parser by taking a look at all values
defined in an env dict object
|
_add_cli_args_from_env_dict_keys
|
python
|
ploomber/ploomber
|
src/ploomber/cli/parsers.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/parsers.py
|
Apache-2.0
|
def _parse_signature_from_callable(callable_):
"""
Parse a callable signature, return a dictionary with
{param_key: default_value} and a list of required parameters
"""
sig = inspect.signature(callable_)
required = [k for k, v in sig.parameters.items() if v.default == inspect._empty]
defaults = {
k: v.default for k, v in sig.parameters.items() if v.default != inspect._empty
}
return required, defaults, sig.parameters
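A quick sketch of the return values for a small callable, assuming _parse_signature_from_callable above is in scope; the train function is made up.

def train(model_path, epochs=10):
    pass


required, defaults, params = _parse_signature_from_callable(train)
print(required)  # ['model_path']
print(defaults)  # {'epochs': 10}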
|
Parse a callable signature, return a dictionary with
{param_key: default_value} and a list of required parameters
|
_parse_signature_from_callable
|
python
|
ploomber/ploomber
|
src/ploomber/cli/parsers.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/parsers.py
|
Apache-2.0
|
def _add_args_from_callable(parser, callable_):
"""
Modifies an args parser to include parameters from a callable, adding
parameters with default values as optional and parameters with no defaults
as mandatory. Adds descriptions from parsing the callable's docstring
It also adds the description from the docstring, if any
Returns parsed args: required (list) and defaults (dict)
"""
doc = _parse_doc(callable_)
required, defaults, params = _parse_signature_from_callable(callable_)
for arg in defaults.keys():
conflict = False
try:
parser.add_argument(
"--" + arg, help=get_desc(doc, arg), **add_argument_kwargs(params, arg)
)
except argparse.ArgumentError as e:
conflict = e
if conflict:
if "conflicting option string" in conflict.message:
raise ValueError(
f"The signature from {callable_.__name__!r} "
"conflicts with existing arguments in the command-line "
"interface, please rename the following "
f"argument: {arg!r}"
)
else:
raise conflict
for arg in required:
parser.add_argument(
arg, help=get_desc(doc, arg), **add_argument_kwargs(params, arg)
)
if doc["summary"]:
desc = parser.description
parser.description = "{}. {}".format(desc, doc["summary"])
return required, defaults
|
Modifies an args parser to include parameters from a callable, adding
parameters with default values as optional and parameters with no defaults
as mandatory. Adds descriptions from parsing the callable's docstring
It also adds the description from the docstring, if any
Returns parsed args: required (list) and defaults (dict)
|
_add_args_from_callable
|
python
|
ploomber/ploomber
|
src/ploomber/cli/parsers.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/parsers.py
|
Apache-2.0
|
def _process_file_dir_or_glob(parser, dagspec_arg=None):
"""
Process a file entry point (file, directory, or glob-like pattern), returning
the initialized dag and parsed args
Parameters
----------
parser : CustomParser
CLI arg parser
"""
# NOTE: we must use parser.parse_entry_point_value() instead of
# args.parse_args because calling the latter won't allow us to add more
# cli parameters, but we want that to expose params from env
entry_point_value = dagspec_arg or parser.parse_entry_point_value()
entry = EntryPoint(entry_point_value)
if entry.type in {EntryPoint.Directory, EntryPoint.Pattern}:
# pipelines initialized from directories or patterns cannot be
# parametrized
path_to_env = None
# file
else:
path_to_env = default.path_to_env_from_spec(entry_point_value)
if path_to_env:
env_dict = EnvDict(
path_to_env,
path_to_here=(
Path(entry_point_value).parent
if entry.type == EntryPoint.File
else None
),
)
_add_cli_args_from_env_dict_keys(parser, env_dict)
args = parser.parse_args()
dagspec_arg = dagspec_arg or args.entry_point
_configure_logger(args)
entry_point = EntryPoint(dagspec_arg)
# directory
if entry_point.type == EntryPoint.Directory:
dag = DAGSpec.from_directory(dagspec_arg).to_dag()
# pattern
elif entry_point.type == EntryPoint.Pattern:
dag = DAGSpec.from_files(dagspec_arg).to_dag()
# file
else:
if path_to_env:
# and replace keys depending on passed cli args
replaced = _env_keys_to_override(args, parser.static_args)
env = env_dict._replace_flatten_keys(replaced)
dag = DAGSpec(dagspec_arg, env=env).to_dag()
else:
dag = DAGSpec(dagspec_arg).to_dag()
return dag, args
|
Process a file entry point (file, directory, or glob-like pattern), returning
the initialized dag and parsed args
Parameters
----------
parser : CustomParser
CLI arg parser
|
_process_file_dir_or_glob
|
python
|
ploomber/ploomber
|
src/ploomber/cli/parsers.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/parsers.py
|
Apache-2.0
|
def load_dag_from_entry_point_and_parser(entry_point, parser, argv):
"""Load DAG from entry point
Parameters
----------
parser : CustomParser
The cli parser object
argv : list
Command line arguments
"""
help_cmd = "--help" in argv or "-h" in argv
# if the file does not exist but the value has suffix yaml/yml, show a
# warning because the last thing to try is to interpret it as a dotted
# path and that's probably not what the user wants
if not entry_point.exists() and entry_point.suffix in {".yaml", ".yml"}:
warnings.warn(
'Entry point value "{}" has extension "{}", which '
"suggests a spec file, but the file doesn't "
"exist".format(entry_point, entry_point.suffix)
)
# even if the entry file is not a file nor a valid module, show the
# help menu, but show a warning
if help_cmd and not entry_point.exists():
warnings.warn(
'Failed to load entry point "{}". It is not a file '
"nor a valid dotted path".format(entry_point)
)
args = parser.parse_args()
# at this point there are two remaining cases:
# no help command (entry point may or may not exist):
# we attempt to run the command
# help command and exists:
# we just parse parameters to display them in the help menu
elif entry_point.type == EntryPoint.DottedPath:
dag, args = parser.process_factory_dotted_path(entry_point)
elif entry_point.type == EntryPoint.ModulePath:
dag, args = _process_file_dir_or_glob(
parser, dagspec_arg=_path_for_module_path(entry_point.value)
)
else:
# process file, directory or glob pattern
dag, args = _process_file_dir_or_glob(parser)
return dag, args
|
Load DAG from entry point
Parameters
----------
parser : CustomParser
The cli parser object
argv : list
Command line arguments
|
load_dag_from_entry_point_and_parser
|
python
|
ploomber/ploomber
|
src/ploomber/cli/parsers.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/parsers.py
|
Apache-2.0
|
def _configure_logger(args):
"""Configure logger if user passed --log/--log-file args"""
if hasattr(args, "log"):
if args.log is not None:
logging.basicConfig(level=args.log.upper())
if hasattr(args, "log_file"):
if args.log_file is not None:
file_handler = logging.FileHandler(args.log_file)
logging.getLogger().addHandler(file_handler)
|
Configure logger if user passed --log/--log-file args
|
_configure_logger
|
python
|
ploomber/ploomber
|
src/ploomber/cli/parsers.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/cli/parsers.py
|
Apache-2.0
|
def connection(self):
"""Return a connection, open one if there isn't any"""
# if there isn't an open connection, open one...
if self._connection is None:
self._connection = self.connect_fn(**self.connect_kwargs)
return self._connection
|
Return a connection, open one if there isn't any
|
connection
|
python
|
ploomber/ploomber
|
src/ploomber/clients/db.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/clients/db.py
|
Apache-2.0
|
def execute(self, code):
"""Execute code with the existing connection"""
cur = self.connection.cursor()
if self.split_source:
for command in code_split(code, token=self.split_source):
cur.execute(command)
else:
cur.execute(code)
self.connection.commit()
cur.close()
|
Execute code with the existing connection
|
execute
|
python
|
ploomber/ploomber
|
src/ploomber/clients/db.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/clients/db.py
|
Apache-2.0
|
def __init__(
self, connect_kwargs, path_to_directory, run_template="bash {{path_to_code}}"
):
"""
path_to_directory: str
A path to save temporary files
connect_kwargs: dict
Parameters to send to the paramiko.SSHClient.connect constructor
"""
self.path_to_directory = path_to_directory
self.connect_kwargs = connect_kwargs
self.run_template = run_template
self._raw_client = None
self._logger = logging.getLogger("{}.{}".format(__name__, type(self).__name__))
|
path_to_directory: str
A path to save temporary files
connect_kwargs: dict
Parameters to send to the paramiko.SSHClient.connect constructor
|
__init__
|
python
|
ploomber/ploomber
|
src/ploomber/clients/shell.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/clients/shell.py
|
Apache-2.0
|
def upload(self, local):
"""Upload file or folder from a local path by calling _upload as needed
Parameters
----------
local
Path to local file or folder to upload
"""
if Path(local).is_dir():
for f in glob.iglob(str(Path(local, "**")), recursive=True):
if Path(f).is_file():
self._upload(f)
else:
self._upload(local)
|
Upload file or folder from a local path by calling _upload as needed
Parameters
----------
local
Path to local file or folder to upload
|
upload
|
python
|
ploomber/ploomber
|
src/ploomber/clients/storage/abc.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/clients/storage/abc.py
|
Apache-2.0
|
def _remote_path(self, local):
"""
Given a local path, compute the remote path where the file will be
stored.
1. Obtain the absolute project root (``/path/to/project``)
2. Get the local absolute path (``/path/to/project/out/data.csv``)
3. Compute the relative path (``out/data.csv``)
4. Prefix the relative path with the ``parent`` argument
(passed to the Client constructor) (``path/to/parent/out/data.csv``)
"""
relative = _resolve(local).relative_to(self._path_to_project_root)
return str(PurePosixPath(self._parent, *relative.parts))
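A standalone sketch of the four steps listed in the docstring, using pathlib directly; all paths are made up for illustration.

from pathlib import PurePosixPath

project_root = PurePosixPath("/path/to/project")
local = PurePosixPath("/path/to/project/out/data.csv")

relative = local.relative_to(project_root)                 # out/data.csv
remote = PurePosixPath("path/to/parent", *relative.parts)  # prefix with the parent
print(remote)                                              # path/to/parent/out/data.csv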
|
Given a local path, compute the remote path where the file will be
stored.
1. Obtain the absolute project root (``/path/to/project``)
2. Get the local absolute path (``/path/to/project/out/data.csv``)
3. Compute the relative path (``out/data.csv``)
4. Prefix the relative path with the ``parent`` argument
(passed to the Client constructor) (``path/to/parent/out/data.csv``)
|
_remote_path
|
python
|
ploomber/ploomber
|
src/ploomber/clients/storage/abc.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/clients/storage/abc.py
|
Apache-2.0
|
def _resolve(path):
"""
Path.resolve() does not work on windows if the path doesn't exist
this makes it work
"""
path = Path(path)
return path if path.is_absolute() else Path(".").resolve() / path
|
Path.resolve() does not work on windows if the path doesn't exist
this makes it work
|
_resolve
|
python
|
ploomber/ploomber
|
src/ploomber/clients/storage/util.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/clients/storage/util.py
|
Apache-2.0
|
def render(self, force=False, show_progress=True, remote=False):
"""
Render resolves all placeholders in tasks and determines whether
a task should run or not based on the task.product metadata, this
allows up-to-date tasks to be skipped
Parameters
----------
force : bool, default=False
Ignore product metadata status and prepare all tasks to be
executed. This option renders much faster in DAGs with products
whose metadata is stored in remote systems, because there is no
need to fetch metadata over the network. If the DAG won't be
built, this option is recommended.
show_progress : bool, default=True
Show progress bar
remote : bool, default=False
Use remote metadata for determining task status. In most scenarios,
you want this to be False, Ploomber uses this internally when
exporting pipelines to other platforms (via Soopervisor).
"""
g = self._to_graph(fmt="networkx")
def unique(elements):
elements_unique = []
for elem in elements:
if elem not in elements_unique:
elements_unique.append(elem)
return elements_unique
dags = unique([t.dag for t in g])
# first render any other dags involved (this happens when some
# upstream parameters come form other dags)
# NOTE: for large compose dags it might be wasteful to render over
# and over
for dag in dags:
if dag is not self:
dag._render_current(
force=force, show_progress=show_progress, remote=remote
)
# then, render this dag
self._render_current(force=force, show_progress=show_progress, remote=remote)
return self
|
Render resolves all placeholders in tasks and determines whether
a task should run or not based on the task.product metadata, this
allows up-to-date tasks to be skipped
Parameters
----------
force : bool, default=False
Ignore product metadata status and prepare all tasks to be
executed. This option renders much faster in DAGs with products
whose metadata is stored in remote systems, because there is no
need to fetch metadata over the network. If the DAG won't be
built, this option is recommended.
show_progress : bool, default=True
Show progress bar
remote : bool, default=False
Use remote metadata for determining task status. In most scenarios,
you want this to be False, Ploomber uses this internally when
exporting pipelines to other platforms (via Soopervisor).
|
render
|
python
|
ploomber/ploomber
|
src/ploomber/dag/dag.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/dag/dag.py
|
Apache-2.0
|
def build(self, force=False, show_progress=True, debug=None, close_clients=True):
"""
Runs the DAG in order so that all upstream dependencies are run for
every task
Parameters
----------
force : bool, default=False
If True, it will run all tasks regardless of status, defaults to
False
show_progress : bool, default=True
Show progress bar
debug : 'now' or 'later', default=None
If 'now', Drop a debugging session if building raises an exception.
Note that this modifies the executor and temporarily sets it
to Serial with subprocess off and catching exceptions/warnings off.
Restores the original executor at the end. If 'later' it keeps the
executor the same and serializes the traceback errors for later
debugging
close_clients : bool, default=True
Close all clients (dag-level, task-level and product-level) upon
successful build
Notes
-----
All dag-level clients are closed after calling this function
.. collapse:: changelog
.. versionchanged:: 0.20
``debug`` changed from True/False to 'now'/'later'/None
.. versionadded:: 0.20
``debug`` now supports debugging NotebookRunner tasks
Returns
-------
BuildReport
A dict-like object with tasks as keys and dicts with task
status as values
"""
kwargs = callback_check(
self._params.logging_factory, available={"dag_name": self.name}
)
res = self._params.logging_factory(**kwargs)
if isinstance(res, Iterable):
dag_logger = DAGLogger(*res)
else:
dag_logger = DAGLogger(handler=res)
# if debug, we have to change the executor to these settings, if we run
# tasks in a subprocess or catch exception, we won't be able to start
# the debugging session in the right place
if debug:
executor_original = self.executor
# serial debugger needed if debugnow
if debug == "now":
self.executor = executors.Serial(
build_in_subprocess=False,
catch_exceptions=False,
catch_warnings=False,
)
# set debug flag to True on all tasks that have one. Currently
# only NotebookRunner exposes this
for name in self._iter():
task = self[name]
if isinstance(task, (NotebookRunner, PythonCallable)):
task.debug_mode = debug
callable_ = partial(self._build, force=force, show_progress=show_progress)
with dag_logger:
try:
report = callable_()
finally:
if close_clients:
self.close_clients()
# if debugging now, revert back the original executor
if debug == "now":
self.executor = executor_original
return report
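A minimal end-to-end sketch of rendering and building a DAG; it assumes the public ploomber API (DAG, PythonCallable, File) behaves as documented, and the make_data task is made up.

from pathlib import Path

from ploomber import DAG
from ploomber.tasks import PythonCallable
from ploomber.products import File


def make_data(product):
    Path(str(product)).write_text("a,b\n1,2\n")


dag = DAG()
PythonCallable(make_data, File("data.csv"), dag=dag, name="make_data")

dag.render()          # resolve placeholders and task statuses
report = dag.build()  # returns a BuildReport (dict-like: task name -> status)
print(report)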
|
Runs the DAG in order so that all upstream dependencies are run for
every task
Parameters
----------
force : bool, default=False
If True, it will run all tasks regardless of status, defaults to
False
show_progress : bool, default=True
Show progress bar
debug : 'now' or 'later', default=None
If 'now', Drop a debugging session if building raises an exception.
Note that this modifies the executor and temporarily sets it
to Serial with subprocess off and catching exceptions/warnings off.
Restores the original executor at the end. If 'later' it keeps the
executor the same and serializes the traceback errors for later
debugging
close_clients : bool, default=True
Close all clients (dag-level, task-level and product-level) upon
successful build
Notes
-----
All dag-level clients are closed after calling this function
.. collapse:: changelog
.. versionchanged:: 0.20
``debug`` changed from True/False to 'now'/'later'/None
.. versionadded:: 0.20
``debug`` now supports debugging NotebookRunner tasks
Returns
-------
BuildReport
A dict-like object with tasks as keys and dicts with task
status as values
|
build
|
python
|
ploomber/ploomber
|
src/ploomber/dag/dag.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/dag/dag.py
|
Apache-2.0
|
def close_clients(self):
"""Close all clients (dag-level, task-level and product-level)"""
# keep track of closed clients so we only call .close() once.
# For most clients, calling .close() multiple times does not throw
# any errors. However, when using google.cloud.bigquery.dbapi (and
# possible others), calling .close() many times will throw an error
closed = []
for client in self.clients.values():
if client not in closed:
client.close()
closed.append(client)
for task_name in self._iter():
task = self[task_name]
if task.client and task.client not in closed:
task.client.close()
closed.append(task.client)
if task.product.client and task.product.client not in closed:
task.product.client.close()
closed.append(task.product.client)
|
Close all clients (dag-level, task-level and product-level)
|
close_clients
|
python
|
ploomber/ploomber
|
src/ploomber/dag/dag.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/dag/dag.py
|
Apache-2.0
|
def build_partially(
self, target, force=False, show_progress=True, debug=None, skip_upstream=False
):
"""Partially build a dag until certain task
Parameters
----------
target : str
Name of the target task (last one to build). Can pass a wildcard
such as 'tasks-*'
force : bool, default=False
If True, it will run all tasks regardless of status, defaults to
False
show_progress : bool, default=True
Show progress bar
debug : 'now' or 'later', default=None
If 'now', Drop a debugging session if building raises an exception.
Note that this modifies the executor and temporarily sets it
to Serial with subprocess off and catching exceptions/warnings off.
Restores the original executor at the end. If 'later' it keeps the
executor the same and serializes the traceback errors for later
debugging
skip_upstream : bool, default=False
If False, includes all upstream dependencies required to build
target, otherwise it skips them. Note that if this is True and
it's not possible to build a given task (e.g., missing upstream
products), this will fail
Notes
-----
.. collapse:: changelog
.. versionchanged:: 0.20
``debug`` changed from True/False to 'now'/'later'/None
.. versionadded:: 0.20
``debug`` now supports debugging NotebookRunner tasks
"""
return self._build_partially(
target=target,
force=force,
show_progress=show_progress,
debug=debug,
skip_upstream=skip_upstream,
deepcopy=True,
)
|
Partially build a dag until certain task
Parameters
----------
target : str
Name of the target task (last one to build). Can pass a wildcard
such as 'tasks-*'
force : bool, default=False
If True, it will run all tasks regardless of status, defaults to
False
show_progress : bool, default=True
Show progress bar
debug : 'now' or 'later', default=None
If 'now', Drop a debugging session if building raises an exception.
Note that this modifies the executor and temporarily sets it
to Serial with subprocess off and catching exceptions/warnings off.
Restores the original executor at the end. If 'later' it keeps the
executor the same and serializes the traceback errors for later
debugging
skip_upstream : bool, default=False
If False, includes all upstream dependencies required to build
target, otherwise it skips them. Note that if this is True and
it's not possible to build a given task (e.g., missing upstream
products), this will fail
Notes
-----
.. collapse:: changelog
.. versionchanged:: 0.20
``debug`` changed from True/False to 'now'/'later'/None
.. versionadded:: 0.20
``debug`` now supports debugging NotebookRunner tasks
|
build_partially
|
python
|
ploomber/ploomber
|
src/ploomber/dag/dag.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/dag/dag.py
|
Apache-2.0
|
def to_markup(self, path=None, fmt="html", sections=None, backend=None):
"""Returns a str (md or html) with the pipeline's description
Parameters
----------
sections : list
Which sections to include, possible values are "plot", "status"
and "source". Defaults to ["plot", "status"]
"""
sections = sections or ["plot", "status"]
if fmt not in {"html", "md"}:
raise ValueError("fmt must be html or md, got {}".format(fmt))
if "status" in sections:
status = self.status().to_format("html")
else:
status = False
backend = plot.choose_backend(backend, path)
if "plot" in sections:
ext = ".png" if backend == "pygraphviz" else ".html"
fd, path_to_plot = tempfile.mkstemp(suffix=ext)
os.close(fd)
if backend == "pygraphviz":
self.plot(output=path_to_plot, backend=backend)
plot_ = image_bytes2html(Path(path_to_plot).read_bytes())
else:
self.plot(output=path_to_plot, backend=backend, image_only=True)
json_data = Path(path_to_plot).read_text()
plot_ = svg2html()
else:
plot_ = False
template_md = importlib_resources.read_text(resources, "dag.md")
out = Template(template_md).render(
plot=plot_, status=status, source="source" in sections, dag=self
)
if fmt == "html":
from ploomber.util import markup
out = markup.markdown_to_html(out)
# add css
if backend == "d3" and "plot" in sections:
html = importlib_resources.read_text(
resources, "github-markdown-d3.html"
)
out = Template(html).render(content=out, json_data=json_data)
else:
html = importlib_resources.read_text(resources, "github-markdown.html")
out = Template(html).render(content=out)
if path is not None:
Path(path).write_text(out)
return out
|
Returns a str (md or html) with the pipeline's description
Parameters
----------
sections : list
Which sections to include, possible values are "plot", "status"
and "source". Defaults to ["plot", "status"]
|
to_markup
|
python
|
ploomber/ploomber
|
src/ploomber/dag/dag.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/dag/dag.py
|
Apache-2.0
|
def plot(
self, output="embed", include_products=False, backend=None, image_only=False
):
"""Plot the DAG
Parameters
----------
output : str, default='embed'
Where to save the output (e.g., pipeline.png). If 'embed', it
returns an IPython image instead.
include_products : bool, default=False
If False, each node only contains the task name, if True
it contains the task name and products. Only available when using
the pygraphviz backend
backend : str, default=None
How to generate the plot, if None it uses pygraphviz if installed,
otherwise it uses D3 (which doesn't require extra dependencies),
you can force to use a backend by passing 'pygraphviz', 'd3', or 'mermaid'.
"""
if backend not in {None, "d3", "pygraphviz", "mermaid"}:
raise PlotException(
"Expected backend to be: None, 'd3', 'mermaid' "
f"or 'pygraphviz', but got: {backend!r}"
)
# FIXME: add tests for this
self.render()
# D3
if plot.choose_backend(backend, output) == "d3":
if include_products:
raise PlotException(
"'include_products' is not supported "
"when using the d3 backend. Switch the "
"flag or change to the pypgrahviz backend"
)
if output != "embed":
suffix = Path(output).suffix
if suffix == ".png":
raise PlotException(
"'d3' plotting backend cannot generate .png plots. "
"Change the extension to .html or install pygraphviz"
)
if suffix != ".html":
raise PlotException(
"Error when using d3 backend: "
"expected a path with "
f"extension .html, but got: {output!r}, "
"please change the extension"
)
G = self._to_graph(fmt="d3", include_products=include_products)
dag_json = nx.readwrite.json_graph.node_link_data(G)
with _path_for_plot(path_to_plot=output, fmt="html") as path:
plot.with_d3(dag_json, output=path, image_only=image_only)
if output == "embed":
return plot.embedded_html(path=path)
else:
return path
# mermaid
elif plot.choose_backend(backend, output) == "mermaid":
if include_products:
raise PlotException(
"'include_products' is not supported "
"when using the mermaid backend. Switch the "
"flag or change to the pypgrahviz backend"
)
if output != "embed":
suffix = Path(output).suffix
if suffix == ".png":
raise PlotException(
"'mermaid' plotting backend cannot generate .png plots. "
"Change the extension to .html or install pygraphviz"
)
if suffix != ".html":
raise PlotException(
"Error when using mermaid backend: "
"expected a path with "
f"extension .html, but got: {output!r}, "
"please change the extension"
)
G = self._to_graph(fmt="d3", include_products=include_products)
dag_json = nx.readwrite.json_graph.node_link_data(G)
with _path_for_plot(path_to_plot=output, fmt="html") as path:
plot.with_mermaid(dag_json, output=path, image_only=image_only)
if output == "embed":
return plot.embedded_html(path=path)
else:
return path
elif not plot.check_pygraphviz_installed() and backend == "pygraphviz":
raise ModuleNotFoundError(
_make_requires_error_message(
["pygraphviz<1.8"] if sys.version_info < (3, 8) else ["pygraphviz"],
"plot",
_pygraphviz_message,
)
)
# use pygraphviz
with _path_for_plot(path_to_plot=output, fmt="png") as path:
# attributes docs:
# https://graphviz.gitlab.io/_pages/doc/info/attrs.html
G = self._to_graph(fmt="pygraphviz", include_products=include_products)
G.draw(path, prog="dot", args="-Grankdir=LR")
if output == "embed":
return Image(filename=path)
else:
return path
|
Plot the DAG
Parameters
----------
output : str, default='embed'
Where to save the output (e.g., pipeline.png). If 'embed', it
returns an IPython image instead.
include_products : bool, default=False
If False, each node only contains the task name, if True
it contains the task name and products. Only available when using
the pygraphviz backend
backend : str, default=None
How to generate the plot, if None it uses pygraphviz if installed,
otherwise it uses D3 (which doesn't require extra dependencies),
you can force to use a backend by passing 'pygraphviz', 'd3', or 'mermaid'.
|
plot
|
python
|
ploomber/ploomber
|
src/ploomber/dag/dag.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/dag/dag.py
|
Apache-2.0
|
def _to_graph(self, fmt, only_current_dag=False, include_products=False):
"""
Converts the DAG to a Networkx DiGraph object. Since upstream
dependencies are not required to come from the same DAG,
this object might include tasks that are not included in the current
object
Parameters
----------
fmt : 'networkx', 'pygraphviz', or 'd3'
Output format
include_products : bool, default=False
If False, each node only contains the task name, if True
it contains the task name and products.
"""
FMT = {"networkx", "pygraphviz", "d3"}
if fmt not in FMT:
raise ValueError(f"Invalid format, expected one of: {FMT}")
# https://networkx.github.io/documentation/networkx-1.10/reference/drawing.html
# http://graphviz.org/doc/info/attrs.html
# NOTE: delete this, use existing DiGraph object
G = nx.DiGraph()
for task in self.values():
# these formats are used for plotting, so only pass certain task
# attributes
if fmt in {"pygraphviz", "d3"}:
outdated = task.product._is_outdated()
# add parameters for graphviz plotting
color = "#F08080" if outdated else "#90EE90"
label = _task_short_repr(task) if include_products else task.name
attr = {
"fillcolor": color,
"style": "dashed, filled" if outdated else "filled",
"fontname": "Helvetica",
"fontsize": "16pt",
"id": task.name,
"label": label,
}
# graphviz uses the str representation of the node object to
# distinguish them - by default str(task) returns
# str(task.product), we have to make sure that when
# return_graphviz=True, we pass the task id as node, instead
# of the full task object, otherwise if two products have the
# same str representation, nodes will clash
G.add_node(task.name, **attr)
# when exporting to networkx, we want the task object
else:
G.add_node(task)
def get_task_id(task):
"""
Determine what to use to identify the edges the task object
or task name
"""
return task if fmt == "networkx" else task.name
# add edges
if only_current_dag:
G.add_edges_from(
[
(get_task_id(up), get_task_id(task))
for up in task.upstream.values()
if up.dag is self
]
)
else:
G.add_edges_from(
[
(get_task_id(up), get_task_id(task))
for up in task.upstream.values()
]
)
if fmt in {"networkx", "d3"}:
return G
else:
# to_agraph converts to pygraphviz
return nx.nx_agraph.to_agraph(G)
|
Converts the DAG to a Networkx DiGraph object. Since upstream
dependencies are not required to come from the same DAG,
this object might include tasks that are not included in the current
object
Parameters
----------
fmt : 'networkx', 'pygraphviz', or 'd3'
Output format
include_products : bool, default=False
If False, each node only contains the task name, if True
it contains the task name and products.
|
_to_graph
|
python
|
ploomber/ploomber
|
src/ploomber/dag/dag.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/dag/dag.py
|
Apache-2.0
|
def _add_edge(self, task_from, task_to, group_name=None):
"""Add an edge between two tasks
Parameters
----------
group_name : str
Pass a string to group this edge, upon rendering, upstream
products are available via task[group_name][task_name]
"""
attrs = {} if group_name is None else {"group_name": group_name}
# when adding a task group (but not a dag)
if isiterable(task_from) and not isinstance(task_from, DAG):
# if iterable, add all components as separate upstream tasks
for a_task_from in task_from:
# this happens when the task was originally declared in
# another dag...
if a_task_from.name not in self._G:
self._G.add_node(a_task_from.name, task=a_task_from)
self._G.add_edge(a_task_from.name, task_to.name, **attrs)
else:
# this happens when the task was originally declared in
# another dag...
if task_from.name not in self._G:
self._G.add_node(task_from.name, task=task_from)
# DAGs are treated like a single task
self._G.add_edge(task_from.name, task_to.name, **attrs)
|
Add an edge between two tasks
Parameters
----------
group_name : str
Pass a string to group this edge, upon rendering, upstream
products are available via task[group_name][task_name]
|
_add_edge
|
python
|
ploomber/ploomber
|
src/ploomber/dag/dag.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/dag/dag.py
|
Apache-2.0
|
def _get_upstream(self, task_name):
"""Get upstream tasks given a task name (returns Task objects)"""
upstream_names = self._G.predecessors(task_name)
return {name: self._G.nodes[name]["task"] for name in upstream_names}
|
Get upstream tasks given a task name (returns Task objects)
|
_get_upstream
|
python
|
ploomber/ploomber
|
src/ploomber/dag/dag.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/dag/dag.py
|
Apache-2.0
|