code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def _read_nb_str_unrendered(self, force=False):
"""
Returns the notebook representation (JSON string); this is the raw
source code passed and does not contain injected parameters.
Adds kernelspec info (if not present) based on the kernelspec_name;
this metadata is required for papermill to know which kernel to use.
An exception is raised if we cannot determine kernel information.
"""
# hot_reload causes the notebook representation to always be re-evaluated
if self._nb_str_unrendered is None or self._hot_reload or force:
if force:
primitive = _read_primitive(self._path)
else:
primitive = self.primitive
# this is the notebook node representation
nb = _to_nb_obj(
primitive,
ext=self._ext_in,
# passing the underscored version
# because that's the only one available
# when this is initialized
language=self._language,
kernelspec_name=self._kernelspec_name,
check_if_kernel_installed=self._check_if_kernel_installed,
path=self._path,
)
# if the user injected cells manually (with ploomber nb --inject)
# the source will contain the injected cell, remove it because
# it should not be considered part of the source code
self._nb_obj_unrendered = _cleanup_rendered_nb(nb, print_=False)
# get the str representation. always write from nb_obj, even if
# this was initialized with a ipynb file, nb_obj contains
# kernelspec info
self._nb_str_unrendered = nbformat.writes(
self._nb_obj_unrendered, version=nbformat.NO_CONVERT
)
return self._nb_str_unrendered, self._nb_obj_unrendered
|
Returns the notebook representation (JSON string); this is the raw
source code passed and does not contain injected parameters.
Adds kernelspec info (if not present) based on the kernelspec_name;
this metadata is required for papermill to know which kernel to use.
An exception is raised if we cannot determine kernel information.
|
_read_nb_str_unrendered
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
def _validate_parameters_cell(self, extract_upstream=False, extract_product=False):
"""Check parameters call and add it when it's missing
Parameters
----------
extract_upstream : bool, default: False
Flag used to determine the content of the parameters cell,
only used if the notebook is missing the parameters cell
extract_product : bool, default: False
Same as extract_upstream
"""
params_cell, _ = find_cell_with_tag(self._nb_obj_unrendered, "parameters")
if params_cell is None:
loc = pretty_print.try_relative_path(self.loc)
add_parameters_cell(self.loc, extract_upstream, extract_product)
click.secho(
f"Notebook {loc} is missing the parameters cell, "
"adding it at the top of the file...",
fg="yellow",
)
|
Check for the parameters cell and add it when it's missing
Parameters
----------
extract_upstream : bool, default: False
Flag used to determine the content of the parameters cell,
only used if the notebook is missing the parameters cell
extract_product : bool, default: False
Same as extract_upstream
|
_validate_parameters_cell
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
def _post_render_validation(self):
"""
Validate params passed against parameters in the notebook
"""
# NOTE: maybe static_analysis = off should not turn off everything
# but only warn
# strict mode: raise and check signature
# regular mode: _check_notebook called in .run
if self.static_analysis == "strict":
self._check_notebook(raise_=True, check_signature=True)
else:
# otherwise, only warn on unused parameters
_warn_on_unused_params(self._nb_obj_unrendered, self._params)
|
Validate params passed against parameters in the notebook
|
_post_render_validation
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
def nb_str_rendered(self):
"""
Returns the notebook (as a string) with parameters injected, hot
reloading if necessary
"""
if self._nb_str_rendered is None:
raise RuntimeError(
"Attempted to get location for an unrendered "
"notebook, render it first"
)
if self._hot_reload:
self._render()
return self._nb_str_rendered
|
Returns the notebook (as a string) with parameters injected, hot
reloading if necessary
|
nb_str_rendered
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
def nb_obj_rendered(self):
"""
Returns the notebook (as an object) with parameters injected, hot
reloading if necessary
"""
if self._nb_obj_rendered is None:
# using self.nb_str_rendered triggers hot reload if needed
self._nb_obj_rendered = self._nb_str_to_obj(self.nb_str_rendered)
return self._nb_obj_rendered
|
Returns the notebook (as an object) with parameters injected, hot
reloading if necessary
|
nb_obj_rendered
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
def language(self):
"""
Notebook language (Python, R, etc.). This is a best-effort property and
can be None if we could not determine the language
"""
if self._language is None:
self._read_nb_str_unrendered()
try:
# make sure you return "r" instead of "R"
return self._nb_obj_unrendered.metadata.kernelspec.language.lower()
except AttributeError:
return None
else:
return self._language
|
Notebook language (Python, R, etc.). This is a best-effort property and
can be None if we could not determine the language
|
language
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
def save_injected_cell(self, **kwargs):
"""
Inject cell, overwrite the source file (and any paired files)
"""
fmt_ = _jupytext_fmt(self._primitive, self._ext_in)
# add metadata to flag that the cell was injected manually
recursive_update(
self.nb_obj_rendered,
dict(metadata=dict(ploomber=dict(injected_manually=True))),
)
# Are we updating a text file that has a metadata filter? If so,
# add ploomber as a section that must be stored
if (
self.nb_obj_rendered.metadata.get("jupytext", {}).get(
"notebook_metadata_filter"
)
== "-all"
):
recursive_update(
self.nb_obj_rendered,
dict(
metadata=dict(
jupytext=dict(notebook_metadata_filter="ploomber,-all")
)
),
)
# overwrite
jupytext.write(self.nb_obj_rendered, self._path, fmt=fmt_)
# overwrite all paired files
for path, fmt_ in iter_paired_notebooks(
self.nb_obj_rendered, fmt_, self._path.stem
):
# get absolute path for each notebook
path = Path(self._path.parent / path)
jupytext.write(self.nb_obj_rendered, fp=path, fmt=fmt_)
|
Inject cell, overwrite the source file (and any paired files)
|
save_injected_cell
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
def remove_injected_cell(self):
"""
Delete injected cell, overwrite the source file (and any paired files)
"""
nb_clean = _cleanup_rendered_nb(self._nb_obj_unrendered)
# remove metadata
recursive_update(
nb_clean, dict(metadata=dict(ploomber=dict(injected_manually=None)))
)
fmt_ = _jupytext_fmt(self._primitive, self._ext_in)
# overwrite
jupytext.write(nb_clean, self._path, fmt=fmt_)
# overwrite all paired files
for path, fmt_ in iter_paired_notebooks(
self._nb_obj_unrendered, fmt_, self._path.stem
):
# get absolute path for each notebook
path = Path(self._path.parent / path)
jupytext.write(nb_clean, fp=path, fmt=fmt_)
|
Delete injected cell, overwrite the source file (and any paired files)
|
remove_injected_cell
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
def format(self, fmt, entry_point):
"""Change source format
Returns
-------
str
The path if the extension changed, None otherwise
"""
nb_clean = _cleanup_rendered_nb(self._nb_obj_unrendered)
ext_file = self._path.suffix
ext_format = long_form_one_format(fmt)["extension"]
extension_changed = ext_file != ext_format
if extension_changed:
if Path(entry_point).is_file():
path = self._path.with_suffix(ext_format)
Path(self._path).unlink()
modified_entry = Path(entry_point).read_text()
main_file = f"{self.name}{ext_file}"
if main_file in modified_entry:
modified_entry = modified_entry.replace(
main_file, f"{self.name}{ext_format}"
)
Path(entry_point).write_text(modified_entry)
else:
click.secho(
f"{main_file} does not appear in entry-point"
f"please edit manually\n",
fg="yellow",
)
path = self._path
else:
click.secho(
"The entry-point is not a valid file, please"
" update the pipeline file extensions manually\n",
fg="yellow",
)
path = self._path
else:
path = self._path
jupytext.write(nb_clean, path, fmt=fmt)
return path if extension_changed else None
|
Change source format
Returns
-------
str
The path if the extension changed, None otherwise
|
format
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
def _to_nb_obj(
source,
language,
ext=None,
kernelspec_name=None,
check_if_kernel_installed=True,
path=None,
):
"""
Convert to a Jupyter notebook via jupytext. If the notebook does not contain
kernel information and the user did not pass a kernelspec_name explicitly,
we will try to infer the language and select a kernel appropriately.
If a valid kernel is found, it is added to the notebook. If none of this
works, an exception is raised.
It also converts the code string to its notebook node representation,
adding kernel data accordingly.
Parameters
----------
source : str
Jupyter notebook (or jupytext compatible formatted) document
language : str
Programming language
path : str, default=None
Script/notebook path. If not None, it's used to throw an informative
error if the notebook fails to load
Returns
-------
nb
Notebook object
Raises
------
RenderError
If the notebook has no kernelspec metadata and kernelspec_name is
None. A notebook without kernelspec metadata will not display in
jupyter notebook correctly. We have to make sure all notebooks
have this.
"""
import jupytext
# let jupytext figure out the format
try:
nb = jupytext.reads(source, fmt=ext)
except Exception as e:
what = "notebook" if ext == "ipynb" else "script"
err = f"Failed to read {what}"
if path is not None:
err += f" from {str(path)!r}"
raise SourceInitializationError(err) from e
# NOTE: I can add the cell with parameters here, but what happens if
# extract_upstream is false? would that be a problem?
check_nb_kernelspec_info(
nb, kernelspec_name, ext, language, check_if_installed=check_if_kernel_installed
)
return nb
|
Convert to a Jupyter notebook via jupytext. If the notebook does not contain
kernel information and the user did not pass a kernelspec_name explicitly,
we will try to infer the language and select a kernel appropriately.
If a valid kernel is found, it is added to the notebook. If none of this
works, an exception is raised.
It also converts the code string to its notebook node representation,
adding kernel data accordingly.
Parameters
----------
source : str
Jupyter notebook (or jupytext compatible formatted) document
language : str
Programming language
path : str, default=None
Script/notebook path. If not None, it's used to throw an informative
error if the notebook fails to load
Returns
-------
nb
Notebook object
Raises
------
RenderError
If the notebook has no kernelspec metadata and kernelspec_name is
None. A notebook without kernelspec metadata will not display in
jupyter notebook correctly. We have to make sure all notebooks
have this.
|
_to_nb_obj
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
def check_nb_kernelspec_info(
nb, kernelspec_name, ext, language, check_if_installed=True
):
"""Make sure the passed notebook has kernel info
Parameters
----------
check_if_installed : bool
Also check if the kernelspec is installed; nb.metadata.kernelspec
will be replaced by whatever information jupyter returns when requesting
the kernelspec
"""
import jupyter_client
kernel_name = determine_kernel_name(nb, kernelspec_name, ext, language)
# cannot keep going if we don't have the kernel name
if kernel_name is None:
raise SourceInitializationError(
"Notebook does not contain kernelspec metadata and "
"kernelspec_name was not specified, either add "
"kernelspec info to your source file or specify "
"a kernelspec by name. To see list of installed kernels run "
'"jupyter kernelspec list" in the terminal (first column '
'indicates the name). Python is usually named "python3", '
'R usually "ir"'
)
if check_if_installed:
kernelspec = jupyter_client.kernelspec.get_kernel_spec(kernel_name)
nb.metadata.kernelspec = {
"display_name": kernelspec.display_name,
"language": kernelspec.language,
"name": kernel_name,
}
else:
if "metadata" not in nb:
nb["metadata"] = dict()
if "kernelspec" not in nb["metadata"]:
nb["metadata"]["kernelspec"] = dict()
# we cannot ask jupyter, so we fill this in ourselves
nb.metadata.kernelspec = {
"display_name": "R" if kernel_name == "ir" else "Python 3",
"language": "R" if kernel_name == "ir" else "python",
"name": kernel_name,
}
|
Make sure the passed notebook has kernel info
Parameters
----------
check_if_installed : bool
Also check if the kernelspec is installed; nb.metadata.kernelspec
will be replaced by whatever information jupyter returns when requesting
the kernelspec
|
check_nb_kernelspec_info
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
def determine_kernel_name(nb, kernelspec_name, ext, language):
"""
Determines the kernel name by using the following data (returns whatever
gives kernel info first): 1) explicit kernel from the user 2) notebook's
metadata 3) file extension 4) language 5) best guess
"""
# explicit kernelspec name
if kernelspec_name is not None:
return kernelspec_name
# use metadata info
try:
return nb.metadata.kernelspec.name
except AttributeError:
pass
# use language from extension if passed, otherwise use language variable
if ext:
language = determine_language(ext)
lang2kernel = {"python": "python3", "r": "ir"}
if language in lang2kernel:
return lang2kernel[language]
# nothing worked, try to guess if it's python...
is_python_ = is_python(nb)
if is_python_:
return "python3"
else:
return None
|
Determines the kernel name by using the following data (returns whatever
gives kernel info first): 1) explicit kernel from the user 2) notebook's
metadata 3) file extension 4) language 5) best guess
|
determine_kernel_name
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
def inject_cell(model, params):
"""Inject params (by adding a new cell) to a model
Notes
-----
A model is different than a notebook:
https://jupyter-server.readthedocs.io/en/stable/developers/contents.html
"""
nb = nbformat.from_dict(model["content"])
# we must ensure nb has kernelspec info, otherwise papermill will fail to
# parametrize
ext = model["name"].split(".")[-1]
check_nb_kernelspec_info(nb, kernelspec_name=None, ext=ext, language=None)
# papermill adds a bunch of things before calling parameterize_notebook
# if we don't add those things, parameterize_notebook breaks
# https://github.com/nteract/papermill/blob/0532d499e13e93d8990211be33e9593f1bffbe6c/papermill/iorw.py#L400
if not hasattr(nb.metadata, "papermill"):
nb.metadata["papermill"] = {
"parameters": dict(),
"environment_variables": dict(),
"version": None,
}
for cell in nb.cells:
if not hasattr(cell.metadata, "tags"):
cell.metadata["tags"] = []
params = json_serializable_params(params)
comment = (
"This cell was injected automatically based on your stated "
"upstream dependencies (cell above) and pipeline.yaml "
"preferences. It is temporary and will be removed when you "
"save this notebook"
)
model["content"] = parameterize_notebook(
nb, params, report_mode=False, comment=comment
)
|
Inject params (by adding a new cell) to a model
Notes
-----
A model is different than a notebook:
https://jupyter-server.readthedocs.io/en/stable/developers/contents.html
|
inject_cell
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
def _cleanup_rendered_nb(nb, print_=True):
"""
Cleans up a rendered notebook object. Removes cells with tags:
injected-parameters, debugging-settings, and metadata injected by
papermill
"""
out = find_cell_with_tags(nb, ["injected-parameters", "debugging-settings"])
if print_:
for key in out.keys():
print(f"Removing {key} cell...")
idxs = set(cell["index"] for cell in out.values())
nb["cells"] = [cell for idx, cell in enumerate(nb["cells"]) if idx not in idxs]
# papermill adds "tags" to all cells that don't have them, remove them
# if they are empty to avoid cluttering the script
for cell in nb["cells"]:
if "tags" in cell.get("metadata", {}):
if not len(cell["metadata"]["tags"]):
del cell["metadata"]["tags"]
return nb
|
Cleans up a rendered notebook object. Removes cells with tags:
injected-parameters, debugging-settings, and metadata injected by
papermill
|
_cleanup_rendered_nb
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
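A minimal usage sketch for `_cleanup_rendered_nb` (assuming it can be imported from `ploomber.sources.notebooksource`, the module listed above; the notebook built here is a made-up example): cells tagged `injected-parameters` or `debugging-settings` are dropped, and empty `tags` lists left behind by papermill are removed.

```python
# Hypothetical example; assumes _cleanup_rendered_nb is importable from the module above
import nbformat
from ploomber.sources.notebooksource import _cleanup_rendered_nb

nb = nbformat.v4.new_notebook(cells=[
    nbformat.v4.new_code_cell("x = 1", metadata={"tags": ["parameters"]}),
    nbformat.v4.new_code_cell("x = 42", metadata={"tags": ["injected-parameters"]}),
    nbformat.v4.new_code_cell("print(x)", metadata={"tags": []}),
])

clean = _cleanup_rendered_nb(nb, print_=False)
print(len(clean.cells))                    # 2: the injected cell is removed
print("tags" in clean.cells[-1].metadata)  # False: the empty tags list is dropped
```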
def is_python(nb):
"""
Determine whether a given notebook object contains Python code. Looks at
metadata.kernelspec.language first; if not defined, tries to guess whether
it's Python. The check is conservative: it returns False if the code is
valid Python but contains "<-", in which case it's much more likely to be R
"""
is_python_ = None
# check metadata first
try:
language = nb.metadata.kernelspec.language
except AttributeError:
pass
else:
is_python_ = language == "python"
# no language defined in metadata, check if it's valid python
if is_python_ is None:
code_str = "\n".join([c.source for c in nb.cells])
try:
ast.parse(code_str)
except SyntaxError:
is_python_ = False
else:
# there is a lot of R code which is also valid Python code! so run a
# quick test: "<-" is very unlikely in Python ({less than} {negative})
# but extremely common in R ({assignment})
if "<-" not in code_str:
is_python_ = True
# inconclusive test...
if is_python_ is None:
is_python_ = False
return is_python_
|
Determine whether a given notebook object contains Python code. Looks at
metadata.kernelspec.language first; if not defined, tries to guess whether
it's Python. The check is conservative: it returns False if the code is
valid Python but contains "<-", in which case it's much more likely to be R
|
is_python
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
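A small illustration (assuming `is_python` is importable from `ploomber.sources.notebooksource`; the notebooks are made up): without kernelspec metadata the check falls back to parsing the code, and the "<-" heuristic flags likely R code even though it parses as Python.

```python
# Hypothetical example; assumes is_python is importable from the module above
import nbformat
from ploomber.sources.notebooksource import is_python

nb_py = nbformat.v4.new_notebook(cells=[nbformat.v4.new_code_cell("x = 1")])
nb_r = nbformat.v4.new_notebook(cells=[nbformat.v4.new_code_cell("x <- 1")])

print(is_python(nb_py))  # True
print(is_python(nb_r))   # False: "x <- 1" parses as Python (x < -1) but looks like R
```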
def determine_language(extension):
"""
Determine the programming language given a file extension. Returns the
programming language name (all lowercase) if it could be determined,
None if the test is inconclusive
"""
if extension.startswith("."):
extension = extension[1:]
mapping = {"py": "python", "r": "r", "R": "r", "Rmd": "r", "rmd": "r"}
# ipynb can be many languages, it must return None
return mapping.get(extension)
|
Determine the programming language given a file extension. Returns the
programming language name (all lowercase) if it could be determined,
None if the test is inconclusive
|
determine_language
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
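A minimal sketch (assuming `determine_language` is importable from `ploomber.sources.notebooksource`): the leading dot is stripped, and unknown extensions, including `.ipynb`, return None.

```python
# Assumes determine_language is importable from the module above
from ploomber.sources.notebooksource import determine_language

print(determine_language(".py"))     # 'python'
print(determine_language("Rmd"))     # 'r'
print(determine_language(".ipynb"))  # None: an .ipynb file can hold any language
```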
def recursive_update(target, update):
"""Recursively update a dictionary. Taken from jupytext.header"""
for key in update:
value = update[key]
if value is None:
# remove if it exists
target.pop(key, None)
elif isinstance(value, dict):
target[key] = recursive_update(target.get(key, {}), value)
else:
target[key] = value
return target
|
Recursively update a dictionary. Taken from jupytext.header
|
recursive_update
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
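A minimal sketch for `recursive_update` (assuming it is importable from `ploomber.sources.notebooksource`; the dictionaries are made up for illustration): nested dictionaries are merged, and a None value removes the matching key.

```python
# Assumes recursive_update is importable from the module above
from ploomber.sources.notebooksource import recursive_update

target = {"metadata": {"kernelspec": {"name": "python3"}, "tags": ["draft"]}}
update = {"metadata": {"kernelspec": {"language": "python"}, "tags": None}}

print(recursive_update(target, update))
# {'metadata': {'kernelspec': {'name': 'python3', 'language': 'python'}}}
```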
def parse_jupytext_format(fmt, name):
"""
Parse a jupytext format string (such as notebooks//ipynb) and return the
path to the file and the extension
"""
fmt_parsed = long_form_one_format(fmt)
path = Path(fmt_parsed["prefix"], f'{name}{fmt_parsed["extension"]}')
del fmt_parsed["prefix"]
return path, short_form_one_format(fmt_parsed)
|
Parse a jupytext format string (such as notebooks//ipynb) and return the
path to the file and the extension
|
parse_jupytext_format
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
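A minimal sketch (assuming `parse_jupytext_format` is importable from `ploomber.sources.notebooksource` and that jupytext's long/short format helpers behave as used in the function above): a `prefix//extension` format string yields the paired file's path plus the remaining short-form format, roughly as in the comments below.

```python
# Assumes parse_jupytext_format is importable from the module above
from ploomber.sources.notebooksource import parse_jupytext_format

path, fmt = parse_jupytext_format("notebooks//ipynb", "task")
print(path)  # e.g., notebooks/task.ipynb
print(fmt)   # e.g., ipynb
```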
def add_parameters_cell(path, extract_upstream=False, extract_product=False):
"""
Add parameters cell to a script/notebook in the given path, overwrites the
original file
"""
source = ""
if extract_upstream:
source += """\
# declare a list of tasks whose products you want to use as inputs
upstream = None
"""
if extract_product:
source += """\
# declare a dictionary with the outputs of this task
product = None
"""
if not extract_upstream and not extract_product:
source += "# add default values for parameters here"
c = JupytextConfiguration()
c.notebook_metadata_filter
c.cell_metadata_filter = "all"
nb = jupytext.read(path)
new_cell = nbformat.v4.new_code_cell(source, metadata={"tags": ["parameters"]})
nb.cells.insert(0, new_cell)
jupytext.write(nb, path, config=c)
|
Add parameters cell to a script/notebook in the given path, overwrites the
original file
|
add_parameters_cell
|
python
|
ploomber/ploomber
|
src/ploomber/sources/notebooksource.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/notebooksource.py
|
Apache-2.0
|
def _post_render_validation(self, rendered_value, params):
"""Analyze code and warn if issues are found"""
if "product" in params:
inferred_relations = set(
static_analysis.sql.created_relations(
rendered_value, split_source=self._split_source
)
)
if isinstance(params["product"], Product):
product_relations = {params["product"]}
else:
# metaproduct
product_relations = {p for p in params["product"]}
inferred_n = len(inferred_relations)
actual_n = len(product_relations)
# warn if the sql code will not create any tables/views
if not inferred_n:
warnings.warn(
"It appears that your script will not create "
"any tables/views but the product parameter is "
f'{params["product"]!r}'
)
# warn if the number of CREATE statements does not match the
# number of products
elif inferred_n != actual_n:
warnings.warn(
"It appears that your script will create "
f"{inferred_n} relation(s) but you declared "
f'{actual_n} product(s): {params["product"]!r}'
)
elif inferred_relations != product_relations:
warnings.warn(
"It appears that your script will create "
f"relations {inferred_relations!r}, "
"which doesn't match products: "
f"{product_relations!r}. Make sure schema, "
"name and kind (table or view) match"
)
|
Analyze code and warn if issues are found
|
_post_render_validation
|
python
|
ploomber/ploomber
|
src/ploomber/sources/sources.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/sources/sources.py
|
Apache-2.0
|
def _validate_top_keys(self, spec, path):
"""Validate keys at the top of the spec"""
if "tasks" not in spec and "location" not in spec:
raise DAGSpecInitializationError(
'Failed to initialize spec. Missing "tasks" key'
)
if "location" in spec:
if len(spec) > 1:
raise DAGSpecInitializationError(
"Failed to initialize spec. If "
'using the "location" key there should not '
"be other keys"
)
else:
valid = {
"meta",
"config",
"clients",
"tasks",
"serializer",
"unserializer",
"executor",
"on_finish",
"on_render",
"on_failure",
}
validate.keys(valid, spec.keys(), name="dag spec")
|
Validate keys at the top of the spec
|
_validate_top_keys
|
python
|
ploomber/ploomber
|
src/ploomber/spec/dagspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/dagspec.py
|
Apache-2.0
|
def to_dag(self):
"""Converts the DAG spec to a DAG object"""
# when initializing DAGs from pipeline.yaml files, we have to ensure
# that the folder where pipeline.yaml is located is in sys.path for
# imports to work (for dag clients), this happens most of the time but
# for some (unknown) reason, it doesn't
# happen when initializing PloomberContentsManager.
# pipeline.yaml paths are written relative to that file, for source
# scripts to be located we temporarily change the current working
# directory
with add_to_sys_path(self._parent_path, chdir=True):
dag = self._to_dag()
return dag
|
Converts the DAG spec to a DAG object
|
to_dag
|
python
|
ploomber/ploomber
|
src/ploomber/spec/dagspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/dagspec.py
|
Apache-2.0
|
def _to_dag(self):
"""
Internal method to manage the different cases to convert to a DAG
object
"""
if "location" in self:
return dotted_path.call_dotted_path(self["location"])
dag = DAG(name=self._name)
if "config" in self:
dag._params = DAGConfiguration.from_dict(self["config"])
if "executor" in self:
executor = self["executor"]
if isinstance(executor, str) and executor in {"serial", "parallel"}:
if executor == "parallel":
dag.executor = Parallel()
elif isinstance(executor, Mapping):
dag.executor = dotted_path.DottedPath(
executor, lazy_load=False, allow_return_none=False
)()
else:
raise DAGSpecInitializationError(
'"executor" must be '
'"serial", "parallel", or a dotted path'
f", got: {executor!r}"
)
clients = self.get("clients")
if clients:
for class_name, dotted_path_spec in clients.items():
if dotted_path_spec is None:
continue
dps = dotted_path.DottedPath(
dotted_path_spec,
lazy_load=self._lazy_import,
allow_return_none=False,
)
if self._lazy_import:
dag.clients[class_name] = dps
else:
dag.clients[class_name] = dps()
for attr in (
"serializer",
"unserializer",
"on_finish",
"on_render",
"on_failure",
):
if attr in self:
setattr(
dag,
attr,
dotted_path.DottedPath(self[attr], lazy_load=self._lazy_import),
)
process_tasks(dag, self, root_path=self._parent_path)
return dag
|
Internal method to manage the different cases to convert to a DAG
object
|
_to_dag
|
python
|
ploomber/ploomber
|
src/ploomber/spec/dagspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/dagspec.py
|
Apache-2.0
|
def find(
cls, env=None, reload=False, lazy_import=False, starting_dir=None, name=None
):
"""
Automatically find pipeline.yaml and return a DAGSpec object, which
can be converted to a DAG using .to_dag()
Parameters
----------
env
The environment to pass to the spec
name : str, default=None
Filename to search for. If None, it looks for a pipeline.yaml file,
otherwise it looks for a file with such name.
"""
starting_dir = starting_dir or os.getcwd()
path_to_entry_point = default.entry_point_with_name(
root_path=starting_dir, name=name
)
try:
return cls(
path_to_entry_point, env=env, lazy_import=lazy_import, reload=reload
)
except Exception as e:
exc = DAGSpecInitializationError(
"Error initializing DAG from " f"{path_to_entry_point!s}"
)
raise exc from e
|
Automatically find pipeline.yaml and return a DAGSpec object, which
can be converted to a DAG using .to_dag()
Parameters
----------
env
The environment to pass to the spec
name : str, default=None
Filename to search for. If None, it looks for a pipeline.yaml file,
otherwise it looks for a file with such name.
|
find
|
python
|
ploomber/ploomber
|
src/ploomber/spec/dagspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/dagspec.py
|
Apache-2.0
|
def from_directory(cls, path_to_dir):
"""
Construct a DAGSpec from a directory. Product and upstream are
extracted from sources
Parameters
----------
path_to_dir : str
The directory to use. Looks for scripts
(``.py``, ``.R`` or ``.ipynb``) in the directory and interprets
them as task sources, file names are assigned as task names
(without extension). The spec is generated with the default values
in the "meta" section. Ignores files with invalid extensions.
Notes
-----
``env`` is not supported because the spec is generated from files
in ``path_to_dir``, hence, there is no way to embed tags
"""
valid_extensions = [
name
for name, class_ in suffix2taskclass.items()
if class_ is NotebookRunner
]
if Path(path_to_dir).is_dir():
pattern = str(Path(path_to_dir, "*"))
files = list(
chain.from_iterable(iglob(pattern + ext) for ext in valid_extensions)
)
return cls.from_files(files)
else:
raise NotADirectoryError(f"{path_to_dir!r} is not a directory")
|
Construct a DAGSpec from a directory. Product and upstream are
extracted from sources
Parameters
----------
path_to_dir : str
The directory to use. Looks for scripts
(``.py``, ``.R`` or ``.ipynb``) in the directory and interprets
them as task sources, file names are assigned as task names
(without extension). The spec is generated with the default values
in the "meta" section. Ignores files with invalid extensions.
Notes
-----
``env`` is not supported because the spec is generated from files
in ``path_to_dir``, hence, there is no way to embed tags
|
from_directory
|
python
|
ploomber/ploomber
|
src/ploomber/spec/dagspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/dagspec.py
|
Apache-2.0
|
def from_files(cls, files):
"""
Construct DAGSpec from list of files or glob-like pattern. Product and
upstream are extracted from sources
Parameters
----------
files : list or str
List of files to use or glob-like string pattern. If glob-like
pattern, ignores directories that match the criteria.
"""
valid_extensions = [
name
for name, class_ in suffix2taskclass.items()
if class_ is NotebookRunner
]
if isinstance(files, str):
files = [f for f in iglob(files) if Path(f).is_file()]
invalid = [f for f in files if Path(f).suffix not in valid_extensions]
if invalid:
raise ValueError(
f"Cannot instantiate DAGSpec from files with "
f"invalid extensions: {invalid}. "
f"Allowed extensions are: {valid_extensions}"
)
tasks = [{"source": file_} for file_ in files]
meta = {"extract_product": True, "extract_upstream": True}
return cls({"tasks": tasks, "meta": meta})
|
Construct DAGSpec from list of files or glob-like pattern. Product and
upstream are extracted from sources
Parameters
----------
files : list or str
List of files to use or glob-like string pattern. If glob-like
pattern, ignores directories that match the criteria.
|
from_files
|
python
|
ploomber/ploomber
|
src/ploomber/spec/dagspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/dagspec.py
|
Apache-2.0
|
def initialize_inplace(cls, data):
"""Validate and instantiate the "meta" section"""
if "meta" not in data:
data["meta"] = {}
data["meta"] = Meta.default_meta(data["meta"])
|
Validate and instantiate the "meta" section
|
initialize_inplace
|
python
|
ploomber/ploomber
|
src/ploomber/spec/dagspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/dagspec.py
|
Apache-2.0
|
def default_meta(cls, meta=None):
"""Fill missing values in a meta dictionary"""
if meta is None:
meta = {}
validate.keys(cls.VALID, meta, name="dag spec")
if "extract_upstream" not in meta:
meta["extract_upstream"] = True
if "extract_product" not in meta:
meta["extract_product"] = False
if "product_relative_to_source" not in meta:
meta["product_relative_to_source"] = False
if "jupyter_hot_reload" not in meta:
meta["jupyter_hot_reload"] = True
if "jupyter_functions_as_notebooks" not in meta:
meta["jupyter_functions_as_notebooks"] = False
if "import_tasks_from" not in meta:
meta["import_tasks_from"] = None
if "source_loader" not in meta:
meta["source_loader"] = None
else:
try:
meta["source_loader"] = SourceLoader(**meta["source_loader"])
except Exception as e:
msg = (
"Error initializing SourceLoader with "
f'{meta["source_loader"]}. Error message: {e.args[0]}'
)
e.args = (msg,)
raise
defaults = {
"SQLDump": "File",
"NotebookRunner": "File",
"ScriptRunner": "File",
"SQLScript": "SQLRelation",
"PythonCallable": "File",
"ShellScript": "File",
}
if "product_default_class" not in meta:
meta["product_default_class"] = defaults
else:
for class_, prod in defaults.items():
if class_ not in meta["product_default_class"]:
meta["product_default_class"][class_] = prod
# validate keys and values in product_default_class
for task_name, product_name in meta["product_default_class"].items():
try:
validate_task_class_name(task_name)
validate_product_class_name(product_name)
except Exception as e:
msg = f"Error validating product_default_class: {e.args[0]}"
e.args = (msg,)
raise
return meta
|
Fill missing values in a meta dictionary
|
default_meta
|
python
|
ploomber/ploomber
|
src/ploomber/spec/dagspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/dagspec.py
|
Apache-2.0
|
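A minimal sketch (assuming `Meta` is importable from `ploomber.spec.dagspec`, the module listed above): any keys missing from the passed dictionary are filled in with their defaults.

```python
# Assumes Meta is importable from the module above
from ploomber.spec.dagspec import Meta

meta = Meta.default_meta({"extract_product": True})
print(meta["extract_upstream"])                    # True (default value)
print(meta["product_default_class"]["SQLScript"])  # 'SQLRelation'
```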
def process_tasks(dag, dag_spec, root_path=None):
"""
Initialize Task objects from TaskSpec, extract product and dependencies
if needed and set the dag dependencies structure
"""
root_path = root_path or "."
# options
extract_up = dag_spec["meta"]["extract_upstream"]
extract_prod = dag_spec["meta"]["extract_product"]
# raw values extracted from the upstream key
upstream_raw = {}
# first pass: init tasks and add them to dag
for task_dict in dag_spec["tasks"]:
# init source to extract product
fn = task_dict["class"]._init_source
kwargs = {
"kwargs": {},
"extract_up": extract_up,
"extract_prod": extract_prod,
**task_dict,
}
source = call_with_dictionary(fn, kwargs=kwargs)
if extract_prod:
task_dict["product"] = source.extract_product()
# convert to task, up has the content of "upstream" if any
task, up = task_dict.to_task(dag)
if isinstance(task, TaskGroup):
for t in task:
upstream_raw[t] = up
else:
if extract_prod:
logger.debug(
'Extracted product for task "%s": %s', task.name, task.product
)
upstream_raw[task] = up
# second optional pass: extract upstream
tasks = list(dag.values())
task_names = list(dag._iter())
# actual upstream values after matching wildcards
upstream = {}
# expand upstream dependencies (in case there are any wildcards)
for task in tasks:
if extract_up:
try:
extracted = task.source.extract_upstream()
except Exception as e:
raise DAGSpecInitializationError(
f"Failed to initialize task {task.name!r}"
) from e
upstream[task] = _expand_upstream(extracted, task_names)
else:
upstream[task] = _expand_upstream(upstream_raw[task], task_names)
logger.debug(
"Extracted upstream dependencies for task %s: %s", task.name, upstream[task]
)
# Last pass: set upstream dependencies
for task in tasks:
if upstream[task]:
for task_name, group_name in upstream[task].items():
up = dag.get(task_name)
if up is None:
names = [t.name for t in tasks]
raise DAGSpecInitializationError(
f"Task {task.name!r} "
"has an upstream dependency "
f"{task_name!r}, but such task "
"doesn't exist. Available tasks: "
f"{pretty_print.iterable(names)}"
)
task.set_upstream(up, group_name=group_name)
|
Initialize Task objects from TaskSpec, extract product and dependencies
if needed and set the dag dependencies structure
|
process_tasks
|
python
|
ploomber/ploomber
|
src/ploomber/spec/dagspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/dagspec.py
|
Apache-2.0
|
def _expand_upstream(upstream, task_names):
"""
Processes a list of upstream values extracted from source (or declared in
the spec's "upstream" key). Expands wildcards like "some-task-*" to all
the values that match. Returns a dictionary where keys are the upstream
dependencies and the corresponding value is the wildcard. If no wildcard,
the value is None
"""
if not upstream:
return None
expanded = {}
for up in upstream:
if "*" in up:
matches = fnmatch.filter(task_names, up)
expanded.update({match: up for match in matches})
else:
expanded[up] = None
return expanded
|
Processes a list of upstream values extracted from source (or declared in
the spec's "upstream" key). Expands wildcards like "some-task-*" to all
the values that match. Returns a dictionary where keys are the upstream
dependencies and the corresponding value is the wildcard. If no wildcard,
the value is None
|
_expand_upstream
|
python
|
ploomber/ploomber
|
src/ploomber/spec/dagspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/dagspec.py
|
Apache-2.0
|
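A minimal sketch with hypothetical task names (assuming `_expand_upstream` is importable from `ploomber.spec.dagspec`): wildcards expand to every matching task and map back to the wildcard, while exact names map to None.

```python
# Hypothetical task names; assumes _expand_upstream is importable from the module above
from ploomber.spec.dagspec import _expand_upstream

task_names = ["load", "clean-a", "clean-b", "plot"]
print(_expand_upstream(["load", "clean-*"], task_names))
# {'load': None, 'clean-a': 'clean-*', 'clean-b': 'clean-*'}
print(_expand_upstream([], task_names))  # None: nothing to expand
```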
def _build_example_spec(source):
"""
Build an example spec from just the source.
"""
example_spec = {}
example_spec["source"] = source
if suffix2taskclass[Path(source).suffix] is NotebookRunner:
example_product = {
"nb": "products/report.ipynb",
"data": "products/data.csv",
}
else:
example_product = "products/data.csv"
example_spec = [
{
"source": source,
"product": example_product,
}
]
return example_spec
|
Build an example spec from just the source.
|
_build_example_spec
|
python
|
ploomber/ploomber
|
src/ploomber/spec/dagspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/dagspec.py
|
Apache-2.0
|
def task_class_from_source_str(source_str, lazy_import, reload, product):
"""
The source field in a DAG spec is a string. The actual value needed to
instantiate the task depends on the task class, but to make task class
optional, we try to guess the appropriate task here. If the source_str
needs any pre-processing to pass it to the task constructor, it also
happens here. If product is not None, it's also used to determine if
a task is a SQLScript or SQLDump
"""
try:
extension = Path(source_str).suffix
except Exception as e:
raise DAGSpecInitializationError(
"Failed to initialize task " f"from source {source_str!r}"
) from e
# we verify if this is a valid dotted path
# if lazy load is set to true, just locate the module without importing it
fn_checker = (
dotted_path.locate_dotted_path_root
if lazy_import is True
else partial(dotted_path.load_dotted_path, raise_=True, reload=reload)
)
if extension and extension in suffix2taskclass:
if extension == ".sql":
if _safe_suffix(product) in {".csv", ".parquet"}:
return tasks.SQLDump
else:
possibilities = _extension_typo(
_safe_suffix(product), [".csv", ".parquet"]
)
if possibilities:
ext = possibilities[0]
raise DAGSpecInitializationError(
f"Error parsing task with source {source_str!r}: "
f"{_safe_suffix(product)!r} is not a valid product "
f"extension. Did you mean: {ext!r}?"
)
return suffix2taskclass[extension]
elif _looks_like_path(source_str):
raise DAGSpecInitializationError(
"Failed to determine task class for "
f"source {source_str!r} (invalid "
f"extension {extension!r}). Valid extensions "
f"are: {pretty_print.iterable(suffix2taskclass)}"
)
elif lazy_import == "skip":
# Anything that has not been caught before is treated as a
# Python function, thus we return a PythonCallable
return tasks.PythonCallable
elif "." in source_str:
try:
imported = fn_checker(source_str)
error = None
except Exception as e:
imported = None
error = e
if imported is None:
if _looks_like_file_name(source_str):
raise DAGSpecInitializationError(
"Failed to determine task class for "
f"source {source_str!r} (invalid "
f"extension {extension!r}). Valid extensions "
f"are: {pretty_print.iterable(suffix2taskclass)}\n"
"If you meant to import a function, please rename it."
)
else:
raise DAGSpecInitializationError(
"Failed to determine task class for "
f"source {source_str!r}: {error!s}."
)
else:
return tasks.PythonCallable
else:
raise DAGSpecInitializationError(
f"Failed to determine task source {source_str!r}\nValid extensions"
f" are: {pretty_print.iterable(suffix2taskclass)}\n"
"You can also define functions as [module_name].[function_name]"
)
|
The source field in a DAG spec is a string. The actual value needed to
instantiate the task depends on the task class, but to make task class
optional, we try to guess the appropriate task here. If the source_str
needs any pre-processing to pass it to the task constructor, it also
happens here. If product is not None, it's also used to determine if
a task is a SQLScript or SQLDump
|
task_class_from_source_str
|
python
|
ploomber/ploomber
|
src/ploomber/spec/taskspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/taskspec.py
|
Apache-2.0
|
def task_class_from_spec(task_spec, lazy_import, reload):
"""
Returns the class for the TaskSpec. If the spec already has the class
name (str), it just returns the actual class object with that name;
otherwise it tries to guess based on the source string
"""
class_name = task_spec.get("class", None)
if class_name:
try:
class_ = validators.string.validate_task_class_name(class_name)
except Exception as e:
msg = f"Error validating Task spec (class field): {e.args[0]}"
e.args = (msg,)
raise
else:
class_ = task_class_from_source_str(
task_spec["source"],
lazy_import,
reload,
task_spec.get("product"),
)
return class_
|
Returns the class for the TaskSpec. If the spec already has the class
name (str), it just returns the actual class object with that name;
otherwise it tries to guess based on the source string
|
task_class_from_spec
|
python
|
ploomber/ploomber
|
src/ploomber/spec/taskspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/taskspec.py
|
Apache-2.0
|
def _init_source_for_task_class(
source_str, task_class, project_root, lazy_import, make_absolute
):
"""
Initialize source. Loads dotted path to callable if a PythonCallable
task, otherwise it returns a path
"""
if task_class is tasks.PythonCallable:
if lazy_import:
return source_str
else:
return dotted_path.load_dotted_path(source_str)
else:
path = Path(source_str)
# NOTE: there is some inconsistent behavior here. project_root
# will be none if DAGSpec was initialized with a dictionary, hence
# this won't resolve to absolute paths - this is a bit confusing.
# maybe always convert to absolute?
if project_root and not path.is_absolute() and make_absolute:
return Path(project_root, source_str)
else:
return path
|
Initialize source. Loads dotted path to callable if a PythonCallable
task, otherwise it returns a path
|
_init_source_for_task_class
|
python
|
ploomber/ploomber
|
src/ploomber/spec/taskspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/taskspec.py
|
Apache-2.0
|
def to_task(self, dag):
"""
Convert the spec to a Task or TaskGroup and add it to the dag.
Returns a (task, upstream) tuple with the Task instance and list of
upstream dependencies (as described in the 'upstream' key, if any,
empty if no 'upstream' key). If the spec has a 'grid' key, a TaskGroup
instance is returned instead
Parameters
----------
dag
The DAG to add the task(s) to
"""
data = copy(self.data)
upstream = _make_iterable(data.pop("upstream"))
if "grid" in data:
data_source_ = data["source"]
data_source = str(
data_source_
if not hasattr(data_source_, "__name__")
else data_source_.__name__
)
if "name" not in data:
raise DAGSpecInitializationError(
f"Error initializing task with "
f"source {data_source!r}: "
"tasks with 'grid' must have a 'name'"
)
task_class = data.pop("class")
product_class = _find_product_class(task_class, data, self.meta)
product = data.pop("product")
name = data.pop("name")
grid = _preprocess_grid_spec(data.pop("grid"))
# hooks
on_render = data.pop("on_render", None)
on_finish = data.pop("on_finish", None)
on_failure = data.pop("on_failure", None)
if on_render:
on_render = dotted_path.DottedPath(
on_render, lazy_load=self.lazy_import
)
if on_finish:
on_finish = dotted_path.DottedPath(
on_finish, lazy_load=self.lazy_import
)
if on_failure:
on_failure = dotted_path.DottedPath(
on_failure, lazy_load=self.lazy_import
)
params = data.pop("params", None)
# if the name argument is a placeholder, pass it in the namer
# argument so the placeholders are replaced by their values
if "[[" in name and "]]" in name:
name_arg = dict(namer=name)
else:
name_arg = dict(name=name)
return (
TaskGroup.from_grid(
task_class=task_class,
product_class=product_class,
product_primitive=product,
task_kwargs=data,
dag=dag,
grid=grid,
resolve_relative_to=self.project_root,
on_render=on_render,
on_finish=on_finish,
on_failure=on_failure,
params=params,
**name_arg,
),
upstream,
)
else:
return (
_init_task(
data=data,
meta=self.meta,
project_root=self.project_root,
lazy_import=self.lazy_import,
dag=dag,
),
upstream,
)
|
Convert the spec to a Task or TaskGroup and add it to the dag.
Returns a (task, upstream) tuple with the Task instance and list of
upstream dependencies (as described in the 'upstream' key, if any,
empty if no 'upstream' key). If the spec has a 'grid' key, a TaskGroup
instance is returned instead
Parameters
----------
dag
The DAG to add the task(s) to
|
to_task
|
python
|
ploomber/ploomber
|
src/ploomber/spec/taskspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/taskspec.py
|
Apache-2.0
|
def _init_task(data, meta, project_root, lazy_import, dag):
"""Initialize a single task from a dictionary spec"""
task_dict = copy(data)
class_ = task_dict.pop("class")
product = _init_product(
task_dict, meta, class_, project_root, lazy_import=lazy_import
)
_init_client(task_dict, lazy_import=lazy_import)
source = task_dict.pop("source")
name = task_dict.pop("name", None)
on_finish = task_dict.pop("on_finish", None)
on_render = task_dict.pop("on_render", None)
on_failure = task_dict.pop("on_failure", None)
if "serializer" in task_dict:
task_dict["serializer"] = dotted_path.DottedPath(
task_dict["serializer"], lazy_load=lazy_import
)
if "unserializer" in task_dict:
task_dict["unserializer"] = dotted_path.DottedPath(
task_dict["unserializer"], lazy_load=lazy_import
)
# edge case: if using lazy_import, we should not check if the kernel
# is installed. this is used when exporting to Argo/Airflow using
# soopervisor, since the exporting process should not require to have
# the ir kernel installed. The same applies when Airflow has to convert
# the DAG, the Airflow environment shouldn't require the ir kernel
if (
class_ == tasks.NotebookRunner
and lazy_import
and "check_if_kernel_installed" not in task_dict
):
task_dict["check_if_kernel_installed"] = False
# make paths to resources absolute
if "params" in task_dict:
task_dict["params"] = _process_dotted_paths(task_dict["params"])
task_dict["params"] = resolve_resources(
task_dict["params"], relative_to=project_root
)
try:
task = class_(source=source, product=product, name=name, dag=dag, **task_dict)
except Exception as e:
source_ = pretty_print.try_relative_path(source)
msg = (
f"Failed to initialize {class_.__name__} task with " f"source {source_!r}."
)
raise DAGSpecInitializationError(msg) from e
if on_finish:
task.on_finish = dotted_path.DottedPath(on_finish, lazy_load=lazy_import)
if on_render:
task.on_render = dotted_path.DottedPath(on_render, lazy_load=lazy_import)
if on_failure:
task.on_failure = dotted_path.DottedPath(on_failure, lazy_load=lazy_import)
return task
|
Initialize a single task from a dictionary spec
|
_init_task
|
python
|
ploomber/ploomber
|
src/ploomber/spec/taskspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/taskspec.py
|
Apache-2.0
|
def try_product_init(class_, product_raw, relative_to, kwargs):
"""Initializes Product (or MetaProduct)
Parameters
----------
class_ : class
Product class
product_raw : str, list or dict
The raw value as indicated by the user in the pipeline.yaml file. str
if a single file, list if a SQL relation or dict if a MetaProduct
relative_to : str
Prefix for all relative paths (only applicable to File products)
kwargs : dict
Other kwargs to initialize product
"""
if isinstance(product_raw, Mapping):
return {
key: _try_product_init(
class_, resolve_if_file(value, relative_to, class_), kwargs
)
for key, value in product_raw.items()
}
else:
path_to_source = resolve_if_file(product_raw, relative_to, class_)
return _try_product_init(class_, path_to_source, kwargs)
|
Initializes Product (or MetaProduct)
Parameters
----------
class_ : class
Product class
product_raw : str, list or dict
The raw value as indicated by the user in the pipeline.yaml file. str
if a single file, list if a SQL relation or dict if a MetaProduct
relative_to : str
Prefix for all relative paths (only applicable to File products)
kwargs : dict
Other kwargs to initialize product
|
try_product_init
|
python
|
ploomber/ploomber
|
src/ploomber/spec/taskspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/taskspec.py
|
Apache-2.0
|
def _try_product_init(class_, path_to_source, kwargs):
"""
Try to initialize the product; raises a chained exception if not possible,
to provide more context.
"""
try:
return class_(path_to_source, **kwargs)
except Exception as e:
kwargs_msg = f" and keyword arguments: {kwargs!r}" if kwargs else ""
raise DAGSpecInitializationError(
f"Error initializing {class_.__name__} with source: "
f"{path_to_source!r}" + kwargs_msg
) from e
|
Try to initialize the product; raises a chained exception if not possible,
to provide more context.
|
_try_product_init
|
python
|
ploomber/ploomber
|
src/ploomber/spec/taskspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/taskspec.py
|
Apache-2.0
|
def resolve_if_file(product_raw, relative_to, class_):
"""Resolve Product argument if it's a File to make it an absolute path"""
try:
return _resolve_if_file(product_raw, relative_to, class_)
except Exception as e:
e.args = ("Error initializing File with argument " f"{product_raw!r} ({e})",)
raise
|
Resolve Product argument if it's a File to make it an absolute path
|
resolve_if_file
|
python
|
ploomber/ploomber
|
src/ploomber/spec/taskspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/taskspec.py
|
Apache-2.0
|
def _resolve_if_file(product_raw, relative_to, class_):
"""Resolve File argument to make it an absolute path"""
# not a file, nothing to do...
if class_ != products.File:
return product_raw
# resolve...
elif relative_to:
# To keep things consistent, product relative paths are relative to the
# pipeline.yaml file (not to the current working directory). This is
# important because there is no guarantee that the process calling
# this will be at the pipeline.yaml location. One example is
# when using the integration with Jupyter notebooks, each notebook
# will set its working directory to the current parent.
return str(Path(relative_to, product_raw).resolve())
# no relative_to argument, nothing to do...
else:
return Path(product_raw).resolve()
|
Resolve File argument to make it an absolute path
|
_resolve_if_file
|
python
|
ploomber/ploomber
|
src/ploomber/spec/taskspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/taskspec.py
|
Apache-2.0
|
def _preprocess_grid_spec(grid_spec):
"""
Preprocess a grid (list or dictionary) to expand values if it contains
dotted paths
"""
if isinstance(grid_spec, Mapping):
return _process_dotted_paths(grid_spec)
else:
out = []
for element in grid_spec:
out.append(_process_dotted_paths(element))
return out
|
Preprocess a grid (list or dictionary) to expand values if it contains
dotted paths
|
_preprocess_grid_spec
|
python
|
ploomber/ploomber
|
src/ploomber/spec/taskspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/taskspec.py
|
Apache-2.0
|
def _process_dotted_paths(grid_spec):
"""
Preprocess a grid (dictionary) to expand values if it contains
dotted paths
"""
out = dict()
for key, value in grid_spec.items():
try:
dp = dotted_path.DottedPath(value, allow_return_none=False, strict=True)
# TypeError: not a string or dictionary
# ValueError: not the module::function format
# KeyError: dictionary with missing dotted_path key
except (TypeError, ValueError, KeyError):
dp = None
if dp:
out[key] = dp()
else:
out[key] = value
return out
|
Preprocess a grid (dictionary) to expand values if it contains
dotted paths
|
_process_dotted_paths
|
python
|
ploomber/ploomber
|
src/ploomber/spec/taskspec.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/spec/taskspec.py
|
Apache-2.0
|
def find_variable_access(self, variable):
"""
Find occurrences of {{variable.something}} and
{{variable['something']}}
"""
attr = self.ast.find_all(Getattr)
item = self.ast.find_all(Getitem)
return (
set(
[
obj.arg.as_const() if isinstance(obj, Getitem) else obj.attr
# iterate over get attribute and get item
for obj in chain(attr, item)
# only check variable access
if hasattr(obj.node, "name") and obj.node.name == variable
]
)
or None
)
|
Find occurrences of {{variable.something}} and
{{variable['something']}}
|
find_variable_access
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/jinja.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/jinja.py
|
Apache-2.0
|
def find_variable_assignment(self, variable):
"""
Find a variable assignment: {% set variable = something %}, returns
the node that assigns the variable
"""
variables = {n.target.name: n.node for n in self.ast.find_all(Assign)}
return None if variable not in variables else variables[variable]
|
Find a variable assignment: {% set variable = something %}, returns
the node that assigns the variable
|
find_variable_assignment
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/jinja.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/jinja.py
|
Apache-2.0
|
def unexpectedError(self, *args, **kwargs):
"""pyflakes calls this when ast.parse raises an unexpected error"""
self._unexpected = True
return super().unexpectedError(*args, **kwargs)
|
pyflakes calls this when ast.parse raises an unexpected error
|
unexpectedError
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/pyflakes.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/pyflakes.py
|
Apache-2.0
|
def syntaxError(self, *args, **kwargs):
"""pyflakes calls this when ast.parse raises a SyntaxError"""
self._syntax = True
return super().syntaxError(*args, **kwargs)
|
pyflakes calls this when ast.parse raises a SyntaxError
|
syntaxError
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/pyflakes.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/pyflakes.py
|
Apache-2.0
|
def check_notebook(nb, params, filename, raise_=True, check_signature=True):
"""
Perform static analysis on the code cell sources of a Jupyter notebook
Parameters
----------
nb : NotebookNode
Notebook object. Must have a cell with the tag "parameters"
params : dict
Parameter that will be added to the notebook source
filename : str
Filename to identify pyflakes warnings and errors
raise_ : bool, default=True
If True, raises an Exception if it encounters errors, otherwise a
warning
Raises
------
SyntaxError
If the notebook's code contains syntax errors
TypeError
If params and nb do not match (unexpected or missing parameters)
RenderError
When certain pyflakes errors are detected (e.g., undefined name)
"""
params_cell, _ = find_cell_with_tag(nb, "parameters")
check_source(nb, raise_=raise_)
if check_signature:
check_params(params, params_cell["source"], filename, warn=not raise_)
|
Perform static analysis on the code cell sources of a Jupyter notebook
Parameters
----------
nb : NotebookNode
Notebook object. Must have a cell with the tag "parameters"
params : dict
Parameter that will be added to the notebook source
filename : str
Filename to identify pyflakes warnings and errors
raise_ : bool, default=True
If True, raises an Exception if it encounters errors, otherwise a
warning
Raises
------
SyntaxError
If the notebook's code contains syntax errors
TypeError
If params and nb do not match (unexpected or missing parameters)
RenderError
When certain pyflakes errors are detected (e.g., undefined name)
|
check_notebook
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/pyflakes.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/pyflakes.py
|
Apache-2.0
|
def check_source(nb, raise_=True):
"""
Run pyflakes on a notebook; will catch errors such as missing passed
parameters that do not have default values
"""
# concatenate all cell's source code in a single string
source_code = "\n".join(
[
_comment_if_ipython_magic(c["source"])
for c in nb.cells
if c.cell_type == "code"
]
)
# these objects are needed to capture pyflakes output
reporter = MyReporter()
# run pyflakes.api.check on the source code
pyflakes_api.check(source_code, filename="", reporter=reporter)
reporter._check(raise_)
|
Run pyflakes on a notebook; will catch errors such as missing passed
parameters that do not have default values
|
check_source
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/pyflakes.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/pyflakes.py
|
Apache-2.0
|
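For reference, a minimal sketch of how pyflakes reports problems such as undefined names, using the stock Reporter instead of the MyReporter subclass used above (the source string is hypothetical):
import io
from pyflakes import api as pyflakes_api
from pyflakes.reporter import Reporter

source_code = "x = undefined_name + 1"
warnings_, errors_ = io.StringIO(), io.StringIO()
n_problems = pyflakes_api.check(source_code, filename="<nb>",
                                reporter=Reporter(warnings_, errors_))
print(n_problems)            # 1
print(warnings_.getvalue())  # e.g. <nb>:1: undefined name 'undefined_name'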
def _comment_if_ipython_magic(source):
"""Comments lines into comments if they're IPython magics (cell level)"""
# TODO: support for nested cell magics. e.g.,
# %%timeit
# %%timeit
# something()
lines_out = []
comment_rest = False
# TODO: inline magics should add a comment at the end of the line, because
# the python code may change the dependency structure. e.g.,
# %timeit z = x + y -> z = x + y # [magic] %timeit
# note that this only applies to inline magics that take Python code as arg
# NOTE: magics can take inputs but their outputs ARE NOT saved. e.g.,
# %timeit x = y + 1
# running such magic requires having y but after running it, x IS NOT
# declared. But this is magic dependent %time x = y + 1 will add x to the
# scope
for line in source.splitlines():
cell_magic = _is_ipython_cell_magic(line)
if comment_rest:
lines_out.append(_comment(line))
else:
line_magic = _is_ipython_line_magic(line)
# if line magic, comment line
if line_magic:
lines_out.append(_comment(line))
# if inline shell, comment line
elif _is_inline_shell(line):
lines_out.append(_comment(line))
# if cell magic, comment line
elif cell_magic in HAS_INLINE_PYTHON:
lines_out.append(_comment(line))
# if cell magic whose content *is not* Python, comment line and
# all the remaining lines in the cell
elif cell_magic:
lines_out.append(_comment(line))
comment_rest = True
# otherwise, don't do anything
else:
lines_out.append(line)
return "\n".join(lines_out)
|
Turns lines into comments if they're IPython magics (cell level)
|
_comment_if_ipython_magic
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/pyflakes.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/pyflakes.py
|
Apache-2.0
|
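A simplified, self-contained sketch of the same idea (turning magic lines into comments so the rest parses as plain Python); the regex is illustrative and less thorough than ploomber's helpers:
import re

def comment_magics(source):
    out = []
    for line in source.splitlines():
        stripped = line.lstrip()
        # line magics (%), cell magics (%%) and shell escapes (!) are not valid Python
        if re.match(r"%{1,2}\w+", stripped) or stripped.startswith("!"):
            out.append("# [magic] " + line)
        else:
            out.append(line)
    return "\n".join(out)

print(comment_magics("%%timeit\ntotal = sum(range(10))\n!ls"))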
def _is_ipython_cell_magic(source):
"""Determines if the source is an IPython cell magic. e.g.,
%cd some-directory
"""
m = re.match(_IS_IPYTHON_CELL_MAGIC, source.lstrip())
if not m:
return False
return m.group()
|
Determines if the source is an IPython cell magic. e.g.,
%cd some-directory
|
_is_ipython_cell_magic
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/pyflakes.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/pyflakes.py
|
Apache-2.0
|
def check_params(passed, params_source, filename, warn=False):
"""
Check that parameters passed to the notebook match the ones defined
in the parameters variable
Parameters
----------
passed : iterable
Parameters passed to the notebook (params argument)
params_source : str
Parameters cell source code
filename : str
The task's filename. Only used for displaying in the error message
warn : bool
If False, it raises a TypeError if params do not match. If True,
it displays a warning instead.
Raises
------
TypeError
If passed parameters do not match variables declared in params_source
and warn is False
"""
params_cell = ParamsCell(params_source)
missing = params_cell.get_missing(passed)
unexpected = params_cell.get_unexpected(passed)
if missing or unexpected:
errors = []
if missing:
errors.append(
f"Missing params: {pretty_print.iterable(missing)} "
"(to fix this, pass "
f"{pretty_print.them_or_name(missing)} in "
"the 'params' argument)"
)
if unexpected:
first = list(unexpected)[0]
errors.append(
"Unexpected "
f"params: {pretty_print.iterable(unexpected)} (to fix this, "
f"add {pretty_print.them_or_name(unexpected)} to the "
"'parameters' cell and assign the value as "
f"None. e.g., {first} = None)"
)
msg = (
f"Parameters "
"declared in the 'parameters' cell do not match task "
f"params. {pretty_print.trailing_dot(errors)} To disable this "
"check, set 'static_analysis' to 'disable' in the "
"task declaration."
)
if warn:
warnings.warn(msg)
else:
raise TypeError(f"Error rendering notebook {str(filename)!r}. {msg}")
|
Check that parameters passed to the notebook match the ones defined
in the parameters variable
Parameters
----------
passed : iterable
Parameters passed to the notebook (params argument)
params_source : str
Parameters cell source code
filename : str
The task's filename. Only used for displaying in the error message
warn : bool
If False, it raises a TypeError if params do not match. If True,
it displays a warning instead.
Raises
------
TypeError
If passed parameters do not match variables declared in params_source
and warn is False
|
check_params
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/pyflakes.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/pyflakes.py
|
Apache-2.0
|
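At its core, the signature check above is set arithmetic between the passed params and the names declared in the parameters cell; a minimal sketch (values are hypothetical):
declared = {"x", "y"}           # names assigned in the "parameters" cell
passed = {"x", "z"}             # keys in the task's params argument

missing = declared - passed     # {'y'}: pass it in 'params'
unexpected = passed - declared  # {'z'}: declare it in the cell, e.g. z = None
print(missing, unexpected)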
def _get_defined_variables(params_source):
"""
Return the variables defined in a given source. If a name is defined more
than once, it uses the last definition. Ignores anything other than
variable assignments (e.g., function definitions, exceptions)
"""
used_names = parso.parse(params_source).get_used_names()
def _get_value(value):
possible_literal = value.get_definition().children[-1].get_code().strip()
try:
# NOTE: this cannot parse dict(a=1, b=2)
return ast.literal_eval(possible_literal)
except ValueError:
return None
return {
key: _get_value(value[-1])
for key, value in used_names.items()
if value[-1].is_definition() and value[-1].get_definition().type == "expr_stmt"
}
|
Return the variables defined in a given source. If a name is defined more
than once, it uses the last definition. Ignores anything other than
variable assignments (e.g., function definitions, exceptions)
|
_get_defined_variables
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/pyflakes.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/pyflakes.py
|
Apache-2.0
|
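An alternative sketch of the same idea using only the standard library's ast module (keep the last literal assignment per name, map non-literals to None); this is not ploomber's parso-based implementation:
import ast

def defined_variables(source):
    result = {}
    for node in ast.parse(source).body:
        if (isinstance(node, ast.Assign) and len(node.targets) == 1
                and isinstance(node.targets[0], ast.Name)):
            try:
                result[node.targets[0].id] = ast.literal_eval(node.value)
            except ValueError:
                result[node.targets[0].id] = None
    return result

print(defined_variables("a = 1\nb = [1, 2]\na = 'last wins'"))
# {'a': 'last wins', 'b': [1, 2]}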
def extract_product(self):
"""
Extract "product" from a Python code string
"""
product_found, product = extract_variable(self.parameters_cell, "product")
if not product_found or product is None:
raise ValueError(
"Couldn't extract 'product' " f"from code: {self.parameters_cell!r}"
)
else:
return product
|
Extract "product" from a Python code string
|
extract_product
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/python.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/python.py
|
Apache-2.0
|
def extract_upstream(self):
"""
Extract keys requested from an upstream variable (e.g. upstream['key'])
"""
module = ast.parse(self.code)
return {
get_value(node)
for node in ast.walk(module)
if isinstance(node, ast.Subscript)
and get_key_value(node) == "upstream"
and isinstance(get_constant(node), ast.Str)
} or None
|
Extract keys requested from an upstream variable (e.g. upstream['key'])
|
extract_upstream
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/python.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/python.py
|
Apache-2.0
|
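A self-contained sketch of the same extraction using ast directly (get_value, get_key_value and get_constant above are ploomber helpers not shown here); assumes the Python 3.9+ AST layout where the subscript index is node.slice:
import ast

def upstream_keys(code):
    keys = set()
    for node in ast.walk(ast.parse(code)):
        if (isinstance(node, ast.Subscript)
                and isinstance(node.value, ast.Name)
                and node.value.id == "upstream"
                and isinstance(node.slice, ast.Constant)
                and isinstance(node.slice.value, str)):
            keys.add(node.slice.value)
    return keys or None

print(upstream_keys("df = load(upstream['clean'])\nfeats = upstream['features']"))
# {'clean', 'features'}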
def extract_variable(code_str, name):
"""
Get the value assigned to a variable with name "name" by passing a code
string
"""
variable_found = False
value = None
for stmt in _iterate_assignments(code_str):
if hasattr(stmt, "get_defined_names"):
defined = stmt.get_defined_names()
if len(defined) == 1 and defined[0].value == name:
variable_found = True
value = eval(stmt.children[2].get_code())
return variable_found, value
|
Get the value assigned to a variable with name "name" by passing a code
string
|
extract_variable
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/python.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/python.py
|
Apache-2.0
|
def extract_upstream_assign(cell_code):
"""
Infer dependencies from a single Python cell. Looks for a cell that
defines an upstream variable, which must be a dictionary, list, tuple, set, or None
"""
upstream_found, upstream = extract_variable(cell_code, "upstream")
if not upstream_found:
raise ValueError(
"Could not parse a valid 'upstream' variable "
"in the 'parameters' cell with code:\n'%s'\n"
"If the notebook does not have dependencies add "
"upstream = None" % cell_code
)
else:
valid_types = (Mapping, list, tuple, set)
if not (isinstance(upstream, valid_types) or upstream is None):
raise ValueError(
"Found an upstream variable but it is not a "
"valid type (dictionary, list, tuple set or None "
", got '%s' type from code:\n"
"'%s'" % (type(upstream), cell_code)
)
elif isinstance(upstream, valid_types):
return set(upstream)
else:
return None
|
Infer dependencies from a single Python cell. Looks for a cell that
defines an upstream variable, which must be a dictionary, list, tuple, set, or None
|
extract_upstream_assign
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/python.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/python.py
|
Apache-2.0
|
def naive_parsing(code, var_name):
"""
Our current R parser can only deal with a single statement (one line)
at a time. So we parse on a per-line basis and look for the variable
we want; this will be replaced with a more efficient implementation
once we improve the parser
"""
for code in code.split("\n"):
# ignore empty lines and comments
if code != "" and not code.startswith("#"):
parser = Parser(list(RLexer(code)))
exp = parser.parse()
if exp.left.value == var_name:
return exp.right.to_python()
|
Our current R parser can only deal with a single statement (one line)
at a time. So we parse on a per-line basis and look for the variable
we want; this will be replaced with a more efficient implementation
once we improve the parser
|
naive_parsing
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/r.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/r.py
|
Apache-2.0
|
def extract_product(self, raise_if_none=True):
"""
Extract an object from a SQL template that defines a product variable:
{% set product = SOME_CLASS(...) %}
Where SOME_CLASS is a class defined in ploomber.products. If no product
variable is defined, returns None
"""
product = self._jinja_extractor.find_variable_assignment(variable="product")
if product is None:
if raise_if_none:
code = self._jinja_extractor.get_code_as_str()
raise ValueError(f"Couldn't extract 'product' from code: {code!r}")
else:
# validate product
try:
# get the class name used
class_ = getattr(products, product.node.name)
# get the arg passed to the class
arg = product.args[0].as_const()
# try to initialize object
return class_(arg)
except Exception as e:
exc = ValueError(
"Found a variable named 'product' in "
"code: {} but it does not appear to "
"be a valid SQL product, verify it ".format(
self._jinja_extractor.code
)
)
raise exc from e
|
Extract an object from a SQL template that defines a product variable:
{% set product = SOME_CLASS(...) %}
Where SOME_CLASS is a class defined in ploomber.products. If no product
variable is defined, returns None
|
extract_product
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/sql.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/sql.py
|
Apache-2.0
|
def _normalize(identifier):
"""
Normalize a SQL identifier. Given that different SQL implementations have
different rules, we will implement logic based on PostgreSQL. Double
quotes make an identifier case sensitive, unquoted are forced to lower
case. MySQL, on the other hand, depends on the file system; furthermore,
the identifier quoting character can also be a backtick.
Notes
-----
PostgreSQL - Section 4.1.1 mentions quoted identifiers:
https://www.postgresql.org/docs/9.1/sql-syntax-lexical.html
MySQL - Section 9.2:
https://dev.mysql.com/doc/refman/8.0/en/identifiers.html
https://dev.mysql.com/doc/refman/8.0/en/identifier-case-sensitivity.html
"""
# does this cover all use cases?
if identifier is None:
return None
elif _quoted_with(identifier, '"'):
return identifier.replace('"', "")
else:
return identifier.lower()
|
Normalize a SQL identifier. Given that different SQL implementations have
different rules, we will implement logic based on PostgreSQL. Double
quotes make an identifier case sensitive, unquoted are forced to lower
case. MySQL, on the other hand, depends on the file system; furthermore,
the identifier quoting character can also be a backtick.
Notes
-----
PostgreSQL - Section 4.1.1 mentions quoted identifiers:
https://www.postgresql.org/docs/9.1/sql-syntax-lexical.html
MySQL - Section 9.2:
https://dev.mysql.com/doc/refman/8.0/en/identifiers.html
https://dev.mysql.com/doc/refman/8.0/en/identifier-case-sensitivity.html
|
_normalize
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/sql.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/sql.py
|
Apache-2.0
|
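Expected behavior of the normalization rules above, re-implemented standalone because _quoted_with is a helper not shown here (a sketch, not the exact ploomber code):
def normalize(identifier):
    if identifier is None:
        return None
    if identifier.startswith('"') and identifier.endswith('"'):
        return identifier.replace('"', "")  # quoted: keep case, drop quotes
    return identifier.lower()               # unquoted: fold to lower case

assert normalize('"MySchema"') == "MySchema"
assert normalize("MySchema") == "myschema"
assert normalize(None) is None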
def parse(self):
"""The current implementation can only parse one expression at a time"""
if not isinstance(self.current_token, Name):
raise SyntaxError("First token must be a valid name")
if not isinstance(self.next_token, Assignment):
raise SyntaxError("Second token must be an assignment")
return Expression(
self.current_token, self.next_token, build_node(self.get_tail())
)
|
The current implementation can only parse one expression at a time
|
parse
|
python
|
ploomber/ploomber
|
src/ploomber/static_analysis/parser/parser.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/static_analysis/parser/parser.py
|
Apache-2.0
|
def upstream(self):
"""
A mapping for upstream dependencies {task name} -> [task object]
"""
# this is just syntactic sugar, upstream relations are tracked by the
# DAG object
# this always returns a copy to prevent global state issues if contents
# are modified (e.g. by using pop)
return self.dag._get_upstream(self.name)
|
A mapping for upstream dependencies {task name} -> [task object]
|
upstream
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/abc.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/abc.py
|
Apache-2.0
|
def _upstream_product_grouped(self):
"""
Similar to .upstream but this one nests groups. Output will be the
same as .upstream if no upstream dependencies are grouped. This is
only used internally in .render to correctly pass upstream to
task.params.
Unlike .upstream, this method returns products instead of task names
"""
grouped = defaultdict(lambda: {})
for up_name, up_task in self.upstream.items():
data = self.dag._G.get_edge_data(up_name, self.name)
if data and "group_name" in data:
group_name = data["group_name"]
grouped[group_name][up_name] = up_task.product
else:
grouped[up_name] = up_task.product
return dict(grouped)
|
Similar to .upstream but this one nests groups. Output will be the
same as .upstream if no upstream dependencies are grouped. This is
only used internally in .render to correctly pass upstream to
task.params.
Unlike .upstream, this method returns products instead of task names
|
_upstream_product_grouped
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/abc.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/abc.py
|
Apache-2.0
|
def _lineage(self):
"""
Set with task names of all the dependencies for this task
(including dependencies of dependencies)
"""
# if no upstream deps, there is no lineage
if not len(self.upstream):
return None
else:
# retrieve lineage: upstream tasks + lineage from upstream tasks
up = list(self.upstream.keys())
lineage_up = [up._lineage for up in self.upstream.values() if up._lineage]
lineage = up + [task for lineage in lineage_up for task in lineage]
return set(lineage)
|
Set with task names of all the dependencies for this task
(including dependencies of dependencies)
|
_lineage
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/abc.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/abc.py
|
Apache-2.0
|
def _post_run_actions(self):
"""
Call on_finish hook, save metadata, verify products exist and upload
product
"""
# run on finish first, if this fails, we don't want to save metadata
try:
self._run_on_finish()
except Exception:
# NOTE: we also set the status in Task._build, which runs during
# DAG.build() - but setting it here as well to prevent a DAG
# inconsistent state when the user calls Task.build() directly
self.exec_status = TaskStatus.Errored
raise
if self.exec_status == TaskStatus.WaitingDownload:
# clear current metadata to force reload
# and ensure the task uses the downloaded metadata
self.product.metadata.clear()
else:
self.product.metadata.update(
source_code=str(self.source),
params=self.params.to_json_serializable(params_only=True),
)
# For most Products, it's ok to do this check before
# saving metadata, but not for GenericProduct, since the way
# exists() works is by checking metadata, so we have to do it
# here, after saving metadata
if isinstance(self.product, MetaProduct):
missing = self.product.missing()
if missing:
raise TaskBuildError(
f"Error building task {self.name!r}: "
"the task ran successfully but the following "
"products are "
f"missing: {pretty_print.iterable(missing, repr_=True)}"
)
elif not self.product.exists():
raise TaskBuildError(
f"Error building task {self.name!r}: "
"the task ran successfully but the "
f"product {self.product!r} is missing. "
"Ensure your code is generating it."
)
if self.exec_status != TaskStatus.WaitingDownload:
self.product.upload()
|
Call on_finish hook, save metadata, verify products exist and upload
product
|
_post_run_actions
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/abc.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/abc.py
|
Apache-2.0
|
def build(self, force=False, catch_exceptions=True):
"""Build a single task
Although Tasks are primarily designed to execute via DAG.build(), it
is possible to do so in isolation. However, this only works if the
task does not have any unrendered upstream dependencies, if that's the
case, you should call DAG.render() before calling Task.build()
Returns
-------
dict
A dictionary with keys 'run' and 'elapsed'
Raises
------
TaskBuildError
If the task failed to build because it has upstream dependencies,
the build itself failed, or the build succeeded but the on_finish hook failed
DAGBuildEarlyStop
If any task or on_finish hook raises a DAGBuildEarlyStop error
"""
# This is the public API for users who'd like to run tasks in isolation,
# we have to make sure we clear product cache status, otherwise
# this will interfere with other render calls
self.render(force=force)
upstream_exec_status = [t.exec_status for t in self.upstream.values()]
if any(
exec_status == TaskStatus.WaitingRender
for exec_status in upstream_exec_status
):
raise TaskBuildError(
'Cannot directly build task "{}" as it '
"has upstream dependencies, call "
"dag.render() first".format(self.name)
)
# we can execute an individual task if up-to-date copies of its missing
# upstream dependencies exist in remote storage
if self.exec_status == TaskStatus.WaitingUpstream:
ok = {
t
for t in self.upstream.values()
if t.exec_status in {TaskStatus.Skipped, TaskStatus.WaitingDownload}
}
not_ok = set(self.upstream.values()) - ok
if not_ok:
raise TaskBuildError(
f"Cannot build task {self.name!r} because "
"the following upstream dependencies are "
f"missing: {[t.name for t in not_ok]!r}. Execute upstream "
"tasks first. If upstream tasks generate File(s) and you "
"configured a File.client, you may also upload "
"up-to-date copies to remote storage and they will be "
"automatically downloaded"
)
download_products_in_parallel(
t for t in ok if t.exec_status == TaskStatus.WaitingDownload
)
# at this point the task must be WaitingDownload or WaitingExecution
res, _ = self._build(catch_exceptions=catch_exceptions)
self.product.metadata.clear()
return res
|
Build a single task
Although Tasks are primarily designed to execute via DAG.build(), it
is possible to do so in isolation. However, this only works if the
task does not have any unrendered upstream dependencies, if that's the
case, you should call DAG.render() before calling Task.build()
Returns
-------
dict
A dictionary with keys 'run' and 'elapsed'
Raises
------
TaskBuildError
If the task failed to build because it has upstream dependencies,
the build itself failed, or the build succeeded but the on_finish hook failed
DAGBuildEarlyStop
If any task or on_finish hook raises a DAGBuildEarlyStop error
|
build
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/abc.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/abc.py
|
Apache-2.0
|
def _build(self, catch_exceptions):
"""
Private API for building DAGs. This is what executors should call.
Unlike the public method, this one does not call render, as it
should happen via a dag.render() call. It takes care of running the
task and updating status accordingly
Parameters
----------
catch_exceptions : bool
If True, catches exceptions during execution and shows a chained
exception at the end: [original exception] then
[exception with context info]. Set it to False when debugging
tasks to drop into a debugging session at the failing line.
"""
if not catch_exceptions:
res = self._run()
self._post_run_actions()
return res, self.product.metadata.to_dict()
else:
try:
# TODO: this calls download; if this happens, should
# hooks be executed when downloading? if so, we could
# change the ran? column from the task report to something
# like:
# ran/downloaded/skipped and use that to determine if we should
# run hooks
res = self._run()
except Exception as e:
msg = 'Error building task "{}"'.format(self.name)
self._logger.exception(msg)
self.exec_status = TaskStatus.Errored
# if there isn't anything left to run, raise exception here
if self.on_failure is None:
if isinstance(e, DAGBuildEarlyStop):
raise DAGBuildEarlyStop(
"Stopping task {} gracefully".format(self.name)
) from e
else:
# FIXME: this makes the traceback longer, consider
# removing it. The only information this nested
# exception provides is the name of the task but we
# are still able to provide that if the executor
# has the option to capture exceptions turned on.
# An option to consider is to
raise TaskBuildError(msg) from e
build_success = False
build_exception = e
else:
build_success = True
build_exception = None
if build_success:
try:
self._post_run_actions()
except Exception as e:
self.exec_status = TaskStatus.Errored
msg = "Exception when running on_finish " f"for task {self.name!r}"
self._logger.exception(msg)
if isinstance(e, DAGBuildEarlyStop):
raise DAGBuildEarlyStop(
"Stopping task {} gracefully".format(self.name)
) from e
else:
raise TaskBuildError(msg) from e
else:
# successful task execution, on_finish hook execution,
# metadata saving and upload
self.exec_status = TaskStatus.Executed
return res, self.product.metadata.to_dict()
# error building task
else:
try:
self._run_on_failure()
except Exception as e:
msg = "Exception when running on_failure " f"for task {self.name!r}"
self._logger.exception(msg)
raise TaskBuildError(msg) from e
if isinstance(build_exception, DAGBuildEarlyStop):
raise DAGBuildEarlyStop(
"Stopping task {} gracefully".format(self.name)
) from build_exception
else:
msg = 'Error building task "{}"'.format(self.name)
raise TaskBuildError(msg) from build_exception
|
Private API for building DAGs. This is what executors should call.
Unlike the public method, this one does not call render, as it
should happen via a dag.render() call. It takes care of running the
task and updating status accordingly
Parameters
----------
catch_exceptions : bool
If True, catches exceptions during execution and shows a chained
exception at the end: [original exception] then
[exception with context info]. Set it to False when debugging
tasks to drop into a debugging session at the failing line.
|
_build
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/abc.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/abc.py
|
Apache-2.0
|
def _run(self):
"""
Run or download task if certain status conditions are met, otherwise
raise a TaskBuildError exception
"""
# cannot keep running, we depend on the render step to get all the
# parameters resolved (params, upstream, product)
if self.exec_status == TaskStatus.WaitingRender:
raise TaskBuildError(
'Error building task "{}". '
"Cannot build task that has not been "
"rendered, call DAG.render() first".format(self.name)
)
elif self.exec_status == TaskStatus.Aborted:
raise TaskBuildError(
'Attempted to run task "{}", whose '
"status is TaskStatus.Aborted".format(self.name)
)
elif self.exec_status == TaskStatus.Skipped:
raise TaskBuildError(
'Attempted to run task "{}", whose '
"status TaskStatus.Skipped. Render again and "
"set force=True if you want to force "
"execution".format(self.name)
)
# NOTE: should i fetch metadata here? I need to make sure I have
# the latest before building
self._logger.info("Starting execution: %s", repr(self))
then = datetime.now()
_ensure_parents_exist(self.product)
if self.exec_status == TaskStatus.WaitingDownload:
try:
self.product.download()
except Exception as e:
raise TaskBuildError(
f"Error downloading Product {self.product!r} "
f"from task {self!r}. Check the full traceback above for "
"details"
) from e
# NOTE: should we validate status here?
# (i.e., check it's WaitingExecution)
else:
self.run()
now = datetime.now()
elapsed = (now - then).total_seconds()
self._logger.info("Done. Operation took {:.1f} seconds".format(elapsed))
# TODO: also check that the Products were updated:
# if they did not exist, they must exist now, if they already
# exist, timestamp must be recent equal to the datetime.now()
# used. maybe run fetch metadata again and validate?
return TaskReport.with_data(name=self.name, ran=True, elapsed=elapsed)
|
Run or download task if certain status conditions are met, otherwise
raise a TaskBuildError exception
|
_run
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/abc.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/abc.py
|
Apache-2.0
|
def render(self, force=False, outdated_by_code=True, remote=False):
"""
Renders code and product, all upstream tasks must have been rendered
first, for that reason, this method will usually not be called
directly but via DAG.render(), which renders in the right order.
Render fully determines whether a task should run or not.
Parameters
----------
force : bool, default=False
If True, mark status as WaitingExecution/WaitingUpstream even if
the task is up-to-date (if there are any File(s) with clients, this
also ignores the status of the remote copy), otherwise, the normal
process follows and only up-to-date tasks are marked as Skipped.
outdated_by_code : bool, default=True
Determines whether source code changes are considered when marking
Task.product as outdated. Otherwise, just the upstream timestamps are used.
remote : bool, default=False
Use remote metadata to determine status
Notes
-----
This method tries to avoid calls to check for product status whenever
possible, since checking product's metadata can be a slow operation
(e.g. if metadata is stored in a remote database)
When passing force=True, product's status checking is skipped
altogether, this can be useful when we only want to quickly get
a rendered DAG object to interact with it
"""
self._logger.debug("Calling render on task %s", self.name)
try:
self._render_product()
except Exception as e:
self.exec_status = TaskStatus.ErroredRender
raise type(e)(
'Error rendering product from Task "{}", '
" check the full traceback above for details. "
"Task params: {}".format(repr(self), self.params)
) from e
# product does not become part of the task parameters when passing
# an EmptyProduct - this special kind of task is used by InMemoryDAG.
if not isinstance(self.product, EmptyProduct):
self.params._setitem("product", self.product)
try:
self.source.render(self.params)
except Exception as e:
self.exec_status = TaskStatus.ErroredRender
raise e
# we use the outdated status several times, this caches the result
# for performance reasons. TODO: move this to Product and make it
# a property
is_outdated = ProductEvaluator(self.product, outdated_by_code, remote=remote)
# task with no dependencies
if not self.upstream:
# nothing to do, just mark it ready for execution
if force:
self._exec_status = TaskStatus.WaitingExecution
self._logger.debug(
'Forcing status "%s", outdated conditions' " ignored...", self.name
)
# task is outdated, check if we need to execute or download
elif is_outdated.check():
# This only happens with File
if is_outdated.check() == TaskStatus.WaitingDownload:
self._exec_status = TaskStatus.WaitingDownload
else:
self._exec_status = TaskStatus.WaitingExecution
# task is up-to-date
else:
self._exec_status = TaskStatus.Skipped
# tasks with dependencies
else:
upstream_exec_status = set(t.exec_status for t in self.upstream.values())
all_upstream_ready = upstream_exec_status <= {
TaskStatus.Executed,
TaskStatus.Skipped,
}
# some upstream tasks need execution (or download)
if not all_upstream_ready:
if force or is_outdated.check() is True:
self._exec_status = TaskStatus.WaitingUpstream
elif is_outdated.check() == TaskStatus.WaitingDownload:
self._exec_status = TaskStatus.WaitingDownload
else:
self._exec_status = TaskStatus.Skipped
# all upstream ready
else:
if force or is_outdated.check() is True:
self._exec_status = TaskStatus.WaitingExecution
elif is_outdated.check() == TaskStatus.WaitingDownload:
self._exec_status = TaskStatus.WaitingDownload
else:
self._exec_status = TaskStatus.Skipped
self._run_on_render()
|
Renders code and product, all upstream tasks must have been rendered
first, for that reason, this method will usually not be called
directly but via DAG.render(), which renders in the right order.
Render fully determines whether a task should run or not.
Parameters
----------
force : bool, default=False
If True, mark status as WaitingExecution/WaitingUpstream even if
the task is up-to-date (if there are any File(s) with clients, this
also ignores the status of the remote copy), otherwise, the normal
process follows and only up-to-date tasks are marked as Skipped.
outdated_by_code : bool, default=True
Determines whether source code changes are considered when marking
Task.product as outdated. Otherwise, just the upstream timestamps are used.
remote : bool, default=False
Use remote metadata to determine status
Notes
-----
This method tries to avoid calls to check for product status whenever
possible, since checking product's metadata can be a slow operation
(e.g. if metadata is stored in a remote database)
When passing force=True, product's status checking is skipped
altogether, this can be useful when we only want to quickly get
a rendered DAG object to interact with it
|
render
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/abc.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/abc.py
|
Apache-2.0
|
def status(self, return_code_diff=False, sections=None):
"""Prints the current task status
Parameters
----------
sections : list, optional
Sections to include. Defaults to "name", "last_run",
"oudated", "product", "doc", "location"
"""
sections = sections or [
"name",
"last_run",
"outdated",
"product",
"doc",
"location",
]
p = self.product
data = {}
if "name" in sections:
data["name"] = self.name
if "type" in sections:
data["type"] = type(self).__name__
if "status" in sections:
data["status"] = self.exec_status.name
if "client" in sections:
# FIXME: all tasks should have a client property
data["client"] = repr(self.client) if hasattr(self, "client") else None
if "last_run" in sections:
if p.metadata.timestamp is not None:
dt = datetime.fromtimestamp(p.metadata.timestamp)
date_h = dt.strftime("%b %d, %Y at %H:%M")
time_h = humanize.naturaltime(dt)
data["Last run"] = "{} ({})".format(time_h, date_h)
else:
data["Last run"] = "Has not been run"
outd_data = p._outdated_data_dependencies()
outd_code = p._outdated_code_dependency()
outd = False
if outd_code:
outd = "Source code"
if outd_data:
if not outd:
outd = "Upstream"
else:
outd += " & Upstream"
if "outdated" in sections:
data["Outdated?"] = outd
if "outdated_dependencies" in sections:
data["Outdated dependencies"] = outd_data
if "outdated_code" in sections:
data["Outdated code"] = outd_code
if outd_code and return_code_diff:
data["Code diff"] = self.dag.differ.get_diff(
p.metadata.stored_source_code,
str(self.source),
extension=self.source.extension,
)
else:
outd_code = ""
if "product_type" in sections:
data["Product type"] = type(self.product).__name__
if "product" in sections:
data["Product"] = repr(self.product)
if "product_client" in sections:
# FIXME: all products should have a client property
data["Product client"] = (
repr(self.product.client) if hasattr(self.product, "client") else None
)
if "doc" in sections:
data["Doc (short)"] = _doc_short(self.source.doc)
if "location" in sections:
data["Location"] = self.source.loc
return Row(data)
|
Prints the current task status
Parameters
----------
sections : list, optional
Sections to include. Defaults to "name", "last_run",
"oudated", "product", "doc", "location"
|
status
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/abc.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/abc.py
|
Apache-2.0
|
def debug(self):
"""Debug task, only implemented in certain tasks"""
raise NotImplementedError(
'"debug" is not implemented in "{}" tasks'.format(type(self).__name__)
)
|
Debug task, only implemented in certain tasks
|
debug
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/abc.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/abc.py
|
Apache-2.0
|
def load(self):
"""Load task as pandas.DataFrame. Only implemented in certain tasks"""
raise NotImplementedError(
'"load" is not implemented in "{}" tasks'.format(type(self).__name__)
)
|
Load task as pandas.DataFrame. Only implemented in certain tasks
|
load
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/abc.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/abc.py
|
Apache-2.0
|
def _check_exporter(exporter, path_to_output):
"""
Validate if the user can use the selected exporter
"""
if WebPDFExporter is not None and exporter is WebPDFExporter:
pyppeteer_installed = find_spec("pyppeteer") is not None
if not pyppeteer_installed:
raise TaskInitializationError(
"pyppeteer is required to use "
"webpdf, install it "
'with:\npip install "nbconvert[webpdf]"'
)
else:
if not chromium_downloader.check_chromium():
chromium_downloader.download_chromium()
if Path(path_to_output).suffix != ".pdf":
raise TaskInitializationError(
"Expected output to have "
"extension .pdf when using the webpdf "
f"exporter, got: {path_to_output}. Change the extension "
"and try again"
)
|
Validate if the user can use the selected exporter
|
_check_exporter
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/notebook.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/notebook.py
|
Apache-2.0
|
def _get_exporter(exporter_name, path_to_output):
"""
Get a function to convert a notebook to another format using nbconvert.
In some cases, the exporter name matches the file extension
(e.g., html) but other times it doesn't (e.g., slides); use
`nbconvert.get_export_names()` to get available exporter names.
Returns None if the passed exporter name is 'ipynb', raises ValueError
if an exporter can't be located
"""
extension2exporter_name = {"md": "markdown"}
# sometimes extension does not match with the exporter name, fix
# if needed
if exporter_name in extension2exporter_name:
exporter_name = extension2exporter_name[exporter_name]
if exporter_name == "ipynb":
exporter = None
else:
try:
exporter = nbconvert.get_exporter(exporter_name)
# nbconvert 5.6.1 raises ValueError, beginning in version 6,
# it raises ExporterNameError. However the exception is defined
# since 5.6.1 so we can safely import it
except (ValueError, ExporterNameError):
error = True
else:
error = False
if error:
names = nbconvert.get_export_names()
raise TaskInitializationError(
f"{exporter_name!r} is not a "
"valid 'nbconvert_exporter_name' value. "
"Choose one from: "
f"{pretty_print.iterable(names)}"
)
_check_exporter(exporter, path_to_output)
return exporter
|
Get a function to convert a notebook to another format using nbconvert.
In some cases, the exporter name matches the file extension
(e.g., html) but other times it doesn't (e.g., slides); use
`nbconvert.get_export_names()` to get available exporter names.
Returns None if the passed exporter name is 'ipynb', raises ValueError
if an exporter can't be located
|
_get_exporter
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/notebook.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/notebook.py
|
Apache-2.0
|
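For reference, the nbconvert lookups used above (assumes nbconvert is installed; the exact list of exporter names depends on the installed version):
import nbconvert

print(nbconvert.get_export_names())    # e.g. ['asciidoc', 'html', 'markdown', 'slides', ...]
print(nbconvert.get_exporter("html"))  # <class 'nbconvert.exporters.html.HTMLExporter'>

# an unknown name raises ValueError (nbconvert 5.x) or ExporterNameError (6+)
# nbconvert.get_exporter("not-a-real-exporter")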
def debug(self, kind="ipdb"):
"""
Opens the notebook (with injected parameters) in debug mode in a
temporary location
Parameters
----------
kind : str, default='ipdb'
Debugger to use, 'ipdb' to use line-by-line IPython debugger,
'pdb' to use the line-by-line Python debugger, or 'pm' to do
post-mortem debugging using IPython
Notes
-----
Be careful when debugging tasks. If the task has run
successfully and you overwrite products but don't save the
updated source code, your DAG will enter an inconsistent state where
the metadata won't match the overwritten product.
"""
if self.source.language != "python":
raise NotImplementedError(
'debug is not implemented for "{}" '
"notebooks, only python is supported".format(self.source.language)
)
opts = {"ipdb", "pdb", "pm"}
if kind not in opts:
raise ValueError("kind must be one of {}".format(opts))
nb = _read_rendered_notebook(self.source.nb_str_rendered)
fd, tmp_path = tempfile.mkstemp(suffix=".py")
os.close(fd)
code = jupytext.writes(nb, version=nbformat.NO_CONVERT, fmt="py")
_write_text_utf_8(tmp_path, code)
if kind == "pm":
# post-mortem debugging
try:
subprocess.run(["ipython", tmp_path, "--pdb"])
finally:
Path(tmp_path).unlink()
else:
if kind == "ipdb":
from IPython.terminal.debugger import TerminalPdb, Pdb
code = compile(source=code, filename=tmp_path, mode="exec")
try:
# this seems to only work in a Terminal
debugger = TerminalPdb()
except Exception:
# this works in a Jupyter notebook
debugger = Pdb()
elif kind == "pdb":
debugger = pdb
try:
debugger.run(code)
finally:
Path(tmp_path).unlink()
|
Opens the notebook (with injected parameters) in debug mode in a
temporary location
Parameters
----------
kind : str, default='ipdb'
Debugger to use, 'ipdb' to use line-by-line IPython debugger,
'pdb' to use the line-by-line Python debugger, or 'pm' to do
post-mortem debugging using IPython
Notes
-----
Be careful when debugging tasks. If the task has run
successfully and you overwrite products but don't save the
updated source code, your DAG will enter an inconsistent state where
the metadata won't match the overwritten product.
|
debug
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/notebook.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/notebook.py
|
Apache-2.0
|
def _read_rendered_notebook(nb_str):
"""
Read rendered notebook and inject cell with debugging settings
"""
# add debug cells
nb = nbformat.reads(nb_str, as_version=nbformat.NO_CONVERT)
nbformat_v = nbformat.versions[nb.nbformat]
source = """
# Debugging settings (this cell will be removed before saving)
# change the current working directory to directory of the session that
# invoked the jupyter app to make relative paths work
import os
{}
""".format(
chdir_code(Path(".").resolve())
)
cell = nbformat_v.new_code_cell(source, metadata={"tags": ["debugging-settings"]})
nb.cells.insert(0, cell)
return nb
|
Read rendered notebook and inject cell with debugging settings
|
_read_rendered_notebook
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/notebook.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/notebook.py
|
Apache-2.0
|
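The cell-injection pattern above, shown standalone with nbformat (the injected source is hypothetical):
import nbformat

nb = nbformat.v4.new_notebook()
nb.cells = [nbformat.v4.new_code_cell("print('original first cell')")]

settings = nbformat.v4.new_code_cell(
    "import os\nos.chdir('/tmp')  # hypothetical working directory",
    metadata={"tags": ["debugging-settings"]},
)
nb.cells.insert(0, settings)  # the tagged cell now runs first

print([c["source"] for c in nb.cells])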
def input_data_passer(dag, name, preprocessor=None):
"""
Returns a special in-memory task that forwards input data as product to
downstream tasks.
Parameters
----------
dag : ploomber.DAG
DAG where the task should be added
name : str
Task name
preprocessor : callable, default=None
An arbitrary callable that can be used to add custom logic to the
input data before passing it to downstream tasks
Returns
-------
PythonCallable
A PythonCallable task with special characteristics. It cannot be
invoked directly, but through ``InMemoryDAG(dag).build()``
"""
return PythonCallable(
_input_data_passer,
EmptyProduct(),
dag=dag,
name=name,
params={"input_data": None, "preprocessor": preprocessor},
)
|
Returns a special in-memory task that forwards input data as product to
downstream tasks.
Parameters
----------
dag : ploomber.DAG
DAG where the task should be added
name : str
Task name
preprocessor : callable, default=None
An arbitrary callable that can be used to add custom logic to the
input data before passing it to downstream tasks
Returns
-------
PythonCallable
A PythonCallable task with special characteristics. It cannot be
invoked directly, but through ``InMemoryDAG(dag).build()``
|
input_data_passer
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/param_forward.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/param_forward.py
|
Apache-2.0
|
def from_grid(
cls,
task_class,
product_class,
product_primitive,
task_kwargs,
dag,
grid,
name=None,
namer=None,
resolve_relative_to=None,
on_render=None,
on_finish=None,
on_failure=None,
params=None,
):
"""
Build a group of tasks of the same class from a grid of parameters
using the same source.
Parameters
----------
grid : dict or list of dicts
If dict, all combinations of individual parameters are generated.
If list of dicts, each dict is processed individually, then
concatenated to generate the final set.
params : dict
Values that will remain constant
Notes
-----
All parameters, except for grid, are the same as in .from_params
"""
params_array = ParamGrid(grid, params=params).product()
return cls.from_params(
task_class=task_class,
product_class=product_class,
product_primitive=product_primitive,
task_kwargs=task_kwargs,
dag=dag,
name=name,
params_array=params_array,
namer=namer,
resolve_relative_to=resolve_relative_to,
on_render=on_render,
on_finish=on_finish,
on_failure=on_failure,
)
|
Build a group of tasks of the same class from a grid of parameters
using the same source.
Parameters
----------
grid : dict or list of dicts
If dict, all combinations of individual parameters are generated.
If list of dicts, each dict is processed individually, then
concatenated to generate the final set.
params : dict
Values that will remain constant
Notes
-----
All parameters, except for grid, are the same as in .from_params
|
from_grid
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/taskgroup.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/taskgroup.py
|
Apache-2.0
|
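ParamGrid is not shown here, but the underlying idea of expanding a grid dict into a list of parameter dicts can be sketched with itertools (illustrative only, not ploomber's implementation):
from itertools import product

grid = {"model": ["rf", "gb"], "n_estimators": [10, 100]}
params_array = [dict(zip(grid, values)) for values in product(*grid.values())]
print(params_array)
# [{'model': 'rf', 'n_estimators': 10}, {'model': 'rf', 'n_estimators': 100},
#  {'model': 'gb', 'n_estimators': 10}, {'model': 'gb', 'n_estimators': 100}]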
def _unserialize_params(params_original, unserializer):
"""
Use the user-provided function to unserialize params['upstream']
"""
params = params_original.to_dict()
params["upstream"] = {
k: _unserializer(v, unserializer) for k, v in params["upstream"].items()
}
params = Params._from_dict(params, copy=False)
return params
|
Use the user-provided function to unserialize params['upstream']
|
_unserialize_params
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/tasks.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/tasks.py
|
Apache-2.0
|
def debug(self, kind="ipdb"):
"""
Run callable in debug mode.
Parameters
----------
kind : str ('ipdb' or 'pdb')
Which debugger to use 'ipdb' for IPython debugger or 'pdb' for
debugger from the standard library
Notes
-----
Be careful when debugging tasks. If the task has run
successfully and you overwrite products but don't save the
updated source code, your DAG will enter an inconsistent state where
the metadata won't match the overwritten product.
"""
opts = {"ipdb", "pdb"}
if kind == "pm":
raise ValueError(
"Post-mortem debugging is not supported " "via the .debug() method."
)
if kind not in opts:
raise ValueError('"kind" must be one of {}, got: "{}"'.format(opts, kind))
if self.exec_status == TaskStatus.WaitingRender:
raise TaskBuildError(
'Error in task "{}". '
"Cannot call task.debug() on a task that has "
"not been "
"rendered, call DAG.render() first".format(self.name)
)
if "upstream" in self.params and self._unserializer:
params = _unserialize_params(self.params, self._unserializer)
else:
params = self.params.to_dict()
if self._serializer:
params.pop("product")
if kind == "ipdb":
try:
# this seems to only work in a Terminal
ipdb = TerminalPdb()
except Exception:
# this works in a Jupyter notebook
ipdb = Pdb()
ipdb.runcall(self.source.primitive, **params)
elif kind == "pdb":
pdb.runcall(self.source.primitive, **params)
|
Run callable in debug mode.
Parameters
----------
kind : str ('ipdb' or 'pdb')
Which debugger to use 'ipdb' for IPython debugger or 'pdb' for
debugger from the standard library
Notes
-----
Be careful when debugging tasks. If the task has run
successfully and you overwrite products but don't save the
updated source code, your DAG will enter an inconsistent state where
the metadata won't match the overwritten product.
|
debug
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/tasks.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/tasks.py
|
Apache-2.0
|
def load(self, key=None, **kwargs):
"""
Loads the product. It uses the unserializer function if any, otherwise
it tries to load it based on the file extension
Parameters
----------
key
Key to load, if this task generates more than one product
**kwargs
Arguments passed to the unserializer function
"""
if isinstance(self.product, MetaProduct) and key is None:
raise ValueError(
f"Task {self!r} generates multiple products, "
'use the "key" argument to load one'
)
prod = self.product if not key else self.product[key]
if self._unserializer is not None:
return self._unserializer(str(prod), **kwargs)
else:
return _file_load(prod, **kwargs)
|
Loads the product. It uses the unserializer function if any, otherwise
it tries to load it based on the file extension
Parameters
----------
key
Key to load, if this task generates more than one product
**kwargs
Arguments passed to the unserializer function
|
load
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/tasks.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/tasks.py
|
Apache-2.0
|
def task_factory(_func=None, **factory_kwargs):
"""Syntactic sugar for building PythonCallable tasks"""
def decorator(func):
@functools.wraps(func)
def wrapper(**wrapper_kwargs):
kwargs = {**factory_kwargs, **wrapper_kwargs}
return PythonCallable(func, **kwargs)
return wrapper
return decorator if _func is None else decorator(_func)
|
Syntactic sugar for building PythonCallable tasks
|
task_factory
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/tasks.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/tasks.py
|
Apache-2.0
|
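The decorator above merges factory-time and call-time keyword arguments; a runnable sketch with a stand-in class so it doesn't require a DAG (FakeTask is hypothetical):
import functools

class FakeTask:  # stand-in for PythonCallable, for illustration only
    def __init__(self, func, **kwargs):
        self.func, self.kwargs = func, kwargs

def task_factory(_func=None, **factory_kwargs):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(**wrapper_kwargs):
            return FakeTask(func, **{**factory_kwargs, **wrapper_kwargs})
        return wrapper
    return decorator if _func is None else decorator(_func)

@task_factory(name="clean")
def clean(product):
    pass

task = clean(params={"key": "value"})
print(task.kwargs)  # {'name': 'clean', 'params': {'key': 'value'}}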
def download_products_in_parallel(tasks):
"""Call Task.product.download in parallel"""
with ThreadPoolExecutor(max_workers=64) as executor:
future2task = {executor.submit(t.product.download): t for t in tasks}
for future in as_completed(future2task):
exception = future.exception()
if exception:
task = future2task[future]
raise RuntimeError(
"An error occurred when downloading product from " f"task: {task!r}"
) from exception
|
Call Task.product.download in parallel
|
download_products_in_parallel
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/util.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/util.py
|
Apache-2.0
|
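The same submit/as_completed pattern, self-contained with a dummy download function in place of Task.product.download:
from concurrent.futures import ThreadPoolExecutor, as_completed

def download(name):  # dummy stand-in for product.download
    if name == "bad":
        raise IOError("connection dropped")
    return f"{name} downloaded"

names = ["a", "b", "bad"]
with ThreadPoolExecutor(max_workers=4) as executor:
    future2name = {executor.submit(download, n): n for n in names}
    for future in as_completed(future2name):
        exc = future.exception()
        if exc:
            print(f"failed: {future2name[future]} ({exc})")
        else:
            print(future.result())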
def _from_dict(cls, params, copy=True):
"""
Private API for initializing Params objects with arbitrary dictionary
"""
obj = cls(params=None)
if copy:
obj._dict = copy_module.copy(params)
else:
obj._dict = params
return obj
|
Private API for initializing Params objects with arbitrary dictionary
|
_from_dict
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/_params.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/_params.py
|
Apache-2.0
|
def to_json_serializable(self, params_only=False):
"""
Converts params into a dictionary
Parameters
----------
params_only : bool, default=False
If True, it only returns user params, excluding 'upstream' and
'product'
"""
out = self.to_dict()
if params_only:
out.pop("product", None)
out.pop("upstream", None)
elif "upstream" in out:
out["upstream"] = out["upstream"].to_json_serializable()
return out
|
Converts params into a dictionary
Parameters
----------
params_only : bool, default=False
If True, it only returns user params, excluding 'upstream' and
'product'
|
to_json_serializable
|
python
|
ploomber/ploomber
|
src/ploomber/tasks/_params.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/tasks/_params.py
|
Apache-2.0
|
def nulls_in_columns(cols, product):
"""Check if any column has NULL values, returns bool"""
df = load_product(product)
return df.isna().values.sum() > 0
|
Check if any column has NULL values, returns bool
|
nulls_in_columns
|
python
|
ploomber/ploomber
|
src/ploomber/testing/pandas.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/testing/pandas.py
|
Apache-2.0
|
def duplicates_in_column(col, product):
"""Check if a column has duplicated values, returns bool"""
df = load_product(product)
return (df[col].value_counts() > 1).sum() > 0
|
Check if a column has duplicated values, returns bool
|
duplicates_in_column
|
python
|
ploomber/ploomber
|
src/ploomber/testing/pandas.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/testing/pandas.py
|
Apache-2.0
|
def range_in_column(col, product):
"""Get range for a column, returns a (min_value, max_value) tuple"""
df = load_product(product)
return df[col].min(), df[col].max()
|
Get range for a column, returns a (min_value, max_value) tuple
|
range_in_column
|
python
|
ploomber/ploomber
|
src/ploomber/testing/pandas.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/testing/pandas.py
|
Apache-2.0
|
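The three pandas helpers above reduce to one-liners over a DataFrame; a sketch that skips load_product and uses an in-memory frame instead:
import pandas as pd

df = pd.DataFrame({"id": [1, 2, 2], "value": [0.5, None, 1.5]})

print(df.isna().values.sum() > 0)               # nulls_in_columns     -> True
print((df["id"].value_counts() > 1).sum() > 0)  # duplicates_in_column -> True
print((df["value"].min(), df["value"].max()))   # range_in_column      -> (0.5, 1.5)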
def _duplicates_query(col, product):
"""Generate SQL code that counts number of duplicates"""
cols = ",".join(_make_iterable(col))
return Template(
"""
SELECT {{cols}}, COUNT(*) - 1 AS n_duplicates
FROM {{product}}
GROUP BY {{cols}}
HAVING COUNT(*) > 1
"""
).render(cols=cols, product=product)
|
Generate SQL code that counts number of duplicates
|
_duplicates_query
|
python
|
ploomber/ploomber
|
src/ploomber/testing/sql/duplicated.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/testing/sql/duplicated.py
|
Apache-2.0
|
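What the rendered query looks like for a hypothetical column and relation (using jinja2 directly):
from jinja2 import Template

sql = Template("""
SELECT {{cols}}, COUNT(*) - 1 AS n_duplicates
FROM {{product}}
GROUP BY {{cols}}
HAVING COUNT(*) > 1
""").render(cols="customer_id", product="sales")
print(sql)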
def duplicates_in_column(client, col: Union[str, List[str]], product) -> bool:
"""Check if a column (or group of columns) has duplicated values
Parameters
----------
client
Database client
col
Column(s) to check
product
The relation to check
Returns
-------
bool
True if there are duplicates in the column(s). If passed more than
one column, they are considered as a whole, not individually
"""
sql = Template(
"""
SELECT EXISTS(
{{query}}
)
"""
).render(query=_duplicates_query(col, product))
cur = client.connection.cursor()
cur.execute(sql)
output = bool(cur.fetchone()[0])
cur.close()
return output
|
Check if a column (or group of columns) has duplicated values
Parameters
----------
client
Database client
col
Column(s) to check
product
The relation to check
Returns
-------
bool
True if there are duplicates in the column(s). If passed more than
one column, they are considered as a whole, not individually
|
duplicates_in_column
|
python
|
ploomber/ploomber
|
src/ploomber/testing/sql/duplicated.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/testing/sql/duplicated.py
|
Apache-2.0
|
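The EXISTS wrapper pattern demonstrated end to end with sqlite3 (a standalone sketch; ploomber's version goes through a database client and a product object):
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE sales (customer_id INTEGER)")
conn.executemany("INSERT INTO sales VALUES (?)", [(1,), (2,), (2,)])

cur = conn.execute("""
SELECT EXISTS(
    SELECT customer_id, COUNT(*) - 1 AS n_duplicates
    FROM sales
    GROUP BY customer_id
    HAVING COUNT(*) > 1
)
""")
print(bool(cur.fetchone()[0]))  # True: customer_id 2 appears twice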
def assert_no_duplicates_in_column(
client, col: Union[str, List[str]], product, stats=False
):
"""
Assert if there are duplicates in a column (or group of columns). If there
are duplicates, it raises an AssertionError with an error message showing
some of the duplicated values
Parameters
----------
stats : bool, default=False
Whether to show duplicates stats in the error message or not
"""
duplicates_query = _duplicates_query(col, product)
sql = Template(
"""
{{query}}
LIMIT 10
"""
).render(query=duplicates_query)
cur = client.connection.cursor()
cur.execute(sql)
output = cur.fetchall()
if len(output):
names = [t[0] for t in cur.description]
table = tabulate(output, headers=names)
cols = ",".join(_make_iterable(col))
sql_sample_rows = Template(
"""
WITH duplicated AS (
{{sql}}
)
SELECT t.*
FROM {{product}} AS t
JOIN duplicated
USING ({{cols}})
ORDER BY {{cols}}
"""
).render(sql=sql, product=product, cols=cols)
cur = client.connection.cursor()
cur.execute(sql_sample_rows)
output = cur.fetchall()
cur.close()
names = [t[0] for t in cur.description]
table_sample = tabulate(output, headers=names)
msg = f"Duplicates found.\n\n{table}\n\n{table_sample}"
if stats:
n_rows, n_unique, n_duplicates = duplicates_stats(client, col, product)
msg += (
f"\n\nNumber of rows: {n_rows:,}\n"
f"Number of unique values: {n_unique:,}\n"
f"Number of duplicates: {n_duplicates:,}"
)
raise AssertionError(msg)
|
Assert if there are duplicates in a column (or group of columns). If there
are duplicates, it raises an AssertionError with an error message showing
some of the duplicated values
Parameters
----------
stats : bool, default=False
Whether to show duplicates stats in the error message or not
|
assert_no_duplicates_in_column
|
python
|
ploomber/ploomber
|
src/ploomber/testing/sql/duplicated.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/testing/sql/duplicated.py
|
Apache-2.0
|
def duplicates_stats(client, col: Union[str, List[str]], product):
"""Get stats on rows with duplicated values
Returns
-------
n_rows
Number of rows in product
n_unique
Number of unique values (for selected columns) in product
n_duplicates
Number of rows with duplicated values (this is equal to the number
of rows we'd have to drop to remove duplicates)
"""
cols = ",".join(_make_iterable(col))
# num of rows in product
n_rows = _query(
client, Template("SELECT COUNT(*) FROM {{product}}").render(product=product)
)
# num of unique values (using all columns)
n_unique = _query(
client,
Template("SELECT COUNT(DISTINCT({{cols}})) FROM {{product}}").render(
product=product, cols=cols
),
)
sql_n_duplicates = Template(
"""
WITH duplicated AS (
{{sql}}
)
SELECT SUM(n_duplicates) FROM duplicated
"""
).render(sql=_duplicates_query(col, product), product=product, cols=cols)
# num of duplicated rows (number of rows we have to drop to remove all
# duplicates)
n_duplicates = _query(client, sql_n_duplicates)
return n_rows, n_unique, n_duplicates
|
Get stats on rows with duplicated values
Returns
-------
n_rows
Number of rows in product
n_unique
Number of unique values (for selected columns) in product
n_duplicates
Number of rows with duplicated values (this is equal to the number
of rows we'd have to drop to remove duplicates)
|
duplicates_stats
|
python
|
ploomber/ploomber
|
src/ploomber/testing/sql/duplicated.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/testing/sql/duplicated.py
|
Apache-2.0
|
def nulls_in_columns(client, cols: Union[str, List[str]], product):
"""Check if any column has NULL values, returns bool
Parameters
----------
client
Database client
cols
Column(s) to check
product
The relation to check
Returns
-------
bool
True if there is at least one NULL in any of the columns
"""
# NOTE: SELECT EXISTS does not work on oracle
# it can be SELECT 1 FROM EXISTS(...) dual (dual is a system table
# that always exists). Should we support it?
sql = Template(
"""
SELECT EXISTS(
SELECT * FROM {{product}}
WHERE {{cols | join(' is null or ') }} is null
)
"""
).render(cols=cols, product=product)
cur = client.connection.cursor()
cur.execute(sql)
output = bool(cur.fetchone()[0])
cur.close()
return output
|
Check if any column has NULL values, returns bool
Parameters
----------
client
Database client
cols
Column(s) to check
product
The relation to check
Returns
-------
bool
True if there is at least one NULL in any of the columns
|
nulls_in_columns
|
python
|
ploomber/ploomber
|
src/ploomber/testing/sql/functions.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/testing/sql/functions.py
|
Apache-2.0
|
def distinct_values_in_column(client, col: str, product):
"""Get distinct values in a column
Parameters
----------
client
Database client
col
Column to check
product
The relation to check
Returns
-------
set
Distinct values in column
"""
sql = Template(
"""
SELECT DISTINCT {{col}} FROM {{product}}
"""
).render(col=col, product=product)
cur = client.connection.cursor()
cur.execute(sql)
output = cur.fetchall()
cur.close()
return set(o[0] for o in output)
|
Get distinct values in a column
Parameters
----------
client
Database client
col
Column to check
product
The relation to check
Returns
-------
set
Distinct values in column
|
distinct_values_in_column
|
python
|
ploomber/ploomber
|
src/ploomber/testing/sql/functions.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/testing/sql/functions.py
|
Apache-2.0
|
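A sketch of distinct_values_in_column used as a data-quality check; the allowed set of status values is an assumption for illustration:

from ploomber.testing.sql.functions import distinct_values_in_column

found = distinct_values_in_column(client, "status", "sales")
assert found <= {"paid", "pending", "refunded"}, f"unexpected values: {found}"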
def range_in_column(client, col: str, product):
"""Get range for a column
Parameters
----------
client
Database client
col
Column to check
product
The relation to check
Returns
-------
tuple
(minimum, maximum) values
"""
sql = Template(
"""
SELECT MIN({{col}}), MAX({{col}}) FROM {{product}}
"""
).render(col=col, product=product)
cur = client.connection.cursor()
cur.execute(sql)
output = cur.fetchone()
cur.close()
return output
|
Get range for a column
Parameters
----------
client
Database client
col
Column to check
product
The relation to check
Returns
-------
tuple
(minimum, maximum) values
|
range_in_column
|
python
|
ploomber/ploomber
|
src/ploomber/testing/sql/functions.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/testing/sql/functions.py
|
Apache-2.0
|
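A sketch for range_in_column, again with the hypothetical client and table used in the earlier examples:

from ploomber.testing.sql.functions import range_in_column

min_amount, max_amount = range_in_column(client, "amount", "sales")
assert min_amount >= 0, "negative amounts found"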
def exists_row_where(client, criteria: str, product):
"""
Check whether at least one row exists matching the criteria
Parameters
----------
client
Database client
criteria
Criteria to evaluate (passed as argument to a WHERE clause)
product
The relation to check
Notes
-----
Runs a ``SELECT EXISTS (SELECT * FROM {{product}} WHERE {{criteria}})``
query
Returns
-------
bool
True if at least one row exists matching the criteria
"""
sql = Template(
"""
SELECT EXISTS(
SELECT *
FROM {{product}}
WHERE {{criteria}}
)
"""
).render(product=product, criteria=criteria)
cur = client.connection.cursor()
cur.execute(sql)
output = bool(cur.fetchone()[0])
cur.close()
return output
|
Check whether at least one row exists matching the criteria
Parameters
----------
client
Database client
criteria
Criteria to evaluate (passed as argument to a WHERE clause)
product
The relation to check
Notes
-----
Runs a ``SELECT EXISTS (SELECT * FROM {{product}} WHERE {{criteria}})``
query
Returns
-------
bool
True if at least one row exists matching the criteria
|
exists_row_where
|
python
|
ploomber/ploomber
|
src/ploomber/testing/sql/functions.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/testing/sql/functions.py
|
Apache-2.0
|
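exists_row_where inverts nicely into an assertion that no offending row exists; the criteria string below is illustrative:

from ploomber.testing.sql.functions import exists_row_where

# the criteria string becomes the WHERE clause verbatim
assert not exists_row_where(client, "amount < 0 OR customer_id IS NULL", "sales")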
def debug_if_exception(callable_, task_name, kwargs=None):
"""
Drop a debugger session if running callable_() raises an exception,
otherwise it just returns the value returned by callable_()
"""
# NOTE: importing it here, otherwise we get a
# "If you suspect this is an IPython X.Y.Z bug..." message if any exception
# after the import if an exception happens
# NOTE: the IPython.terminal.debugger module has pdb-like classes but it
# doesn't mimic pdb's API exactly, ipdb is just a wrapper that takes care
# of those details - I tried using IPython directly but bumped into some
# issues
import ipdb
kwargs = kwargs or dict()
try:
result = callable_(**kwargs)
# this will happen if the user had a breakpoint and then they quit the
# debugger
except BdbQuit as e:
raise DebuggingFinished(task_name) from e
# any other thing starts the debugging session
except Exception as e:
click.secho(
f"{e} {type(e)} - Error in task {task_name!r}. " "Starting debugger...",
fg="red",
)
ipdb.post_mortem(sys.exc_info()[2])
raise DebuggingFinished(task_name) from e
else:
return result
|
Drop a debugger session if running callable_() raises an exception,
otherwise it just returns the value returned by callable_()
|
debug_if_exception
|
python
|
ploomber/ploomber
|
src/ploomber/util/debug.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/util/debug.py
|
Apache-2.0
|
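A sketch of debug_if_exception wrapping a failing callable; the callable and its argument are hypothetical, and the import path follows the module shown above:

from ploomber.util.debug import debug_if_exception

def transform(df=None):
    # hypothetical task body: raises AttributeError when df is None
    return df.dropna()

# on failure this drops into an ipdb post-mortem session; once the user
# quits the debugger, DebuggingFinished is raised with the task name
result = debug_if_exception(transform, task_name="transform", kwargs={"df": None})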
def _package_location(root_path, name="pipeline.yaml"):
"""
Look for a src/{package-name}/pipeline.yaml relative to root_path
Parameters
----------
root_path : str or pathlib.Path
Looks for a package relative to this
name : str, default='pipeline.yaml'
YAML spec to search for
Returns
-------
str
Path to package. None if no package exists.
"""
pattern = str(Path(root_path, "src", "*", name))
candidates = sorted(
[f for f in glob(pattern) if not str(Path(f).parent).endswith(".egg-info")]
)
if len(candidates) > 1:
warnings.warn(
f"Found more than one package location: {candidates}. "
f"Using the first one: {candidates[0]!r}"
)
return candidates[0] if candidates else None
|
Look for a src/{package-name}/pipeline.yaml relative to root_path
Parameters
----------
root_path : str or pathlib.Path
Looks for a package relative to this
name : str, default='pipeline.yaml'
YAML spec to search for
Returns
-------
str
Path to package. None if no package exists.
|
_package_location
|
python
|
ploomber/ploomber
|
src/ploomber/util/default.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/util/default.py
|
Apache-2.0
|
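A sketch of the layout _package_location matches; it is a private helper, so calling it directly is for illustration only, and the project and package names are hypothetical:

from ploomber.util.default import _package_location

# hypothetical packaged layout:
#   project/
#   ├── setup.py
#   └── src/
#       └── my_pkg/
#           └── pipeline.yaml
print(_package_location("project"))          # e.g. project/src/my_pkg/pipeline.yaml
print(_package_location("project", name="pipeline.train.yaml"))  # None if absent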
def entry_point_with_name(root_path=None, name=None):
"""Search for an entry point with a given name
Parameters
----------
name : str, default=None
If None, searches for a pipeline.yaml file, otherwise for a
file with that name
"""
filename = name or "pipeline.yaml"
# first, find project root
project_root = find_root_recursively(starting_dir=root_path, filename=filename)
setup_py = Path(project_root, "setup.py")
setup_py_exists = setup_py.exists()
# if there is a setup.py file, look up a {project_root}/src/*/{name} file
if setup_py_exists:
entry_point = _package_location(root_path=project_root, name=filename)
if entry_point is not None:
return relpath(entry_point, Path().resolve())
# otherwise use {project_root}/{file}. note that this file must
# exist since find_root_recursively raises an error if it doesn't
return relpath(Path(project_root, filename), Path().resolve())
|
Search for an entry point with a given name
Parameters
----------
name : str, default=None
If None, searches for a pipeline.yaml file, otherwise for a
file with that name
|
entry_point_with_name
|
python
|
ploomber/ploomber
|
src/ploomber/util/default.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/util/default.py
|
Apache-2.0
|
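A sketch for entry_point_with_name; the alternative spec filename is hypothetical, and the call raises if no project root can be found:

from ploomber.util.default import entry_point_with_name

# default lookup (pipeline.yaml), relative to the current working directory
default_spec = entry_point_with_name()

# look up a differently named spec in the same project
train_spec = entry_point_with_name(name="pipeline.train.yaml")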
def entry_point(root_path=None):
"""
Determines the default YAML spec entry point. It first determines the
project root. If the project isn't a package, it returns
project_root/pipeline.yaml, otherwise src/*/pipeline.yaml. If the
ENTRY_POINT environment variable is set, it looks for a file with
such name (e.g., project_root/{ENTRY_POINT}).
Parameters
----------
root_path, optional
Root path to look for the entry point. Defaults to the current working
directory
Notes
-----
Use cases for this function:
* Called by the cli to locate the default entry point to use (no args)
* When deciding whether to add a new scaffold structure or parse the
current one and add new files (catches DAGSpecInvalidError), no args
Raises
------
DAGSpecInvalidError
If fails to determine project root or if no pipeline.yaml (or
the content of the ENTRY_POINT environment variable, if any) exists in
the expected location (once project root is determined).
"""
# FIXME: rename env var used
root_path = root_path or "."
env_var = os.environ.get("ENTRY_POINT")
if env_var:
if len(Path(env_var).parts) > 1:
raise ValueError(
f"ENTRY_POINT ({env_var!r}) "
"must be a filename and do not contain any "
"directory components (e.g., pipeline.yaml, "
"not path/to/pipeline.yaml)."
)
filename = env_var
else:
filename = "pipeline.yaml"
# check if there's a config file
path_to_config, _ = find_file_recursively(name="setup.cfg", starting_dir=root_path)
if path_to_config:
cfg = config.load_config(path_to_config)
if cfg and cfg["ploomber"].get("entry-point"):
parent = Path(path_to_config).parent
entry_point = str(parent / cfg["ploomber"]["entry-point"])
if not Path(entry_point).is_file():
raise DAGSpecInvalidError(
"Skipping DAG initialization:"
" found setup.cfg but "
f"entry-point {entry_point!r} "
"does not exist"
)
return entry_point
return entry_point_with_name(root_path=root_path, name=filename)
|
Determines the default YAML spec entry point. It first determines the
project root. If the project isn't a package, it returns
project_root/pipeline.yaml, otherwise src/*/pipeline.yaml. If the
ENTRY_POINT environment variable is set, it looks for a file with
such name (e.g., project_root/{ENTRY_POINT}).
Parameters
----------
root_path, optional
Root path to look for the entry point. Defaults to the current working
directory
Notes
-----
Use cases for this function:
* Called by the cli to locate the default entry point to use (no args)
* When deciding whether to add a new scaffold structure or parse the
current one and add new files (catches DAGSpecInvalidError), no args
Raises
------
DAGSpecInvalidError
If fails to determine project root or if no pipeline.yaml (or
the content of the ENTRY_POINT environment variable, if any) exists in
the expected location (once project root is determined).
|
entry_point
|
python
|
ploomber/ploomber
|
src/ploomber/util/default.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/util/default.py
|
Apache-2.0
|
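A sketch of entry_point with and without the ENTRY_POINT override; both calls raise DAGSpecInvalidError if the expected file cannot be located, and the override filename is hypothetical:

import os
from ploomber.util.default import entry_point

# default: project_root/pipeline.yaml, or src/*/pipeline.yaml in a packaged project
default_spec = entry_point()

# ENTRY_POINT must be a bare filename, not a path
os.environ["ENTRY_POINT"] = "pipeline.train.yaml"
train_spec = entry_point()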
def try_to_find_entry_point():
"""Try to find the default entry point. Returns None if it isn't possible"""
# check if it's a dotted path
type_ = try_to_find_entry_point_type(os.environ.get("ENTRY_POINT"))
if type_ == EntryPoint.DottedPath:
return os.environ.get("ENTRY_POINT")
# entry_point searches recursively for a YAML spec
try:
return entry_point(root_path=None)
except Exception:
# TODO: maybe display a warning with the error?
pass
|
Try to find the default entry point. Returns None if it isn't possible
|
try_to_find_entry_point
|
python
|
ploomber/ploomber
|
src/ploomber/util/default.py
|
https://github.com/ploomber/ploomber/blob/master/src/ploomber/util/default.py
|
Apache-2.0
|
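And a sketch for try_to_find_entry_point, which never raises and returns None when nothing can be located:

from ploomber.util.default import try_to_find_entry_point

entry = try_to_find_entry_point()
if entry is None:
    print("no entry point found, falling back to manual configuration")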