response | instruction
---|---|
Checks if an object is a ``pandas`` object.
Use this before conditional ``import pandas as pd``. | def uses_pandas(obj: Any) -> bool:
"""
Checks if an object is a ``pandas`` object.
Use this before conditional ``import pandas as pd``.
"""
module = type(obj).__module__
return module is not None and module.startswith("pandas.") |
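A minimal usage sketch, assuming ``uses_pandas`` above is in scope (pandas is imported here only to build a test object):
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3]})
print(uses_pandas(df))         # True  -- type(df).__module__ starts with "pandas."
print(uses_pandas([1, 2, 3]))  # False -- builtins are not pandas objects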
Issue a nicely formatted deprecation warning. | def deprecated(since_or_msg: Version | str,
old: str | None = None, new: str | None = None, extra: str | None = None) -> None:
""" Issue a nicely formatted deprecation warning. """
if isinstance(since_or_msg, tuple):
if old is None or new is None:
raise ValueError("deprecated entity and a replacement are required")
if len(since_or_msg) != 3 or not all(isinstance(x, int) and x >= 0 for x in since_or_msg):
raise ValueError(f"invalid version tuple: {since_or_msg!r}")
major, minor, patch = since_or_msg
since = f"{major}.{minor}.{patch}"
message = f"{old!r} was deprecated in Bokeh {since} and will be removed, use {new!r} instead."
if extra is not None:
message += " " + extra.strip()
else:
if not (old is None and new is None and extra is None):
raise ValueError("deprecated(message) signature doesn't allow extra arguments")
message = since_or_msg
warn(message, BokehDeprecationWarning) |
Find parameters with defaults and return them.
Arguments:
sig (Signature) : a function signature
Returns:
tuple(list, list) : all parameter names, and default values for parameters that have them | def get_param_info(sig: Signature) -> tuple[list[str], list[Any]]:
''' Find parameters with defaults and return them.
Arguments:
sig (Signature) : a function signature
Returns:
tuple(list, list) : all parameter names, and default values for parameters that have them
'''
defaults = []
for param in sig.parameters.values():
if param.default is not param.empty:
defaults.append(param.default)
return [name for name in sig.parameters], defaults |
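A small usage sketch with the standard ``inspect`` module, assuming ``get_param_info`` above is in scope:
from inspect import signature

def plot(x, y, color="blue", alpha=1.0):
    pass

names, defaults = get_param_info(signature(plot))
print(names)     # ['x', 'y', 'color', 'alpha']
print(defaults)  # ['blue', 1.0]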
Map axial *(q,r)* coordinates to cartesian *(x,y)* coordinates of
tile centers.
This function can be useful for positioning other Bokeh glyphs with
cartesian coordinates in relation to a hex tiling.
This function was adapted from:
https://www.redblobgames.com/grids/hexagons/#hex-to-pixel
Args:
q (array[float]) :
A NumPy array of q-coordinates for binning
r (array[float]) :
A NumPy array of r-coordinates for binning
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str) :
Whether the hex tile orientation should be "pointytop" or
"flattop".
aspect_scale (float, optional) :
Scale the hexagons in the "cross" dimension.
For "pointytop" orientations, hexagons are scaled in the horizontal
direction. For "flattop", they are scaled in vertical direction.
When working with a plot with ``aspect_scale != 1``, it may be
useful to set this value to match the plot.
Returns:
(array[float], array[float]) | def axial_to_cartesian(q: Any, r: Any, size: float, orientation: str, aspect_scale: float = 1) -> tuple[Any, Any]:
''' Map axial *(q,r)* coordinates to cartesian *(x,y)* coordinates of
tile centers.
This function can be useful for positioning other Bokeh glyphs with
cartesian coordinates in relation to a hex tiling.
This function was adapted from:
https://www.redblobgames.com/grids/hexagons/#hex-to-pixel
Args:
q (array[float]) :
A NumPy array of q-coordinates for binning
r (array[float]) :
A NumPy array of r-coordinates for binning
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str) :
Whether the hex tile orientation should be "pointytop" or
"flattop".
aspect_scale (float, optional) :
Scale the hexagons in the "cross" dimension.
For "pointytop" orientations, hexagons are scaled in the horizontal
direction. For "flattop", they are scaled in vertical direction.
When working with a plot with ``aspect_scale != 1``, it may be
useful to set this value to match the plot.
Returns:
(array[float], array[float])
'''
if orientation == "pointytop":
x = size * np.sqrt(3) * (q + r/2.0) / aspect_scale
y = -size * 3/2.0 * r
else:
x = size * 3/2.0 * q
y = -size * np.sqrt(3) * (r + q/2.0) * aspect_scale
return (x, y) |
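A minimal sketch of the mapping, assuming ``axial_to_cartesian`` above and NumPy are in scope:
import numpy as np

q = np.array([0, 1, 0])
r = np.array([0, 0, 1])
x, y = axial_to_cartesian(q, r, size=1.0, orientation="pointytop")
print(x)  # approximately [0., 1.732, 0.866] -- sqrt(3) * size * (q + r/2)
print(y)  # approximately [0., 0., -1.5]     -- -1.5 * size * r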
Map Cartesian *(x,y)* points to axial *(q,r)* coordinates of enclosing
tiles.
This function was adapted from:
https://www.redblobgames.com/grids/hexagons/#pixel-to-hex
Args:
x (array[float]) :
A NumPy array of x-coordinates to convert
y (array[float]) :
A NumPy array of y-coordinates to convert
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str) :
Whether the hex tile orientation should be "pointytop" or
"flattop".
aspect_scale (float, optional) :
Scale the hexagons in the "cross" dimension.
For "pointytop" orientations, hexagons are scaled in the horizontal
direction. For "flattop", they are scaled in vertical direction.
When working with a plot with ``aspect_scale != 1``, it may be
useful to set this value to match the plot.
Returns:
(array[int], array[int]) | def cartesian_to_axial(x: Any, y: Any, size: float, orientation: str, aspect_scale: float = 1) -> tuple[Any, Any]:
''' Map Cartesian *(x,y)* points to axial *(q,r)* coordinates of enclosing
tiles.
This function was adapted from:
https://www.redblobgames.com/grids/hexagons/#pixel-to-hex
Args:
x (array[float]) :
A NumPy array of x-coordinates to convert
y (array[float]) :
A NumPy array of y-coordinates to convert
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str) :
Whether the hex tile orientation should be "pointytop" or
"flattop".
aspect_scale (float, optional) :
Scale the hexagons in the "cross" dimension.
For "pointytop" orientations, hexagons are scaled in the horizontal
direction. For "flattop", they are scaled in vertical direction.
When working with a plot with ``aspect_scale != 1``, it may be
useful to set this value to match the plot.
Returns:
(array[int], array[int])
'''
HEX_FLAT = [2.0/3.0, 0.0, -1.0/3.0, np.sqrt(3.0)/3.0]
HEX_POINTY = [np.sqrt(3.0)/3.0, -1.0/3.0, 0.0, 2.0/3.0]
coords = HEX_FLAT if orientation == 'flattop' else HEX_POINTY
x = x / size * (aspect_scale if orientation == "pointytop" else 1)
y = -y / size / (aspect_scale if orientation == "flattop" else 1)
q = coords[0] * x + coords[1] * y
r = coords[2] * x + coords[3] * y
return _round_hex(q, r) |
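A round-trip sketch, assuming ``axial_to_cartesian`` and ``cartesian_to_axial`` above, plus their helper ``_round_hex`` (defined later in this listing), are in scope: tile centers map back to their own axial indices.
import numpy as np

q = np.array([0, 1, -1, 2])
r = np.array([0, 0, 1, -1])
cx, cy = axial_to_cartesian(q, r, size=1.0, orientation="flattop")
q2, r2 = cartesian_to_axial(cx, cy, size=1.0, orientation="flattop")
assert (q2 == q).all() and (r2 == r).all()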
Perform an equal-weight binning of data points into hexagonal tiles.
For more sophisticated use cases, e.g. weighted binning or scaling
individual tiles proportional to some other quantity, consider using
HoloViews.
Args:
x (array[float]) :
A NumPy array of x-coordinates for binning
y (array[float]) :
A NumPy array of y-coordinates for binning
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str, optional) :
Whether the hex tile orientation should be "pointytop" or
"flattop". (default: "pointytop")
aspect_scale (float, optional) :
Match a plot's aspect ratio scaling.
When working with a plot with ``aspect_scale != 1``, this
parameter can be set to match the plot, in order to draw
regular hexagons (instead of "stretched" ones).
This is roughly equivalent to binning in "screen space", and
it may be better to use axis-aligned rectangular bins when
plot aspect scales are not one.
Returns:
DataFrame
The resulting DataFrame will have columns *q* and *r* that specify
hexagon tile locations in axial coordinates, and a column *counts* that
provides the count for each tile.
.. warning::
Hex binning only functions on linear scales, i.e. not on log plots. | def hexbin(x: Any, y: Any, size: float, orientation: str = "pointytop", aspect_scale: float = 1) -> Any:
''' Perform an equal-weight binning of data points into hexagonal tiles.
For more sophisticated use cases, e.g. weighted binning or scaling
individual tiles proportional to some other quantity, consider using
HoloViews.
Args:
x (array[float]) :
A NumPy array of x-coordinates for binning
y (array[float]) :
A NumPy array of y-coordinates for binning
size (float) :
The size of the hexagonal tiling.
The size is defined as the distance from the center of a hexagon
to the top corner for "pointytop" orientation, or from the center
to a side corner for "flattop" orientation.
orientation (str, optional) :
Whether the hex tile orientation should be "pointytop" or
"flattop". (default: "pointytop")
aspect_scale (float, optional) :
Match a plot's aspect ratio scaling.
When working with a plot with ``aspect_scale != 1``, this
parameter can be set to match the plot, in order to draw
regular hexagons (instead of "stretched" ones).
This is roughly equivalent to binning in "screen space", and
it may be better to use axis-aligned rectangular bins when
plot aspect scales are not one.
Returns:
DataFrame
The resulting DataFrame will have columns *q* and *r* that specify
hexagon tile locations in axial coordinates, and a column *counts* that
provides the count for each tile.
.. warning::
Hex binning only functions on linear scales, i.e. not on log plots.
'''
pd: Any = import_required('pandas', 'hexbin requires pandas to be installed')
q, r = cartesian_to_axial(x, y, size, orientation, aspect_scale=aspect_scale)
df = pd.DataFrame(dict(r=r, q=q))
return df.groupby(['q', 'r']).size().reset_index(name='counts') |
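A usage sketch, assuming ``hexbin`` above and its helpers are in scope and pandas is installed:
import numpy as np

rng = np.random.default_rng(1)
x = rng.normal(size=1000)
y = rng.normal(size=1000)
bins = hexbin(x, y, size=0.5)
print(bins.columns.tolist())  # ['q', 'r', 'counts']
print(bins["counts"].sum())   # 1000 -- every point falls in exactly one tile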
Round floating point axial hex coordinates to integer *(q,r)*
coordinates.
This code was adapted from:
https://www.redblobgames.com/grids/hexagons/#rounding
Args:
q (array[float]) :
NumPy array of floating point axial *q* coordinates to round
r (array[float]) :
NumPy array of floating point axial *r* coordinates to round
Returns:
(array[int], array[int]) | def _round_hex(q: Any, r: Any) -> tuple[Any, Any]:
''' Round floating point axial hex coordinates to integer *(q,r)*
coordinates.
This code was adapted from:
https://www.redblobgames.com/grids/hexagons/#rounding
Args:
q (array[float]) :
NumPy array of floating point axial *q* coordinates to round
r (array[float]) :
NumPy array of floating point axial *r* coordinates to round
Returns:
(array[int], array[int])
'''
x = q
z = r
y = -x-z
rx = np.round(x)
ry = np.round(y)
rz = np.round(z)
dx = np.abs(rx - x)
dy = np.abs(ry - y)
dz = np.abs(rz - z)
cond = (dx > dy) & (dx > dz)
q = np.where(cond , -(ry + rz), rx)
r = np.where(~cond & ~(dy > dz), -(rx + ry), rz)
return q.astype(int), r.astype(int) |
A logging.basicConfig() wrapper that also undoes the default
Bokeh-specific configuration. | def basicConfig(**kwargs: Any) -> None:
"""
A logging.basicConfig() wrapper that also undoes the default
Bokeh-specific configuration.
"""
if default_handler is not None:
bokeh_logger.removeHandler(default_handler)
bokeh_logger.propagate = True
logging.basicConfig(**kwargs) |
Perform some basic package validation checks for the installed package.
Args:
version (str | None, optional) :
A version to compare against the package's reported version
build_dir (str | None, optional) :
A path to a JS build dir to make detailed BokehJS file comparisons
Returns:
list[str]
A list of all errors encountered | def validate(*, version: str | None = None, build_dir: str | None = None) -> list[str]:
""" Perform some basic package validation checks for the installed package.
Args:
version (str | None, optional) :
A version to compare against the package's reported version
build_dir (str | None, optional) :
A path to a JS build dir to make detailed BokehJS file comparisons
Returns:
list[str]
A list of all errors encountered
"""
errors = []
if version is None:
# This can happen under certain circumstances
if __version__ == "0.0.0":
errors.append("Invalid version 0.0.0")
elif version != __version__:
errors.append(f"Version mismatch: given version ({version}) != package version ({__version__})")
if is_full_release(__version__) and __version__ != "0.0.0":
try:
resources.verify_sri_hashes()
except RuntimeError as e:
errors.append(f"SRI hashes for BokehJS files could not be verified: {e}")
r = resources.Resources(mode="absolute")
rmin = resources.Resources(mode="absolute", minified=True)
package_js_paths = r.js_files + rmin.js_files
for path in package_js_paths:
package_path = Path(path)
if not package_path.exists():
errors.append(f"missing BokehJS file: {path}")
elif package_path.stat().st_size == 0:
errors.append(f"Empty BokehJS file: {path}")
elif build_dir is not None:
build_path = Path(build_dir) / "js" / package_path.name
try:
if not filecmp.cmp(build_path, package_path):
errors.append(f"BokehJS package file differs from build dir file: {package_path}")
except FileNotFoundError:
errors.append(f"missing build dir file: {build_path}")
if not Path(__file__).parents[1].joinpath("py.typed").exists():
errors.append("py.typed is missing")
return errors |
Get the location of the server subpackage.
| def server_path() -> Path:
""" Get the location of the server subpackage.
"""
return ROOT_DIR / "server" |
Get the location of server's static directory.
| def static_path() -> Path:
""" Get the location of server's static directory.
"""
return server_path() / "static" |
Get the location of the bokehjs source files.
By default the files in ``bokeh/server/static`` are used. If ``dev``
is ``True``, then the files in ``bokehjs/build`` are preferred. However,
if not available, then a warning is issued and the former files are
used as a fallback.
.. note:
This is a low-level API. Prefer using ``settings.bokehjs_path()``
instead of this function. | def bokehjs_path(dev: bool = False) -> Path:
""" Get the location of the bokehjs source files.
By default the files in ``bokeh/server/static`` are used. If ``dev``
is ``True``, then the files in ``bokehjs/build`` are preferred. However,
if not available, then a warning is issued and the former files are
used as a fallback.
.. note:
This is a low-level API. Prefer using ``settings.bokehjs_path()``
instead of this function.
"""
if dev:
js_dir = ROOT_DIR.parent.parent / "bokehjs" / "build"
if js_dir.is_dir():
return js_dir
else:
log.warning(f"bokehjs' build directory '{js_dir}' doesn't exist; required by 'settings.dev'")
return static_path() |
Get the location of the server subpackage.
.. deprecated:: 3.4.0
Use ``server_path()`` instead. | def serverdir() -> str:
""" Get the location of the server subpackage.
.. deprecated:: 3.4.0
Use ``server_path()`` instead.
"""
deprecated((3, 4, 0), "serverdir()", "server_path()")
return str(server_path()) |
Get the location of the bokehjs source files.
By default the files in ``bokeh/server/static`` are used. If ``dev``
is ``True``, then the files in ``bokehjs/build`` are preferred. However,
if not available, then a warning is issued and the former files are
used as a fallback.
.. note:
This is a low-level API. Prefer using ``settings.bokehjsdir()``
instead of this function.
.. deprecated:: 3.4.0
Use ``bokehjs_path()`` instead. | def bokehjsdir(dev: bool = False) -> str:
""" Get the location of the bokehjs source files.
By default the files in ``bokeh/server/static`` are used. If ``dev``
is ``True``, then the files in ``bokehjs/build`` are preferred. However,
if not available, then a warning is issued and the former files are
used as a fallback.
.. note:
This is a low-level API. Prefer using ``settings.bokehjsdir()``
instead of this function.
.. deprecated:: 3.4.0
Use ``bokehjs_path()`` instead.
"""
deprecated((3, 4, 0), "bokehjsdir()", "bokehjs_path()")
return str(bokehjs_path(dev)) |
Download larger data sets for various Bokeh examples.
| def download(progress: bool = True) -> None:
''' Download larger data sets for various Bokeh examples.
'''
data_dir = external_data_dir(create=True)
print(f"Using data directory: {data_dir}")
# HTTP requests are cheaper for us, and there is nothing private to protect
s3 = 'http://sampledata.bokeh.org'
for file_name, md5 in metadata().items():
real_path = data_dir / real_name(file_name)
if real_path.exists():
with open(real_path, "rb") as file:
data = file.read()
local_md5 = hashlib.md5(data).hexdigest()
if local_md5 == md5:
print(f"Skipping {file_name!r} (checksum match)")
continue
print(f"Fetching {file_name!r}")
_download_file(s3, file_name, data_dir, progress=progress) |
Whether an object is any date, time, or datetime type recognized by
Bokeh.
Args:
obj (object) : the object to test
Returns:
bool : True if ``obj`` is a datetime type | def is_datetime_type(obj: Any) -> TypeGuard[dt.time | dt.datetime | np.datetime64]:
''' Whether an object is any date, time, or datetime type recognized by
Bokeh.
Args:
obj (object) : the object to test
Returns:
bool : True if ``obj`` is a datetime type
'''
_dt_tuple = tuple(_compute_datetime_types())
return isinstance(obj, _dt_tuple) |
Whether an object is any timedelta type recognized by Bokeh.
Args:
obj (object) : the object to test
Returns:
bool : True if ``obj`` is a timedelta type | def is_timedelta_type(obj: Any) -> TypeGuard[dt.timedelta | np.timedelta64]:
''' Whether an object is any timedelta type recognized by Bokeh.
Args:
obj (object) : the object to test
Returns:
bool : True if ``obj`` is a timedelta type
'''
return isinstance(obj, dt.timedelta | np.timedelta64) |
Convert a date object to a datetime
Args:
obj (date) : the object to convert
Returns:
float : milliseconds since epoch | def convert_date_to_datetime(obj: dt.date) -> float:
''' Convert a date object to a datetime
Args:
obj (date) : the object to convert
Returns:
float : milliseconds since epoch
'''
return (dt.datetime(*obj.timetuple()[:6], tzinfo=dt.timezone.utc) - DT_EPOCH).total_seconds() * 1000 |
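A quick check of the returned units, assuming ``convert_date_to_datetime`` above and its ``DT_EPOCH`` constant (the UTC Unix epoch) are in scope:
import datetime as dt

print(convert_date_to_datetime(dt.date(1970, 1, 2)))  # 86400000.0 -- one day, in milliseconds since epoch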
Convert any recognized timedelta value to floating point absolute
milliseconds.
Args:
obj (object) : the object to convert
Returns:
float : milliseconds | def convert_timedelta_type(obj: dt.timedelta | np.timedelta64) -> float:
''' Convert any recognized timedelta value to floating point absolute
milliseconds.
Args:
obj (object) : the object to convert
Returns:
float : milliseconds
'''
if isinstance(obj, dt.timedelta):
return obj.total_seconds() * 1000.
elif isinstance(obj, np.timedelta64):
return float(obj / NP_MS_DELTA)
raise ValueError(f"Unknown timedelta object: {obj!r}") |
Convert any recognized date, time, or datetime value to floating point
milliseconds since epoch.
Args:
obj (object) : the object to convert
Returns:
float : milliseconds | def convert_datetime_type(obj: Any | pd.Timestamp | pd.Timedelta | dt.datetime | dt.date | dt.time | np.datetime64) -> float:
''' Convert any recognized date, time, or datetime value to floating point
milliseconds since epoch.
Args:
obj (object) : the object to convert
Returns:
float : milliseconds
'''
import pandas as pd
# Pandas NaT
if obj is pd.NaT:
return np.nan
# Pandas Period
if isinstance(obj, pd.Period):
return obj.to_timestamp().value / 10**6.0
# Pandas Timestamp
if isinstance(obj, pd.Timestamp):
return obj.value / 10**6.0
# Pandas Timedelta
elif isinstance(obj, pd.Timedelta):
return obj.value / 10**6.0
# Datetime (datetime is a subclass of date)
elif isinstance(obj, dt.datetime):
diff = obj.replace(tzinfo=dt.timezone.utc) - DT_EPOCH
return diff.total_seconds() * 1000
# XXX (bev) ideally this would not be here "dates are not datetimes"
# Date
elif isinstance(obj, dt.date):
return convert_date_to_datetime(obj)
# NumPy datetime64
elif isinstance(obj, np.datetime64):
epoch_delta = obj - NP_EPOCH
return float(epoch_delta / NP_MS_DELTA)
# Time
elif isinstance(obj, dt.time):
return (obj.hour*3600 + obj.minute*60 + obj.second)*1000 + obj.microsecond/1000.0
raise ValueError(f"unknown datetime object: {obj!r}") |
Convert NumPy datetime arrays to arrays of milliseconds since epoch.
Args:
array (obj) :
A NumPy array of datetimes to convert
If the value passed in is not a NumPy array, it will be returned as-is.
Returns:
array | def convert_datetime_array(array: npt.NDArray[Any]) -> npt.NDArray[np.floating[Any]]:
''' Convert NumPy datetime arrays to arrays of milliseconds since epoch.
Args:
array (obj) :
A NumPy array of datetimes to convert
If the value passed in is not a NumPy array, it will be returned as-is.
Returns:
array
'''
def convert(array: npt.NDArray[Any]) -> npt.NDArray[Any]:
return np.where(np.isnat(array), np.nan, array.astype("int64")/1000.0)
# not quite correct, truncates to ms..
if array.dtype.kind == "M":
return convert(array.astype("datetime64[us]"))
elif array.dtype.kind == "m":
return convert(array.astype("timedelta64[us]"))
# XXX (bev) special case dates, not great
elif array.dtype.kind == "O" and len(array) > 0 and isinstance(array[0], dt.date):
try:
return convert(array.astype("datetime64[us]"))
except Exception:
pass
return array |
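A minimal sketch, assuming ``convert_datetime_array`` above is in scope:
import numpy as np

arr = np.array(["1970-01-01T00:00:01", "NaT"], dtype="datetime64[ns]")
print(convert_datetime_array(arr))  # [1000., nan] -- milliseconds since epoch, with NaT mapped to NaN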
Return a new unique ID for a Bokeh object.
Normally this function will return simple monotonically increasing integer
IDs (as strings) for identifying Bokeh objects within a Document. However,
if it is desirable to have globally unique IDs for every object, this behavior
can be overridden by setting the environment variable ``BOKEH_SIMPLE_IDS=no``.
Returns:
str | def make_id() -> ID:
''' Return a new unique ID for a Bokeh object.
Normally this function will return simple monotonically increasing integer
IDs (as strings) for identifying Bokeh objects within a Document. However,
if it is desirable to have globally unique IDs for every object, this behavior
can be overridden by setting the environment variable ``BOKEH_SIMPLE_IDS=no``.
Returns:
str
'''
global _simple_id
if settings.simple_ids():
with _simple_id_lock:
_simple_id += 1
return ID(f"p{_simple_id}")
else:
return make_globally_unique_id() |
Return a globally unique UUID.
Some situations, e.g. id'ing dynamically created Divs in HTML documents,
always require globally unique IDs.
Returns:
str | def make_globally_unique_id() -> ID:
''' Return a globally unique UUID.
Some situations, e.g. id'ing dynamically created Divs in HTML documents,
always require globally unique IDs.
Returns:
str
'''
return ID(str(uuid.uuid4())) |
Return a globally unique CSS-safe UUID.
Some situations, e.g. id'ing dynamically created Divs in HTML documents,
always require globally unique IDs. IDs generated with this function can
be used in APIs like ``document.querySelector("#id")``.
Returns:
str | def make_globally_unique_css_safe_id() -> ID:
''' Return a globally unique CSS-safe UUID.
Some situations, e.g. id'ing dynamically created Divs in HTML documents,
always require globally unique IDs. IDs generated with this function can
be used in APIs like ``document.querySelector("#id")``.
Returns:
str
'''
max_iter = 100
for _i in range(0, max_iter):
id = make_globally_unique_id()
if id[0].isalpha():
return id
return ID(f"bk-{make_globally_unique_id()}") |
Determine whether binary encoding is disabled for an array (i.e. its dtype is not binary-encodable).
The NumPy array dtypes that can be encoded are:
{binary_array_types}
Args:
array (np.ndarray) : the array to check
Returns:
bool | def array_encoding_disabled(array: npt.NDArray[Any]) -> bool:
''' Determine whether binary encoding is disabled for an array (i.e. its dtype is not binary-encodable).
The NumPy array dtypes that can be encoded are:
{binary_array_types}
Args:
array (np.ndarray) : the array to check
Returns:
bool
'''
# disable binary encoding for non-supported dtypes
return array.dtype not in BINARY_ARRAY_TYPES |
Transform a ndarray into a serializable ndarray.
Converts un-serializable dtypes and returns JSON serializable
format
Args:
array (np.ndarray) : a NumPy array to be transformed
Returns:
ndarray | def transform_array(array: npt.NDArray[Any]) -> npt.NDArray[Any]:
''' Transform a ndarray into a serializable ndarray.
Converts un-serializable dtypes and returns JSON serializable
format
Args:
array (np.ndarray) : a NumPy array to be transformed
Returns:
ndarray
'''
array = convert_datetime_array(array)
# XXX: as long as we can't support 64-bit integers, try to convert
# to 32-bits. If not possible, let the serializer convert to a less
# efficient representation and/or deal with any error messaging.
def _cast_if_can(array: npt.NDArray[Any], dtype: type[Any]) -> npt.NDArray[Any]:
info = np.iinfo(dtype)
if np.any((array < info.min) | (info.max < array)):
return array
else:
return array.astype(dtype, casting="unsafe")
if array.dtype == np.dtype(np.int64):
array = _cast_if_can(array, np.int32)
elif array.dtype == np.dtype(np.uint64):
array = _cast_if_can(array, np.uint32)
if isinstance(array, np.ma.MaskedArray):
array = array.filled(np.nan) # type: ignore # filled is untyped
if not array.flags["C_CONTIGUOUS"]:
array = np.ascontiguousarray(array)
return array |
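A small sketch of the 64-bit downcasting behavior, assuming ``transform_array`` above and its datetime helpers are in scope:
import numpy as np

small = np.array([1, 2, 3], dtype=np.int64)
big = np.array([2**40], dtype=np.int64)
print(transform_array(small).dtype)  # int32 -- values fit, so the array is downcast
print(transform_array(big).dtype)    # int64 -- out of int32 range, left alone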
Transforms a Pandas series into serialized form
Args:
series (pd.Series) : the Pandas series to transform
Returns:
ndarray | def transform_series(series: pd.Series[Any] | pd.Index[Any] | pd.api.extensions.ExtensionArray) -> npt.NDArray[Any]:
''' Transforms a Pandas series into serialized form
Args:
series (pd.Series) : the Pandas series to transform
Returns:
ndarray
'''
import pandas as pd
# not checking for pd here, this function should only be called if it
# is already known that series is a Pandas Series type
if isinstance(series, pd.PeriodIndex):
vals = series.to_timestamp().values
else:
vals = series.to_numpy()
return vals |
Indent all the lines in a given block of text by a specified amount.
Args:
text (str) :
The text to indent
n (int, optional) :
The amount to indent each line by (default: 2)
ch (char, optional) :
What character to fill the indentation with (default: " ") | def indent(text: str, n: int = 2, ch: str = " ") -> str:
''' Indent all the lines in a given block of text by a specified amount.
Args:
text (str) :
The text to indent
n (int, optional) :
The amount to indent each line by (default: 2)
ch (char, optional) :
What character to fill the indentation with (default: " ")
'''
padding = ch * n
return "\n".join(padding + line for line in text.split("\n")) |
Join together sequences of strings into English-friendly phrases using
the conjunction ``or`` when appropriate.
Args:
seq (seq[str]) : a sequence of strings to nicely join
sep (str, optional) : a sequence delimiter to use (default: ", ")
conjunction (str or None, optional) : a conjunction to use for the last
two items, or None to reproduce basic join behaviour (default: "or")
Returns:
a joined string
Examples:
>>> nice_join(["a", "b", "c"])
'a, b or c' | def nice_join(seq: Iterable[str], *, sep: str = ", ", conjunction: str = "or") -> str:
''' Join together sequences of strings into English-friendly phrases using
the conjunction ``or`` when appropriate.
Args:
seq (seq[str]) : a sequence of strings to nicely join
sep (str, optional) : a sequence delimiter to use (default: ", ")
conjunction (str or None, optional) : a conjunction to use for the last
two items, or None to reproduce basic join behaviour (default: "or")
Returns:
a joined string
Examples:
>>> nice_join(["a", "b", "c"])
'a, b or c'
'''
seq = [str(x) for x in seq]
if len(seq) <= 1 or conjunction is None:
return sep.join(seq)
else:
return f"{sep.join(seq[:-1])} {conjunction} {seq[-1]}" |
Convert CamelCase to snake_case. | def snakify(name: str, sep: str = "_") -> str:
''' Convert CamelCase to snake_case. '''
name = re.sub("([A-Z]+)([A-Z][a-z])", rf"\1{sep}\2", name)
name = re.sub("([a-z\\d])([A-Z])", rf"\1{sep}\2", name)
return name.lower() |
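Usage sketch, assuming ``snakify`` above is in scope:
print(snakify("ColumnDataSource"))  # column_data_source
print(snakify("HTMLParser"))        # html_parser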
Safely append to docstrings.
When Python is executed with the ``-OO`` option, doc strings are removed and
replaced with the value ``None``. This function guards against appending the
extra content in that case.
Args:
docstring (str or None) : The docstring to format, or None
extra (str): the content to append if docstring is not None
Returns:
str or None | def append_docstring(docstring: str | None, extra: str) -> str | None:
''' Safely append to docstrings.
When Python is executed with the ``-OO`` option, doc strings are removed and
replaced with the value ``None``. This function guards against appending the
extra content in that case.
Args:
docstring (str or None) : The docstring to format, or None
extra (str): the content to append if docstring is not None
Returns:
str or None
'''
return None if docstring is None else docstring + extra |
Safely format docstrings.
When Python is executed with the ``-OO`` option, doc strings are removed and
replaced with the value ``None``. This function guards against applying the string
formatting options in that case.
Args:
docstring (str or None) : The docstring to format, or ``None``
args (tuple) : string formatting arguments for the docstring
kwargs (dict) : string formatting arguments for the docstring
Returns:
str or None | def format_docstring(docstring: str | None, *args: Any, **kwargs: Any) -> str | None:
''' Safely format docstrings.
When Python is executed with the ``-OO`` option, doc strings are removed and
replaced with the value ``None``. This function guards against applying the string
formatting options in that case.
Args:
docstring (str or None) : The docstring to format, or ``None``
args (tuple) : string formatting arguments for the docstring
kwargs (dict) : string formatting arguments for the docstring
Returns:
str or None
'''
return None if docstring is None else docstring.format(*args, **kwargs) |
Format a base URL with optional query arguments
Args:
url (str) :
A base URL to append query arguments to
arguments (dict or None, optional) :
A mapping of key/value URL query arguments, or None (default: None)
Returns:
str | def format_url_query_arguments(url: str, arguments: dict[str, str] | None = None) -> str:
''' Format a base URL with optional query arguments
Args:
url (str) :
A base URL to append query arguments to
arguments (dict or None, optional) :
A mapping of key/value URL query arguments, or None (default: None)
Returns:
str
'''
if arguments is not None:
items = (f"{quote_plus(key)}={quote_plus(value)}" for key, value in arguments.items())
url += "?" + "&".join(items)
return url |
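Usage sketch, assuming ``format_url_query_arguments`` above (with its ``quote_plus`` import) is in scope:
url = format_url_query_arguments("http://localhost:5006/app", {"name": "my app", "mode": "dev"})
print(url)  # http://localhost:5006/app?name=my+app&mode=dev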
Generate a new securely-generated secret key appropriate for SHA-256
HMAC signatures.
This key could be used to sign Bokeh server session IDs, for example. | def generate_secret_key() -> str:
''' Generate a new securely-generated secret key appropriate for SHA-256
HMAC signatures.
This key could be used to sign Bokeh server session IDs, for example.
'''
return _get_random_string() |
Generate a random session ID.
Typically, each browser tab connected to a Bokeh application has its own
session ID. In production deployments of a Bokeh app, session IDs should be
random and unguessable - otherwise users of the app could interfere with one
another. | def generate_session_id(secret_key: bytes | None = settings.secret_key_bytes(),
signed: bool = settings.sign_sessions()) -> ID:
''' Generate a random session ID.
Typically, each browser tab connected to a Bokeh application has its own
session ID. In production deployments of a Bokeh app, session IDs should be
random and unguessable - otherwise users of the app could interfere with one
another.
'''
session_id = _get_random_string()
if signed:
session_id = '.'.join([session_id, _signature(session_id, secret_key)])
return ID(session_id) |
Generates a JWT token given a session_id and additional payload.
Args:
session_id (str):
The session id to add to the token
secret_key (str, optional) :
Secret key (default: value of BOKEH_SECRET_KEY environment variable)
signed (bool, optional) :
Whether to sign the session ID (default: value of BOKEH_SIGN_SESSIONS
environment variable)
extra_payload (dict, optional) :
Extra key/value pairs to include in the Bokeh session token
expiration (int, optional) :
Expiration time
Returns:
str | def generate_jwt_token(session_id: ID,
secret_key: bytes | None = settings.secret_key_bytes(),
signed: bool = settings.sign_sessions(),
extra_payload: TokenPayload | None = None,
expiration: int = 300) -> str:
""" Generates a JWT token given a session_id and additional payload.
Args:
session_id (str):
The session id to add to the token
secret_key (str, optional) :
Secret key (default: value of BOKEH_SECRET_KEY environment variable)
signed (bool, optional) :
Whether to sign the session ID (default: value of BOKEH_SIGN_SESSIONS
environment variable)
extra_payload (dict, optional) :
Extra key/value pairs to include in the Bokeh session token
expiration (int, optional) :
Expiration time
Returns:
str
"""
now = calendar.timegm(dt.datetime.now(tz=dt.timezone.utc).timetuple())
payload = {'session_id': session_id, 'session_expiry': now + expiration}
if extra_payload:
if "session_id" in extra_payload:
raise RuntimeError("extra_payload for session tokens may not contain 'session_id'")
extra_payload_str = json.dumps(extra_payload, cls=_BytesEncoder).encode('utf-8')
compressed = zlib.compress(extra_payload_str, level=9)
payload[_TOKEN_ZLIB_KEY] = _base64_encode(compressed)
token = _base64_encode(json.dumps(payload))
secret_key = _ensure_bytes(secret_key)
if not signed:
return token
return token + '.' + _signature(token, secret_key) |
Extracts the session id from a JWT token.
Args:
token (str):
A JWT token containing the session_id and other data.
Returns:
str | def get_session_id(token: str) -> ID:
"""Extracts the session id from a JWT token.
Args:
token (str):
A JWT token containing the session_id and other data.
Returns:
str
"""
decoded = json.loads(_base64_decode(token.split('.')[0]))
return decoded['session_id'] |
Extract the payload from the token.
Args:
token (str):
A JWT token containing the session_id and other data.
Returns:
dict | def get_token_payload(token: str) -> TokenPayload:
"""Extract the payload from the token.
Args:
token (str):
A JWT token containing the session_id and other data.
Returns:
dict
"""
decoded = json.loads(_base64_decode(token.split('.')[0]))
if _TOKEN_ZLIB_KEY in decoded:
decompressed = zlib.decompress(_base64_decode(decoded[_TOKEN_ZLIB_KEY]))
del decoded[_TOKEN_ZLIB_KEY]
decoded.update(json.loads(decompressed, cls=_BytesDecoder))
del decoded['session_id']
return decoded |
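A round-trip sketch tying the token helpers together; this assumes ``generate_jwt_token``, ``get_session_id`` and ``get_token_payload`` above, plus their secret-key and encoding helpers, are all in scope:
token = generate_jwt_token("abc123", signed=False, extra_payload={"user": "jo"})
print(get_session_id(token))             # abc123
print(get_token_payload(token)["user"])  # jo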
Check the signature of a token and the contained session ID.
The server uses this function to check whether a token and the
contained session ID were generated with the correct secret key.
If signed sessions are disabled, this function always returns True.
Args:
token (str) :
The token to check
secret_key (str, optional) :
Secret key (default: value of BOKEH_SECRET_KEY environment variable)
signed (bool, optional) :
Whether to check anything (default: value of BOKEH_SIGN_SESSIONS
environment variable)
Returns:
bool | def check_token_signature(token: str,
secret_key: bytes | None = settings.secret_key_bytes(),
signed: bool = settings.sign_sessions()) -> bool:
"""Check the signature of a token and the contained signature.
The server uses this function to check whether a token and the
contained session id was generated with the correct secret key.
If signed sessions are disabled, this function always returns True.
Args:
token (str) :
The token to check
secret_key (str, optional) :
Secret key (default: value of BOKEH_SECRET_KEY environment variable)
signed (bool, optional) :
Whether to check anything (default: value of BOKEH_SIGN_SESSIONS
environment variable)
Returns:
bool
"""
secret_key = _ensure_bytes(secret_key)
if signed:
token_pieces = token.split('.', 1)
if len(token_pieces) != 2:
return False
base_token = token_pieces[0]
provided_token_signature = token_pieces[1]
expected_token_signature = _signature(base_token, secret_key)
# hmac.compare_digest() uses a string compare algorithm that doesn't
# short-circuit so we don't allow timing analysis
token_valid = hmac.compare_digest(
expected_token_signature, provided_token_signature,
)
session_id = get_session_id(token)
session_id_valid = check_session_id_signature(session_id, secret_key, signed)
return token_valid and session_id_valid
return True |
Check the signature of a session ID, returning True if it's valid.
The server uses this function to check whether a session ID was generated
with the correct secret key. If signed sessions are disabled, this function
always returns True. | def check_session_id_signature(session_id: str,
secret_key: bytes | None = settings.secret_key_bytes(),
signed: bool | None = settings.sign_sessions()) -> bool:
"""Check the signature of a session ID, returning True if it's valid.
The server uses this function to check whether a session ID was generated
with the correct secret key. If signed sessions are disabled, this function
always returns True.
"""
secret_key = _ensure_bytes(secret_key)
if signed:
id_pieces = session_id.split('.', 1)
if len(id_pieces) != 2:
return False
provided_id_signature = id_pieces[1]
expected_id_signature = _signature(id_pieces[0], secret_key)
return hmac.compare_digest(
expected_id_signature, provided_id_signature,
)
return True |
Return a securely generated random string.
With the a-z, A-Z, 0-9 character set:
Length 12 is a 71-bit value. log_2((26+26+10)^12) =~ 71
Length 44 is a 261-bit value. log_2((26+26+10)^44) = 261 | def _get_random_string(
length: int = 44,
allowed_chars: str = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',
secret_key: bytes | None = settings.secret_key_bytes()) -> str:
""" Return a securely generated random string.
With the a-z, A-Z, 0-9 character set:
Length 12 is a 71-bit value. log_2((26+26+10)^12) =~ 71
Length 44 is a 261-bit value. log_2((26+26+10)^44) = 261
"""
secret_key = _ensure_bytes(secret_key)
_reseed_if_needed(using_sysrandom, secret_key)
return ''.join(random.choice(allowed_chars) for _ in range(length)) |
Find the first place in the stack that is not inside Bokeh.
Inspired by: pandas.util._exceptions.find_stack_level | def find_stack_level() -> int:
"""Find the first place in the stack that is not inside Bokeh.
Inspired by: pandas.util._exceptions.find_stack_level
"""
import bokeh
pkg_dir = os.path.dirname(bokeh.__file__)
# https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow
frame = inspect.currentframe()
n = 0
while frame:
fname = inspect.getfile(frame)
if fname.startswith(pkg_dir):
frame = frame.f_back
n += 1
else:
break
return n |
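A typical usage sketch, assuming ``find_stack_level`` above is in scope and Bokeh is importable: point a warning at the first frame outside of Bokeh.
import warnings

warnings.warn("something looks off", UserWarning, stacklevel=find_stack_level())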
Applies a collection of general codebase style and quality rules to
every file in the repository. Unless specifically excepted:
* Files should not contain tabs
* Files should not start with newlines
* Files should end with one empty line
* Lines should not contain trailing whitespace
* Lines should not exceed 160 characters | def test_code_quality() -> None:
''' Applies a collection of general codebase style and quality rules to
every file in the repository. Unless specifically excepted:
* Files should not contain tabs
* Files should not start with newlines
* Files should end with one empty line
* Lines should not contain trailing whitespace
* Lines should not exceed 160 characters
'''
errors = collect_errors()
assert len(errors) == 0, "Code quality issues:\n" + "\n".join(errors) |
Returns True if there are tabs in the leading whitespace of a line,
including the whitespace of docstring code samples. | def tab_in_leading(s: str) -> bool:
""" Returns True if there are tabs in the leading whitespace of a line,
including the whitespace of docstring code samples.
"""
n = len(s) - len(s.lstrip())
if s[n:n + 3] not in ['...', '>>>']:
check = s[:n]
else:
smore = s[n + 3:]
check = s[:n] + smore[:len(smore) - len(smore.lstrip())]
return check.expandtabs() != check |
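Quick checks, assuming ``tab_in_leading`` above is in scope:
print(tab_in_leading("\tx = 1"))       # True  -- tab in the leading whitespace
print(tab_in_leading("    x = '\t'"))  # False -- tab only appears inside the line body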
Assures that the BokehJS codebase passes configured eslint checks
| def test_eslint() -> None:
''' Assures that the BokehJS codebase passes configured eslint checks
'''
chdir(TOP_PATH/"bokehjs")
proc = run(["node", "make", "lint"], capture_output=True)
assert proc.returncode == 0, f"eslint issues:\n{proc.stdout.decode('utf-8')}" |
Assures that the Python codebase imports are correctly sorted.
| def isort(dir: str) -> None:
''' Assures that the Python codebase imports are correctly sorted.
'''
chdir(TOP_PATH)
proc = run(["isort", "--gitignore", "--diff", "-c", dir], capture_output=True)
assert proc.returncode == 0, f"isort issues:\n{proc.stdout.decode('utf-8')}" |
If the current set of JS licenses changes, they should be noted in
the bokehjs/LICENSE file. | def test_js_license_set() -> None:
''' If the current set of JS licenses changes, they should be noted in
the bokehjs/LICENSE file.
'''
os.chdir('bokehjs')
cmd = ["npx", "license-checker", "--production", "--summary", "--onlyAllow", ";".join(LICENSES)]
proc = run(cmd)
assert proc.returncode == 0, "New BokehJS licenses detected" |
Ensure the top-level repo LICENSE.txt always matches the copy in
the Python package folder (needed there when generating packages). | def test_license_set() -> None:
''' Ensure the top-level repo LICENSE.txt always matches the copy in
the Python package folder (needed there when generating packages).
'''
chdir(TOP_PATH)
proc = run(["diff", "LICENSE.txt", join("src", "bokeh", "LICENSE.txt")], capture_output=True)
assert proc.returncode == 0, f"LICENSE.txt mismatch:\n{proc.stdout.decode('utf-8')}" |
Basic usage of Bokeh should not result in any client code being
imported. This test ensures that importing basic modules does not bring in
bokeh.client. | def test_no_client_common() -> None:
''' Basic usage of Bokeh should not result in any client code being
imported. This test ensures that importing basic modules does not bring in
bokeh.client.
'''
proc = run([python, "-c", verify_clean_imports('bokeh.client', modules)])
assert proc.returncode == 0, "bokeh.client imported in common modules" |
Basic usage of Bokeh should not result in any server code being
imported. This test ensures that importing basic modules does not bring in
bokeh.server. | def test_no_server_common() -> None:
''' Basic usage of Bokeh should not result in any server code being
imported. This test ensures that importing basic modules does not bring in
bokeh.server.
'''
proc = run([python, "-c", verify_clean_imports('bokeh.server', modules)])
assert proc.returncode == 0, "bokeh.server imported in common modules" |
It is not safe for the Bokeh codebase to use request.host in any way.
This test ensures "request.host" does not appear in any file. | def test_no_request_host() -> None:
''' It is not safe for the Bokeh codebase to use request.host in any way.
This test ensures "request.host" does not appear in any file.
'''
errors = collect_errors()
assert len(errors) == 0, "request.host usage issues:\n" + "\n".join(errors) |
Basic usage of Bokeh should not result in any Tornado code being
imported. This test ensures that importing basic modules does not bring in
Tornado. | def test_no_tornado_common_combined() -> None:
''' Basic usage of Bokeh should not result in any Tornado code being
imported. This test ensures that importing basic modules does not bring in
Tornado.
'''
proc = run([python, "-c", verify_clean_imports('tornado', MODULES)])
assert proc.returncode == 0, "Tornado imported in collective common modules" |
Running python with -OO will discard docstrings (__doc__ is None)
which can cause problems if docstrings are naively formatted.
This test ensures that all modules are importable, even with -OO set.
If you encounter a new problem with docstrings being formatted, try
using format_docstring. | def test_python_execution_with_OO() -> None:
''' Running python with -OO will discard docstrings (__doc__ is None)
which can cause problems if docstrings are naively formatted.
This test ensures that all modules are importable, even with -OO set.
If you encounter a new problem with docstrings being formatted, try
using format_docstring.
'''
imports = [f"import {mod}" for mod in ls_modules(skip_prefixes=SKIP)]
proc = Popen([python, "-OO", "-"], stdout=PIPE, stdin=PIPE)
proc.communicate("\n".join(imports).encode("utf-8"))
proc.wait()
assert proc.returncode == 0, "Execution with -OO failed" |
Certain seemingly innocuous filenames like "aux.js" will cause
Windows packages to fail spectacularly. This test ensures those reserved
names are not present in the codebase. | def test_windows_reserved_filenames() -> None:
''' Certain seemingly innocuous filenames like "aux.js" will cause
Windows packages to fail spectacularly. This test ensures those reserved
names are not present in the codebase.
'''
bad: list[str] = []
for path, _, files in os.walk(TOP_PATH):
for file in files:
if splitext(file)[0].upper() in RESERVED_NAMES:
bad.append(join(path, file))
assert len(bad) == 0, f"Windows reserved filenames detected:\n{nice_join(bad)}" |
A PyTest fixture that will automatically skip a test if IPython is
not installed. | def ipython() -> ModuleType | None: # XXX: should be IPython | None, but not supported
''' A PyTest fixture that will automatically skip a test if IPython is
not installed.
'''
ipython = import_optional('IPython')
if ipython is None:
pytest.skip('IPython is not installed')
return ipython |
Starts a jupyter notebook server at the beginning of a session, and
closes it at the end of the session.
Adds custom.js that runs all the cells on notebook opening. Cleans out
this custom.js at the end of the test run.
Returns the url that the jupyter notebook is running at. | def jupyter_notebook(request: pytest.FixtureRequest, log_file: IO[str]) -> str:
"""
Starts a jupyter notebook server at the beginning of a session, and
closes it at the end of the session.
Adds custom.js that runs all the cells on notebook opening. Cleans out
this custom.js at the end of the test run.
Returns the url that the jupyter notebook is running at.
"""
# First - set-up the notebooks to run all cells when they're opened
#
# Can be cleaned up further to remember the user's existing customJS
# and then restore it after the test run.
from jupyter_core import paths
config_dir = paths.jupyter_config_dir()
body = """
require(["base/js/namespace", "base/js/events"], function (IPython, events) {
events.on("kernel_ready.Kernel", function () {
IPython.notebook.execute_all_cells();
});
});
"""
custom = join(config_dir, "custom")
if not exists(custom):
os.makedirs(custom)
customjs = join(custom, "custom.js")
old_customjs = None
if exists(customjs):
with open(customjs) as f:
old_customjs = f.read()
with open(customjs, "w") as f:
f.write(body)
# Add in the clean-up code
def clean_up_customjs() -> None:
text = old_customjs if old_customjs is not None else ""
with open(customjs, "w") as f:
f.write(text)
request.addfinalizer(clean_up_customjs)
# Second - Run a notebook server at the examples directory
#
notebook_port = request.config.option.notebook_port
env = os.environ.copy()
env['BOKEH_RESOURCES'] = 'server'
# Launch from the base directory of bokeh repo
notebook_dir = join(dirname(__file__), pardir, pardir)
cmd = ["jupyter", "notebook"]
argv = ["--no-browser", f"--port={notebook_port}", f"--notebook-dir={notebook_dir}"]
jupter_notebook_url = f"http://localhost:{notebook_port}"
try:
proc = subprocess.Popen(cmd + argv, env=env, stdout=log_file, stderr=log_file)
except OSError:
write(f"Failed to run: {' '.join(cmd + argv)}")
sys.exit(1)
else:
# Add in the clean-up code
def stop_jupyter_notebook() -> None:
write("Shutting down jupyter-notebook ...")
proc.kill()
request.addfinalizer(stop_jupyter_notebook)
def wait_until(func: Callable[[], Any], timeout: float = 5.0, interval: float = 0.01) -> bool:
start = time.time()
while True:
if func():
return True
if time.time() - start > timeout:
return False
time.sleep(interval)
def wait_for_jupyter_notebook() -> bool:
def helper() -> Any:
if proc.returncode is not None:
return True
try:
return requests.get(jupyter_notebook_url)
except ConnectionError:
return False
return wait_until(helper)
if not wait_for_jupyter_notebook():
write(f"Timeout when running: {' '.join(cmd + argv)}")
sys.exit(1)
if proc.returncode is not None:
write(f"Jupyter notebook exited with code {proc.returncode}")
sys.exit(1)
return jupyter_notebook_url |
A PyTest fixture that will automatically skip a test if networkx is
not installed. | def nx() -> ModuleType | None: # XXX: should be networkx | None, but not supported
''' A PyTest fixture that will automatically skip a test if networkx is
not installed.
'''
nx = import_optional('networkx')
if nx is None:
pytest.skip('networkx is not installed')
return nx |
Start a Bokeh server app and return information needed to test it.
Returns a tuple (url, message_test_port), where the latter is an instance of
``MessageTestPort`` dataclass, and will contain all messages that the Bokeh
Server sends/receives while running during the test. | def bokeh_app_info(request: pytest.FixtureRequest, driver: WebDriver) -> BokehAppInfo:
''' Start a Bokeh server app and return information needed to test it.
Returns a tuple (url, message_test_port), where the latter is an instance of
``MessageTestPort`` dataclass, and will contain all messages that the Bokeh
Server sends/receives while running during the test.
'''
def func(modify_doc: ModifyDoc) -> tuple[str, ws.MessageTestPort]:
ws._message_test_port = ws.MessageTestPort(sent=[], received=[])
port = find_free_port()
def worker() -> None:
io_loop = IOLoop()
server = Server({'/': modify_doc},
port=port,
io_loop=io_loop,
extra_patterns=[('/exit', _ExitHandler, dict(io_loop=io_loop))])
server.start()
server.io_loop.start()
t = Thread(target=worker)
t.start()
def cleanup() -> None:
driver.get(f"http://localhost:{port}/exit")
# XXX (bev) this line is a workaround for https://github.com/bokeh/bokeh/issues/7970
# and should be removed when that issue is resolved
driver.get_log('browser')
ws._message_test_port = None
t.join()
request.addfinalizer(cleanup)
return f"http://localhost:{port}/", ws._message_test_port
return func |
def pytest_report_collectionfinish(config: config.Config, startdir: py.path.local, items: Sequence[nodes.Item]) -> list[str]:
'''
'''
driver_name: str = config.getoption('driver', 'chrome').lower()
asserts = "ON" if driver_name == "chrome" else "OFF"
return ["", f"Bokeh selenium tests using {driver_name!r} driver (no-console-error assertions: {asserts})"] |
|
Select and configure a Selenium webdriver for integration tests.
| def driver(pytestconfig: config.Config) -> Iterator[WebDriver]:
''' Select and configure a Selenium webdriver for integration tests.
'''
driver_name: str = pytestconfig.getoption('driver', 'chrome').lower()
def chrome() -> WebDriver:
for executable in ["chromedriver", "chromium.chromedriver", "chromedriver-binary"]:
executable_path = which(executable)
if executable_path is not None:
break
else:
raise RuntimeError("chromedriver or its variant is not installed or not present on PATH")
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.webdriver import WebDriver as Chrome
service = Service(executable_path)
options = Options()
options.add_argument("--headless")
options.add_argument("--no-sandbox")
options.add_argument("--window-size=1920x1080")
return Chrome(service=service, options=options)
def firefox() -> WebDriver:
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.firefox.webdriver import WebDriver as Firefox
options = Options()
options.add_argument("--headless")
options.add_argument("--window-size=1920x1080")
return Firefox(options=options)
def safari() -> WebDriver:
from selenium.webdriver.safari.webdriver import WebDriver as Safari
return Safari()
driver: WebDriver
if driver_name == "chrome":
driver = chrome()
elif driver_name == "firefox":
driver = firefox()
elif driver_name == "safari":
driver = safari()
else:
raise ValueError("expected 'chrome', 'firefox' or 'safari'")
driver.implicitly_wait(10)
yield driver
driver.quit() |
Provide a function to assert no browser console errors are present.
Unfortunately logs are only accessible with the Chrome web driver, see e.g.
https://github.com/mozilla/geckodriver/issues/284
For non-Chrome webdrivers this check always returns True. | def has_no_console_errors(pytestconfig: config.Config) -> Callable[[WebDriver], bool]:
''' Provide a function to assert no browser console errors are present.
Unfortunately logs are only accessible with the Chrome web driver, see e.g.
https://github.com/mozilla/geckodriver/issues/284
For non-Chrome webdrivers this check always returns True.
'''
driver_name: str = pytestconfig.getoption('driver').lower()
if driver_name == "chrome":
def func(driver: WebDriver) -> bool:
logs = driver.get_log('browser')
severe_errors = [x for x in logs if x.get('level') == 'SEVERE']
non_network_errors = [l for l in severe_errors if l.get('type') != 'network']
if len(non_network_errors) == 0:
if len(severe_errors) != 0:
warn(f"There were severe network errors (this may or may not have affected your test): {severe_errors}")
return True
pytest.fail(f"Console errors: {non_network_errors}")
else:
def func(driver: WebDriver) -> bool:
return True
return func |
def verify_all(module: str | ModuleType, ALL: Sequence[str]) -> type:
'''
'''
class Test___all__:
_module: ModuleType | None = None
@property
def module(self) -> ModuleType:
if self._module is None:
if isinstance(module, str):
self._module = importlib.import_module(module)
else:
self._module = module
return self._module
def test___all__(self) -> None:
__all__: Sequence[str] | None = getattr(self.module, "__all__", None)
assert __all__ is not None, f"module {self.module.__name__} doesn't define __all__"
assert __all__ == ALL, f"for module {self.module.__name__}, expected: {set(ALL) - set(__all__)!r}, actual: {set(__all__) - set(ALL)!r}"
@pytest.mark.parametrize('name', ALL)
def test_contents(self, name: str) -> None:
assert hasattr(self.module, name)
return Test___all__ |
|
Compares data dictionaries containing floats, lists and arrays.
Also supports nested lists and arrays.
'''Compares data dictionaries containing floats, lists and arrays.
Also supports nested lists and arrays.
'''
if sorted(data1.keys()) != sorted(data2.keys()):
return False
for c in data1.keys():
cd1 = data1[c]
cd2 = data2[c]
if len(cd1) != len(cd2):
return False
for v1, v2 in zip(cd1, cd2):
if isinstance(v1, float | int) and isinstance(v2, float | int):
if not np.isclose(v1, v2, rtol, atol):
return False
elif isinstance(v1, list | np.ndarray) and isinstance(v2, list | np.ndarray):
v1, v2 = np.asarray(v1), np.asarray(v2)
if v1.dtype.kind in 'iufcmM' and v2.dtype.kind in 'iufcmM':
if (~np.isclose(v1, v2, rtol, atol)).any():
return False
elif (v1 != v2).any():
return False
elif v1 != v2:
return False
return True |
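A small sketch, assuming ``cds_data_almost_equal`` above is in scope:
import numpy as np

d1 = dict(x=[1.0, 2.0], y=np.array([0.1, 0.2]))
d2 = dict(x=[1.0, 2.0 + 1e-12], y=np.array([0.1, 0.2]))
print(cds_data_almost_equal(d1, d2))  # True -- differences are within the default relative tolerance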
Temporarily set environment variables and undo the updates on exit.
Args:
value (optional) :
A mapping of strings to strings to apply to os.environ
Any remaining keyword args are applied to os.environ | def envset(value: Mapping[str, str]|None=None, **kw: Any) -> Iterator[None]:
''' Temporarily set environment variables and undo the updates on exit.
Args:
value (optional) :
A mapping of strings to strings to apply to os.environ
Any remaining keyword args are applied to os.environ
'''
old = os.environ.copy()
if value:
os.environ.update(value)
os.environ.update(**kw)
yield
# take care to keep the same actual dict object
os.environ.clear()
os.environ.update(old) |
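Usage sketch, assuming ``envset`` above is in scope and wrapped with ``contextlib.contextmanager`` (as its generator form suggests):
import os

with envset({"BOKEH_RESOURCES": "inline"}, BOKEH_LOG_LEVEL="debug"):
    assert os.environ["BOKEH_RESOURCES"] == "inline"
# on exit the previous contents of os.environ are restored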
def with_directory_contents(contents: dict[PathLike, str | None], func: Callable[[str], None]) -> None:
'''
'''
with TmpDir(prefix="test-") as dirname:
for filename, file_content in contents.items():
path = os.path.join(dirname, filename)
if file_content is None:
os.makedirs(path, exist_ok=True)
else:
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "w", encoding="utf-8") as f:
f.write(file_content)
func(os.path.realpath(dirname)) |
|
def with_file_contents(contents: str, func: Callable[[str], None], dir: PathLike | None = None, suffix: str = '') -> None:
'''
'''
def with_file_object(f: IO[bytes]) -> None:
f.write(contents.encode("UTF-8"))
f.flush()
# Windows will get mad if we try to rename it without closing,
# and some users of with_file_contents want to rename it.
f.close()
func(f.name)
with_temporary_file(with_file_object, dir=dir, suffix=suffix) |
|
def with_temporary_file(func: Callable[[IO[bytes]], None], dir: PathLike | None = None, suffix: str = '') -> None:
'''
'''
if dir is None:
dir = _LOCAL_TMP
# Windows throws a permission denied if we use delete=True for
# auto-delete, and then try to open the file again ourselves
# with f.name. So we manually delete in the finally block
# below.
f = tempfile.NamedTemporaryFile(dir=dir, delete=False, suffix=suffix)
try:
func(f)
finally:
f.close()
os.remove(f.name) |
|
The local_wait and global_wait arguments are in milliseconds. | def _run_in_browser(engine: list[str], url: str, local_wait: int | None = None, global_wait: int | None = None) -> JSResult:
"""
The local_wait and global_wait arguments are in milliseconds.
"""
cmd = [*engine, url]
if local_wait is not None:
cmd += [str(local_wait)]
if global_wait is not None:
cmd += [str(global_wait)]
trace(f"Running command: {' '.join(cmd)}")
env = os.environ.copy()
env["NODE_PATH"] = join(TOP_PATH, 'bokehjs', 'node_modules')
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
except OSError as e:
fail(f"Failed to run: {' '.join(cmd)}")
fail(str(e))
sys.exit(1)
with proc:
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
output = stderr.decode("utf-8")
fail(output)
sys.exit(1)
output = stdout.decode("utf-8")
return json.loads(output) |
model = Plot()
decoder = Deserializer(references=[model])
event = decoder.decode(dict(
type="event",
name=events.MouseWheel.event_name,
values=dict(
type="map",
entries=[
["model", dict(id=model.id)],
],
),
)) | def test_mousewheelevent_decode_json() -> None:
"""
model = Plot()
decoder = Deserializer(references=[model])
event = decoder.decode(dict(
type="event",
name=events.MouseWheel.event_name,
values=dict(
type="map",
entries=[
["model", dict(id=model.id)],
],
),
))
"""
model = Plot()
decoder = Deserializer(references=[model])
event = decoder.decode(dict(
type="event",
name=events.MouseWheel.event_name,
values=dict(
type="map",
entries=[
["model", dict(id=model.id)],
["delta", -0.1],
["sx", 3],
["sy", -2],
["x", 10],
["y", 100],
],
),
))
assert event.model == model
assert event.delta == -0.1
assert event.sx == 3
assert event.sy == -2
assert event.x == 10
assert event.y == 100 |
This test method has to be at the end of the test modules because
subclassing a Model causes the CustomModel to be added as a Model and
messes up the Resources state for the other tests. | def test_external_js_and_css_resource_embedding() -> None:
""" This test method has to be at the end of the test modules because
subclassing a Model causes the CustomModel to be added as a Model and
messes up the Resources state for the other tests.
"""
# External resources can be defined as a string or list of strings
class CustomModel1(Model):
__javascript__ = "external_js_1"
__css__ = "external_css_1"
class CustomModel2(Model):
__javascript__ = ["external_js_2", "external_js_3"]
__css__ = ["external_css_2", "external_css_3"]
class CustomModel3(Model):
__javascript__ = ["external_js_1", "external_js_3"]
__css__ = ["external_css_1", "external_css_2"]
r = resources.Resources()
assert "external_js_1" in r.js_files
assert "external_css_1" in r.css_files
assert "external_js_2" in r.js_files
assert "external_js_3" in r.js_files
assert "external_css_2" in r.css_files
assert "external_css_3" in r.css_files
# Deduplication should keep the first instance of every file
assert r.css_files.count("external_css_1") == 1
assert r.css_files.count("external_css_2") == 1
assert r.js_files.count("external_js_3") == 1
assert r.js_files.count("external_js_1") == 1 |
This is not really a test but a reminder that if you change the
autoload_nb_js.js template then you should make sure that insertion of
plots into notebooks is working as expected. In particular, this test was
created as part of https://github.com/bokeh/bokeh/issues/7125. | def test_autoload_template_has_changed() -> None:
"""This is not really a test but a reminder that if you change the
autoload_nb_js.js template then you should make sure that insertion of
plots into notebooks is working as expected. In particular, this test was
created as part of https://github.com/bokeh/bokeh/issues/7125.
"""
with open(join(TOP_PATH, "_templates/autoload_nb_js.js"), mode="rb") as f:
current_template_sha256 = compute_sha256(_crlf_cr_2_lf_bin(f.read()))
assert pinned_template_sha256 == current_template_sha256, """\
It seems that the template autoload_nb_js.js has changed.
If this is voluntary and that proper testing of plots insertion
in notebooks has been completed successfully, update this test
with the new file SHA256 signature.""" |
Yields the sequence of prime numbers via the Sieve of Eratosthenes. | def eratosthenes():
"""Yields the sequence of prime numbers via the Sieve of Eratosthenes."""
D = {} # map each composite integer to its first-found prime factor
q = 2 # q gets 2, 3, 4, 5, ... ad infinitum
while True:
p = D.pop(q, None)
if p is None:
# q not a key in D, so q is prime, therefore, yield it
yield q
# mark q squared as not-prime (with q as first-found prime factor)
D[q * q] = q
else:
# let x <- smallest (N*p)+q which wasn't yet known to be composite
# we just learned x is composite, with p first-found prime factor,
# since p is the first-found prime factor of q -- find and mark it
x = p + q
while x in D:
x += p
D[x] = p
q += 1 |
Yields numbers with 2 prime factors pfix and p. | def two_prime_factors(pfix=65537):
"""Yields numbers with 2 prime factors pfix and p."""
for p in eratosthenes():
yield pfix * p |
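Both generators are infinite, so callers slice them; a quick illustration:

from itertools import islice

assert list(islice(eratosthenes(), 10)) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]

# products of the fixed prime 65537 with each prime p, in increasing p
assert list(islice(two_prime_factors(), 3)) == [65537 * 2, 65537 * 3, 65537 * 5]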
this checks for some race conditions between the first filename-based stat()
we did before dispatching to the (hopefully correct) file type backup handler
and the (hopefully) fd-based fstat() we did in the handler.
if there is a problematic difference (e.g. file type changed), we rather
skip the file than being tricked into a security problem.
such races should only happen if:
- we are backing up a live filesystem (no snapshot, not inactive)
- if files change due to normal fs activity at an unfortunate time
- if somebody is doing an attack against us | def stat_update_check(st_old, st_curr):
"""
this checks for some race conditions between the first filename-based stat()
we did before dispatching to the (hopefully correct) file type backup handler
and the (hopefully) fd-based fstat() we did in the handler.
if there is a problematic difference (e.g. file type changed), we rather
skip the file than being tricked into a security problem.
such races should only happen if:
- we are backing up a live filesystem (no snapshot, not inactive)
- if files change due to normal fs activity at an unfortunate time
- if somebody is doing an attack against us
"""
# assuming that a file type change implicates a different inode change AND that inode numbers
# are not duplicate in a short timeframe, this check is redundant and solved by the ino check:
if stat.S_IFMT(st_old.st_mode) != stat.S_IFMT(st_curr.st_mode):
# in this case, we dispatched to wrong handler - abort
raise BackupRaceConditionError("file type changed (race condition), skipping file")
if st_old.st_ino != st_curr.st_ino:
# in this case, the hardlinks-related code in create_helper has the wrong inode - abort!
raise BackupRaceConditionError("file inode changed (race condition), skipping file")
# looks ok, we are still dealing with the same thing - return current stat:
return st_curr |
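A sketch of the intended call pattern: stat by name first, open, then fstat the descriptor and let the check confirm it is still the same file. BackupRaceConditionError comes from the surrounding code base; the helper name here is made up.

import os

def open_checked(path):
    st_before = os.stat(path, follow_symlinks=False)
    fd = os.open(path, os.O_RDONLY | getattr(os, "O_NOFOLLOW", 0))
    try:
        st_now = stat_update_check(st_before, os.fstat(fd))
    except BackupRaceConditionError:
        os.close(fd)
        raise
    return fd, st_now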
gets a (potentially large) list of archive metadata stream chunk ids and writes them to repo objects | def archive_put_items(chunk_ids, *, repo_objs, cache=None, stats=None, add_reference=None):
"""gets a (potentially large) list of archive metadata stream chunk ids and writes them to repo objects"""
item_ptrs = []
for i in range(0, len(chunk_ids), IDS_PER_CHUNK):
data = msgpack.packb(chunk_ids[i : i + IDS_PER_CHUNK])
id = repo_objs.id_hash(data)
logger.debug(f"writing item_ptrs chunk {bin_to_hex(id)}")
if cache is not None and stats is not None:
cache.add_chunk(id, {}, data, stats=stats, ro_type=ROBJ_ARCHIVE_CHUNKIDS)
elif add_reference is not None:
cdata = repo_objs.format(id, {}, data, ro_type=ROBJ_ARCHIVE_CHUNKIDS)
add_reference(id, len(data), cdata)
else:
raise NotImplementedError
item_ptrs.append(id)
return item_ptrs |
check if the data <d> looks like a msgpacked dict | def valid_msgpacked_dict(d, keys_serialized):
"""check if the data <d> looks like a msgpacked dict"""
d_len = len(d)
if d_len == 0:
return False
if d[0] & 0xF0 == 0x80: # object is a fixmap (up to 15 elements)
offs = 1
elif d[0] == 0xDE: # object is a map16 (up to 2^16-1 elements)
offs = 3
else:
# object is not a map (dict)
# note: we must not have dicts with > 2^16-1 elements
return False
if d_len <= offs:
return False
# is the first dict key a bytestring?
if d[offs] & 0xE0 == 0xA0: # key is a small bytestring (up to 31 chars)
pass
elif d[offs] in (0xD9, 0xDA, 0xDB): # key is a str8, str16 or str32
pass
else:
# key is not a bytestring
return False
# is the bytestring any of the expected key names?
key_serialized = d[offs:]
return any(key_serialized.startswith(pattern) for pattern in keys_serialized) |
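A small round-trip check of the heuristic above, assuming a standard msgpack binding is available (the key names are only examples):

import msgpack

keys_serialized = [msgpack.packb("path"), msgpack.packb("chunks")]

packed = msgpack.packb({"path": "/etc/hosts", "mode": 0o644})
assert valid_msgpacked_dict(packed, keys_serialized)

# not a map at all -> rejected
assert not valid_msgpacked_dict(msgpack.packb([1, 2, 3]), keys_serialized)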
setup logging module according to the arguments provided
if conf_fname is given (or the config file name can be determined via
the env_var, if given): load this logging configuration.
otherwise, set up a stream handler logger on stderr (by default, if no
stream is provided).
is_serve: are we setting up the logging for "borg serve"? | def setup_logging(
stream=None, conf_fname=None, env_var="BORG_LOGGING_CONF", level="info", is_serve=False, log_json=False, func=None
):
"""setup logging module according to the arguments provided
if conf_fname is given (or the config file name can be determined via
the env_var, if given): load this logging configuration.
otherwise, set up a stream handler logger on stderr (by default, if no
stream is provided).
is_serve: are we setting up the logging for "borg serve"?
"""
global configured
err_msg = None
if env_var:
conf_fname = os.environ.get(env_var, conf_fname)
if conf_fname:
try:
conf_fname = os.path.abspath(conf_fname)
# we open the conf file here to be able to give a reasonable
# error message in case of failure (if we give the filename to
# fileConfig(), it silently ignores unreadable files and gives
# unhelpful error msgs like "No section: 'formatters'"):
with open(conf_fname) as f:
logging.config.fileConfig(f)
configured = True
logger = logging.getLogger(__name__)
logger.debug(f'using logging configuration read from "{conf_fname}"')
warnings.showwarning = _log_warning
return None
except Exception as err: # XXX be more precise
err_msg = str(err)
# if we did not / not successfully load a logging configuration, fallback to this:
level = level.upper()
fmt = "%(message)s"
formatter = JsonFormatter(fmt) if log_json else logging.Formatter(fmt)
SHandler = StderrHandler if stream is None else logging.StreamHandler
handler = BorgQueueHandler(borg_serve_log_queue) if is_serve else SHandler(stream)
handler.setFormatter(formatter)
logger = logging.getLogger()
remove_handlers(logger)
logger.setLevel(level)
if logging_debugging_path is not None:
# add an addtl. root handler for debugging purposes
log_fname = os.path.join(logging_debugging_path, f"borg-{'serve' if is_serve else 'client'}-root.log")
handler2 = logging.StreamHandler(open(log_fname, "a"))
handler2.setFormatter(formatter)
logger.addHandler(handler2)
logger.warning(f"--- {func} ---") # only handler2 shall get this
logger.addHandler(handler) # do this late, so handler is not added while debug handler is set up
bop_formatter = JSONProgressFormatter() if log_json else TextProgressFormatter()
bop_handler = BorgQueueHandler(borg_serve_log_queue) if is_serve else SHandler(stream)
bop_handler.setFormatter(bop_formatter)
bop_logger = logging.getLogger("borg.output.progress")
remove_handlers(bop_logger)
bop_logger.setLevel("INFO")
bop_logger.propagate = False
if logging_debugging_path is not None:
# add an addtl. progress handler for debugging purposes
log_fname = os.path.join(logging_debugging_path, f"borg-{'serve' if is_serve else 'client'}-progress.log")
bop_handler2 = logging.StreamHandler(open(log_fname, "a"))
bop_handler2.setFormatter(bop_formatter)
bop_logger.addHandler(bop_handler2)
json_dict = dict(
message=f"--- {func} ---", operation=0, msgid="", type="progress_message", finished=False, time=time.time()
)
bop_logger.warning(json.dumps(json_dict)) # only bop_handler2 shall get this
bop_logger.addHandler(bop_handler) # do this late, so bop_handler is not added while debug handler is set up
configured = True
logger = logging.getLogger(__name__)
if err_msg:
logger.warning(f'setup_logging for "{conf_fname}" failed with "{err_msg}".')
logger.debug("using builtin fallback logging configuration")
warnings.showwarning = _log_warning
return handler |
find the name of the first module calling this module
if we cannot find it, we return the current module's name
(__name__) instead. | def find_parent_module():
"""find the name of the first module calling this module
if we cannot find it, we return the current module's name
(__name__) instead.
"""
try:
frame = inspect.currentframe().f_back
module = inspect.getmodule(frame)
while module is None or module.__name__ == __name__:
frame = frame.f_back
module = inspect.getmodule(frame)
return module.__name__
except AttributeError:
# somehow we failed to find our module
# return the logger module name by default
return __name__ |
lazily create a Logger object with the proper path, which is returned by
find_parent_module() by default, or is provided via the commandline
this is really a shortcut for:
logger = logging.getLogger(__name__)
we use it to avoid errors and provide a more standard API.
We must create the logger lazily, because this is usually called from
module level (and thus executed at import time - BEFORE setup_logging()
was called). By doing it lazily we can do the setup first, we just have to
be careful not to call any logger methods before the setup_logging() call.
If you try, you'll get an exception. | def create_logger(name: str | None = None) -> LazyLogger:
"""lazily create a Logger object with the proper path, which is returned by
find_parent_module() by default, or is provided via the commandline
this is really a shortcut for:
logger = logging.getLogger(__name__)
we use it to avoid errors and provide a more standard API.
We must create the logger lazily, because this is usually called from
module level (and thus executed at import time - BEFORE setup_logging()
was called). By doing it lazily we can do the setup first, we just have to
be careful not to call any logger methods before the setup_logging() call.
If you try, you'll get an exception.
"""
return LazyLogger(name) |
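The intended pattern, sketched: create the lazy logger at import time, call setup_logging() early in the entry point, and only log after that.

# module level -- safe, because the real logging.Logger is created lazily
logger = create_logger()  # defaults to find_parent_module()'s name

def main():
    setup_logging(level="debug")  # must run before the first logger call
    logger.debug("logging is ready")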
Parse a pattern-file line and act depending on which command it represents. | def parse_patternfile_line(line, roots, ie_commands, fallback):
"""Parse a pattern-file line and act depending on which command it represents."""
ie_command = parse_inclexcl_command(line, fallback=fallback)
if ie_command.cmd is IECommand.RootPath:
roots.append(ie_command.val)
elif ie_command.cmd is IECommand.PatternStyle:
fallback = ie_command.val
else:
# it is some kind of include/exclude command
ie_commands.append(ie_command)
return fallback |
normalize paths for MacOS (but do nothing on other platforms) | def normalize_path(path):
"""normalize paths for MacOS (but do nothing on other platforms)"""
# HFS+ converts paths to a canonical form, so users shouldn't be required to enter an exact match.
# Windows and Unix filesystems allow different forms, so users always have to enter an exact match.
return unicodedata.normalize("NFD", path) if sys.platform == "darwin" else path |
Read pattern from string and return an instance of the appropriate implementation class. | def parse_pattern(pattern, fallback=FnmatchPattern, recurse_dir=True):
"""Read pattern from string and return an instance of the appropriate implementation class."""
if len(pattern) > 2 and pattern[2] == ":" and pattern[:2].isalnum():
(style, pattern) = (pattern[:2], pattern[3:])
cls = get_pattern_class(style)
else:
cls = fallback
return cls(pattern, recurse_dir) |
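Illustrative calls; the style prefixes shown ("re:", "sh:") are assumed to be registered with get_pattern_class, and the paths are made up:

p1 = parse_pattern("re:\\.tmp$")                           # explicit style prefix -> regex-based pattern class
p2 = parse_pattern("home/*/junk")                          # no prefix -> the fallback class (FnmatchPattern by default)
p3 = parse_pattern("sh:home/**/cache", recurse_dir=False)  # shell-style pattern, no recursion into matches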
Read pattern from string and return an instance of the appropriate implementation class. | def parse_exclude_pattern(pattern_str, fallback=FnmatchPattern):
"""Read pattern from string and return an instance of the appropriate implementation class."""
epattern_obj = parse_pattern(pattern_str, fallback, recurse_dir=False)
return CmdTuple(epattern_obj, IECommand.ExcludeNoRecurse) |
Read a --patterns-from command from string and return a CmdTuple object. | def parse_inclexcl_command(cmd_line_str, fallback=ShellPattern):
"""Read a --patterns-from command from string and return a CmdTuple object."""
cmd_prefix_map = {
"-": IECommand.Exclude,
"!": IECommand.ExcludeNoRecurse,
"+": IECommand.Include,
"R": IECommand.RootPath,
"r": IECommand.RootPath,
"P": IECommand.PatternStyle,
"p": IECommand.PatternStyle,
}
if not cmd_line_str:
raise argparse.ArgumentTypeError("A pattern/command must not be empty.")
cmd = cmd_prefix_map.get(cmd_line_str[0])
if cmd is None:
raise argparse.ArgumentTypeError("A pattern/command must start with any of: %s" % ", ".join(cmd_prefix_map))
# remaining text on command-line following the command character
remainder_str = cmd_line_str[1:].lstrip()
if not remainder_str:
raise argparse.ArgumentTypeError("A pattern/command must have a value part.")
if cmd is IECommand.RootPath:
# TODO: validate string?
val = remainder_str
elif cmd is IECommand.PatternStyle:
# then remainder_str is something like 're' or 'sh'
try:
val = get_pattern_class(remainder_str)
except ValueError:
raise argparse.ArgumentTypeError(f"Invalid pattern style: {remainder_str}")
else:
# determine recurse_dir based on command type
recurse_dir = command_recurses_dir(cmd)
val = parse_pattern(remainder_str, fallback, recurse_dir)
return CmdTuple(val, cmd) |
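What such command lines look like in practice; the paths are only examples and the "sh" style is assumed to be registered:

parse_inclexcl_command("R /home")              # root path to back up
parse_inclexcl_command("P sh")                 # switch the default pattern style for following lines
parse_inclexcl_command("+ /home/*/.config")    # include
parse_inclexcl_command("- /home/*/junk")       # exclude
parse_inclexcl_command("! /home/*/.cache")     # exclude and do not recurse into it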
return a regular expression string corresponding to the given pattern string.
the allowed pattern types are similar to the ones implemented by PatternBase subclasses,
but here we rather do generic string matching, not specialised filesystem paths matching. | def get_regex_from_pattern(pattern: str) -> str:
"""
return a regular expression string corresponding to the given pattern string.
the allowed pattern types are similar to the ones implemented by PatternBase subclasses,
but here we rather do generic string matching, not specialised filesystem paths matching.
"""
if len(pattern) > 2 and pattern[2] == ":" and pattern[:2] in {"sh", "re", "id"}:
(style, pattern) = (pattern[:2], pattern[3:])
else:
(style, pattern) = ("id", pattern) # "identical" match is the default
if style == "sh":
# (?ms) (meaning re.MULTILINE and re.DOTALL) are not desired here.
regex = shellpattern.translate(pattern, match_end="").removeprefix("(?ms)")
elif style == "re":
regex = pattern
elif style == "id":
regex = re.escape(pattern)
else:
raise NotImplementedError
return regex |
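A few illustrative translations; the exact text produced for the "sh:" case depends on shellpattern.translate, so it is not asserted here:

import re

assert get_regex_from_pattern("re:foo.+bar") == "foo.+bar"        # passed through unchanged
assert get_regex_from_pattern("foo.bar") == re.escape("foo.bar")  # default "id" style: literal match
regex = get_regex_from_pattern("sh:archive-*")                    # shell-style, via shellpattern.translate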
os.write wrapper so we do not lose data for partial writes. | def os_write(fd, data):
"""os.write wrapper so we do not lose data for partial writes."""
# TODO: this issue is fixed in cygwin since at least 2.8.0, remove this
# wrapper / workaround when this version is considered ancient.
# This is happening frequently on cygwin due to its small pipe buffer size of only 64kiB
# and also due to its different blocking pipe behaviour compared to Linux/*BSD.
# Neither Linux nor *BSD ever do partial writes on blocking pipes, unless interrupted by a
# signal, in which case serve() would terminate.
amount = remaining = len(data)
while remaining:
count = os.write(fd, data)
remaining -= count
if not remaining:
break
data = data[count:]
time.sleep(count * 1e-09)
return amount |
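A small self-contained exercise of the wrapper over an ordinary pipe; on Linux/*BSD this completes in a single os.write call, the retry loop only matters on platforms with partial writes:

import os

r, w = os.pipe()
try:
    payload = b"x" * 4096
    assert os_write(w, payload) == len(payload)
    assert os.read(r, len(payload)) == payload
finally:
    os.close(r)
    os.close(w)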
Check version requirements and use self.call to do the remote method call.
<since> specifies the version in which borg introduced this method.
Calling this method when connected to an older version will fail without transmitting anything to the server.
Further kwargs can be used to encode version specific restrictions:
<previously> is the value resulting in the behaviour before introducing the new parameter.
If a previous hardcoded behaviour is parameterized in a version, this allows calls that use the previously
hardcoded behaviour to pass through and generates an error if another behaviour is requested by the client.
E.g. when 'append_only' was introduced in 1.0.7 the previous behaviour was what now is append_only=False.
Thus @api(..., append_only={'since': parse_version('1.0.7'), 'previously': False}) allows calls
with append_only=False for all version but rejects calls using append_only=True on versions older than 1.0.7.
<dontcare> is a flag to set the behaviour if an old version is called the new way.
If set to True, the method is called without the (not yet supported) parameter (this should be done if that is the
more desirable behaviour). If False, an exception is generated.
E.g. before 'threshold' was introduced in 1.2.0a8, a hardcoded threshold of 0.1 was used in commit(). | def api(*, since, **kwargs_decorator):
"""Check version requirements and use self.call to do the remote method call.
<since> specifies the version in which borg introduced this method.
Calling this method when connected to an older version will fail without transmitting anything to the server.
Further kwargs can be used to encode version specific restrictions:
<previously> is the value resulting in the behaviour before introducing the new parameter.
If a previous hardcoded behaviour is parameterized in a version, this allows calls that use the previously
hardcoded behaviour to pass through and generates an error if another behaviour is requested by the client.
E.g. when 'append_only' was introduced in 1.0.7 the previous behaviour was what now is append_only=False.
Thus @api(..., append_only={'since': parse_version('1.0.7'), 'previously': False}) allows calls
with append_only=False for all version but rejects calls using append_only=True on versions older than 1.0.7.
<dontcare> is a flag to set the behaviour if an old version is called the new way.
If set to True, the method is called without the (not yet supported) parameter (this should be done if that is the
more desirable behaviour). If False, an exception is generated.
E.g. before 'threshold' was introduced in 1.2.0a8, a hardcoded threshold of 0.1 was used in commit().
"""
def decorator(f):
@functools.wraps(f)
def do_rpc(self, *args, **kwargs):
sig = inspect.signature(f)
bound_args = sig.bind(self, *args, **kwargs)
named = {} # Arguments for the remote process
extra = {} # Arguments for the local process
for name, param in sig.parameters.items():
if name == "self":
continue
if name in bound_args.arguments:
if name == "wait":
extra[name] = bound_args.arguments[name]
else:
named[name] = bound_args.arguments[name]
else:
if param.default is not param.empty:
named[name] = param.default
if self.server_version < since:
raise self.RPCServerOutdated(f.__name__, format_version(since))
for name, restriction in kwargs_decorator.items():
if restriction["since"] <= self.server_version:
continue
if "previously" in restriction and named[name] == restriction["previously"]:
continue
if restriction.get("dontcare", False):
continue
raise self.RPCServerOutdated(
f"{f.__name__} {name}={named[name]!s}", format_version(restriction["since"])
)
return self.call(f.__name__, named, **extra)
return do_rpc
return decorator |
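A sketch of how a remote-repository proxy method could be declared with this decorator; the class, method and parameters are illustrative, not borg's actual RPC surface:

class RemoteRepositorySketch:
    # assumed to exist on the real class: self.server_version, self.call(), self.RPCServerOutdated

    @api(since=parse_version("1.0.7"), append_only={"since": parse_version("1.0.7"), "previously": False})
    def save_key(self, keydata, append_only=False, wait=True):
        """never runs locally: do_rpc forwards the bound arguments to the server via self.call()"""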
Return a Repository(No)Cache for *repository*.
If *decrypted_cache* is a repo_objs object, then get and get_many will return a tuple
(csize, plaintext) instead of the actual data in the repository. The cache will
store decrypted data, which increases CPU efficiency (by avoiding repeatedly decrypting
and more importantly MAC and ID checking cached objects).
Internally, objects are compressed with LZ4. | def cache_if_remote(repository, *, decrypted_cache=False, pack=None, unpack=None, transform=None, force_cache=False):
"""
Return a Repository(No)Cache for *repository*.
If *decrypted_cache* is a repo_objs object, then get and get_many will return a tuple
(csize, plaintext) instead of the actual data in the repository. The cache will
store decrypted data, which increases CPU efficiency (by avoiding repeatedly decrypting
and more importantly MAC and ID checking cached objects).
Internally, objects are compressed with LZ4.
"""
if decrypted_cache and (pack or unpack or transform):
raise ValueError("decrypted_cache and pack/unpack/transform are incompatible")
elif decrypted_cache:
repo_objs = decrypted_cache
# 32 bit csize, 64 bit (8 byte) xxh64, 1 byte ctype, 1 byte clevel
cache_struct = struct.Struct("=I8sBB")
compressor = Compressor("lz4")
def pack(data):
csize, decrypted = data
meta, compressed = compressor.compress({}, decrypted)
return cache_struct.pack(csize, xxh64(compressed), meta["ctype"], meta["clevel"]) + compressed
def unpack(data):
data = memoryview(data)
csize, checksum, ctype, clevel = cache_struct.unpack(data[: cache_struct.size])
compressed = data[cache_struct.size :]
if checksum != xxh64(compressed):
raise IntegrityError("detected corrupted data in metadata cache")
meta = dict(ctype=ctype, clevel=clevel, csize=len(compressed))
_, decrypted = compressor.decompress(meta, compressed)
return csize, decrypted
def transform(id_, data):
meta, decrypted = repo_objs.parse(id_, data, ro_type=ROBJ_DONTCARE)
csize = meta.get("csize", len(data))
return csize, decrypted
if isinstance(repository, RemoteRepository) or force_cache:
return RepositoryCache(repository, pack, unpack, transform)
else:
return RepositoryNoCache(repository, transform) |
Simplistic parser for setuptools_scm versions.
Supports final versions and alpha ('a'), beta ('b') and release candidate ('rc') versions.
It does not try to parse anything else than that, even if there is more in the version string.
Output is a version tuple containing integers. It ends with one or two elements that ensure that relational
operators yield correct relations for alpha, beta and rc versions, too.
For final versions the last element is a -1.
For prerelease versions the last two elements are a smaller negative number and the number of e.g. the beta.
This version format is part of the remote protocol, don't change in breaking ways. | def parse_version(version):
"""
Simplistic parser for setuptools_scm versions.
Supports final versions and alpha ('a'), beta ('b') and release candidate ('rc') versions.
It does not try to parse anything else than that, even if there is more in the version string.
Output is a version tuple containing integers. It ends with one or two elements that ensure that relational
operators yield correct relations for alpha, beta and rc versions, too.
For final versions the last element is a -1.
For prerelease versions the last two elements are a smaller negative number and the number of e.g. the beta.
This version format is part of the remote protocol, don't change in breaking ways.
"""
version_re = r"""
(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+) # version, e.g. 1.2.33
(?P<prerelease>(?P<ptype>a|b|rc)(?P<pnum>\d+))? # optional prerelease, e.g. a1 or b2 or rc33
"""
m = re.match(version_re, version, re.VERBOSE)
if m is None:
raise ValueError("Invalid version string %s" % version)
gd = m.groupdict()
version = [int(gd["major"]), int(gd["minor"]), int(gd["patch"])]
if m.lastgroup == "prerelease":
p_type = {"a": -4, "b": -3, "rc": -2}[gd["ptype"]]
p_num = int(gd["pnum"])
version += [p_type, p_num]
else:
version += [-1]
return tuple(version) |
a reverse for parse_version (obviously without the dropped information) | def format_version(version):
"""a reverse for parse_version (obviously without the dropped information)"""
f = []
it = iter(version)
while True:
part = next(it)
if part >= 0:
f.append(str(part))
elif part == -1:
break
else:
f[-1] = f[-1] + {-2: "rc", -3: "b", -4: "a"}[part] + str(next(it))
break
return ".".join(f) |
Determine if xattr is enabled on the filesystem | def is_enabled(path=None):
"""Determine if xattr is enabled on the filesystem"""
with tempfile.NamedTemporaryFile(dir=path, prefix="borg-tmp") as f:
fd = f.fileno()
name, value = b"user.name", b"value"
try:
setxattr(fd, name, value)
except OSError:
return False
try:
names = listxattr(fd)
except OSError:
return False
if name not in names:
return False
return getxattr(fd, name) == value |
Return all extended attributes on *path* as a mapping.
*path* can either be a path (str or bytes) or an open file descriptor (int).
*follow_symlinks* indicates whether symlinks should be followed
and only applies when *path* is not an open file descriptor.
The returned mapping maps xattr names (bytes) to values (bytes or None).
None indicates, as a xattr value, an empty value, i.e. a value of length zero. | def get_all(path, follow_symlinks=False):
"""
Return all extended attributes on *path* as a mapping.
*path* can either be a path (str or bytes) or an open file descriptor (int).
*follow_symlinks* indicates whether symlinks should be followed
and only applies when *path* is not an open file descriptor.
The returned mapping maps xattr names (bytes) to values (bytes or None).
None indicates, as a xattr value, an empty value, i.e. a value of length zero.
"""
if isinstance(path, str):
path = os.fsencode(path)
result = {}
try:
names = listxattr(path, follow_symlinks=follow_symlinks)
for name in names:
try:
# xattr name is a bytes object, we directly use it.
result[name] = getxattr(path, name, follow_symlinks=follow_symlinks)
except OSError as e:
# note: platform.xattr._check has already made a nice exception e with errno, msg, path/fd
if e.errno in (ENOATTR,): # errors we just ignore silently
# ENOATTR: a race has happened: xattr names were deleted after list.
pass
else: # all others: warn, skip this single xattr name, continue processing other xattrs
# EPERM: we were not permitted to read this attribute
# EINVAL: maybe xattr name is invalid or other issue, #6988
logger.warning("when getting extended attribute %s: %s", name.decode(errors="replace"), str(e))
except OSError as e:
if e.errno in (errno.ENOTSUP, errno.EPERM):
# if xattrs are not supported on the filesystem, we give up.
# EPERM might be raised by listxattr.
pass
else:
raise
return result |
Set all extended attributes on *path* from a mapping.
*path* can either be a path (str or bytes) or an open file descriptor (int).
*follow_symlinks* indicates whether symlinks should be followed
and only applies when *path* is not an open file descriptor.
*xattrs* is a mapping that maps xattr names (bytes) to values (bytes or None).
None indicates, as a xattr value, an empty value, i.e. a value of length zero.
Return warning status (True means a non-fatal exception has happened and was dealt with). | def set_all(path, xattrs, follow_symlinks=False):
"""
Set all extended attributes on *path* from a mapping.
*path* can either be a path (str or bytes) or an open file descriptor (int).
*follow_symlinks* indicates whether symlinks should be followed
and only applies when *path* is not an open file descriptor.
*xattrs* is a mapping that maps xattr names (bytes) to values (bytes or None).
None indicates, as a xattr value, an empty value, i.e. a value of length zero.
Return warning status (True means a non-fatal exception has happened and was dealt with).
"""
if isinstance(path, str):
path = os.fsencode(path)
warning = False
for k, v in xattrs.items():
try:
setxattr(path, k, v, follow_symlinks=follow_symlinks)
except OSError as e:
# note: platform.xattr._check has already made a nice exception e with errno, msg, path/fd
warning = True
if e.errno == errno.E2BIG:
err_str = "too big for this filesystem (%s)" % str(e)
elif e.errno == errno.ENOSPC:
# ext4 reports ENOSPC when trying to set an xattr with >4kiB while ext4 can only support 4kiB xattrs
# (in this case, this is NOT a "disk full" error, just a ext4 limitation).
err_str = "fs full or xattr too big? [xattr len = %d] (%s)" % (len(v), str(e))
else:
# generic handler
# EACCES: permission denied to set this specific xattr (this may happen related to security.* keys)
# EPERM: operation not permitted
err_str = str(e)
logger.warning("when setting extended attribute %s: %s", k.decode(errors="replace"), err_str)
return warning |
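A round-trip sketch for the two helpers above; it only does something on a filesystem with user xattrs enabled, and the attribute name is arbitrary:

import os
import tempfile

with tempfile.NamedTemporaryFile(prefix="xattr-demo-") as f:
    if is_enabled(path=os.path.dirname(f.name)):
        set_all(f.name, {b"user.demo": b"value"})
        assert get_all(f.name).get(b"user.demo") == b"value"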
find chunks that need processing (usually: recompression). | def find_chunks(repository, repo_objs, stats, ctype, clevel, olevel):
"""find chunks that need processing (usually: recompression)."""
# to do it this way is maybe not obvious, thus keeping the essential design criteria here:
# - determine the chunk ids at one point in time (== do a **full** scan in one go) **before**
# writing to the repo (and especially before doing a compaction, which moves segment files around)
# - get the chunk ids in **on-disk order** (so we can efficiently compact while processing the chunks)
# - only put the ids into the list that actually need recompression (keeps it a little shorter in some cases)
recompress_ids = []
compr_keys = stats["compr_keys"] = set()
compr_wanted = ctype, clevel, olevel
state = None
chunks_count = len(repository)
chunks_limit = min(1000, max(100, chunks_count // 1000))
pi = ProgressIndicatorPercent(
total=chunks_count,
msg="Searching for recompression candidates %3.1f%%",
step=0.1,
msgid="rcompress.find_chunks",
)
while True:
chunk_ids, state = repository.scan(limit=chunks_limit, state=state)
if not chunk_ids:
break
for id, chunk_no_data in zip(chunk_ids, repository.get_many(chunk_ids, read_data=False)):
meta = repo_objs.parse_meta(id, chunk_no_data, ro_type=ROBJ_DONTCARE)
compr_found = meta["ctype"], meta["clevel"], meta.get("olevel", -1)
if compr_found != compr_wanted:
recompress_ids.append(id)
compr_keys.add(compr_found)
stats[compr_found] += 1
stats["checked_count"] += 1
pi.show(increase=1)
pi.finish()
return recompress_ids |
process some chunks (usually: recompress) | def process_chunks(repository, repo_objs, stats, recompress_ids, olevel):
"""process some chunks (usually: recompress)"""
compr_keys = stats["compr_keys"]
if compr_keys == 0: # work around defaultdict(int)
compr_keys = stats["compr_keys"] = set()
for id, chunk in zip(recompress_ids, repository.get_many(recompress_ids, read_data=True)):
old_size = len(chunk)
stats["old_size"] += old_size
meta, data = repo_objs.parse(id, chunk, ro_type=ROBJ_DONTCARE)
ro_type = meta.pop("type", None)
compr_old = meta["ctype"], meta["clevel"], meta.get("olevel", -1)
if olevel == -1:
# if the chunk was obfuscated, but should not be in future, remove related metadata
meta.pop("olevel", None)
meta.pop("psize", None)
chunk = repo_objs.format(id, meta, data, ro_type=ro_type)
compr_done = meta["ctype"], meta["clevel"], meta.get("olevel", -1)
if compr_done != compr_old:
# we actually changed something
repository.put(id, chunk, wait=False)
repository.async_response(wait=False)
stats["new_size"] += len(chunk)
compr_keys.add(compr_done)
stats[compr_done] += 1
stats["recompressed_count"] += 1
else:
# It might be that the old chunk used compression none or lz4 (for whatever reason,
# including the old compressor being a DecidingCompressor) AND we used a
# DecidingCompressor now, which did NOT compress like we wanted, but decided
# to use the same compression (and obfuscation) we already had.
# In this case, we just keep the old chunk and do not rewrite it -
# This is important to avoid rewriting such chunks **again and again**.
stats["new_size"] += old_size
compr_keys.add(compr_old)
stats[compr_old] += 1
stats["kept_count"] += 1 |
Method decorator for subcommand-handling methods: do_XYZ(self, args, repository, …)
If a parameter (where allowed) is a str, the attribute of args with that name is used instead.
:param create: create repository
:param lock: lock repository
:param exclusive: (bool) lock repository exclusively (for writing)
:param manifest: load manifest and repo_objs (key), pass them as keyword arguments
:param cache: open cache, pass it as keyword argument (implies manifest)
:param secure: do assert_secure after loading manifest
:param compatibility: mandatory if not create and (manifest or cache), specifies mandatory
feature categories to check | def with_repository(
create=False, lock=True, exclusive=False, manifest=True, cache=False, secure=True, compatibility=None
):
"""
Method decorator for subcommand-handling methods: do_XYZ(self, args, repository, …)
If a parameter (where allowed) is a str, the attribute of args with that name is used instead.
:param create: create repository
:param lock: lock repository
:param exclusive: (bool) lock repository exclusively (for writing)
:param manifest: load manifest and repo_objs (key), pass them as keyword arguments
:param cache: open cache, pass it as keyword argument (implies manifest)
:param secure: do assert_secure after loading manifest
:param compatibility: mandatory if not create and (manifest or cache), specifies mandatory
feature categories to check
"""
# Note: with_repository decorator does not have a "key" argument (yet?)
compatibility = compat_check(
create=create,
manifest=manifest,
key=manifest,
cache=cache,
compatibility=compatibility,
decorator_name="with_repository",
)
# To process the `--bypass-lock` option if specified, we need to
# modify `lock` inside `wrapper`. Therefore we cannot use the
# `nonlocal` statement to access `lock` as modifications would also
# affect the scope outside of `wrapper`. Subsequent calls would
# only see the overwritten value of `lock`, not the original one.
# The solution is to define a place holder variable `_lock` to
# propagate the value into `wrapper`.
_lock = lock
def decorator(method):
@functools.wraps(method)
def wrapper(self, args, **kwargs):
location = getattr(args, "location")
if not location.valid: # location always must be given
raise Error("missing repository, please use --repo or BORG_REPO env var!")
assert isinstance(exclusive, bool)
lock = getattr(args, "lock", _lock)
append_only = getattr(args, "append_only", False)
storage_quota = getattr(args, "storage_quota", None)
make_parent_dirs = getattr(args, "make_parent_dirs", False)
repository = get_repository(
location,
create=create,
exclusive=exclusive,
lock_wait=self.lock_wait,
lock=lock,
append_only=append_only,
make_parent_dirs=make_parent_dirs,
storage_quota=storage_quota,
args=args,
)
with repository:
if repository.version not in (2,):
raise Error(
"This borg version only accepts version 2 repos for -r/--repo. "
"You can use 'borg transfer' to copy archives from old to new repos."
)
if manifest or cache:
manifest_ = Manifest.load(repository, compatibility)
kwargs["manifest"] = manifest_
if "compression" in args:
manifest_.repo_objs.compressor = args.compression.compressor
if secure:
assert_secure(repository, manifest_, self.lock_wait)
if cache:
with Cache(
repository,
manifest_,
progress=getattr(args, "progress", False),
lock_wait=self.lock_wait,
cache_mode=getattr(args, "files_cache_mode", FILES_CACHE_MODE_DISABLED),
iec=getattr(args, "iec", False),
) as cache_:
return method(self, args, repository=repository, cache=cache_, **kwargs)
else:
return method(self, args, repository=repository, **kwargs)
return wrapper
return decorator |
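A sketch of how a subcommand method is declared with this decorator; the class, command name and the Manifest.Operation.READ compatibility constant are taken as assumptions about the surrounding code, and manifest.archives is assumed to behave like a mapping of archive names:

class ArchiverSketch:
    lock_wait = 1.0  # with_repository reads self.lock_wait

    @with_repository(manifest=True, compatibility=(Manifest.Operation.READ,))
    def do_sketch_info(self, args, repository, manifest):
        """by the time this body runs, the repo is open and locked and the manifest is loaded"""
        print(f"repository has {len(manifest.archives)} archives")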
this is a simplified version of "with_repository", just for the "other location".
the repository at the "other location" is intended to get used as a **source** (== read operations). | def with_other_repository(manifest=False, cache=False, compatibility=None):
"""
this is a simplified version of "with_repository", just for the "other location".
the repository at the "other location" is intended to get used as a **source** (== read operations).
"""
compatibility = compat_check(
create=False,
manifest=manifest,
key=manifest,
cache=cache,
compatibility=compatibility,
decorator_name="with_other_repository",
)
def decorator(method):
@functools.wraps(method)
def wrapper(self, args, **kwargs):
location = getattr(args, "other_location")
if not location.valid: # nothing to do
return method(self, args, **kwargs)
repository = get_repository(
location,
create=False,
exclusive=True,
lock_wait=self.lock_wait,
lock=True,
append_only=False,
make_parent_dirs=False,
storage_quota=None,
args=args,
)
with repository:
if repository.version not in (1, 2):
raise Error("This borg version only accepts version 1 or 2 repos for --other-repo.")
kwargs["other_repository"] = repository
if manifest or cache:
manifest_ = Manifest.load(
repository, compatibility, ro_cls=RepoObj if repository.version > 1 else RepoObj1
)
assert_secure(repository, manifest_, self.lock_wait)
if manifest:
kwargs["other_manifest"] = manifest_
if cache:
with Cache(
repository,
manifest_,
progress=False,
lock_wait=self.lock_wait,
cache_mode=getattr(args, "files_cache_mode", FILES_CACHE_MODE_DISABLED),
iec=getattr(args, "iec", False),
) as cache_:
kwargs["other_cache"] = cache_
return method(self, args, **kwargs)
else:
return method(self, args, **kwargs)
return wrapper
return decorator |
search the stack for infos about the currently processed file and print them | def sig_info_handler(sig_no, stack): # pragma: no cover
"""search the stack for infos about the currently processed file and print them"""
with signal_handler(sig_no, signal.SIG_IGN):
for frame in inspect.getouterframes(stack):
func, loc = frame[3], frame[0].f_locals
if func in ("process_file", "_rec_walk"): # create op
path = loc["path"]
try:
pos = loc["fd"].tell()
total = loc["st"].st_size
except Exception:
pos, total = 0, 0
logger.info(f"{path} {format_file_size(pos)}/{format_file_size(total)}")
break
if func in ("extract_item",): # extract op
path = loc["item"].path
try:
pos = loc["fd"].tell()
except Exception:
pos = 0
logger.info(f"{path} {format_file_size(pos)}/???")
break |
is the chunker secret the same? | def uses_same_chunker_secret(other_key, key):
"""is the chunker secret the same?"""
# avoid breaking the deduplication by a different chunker secret
same_chunker_secret = other_key.chunk_seed == key.chunk_seed
return same_chunker_secret |
other_key -> key upgrade: is the id hash the same? | def uses_same_id_hash(other_key, key):
"""other_key -> key upgrade: is the id hash the same?"""
# avoid breaking the deduplication by changing the id hash
old_sha256_ids = (PlaintextKey,)
new_sha256_ids = (PlaintextKey,)
old_hmac_sha256_ids = (RepoKey, KeyfileKey, AuthenticatedKey)
new_hmac_sha256_ids = (AESOCBRepoKey, AESOCBKeyfileKey, CHPORepoKey, CHPOKeyfileKey, AuthenticatedKey)
old_blake2_ids = (Blake2RepoKey, Blake2KeyfileKey, Blake2AuthenticatedKey)
new_blake2_ids = (
Blake2AESOCBRepoKey,
Blake2AESOCBKeyfileKey,
Blake2CHPORepoKey,
Blake2CHPOKeyfileKey,
Blake2AuthenticatedKey,
)
same_ids = (
isinstance(other_key, old_hmac_sha256_ids + new_hmac_sha256_ids)
and isinstance(key, new_hmac_sha256_ids)
or isinstance(other_key, old_blake2_ids + new_blake2_ids)
and isinstance(key, new_blake2_ids)
or isinstance(other_key, old_sha256_ids + new_sha256_ids)
and isinstance(key, new_sha256_ids)
)
return same_ids |