index | package | name | docstring | code | signature
---|---|---|---|---|---|
7,044 | growthbook | __init__ | null | def __init__(
self,
variationId: int,
inExperiment: bool,
value,
hashUsed: bool,
hashAttribute: str,
hashValue: str,
featureId: Optional[str],
meta: Optional[VariationMeta] = None,
bucket: Optional[float] = None,
stickyBucketUsed: bool = False,
) -> None:
self.variationId = variationId
self.inExperiment = inExperiment
self.value = value
self.hashUsed = hashUsed
self.hashAttribute = hashAttribute
self.hashValue = hashValue
self.featureId = featureId or None
self.bucket = bucket
self.stickyBucketUsed = stickyBucketUsed
self.key = str(variationId)
self.name = ""
self.passthrough = False
if meta:
if "name" in meta:
self.name = meta["name"]
if "key" in meta:
self.key = meta["key"]
if "passthrough" in meta:
self.passthrough = meta["passthrough"]
| (self, variationId: int, inExperiment: bool, value, hashUsed: bool, hashAttribute: str, hashValue: str, featureId: Optional[str], meta: Optional[growthbook.VariationMeta] = None, bucket: Optional[float] = None, stickyBucketUsed: bool = False) -> NoneType |
7,045 | growthbook | to_dict | null | def to_dict(self) -> dict:
obj = {
"featureId": self.featureId,
"variationId": self.variationId,
"inExperiment": self.inExperiment,
"value": self.value,
"hashUsed": self.hashUsed,
"hashAttribute": self.hashAttribute,
"hashValue": self.hashValue,
"key": self.key,
"stickyBucketUsed": self.stickyBucketUsed,
}
if self.bucket is not None:
obj["bucket"] = self.bucket
if self.name:
obj["name"] = self.name
if self.passthrough:
obj["passthrough"] = True
return obj
| (self) -> dict |
7,046 | typing | TypedDict | A simple typed namespace. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type that expects all of its
instances to have a certain set of keys, where each key is
associated with a value of a consistent type. This expectation
is not checked at runtime but is only enforced by type checkers.
Usage::
class Point2D(TypedDict):
x: int
y: int
label: str
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
The type info can be accessed via the Point2D.__annotations__ dict, and
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
TypedDict supports two additional equivalent forms::
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
By default, all keys must be present in a TypedDict. It is possible
to override this by specifying totality.
Usage::
class point2D(TypedDict, total=False):
x: int
y: int
This means that a point2D TypedDict can have any of the keys omitted. A type
checker is only expected to support a literal False or True as the value of
the total argument. True is the default, and makes all items defined in the
class body be required.
The class syntax is only supported in Python 3.6+, while two other
syntax forms work for Python 2.7 and 3.2+
| def TypedDict(typename, fields=None, /, *, total=True, **kwargs):
"""A simple typed namespace. At runtime it is equivalent to a plain dict.
TypedDict creates a dictionary type that expects all of its
instances to have a certain set of keys, where each key is
associated with a value of a consistent type. This expectation
is not checked at runtime but is only enforced by type checkers.
Usage::
class Point2D(TypedDict):
x: int
y: int
label: str
a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
The type info can be accessed via the Point2D.__annotations__ dict, and
the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
TypedDict supports two additional equivalent forms::
Point2D = TypedDict('Point2D', x=int, y=int, label=str)
Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
By default, all keys must be present in a TypedDict. It is possible
to override this by specifying totality.
Usage::
class point2D(TypedDict, total=False):
x: int
y: int
This means that a point2D TypedDict can have any of the keys omitted. A type
checker is only expected to support a literal False or True as the value of
the total argument. True is the default, and makes all items defined in the
class body be required.
The class syntax is only supported in Python 3.6+, while two other
syntax forms work for Python 2.7 and 3.2+
"""
if fields is None:
fields = kwargs
elif kwargs:
raise TypeError("TypedDict takes either a dict or keyword arguments,"
" but not both")
ns = {'__annotations__': dict(fields)}
try:
# Setting correct module is necessary to make typed dict classes pickleable.
ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return _TypedDictMeta(typename, (), ns, total=total)
| (typename, fields=None, /, *, total=True, **kwargs) |
7,047 | growthbook | VariationMeta | null | class VariationMeta(TypedDict):
key: str
name: str
passthrough: bool
| null |
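Since `VariationMeta` subclasses `TypedDict`, an instance is a plain dict at runtime; a minimal sketch of constructing one (the key values here are made up for illustration):

```python
# At runtime a VariationMeta is just a dict; keys are checked only by type checkers.
meta: VariationMeta = {"key": "control", "name": "Control", "passthrough": False}
assert meta["key"] == "control"
```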
7,048 | abc | abstractmethod | A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms. abstractmethod() may be used to declare
abstract methods for properties and descriptors.
Usage:
class C(metaclass=ABCMeta):
@abstractmethod
def my_abstract_method(self, ...):
...
| def abstractmethod(funcobj):
"""A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms. abstractmethod() may be used to declare
abstract methods for properties and descriptors.
Usage:
class C(metaclass=ABCMeta):
@abstractmethod
def my_abstract_method(self, ...):
...
"""
funcobj.__isabstractmethod__ = True
return funcobj
| (funcobj) |
7,050 | base64 | b64decode | Decode the Base64 encoded bytes-like object or ASCII string s.
Optional altchars must be a bytes-like object or ASCII string of length 2
which specifies the alternative alphabet used instead of the '+' and '/'
characters.
The result is returned as a bytes object. A binascii.Error is raised if
s is incorrectly padded.
If validate is False (the default), characters that are neither in the
normal base-64 alphabet nor the alternative alphabet are discarded prior
to the padding check. If validate is True, these non-alphabet characters
in the input result in a binascii.Error.
| def b64decode(s, altchars=None, validate=False):
"""Decode the Base64 encoded bytes-like object or ASCII string s.
Optional altchars must be a bytes-like object or ASCII string of length 2
which specifies the alternative alphabet used instead of the '+' and '/'
characters.
The result is returned as a bytes object. A binascii.Error is raised if
s is incorrectly padded.
If validate is False (the default), characters that are neither in the
normal base-64 alphabet nor the alternative alphabet are discarded prior
to the padding check. If validate is True, these non-alphabet characters
in the input result in a binascii.Error.
"""
s = _bytes_from_decode_data(s)
if altchars is not None:
altchars = _bytes_from_decode_data(altchars)
assert len(altchars) == 2, repr(altchars)
s = s.translate(bytes.maketrans(altchars, b'+/'))
if validate and not re.fullmatch(b'[A-Za-z0-9+/]*={0,2}', s):
raise binascii.Error('Non-base64 digit found')
return binascii.a2b_base64(s)
| (s, altchars=None, validate=False) |
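A small usage sketch of the `altchars` parameter, mapping the URL-safe alphabet ('-' and '_') back to '+' and '/':

```python
import base64

# Round-trip with the standard alphabet
assert base64.b64decode(base64.b64encode(b"data")) == b"data"

# altchars maps a two-character alternative alphabet back to '+' and '/';
# here, the URL-safe alphabet that substitutes '-' and '_'
urlsafe = base64.urlsafe_b64encode(b"\xfb\xff\xfe")  # b"-__-"
assert base64.b64decode(urlsafe, altchars=b"-_") == b"\xfb\xff\xfe"
```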
7,051 | growthbook | chooseVariation | null | def chooseVariation(n: float, ranges: List[Tuple[float, float]]) -> int:
for i, r in enumerate(ranges):
if inRange(n, r):
return i
return -1
| (n: float, ranges: List[Tuple[float, float]]) -> int |
7,052 | growthbook | compare | null | def compare(val1, val2) -> int:
if (type(val1) is int or type(val1) is float) and not (type(val2) is int or type(val2) is float):
if (val2 is None):
val2 = 0
else:
val2 = float(val2)
if (type(val2) is int or type(val2) is float) and not (type(val1) is int or type(val1) is float):
if (val1 is None):
val1 = 0
else:
val1 = float(val1)
if val1 > val2:
return 1
if val1 < val2:
return -1
return 0
| (val1, val2) -> int |
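A quick sketch of the coercion rules: when exactly one side is numeric, the other side is coerced to a float, with None treated as 0:

```python
assert compare(10, "9") == 1    # "9" is coerced to 9.0 before comparing
assert compare(None, 0) == 0    # None on the non-numeric side becomes 0
assert compare("a", "b") == -1  # both non-numeric: plain Python comparison
```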
7,053 | growthbook | decrypt | null | def decrypt(encrypted_str: str, key_str: str) -> str:
iv_str, ct_str = encrypted_str.split(".", 2)
key = b64decode(key_str)
iv = b64decode(iv_str)
ct = b64decode(ct_str)
cipher = Cipher(algorithms.AES128(key), modes.CBC(iv))
decryptor = cipher.decryptor()
decrypted = decryptor.update(ct) + decryptor.finalize()
unpadder = padding.PKCS7(128).unpadder()
bytestring = unpadder.update(decrypted) + unpadder.finalize()
return bytestring.decode("utf-8")
| (encrypted_str: str, key_str: str) -> str |
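A round-trip sketch, assuming a recent `cryptography` package: it builds the `iv.ct` payload shape that `decrypt` expects and checks the result. Key, IV, and plaintext here are arbitrary.

```python
import os
from base64 import b64encode
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

key = os.urandom(16)  # 128-bit AES key, as required by AES128 in decrypt()
iv = os.urandom(16)   # CBC initialization vector

padder = padding.PKCS7(128).padder()
padded = padder.update(b'{"feature": true}') + padder.finalize()
encryptor = Cipher(algorithms.AES(key), modes.CBC(iv)).encryptor()
ct = encryptor.update(padded) + encryptor.finalize()

payload = b64encode(iv).decode() + "." + b64encode(ct).decode()
assert decrypt(payload, b64encode(key).decode()) == '{"feature": true}'
```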
7,054 | growthbook | elemMatch | null | def elemMatch(condition, attributeValue) -> bool:
if not type(attributeValue) is list:
return False
for item in attributeValue:
if isOperatorObject(condition):
if evalConditionValue(condition, item):
return True
else:
if evalCondition(item, condition):
return True
return False
| (condition, attributeValue) -> bool |
7,055 | growthbook | evalAnd | null | def evalAnd(attributes, conditions) -> bool:
for condition in conditions:
if not evalCondition(attributes, condition):
return False
return True
| (attributes, conditions) -> bool |
7,056 | growthbook | evalCondition | null | def evalCondition(attributes: dict, condition: dict) -> bool:
if "$or" in condition:
return evalOr(attributes, condition["$or"])
if "$nor" in condition:
return not evalOr(attributes, condition["$nor"])
if "$and" in condition:
return evalAnd(attributes, condition["$and"])
if "$not" in condition:
return not evalCondition(attributes, condition["$not"])
for key, value in condition.items():
if not evalConditionValue(value, getPath(attributes, key)):
return False
return True
| (attributes: dict, condition: dict) -> bool |
7,057 | growthbook | evalConditionValue | null | def evalConditionValue(conditionValue, attributeValue) -> bool:
if type(conditionValue) is dict and isOperatorObject(conditionValue):
for key, value in conditionValue.items():
if not evalOperatorCondition(key, attributeValue, value):
return False
return True
return conditionValue == attributeValue
| (conditionValue, attributeValue) -> bool |
7,058 | growthbook | evalOperatorCondition | null | def evalOperatorCondition(operator, attributeValue, conditionValue) -> bool:
if operator == "$eq":
try:
return compare(attributeValue, conditionValue) == 0
except Exception:
return False
elif operator == "$ne":
try:
return compare(attributeValue, conditionValue) != 0
except Exception:
return False
elif operator == "$lt":
try:
return compare(attributeValue, conditionValue) < 0
except Exception:
return False
elif operator == "$lte":
try:
return compare(attributeValue, conditionValue) <= 0
except Exception:
return False
elif operator == "$gt":
try:
return compare(attributeValue, conditionValue) > 0
except Exception:
return False
elif operator == "$gte":
try:
return compare(attributeValue, conditionValue) >= 0
except Exception:
return False
elif operator == "$veq":
return paddedVersionString(attributeValue) == paddedVersionString(conditionValue)
elif operator == "$vne":
return paddedVersionString(attributeValue) != paddedVersionString(conditionValue)
elif operator == "$vlt":
return paddedVersionString(attributeValue) < paddedVersionString(conditionValue)
elif operator == "$vlte":
return paddedVersionString(attributeValue) <= paddedVersionString(conditionValue)
elif operator == "$vgt":
return paddedVersionString(attributeValue) > paddedVersionString(conditionValue)
elif operator == "$vgte":
return paddedVersionString(attributeValue) >= paddedVersionString(conditionValue)
elif operator == "$regex":
try:
r = re.compile(conditionValue)
return bool(r.search(attributeValue))
except Exception:
return False
elif operator == "$in":
if not type(conditionValue) is list:
return False
return isIn(conditionValue, attributeValue)
elif operator == "$nin":
if not type(conditionValue) is list:
return False
return not isIn(conditionValue, attributeValue)
elif operator == "$elemMatch":
return elemMatch(conditionValue, attributeValue)
elif operator == "$size":
if not (type(attributeValue) is list):
return False
return evalConditionValue(conditionValue, len(attributeValue))
elif operator == "$all":
if not (type(attributeValue) is list):
return False
for cond in conditionValue:
passing = False
for attr in attributeValue:
if evalConditionValue(cond, attr):
passing = True
if not passing:
return False
return True
elif operator == "$exists":
if not conditionValue:
return attributeValue is None
return attributeValue is not None
elif operator == "$type":
return getType(attributeValue) == conditionValue
elif operator == "$not":
return not evalConditionValue(conditionValue, attributeValue)
return False
| (operator, attributeValue, conditionValue) -> bool |
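A sketch of how these operators compose through `evalCondition`: dotted keys are resolved with `getPath`, and operator objects apply each `$`-prefixed test to the resolved value (attribute names here are illustrative):

```python
attributes = {"user": {"age": 30, "country": "NO", "tags": ["beta", "pro"]}}

assert evalCondition(attributes, {"user.age": {"$gte": 18, "$lt": 65}})
assert evalCondition(attributes, {"user.country": {"$in": ["NO", "SE"]}})
assert evalCondition(attributes, {"$not": {"user.tags": {"$all": ["beta", "admin"]}}})
```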
7,059 | growthbook | evalOr | null | def evalOr(attributes, conditions) -> bool:
if len(conditions) == 0:
return True
for condition in conditions:
if evalCondition(attributes, condition):
return True
return False
| (attributes, conditions) -> bool |
7,060 | growthbook | fnv1a32 | null | def fnv1a32(str: str) -> int:
hval = 0x811C9DC5
prime = 0x01000193
uint32_max = 2 ** 32
for s in str:
hval = hval ^ ord(s)
hval = (hval * prime) % uint32_max
return hval
| (str: str) -> int |
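Two standard FNV-1a 32-bit test vectors: the empty string hashes to the offset basis, and "a" has a well-known digest:

```python
assert fnv1a32("") == 0x811C9DC5   # offset basis: no bytes mixed in
assert fnv1a32("a") == 0xE40C292C  # published FNV-1a test vector
```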
7,061 | growthbook | gbhash | null | def gbhash(seed: str, value: str, version: int) -> Optional[float]:
if version == 2:
n = fnv1a32(str(fnv1a32(seed + value)))
return (n % 10000) / 10000
if version == 1:
n = fnv1a32(value + seed)
return (n % 1000) / 1000
return None
| (seed: str, value: str, version: int) -> Optional[float] |
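The hash is deterministic, which is what makes bucketing stable across sessions; a small sketch (the seed and user id are made up):

```python
n = gbhash("my-feature", "user-123", 2)
assert n is not None and 0 <= n < 1               # version 2: 4 decimal digits
assert n == gbhash("my-feature", "user-123", 2)   # same inputs, same position
assert gbhash("my-feature", "user-123", 3) is None  # unknown hash version
```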
7,062 | growthbook | getBucketRanges | null | def getBucketRanges(
numVariations: int, coverage: float = 1, weights: Optional[List[float]] = None
) -> List[Tuple[float, float]]:
if coverage < 0:
coverage = 0
if coverage > 1:
coverage = 1
if weights is None:
weights = getEqualWeights(numVariations)
if len(weights) != numVariations:
weights = getEqualWeights(numVariations)
if sum(weights) < 0.99 or sum(weights) > 1.01:
weights = getEqualWeights(numVariations)
cumulative: float = 0
ranges = []
for w in weights:
start = cumulative
cumulative += w
ranges.append((start, start + coverage * w))
return ranges
| (numVariations: int, coverage: float = 1, weights: Optional[List[float]] = None) -> List[Tuple[float, float]] |
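How the pieces fit together for variation assignment; a sketch with a hypothetical experiment key. With coverage 1 and equal weights, the two ranges tile [0, 1):

```python
ranges = getBucketRanges(2, coverage=1, weights=[0.5, 0.5])
assert ranges == [(0, 0.5), (0.5, 1.0)]

n = gbhash("exp-key", "user-123", 2)    # deterministic position in [0, 1)
variation = chooseVariation(n, ranges)  # -1 if n falls outside every range
assert variation in (0, 1)
```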
7,063 | growthbook | getEqualWeights | null | def getEqualWeights(numVariations: int) -> List[float]:
if numVariations < 1:
return []
return [1 / numVariations for _ in range(numVariations)]
| (numVariations: int) -> List[float] |
7,064 | growthbook | getPath | null | def getPath(attributes, path):
current = attributes
for segment in path.split("."):
if type(current) is dict and segment in current:
current = current[segment]
else:
return None
return current
| (attributes, path) |
7,065 | growthbook | getQueryStringOverride | null | def getQueryStringOverride(id: str, url: str, numVariations: int) -> Optional[int]:
res = urlparse(url)
if not res.query:
return None
qs = parse_qs(res.query)
if id not in qs:
return None
variation = qs[id][0]
if variation is None or not variation.isdigit():
return None
varId = int(variation)
if varId < 0 or varId >= numVariations:
return None
return varId
| (id: str, url: str, numVariations: int) -> Optional[int] |
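A sketch of the override behavior with a hypothetical URL: the value must parse as a digit and index an existing variation:

```python
url = "https://example.com/page?my-exp=1&other=x"
assert getQueryStringOverride("my-exp", url, 2) == 1
assert getQueryStringOverride("my-exp", url, 1) is None   # 1 is out of range
assert getQueryStringOverride("missing", url, 2) is None  # id absent from query
```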
7,066 | growthbook | getType | null | def getType(attributeValue) -> str:
t = type(attributeValue)
if attributeValue is None:
return "null"
if t is int or t is float:
return "number"
if t is str:
return "string"
if t is list or t is set:
return "array"
if t is dict:
return "object"
if t is bool:
return "boolean"
return "unknown"
| (attributeValue) -> str |
7,067 | growthbook | inNamespace | null | def inNamespace(userId: str, namespace: Tuple[str, float, float]) -> bool:
n = gbhash("__" + namespace[0], userId, 1)
if n is None:
return False
return namespace[1] <= n < namespace[2]
| (userId: str, namespace: Tuple[str, float, float]) -> bool |
7,068 | growthbook | inRange | null | def inRange(n: float, range: Tuple[float, float]) -> bool:
return range[0] <= n < range[1]
| (n: float, range: Tuple[float, float]) -> bool |
7,069 | growthbook | isIn | null | def isIn(conditionValue, attributeValue) -> bool:
if type(attributeValue) is list:
return bool(set(conditionValue) & set(attributeValue))
return attributeValue in conditionValue
| (conditionValue, attributeValue) -> bool |
7,070 | growthbook | isOperatorObject | null | def isOperatorObject(obj) -> bool:
for key in obj.keys():
if key[0] != "$":
return False
return True
| (obj) -> bool |
7,074 | growthbook | paddedVersionString | null | def paddedVersionString(input) -> str:
# If input is a number, convert to a string
if type(input) is int or type(input) is float:
input = str(input)
if not input or type(input) is not str:
input = "0"
# Remove build info and leading `v` if any
input = re.sub(r"(^v|\+.*$)", "", input)
# Split version into parts (both core version numbers and pre-release tags)
# "v1.2.3-rc.1+build123" -> ["1","2","3","rc","1"]
parts = re.split(r"[-.]", input)
# If it's SemVer without a pre-release, add `~` to the end
# ["1","0","0"] -> ["1","0","0","~"]
# "~" is the largest ASCII character, so this will make "1.0.0" greater than "1.0.0-beta" for example
if len(parts) == 3:
parts.append("~")
# Left pad each numeric part with spaces so string comparisons will work ("9">"10", but " 9"<"10")
# Then, join back together into a single string
return "-".join([v.rjust(5, " ") if re.match(r"^[0-9]+$", v) else v for v in parts])
| (input) -> str |
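The padding makes lexicographic comparison agree with SemVer ordering; a few illustrative checks:

```python
assert paddedVersionString("1.9.0") < paddedVersionString("1.10.0")      # "    9" < "   10"
assert paddedVersionString("1.0.0-beta") < paddedVersionString("1.0.0")  # "~" sorts after "beta"
assert paddedVersionString("v1.2.3+build123") == paddedVersionString("1.2.3")
```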
7,076 | urllib.parse | parse_qs | Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
max_num_fields: int. If set, then throws a ValueError if there
are more than n fields read by parse_qsl().
separator: str. The symbol to use for separating the query arguments.
Defaults to &.
Returns a dictionary.
| def parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace', max_num_fields=None, separator='&'):
"""Parse a query given as a string argument.
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings.
A true value indicates that blanks should be retained as
blank strings. The default false value indicates that
blank values are to be ignored and treated as if they were
not included.
strict_parsing: flag indicating what to do with parsing errors.
If false (the default), errors are silently ignored.
If true, errors raise a ValueError exception.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
max_num_fields: int. If set, then throws a ValueError if there
are more than n fields read by parse_qsl().
separator: str. The symbol to use for separating the query arguments.
Defaults to &.
Returns a dictionary.
"""
parsed_result = {}
pairs = parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors,
max_num_fields=max_num_fields, separator=separator)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
| (qs, keep_blank_values=False, strict_parsing=False, encoding='utf-8', errors='replace', max_num_fields=None, separator='&') |
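A short sketch of the defaults: repeated keys accumulate into lists, and blank values are dropped unless keep_blank_values is set:

```python
from urllib.parse import parse_qs

assert parse_qs("a=1&a=2&b=3") == {"a": ["1", "2"], "b": ["3"]}
assert parse_qs("a=&b=1") == {"b": ["1"]}
assert parse_qs("a=&b=1", keep_blank_values=True) == {"a": [""], "b": ["1"]}
```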
7,080 | importlib.metadata | PackageNotFoundError | The package was not found. | class PackageNotFoundError(ModuleNotFoundError):
"""The package was not found."""
def __str__(self):
return f"No package metadata was found for {self.name}"
@property
def name(self):
(name,) = self.args
return name
| null |
7,081 | importlib.metadata | __str__ | null | def __str__(self):
return f"No package metadata was found for {self.name}"
| (self) |
7,083 | mapply.mapply | mapply | Run apply on n_workers. Split in chunks if sensible, gather results, and concat.
When using :meth:`mapply.init`, the signature of this method will behave the same as
:meth:`pandas.DataFrame.apply`/:meth:`pandas.Series.apply`/:meth:`pandas.core.groupby.GroupBy.apply`.
Args:
df_or_series: Argument reserved to the class instance, a.k.a. 'self'.
func: func to apply to each column or row.
axis: Axis along which func is applied.
n_workers: Maximum amount of workers (processes) to spawn. Might be lowered
depending on chunk_size and max_chunks_per_worker. Will throw a warning if
set higher than is sensible (see :meth:`mapply.parallel.sensible_cpu_count`).
chunk_size: Minimum amount of columns/rows per chunk. Higher value means a higher
threshold to go multi-core. Set to 1 to let max_chunks_per_worker decide.
max_chunks_per_worker: Upper limit on amount of chunks per worker. Will lower
n_chunks determined by chunk_size if necessary. Set to 0 to skip this check.
progressbar: Whether to wrap the chunks in a :meth:`tqdm.auto.tqdm`.
args: Additional positional arguments to pass to func.
**kwargs: Additional keyword arguments to pass to apply/func.
Returns:
Series or DataFrame resulting from applying func along given axis.
Raises:
ValueError: if a Series is passed in combination with axis=1
| def mapply( # noqa: PLR0913
df_or_series: Any,
func: Callable,
axis: int | str = 0,
*,
n_workers: int = -1,
chunk_size: int = DEFAULT_CHUNK_SIZE,
max_chunks_per_worker: int = DEFAULT_MAX_CHUNKS_PER_WORKER,
progressbar: bool = True,
args: tuple[Any, ...] = (),
**kwargs: Any,
) -> Any:
"""Run apply on n_workers. Split in chunks if sensible, gather results, and concat.
When using :meth:`mapply.init`, the signature of this method will behave the same as
:meth:`pandas.DataFrame.apply`/:meth:`pandas.Series.apply`/:meth:`pandas.core.groupby.GroupBy.apply`.
Args:
df_or_series: Argument reserved to the class instance, a.k.a. 'self'.
func: func to apply to each column or row.
axis: Axis along which func is applied.
n_workers: Maximum amount of workers (processes) to spawn. Might be lowered
depending on chunk_size and max_chunks_per_worker. Will throw a warning if
set higher than is sensible (see :meth:`mapply.parallel.sensible_cpu_count`).
chunk_size: Minimum amount of columns/rows per chunk. Higher value means a higher
threshold to go multi-core. Set to 1 to let max_chunks_per_worker decide.
max_chunks_per_worker: Upper limit on amount of chunks per worker. Will lower
n_chunks determined by chunk_size if necessary. Set to 0 to skip this check.
progressbar: Whether to wrap the chunks in a :meth:`tqdm.auto.tqdm`.
args: Additional positional arguments to pass to func.
**kwargs: Additional keyword arguments to pass to apply/func.
Returns:
Series or DataFrame resulting from applying func along given axis.
Raises:
ValueError: if a Series is passed in combination with axis=1
"""
from numpy import array_split
from pandas import Series, concat
from pandas.core.groupby import GroupBy
if isinstance(df_or_series, GroupBy):
return run_groupwise_apply(
df_or_series,
func,
n_workers=n_workers,
progressbar=progressbar,
args=args,
**kwargs,
)
if isinstance(axis, str):
axis = ["index", "columns"].index(axis)
isseries = int(isinstance(df_or_series, Series))
if isseries and axis == 1:
msg = "Passing axis=1 is not allowed for Series"
raise ValueError(msg)
opposite_axis = 1 - (isseries or axis)
n_chunks = _choose_n_chunks(
df_or_series.shape,
opposite_axis,
n_workers,
chunk_size,
max_chunks_per_worker,
)
dfs = array_split(df_or_series, n_chunks, axis=opposite_axis)
def run_apply(func, df_or_series, args=(), **kwargs):
return df_or_series.apply(func, args=args, **kwargs)
if not isseries:
kwargs["axis"] = axis
results = list(
multiprocessing_imap(
partial(run_apply, func, args=args, **kwargs),
dfs,
n_workers=n_workers,
progressbar=progressbar,
),
)
if isseries or len(results) == 1 or sum(map(len, results)) in df_or_series.shape:
return concat(results, copy=False)
return concat(results, axis=1, copy=False)
| (df_or_series: Any, func: Callable, axis: int | str = 0, *, n_workers: int = -1, chunk_size: int = 100, max_chunks_per_worker: int = 8, progressbar: bool = True, args: tuple[typing.Any, ...] = (), **kwargs: Any) -> Any |
7,085 | mapply | init | Patch Pandas, adding multi-core methods to PandasObject.
Subsequent calls to this function will create/overwrite methods with new settings.
Args:
n_workers: Maximum amount of workers (processes) to spawn. Might be lowered
depending on chunk_size and max_chunks_per_worker. Will throw a warning if
set higher than is sensible (see :meth:`mapply.parallel.sensible_cpu_count`).
chunk_size: Minimum amount of columns/rows per chunk. Higher value means a higher
threshold to go multi-core. Set to 1 to let max_chunks_per_worker decide.
max_chunks_per_worker: Upper limit on amount of chunks per worker. Will lower
n_chunks determined by chunk_size if necessary. Set to 0 to skip this check.
progressbar: Whether to wrap the chunks in a :meth:`tqdm.auto.tqdm`.
apply_name: Method name for the patched apply function.
| def init(
*,
n_workers: int = -1,
chunk_size: int = DEFAULT_CHUNK_SIZE,
max_chunks_per_worker: int = DEFAULT_MAX_CHUNKS_PER_WORKER,
progressbar: bool = True,
apply_name: str = "mapply",
):
"""Patch Pandas, adding multi-core methods to PandasObject.
Subsequent calls to this function will create/overwrite methods with new settings.
Args:
n_workers: Maximum amount of workers (processes) to spawn. Might be lowered
depending on chunk_size and max_chunks_per_worker. Will throw a warning if
set higher than is sensible (see :meth:`mapply.parallel.sensible_cpu_count`).
chunk_size: Minimum amount of columns/rows per chunk. Higher value means a higher
threshold to go multi-core. Set to 1 to let max_chunks_per_worker decide.
max_chunks_per_worker: Upper limit on amount of chunks per worker. Will lower
n_chunks determined by chunk_size if necessary. Set to 0 to skip this check.
progressbar: Whether to wrap the chunks in a :meth:`tqdm.auto.tqdm`.
apply_name: Method name for the patched apply function.
"""
from pandas.core.base import PandasObject
apply = partialmethod(
_mapply,
n_workers=n_workers,
chunk_size=chunk_size,
max_chunks_per_worker=max_chunks_per_worker,
progressbar=progressbar,
)
setattr(PandasObject, apply_name, apply)
| (*, n_workers: int = -1, chunk_size: int = 100, max_chunks_per_worker: int = 8, progressbar: bool = True, apply_name: str = 'mapply') |
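A usage sketch, assuming `mapply` and `pandas` are installed: after `init`, every pandas object gains a `.mapply` method with the same signature as `.apply`:

```python
import pandas as pd
import mapply

mapply.init(n_workers=-1)  # patch PandasObject with a .mapply method

df = pd.DataFrame({"x": range(10_000)})
# Same call shape as .apply, but chunks are processed across worker processes
df["y"] = df["x"].mapply(lambda v: v ** 2)
```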
7,088 | functools | partialmethod | Method descriptor with partial application of the given arguments
and keywords.
Supports wrapping existing descriptors and handles non-descriptor
callables as instance methods.
| class partialmethod(object):
"""Method descriptor with partial application of the given arguments
and keywords.
Supports wrapping existing descriptors and handles non-descriptor
callables as instance methods.
"""
def __init__(self, func, /, *args, **keywords):
if not callable(func) and not hasattr(func, "__get__"):
raise TypeError("{!r} is not callable or a descriptor"
.format(func))
# func could be a descriptor like classmethod which isn't callable,
# so we can't inherit from partial (it verifies func is callable)
if isinstance(func, partialmethod):
# flattening is mandatory in order to place cls/self before all
# other arguments
# it's also more efficient since only one function will be called
self.func = func.func
self.args = func.args + args
self.keywords = {**func.keywords, **keywords}
else:
self.func = func
self.args = args
self.keywords = keywords
def __repr__(self):
args = ", ".join(map(repr, self.args))
keywords = ", ".join("{}={!r}".format(k, v)
for k, v in self.keywords.items())
format_string = "{module}.{cls}({func}, {args}, {keywords})"
return format_string.format(module=self.__class__.__module__,
cls=self.__class__.__qualname__,
func=self.func,
args=args,
keywords=keywords)
def _make_unbound_method(self):
def _method(cls_or_self, /, *args, **keywords):
keywords = {**self.keywords, **keywords}
return self.func(cls_or_self, *self.args, *args, **keywords)
_method.__isabstractmethod__ = self.__isabstractmethod__
_method._partialmethod = self
return _method
def __get__(self, obj, cls=None):
get = getattr(self.func, "__get__", None)
result = None
if get is not None:
new_func = get(obj, cls)
if new_func is not self.func:
# Assume __get__ returning something new indicates the
# creation of an appropriate callable
result = partial(new_func, *self.args, **self.keywords)
try:
result.__self__ = new_func.__self__
except AttributeError:
pass
if result is None:
# If the underlying descriptor didn't do anything, treat this
# like an instance method
result = self._make_unbound_method().__get__(obj, cls)
return result
@property
def __isabstractmethod__(self):
return getattr(self.func, "__isabstractmethod__", False)
__class_getitem__ = classmethod(GenericAlias)
| (func, /, *args, **keywords) |
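The canonical usage from the functools documentation: pre-binding an argument while leaving `self` to be supplied at call time:

```python
from functools import partialmethod

class Cell:
    def set_state(self, state):
        self._alive = state
    # Pre-bind `state`; `self` is filled in when the method is looked up
    set_alive = partialmethod(set_state, True)
    set_dead = partialmethod(set_state, False)

c = Cell()
c.set_alive()
assert c._alive is True
```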
7,089 | functools | __get__ | null | def __get__(self, obj, cls=None):
get = getattr(self.func, "__get__", None)
result = None
if get is not None:
new_func = get(obj, cls)
if new_func is not self.func:
# Assume __get__ returning something new indicates the
# creation of an appropriate callable
result = partial(new_func, *self.args, **self.keywords)
try:
result.__self__ = new_func.__self__
except AttributeError:
pass
if result is None:
# If the underlying descriptor didn't do anything, treat this
# like an instance method
result = self._make_unbound_method().__get__(obj, cls)
return result
| (self, obj, cls=None) |
7,090 | functools | __init__ | null | def __init__(self, func, /, *args, **keywords):
if not callable(func) and not hasattr(func, "__get__"):
raise TypeError("{!r} is not callable or a descriptor"
.format(func))
# func could be a descriptor like classmethod which isn't callable,
# so we can't inherit from partial (it verifies func is callable)
if isinstance(func, partialmethod):
# flattening is mandatory in order to place cls/self before all
# other arguments
# it's also more efficient since only one function will be called
self.func = func.func
self.args = func.args + args
self.keywords = {**func.keywords, **keywords}
else:
self.func = func
self.args = args
self.keywords = keywords
| (self, func, /, *args, **keywords) |
7,091 | functools | __repr__ | null | def __repr__(self):
args = ", ".join(map(repr, self.args))
keywords = ", ".join("{}={!r}".format(k, v)
for k, v in self.keywords.items())
format_string = "{module}.{cls}({func}, {args}, {keywords})"
return format_string.format(module=self.__class__.__module__,
cls=self.__class__.__qualname__,
func=self.func,
args=args,
keywords=keywords)
| (self) |
7,092 | functools | _make_unbound_method | null | def _make_unbound_method(self):
def _method(cls_or_self, /, *args, **keywords):
keywords = {**self.keywords, **keywords}
return self.func(cls_or_self, *self.args, *args, **keywords)
_method.__isabstractmethod__ = self.__isabstractmethod__
_method._partialmethod = self
return _method
| (self) |
7,094 | plastik.ridge | Ridge | Plot data in a ridge plot with fixed width and fixed height per ridge.
Parameters
----------
data : List
A list of n 2-tuples with (x, y)-pairs; list of n np.ndarrays: (y)
options : str
String of characters that set different options. These include 'b' (blank), 'c'
(crop x-axis), 'g' (grid), 's' (slalom axis) and 'z' (squeeze). Blank removes
all axis lines and turns the grid off. Crop sets the x-axis to the smallest
common x-limit in all data tuples. Grid turns on the grid. Slalom axis makes the
y-axis ticks alternate between the left and right side. Squeeze makes each axis
plot overlap by 50% (this turns slalom axis on, and makes two RHS ticks
contiguous). The options are combined in a single string in arbitrary order. Any
characters other than 'bcgsz' can be included without having any effect; the
class just looks for the presence of any of 'bcgsz'.
y_scale : float
Scale of y-axis relative to the default which decides the total height of the
figure. Defaults to 1.
xlabel : str
x-label placed at the bottom.
ylabel : str
y-label for all y-axes.
xlim : List
List containing the lower and upper x-axis limit applied to all ridges.
ylim : List
List containing the lower and upper y-axis limit applied to all ridges.
pltype : str
plt plotting function (loglog, plot, semilogx, etc.). Defaults to plot.
kwargs : Dict
Any keyword argument plt.plot accepts. (Must be a dict; asterisk syntax is not
supported.)
| class Ridge:
"""Plot data in a ridge plot with fixed width and fixed height per ridge.
Parameters
----------
data : List
A list of n 2-tuples with (x, y)-pairs; list of n np.ndarrays: (y)
options : str
String of characters that set different options. These include 'b' (blank), 'c'
(crop x-axis), 'g' (grid), 's' (slalom axis) and 'z' (squeeze). Blank removes
all axis lines and turns the grid off. Crop sets the x-axis to the smallest
common x-limit in all data tuples. Grid turns on the grid. Slalom axis makes the
y-axis ticks alternate between the left and right side. Squeeze makes each axis
plot overlap by 50% (this turns slalom axis on, and makes two RHS ticks
contiguous). The options are combined in a single string in arbitrary order. Any
characters other than 'bcgsz' can be included without having any effect; the
class just looks for the presence of any of 'bcgsz'.
y_scale : float
Scale of y-axis relative to the default which decides the total height of the
figure. Defaults to 1.
xlabel : str
x-label placed at the bottom.
ylabel : str
y-label for all y-axes.
xlim : List
List containing the lower and upper x-axis limit applied to all ridges.
ylim : List
List containing the lower and upper y-axis limit applied to all ridges.
pltype : str
plt plotting function (loglog, plot, semilogx, etc.). Defaults to plot.
kwargs : Dict
Any keyword argument plt.plot accepts. (Must be a dict; asterisk syntax is not
supported.)
"""
data: List[Any] = attr.ib()
@data.validator
def _check_data_type(self, _, value):
if not isinstance(value[0], tuple) and not isinstance(value[0], np.ndarray):
raise TypeError(
"data must be a list of tuples or numpy arrays, not list of"
f" {type(self.data[0])}."
)
options: str = attr.ib(converter=str)
y_scale: float = attr.ib(converter=float, default=1.0)
xlabel: Optional[str] = attr.ib(converter=str, kw_only=True, default="")
ylabel: Optional[str] = attr.ib(converter=str, kw_only=True, default="")
xlim: List[float] = attr.Factory(list)
ylim: List[float] = attr.Factory(list)
pltype: str = attr.ib(converter=str, default="plot")
kwargs: Dict[str, Any] = attr.Factory(dict)
colors = itertools.cycle(plt.rcParams["axes.prop_cycle"].by_key()["color"])
def set_grid(self) -> None:
"""Set the gridstructure of the figure."""
fsize = (4, self.y_scale * len(self.data))
self.gs = grid_spec.GridSpec(len(self.data), 1)
self.__fig = plt.figure(figsize=fsize)
# Set line type of horizontal grid lines
self.gls = itertools.cycle(["-", "--"])
self.ax_objs: List[plt.Axes] = []
if "z" in self.options:
self.gs.update(hspace=-0.5)
else:
self.gs.update(hspace=0.0)
def set_xaxs(self) -> None:
"""Set the x-axis limits."""
if self.xlim:
x_min, x_max = self.xlim
elif len(self.data[0]) != 2: # noqa: PLR2004
x_min, x_max = -0.5, len(self.data[0]) - 0.5
x_min = 0.5 if self.pltype in ["loglog", "semilogx"] else x_min
elif "c" in self.options:
x_min, x_max = self.__x_limit(False)
else:
x_min, x_max = self.__x_limit()
self.__xmin = x_min
self.__xmax = x_max
def set_ylabel(
self, y_min: Optional[float] = None, y_max: Optional[float] = None
) -> None:
"""Set the y-axis label."""
if y_min is None or y_max is None:
self.ax = self.__fig.add_subplot(111, frame_on=False)
self.ax.tick_params(
labelcolor="w",
axis="both",
which="both",
zorder=-1, # labelleft=False,
labelbottom=False,
top=False,
bottom=False,
left=False,
right=False,
)
plt.setp(self.ax.get_yticklabels(), alpha=0)
else:
self._set_ymin_ymax(y_min, y_max)
def _set_ymin_ymax(self, y_min, y_max):
self.ax.spines["top"].set_visible(False)
self.ax.spines["bottom"].set_visible(False)
self.ax.spines["left"].set_visible(False)
self.ax.spines["right"].set_visible(False)
if self.pltype != "plot":
pltype = "log" if self.pltype in ["semilogy", "loglog"] else "linear"
self.ax.set_yscale(pltype)
self.ax.set_ylabel(self.ylabel)
y_min = 1e-3 if self.pltype in ("semilogy", "loglog") and y_min <= 0 else y_min
ylim = self.ylim or [y_min, y_max]
self.ax.set_ylim(ylim)
def __blank(self) -> None:
spine = ["top", "bottom", "left", "right"]
for sp in spine:
self.ax_objs[-1].spines[sp].set_visible(False)
plt.tick_params(
axis="both",
which="both",
bottom=False,
left=False,
top=False,
right=False,
labelbottom=False,
labelleft=False,
)
def __z_option(self, i) -> None:
if i % 2:
self.ax_objs[-1].tick_params(
axis="y",
which="both",
left=False,
labelleft=False,
labelright=True,
)
self.ax_objs[-1].spines["left"].set_color("k")
else:
self.ax_objs[-1].tick_params(
axis="y",
which="both",
right=False,
labelleft=True,
labelright=False,
)
self.ax_objs[-1].spines["right"].set_color("k")
def __s_option(self, i) -> None:
if i % 2:
self.ax_objs[-1].tick_params(
axis="y", which="both", labelleft=False, labelright=True
)
def __g_option(self, i) -> None:
if ("g" in self.options and "z" not in self.options) or (
"g" in self.options and len(self.data) == 1
):
plt.grid(True, which="major", ls="-", alpha=0.2)
elif "g" in self.options:
plt.minorticks_off()
alpha = 0.2 if i in (0, len(self.data) - 1) else 0.1
plt.grid(True, axis="y", which="major", ls=next(self.gls), alpha=0.2)
plt.grid(True, axis="x", which="major", ls="-", alpha=alpha)
def __resolve_first_last_axis(self, i) -> None:
if i == len(self.data) - 1:
if self.xlabel:
plt.xlabel(self.xlabel)
if len(self.data) != 1:
plt.tick_params(axis="x", which="both", top=False)
elif i == 0:
plt.tick_params(
axis="x", which="both", bottom=False, labelbottom=False
) # , labeltop=True
else:
plt.tick_params(
axis="x", which="both", bottom=False, top=False, labelbottom=False
)
def __resolve_options(self, i, spines, col) -> None:
if len(self.data) != 1:
if "z" in self.options: # Squeeze
self.__z_option(i)
elif "s" in self.options: # Slalom axis
self.__s_option(i)
for sp in spines:
self.ax_objs[-1].spines[sp].set_visible(False)
if "z" not in self.options: # Squeeze
self.ax_objs[-1].spines["left"].set_color(col)
self.ax_objs[-1].spines["right"].set_color(col)
self.ax_objs[-1].tick_params(axis="y", which="both", colors=col)
self.ax_objs[-1].yaxis.label.set_color(col)
self.__g_option(i)
self.__resolve_first_last_axis(i)
def __setup_axis(
self,
y_min: float,
y_max: float,
i: int,
s: Union[Tuple[np.ndarray, np.ndarray], np.ndarray],
) -> Tuple[
float, float, Union[Tuple[np.ndarray, np.ndarray], np.ndarray], List[str]
]:
self.ax_objs.append(self.__fig.add_subplot(self.gs[i : i + 1, 0:]))
if i == 0:
spines = ["bottom"]
elif i == len(self.data) - 1:
spines = ["top"]
else:
spines = ["top", "bottom"]
s_ = s if isinstance(s, np.ndarray) else s[1]
y_min = min(s_.min(), y_min)
y_max = max(s_.max(), y_max)
return y_min, y_max, s, spines
def __draw_lines(self, s, col) -> None:
# Plot data
p_func = getattr(self.ax_objs[-1], self.pltype)
if len(s) == 2: # noqa: PLR2004
ell = p_func(s[0], s[1], color=col, markersize=2.5, **self.kwargs)[0]
else:
ell = p_func(s, color=col, markersize=2.5, **self.kwargs)[0]
# Append in line-list to create legend
self.__lines.append(ell)
def data_loop(self) -> Tuple[float, float]:
"""Run the data loop."""
# Loop through data
self.__lines: List[plt.Line2D] = []
y_min = np.inf
y_max = -np.inf
for i, s in enumerate(self.data):
col = next(self.colors)
y_min, y_max, s_, spines = self.__setup_axis(y_min, y_max, i, s)
self.__draw_lines(s_, col)
self.ax_objs[-1].patch.set_alpha(0)
# Scale all subplots to the same x axis
plt.xlim([self.__xmin, self.__xmax])
if self.ylim:
plt.ylim(self.ylim)
# The length of data is greater than one, fix the plot according to the
# input args and kwargs.
if "b" in self.options:
self.__blank()
else:
self.__resolve_options(i, spines, col)
return y_min, y_max
def __x_limit(self, maxx=True) -> Tuple[float, float]:
if isinstance(self.data[0], tuple):
data: List[np.ndarray] = [d[0] for d in self.data]
else:
raise ValueError("'data' must have x-values.")
t_min = data[0]
x_max = data[0][-1]
for t in data[1:]:
t_0, t_max = np.min(t), np.max(t)
if maxx:
t_min = t if t_0 < t_min[0] else t_min
# t_max = t if t_1 > t_max[-1] else t_max
x_max = max(t_max, x_max)
else:
t_min = t if t[0] > t_min[0] else t_min
# t_max = t if t[-1] < t_max[-1] else t_max
x_max = min(t_max, x_max)
diff = 0.05 * (x_max - t_min[0])
# x_max = t_max[-1] + diff
x_max += diff
if self.pltype in ["loglog", "semilogx"]:
x_min = 0.8 * t_min[t_min > 0][0] if t_min[0] < diff else t_min[0] - diff
# if x_min < 0:
# x_min = 1e-10
else:
x_min = t_min[0] - diff
return x_min, x_max
@property
def lines(self) -> List:
"""Return the plotted lines."""
return self.__lines
@property
def figure(self) -> mpl.figure.Figure:
"""Return the figure."""
return self.__fig
@property
def top_axes(self) -> mpl.axes.Axes:
"""Return the top axes."""
return self.ax_objs[0]
@property
def bottom_axes(self) -> mpl.axes.Axes:
"""Return the bottom axes."""
return self.ax_objs[-1]
@property
def ylabel_axis(self) -> mpl.axes.Axes:
"""Return axis with y-label."""
return self.ax
@property
def all_axes(self) -> List[mpl.axes.Axes]:
"""Return all the axes."""
return self.ax_objs
def main(self) -> None:
"""Run the main function."""
self.set_grid()
self.set_xaxs()
if self.ylabel:
self.set_ylabel()
y1, y2 = self.data_loop()
if self.ylabel:
self.set_ylabel(y1, y2)
| (data: List[Any], options: str, y_scale: float = 1.0, xlim: List[float] = NOTHING, ylim: List[float] = NOTHING, pltype: str = 'plot', kwargs: Dict[str, Any] = NOTHING, *, xlabel: Optional[str] = '', ylabel: Optional[str] = '') -> None |
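A minimal usage sketch, assuming `plastik` and its dependencies are installed; the data and labels are made up:

```python
import numpy as np
from plastik.ridge import Ridge

x = np.linspace(0, 10, 100)
data = [(x, np.sin(x + phase)) for phase in (0.0, 1.0, 2.0)]

ridge = Ridge(data, "gs", xlabel="time", ylabel="signal")  # grid + slalom axis
ridge.main()  # set_grid, set_xaxs, data_loop, labels
ridge.figure.savefig("ridge.png")
```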
7,095 | plastik.ridge | __blank | null | def __blank(self) -> None:
spine = ["top", "bottom", "left", "right"]
for sp in spine:
self.ax_objs[-1].spines[sp].set_visible(False)
plt.tick_params(
axis="both",
which="both",
bottom=False,
left=False,
top=False,
right=False,
labelbottom=False,
labelleft=False,
)
| (self) -> NoneType |
7,096 | plastik.ridge | __draw_lines | null | def __draw_lines(self, s, col) -> None:
# Plot data
p_func = getattr(self.ax_objs[-1], self.pltype)
if len(s) == 2: # noqa: PLR2004
ell = p_func(s[0], s[1], color=col, markersize=2.5, **self.kwargs)[0]
else:
ell = p_func(s, color=col, markersize=2.5, **self.kwargs)[0]
# Append in line-list to create legend
self.__lines.append(ell)
| (self, s, col) -> NoneType |
7,097 | plastik.ridge | __g_option | null | def __g_option(self, i) -> None:
if ("g" in self.options and "z" not in self.options) or (
"g" in self.options and len(self.data) == 1
):
plt.grid(True, which="major", ls="-", alpha=0.2)
elif "g" in self.options:
plt.minorticks_off()
alpha = 0.2 if i in (0, len(self.data) - 1) else 0.1
plt.grid(True, axis="y", which="major", ls=next(self.gls), alpha=0.2)
plt.grid(True, axis="x", which="major", ls="-", alpha=alpha)
| (self, i) -> NoneType |
7,098 | plastik.ridge | __resolve_first_last_axis | null | def __resolve_first_last_axis(self, i) -> None:
if i == len(self.data) - 1:
if self.xlabel:
plt.xlabel(self.xlabel)
if len(self.data) != 1:
plt.tick_params(axis="x", which="both", top=False)
elif i == 0:
plt.tick_params(
axis="x", which="both", bottom=False, labelbottom=False
) # , labeltop=True
else:
plt.tick_params(
axis="x", which="both", bottom=False, top=False, labelbottom=False
)
| (self, i) -> NoneType |
7,099 | plastik.ridge | __resolve_options | null | def __resolve_options(self, i, spines, col) -> None:
if len(self.data) != 1:
if "z" in self.options: # Squeeze
self.__z_option(i)
elif "s" in self.options: # Slalom axis
self.__s_option(i)
for sp in spines:
self.ax_objs[-1].spines[sp].set_visible(False)
if "z" not in self.options: # Squeeze
self.ax_objs[-1].spines["left"].set_color(col)
self.ax_objs[-1].spines["right"].set_color(col)
self.ax_objs[-1].tick_params(axis="y", which="both", colors=col)
self.ax_objs[-1].yaxis.label.set_color(col)
self.__g_option(i)
self.__resolve_first_last_axis(i)
| (self, i, spines, col) -> NoneType |
7,100 | plastik.ridge | __s_option | null | def __s_option(self, i) -> None:
if i % 2:
self.ax_objs[-1].tick_params(
axis="y", which="both", labelleft=False, labelright=True
)
| (self, i) -> NoneType |
7,101 | plastik.ridge | __setup_axis | null | def __setup_axis(
self,
y_min: float,
y_max: float,
i: int,
s: Union[Tuple[np.ndarray, np.ndarray], np.ndarray],
) -> Tuple[
float, float, Union[Tuple[np.ndarray, np.ndarray], np.ndarray], List[str]
]:
self.ax_objs.append(self.__fig.add_subplot(self.gs[i : i + 1, 0:]))
if i == 0:
spines = ["bottom"]
elif i == len(self.data) - 1:
spines = ["top"]
else:
spines = ["top", "bottom"]
s_ = s if isinstance(s, np.ndarray) else s[1]
y_min = min(s_.min(), y_min)
y_max = max(s_.max(), y_max)
return y_min, y_max, s, spines
| (self, y_min: float, y_max: float, i: int, s: Union[Tuple[numpy.ndarray, numpy.ndarray], numpy.ndarray]) -> Tuple[float, float, Union[Tuple[numpy.ndarray, numpy.ndarray], numpy.ndarray], List[str]] |
7,102 | plastik.ridge | __x_limit | null | def __x_limit(self, maxx=True) -> Tuple[float, float]:
if isinstance(self.data[0], tuple):
data: List[np.ndarray] = [d[0] for d in self.data]
else:
raise ValueError("'data' must have x-values.")
t_min = data[0]
x_max = data[0][-1]
for t in data[1:]:
t_0, t_max = np.min(t), np.max(t)
if maxx:
t_min = t if t_0 < t_min[0] else t_min
# t_max = t if t_1 > t_max[-1] else t_max
x_max = max(t_max, x_max)
else:
t_min = t if t[0] > t_min[0] else t_min
# t_max = t if t[-1] < t_max[-1] else t_max
x_max = min(t_max, x_max)
diff = 0.05 * (x_max - t_min[0])
# x_max = t_max[-1] + diff
x_max += diff
if self.pltype in ["loglog", "semilogx"]:
x_min = 0.8 * t_min[t_min > 0][0] if t_min[0] < diff else t_min[0] - diff
# if x_min < 0:
# x_min = 1e-10
else:
x_min = t_min[0] - diff
return x_min, x_max
| (self, maxx=True) -> Tuple[float, float] |
7,103 | plastik.ridge | __z_option | null | def __z_option(self, i) -> None:
if i % 2:
self.ax_objs[-1].tick_params(
axis="y",
which="both",
left=False,
labelleft=False,
labelright=True,
)
self.ax_objs[-1].spines["left"].set_color("k")
else:
self.ax_objs[-1].tick_params(
axis="y",
which="both",
right=False,
labelleft=True,
labelright=False,
)
self.ax_objs[-1].spines["right"].set_color("k")
| (self, i) -> NoneType |
7,104 | plastik.ridge | __eq__ | Method generated by attrs for class Ridge. | """Creates a ridge plot figure."""
import itertools
from typing import Any, Dict, List, Optional, Tuple, Union
import attr
import matplotlib as mpl
import matplotlib.gridspec as grid_spec
import matplotlib.pyplot as plt
import numpy as np
import plastik
@attr.s(auto_attribs=True)
class Ridge:
"""Plot data in a ridge plot with fixed width and fixed height per ridge.
Parameters
----------
data : List
A list of n 2-tuples with (x, y)-pairs; list of n np.ndarrays: (y)
options : str
String of characters that set different options. These include 'b' (blank), 'c'
(crop x-axis), 'g' (grid), 's' (slalom axis) and 'z' (squeeze). Blank removes
all axis lines and turns the grid off. Crop sets the x-axis to the smallest
common x-limit in all data tuples. Grid turns on the grid. Slalom axis makes the
y-axis ticks alternate between the left and right side. Squeeze makes each axis
plot overlap by 50% (this turns slalom axis on, and makes two RHS ticks
contiguous). The options are combined in a single string in arbitrary order. Any
characters other than 'bcgsz' can be included without having any effect; the
class just looks for the presence of any of 'bcgsz'.
y_scale : float
Scale of y-axis relative to the default which decides the total height of the
figure. Defaults to 1.
xlabel : str
x-label placed at the bottom.
ylabel : str
y-label for all y-axes.
xlim : List
List containing the lower and upper x-axis limit applied to all ridges.
ylim : List
List containing the lower and upper y-axis limit applied to all ridges.
pltype : str
plt plotting function (loglog, plot, semilogx, etc.). Defaults to plot.
kwargs : Dict
Any keyword argument plt.plot accepts. (Must be a dict; asterisk syntax is not
supported.)
"""
data: List[Any] = attr.ib()
@data.validator
def _check_data_type(self, _, value):
if not isinstance(value[0], tuple) and not isinstance(value[0], np.ndarray):
raise TypeError(
"data must be a list of tuples or numpy arrays, not list of"
f" {type(self.data[0])}."
)
options: str = attr.ib(converter=str)
y_scale: float = attr.ib(converter=float, default=1.0)
xlabel: Optional[str] = attr.ib(converter=str, kw_only=True, default="")
ylabel: Optional[str] = attr.ib(converter=str, kw_only=True, default="")
xlim: List[float] = attr.Factory(list)
ylim: List[float] = attr.Factory(list)
pltype: str = attr.ib(converter=str, default="plot")
kwargs: Dict[str, Any] = attr.Factory(dict)
colors = itertools.cycle(plt.rcParams["axes.prop_cycle"].by_key()["color"])
def set_grid(self) -> None:
"""Set the gridstructure of the figure."""
fsize = (4, self.y_scale * len(self.data))
self.gs = grid_spec.GridSpec(len(self.data), 1)
self.__fig = plt.figure(figsize=fsize)
# Set line type of horizontal grid lines
self.gls = itertools.cycle(["-", "--"])
self.ax_objs: List[plt.Axes] = []
if "z" in self.options:
self.gs.update(hspace=-0.5)
else:
self.gs.update(hspace=0.0)
def set_xaxs(self) -> None:
"""Set the x-axis limits."""
if self.xlim:
x_min, x_max = self.xlim
elif len(self.data[0]) != 2: # noqa: PLR2004
x_min, x_max = -0.5, len(self.data[0]) - 0.5
x_min = 0.5 if self.pltype in ["loglog", "semilogx"] else x_min
elif "c" in self.options:
x_min, x_max = self.__x_limit(False)
else:
x_min, x_max = self.__x_limit()
self.__xmin = x_min
self.__xmax = x_max
def set_ylabel(
self, y_min: Optional[float] = None, y_max: Optional[float] = None
) -> None:
"""Set the y-axis label."""
if y_min is None or y_max is None:
self.ax = self.__fig.add_subplot(111, frame_on=False)
self.ax.tick_params(
labelcolor="w",
axis="both",
which="both",
zorder=-1, # labelleft=False,
labelbottom=False,
top=False,
bottom=False,
left=False,
right=False,
)
plt.setp(self.ax.get_yticklabels(), alpha=0)
else:
self._set_ymin_ymax(y_min, y_max)
def _set_ymin_ymax(self, y_min, y_max):
self.ax.spines["top"].set_visible(False)
self.ax.spines["bottom"].set_visible(False)
self.ax.spines["left"].set_visible(False)
self.ax.spines["right"].set_visible(False)
if self.pltype != "plot":
pltype = "log" if self.pltype in ["semilogy", "loglog"] else "linear"
self.ax.set_yscale(pltype)
self.ax.set_ylabel(self.ylabel)
y_min = 1e-3 if self.pltype in ("semilogy", "loglog") and y_min <= 0 else y_min
ylim = self.ylim or [y_min, y_max]
self.ax.set_ylim(ylim)
def __blank(self) -> None:
spine = ["top", "bottom", "left", "right"]
for sp in spine:
self.ax_objs[-1].spines[sp].set_visible(False)
plt.tick_params(
axis="both",
which="both",
bottom=False,
left=False,
top=False,
right=False,
labelbottom=False,
labelleft=False,
)
def __z_option(self, i) -> None:
if i % 2:
self.ax_objs[-1].tick_params(
axis="y",
which="both",
left=False,
labelleft=False,
labelright=True,
)
self.ax_objs[-1].spines["left"].set_color("k")
else:
self.ax_objs[-1].tick_params(
axis="y",
which="both",
right=False,
labelleft=True,
labelright=False,
)
self.ax_objs[-1].spines["right"].set_color("k")
def __s_option(self, i) -> None:
if i % 2:
self.ax_objs[-1].tick_params(
axis="y", which="both", labelleft=False, labelright=True
)
def __g_option(self, i) -> None:
if ("g" in self.options and "z" not in self.options) or (
"g" in self.options and len(self.data) == 1
):
plt.grid(True, which="major", ls="-", alpha=0.2)
elif "g" in self.options:
plt.minorticks_off()
alpha = 0.2 if i in (0, len(self.data) - 1) else 0.1
plt.grid(True, axis="y", which="major", ls=next(self.gls), alpha=0.2)
plt.grid(True, axis="x", which="major", ls="-", alpha=alpha)
def __resolve_first_last_axis(self, i) -> None:
if i == len(self.data) - 1:
if self.xlabel:
plt.xlabel(self.xlabel)
if len(self.data) != 1:
plt.tick_params(axis="x", which="both", top=False)
elif i == 0:
plt.tick_params(
axis="x", which="both", bottom=False, labelbottom=False
) # , labeltop=True
else:
plt.tick_params(
axis="x", which="both", bottom=False, top=False, labelbottom=False
)
def __resolve_options(self, i, spines, col) -> None:
if len(self.data) != 1:
if "z" in self.options: # Squeeze
self.__z_option(i)
elif "s" in self.options: # Slalom axis
self.__s_option(i)
for sp in spines:
self.ax_objs[-1].spines[sp].set_visible(False)
if "z" not in self.options: # Squeeze
self.ax_objs[-1].spines["left"].set_color(col)
self.ax_objs[-1].spines["right"].set_color(col)
self.ax_objs[-1].tick_params(axis="y", which="both", colors=col)
self.ax_objs[-1].yaxis.label.set_color(col)
self.__g_option(i)
self.__resolve_first_last_axis(i)
def __setup_axis(
self,
y_min: float,
y_max: float,
i: int,
s: Union[Tuple[np.ndarray, np.ndarray], np.ndarray],
) -> Tuple[
float, float, Union[Tuple[np.ndarray, np.ndarray], np.ndarray], List[str]
]:
self.ax_objs.append(self.__fig.add_subplot(self.gs[i : i + 1, 0:]))
if i == 0:
spines = ["bottom"]
elif i == len(self.data) - 1:
spines = ["top"]
else:
spines = ["top", "bottom"]
s_ = s if isinstance(s, np.ndarray) else s[1]
y_min = min(s_.min(), y_min)
y_max = max(s_.max(), y_max)
return y_min, y_max, s, spines
def __draw_lines(self, s, col) -> None:
# Plot data
p_func = getattr(self.ax_objs[-1], self.pltype)
if len(s) == 2: # noqa: PLR2004
ell = p_func(s[0], s[1], color=col, markersize=2.5, **self.kwargs)[0]
else:
ell = p_func(s, color=col, markersize=2.5, **self.kwargs)[0]
# Append in line-list to create legend
self.__lines.append(ell)
def data_loop(self) -> Tuple[float, float]:
"""Run the data loop."""
# Loop through data
self.__lines: List[plt.Line2D] = []
y_min = np.inf
y_max = -np.inf
for i, s in enumerate(self.data):
col = next(self.colors)
y_min, y_max, s_, spines = self.__setup_axis(y_min, y_max, i, s)
self.__draw_lines(s_, col)
self.ax_objs[-1].patch.set_alpha(0)
# Scale all subplots to the same x axis
plt.xlim([self.__xmin, self.__xmax])
if self.ylim:
plt.ylim(self.ylim)
# The length of data is greater than one, fix the plot according to the
# input args and kwargs.
if "b" in self.options:
self.__blank()
else:
self.__resolve_options(i, spines, col)
return y_min, y_max
def __x_limit(self, maxx=True) -> Tuple[float, float]:
if isinstance(self.data[0], tuple):
data: List[np.ndarray] = [d[0] for d in self.data]
else:
raise ValueError("'data' must have x-values.")
t_min = data[0]
x_max = data[0][-1]
for t in data[1:]:
t_0, t_max = np.min(t), np.max(t)
if maxx:
t_min = t if t_0 < t_min[0] else t_min
# t_max = t if t_1 > t_max[-1] else t_max
x_max = max(t_max, x_max)
else:
t_min = t if t[0] > t_min[0] else t_min
# t_max = t if t[-1] < t_max[-1] else t_max
x_max = min(t_max, x_max)
diff = 0.05 * (x_max - t_min[0])
# x_max = t_max[-1] + diff
x_max += diff
if self.pltype in ["loglog", "semilogx"]:
x_min = 0.8 * t_min[t_min > 0][0] if t_min[0] < diff else t_min[0] - diff
# if x_min < 0:
# x_min = 1e-10
else:
x_min = t_min[0] - diff
return x_min, x_max
@property
def lines(self) -> List:
"""Return the plotted lines."""
return self.__lines
@property
def figure(self) -> mpl.figure.Figure:
"""Return the figure."""
return self.__fig
@property
def top_axes(self) -> mpl.axes.Axes:
"""Return the top axes."""
return self.ax_objs[0]
@property
def bottom_axes(self) -> mpl.axes.Axes:
"""Return the bottom axes."""
return self.ax_objs[-1]
@property
def ylabel_axis(self) -> mpl.axes.Axes:
"""Return axis with y-label."""
return self.ax
@property
def all_axes(self) -> List[mpl.axes.Axes]:
"""Return all the axes."""
return self.ax_objs
def main(self) -> None:
"""Run the main function."""
self.set_grid()
self.set_xaxs()
if self.ylabel:
self.set_ylabel()
y1, y2 = self.data_loop()
if self.ylabel:
self.set_ylabel(y1, y2)
| (self, other) |
7,105 | plastik.ridge | __ge__ | Method generated by attrs for class Ridge. | null | (self, other) |
7,112 | plastik.ridge | _check_data_type | null | @data.validator
def _check_data_type(self, _, value):
if not isinstance(value[0], tuple) and not isinstance(value[0], np.ndarray):
raise TypeError(
"data must be a list of tuples or numpy arrays, not list of"
f" {type(self.data[0])}."
)
| (self, _, value) |
7,113 | plastik.ridge | _set_ymin_ymax | null | def _set_ymin_ymax(self, y_min, y_max):
self.ax.spines["top"].set_visible(False)
self.ax.spines["bottom"].set_visible(False)
self.ax.spines["left"].set_visible(False)
self.ax.spines["right"].set_visible(False)
if self.pltype != "plot":
pltype = "log" if self.pltype in ["semilogy", "loglog"] else "linear"
self.ax.set_yscale(pltype)
self.ax.set_ylabel(self.ylabel)
y_min = 1e-3 if self.pltype in ["semilogy", "loglog"] and y_min <= 0 else y_min  # guard against non-positive log-axis limits
ylim = self.ylim or [y_min, y_max]
self.ax.set_ylim(ylim)
| (self, y_min, y_max) |
7,114 | plastik.ridge | data_loop | Run the data loop. | def data_loop(self) -> Tuple[float, float]:
"""Run the data loop."""
# Loop through data
self.__lines: List[plt.Line2D] = []
y_min = np.inf
y_max = -np.inf
for i, s in enumerate(self.data):
col = next(self.colors)
y_min, y_max, s_, spines = self.__setup_axis(y_min, y_max, i, s)
self.__draw_lines(s_, col)
self.ax_objs[-1].patch.set_alpha(0)
# Scale all subplots to the same x axis
plt.xlim([self.__xmin, self.__xmax])
if self.ylim:
plt.ylim(self.ylim)
# Style this subplot according to the input options and kwargs.
if "b" in self.options:
self.__blank()
else:
self.__resolve_options(i, spines, col)
return y_min, y_max
| (self) -> Tuple[float, float] |
7,115 | plastik.ridge | main | Run the main function. | def main(self) -> None:
"""Run the main function."""
self.set_grid()
self.set_xaxs()
if self.ylabel:
self.set_ylabel()
y1, y2 = self.data_loop()
if self.ylabel:
self.set_ylabel(y1, y2)
| (self) -> NoneType |
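For orientation, a minimal usage sketch of the Ridge class assembled from the methods above. The constructor call is an assumption: Ridge is an attrs class whose field order is not shown in these rows, so the positional data/options arguments and the meaning of the "g" option are inferred, not confirmed API.

import numpy as np
from plastik.ridge import Ridge

x = np.linspace(0, 10, 200)
data = [(x, np.sin(x)), (x, np.cos(x)), (x, np.sin(2 * x))]
r = Ridge(data, "g")  # assumed signature: Ridge(data, options, ...)
r.main()  # set_grid -> set_xaxs -> set_ylabel -> data_loop
r.figure.savefig("ridge.png")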
7,116 | plastik.ridge | set_grid | Set the grid structure of the figure. | def set_grid(self) -> None:
"""Set the gridstructure of the figure."""
fsize = (4, self.y_scale * len(self.data))
self.gs = grid_spec.GridSpec(len(self.data), 1)
self.__fig = plt.figure(figsize=fsize)
# Set line type of horizontal grid lines
self.gls = itertools.cycle(["-", "--"])
self.ax_objs: List[plt.Axes] = []
if "z" in self.options:
self.gs.update(hspace=-0.5)
else:
self.gs.update(hspace=0.0)
| (self) -> NoneType |
7,117 | plastik.ridge | set_xaxs | Set the x-axis limits. | def set_xaxs(self) -> None:
"""Set the x-axis limits."""
if self.xlim:
x_min, x_max = self.xlim
elif len(self.data[0]) != 2: # noqa: PLR2004
x_min, x_max = -0.5, len(self.data[0]) - 0.5
x_min = 0.5 if self.pltype in ["loglog", "semilogx"] else x_min
elif "c" in self.options:
x_min, x_max = self.__x_limit(False)
else:
x_min, x_max = self.__x_limit()
self.__xmin = x_min
self.__xmax = x_max
| (self) -> NoneType |
7,118 | plastik.ridge | set_ylabel | Set the y-axis label. | def set_ylabel(
self, y_min: Optional[float] = None, y_max: Optional[float] = None
) -> None:
"""Set the y-axis label."""
if y_min is None or y_max is None:
self.ax = self.__fig.add_subplot(111, frame_on=False)
self.ax.tick_params(
labelcolor="w",
axis="both",
which="both",
zorder=-1, # labelleft=False,
labelbottom=False,
top=False,
bottom=False,
left=False,
right=False,
)
plt.setp(self.ax.get_yticklabels(), alpha=0)
else:
self._set_ymin_ymax(y_min, y_max)
| (self, y_min: Optional[float] = None, y_max: Optional[float] = None) -> NoneType |
7,122 | plastik.axes | dark_theme | Change plot style to fit a dark background.
This is better suited to e.g. beamer presentations with a dark theme.
Parameters
----------
*ax : mpl.axes.Axes
Send in any number of matplotlib axes and the changes will be applied to all
fig : mpl.figure.Figure | None, optional
The figure object that should be altered
keep_yaxis : bool
Keep the colour of the label along the vertical axis as is.
Useful if a plot has y-labels that are coloured to match the plotted
lines. Defaults to False.
| def dark_theme(
*ax: mpl.axes.Axes,
fig: mpl.figure.Figure | None = None,
keep_yaxis: bool = False,
) -> None:
"""Change plot style to fit a dark background.
This is better suited to e.g. beamer presentations with a dark theme.
Parameters
----------
*ax : mpl.axes.Axes
Send in any number of matplotlib axes and the changes will be applied to all of them.
fig : mpl.figure.Figure | None, optional
The figure object that should be altered
keep_yaxis : bool
Keep the colour of the label along the vertical axis as is.
Useful if a plot has y-labels that are coloured to match the plotted
lines. Defaults to False.
"""
for a in ax:
if not keep_yaxis:
a.tick_params(axis="both", colors="w")
a.yaxis.label.set_color("w")
else:
a.tick_params(axis="x", colors="w")
a.xaxis.label.set_color("w")
plt.setp(a.spines.values(), color="gray")
if fig is not None:
fig.patch.set_alpha(0)
| (*ax: matplotlib.axes._axes.Axes, fig: Optional[matplotlib.figure.Figure] = None, keep_yaxis: bool = False) -> NoneType |
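A small usage sketch for dark_theme; the arguments match the signature above, while the import path follows the dataset's plastik.axes module name and may differ from the package's public re-exports.

import matplotlib.pyplot as plt
from plastik.axes import dark_theme

fig, (ax1, ax2) = plt.subplots(2)
ax1.plot([0, 1], [0, 1])
ax2.plot([0, 1], [1, 0])
dark_theme(ax1, ax2, fig=fig)  # white ticks and labels, gray spines, transparent figure patch
fig.savefig("dark.png", transparent=True)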
7,128 | plastik.percentiles | percentiles | Calculate percentiles from ensemble 'y' along 'x'.
Parameters
----------
x : np.ndarray
One dimensional array, x-axis.
y : np.ndarray
Values along y-axis. Need shape (N, len(x)).
n : int
The number of percentiles, linearly spaced from 50 to 'percentile_m{in,ax}'.
Defaults to 20.
ax : mpl.axes.Axes | None, optional
The axes object to plot on. If not given, the current axes will be used.
plot_mean : bool
Plot mean of 'y'
plot_median : bool
Plot median of 'y'
**kwargs : Any
alpha : float
Alpha value of each layer of the percentiles
color : str
Colour of the percentile shading. Can be any colour that can be parsed by
matplotlib's plotting function.
line_color : str
Colour of the mean/median plot. Can be any colour that can be parsed by
matplotlib's plotting function.
percentile_min : float
Lower percentile limit.
percentile_max : float
Upper percentile limit.
Returns
-------
mpl.axes.Axes
The axes object of the figure.
| def percentiles( # noqa: PLR0913
x: np.ndarray,
y: np.ndarray,
n: int = 20,
ax: mpl.axes.Axes | None = None,
plot_mean: bool = False,
plot_median: bool = True,
**kwargs: Any,
) -> mpl.axes.Axes:
"""Calculate percentiles from ensemble 'y' along 'x'.
Parameters
----------
x : np.ndarray
One dimensional array, x-axis.
y : np.ndarray
Values along y-axis. Need shape (N, len(x)).
n : int
The number of percentiles, linearly spaced from 50 to 'percentile_m{in,ax}'.
Defaults to 20.
ax : mpl.axes.Axes | None, optional
The axes object to plot on. If not given, the current axes will be used.
plot_mean : bool
Plot mean of 'y'
plot_median : bool
Plot median of 'y'
**kwargs : Any
alpha : float
Alpha value of each layer of the percentiles
color : str
Colour of the percentile shading. Can be any colour that can be parsed by
matplotlib's plotting function.
line_color : str
Colour of the mean/median plot. Can be any colour that can be parsed by
matplotlib's plotting function.
percentile_min : float
Lower percentile limit.
percentile_max : float
Upper percentile limit.
Returns
-------
mpl.axes.Axes
The axes object of the figure.
"""
ax = ax or plt.gca()
# calculate the lower and upper percentile groups, skipping 50 percentile
percentile_min = kwargs.pop("percentile_min") if "percentile_min" in kwargs else 1
percentile_max = kwargs.pop("percentile_max") if "percentile_max" in kwargs else 99
perc1 = np.percentile(
y, np.linspace(percentile_min, 50, num=n, endpoint=False), axis=0
)
perc2 = np.percentile(y, np.linspace(50, percentile_max, num=n + 1)[1:], axis=0)
color = kwargs.pop("color") if "color" in kwargs else "r"
line_color = kwargs.pop("line_color") if "line_color" in kwargs else "k"
if plot_mean:
ax.plot(x, np.mean(y, axis=0), color=line_color)
if plot_median:
ax.plot(x, np.median(y, axis=0), "-d", color=line_color)
alpha = kwargs.pop("alpha") if "alpha" in kwargs else 1 / n
# fill lower and upper percentile groups
for p1, p2 in zip(perc1, perc2, strict=True):
ax.fill_between(x, p1, p2, alpha=alpha, color=color, edgecolor=None)
return ax
| (x: numpy.ndarray, y: numpy.ndarray, n: int = 20, ax: Optional[matplotlib.axes._axes.Axes] = None, plot_mean: bool = False, plot_median: bool = True, **kwargs: Any) -> matplotlib.axes._axes.Axes |
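A usage sketch for percentiles with a synthetic ensemble; the keyword names follow the docstring above, and the plastik.percentiles import path is taken from the dataset's module column.

import matplotlib.pyplot as plt
import numpy as np
from plastik.percentiles import percentiles

x = np.linspace(0, 2 * np.pi, 100)
rng = np.random.default_rng(0)
y = np.sin(x) + rng.normal(0, 0.3, size=(50, x.size))  # 50 noisy realisations
ax = percentiles(x, y, n=20, percentile_min=5, percentile_max=95, color="b")
plt.show()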
7,132 | plastik.legends | topside_legends | Move the legend to the top of the plot.
Parameters
----------
ax : mpl.axes.Axes
The axes object of the figure
*args : Any
Parameters given to ax.legend(), i.e. handles and labels. This is useful if you
have many axes objects with one or more lines on them, but you want all lines in
one legend at a given axes object.
c_max : int
Total number of columns allowed. If less than `c_max` labels are used, `c_max`
is set to the number of labels. Defaults to 4.
alpha : float
Alpha value for the background of the legend. Defaults to 0.8.
side : Literal['top', 'bottom', 'right', 'left', 'top right', 'top left', 'bottom right', 'bottom left']
Places the legend at the given side. Valid sides are 'top', 'bottom', 'right',
'left', 'top right', 'top left', 'bottom right' and 'bottom left'. Defaults to
'top'.
edgecolor : str | tuple[float, float, float]
Set the colour of the legend edge. Can also be set with the alias 'ec'. Values
can be either a string colour or a RGB tuple.
facecolor : str | tuple[float, float, float]
Set the colour of the legend face/background. Can also be set with the alias
'fc'. Values can be either a string colour or a RGB tuple.
anchor_ : tuple[float, float] | None, optional
If you want to place the legend at a custom location, you can set the 'anchor'
keyword parameter to a tuple. (0, 0) is the bottom left of the plot, (1, 1) is
the top right. The anchor location defines where the 'side' parameter goes, so
with 'anchor=(0.5, 0.5)' and 'side="top left"', top top left corner of the
legend will be at the centre of the plot.
**kwargs : Any
All keyword arguments are sent to ax.legend().
See https://matplotlib.org/stable/api/legend_api.html#matplotlib.legend.Legend
for details.
Returns
-------
mpl.axes.Axes
Axes object with updated (topside) legend.
Raises
------
ValueError
If the first parameter is a string type (should be an axis Artist).
| def topside_legends( # noqa: PLR0913
ax: mpl.axes.Axes,
*args: Any,
c_max: int = 4,
alpha: float = 0.8,
side: Literal[
"top",
"bottom",
"right",
"left",
"top right",
"top left",
"bottom right",
"bottom left",
] = "top",
edgecolor: str | tuple[float, float, float] = "",
facecolor: str | tuple[float, float, float] = "",
anchor_: tuple[float, float] | None = None,
**kwargs: Any,
) -> mpl.axes.Axes:
"""Move the legend to the top of the plot.
Parameters
----------
ax : mpl.axes.Axes
The axes object of the figure
*args : Any
Parameters given to ax.legend(), i.e. handles and labels. This is useful if you
have many axes objects with one or more lines on them, but you want all lines in
one legend at a given axes object.
c_max : int
Total number of columns allowed. If less than `c_max` labels are used, `c_max`
is set to the number of labels. Defaults to 4.
alpha : float
Alpha value for the background of the legend. Defaults to 0.8.
side : Literal['top', 'bottom', 'right', 'left', 'top right', 'top left', 'bottom right', 'bottom left']
Places the legend at the given side. Valid sides are 'top', 'bottom', 'right',
'left', 'top right', 'top left', 'bottom right' and 'bottom left'. Defaults to
'top'.
edgecolor : str | tuple[float, float, float]
Set the colour of the legend edge. Can also be set with the alias 'ec'. Values
can be either a string colour or a RGB tuple.
facecolor : str | tuple[float, float, float]
Set the colour of the legend face/background. Can also be set with the alias
'fc'. Values can be either a string colour or a RGB tuple.
anchor_ : tuple[float, float] | None, optional
If you want to place the legend at a custom location, you can set the 'anchor'
keyword parameter to a tuple. (0, 0) is the bottom left of the plot, (1, 1) is
the top right. The anchor location defines where the 'side' parameter goes, so
with 'anchor=(0.5, 0.5)' and 'side="top left"', the top left corner of the
legend will be at the centre of the plot.
**kwargs : Any
All keyword arguments are sent to ax.legend().
See https://matplotlib.org/stable/api/legend_api.html#matplotlib.legend.Legend
for details.
Returns
-------
mpl.axes.Axes
Axes object with updated (topside) legend.
Raises
------
ValueError
If the first parameter is a string type (should be an axis Artist).
"""
_sides = {
"top": "upper center",
"bottom": "lower center",
"right": "center right",
"left": "center left",
"top right": "upper right",
"top left": "upper left",
"bottom right": "lower right",
"bottom left": "lower left",
}
_anchors = {
"top": (0.5, 1.05),
"bottom": (0.5, -0.05),
"right": (1.04, 0.5),
"left": (-0.04, 0.5),
"top right": (1.04, 1.05),
"top left": (-0.04, 1.05),
"bottom right": (1.04, -0.05),
"bottom left": (-0.04, -0.05),
}
loc = _sides[side]
edgecolor = kwargs.pop("ec", edgecolor)
facecolor = kwargs.pop("fc", facecolor)
anchor = anchor_ or _anchors[side]
if args and isinstance(args[0][0], str):
raise ValueError(
"The first args parameter must be a sequence of Artist, not str."
)
less_than_two = 2
if len(args) < less_than_two:
try:
# If the labels are defined directly in the legend as a list, calling
# ax.legend() will re-set it to an empty legend. Therefore, we grab the list
# and re-set it when we update the legend object.
legend1: mpl.legend.Legend = ax.get_legend()
lst = [l_.get_text() for l_ in legend1.get_texts()]
l_d = len(legend1.get_texts())
except AttributeError:
# If, however, the labels are set when creating the lines objects (e.g.
# ax.plot(x, y, label="Label for (x, y) data")), we first make sure the
# legend object is created by calling ax.legend(), then we check how many
# labels exist in it. Calling ax.legend() will in this case preserve all
# labels.
ax.legend()
legend2: mpl.legend.Legend = ax.get_legend()
lst = [] # The empty list is falsy.
l_d = len(legend2.get_texts())
else:
lst = list(args[1])
l_d = len(args[1])
n_row = int(np.ceil(l_d / c_max))
n_col = 1
while l_d > n_col * n_row:
n_col += 1
if args:
leg = ax.legend(
args[0],
lst,
loc=loc,
bbox_to_anchor=anchor,
bbox_transform=ax.transAxes,
ncol=n_col,
**kwargs,
)
elif lst:
leg = ax.legend(
lst,
loc=loc,
bbox_to_anchor=anchor,
bbox_transform=ax.transAxes,
ncol=n_col,
**kwargs,
)
else:
leg = ax.legend(
loc=loc,
bbox_to_anchor=anchor,
bbox_transform=ax.transAxes,
ncol=n_col,
**kwargs,
)
if facecolor:
leg.get_frame().set_facecolor(facecolor)
if edgecolor:
leg.get_frame().set_edgecolor(edgecolor)
leg.get_frame().set_alpha(alpha)
return ax
| (ax: matplotlib.axes._axes.Axes, *args: Any, c_max: int = 4, alpha: float = 0.8, side: Literal['top', 'bottom', 'right', 'left', 'top right', 'top left', 'bottom right', 'bottom left'] = 'top', edgecolor: str | tuple[float, float, float] = '', facecolor: str | tuple[float, float, float] = '', anchor_: Optional[tuple[float, float]] = None, **kwargs: Any) -> matplotlib.axes._axes.Axes |
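A usage sketch for topside_legends; arguments mirror the signature above ('ec' is the documented alias for edgecolor).

import matplotlib.pyplot as plt
from plastik.legends import topside_legends

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1], label="up")
ax.plot([0, 1], [1, 0], label="down")
topside_legends(ax, c_max=2, side="top", alpha=0.9, ec="gray")
plt.show()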
7,134 | ipl3checksum | CICKind | Enum that represents a CIC kind | from ipl3checksum import CICKind
| null |
7,138 | wikitools3.api | APIDisabled | API not enabled | class APIDisabled(APIError):
"""API not enabled"""
| null |
7,139 | wikitools3.api | APIError | Base class for errors | class APIError(Exception):
"""Base class for errors"""
| null |
7,140 | wikitools3.api | APIListResult | null | class APIListResult(list):
response = []
| (iterable=(), /) |
7,141 | wikitools3.api | APIRequest | A request to the site's API | class APIRequest:
"""A request to the site's API"""
def __init__(self, wiki, data, write=False, multipart=False):
"""
wiki - A Wiki object
data - API parameters in the form of a dict
write - set to True if doing a write query, so it won't try again on error
multipart - use multipart data transfer, required for file uploads,
requires the poster3 package
maxlag is set by default to 5 but can be changed
format is always set to json
"""
if not canupload and multipart:
raise APIError("The poster3 module is required for multipart support")
self.sleep = 5
self.data = data.copy()
self.data["format"] = "json"
self.iswrite = write
if wiki.assertval is not None and self.iswrite:
self.data["assert"] = wiki.assertval
if not "maxlag" in self.data and not wiki.maxlag < 0:
self.data["maxlag"] = wiki.maxlag
self.multipart = multipart
if self.multipart:
(datagen, self.headers) = multipart_encode(self.data)
self.encodeddata = ""
for singledata in datagen:
self.encodeddata = self.encodeddata + singledata
else:
self.encodeddata = urlencode(self.data, 1)
self.headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Content-Length": str(len(self.encodeddata)),
}
self.headers["User-agent"] = wiki.useragent
if gzip:
self.headers["Accept-Encoding"] = "gzip"
self.wiki = wiki
self.response = False
if wiki.auth:
self.headers["Authorization"] = "Basic {0}".format(
base64.encodebytes((wiki.auth + ":" + wiki.httppass).encode("utf-8")).decode("utf-8")  # encodestring was removed in Python 3.9
).replace("\n", "")
if hasattr(wiki, "passman"):
self.opener = urllib.build_opener(
urllib.HTTPDigestAuthHandler(wiki.passman),
urllib.HTTPCookieProcessor(wiki.cookies),
)
else:
self.opener = urllib.build_opener(urllib.HTTPCookieProcessor(wiki.cookies))
self.request = urllib.Request(self.wiki.apibase, self.encodeddata, self.headers)
def setMultipart(self, multipart=True):
"""Enable multipart data transfer, required for file uploads."""
if not canupload and multipart:
raise APIError("The poster3 package is required for multipart support")
self.multipart = multipart
if multipart:
(datagen, headers) = multipart_encode(self.data)
self.headers.pop("Content-Length")
self.headers.pop("Content-Type")
self.headers.update(headers)
self.encodeddata = ""
for singledata in datagen:
self.encodeddata = self.encodeddata + singledata
else:
self.encodeddata = urlencode(self.data, 1)
self.headers["Content-Length"] = str(len(self.encodeddata))
self.headers["Content-Type"] = "application/x-www-form-urlencoded"
def changeParam(self, param, value):
"""Change or add a parameter after making the request object
Simply changing self.data won't work as it needs to update other things.
value can either be a normal string value, or a file-like object,
which will be uploaded, if setMultipart was called previously.
"""
if param == "format":
raise APIError("You can not change the result format")
self.data[param] = value
if self.multipart:
(datagen, headers) = multipart_encode(self.data)
self.headers.pop("Content-Length")
self.headers.pop("Content-Type")
self.headers.update(headers)
self.encodeddata = ""
for singledata in datagen:
self.encodeddata = self.encodeddata + singledata
else:
self.encodeddata = urlencode(self.data, 1)
self.headers["Content-Length"] = str(len(self.encodeddata))
self.headers["Content-Type"] = "application/x-www-form-urlencoded"
self.request = urllib.Request(self.wiki.apibase, self.encodeddata, self.headers)
def query(self, querycontinue=True):
"""Actually do the query here and return usable stuff
querycontinue - look for query-continue in the results and continue querying
until there is no more data to retrieve (DEPRECATED: use queryGen as a more
reliable and efficient alternative)
"""
if querycontinue and self.data["action"] == "query":
warnings.warn(
"""The querycontinue option is deprecated and will be removed
in a future release, use the new queryGen function instead
for queries requiring multiple requests""",
FutureWarning,
)
data = False
while not data:
rawdata = self.__getRaw()
data = self.__parseJSON(rawdata)
if not data and type(data) is APIListResult:
break
if "error" in data:
if self.iswrite and data["error"]["code"] == "blocked":
raise wiki.UserBlocked(data["error"]["info"])
raise APIError(data["error"]["code"], data["error"]["info"])
if "query-continue" in data and querycontinue:
data = self.__longQuery(data)
return data
def queryGen(self):
"""Unlike the old query-continue method that tried to stitch results
together, which could work poorly for complex result sets and could
use a lot of memory, this yields each set returned by the API and lets
the user process the data.
Loosely based on the recommended implementation on mediawiki.org
"""
reqcopy = copy.deepcopy(self.request)
self.changeParam("continue", "")
while True:
data = False
while not data:
rawdata = self.__getRaw()
data = self.__parseJSON(rawdata)
if not data and type(data) is APIListResult:
break
if "error" in data:
if self.iswrite and data["error"]["code"] == "blocked":
raise wiki.UserBlocked(data["error"]["info"])
raise APIError(data["error"]["code"], data["error"]["info"])
yield data
if "continue" not in data:
break
else:
self.request = copy.deepcopy(reqcopy)
for param in data["continue"]:
self.changeParam(param, data["continue"][param])
def __longQuery(self, initialdata):
"""For queries that require multiple requests"""
self._continues = set()
self._generator = ""
total = initialdata
res = initialdata
params = self.data
numkeys = len(res["query-continue"].keys())
while numkeys > 0:
key1 = ""
key2 = ""
possiblecontinues = list(res["query-continue"].keys())
if len(possiblecontinues) == 1:
key1 = possiblecontinues[0]
keylist = list(res["query-continue"][key1].keys())
if len(keylist) == 1:
key2 = keylist[0]
else:
for key in keylist:
if len(key) < 11:
key2 = key
break
else:
key2 = keylist[0]
else:
for posskey in possiblecontinues:
keylist = list(res["query-continue"][posskey].keys())
for key in keylist:
if len(key) < 11:
key1 = posskey
key2 = key
break
if key1:
break
else:
key1 = possiblecontinues[0]
key2 = list(res["query-continue"][key1].keys())[0]
if isinstance(res["query-continue"][key1][key2], int):
cont = res["query-continue"][key1][key2]
else:
cont = res["query-continue"][key1][key2].encode("utf-8")
if len(key2) >= 11 and key2.startswith("g"):
self._generator = key2
for ckey in self._continues:
params.pop(ckey, None)
else:
self._continues.add(key2)
params[key2] = cont
req = APIRequest(self.wiki, params)
res = req.query(False)
for type in possiblecontinues:
total = resultCombine(type, total, res)
if "query-continue" in res:
numkeys = len(res["query-continue"].keys())
else:
numkeys = 0
return total
def __getRaw(self):
data = False
while not data:
try:
if self.sleep >= self.wiki.maxwaittime or self.iswrite:
catcherror = None
else:
catcherror = Exception
data = self.opener.open(self.request)
self.response = data.info()
if gzip:
encoding = self.response.get("Content-encoding")
if encoding in ("gzip", "x-gzip"):
data = gzip.GzipFile(
"", "rb", 9, StringIO.StringIO(data.read())
)
except catcherror as exc:
errname = sys.exc_info()[0].__name__
errinfo = exc
print(
"%s: %s trying request again in %d seconds"
% (errname, errinfo, self.sleep)
)
time.sleep(self.sleep + 0.5)
self.sleep += 5
return data
def __parseJSON(self, data):
maxlag = True
while maxlag:
try:
maxlag = False
parsed = json.loads(data.read())
content = None
if isinstance(parsed, dict):
content = APIResult(parsed)
content.response = self.response.items()
elif isinstance(parsed, list):
content = APIListResult(parsed)
content.response = self.response.items()
else:
content = parsed
if "error" in content:
error = content["error"]["code"]
if error == "maxlag":
lagtime = int(
re.search("(\d+) seconds", content["error"]["info"]).group(
1
)
)
if lagtime > self.wiki.maxwaittime:
lagtime = self.wiki.maxwaittime
print("Server lag, sleeping for " + str(lagtime) + " seconds")
maxlag = True
time.sleep(int(lagtime) + 0.5)
return False
except: # Something's wrong with the data...
data.seek(0)
if (
"MediaWiki API is not enabled for this site. Add the following line to your LocalSettings.php<pre><b>$wgEnableAPI=true;</b></pre>"
in data.read()
):
raise APIDisabled("The API is not enabled on this site")
print("Invalid JSON, trying request again")
# FIXME: Would be nice if this didn't just go forever if its never going to work
return False
return content
| (wiki, data, write=False, multipart=False) |
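A hypothetical round trip through APIRequest. The Wiki constructor is not shown in these rows; it is assumed here to take the api.php endpoint URL.

from wikitools3 import api, wiki

site = wiki.Wiki("https://en.wikipedia.org/w/api.php")  # assumed constructor
req = api.APIRequest(site, {"action": "query", "meta": "siteinfo"})
result = req.query(querycontinue=False)
print(result["query"]["general"]["sitename"])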
7,142 | wikitools3.api | __getRaw | null | def __getRaw(self):
data = False
while not data:
try:
if self.sleep >= self.wiki.maxwaittime or self.iswrite:
catcherror = None
else:
catcherror = Exception
data = self.opener.open(self.request)
self.response = data.info()
if gzip:
encoding = self.response.get("Content-encoding")
if encoding in ("gzip", "x-gzip"):
data = gzip.GzipFile(
"", "rb", 9, StringIO.StringIO(data.read())
)
except catcherror as exc:
errname = sys.exc_info()[0].__name__
errinfo = exc
print(
"%s: %s trying request again in %d seconds"
% (errname, errinfo, self.sleep)
)
time.sleep(self.sleep + 0.5)
self.sleep += 5
return data
| (self) |
7,143 | wikitools3.api | __longQuery | For queries that require multiple requests | def __longQuery(self, initialdata):
"""For queries that require multiple requests"""
self._continues = set()
self._generator = ""
total = initialdata
res = initialdata
params = self.data
numkeys = len(res["query-continue"].keys())
while numkeys > 0:
key1 = ""
key2 = ""
possiblecontinues = list(res["query-continue"].keys())
if len(possiblecontinues) == 1:
key1 = possiblecontinues[0]
keylist = list(res["query-continue"][key1].keys())
if len(keylist) == 1:
key2 = keylist[0]
else:
for key in keylist:
if len(key) < 11:
key2 = key
break
else:
key2 = keylist[0]
else:
for posskey in possiblecontinues:
keylist = list(res["query-continue"][posskey].keys())
for key in keylist:
if len(key) < 11:
key1 = posskey
key2 = key
break
if key1:
break
else:
key1 = possiblecontinues[0]
key2 = list(res["query-continue"][key1].keys())[0]
if isinstance(res["query-continue"][key1][key2], int):
cont = res["query-continue"][key1][key2]
else:
cont = res["query-continue"][key1][key2].encode("utf-8")
if len(key2) >= 11 and key2.startswith("g"):
self._generator = key2
for ckey in self._continues:
params.pop(ckey, None)
else:
self._continues.add(key2)
params[key2] = cont
req = APIRequest(self.wiki, params)
res = req.query(False)
for type in possiblecontinues:
total = resultCombine(type, total, res)
if "query-continue" in res:
numkeys = len(res["query-continue"].keys())
else:
numkeys = 0
return total
| (self, initialdata) |
7,144 | wikitools3.api | __parseJSON | null | def __parseJSON(self, data):
maxlag = True
while maxlag:
try:
maxlag = False
parsed = json.loads(data.read())
content = None
if isinstance(parsed, dict):
content = APIResult(parsed)
content.response = self.response.items()
elif isinstance(parsed, list):
content = APIListResult(parsed)
content.response = self.response.items()
else:
content = parsed
if "error" in content:
error = content["error"]["code"]
if error == "maxlag":
lagtime = int(
re.search("(\d+) seconds", content["error"]["info"]).group(
1
)
)
if lagtime > self.wiki.maxwaittime:
lagtime = self.wiki.maxwaittime
print("Server lag, sleeping for " + str(lagtime) + " seconds")
maxlag = True
time.sleep(int(lagtime) + 0.5)
return False
except: # Something's wrong with the data...
data.seek(0)
if (
"MediaWiki API is not enabled for this site. Add the following line to your LocalSettings.php<pre><b>$wgEnableAPI=true;</b></pre>"
in data.read()
):
raise APIDisabled("The API is not enabled on this site")
print("Invalid JSON, trying request again")
# FIXME: Would be nice if this didn't just go forever if its never going to work
return False
return content
| (self, data) |
7,145 | wikitools3.api | __init__ |
wiki - A Wiki object
data - API parameters in the form of a dict
write - set to True if doing a write query, so it won't try again on error
multipart - use multipart data transfer, required for file uploads,
requires the poster3 package
maxlag is set by default to 5 but can be changed
format is always set to json
| def __init__(self, wiki, data, write=False, multipart=False):
"""
wiki - A Wiki object
data - API parameters in the form of a dict
write - set to True if doing a write query, so it won't try again on error
multipart - use multipart data transfer, required for file uploads,
requires the poster3 package
maxlag is set by default to 5 but can be changed
format is always set to json
"""
if not canupload and multipart:
raise APIError("The poster3 module is required for multipart support")
self.sleep = 5
self.data = data.copy()
self.data["format"] = "json"
self.iswrite = write
if wiki.assertval is not None and self.iswrite:
self.data["assert"] = wiki.assertval
if not "maxlag" in self.data and not wiki.maxlag < 0:
self.data["maxlag"] = wiki.maxlag
self.multipart = multipart
if self.multipart:
(datagen, self.headers) = multipart_encode(self.data)
self.encodeddata = ""
for singledata in datagen:
self.encodeddata = self.encodeddata + singledata
else:
self.encodeddata = urlencode(self.data, 1)
self.headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Content-Length": str(len(self.encodeddata)),
}
self.headers["User-agent"] = wiki.useragent
if gzip:
self.headers["Accept-Encoding"] = "gzip"
self.wiki = wiki
self.response = False
if wiki.auth:
self.headers["Authorization"] = "Basic {0}".format(
base64.encodebytes((wiki.auth + ":" + wiki.httppass).encode("utf-8")).decode("utf-8")  # encodestring was removed in Python 3.9
).replace("\n", "")
if hasattr(wiki, "passman"):
self.opener = urllib.build_opener(
urllib.HTTPDigestAuthHandler(wiki.passman),
urllib.HTTPCookieProcessor(wiki.cookies),
)
else:
self.opener = urllib.build_opener(urllib.HTTPCookieProcessor(wiki.cookies))
self.request = urllib.Request(self.wiki.apibase, self.encodeddata, self.headers)
| (self, wiki, data, write=False, multipart=False) |
7,146 | wikitools3.api | changeParam | Change or add a parameter after making the request object
Simply changing self.data won't work as it needs to update other things.
value can either be a normal string value, or a file-like object,
which will be uploaded, if setMultipart was called previously.
| def changeParam(self, param, value):
"""Change or add a parameter after making the request object
Simply changing self.data won't work as it needs to update other things.
value can either be a normal string value, or a file-like object,
which will be uploaded, if setMultipart was called previously.
"""
if param == "format":
raise APIError("You can not change the result format")
self.data[param] = value
if self.multipart:
(datagen, headers) = multipart_encode(self.data)
self.headers.pop("Content-Length")
self.headers.pop("Content-Type")
self.headers.update(headers)
self.encodeddata = ""
for singledata in datagen:
self.encodeddata = self.encodeddata + singledata
else:
self.encodeddata = urlencode(self.data, 1)
self.headers["Content-Length"] = str(len(self.encodeddata))
self.headers["Content-Type"] = "application/x-www-form-urlencoded"
self.request = urllib.Request(self.wiki.apibase, self.encodeddata, self.headers)
| (self, param, value) |
7,147 | wikitools3.api | query | Actually do the query here and return usable stuff
querycontinue - look for query-continue in the results and continue querying
until there is no more data to retrieve (DEPRECATED: use queryGen as a more
reliable and efficient alternative)
| def query(self, querycontinue=True):
"""Actually do the query here and return usable stuff
querycontinue - look for query-continue in the results and continue querying
until there is no more data to retrieve (DEPRECATED: use queryGen as a more
reliable and efficient alternative)
"""
if querycontinue and self.data["action"] == "query":
warnings.warn(
"""The querycontinue option is deprecated and will be removed
in a future release, use the new queryGen function instead
for queries requiring multiple requests""",
FutureWarning,
)
data = False
while not data:
rawdata = self.__getRaw()
data = self.__parseJSON(rawdata)
if not data and type(data) is APIListResult:
break
if "error" in data:
if self.iswrite and data["error"]["code"] == "blocked":
raise wiki.UserBlocked(data["error"]["info"])
raise APIError(data["error"]["code"], data["error"]["info"])
if "query-continue" in data and querycontinue:
data = self.__longQuery(data)
return data
| (self, querycontinue=True) |
7,148 | wikitools3.api | queryGen | Unlike the old query-continue method that tried to stitch results
together, which could work poorly for complex result sets and could
use a lot of memory, this yields each set returned by the API and lets
the user process the data.
Loosely based on the recommended implementation on mediawiki.org
| def queryGen(self):
"""Unlike the old query-continue method that tried to stitch results
together, which could work poorly for complex result sets and could
use a lot of memory, this yields each set returned by the API and lets
the user process the data.
Loosely based on the recommended implementation on mediawiki.org
"""
reqcopy = copy.deepcopy(self.request)
self.changeParam("continue", "")
while True:
data = False
while not data:
rawdata = self.__getRaw()
data = self.__parseJSON(rawdata)
if not data and type(data) is APIListResult:
break
if "error" in data:
if self.iswrite and data["error"]["code"] == "blocked":
raise wiki.UserBlocked(data["error"]["info"])
raise APIError(data["error"]["code"], data["error"]["info"])
yield data
if "continue" not in data:
break
else:
self.request = copy.deepcopy(reqcopy)
for param in data["continue"]:
self.changeParam(param, data["continue"][param])
| (self) |
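A sketch of paging through a list query with queryGen; each yielded dict is one API response, with continuation handled internally.

from wikitools3 import api, wiki

site = wiki.Wiki("https://en.wikipedia.org/w/api.php")  # assumed constructor
params = {"action": "query", "list": "allpages", "aplimit": "50"}
for chunk in api.APIRequest(site, params).queryGen():
    for p in chunk["query"]["allpages"]:
        print(p["title"])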
7,149 | wikitools3.api | setMultipart | Enable multipart data transfer, required for file uploads. | def setMultipart(self, multipart=True):
"""Enable multipart data transfer, required for file uploads."""
if not canupload and multipart:
raise APIError("The poster3 package is required for multipart support")
self.multipart = multipart
if multipart:
(datagen, headers) = multipart_encode(self.data)
self.headers.pop("Content-Length")
self.headers.pop("Content-Type")
self.headers.update(headers)
self.encodeddata = ""
for singledata in datagen:
self.encodeddata = self.encodeddata + singledata
else:
self.encodeddata = urlencode(self.data, 1)
self.headers["Content-Length"] = str(len(self.encodeddata))
self.headers["Content-Type"] = "application/x-www-form-urlencoded"
| (self, multipart=True) |
7,150 | wikitools3.api | APIResult | null | class APIResult(dict):
response = []
| null |
7,151 | wikitools3.page | BadNamespace | Invalid namespace number | class BadNamespace(wiki.WikiError):
"""Invalid namespace number"""
| null |
7,152 | wikitools3.page | BadTitle | Invalid title | class BadTitle(wiki.WikiError):
"""Invalid title"""
| null |
7,153 | wikitools3.category | Category | A category on the wiki | class Category(page.Page):
"""A category on the wiki"""
def __init__(
self,
site,
title=False,
check=True,
followRedir=False,
section=False,
sectionnumber=False,
pageid=False,
):
"""
site - A wiki object
title - The page title, as a string or unicode object
check - Checks for existence, normalizes title, required for most things
followRedir - follow redirects (check must be true)
section - the section name
sectionnumber - the section number
pageid - pageid, can be in place of title
"""
page.Page.__init__(
self,
site=site,
title=title,
check=check,
followRedir=followRedir,
section=section,
sectionnumber=sectionnumber,
pageid=pageid,
)
self.members = []
if self.namespace != 14:
self.setNamespace(14, check)
def getAllMembers(self, titleonly=False, reload=False, namespaces=False):
"""Gets a list of pages in the category
titleonly - set to True to only create a list of strings,
else it will be a list of Page objects
reload - reload the list even if it was generated before
namespaces - List of namespaces to restrict to (queries with this option will not be cached)
"""
if self.members and not reload:
if titleonly:
if namespaces is not False:
return [p.title for p in self.members if p.namespace in namespaces]
else:
return [p.title for p in self.members]
if namespaces is False:
return self.members
else:
return [p for p in self.members if p.namespace in namespaces]
else:
ret = []
members = []
for member in self.__getMembersInternal(namespaces):
members.append(member)
if titleonly:
ret.append(member.title)
if titleonly:
return ret
if namespaces is False:
self.members = members
return members
def getAllMembersGen(self, titleonly=False, reload=False, namespaces=False):
"""Generator function for pages in the category
titleonly - set to True to return strings,
else it will return Page objects
reload - reload the list even if it was generated before
namespaces - List of namespaces to restrict to (queries with this option will not be cached)
"""
if self.members and not reload:
for member in self.members:
if namespaces is False or member.namespace in namespaces:
if titleonly:
yield member.title
else:
yield member
else:
if namespaces is False:
self.members = []
for member in self.__getMembersInternal(namespaces):
if namespaces is False:
self.members.append(member)
if titleonly:
yield member.title
else:
yield member
def __getMembersInternal(self, namespaces=False):
params = {
"action": "query",
"list": "categorymembers",
"cmtitle": self.title,
"cmlimit": self.site.limit,
"cmprop": "title",
}
if namespaces is not False:
params["cmnamespace"] = "|".join([str(ns) for ns in namespaces])
while True:
req = api.APIRequest(self.site, params)
data = req.query(False)
for item in data["query"]["categorymembers"]:
yield page.Page(
self.site, item["title"], check=False, followRedir=False
)
try:
params["cmcontinue"] = data["query-continue"]["categorymembers"][
"cmcontinue"
]
except:
break
| (site, title=False, check=True, followRedir=False, section=False, sectionnumber=False, pageid=False) |
7,154 | wikitools3.category | __getMembersInternal | null | def __getMembersInternal(self, namespaces=False):
params = {
"action": "query",
"list": "categorymembers",
"cmtitle": self.title,
"cmlimit": self.site.limit,
"cmprop": "title",
}
if namespaces is not False:
params["cmnamespace"] = "|".join([str(ns) for ns in namespaces])
while True:
req = api.APIRequest(self.site, params)
data = req.query(False)
for item in data["query"]["categorymembers"]:
yield page.Page(
self.site, item["title"], check=False, followRedir=False
)
try:
params["cmcontinue"] = data["query-continue"]["categorymembers"][
"cmcontinue"
]
except:
break
| (self, namespaces=False) |
7,155 | wikitools3.page | __extractToList | null | def __extractToList(self, json, stuff):
list = []
if self.pageid == 0:
self.pageid = list(json["query"]["pages"].keys())[0]
if stuff in json["query"]["pages"][str(self.pageid)]:
for item in json["query"]["pages"][str(self.pageid)][stuff]:
list.append(item["title"])
return list
| (self, json, stuff) |
7,156 | wikitools3.page | __getHistoryInternal | null | def __getHistoryInternal(self, direction, content, limit, rvcontinue):
if self.pageid == 0 and not self.title:
self.setPageInfo()
if not self.exists:
raise NoPage
if direction != "newer" and direction != "older":
raise wiki.WikiError("direction must be 'newer' or 'older'")
params = {
"action": "query",
"prop": "revisions",
"rvdir": direction,
"rvprop": "ids|flags|timestamp|user|userid|size|sha1|comment",
"continue": "",
"rvlimit": limit,
}
if self.pageid:
params["pageids"] = self.pageid
else:
params["titles"] = self.title
if content:
params["rvprop"] += "|content"
if rvcontinue:
params["continue"] = rvcontinue["continue"]
params["rvcontinue"] = rvcontinue["rvcontinue"]
req = api.APIRequest(self.site, params)
response = req.query(False)
id = list(response["query"]["pages"].keys())[0]
if not self.pageid:
self.pageid = int(id)
revs = response["query"]["pages"][id]["revisions"]
rvc = None
if "continue" in response:
rvc = response["continue"]
return (revs, rvc)
| (self, direction, content, limit, rvcontinue) |
7,157 | wikitools3.page | __getSection | null | def __getSection(self, section):
if not self.title:
self.setPageInfo()
params = {"action": "parse", "page": self.title, "prop": "sections"}
number = False
req = api.APIRequest(self.site, params)
response = req.query()
for item in response["parse"]["sections"]:
if section == item["line"] or section == item["anchor"]:
if item["index"].startswith(
"T"
): # TODO: It would be cool if it set the page title to the template in this case
continue
number = item["index"]
break
return number
| (self, section) |
7,158 | wikitools3.page | __eq__ | null | def __eq__(self, other):
if not isinstance(other, Page):
return False
if self.title:
if self.title == other.title and self.site == other.site:
return True
else:
if self.pageid == other.pageid and self.site == other.site:
return True
return False
| (self, other) |
7,159 | wikitools3.page | __hash__ | null | def __hash__(self):
return int(self.pageid) ^ hash(self.site.apibase)
| (self) |
7,160 | wikitools3.category | __init__ |
site - A wiki object
title - The page title, as a string or unicode object
check - Checks for existence, normalizes title, required for most things
followRedir - follow redirects (check must be true)
section - the section name
sectionnumber - the section number
pageid - pageid, can be in place of title
| def __init__(
self,
site,
title=False,
check=True,
followRedir=False,
section=False,
sectionnumber=False,
pageid=False,
):
"""
site - A wiki object
title - The page title, as a string or unicode object
check - Checks for existence, normalizes title, required for most things
followRedir - follow redirects (check must be true)
section - the section name
sectionnumber - the section number
pageid - pageid, can be in place of title
"""
page.Page.__init__(
self,
site=site,
title=title,
check=check,
followRedir=followRedir,
section=section,
sectionnumber=sectionnumber,
pageid=pageid,
)
self.members = []
if self.namespace != 14:
self.setNamespace(14, check)
| (self, site, title=False, check=True, followRedir=False, section=False, sectionnumber=False, pageid=False) |
7,161 | wikitools3.page | __ne__ | null | def __ne__(self, other):
if not isinstance(other, Page):
return True
if self.title:
if self.title == other.title and self.site == other.site:
return False
else:
if self.pageid == other.pageid and self.site == other.site:
return False
return True
| (self, other) |
7,162 | wikitools3.page | __repr__ | null | def __repr__(self):
if self.title:
title = self.title
else:
title = "pageid: " + self.pageid
return (
"<"
+ self.__module__
+ "."
+ self.__class__.__name__
+ " "
+ repr(title)
+ " using "
+ repr(self.site.apibase)
+ ">"
)
| (self) |
7,163 | wikitools3.page | __str__ | null | def __str__(self):
if self.title:
title = self.title
else:
title = "pageid: " + self.pageid
return (
self.__class__.__name__
+ " "
+ repr(title)
+ " from "
+ repr(self.site.domain)
)
| (self) |
7,164 | wikitools3.page | canHaveSubpages | Is the page in a namespace that allows subpages? | def canHaveSubpages(self):
"""Is the page in a namespace that allows subpages?"""
if not self.title:
self.setPageInfo()
return "subpages" in self.site.namespaces[self.namespace]
| (self) |
7,165 | wikitools3.page | delete | Delete the page
reason - summary for log
watch - add the page to your watchlist
unwatch - remove the page from your watchlist
| def delete(self, reason=False, watch=False, unwatch=False):
"""Delete the page
reason - summary for log
watch - add the page to your watchlist
unwatch - remove the page from your watchlist
"""
if not self.title and self.pageid == 0:
self.setPageInfo()
if not self.exists:
raise NoPage
token = self.site.getToken("csrf")
params = {
"action": "delete",
"token": token,
}
if self.pageid:
params["pageid"] = self.pageid
else:
params["title"] = self.title
if reason:
params["reason"] = reason.encode("utf-8")
if watch:
params["watch"] = "1"
if unwatch:
params["unwatch"] = "1"
req = api.APIRequest(self.site, params, write=True)
result = req.query()
if "delete" in result:
self.pageid = 0
self.exists = False
self.wikitext = ""
self.templates = ""
self.links = ""
self.protection = {}
self.section = False
return result
| (self, reason=False, watch=False, unwatch=False) |
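A sketch of Page.delete; it assumes a logged-in account with delete rights, and the Wiki constructor is assumed as before.

from wikitools3 import page, wiki

site = wiki.Wiki("https://example.org/w/api.php")  # assumed constructor
p = page.Page(site, title="Sandbox/Stale draft")
result = p.delete(reason="Cleaning up stale drafts")  # needs delete rights
print(result)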
7,166 | wikitools3.page | edit | Edit the page
Arguments are a subset of the API's action=edit arguments, valid arguments
are defined in the validargs set
To skip the MD5 check, set "skipmd5" keyword argument to True
http://www.mediawiki.org/wiki/API:Edit_-_Create%26Edit_pages#Parameters
For backwards compatibility:
'newtext' is equivalent to 'text'
'basetime' is equivalent to 'basetimestamp'
| def edit(self, *args, **kwargs):
"""Edit the page
Arguments are a subset of the API's action=edit arguments, valid arguments
are defined in the validargs set
To skip the MD5 check, set "skipmd5" keyword argument to True
http://www.mediawiki.org/wiki/API:Edit_-_Create%26Edit_pages#Parameters
For backwards compatibility:
'newtext' is equivalent to 'text'
'basetime' is equivalent to 'basetimestamp'
"""
validargs = set(
[
"text",
"summary",
"minor",
"notminor",
"bot",
"basetimestamp",
"starttimestamp",
"recreate",
"createonly",
"nocreate",
"watch",
"unwatch",
"watchlist",
"prependtext",
"appendtext",
"section",
"captchaword",
"captchaid",
]
)
# For backwards compatibility
if "newtext" in kwargs:
kwargs["text"] = kwargs["newtext"]
del kwargs["newtext"]
if "basetime" in kwargs:
kwargs["basetimestamp"] = kwargs["basetime"]
del kwargs["basetime"]
if len(args) and "text" not in kwargs:
kwargs["text"] = args[0]
skipmd5 = False
if "skipmd5" in kwargs and kwargs["skipmd5"]:
skipmd5 = True
invalid = set(kwargs.keys()).difference(validargs)
if invalid:
for arg in invalid:
del kwargs[arg]
if not self.title:
self.setPageInfo()
if not "section" in kwargs and self.section is not False:
kwargs["section"] = self.section
if (
not "text" in kwargs
and not "prependtext" in kwargs
and not "appendtext" in kwargs
):
raise EditError("No text specified")
if "prependtext" in kwargs and "section" in kwargs:
raise EditError("Bad param combination")
if "createonly" in kwargs and "nocreate" in kwargs:
raise EditError("Bad param combination")
token = self.site.getToken("csrf")
if "text" in kwargs:
hashtext = kwargs["text"]
elif "prependtext" in kwargs and "appendtext" in kwargs:
hashtext = kwargs["prependtext"] + kwargs["appendtext"]
elif "prependtext" in kwargs:
hashtext = kwargs["prependtext"]
else:
hashtext = kwargs["appendtext"]
params = {
"action": "edit",
"title": self.title,
"token": token,
}
if not skipmd5:
if not isinstance(hashtext, str):
hashtext = str(hashtext)
hashtext = unicodedata.normalize("NFC", hashtext).encode("utf8")
params["md5"] = md5(hashtext).hexdigest()
params.update(kwargs)
req = api.APIRequest(self.site, params, write=True)
result = req.query()
if "edit" in result and result["edit"]["result"] == "Success":
self.wikitext = ""
self.links = []
self.templates = []
self.exists = True
return result
| (self, *args, **kwargs) |
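A sketch of Page.edit covering a full rewrite and an append; argument names come from the validargs set above.

from wikitools3 import page, wiki

site = wiki.Wiki("https://example.org/w/api.php")  # assumed constructor
p = page.Page(site, title="Sandbox")
p.edit(text="New page text", summary="Replace content", minor=True)
p.edit(appendtext="\n* one more line", summary="Append a line")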
7,167 | wikitools3.category | getAllMembers | Gets a list of pages in the category
titleonly - set to True to only create a list of strings,
else it will be a list of Page objects
reload - reload the list even if it was generated before
namespaces - List of namespaces to restrict to (queries with this option will not be cached)
| def getAllMembers(self, titleonly=False, reload=False, namespaces=False):
"""Gets a list of pages in the category
titleonly - set to True to only create a list of strings,
else it will be a list of Page objects
reload - reload the list even if it was generated before
namespaces - List of namespaces to restrict to (queries with this option will not be cached)
"""
if self.members and not reload:
if titleonly:
if namespaces is not False:
return [p.title for p in self.members if p.namespace in namespaces]
else:
return [p.title for p in self.members]
if namespaces is False:
return self.members
else:
return [p for p in self.members if p.namespace in namespaces]
else:
ret = []
members = []
for member in self.__getMembersInternal(namespaces):
members.append(member)
if titleonly:
ret.append(member.title)
if titleonly:
return ret
if namespaces is False:
self.members = members
return members
| (self, titleonly=False, reload=False, namespaces=False) |
7,168 | wikitools3.category | getAllMembersGen | Generator function for pages in the category
titleonly - set to True to return strings,
else it will return Page objects
reload - reload the list even if it was generated before
namespaces - List of namespaces to restrict to (queries with this option will not be cached)
| def getAllMembersGen(self, titleonly=False, reload=False, namespaces=False):
"""Generator function for pages in the category
titleonly - set to True to return strings,
else it will return Page objects
reload - reload the list even if it was generated before
namespaces - List of namespaces to restrict to (queries with this option will not be cached)
"""
if self.members and not reload:
for member in self.members:
if namespaces is False or member.namespace in namespaces:
if titleonly:
yield member.title
else:
yield member
else:
if namespaces is False:
self.members = []
for member in self.__getMembersInternal(namespaces):
if namespaces is False:
self.members.append(member)
if titleonly:
yield member.title
else:
yield member
| (self, titleonly=False, reload=False, namespaces=False) |
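A sketch of streaming category members with getAllMembersGen, restricted to the main namespace (0).

from wikitools3 import category, wiki

site = wiki.Wiki("https://en.wikipedia.org/w/api.php")  # assumed constructor
cat = category.Category(site, title="Category:Physics")
for title in cat.getAllMembersGen(titleonly=True, namespaces=[0]):
    print(title)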
7,169 | wikitools3.page | getCategories | Gets a list of all the categories on the page
force - load the list even if we already loaded it before
| def getCategories(self, force=False):
"""Gets all list of all the categories on the page
force - load the list even if we already loaded it before
"""
if self.categories and not force:
return self.categories
if self.pageid == 0 and not self.title:
self.setPageInfo()
if not self.exists:
raise NoPage
params = {
"action": "query",
"prop": "categories",
"cllimit": self.site.limit,
}
if self.pageid:
params["pageids"] = self.pageid
else:
params["titles"] = self.title
req = api.APIRequest(self.site, params)
self.categories = []
for data in req.queryGen():
self.categories.extend(self.__extractToList(data, "categories"))
return self.categories
| (self, force=False) |
7,170 | wikitools3.page | getHistory | Get the history of a page
direction - 2 options: 'older' (default) - start with the current revision and get older ones
'newer' - start with the oldest revision and get newer ones
content - If False, get only metadata (timestamp, edit summary, user, etc)
If True (default), also get the revision text
limit - Only retrieve a certain number of revisions. If 'all' (default), all revisions are returned
The data is returned in essentially the same format as the API, a list of dicts that look like:
{u'*': u"Page content", # Only returned when content=True
u'comment': u'Edit summary',
u'contentformat': u'text/x-wiki', # Only returned when content=True
u'contentmodel': u'wikitext', # Only returned when content=True
u'parentid': 139946, # id of previous revision
u'revid': 139871, # revision id
u'sha1': u'0a5cec3ca3e084e767f00c9a5645c17ac27b2757', # sha1 hash of page content
u'size': 129, # size of page in bytes
u'timestamp': u'2002-08-05T14:11:27Z', # timestamp of edit
u'user': u'Username',
u'userid': 48 # user id
}
Note that unlike other get* functions, the data is not cached
| def getHistory(self, direction="older", content=True, limit="all"):
"""Get the history of a page
direction - 2 options: 'older' (default) - start with the current revision and get older ones
'newer' - start with the oldest revision and get newer ones
content - If False, get only metadata (timestamp, edit summary, user, etc)
If True (default), also get the revision text
limit - Only retrieve a certain number of revisions. If 'all' (default), all revisions are returned
The data is returned in essentially the same format as the API, a list of dicts that look like:
{u'*': u"Page content", # Only returned when content=True
u'comment': u'Edit summary',
u'contentformat': u'text/x-wiki', # Only returned when content=True
u'contentmodel': u'wikitext', # Only returned when content=True
u'parentid': 139946, # id of previous revision
u'revid': 139871, # revision id
u'sha1': u'0a5cec3ca3e084e767f00c9a5645c17ac27b2757', # sha1 hash of page content
u'size': 129, # size of page in bytes
u'timestamp': u'2002-08-05T14:11:27Z', # timestamp of edit
u'user': u'Username',
u'userid': 48 # user id
}
Note that unlike other get* functions, the data is not cached
"""
max = limit
if limit == "all":
max = float("inf")
if limit == "all" or limit > self.site.limit:
limit = self.site.limit
history = []
rvc = None
while True:
revs, rvc = self.__getHistoryInternal(direction, content, limit, rvc)
history = history + revs
if len(history) == max or rvc is None:
break
if max - len(history) < self.site.limit:
limit = max - len(history)
return history
| (self, direction='older', content=True, limit='all') |
7,171 | wikitools3.page | getHistoryGen | Generator function for page history
The interface is the same as getHistory, but it will only retrieve 1 revision at a time.
This will be slower and have much higher network overhead, but does not require storing
the entire page history in memory
| def getHistoryGen(self, direction="older", content=True, limit="all"):
"""Generator function for page history
The interface is the same as getHistory, but it will only retrieve 1 revision at a time.
This will be slower and have much higher network overhead, but does not require storing
the entire page history in memory
"""
max = limit
count = 0
rvc = None
while True:
revs, rvc = self.__getHistoryInternal(direction, content, 1, rvc)
yield revs[0]
count += 1
if count == max or rvc is None:
break
| (self, direction='older', content=True, limit='all') |
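A sketch of walking recent revisions with getHistoryGen; the metadata keys follow the getHistory docstring above.

from wikitools3 import page, wiki

site = wiki.Wiki("https://en.wikipedia.org/w/api.php")  # assumed constructor
p = page.Page(site, title="Python (programming language)")
for rev in p.getHistoryGen(content=False, limit=5):
    print(rev["timestamp"], rev["user"], rev["comment"])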
7,172 | wikitools3.page | getLinks | Gets a list of all the internal links *on* the page
force - load the list even if we already loaded it before
| def getLinks(self, force=False):
"""Gets a list of all the internal links *on* the page
force - load the list even if we already loaded it before
"""
if self.links and not force:
return self.links
if self.pageid == 0 and not self.title:
self.setPageInfo()
if not self.exists:
raise NoPage
params = {
"action": "query",
"prop": "links",
"pllimit": self.site.limit,
}
if self.pageid > 0:
params["pageids"] = self.pageid
else:
params["titles"] = self.title
req = api.APIRequest(self.site, params)
self.links = []
for data in req.queryGen():
self.links.extend(self.__extractToList(data, "links"))
return self.links
| (self, force=False) |
7,173 | wikitools3.page | getProtection | Returns the current protection status of the page | def getProtection(self, force=False):
"""Returns the current protection status of the page"""
if self.protection and not force:
return self.protection
if self.pageid == 0 and not self.title:
self.setPageInfo()
params = {
"action": "query",
"prop": "info",
"inprop": "protection",
}
if not self.exists or self.pageid <= 0:
params["titles"] = self.title
else:
params["titles"] = self.title
req = api.APIRequest(self.site, params)
response = req.query(False)
pagedata = list(list(response["query"].values())[0].values())[0]  # Py3: dict views are not indexable
for pr in pagedata["protection"]:
if pr["level"]:
if pr["expiry"] == "infinity":
expiry = "infinity"
else:
expiry = datetime.datetime.strptime(
pr["expiry"], "%Y-%m-%dT%H:%M:%SZ"
)
self.protection[pr["type"]] = {"expiry": expiry, "level": pr["level"]}
return self.protection
| (self, force=False) |
7,174 | wikitools3.page | getTemplates | Gets a list of all the templates on the page
force - load the list even if we already loaded it before
| def getTemplates(self, force=False):
"""Gets all list of all the templates on the page
force - load the list even if we already loaded it before
"""
if self.templates and not force:
return self.templates
if self.pageid == 0 and not self.title:
self.setPageInfo()
if not self.exists:
raise NoPage
params = {
"action": "query",
"prop": "templates",
"tllimit": self.site.limit,
}
if self.pageid:
params["pageids"] = self.pageid
else:
params["titles"] = self.title
req = api.APIRequest(self.site, params)
self.templates = []
for data in req.queryGen():
self.templates.extend(self.__extractToList(data, "templates"))
return self.templates
| (self, force=False) |
7,175 | wikitools3.page | getWikiText | Gets the Wikitext of the page
expandtemplates - expand the templates to wikitext instead of transclusions
force - load the text even if we already loaded it before
| def getWikiText(self, expandtemplates=False, force=False):
"""Gets the Wikitext of the page
expandtemplates - expand the templates to wikitext instead of transclusions
force - load the text even if we already loaded it before
"""
if self.wikitext and not force:
return self.wikitext
if self.pageid == 0 and not self.title:
self.setPageInfo()
if not self.exists:
raise NoPage
params = {
"action": "query",
"prop": "revisions",
"rvprop": "content|timestamp",
"rvlimit": "1",
}
if self.pageid:
params["pageids"] = self.pageid
else:
params["titles"] = self.title
if expandtemplates:
params["rvexpandtemplates"] = "1"
if self.section is not False:
params["rvsection"] = self.section
req = api.APIRequest(self.site, params)
response = req.query(False)
if self.pageid == 0:
self.pageid = int(list(response["query"]["pages"].keys())[0])
if self.pageid == -1:
self.exists = False
raise NoPage
self.wikitext = response["query"]["pages"][str(self.pageid)]["revisions"][0][
"*"
].encode("utf-8")
self.lastedittime = response["query"]["pages"][str(self.pageid)]["revisions"][
0
]["timestamp"]
return self.wikitext
| (self, expandtemplates=False, force=False) |
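A sketch of fetching page source with getWikiText; note that the method as written above returns UTF-8-encoded bytes.

from wikitools3 import page, wiki

site = wiki.Wiki("https://en.wikipedia.org/w/api.php")  # assumed constructor
p = page.Page(site, title="Main Page")
text = p.getWikiText()
print(text[:200])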
7,176 | wikitools3.page | isRedir | Is the page a redirect? | def isRedir(self):
"""Is the page a redirect?"""
params = {"action": "query", "redirects": ""}
if not self.exists:
raise NoPage
if self.pageid != 0 and self.exists:
params["pageids"] = self.pageid
elif self.title:
params["titles"] = self.title
else:
self.setPageInfo()
if self.pageid != 0 and self.exists:
params["pageids"] = self.pageid
else:
raise NoPage
req = api.APIRequest(self.site, params)
res = req.query(False)
if "redirects" in res["query"]:
return True
else:
return False
| (self) |
7,177 | wikitools3.page | isTalk | Is the page a discussion page? | def isTalk(self):
"""Is the page a discussion page?"""
if not self.title:
self.setPageInfo()
return self.namespace % 2 == 1 and self.namespace >= 0
| (self) |