docstring
stringlengths 52
499
| function
stringlengths 67
35.2k
| __index_level_0__
int64 52.6k
1.16M
|
---|---|---|
Returns metric usage for client and its subhierarchy.
Args:
auth: <cik> for authentication
resource: ResourceID
metrics: Metric to measure (as string), it may be an entity or consumable.
starttime: Start time of window to measure usage (format is ___).
endtime: End time of window to measure usage (format is ___).
|
def usage(self, auth, resource, metric, starttime, endtime, defer=False):
    """Query metric usage for a client and its subhierarchy.

    Args:
        auth: <cik> used for authentication.
        resource: ResourceID to report usage for.
        metric: metric to measure (entity or consumable), as a string.
        starttime: start of the measurement window.
        endtime: end of the measurement window.
        defer: when True, defer the RPC call.

    Returns:
        The result of the underlying 'usage' RPC call.
    """
    call_args = [resource, metric, starttime, endtime]
    return self._call('usage', auth, call_args, defer)
| 857,261 |
This is a HTTP Long Polling API which allows a user to wait on specific resources to be
updated.
Args:
auth: <cik> for authentication
resource: <ResourceID> to specify what resource to wait on.
options: Options for the wait, including a timeout (in ms, max 5 min) and a start time
(null acts as the moment the request is received)
|
def wait(self, auth, resource, options, defer=False):
    """Long-poll for updates on a specific resource.

    Args:
        auth: <cik> used for authentication.
        resource: <ResourceID> of the resource to wait on.
        options: wait options, including a timeout (in ms, max 5 min) and
            a start time (null acts as "when the request is received").
        defer: when True, defer the RPC call.

    Returns:
        The result of the underlying 'wait' RPC call.
    """
    # notimeout=True lets the server, not the client, control the timeout.
    return self._call('wait', auth, [resource, options], defer,
                      notimeout=True)
| 857,262 |
Writes a single value to the resource specified.
Args:
auth: cik for authentication.
resource: resource to write to.
value: value to write
options: options.
|
def write(self, auth, resource, value, options=None, defer=False):
    """Write a single value to the specified resource.

    Args:
        auth: cik for authentication.
        resource: resource to write to.
        value: value to write.
        options: optional dict of write options (defaults to an empty dict).
        defer: when True, defer the RPC call.

    Returns:
        The result of the underlying 'write' RPC call.
    """
    # Fix: the old signature used a mutable default argument (options={}),
    # which is shared across all calls. Using None and creating a fresh
    # dict per call preserves behavior safely.
    if options is None:
        options = {}
    return self._call('write', auth, [resource, value, options], defer)
| 857,263 |
Writes the given values for the respective resources in the list, all writes have same
timestamp.
Args:
auth: cik for authentication.
entries: List of key, value lists. eg. [[key, value], [k,v],,,]
|
def writegroup(self, auth, entries, defer=False):
    """Write values for several resources at once; all writes share one
    timestamp.

    Args:
        auth: cik for authentication.
        entries: list of [key, value] pairs, e.g. [[k1, v1], [k2, v2]].
        defer: when True, defer the RPC call.

    Returns:
        The result of the underlying 'writegroup' RPC call.
    """
    payload = [entries]
    return self._call('writegroup', auth, payload, defer)
| 857,264 |
Write the content of 'buf' out in a hexdump style
Args:
fout: file object to write to
buf: the buffer to be pretty printed
offset: the starting offset of the buffer
width: how many bytes should be displayed per row
|
def write_hex(fout, buf, offset, width=16):
    """Write the content of 'buf' out in a hexdump style.

    Runs of full-width all-zero rows are collapsed into a single
    "skipped zeroes" marker line.

    Args:
        fout: file object to write to
        buf: the buffer to be pretty printed
        offset: the starting offset of the buffer (used for the address column)
        width: how many bytes should be displayed per row
    """
    skipped_zeroes = 0
    for i, chunk in enumerate(chunk_iter(buf, width)):
        # zero skipping: count full all-zero rows instead of printing them
        if chunk == (b"\x00" * width):
            skipped_zeroes += 1
            continue
        elif skipped_zeroes != 0:
            # a non-zero row ended the run: emit one summary line for it
            # NOTE(review): a zero run at the very end of buf is silently
            # dropped (loop exits without flushing the counter) — confirm
            # this is intended.
            fout.write(" -- skipped zeroes: {}\n".format(skipped_zeroes))
            skipped_zeroes = 0
        # starting address of the current line
        fout.write("{:016x} ".format(i * width + offset))
        # bytes column: hex bytes, with an extra gap between 8-byte groups
        column = " ".join([" ".join(["{:02x}".format(c) for c in subchunk])
                           for subchunk in chunk_iter(chunk, 8)])
        # expected column width: width*2 hex digits + (width-1) separators
        # + (width//8 - 1) group gaps; pad a short (final) row so the
        # ASCII column still lines up
        w = width * 2 + (width - 1) + ((width // 8) - 1)
        if len(column) != w:
            column += " " * (w - len(column))
        fout.write(column)
        # ASCII character column; non-printable bytes are shown as '.'
        fout.write(" |")
        for c in chunk:
            if c in PRINTABLE_CHARS:
                fout.write(chr(c))
            else:
                fout.write(".")
        if len(chunk) < width:
            # pad the ASCII column of a short final row
            fout.write(" " * (width - len(chunk)))
        fout.write("|")
        fout.write("\n")
| 857,464 |
Initialises a Misspellings instance.
Args:
files: List of files to check. More can be added with add().
misspelling_file: Filename with a list of misspelled words
and their alternatives.
Raises:
IOError: Raised if misspelling_file can't be found.
ValueError: Raised if misspelling_file isn't correctly formatted.
|
def __init__(self, files=None, misspelling_file=None):
    """Initialise a Misspellings instance.

    Args:
        files: list of files to check. More can be added with add().
        misspelling_file: filename with a list of misspelled words and
            their alternatives, one "<bad_word> <correction>" pair per line.

    Raises:
        IOError: raised if misspelling_file can't be found.
        ValueError: raised if misspelling_file isn't correctly formatted.
    """
    # Fix: always initialise the dict so suggestions() works even when no
    # misspelling file was supplied (previously self._misspelling_dict was
    # only created inside the `if`, causing AttributeError later).
    self._misspelling_dict = defaultdict(list)
    if misspelling_file:
        with open(misspelling_file, 'r') as f:
            for line in f:
                # split(' ', 1) raises ValueError on malformed lines,
                # matching the documented contract.
                bad_word, correction = line.strip().split(' ', 1)
                self._misspelling_dict[bad_word].append(correction)
    self._files = []
    if files:
        self.add(files)
| 857,700 |
Adds files to check.
Args:
files: List of files to check.
|
def add(self, files):
    """Add files to check.

    Args:
        files: a single filename (str) or a list of filenames.
    """
    # Fix: isinstance() is the correct type check; comparing
    # __class__.__name__ to 'str' breaks for str subclasses (which would
    # then be iterated character by character by extend()).
    if isinstance(files, str):
        self._files.append(files)
    else:
        self._files.extend(files)
| 857,701 |
Returns a list of suggestions for a misspelled word.
Args:
word: The word to check.
Returns:
List of zero or more suggested replacements for word.
|
def suggestions(self, word):
    """Return a sorted list of suggested replacements for a misspelled word.

    Both the word as given and its lowercase form are looked up; the case
    of each suggestion is adapted to match the input word.

    Args:
        word: the word to check.

    Returns:
        List of zero or more suggested replacements for word.
    """
    exact_hits = self._misspelling_dict.get(word, [])
    lowered_hits = self._misspelling_dict.get(word.lower(), [])
    merged = set(exact_hits) | set(lowered_hits)
    recased = [same_case(source=word, destination=candidate)
               for candidate in merged]
    return sorted(recased)
| 857,703 |
Send a message to a channel.
Args:
title: Message title.
title_link: Link of the message title.
message: Message body.
color: Message line color on Slack. This parameter should be one of the following values: 'good', 'warning',
'danger' or any hex color code.
Returns:
response: Response of Slack API.
Raises:
Exception:
|
def __send_notification(self, message, title, title_link='', color='good',
                        fields='', log_level=LogLv.INFO):
    """Send a message to a channel via the Slack API.

    Args:
        message: message body.
        title: message title.
        title_link: link of the message title.
        color: message line color on Slack; one of 'good', 'warning',
            'danger' or any hex color code.
        fields: extra attachment fields forwarded into the payload.
        log_level: severity of this notification; messages below the
            instance's configured level are dropped.

    Returns:
        Response of the Slack API, or None when filtered out by log level.

    Raises:
        Exception: raised with the formatted traceback if the post fails.
    """
    # Drop messages below the configured severity threshold.
    if log_level < self.log_level:
        return None
    payload = self.__build_payload(message, title, title_link, color, fields)
    try:
        response = self.__post(payload)
    except Exception:
        # NOTE(review): re-wrapping in a bare Exception discards the
        # original exception type; consider a plain `raise` or exception
        # chaining (`raise ... from e`) instead.
        raise Exception(traceback.format_exc())
    return response
| 857,734 |
Implement the nltk.stem.StemmerI interface and defaults to "passthrough" stemming
Implements a `stem()` method which transcodes a string (typically a token or n-gram).
Arguments:
stemmer (function or nltk.stem.StemmerI or None): Indicate the stemmer to use.
>>> Stemmer().stem('Running')
'Running'
|
def __init__(self, stemmer=None):
    """Implement the nltk.stem.StemmerI interface, defaulting to
    "passthrough" stemming.

    Provides a `stem()` method which transcodes a string (typically a
    token or n-gram).

    Arguments:
        stemmer (function or nltk.stem.StemmerI or None): The stemmer to
            use. True selects nltk's LancasterStemmer; an object with a
            callable `stem` or `lemmatize` attribute is used directly;
            anything else falls back to passthrough stemming.

    >>> Stemmer().stem('Running')
    'Running'
    """
    if stemmer is True:
        # explicit opt-in to nltk's (aggressive) Lancaster stemmer
        self._stemmer = nltk.stem.LancasterStemmer()
        self.stem = self._stemmer.stem
    elif hasattr(stemmer, 'stem') and callable(stemmer.stem):
        # any StemmerI-like object: delegate to its stem()
        self._stemmer = stemmer
        self.stem = self._stemmer.stem
    elif hasattr(stemmer, 'lemmatize') and callable(stemmer.lemmatize):
        # lemmatizers (e.g. WordNet-style) are adapted via their lemmatize()
        self._stemmer = stemmer
        # this may produce an unpicklable bound method
        self.stem = self._stemmer.lemmatize
    else:
        # passthrough: the base class presumably supplies an identity
        # stem() — confirm against the class definition (not visible here)
        super(Stemmer, self).__init__()
        self._stemmer = None
| 858,075 |
Flatten a multi-dimensional list and return an iterable
Note that dict and str will not be expanded, instead, they will be kept as a single element.
Args:
l (list): The list needs to be flattened
Returns:
An iterable over the flattened list. To get a list instead, use ``list(flatten(l))``
|
def flatten(l):
    """Flatten an arbitrarily nested list into a lazy iterable.

    dict, str and bytes values are treated as atoms and yielded whole
    rather than being expanded.

    Args:
        l (list): the (possibly nested) list to flatten.

    Returns:
        An iterator over the flattened elements; use ``list(flatten(l))``
        to obtain a list.
    """
    for item in l:
        treat_as_atom = (isinstance(item, (str, bytes, dict))
                         or not isinstance(item, Iterable))
        if treat_as_atom:
            yield item
        else:
            # recurse into nested iterables
            yield from flatten(item)
| 858,388 |
Combine two arguments into a single flat list
It is used when you are not sure whether arguments are lists but want to combine them into one flat list
Args:
a1: list or other thing
a2: list or other thing
Returns:
list: a flat list contain a1 and a2
|
def combine(a1, a2):
    """Combine two arguments into a single flat list.

    Use this when you are not sure whether the arguments are lists but
    want one flat list containing both.

    Args:
        a1: list or any other object.
        a2: list or any other object.

    Returns:
        list: a flat list containing a1 and a2.
    """
    first = a1 if isinstance(a1, list) else [a1]
    second = a2 if isinstance(a2, list) else [a2]
    return first + second
| 858,390 |
Check whether an object is single or pair or neither.
Of course, all pairs are singles, so what the function is really detecting is whether an object is only single or at the same time a pair.
Args:
obj (object): Literally anything.
Returns:
str: 'Single', or 'Pair', or 'Neither'
|
def singleOrPair(obj):
    """Check whether an object is a Single, a Pair, or neither.

    All Pairs are Singles, so this really detects whether an object is
    only a Single or also a Pair.

    Args:
        obj (object): literally anything.

    Returns:
        str: 'Single', 'Pair', or 'Neither'.
    """
    # Objects whose MRO is just (cls, object) cannot be Single/Pair.
    if len(obj.__class__.__mro__) <= 2:
        return 'Neither'
    # Pair is checked first because Pair is a subclass of Single.
    if ancestorJr(obj) is Pair:
        return 'Pair'
    if ancestor(obj) is Single:
        return 'Single'
    return 'Neither'
| 858,391 |
Remove every instance that matches the input from a list
Match with the == operation, which can be defined in __eq__.
Args:
toBeRemoved (object): the same object you want to remove from the list.
l (list): the list you want to remove stuff from.
|
def removeEverything(toBeRemoved, l):
    """Remove every element equal to toBeRemoved from list l, in place.

    Matching uses the == operation, which can be customised via __eq__.

    Args:
        toBeRemoved (object): the object to remove from the list.
        l (list): the list to remove elements from.
    """
    # Fix: list.remove raises ValueError when no match remains; catching
    # only ValueError (instead of the previous bare `except:`) avoids
    # swallowing unrelated errors such as KeyboardInterrupt.
    while True:
        try:
            l.remove(toBeRemoved)
        except ValueError:
            break
| 858,392 |
init
Args:
tag (str): The tag1
*value: the elements you want to put into single's value(list); can be one element or several separated by commas, or put into a list, or any combination of those. *value will be flattened to a single one-dimensional list. In subclasses' init, raw data should be converted to single if needed according to the specific subclass.
|
def __init__(self, tag, *value):
    """Initialise a Single.

    Args:
        tag (str): the tag name.
        *value: the elements to put into the single's value (list); can be
            individual elements, lists, or any combination — *value is
            flattened into a single one-dimensional list. Subclass
            __init__s should convert raw data to Singles as needed.
    """
    self.tag = tag
    self.value = list(flatten(value))
| 858,394 |
Parse the single and its value and return the parsed str.
Args:
selfTag (str): The tag. Normally just ``self.tag``
selfValue (list): a list of value elements(single, subclasses, str, int). Normally just ``self.value``
Returns:
str: A parsed text
|
def printMe(self, selfTag, selfValue):
    """Parse the single and its value and return the parsed str.

    Args:
        selfTag (str): the tag; normally just ``self.tag``.
        selfValue (list): a list of value elements (Single, subclasses,
            str, int); normally just ``self.value``.

    Returns:
        str: the parsed text ('' when there is no value).
    """
    if len(selfValue) == 0:
        return ''
    # if value has only one element and it is not another single,
    # print the whole tag on one line
    elif len(selfValue) == 1 and not ancestor(selfValue[0]) is Single:
        text = '<{tag}>{value}</{tag}>\n'.format(
            tag=selfTag, value=selfValue[0])
        return text
    else:
        valueText = ''
        for element in selfValue:
            # an element is either another single/pair or a plain object;
            # both possibilities should not occur for the same element —
            # if they do, the user is not doing the right thing
            if singleOrPair(element) == 'Single':
                # ask that single to print itself
                valueText += element.printMe(element.tag, element.value)
            elif singleOrPair(element) == 'Pair':
                valueText += element.printMe(element.key, element.value)
            else:
                # simply print that element
                valueText += str(element) + '\n'
        # indent the nested content by 4 before wrapping in this tag
        valueText = indent(valueText, 4)
        text = '<{tag}>\n'.format(
            tag=selfTag) + valueText + '</{tag}>\n'.format(tag=selfTag)
        return text
| 858,395 |
Looks for all the non single values(str, int) *recursively* and returns a list of them
Args:
selfValue: A list of single, str, int. Normally just ``self.value``
Returns:
list: A list contains only non singles(str, int).
|
def findAll(self, selfValue):
    """Collect all non-Single leaf values (str, int) recursively.

    Args:
        selfValue: a list of Single, str, int; normally ``self.value``.

    Returns:
        list: a list containing only non-Singles (str, int).
    """
    leaves = []
    for item in selfValue:
        if isinstance(item, Single):
            # descend into the nested single's value
            leaves.extend(item.findAll(item.value))
        else:
            leaves.append(item)
    return leaves
| 858,396 |
Looks for all the single values and subclasses *recursively* and returns a list of them
Args:
selfValue: A list of single, str, int. Normally just ``self.value``
Returns:
list: A list contains only singles and subclasses.
|
def findAllSingle(self, selfValue):
    """Collect all Single values and subclasses recursively.

    Args:
        selfValue: a list of Single, str, int; normally ``self.value``.

    Returns:
        list: a list containing only Singles and subclasses thereof.
    """
    resultList = []
    for element in selfValue:
        if isinstance(element, Single):
            resultList.append(element)
            # Fix: recurse with the nested single's value. The previous
            # code called element.findAllSingle() with no argument, which
            # raised TypeError because selfValue is a required parameter.
            resultList += element.findAllSingle(element.value)
    return resultList
| 858,397 |
convert value and add to self.value
Subclass must overwrite this method.
Subclasses are responsible for creating whatever single instances they need from their ``add(*value)`` and calling ``_add()`` to add them to ``self.value``
Args:
*value: the value to be added
|
def add(self, *value):
    """Convert value and add it to self.value.

    Subclasses must overwrite this method: create whatever Single
    instances they need from ``add(*value)`` and call ``_add()`` to add
    them to ``self.value``.

    Args:
        *value: the value(s) to be added.
    """
    flattened = list(flatten(value))
    return self._add(flattened, self.value)
| 858,398 |
Remove elements from a list by matching the elements in the other list.
This method only looks inside current instance's value, not recursive.
There is no need for a recursive one anyway.
Match by == operation.
Args:
removeList (list): The list of matching elements.
selfValue (list): The list you remove value from. Usually ``self.value``
|
def _remove(self, removeList, selfValue):
    """Remove elements from selfValue by matching elements of removeList.

    Only looks inside the current instance's value, not recursively
    (there is no need for a recursive one anyway). Matching uses the ==
    operation.

    Args:
        removeList (list): the list of matching elements.
        selfValue (list): the list to remove values from; usually
            ``self.value``.
    """
    # Fix: dropped the leftover debug print() that wrote every removal
    # to stdout.
    for removeValue in removeList:
        # remove every element of selfValue equal to removeValue
        removeEverything(removeValue, selfValue)
| 858,399 |
remove elements from self.value by matching.
Create the exactly same single you want to delete and pass it(them) in.
Normally this method needs to be overwritten by subclasses. It only looks inside the current instance's value, not recursively. There is no need for a recursive one anyway.
Args:
*l: a single element, a bunch of element seperated by comma, or a list of elements, or any combination. Element is what you match with.
|
def remove(self, *l):
    """Remove elements from self.value by matching.

    Create an identical single to the one you want to delete and pass it
    (or them) in. Normally overwritten by subclasses. Only looks inside
    the current instance's value, not recursively.

    Args:
        *l: a single element, several elements separated by commas, a
            list of elements, or any combination thereof; elements are
            what gets matched against.
    """
    targets = list(flatten(l))
    self._remove(targets, self.value)
| 858,400 |
init
Args:
path (str): The absolute path of the plist
|
def __init__(self, path):
    """Initialise from a plist path.

    Args:
        path (str): the absolute path of the plist.
    """
    self.me = Path(path)   # the plist file itself
    self.tag = 'dict'      # the root element of a plist is a <dict>
    self.value = []
| 858,401 |
init
set self.tag to classname. e.g.::
ArraySingle.tag -> 'array'
Args:
*value: the elements you want to put into single's value(list), can be one element or several seperate by comma, or put into a list or combination of those. *value will be flattend to a single one deminision list. In subclasses' init, raw data should be converted to single if needed according to specific subclass.
|
def __init__(self, *value):
    """Initialise and derive the tag from the class name, e.g.::

        ArraySingle.tag -> 'array'

    Args:
        *value: the elements to put into the single's value (list); can be
            individual elements, lists, or any combination — *value is
            flattened to a one-dimensional list by the base class.
    """
    # 'ArraySingle' -> 'array', 'DictSingle' -> 'dict', etc.
    tag = self.__class__.__name__.replace('Single', '').lower()
    super().__init__(tag, value)
| 858,403 |
init
Args:
key (str): the key
*value: the value to be stored
|
def __init__(self, key='', *value):
    """Initialise a Pair.

    Args:
        key (str): the key; when empty, defaults to the class name.
        *value: the value(s) to be stored, flattened into a list.
    """
    if key == '':
        # no key given: fall back to the subclass's own name
        self.key = self.__class__.__name__
    else:
        self.key = key
    # NOTE(review): when no value is passed, self.value is never assigned
    # here — presumably a class attribute provides a default; confirm.
    if len(value) != 0:
        self.value = list(flatten(value))
| 858,404 |
Parse the single and its value and return the parsed str.
Args:
selfTag (str): The tag. Normally just ``self.tag``
selfValue (list): a list of value elements(single, subclasses, str, int). Normally just ``self.value``
Returns:
str: A parsed text
|
def printMe(self, selfKey, selfValue):
    """Parse the pair and its value and return the parsed str.

    Args:
        selfKey (str): the key; normally just ``self.key``.
        selfValue (list): a list of value elements (Single, subclasses,
            str, int); normally just ``self.value``.

    Returns:
        str: the parsed text ('' when there is no value).
    """
    text = '<key>{keyName}</key>\n'.format(keyName=selfKey)
    if len(selfValue) == 0:
        # no value: emit nothing at all (not even the key line)
        return ''
    else:
        valueText = ''
        for element in selfValue:
            if singleOrPair(element) == 'Single':
                valueText += element.printMe(element.tag, element.value)
            elif singleOrPair(element) == 'Pair':
                valueText += element.printMe(element.key, element.value)
            # NOTE(review): elements that are neither Single nor Pair are
            # silently dropped here — maybe an else branch is needed?
        text += valueText
    return text
| 858,405 |
init
Args:
Outer (class): One of the possible outer classes.
Inner (class): One of the possible inner classes.
*l: To be processed and set to value
|
def __init__(self, Outer, Inner, *l):
    """Initialise a nested outer/inner container.

    Args:
        Outer (class): one of the possible outer classes.
        Inner (class): one of the possible inner classes.
        *l: processed via add() and stored in the outer's value.
    """
    super().__init__()
    self.value = [Outer()]          # the single outer container instance
    self.l = self.value[0].value    # shortcut to the outer's value list
    self.Outer = Outer
    self.Inner = Inner
    self.add(l)
| 858,406 |
add inner to outer
Args:
*l: element that is passed into Inner init
|
def add(self, *l):
    """Wrap each element in Inner and add it to the outer's value.

    Args:
        *l: element(s) passed to the Inner constructor.
    """
    for element in flatten(l):
        wrapped = self.Inner(element)
        self._add([wrapped], self.l)
| 858,407 |
remove inner from outer
Args:
*l element that is passes into Inner init
|
def remove(self, *l):
    """Remove inner elements from the outer by matching.

    Args:
        *l: element(s) passed to the Inner constructor; the resulting
            Inner instances are matched against the outer's value.
    """
    for element in flatten(l):
        candidate = self.Inner(element)
        self._remove([candidate], self.l)
| 858,408 |
init
Args:
dic (dict): key and value
|
def __init__(self, dic):
    """Initialise from a dict of keys and values.

    Args:
        dic (dict): key and value pairs to add.
    """
    super().__init__()
    self.value = [DictSingle()]     # the single wrapped dict
    self.d = self.value[0].value    # shortcut to the dict single's value
    self.add(dic)
| 858,409 |
adds a dict as pair
Args:
dic (dict): key and value
|
def add(self, dic):
    """Add a dict's entries as Pairs to the wrapped dict single.

    Args:
        dic (dict): key and value pairs; keys are validated against
            ``self.keyWord``.
    """
    for kw in dic:
        # reject keys that are not in the allowed keyword list
        checkKey(kw, self.keyWord)
        self._add([Pair(kw, StringSingle(dic[kw]))], self.d)
| 858,410 |
remove the pair by passing a identical dict
Args:
dic (dict): key and value
|
def remove(self, dic):
    """Remove pairs by passing an identical dict.

    Args:
        dic (dict): key and value pairs identifying the Pairs to remove.
    """
    for kw in dic:
        removePair = Pair(kw, dic[kw])
        # Fix: _remove(removeList, selfValue) requires the target list;
        # the previous call omitted self.d and raised TypeError. self.d
        # mirrors the sibling add(), which writes into self.d.
        self._remove([removePair], self.d)
| 858,411 |
change value
Args:
path (str): the new environment path
|
def changeTo(self, path):
    """Replace the value with a new environment PATH.

    Args:
        path (str): the new environment path.
    """
    # a dict single holding a single PATH -> <string>path</string> pair
    dictionary = DictSingle(Pair('PATH', StringSingle(path)))
    self.value = [dictionary]
| 858,412 |
update self.value with basenumber and time interval
Args:
baseNumber (str): self.baseNumber
magnification (str): self.magnification
|
def _update(self, baseNumber, magnification):
    """Update self.value from a base number and a magnification factor.

    Args:
        baseNumber: the base quantity (the original docstring says str,
            but it is multiplied here — presumably numeric; confirm).
        magnification: the multiplier applied to baseNumber.
    """
    # total interval, truncated to an int for the IntegerSingle
    interval = int(baseNumber * magnification)
    self.value = [IntegerSingle(interval)]
| 858,413 |
init
Args:
*dic (dict): dictionary with format {'Day': 12, 'Hour': 34}. Available keys are Month, Day, Weekday, Hour, Minute. *Note the uppercase.* You can use gen(), genMix() to generate complex config dictionaries.
|
def __init__(self, *dic):
    """Initialise with an empty array single.

    Args:
        *dic (dict): dictionaries with format {'Day': 12, 'Hour': 34}.
            Available keys are Month, Day, Weekday, Hour, Minute.
            *Note the uppercase.* You can use gen()/genMix() to generate
            complex config dictionaries.
    """
    super().__init__()
    self.value = [ArraySingle()]    # container for calendar config dicts
    self.l = self.value[0].value    # shortcut to the array single's value
    # NOTE(review): *dic is accepted but never used — should this call
    # self.add(*dic) like the Outer/Inner __init__ does? Confirm.
| 858,419 |
add a config to StartCalendarInterval.
Args:
*dic (dict): dictionary with format {'Day': 12, 'Hour': 34}. Available keys are Month, Day, Weekday, Hour, Minute. *Note the uppercase.* You can use gen(), genMix() to generate complex config dictionaries.
|
def add(self, *dic):
    """Add a config to StartCalendarInterval.

    Args:
        *dic (dict): dictionaries with format {'Day': 12, 'Hour': 34}.
            Available keys are Month, Day, Weekday, Hour, Minute.
            *Note the uppercase.* You can use gen()/genMix() to generate
            complex config dictionaries.
    """
    dicList = list(flatten(dic))
    # for every dict in the list passed in
    for d in dicList:
        # make a dict single (list of pairs)
        di = []
        for k in d:
            # checkKey(k, self.keyWord)
            di.append(Pair(k, IntegerSingle(d[k])))
        dictSingle = DictSingle(di)
        # append dict single to array single's value
        self._add([dictSingle], self.l)
| 858,420 |
remove a calendar config.
Args:
*dic (dict): dictionary with format {'Day': 12, 'Hour': 34}. Available keys are Month, Day, Weekday, Hour, Minute. *Note the uppercase.* You can use gen(), genMix() to generate complex config dictionaries.
|
def remove(self, *dic):
    """Remove a calendar config.

    Args:
        *dic (dict): dictionaries with format {'Day': 12, 'Hour': 34}.
            Available keys are Month, Day, Weekday, Hour, Minute.
            *Note the uppercase.* You can use gen()/genMix() to generate
            complex config dictionaries.
    """
    dicList = list(flatten(dic))
    for d in dicList:
        # rebuild the identical dict single so _remove can match it by ==
        di = []
        for k in d:
            # checkkey(k, self.keyword)
            di.append(Pair(k, IntegerSingle(d[k])))
        dictSingle = DictSingle(di)
        # remove the matching dict single from the array single's value
        self._remove([dictSingle], self.l)
| 858,421 |
Converts string permission flags into integer permission flags as
specified in const.PERM_STRING_MAP
Arguments:
- flags <str>: one or more flags
For example: "crud" or "ru" or "r"
- mapper <list=const.PERM_STRING_MAP>: a list containing tuples mapping
int permission flag to string permission flag. If not specified will
default to const.PERM_STRING_MAP.
Returns:
- int
|
def int_flags(flags, mapper=const.PERM_STRING_MAP):
    """Convert string permission flags into integer permission flags as
    specified in const.PERM_STRING_MAP.

    Arguments:
        - flags <str>: one or more flags, e.g. "crud", "ru" or "r";
          integer input is returned unchanged.
        - mapper <list=const.PERM_STRING_MAP>: a list of tuples mapping
          int permission flag to string permission flag. Defaults to
          const.PERM_STRING_MAP.

    Returns:
        - int
    """
    result = 0
    if not flags:
        return result
    if isinstance(flags, six.integer_types):
        # already numeric: pass through untouched
        return flags
    if not isinstance(flags, six.string_types):
        raise TypeError("`flags` needs to be a string or integer type")
    # OR together the integer bit of every matching string flag
    for char in flags:
        for int_flag, str_flag in mapper:
            if str_flag == char:
                result |= int_flag
    return result
| 858,588 |
Check if the value of this namespace is matched by
keys
'*' is treated as wildcard
Arguments:
keys -- list of keys
Examples:
ns = Namespace("a.b.c")
ns.match(["a"]) #True
ns.match(["a","b"]) #True
ns.match(["a","b","c"]) #True
ns.match(["a","*","c"]) #True
ns.match(["b","b","c"]) #False
|
def match(self, keys, partial=True):
    """Check whether the value of this namespace is matched by keys.

    '*' is treated as a wildcard on either side.

    Arguments:
        keys -- list of keys
        partial -- when False, keys must cover the namespace exactly

    Examples:
        ns = Namespace("a.b.c")
        ns.match(["a"])            # True
        ns.match(["a", "b"])       # True
        ns.match(["a", "b", "c"])  # True
        ns.match(["a", "*", "c"])  # True
        ns.match(["b", "b", "c"])  # False
    """
    if not partial and len(keys) != self.length:
        return False
    for position, candidate in enumerate(keys):
        # more keys than namespace components: cannot match
        if position >= self.length:
            return False
        component = self.keys[position]
        component_matches = (component == "*" or candidate == "*"
                             or candidate == component)
        if not component_matches:
            return False
    return True
| 858,591 |
Returns the permissions level for the specified namespace
Arguments:
namespace -- permissioning namespace (str)
explicit -- require explicitly set permissions to the provided namespace
Returns:
int -- permissioning flags
|
def get_permissions(self, namespace, explicit=False):
    """Return the permission flags for the specified namespace.

    Arguments:
        namespace -- permissioning namespace (str or Namespace)
        explicit -- require explicitly set permissions on the provided
            namespace

    Returns:
        int -- permissioning flags
    """
    # accept both str and Namespace instances
    if not isinstance(namespace, Namespace):
        namespace = Namespace(namespace)
    keys = namespace.keys
    # delegate the actual lookup to the internal index walker
    p, _ = self._check(keys, self.index, explicit=explicit)
    return p
| 858,602 |
Checks if the permset has permission to the specified namespace
at the specified level
Arguments:
namespace -- permissioning namespace (str)
level -- permissioning level (int) (PERM_READ for example)
explicit -- require explicitly set permissions to the provided namespace
Returns:
bool
|
def check(self, namespace, level, explicit=False):
    """Check whether this permission set grants `level` on `namespace`.

    Arguments:
        namespace -- permissioning namespace (str)
        level -- permissioning level (int), e.g. PERM_READ
        explicit -- require explicitly set permissions on the namespace

    Returns:
        bool
    """
    granted = self.get_permissions(namespace, explicit=explicit)
    return bool(granted & level)
| 858,603 |
Apply permissions in this set to the provided data, effectively
removing all keys from it that are not permissioned to be viewed
Arguments:
data -- dict of data
Returns:
Cleaned data
|
def apply(self, data, path=None, applicator=None):
    """Apply permissions in this set to the provided data, effectively
    removing all keys from it that are not permissioned to be viewed.

    Arguments:
        data -- dict of data
        path -- optional starting path, forwarded to the applicator
        applicator -- optional Applicator instance to reuse; when given,
            its permission set is repointed at this set

    Returns:
        Cleaned data
    """
    if applicator:
        # reuse the caller's applicator, bound to this permission set
        applicator.pset = self
    else:
        applicator = Applicator(self)
    return applicator.apply(data, path=path)
| 858,604 |
Apply permissions in this set to the provided data, effectively
removing all keys from it that are not permissioned to be viewed
Arguments:
data -- dict of data
Returns:
Cleaned data
|
def apply(self, data, path=None):
    """Apply permissions in this set to the provided data, effectively
    removing all keys from it that are not permissioned to be viewed.

    Arguments:
        data -- dict of data
        path -- optional list of keys describing where `data` sits in the
            namespace tree (defaults to the root)

    Returns:
        Cleaned data (non-dict input is returned unchanged)
    """
    if path is None:
        path = []
    if not isinstance(data, dict):
        # only dicts can be permission-filtered
        return data

    def _enumerate(value):
        # uniform (key, value) iteration over both lists and dicts
        if isinstance(value, list):
            for k, v in enumerate(value):
                yield k, v
        elif isinstance(value, dict):
            for k, v in value.items():
                yield k, v

    def _set(container, key, value):
        # uniform setter: append for lists, key-assignment for dicts
        if isinstance(container, list):
            container.append(value)
        else:
            container[key] = value

    def _apply(ramap, value, status=False, wc=False, path=[]):
        # Recursively walk `value`, keeping entries whose read-access map
        # (`ramap`) grants visibility. `status` carries the inherited
        # permission; `wc` marks a wildcard-matched level. (The mutable
        # default `path=[]` is safe here: it is never mutated in place,
        # only extended via `path + [k]`.)
        if not isinstance(value, dict) and not isinstance(value, list):
            if status:
                return value
            else:
                return None
        if not wc:
            # "__" holds the explicit grant/deny flag at this level
            status = ramap.get("__", status)
        handler = None
        key_handler = None
        if path and self.handlers:
            # find a handler whose namespace exactly matches this path
            namespace = Namespace(path)
            for _handler in self.handlers.values():
                if namespace.match(_handler.get("namespace").keys, partial=False):
                    handler = _handler
                    key_handler = handler.get("key")
                    break
        if isinstance(value, list):
            if not key_handler:
                key_handler = list_key_handler
            rv = []
        else:
            rv = {}
        for k, v in _enumerate(value):
            if key_handler:
                # handlers may remap list indices / keys to namespace keys
                k = key_handler(v, k)
            k = str(k)
            if isinstance(v, dict) or isinstance(v, list):
                # containers recurse with the matching sub-map
                if k in ramap:
                    r = _apply(ramap[k], v, status=status, path=path + [k])
                    if r:
                        _set(rv, k, r)
                elif "*" in ramap:
                    r = _apply(ramap["*"], v, status=status, wc=True, path=path + [k])
                    if r:
                        _set(rv, k, r)
                elif status:
                    _set(rv, k, v)
            else:
                # leaves are kept when explicitly or implicitly readable
                if k in ramap:
                    if ramap[k].get("__", True):
                        _set(rv, k, v)
                elif "*" in ramap and ramap["*"].get("__", True):
                    _set(rv, k, v)
                elif status:
                    _set(rv, k, v)
        return rv

    # loop through all the handlers that specify the `explicit` argument
    # and temporarily add deny rules for those to the targeted permissionset
    tmpns = {}
    for ns, handler in self.handlers.items():
        if handler.get("explicit"):
            p = self.pset.get_permissions(ns)
            if p & const.PERM_READ:
                # skip the deny rule only when the read permission was
                # granted on the exact namespace (not via wildcard)
                exists = False
                for _ns in self.pset.namespaces:
                    if Namespace(_ns).match(Namespace(ns).keys, partial=False):
                        exists = True
                        break
                if exists:
                    continue
            tmpns[ns] = p
            self.pset[ns] = const.PERM_DENY
    # apply permissions
    rv = _apply(self.pset.read_access_map, data)
    # remove temporarily added deny rules
    for ns, p in tmpns.items():
        if p is None:
            del self.pset[ns]
        else:
            self.pset[ns] = p
    return rv
| 858,606 |
Determine if an instance is of the sliced type and within bounds.
Args:
other: The instance to test.
Returns:
True if the object is both of the same type as sliced by the
created class as well as within the bounds defined by the class.
|
def __instancecheck__(cls, other):
    # type: (Any) -> bool
    """Determine if an instance is of the sliced type and within bounds.

    Args:
        other: The instance to test.

    Returns:
        True if the object is both of the same type as sliced by the
        created class as well as within the bounds defined by the class.
    """
    try:
        # cls(other) runs the bound checks and raises ValueError when
        # the value is out of bounds; bool() collapses the result.
        return bool(
            isinstance(other, cls.__type__) and cls(other)  # type: ignore
        )
    except ValueError:
        return False
| 859,549 |
Create a new subclass of a type bounded by the arguments.
If a callable is passed as the third argument of the slice, it will be
used as the comparison function for the boundaries.
Args:
args: A tuple with two or three parameters: a type, a slice
representing the minimum and maximum lengths allowed for values
of that type and, optionally, a function to use on values
before comparing against the bounds.
|
def __getitem__(cls, args):
    # type: (Union[Tuple[_T, Any], Tuple[_T, Any, Callable]]) -> type
    """Create a new subclass of a type bounded by the arguments.

    If a callable is passed as the third argument of the slice, it will
    be used as the comparison function for the boundaries.

    Args:
        args: A tuple with two or three parameters: a type, a slice
            representing the minimum and maximum lengths allowed for
            values of that type and, optionally, a function to use on
            values before comparing against the bounds.

    Returns:
        A new class that validates its values against the bounds at
        instantiation time.
    """
    type_, bound, keyfunc = cls._get_args(args)
    keyfunc_name = cls._get_fullname(keyfunc)
    identity = cls._identity
    BaseClass, MetaClass = cls._get_bases(type_)
    instantiate = cls._instantiate

    @six.add_metaclass(MetaClass)  # type: ignore
    class _BoundedSubclass(BaseClass):  # type: ignore
        def __new__(cls, __value, *args, **kwargs):
            # type: (Type[_BoundedSubclass], Any, *Any, **Any) -> type
            instance = instantiate(
                BaseClass, type_, __value, *args, **kwargs
            )
            # the keyfunc maps the instance to the value actually compared
            # against the bounds (e.g. len for length-bounded types)
            cmp_val = keyfunc(instance)
            if bound.start is not None or bound.stop is not None:
                if bound.start is not None and cmp_val < bound.start:
                    # mention the keyfunc in the message only when it is
                    # not the identity function
                    if keyfunc is not identity:
                        raise ValueError(
                            "The value of {}({}) [{}] is below the minimum"
                            " allowed value of {}.".format(
                                keyfunc_name,
                                repr(__value),
                                repr(cmp_val),
                                bound.start,
                            )
                        )
                    raise ValueError(
                        "The value {} is below the minimum allowed value "
                        "of {}.".format(repr(__value), bound.start)
                    )
                if bound.stop is not None and cmp_val > bound.stop:
                    if keyfunc is not identity:
                        raise ValueError(
                            "The value of {}({}) [{}] is above the maximum"
                            " allowed value of {}.".format(
                                keyfunc_name,
                                repr(__value),
                                repr(cmp_val),
                                bound.stop,
                            )
                        )
                    raise ValueError(
                        "The value {} is above the maximum allowed value "
                        "of {}.".format(repr(__value), bound.stop)
                    )
            elif not cmp_val:
                # no explicit bounds: the keyfunc acts as a predicate
                raise ValueError(
                    "{}({}) is False".format(keyfunc_name, repr(instance))
                )
            return instance

    _BoundedSubclass.__type__ = type_
    _BoundedSubclass.__class_repr__ = cls._get_class_repr(
        type_, bound, keyfunc, keyfunc_name
    )
    return _BoundedSubclass
| 859,550 |
Get the base and meta classes to use in creating a subclass.
Args:
type_: The type to subclass.
Returns:
A tuple containing two values: a base class, and a metaclass.
|
def _get_bases(type_):
    # type: (type) -> Tuple[type, type]
    """Get the base and meta classes to use in creating a subclass.

    Args:
        type_: The type to subclass.

    Returns:
        A tuple containing two values: a base class, and a metaclass.
    """
    try:
        # Probe whether type_ can be subclassed at all (e.g. bool cannot);
        # fall back to object when it can't.
        class _(type_):  # type: ignore
            pass

        BaseClass = type_
    except TypeError:
        BaseClass = object

    class MetaClass(_ValidationMeta, BaseClass.__class__):  # type: ignore
        pass

    return BaseClass, MetaClass
| 859,551 |
Return a class representation using the slice parameters.
Args:
type_: The type the class was sliced with.
bound: The boundaries specified for the values of type_.
keyfunc: The comparison function used to check the value
boundaries.
keyfunc_name: The name of keyfunc.
Returns:
A string representing the class.
|
def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):
    # type: (Any, slice, Callable, str) -> str
    """Return a class representation using the slice parameters.

    Args:
        type_: The type the class was sliced with.
        bound: The boundaries specified for the values of type_.
        keyfunc: The comparison function used to check the value
            boundaries.
        keyfunc_name: The name of keyfunc.

    Returns:
        A string representing the class.
    """
    # only mention the keyfunc when it is not the default comparator
    if keyfunc is not cls._default:
        return "{}.{}[{}, {}, {}]".format(
            cls.__module__,
            cls.__name__,
            cls._get_fullname(type_),
            cls._get_bound_repr(bound),
            keyfunc_name,
        )
    return "{}.{}[{}, {}]".format(
        cls.__module__,
        cls.__name__,
        cls._get_fullname(type_),
        cls._get_bound_repr(bound),
    )
| 859,553 |
Get the full name of an object including the module.
Args:
obj: An object.
Returns:
The full class name of the object.
|
def _get_fullname(obj):
    # type: (Any) -> str
    """Get the full name of an object, including its module.

    Args:
        obj: An object.

    Returns:
        The full class name of the object.
    """
    # Fall back to the object's class when it has no __name__ of its own.
    target = obj if hasattr(obj, "__name__") else obj.__class__
    module = target.__module__
    # Builtins are shown bare, without a module prefix.
    if module in ("builtins", "__builtin__"):
        return target.__name__
    return "{}.{}".format(module, target.__name__)
| 859,555 |
Return the parameters necessary to check type boundaries.
Args:
args: A tuple with two parameters: a type, and a slice representing
the minimum and maximum lengths allowed for values of that
type.
Returns:
A tuple with three parameters: a type, a slice, and the len
function.
|
def _get_args(cls, args):
    # type: (tuple) -> Tuple[type, slice, Callable]
    """Return the parameters necessary to check type boundaries.

    Args:
        args: A tuple with two parameters: a type, and a slice
            representing the minimum and maximum lengths allowed for
            values of that type.

    Returns:
        A tuple with three parameters: a type, a slice, and the len
        function.
    """
    if not isinstance(args, tuple) or not len(args) == 2:
        raise TypeError(
            "{}[...] takes exactly two arguments.".format(cls.__name__)
        )
    # length-bounded types always compare via len()
    return super(_LengthBoundedMeta, cls)._get_args(args + (len,))
| 859,556 |
Return the parameters necessary to check type boundaries.
Args:
args: A tuple with one or two parameters: A type to cast the
value passed, and a predicate function to use for bounds
checking.
Returns:
A tuple with three parameters: a type, a slice, and the predicate
function. If no type was passed in args, the type defaults to Any.
|
def _get_args(cls, args):
    # type: (tuple) -> Tuple[type, slice, Callable]
    """Return the parameters necessary to check type boundaries.

    Args:
        args: A tuple with one or two parameters: A type to cast the
            value passed, and a predicate function to use for bounds
            checking.

    Returns:
        A tuple with three parameters: a type, a slice, and the predicate
        function. If no type was passed in args, the type defaults to Any.
    """
    if isinstance(args, tuple):
        # two-element form: (type, predicate)
        if not len(args) == 2:
            raise TypeError(
                "{}[...] takes one or two argument.".format(cls.__name__)
            )
        return super(_ValidationBoundedMeta, cls)._get_args(
            (args[0], None, args[1])
        )
    # single-argument form: args is the predicate; the type defaults to Any
    return super(_ValidationBoundedMeta, cls)._get_args((Any, None, args))
| 859,557 |
Return a class representation using the slice parameters.
Args:
type_: The type the class was sliced with.
bound: The boundaries specified for the values of type_.
keyfunc: The comparison function used to check the value
boundaries.
keyfunc_name: The name of keyfunc.
Returns:
A string representing the class.
|
def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):
    # type: (Any, slice, Callable, str) -> str
    """Return a class representation using the slice parameters.

    Args:
        type_: The type the class was sliced with.
        bound: The boundaries specified for the values of type_.
        keyfunc: The comparison function used to check the value
            boundaries.
        keyfunc_name: The name of keyfunc.

    Returns:
        A string representing the class.
    """
    # only mention the type when it was explicitly given (not Any)
    if type_ is not Any:
        return "{}.{}[{}, {}]".format(
            cls.__module__,
            cls.__name__,
            cls._get_fullname(type_),
            keyfunc_name,
        )
    return "{}.{}[{}]".format(cls.__module__, cls.__name__, keyfunc_name)
| 859,558 |
Return the parameters necessary to check type boundaries.
Args:
args: A slice representing the minimum and maximum lengths allowed
for values of that string.
Returns:
A tuple with three parameters: a type, a slice, and the len
function.
|
def _get_args(cls, args):
    # type: (tuple) -> Tuple[type, slice, Callable]
    """Return the parameters necessary to check type boundaries.

    Args:
        args: A slice representing the minimum and maximum lengths
            allowed for values of that string.

    Returns:
        A tuple with three parameters: a type, a slice, and the len
        function.
    """
    # a tuple means more than one slice argument was given, which is
    # invalid for string types
    if isinstance(args, tuple):
        raise TypeError(
            "{}[...] takes exactly one argument.".format(cls.__name__)
        )
    return super(_StringMeta, cls)._get_args((_STR_TYPE, args))
| 859,559 |
Initialize the client with connection settings.
Args:
name; name of the client
exchange: name of the exchange to connect to
topics: list of routing keys to listen to
enable_ping: enable answering to ping requests
By default, the 'ping' routing key will be added in order to enable
responses to ping requests, unless specified otherwise.
|
def __init__(self, name, exchange, topics=None, enable_ping=True,
             listen_all=False):
    """Initialize the client with connection settings.

    Args:
        name: name of the client.
        exchange: name of the exchange to connect to.
        topics: list of routing keys to listen to. By default, the 'ping'
            routing key is added in order to enable responses to ping
            requests, unless specified otherwise.
        enable_ping: enable answering to ping requests.
        listen_all: listen to every routing key instead of `topics`.
    """
    self.name = name
    self.exchange = exchange
    # Fix: the old signature used a mutable default (topics=[]) that was
    # shared across instances AND mutated below by appending 'ping'.
    # Copying also avoids mutating a caller-supplied list.
    self.topics = list(topics) if topics is not None else []
    self.listeners = []
    self.listen_all = listen_all
    if enable_ping:
        self.listeners.append(self._handle_ping)
        if 'ping' not in self.topics:
            self.topics.append('ping')
    self._channel = None
    self._conn = None
    self._queue_name = None
| 860,061 |
Connect to the server and set everything up.
Args:
host: hostname to connect to
|
def connect(self, host='localhost'):
    """Connect to the server and set everything up.

    Declares the topic exchange, creates an exclusive queue, binds it to
    the configured routing keys (or '*' when listen_all is set) and
    registers the message callback.

    Args:
        host: hostname to connect to.
    """
    # Connect
    get_logger().info("Connecting to RabbitMQ server...")
    self._conn = pika.BlockingConnection(
        pika.ConnectionParameters(host=host))
    self._channel = self._conn.channel()
    # Exchanger
    get_logger().info("Declaring topic exchanger {}...".format(
        self.exchange))
    # NOTE(review): newer pika versions renamed the `type` keyword to
    # `exchange_type`; confirm the pinned pika version supports this call.
    self._channel.exchange_declare(exchange=self.exchange, type='topic')
    # Create queue (exclusive: auto-deleted when this client disconnects)
    get_logger().info("Creating RabbitMQ queue...")
    result = self._channel.queue_declare(exclusive=True)
    self._queue_name = result.method.queue
    # Binding
    if self.listen_all:
        get_logger().info(
            "Binding queue to exchanger {} (listen all)...".format(
                self.exchange
            )
        )
        # '*' matches a single topic word on a topic exchange
        self._channel.queue_bind(
            exchange=self.exchange,
            queue=self._queue_name,
            routing_key='*'
        )
    else:
        # one binding per configured routing key
        for routing_key in self.topics:
            get_logger().info(
                "Binding queue to exchanger {} "
                "with routing key {}...".format(
                    self.exchange, routing_key)
            )
            self._channel.queue_bind(
                exchange=self.exchange,
                queue=self._queue_name,
                routing_key=routing_key
            )
    # Callback (no_ack=True: messages are not re-delivered on failure)
    get_logger().info("Binding callback...")
    self._channel.basic_consume(
        self._callback, queue=self._queue_name, no_ack=True)
| 860,062 |
Send a dict with internal routing key to the exchange.
Args:
topic: topic to publish the message to
dct: dict object to send
|
def publish(self, topic, dct):
    """Send a dict with an internal routing key to the exchange.

    Args:
        topic: topic (routing key) to publish the message to.
        dct: dict object to send; serialised as JSON.
    """
    get_logger().info("Publishing message {} on routing key "
                      "{}...".format(dct, topic))
    self._channel.basic_publish(
        exchange=self.exchange,
        routing_key=topic,
        body=json.dumps(dct)
    )
| 860,063 |
Return a displayable name for the type.
Args:
type_: A class object.
Returns:
A string value describing the class name that can be used in a natural
language sentence.
|
def _get_type_name(type_):
    # type: (type) -> str
    """Return a displayable name for the type.

    Args:
        type_: A class object.

    Returns:
        A string value describing the class name that can be used in a
        natural language sentence.
    """
    name = repr(type_)
    if name.startswith("<"):
        # repr like "<class 'pkg.Name'>": prefer the qualified/plain name
        name = getattr(type_, "__qualname__", getattr(type_, "__name__", ""))
    # drop any module/qualname prefix; fall back to repr when empty
    short = name.rsplit(".", 1)[-1]
    return short or repr(type_)
| 860,092 |
Return the source code for a class by checking the frame stack.
This is necessary because it is not possible to get the source of a class
being created by a metaclass directly.
Args:
class_name: The class to look for on the stack.
Returns:
The source code for the requested class if the class was found and the
source was accessible.
|
def _get_class_frame_source(class_name):
    # type: (str) -> Optional[str]
    """Return the source code for a class by checking the frame stack.

    This is necessary because it is not possible to get the source of a
    class being created by a metaclass directly.

    Args:
        class_name: The class to look for on the stack.

    Returns:
        The source code for the requested class if the class was found
        and the source was accessible.
        NOTE(review): the actual return is a 3-tuple of (source, frame
        globals, frame locals), not the Optional[str] the type comment
        claims — confirm and fix the annotation.

    Raises:
        TypeError: Raised when no frame defining the class is found.
    """
    for frame_info in inspect.stack():
        try:
            # read the defining file from the class statement onwards
            with open(frame_info[1]) as fp:
                src = "".join(fp.readlines()[frame_info[2] - 1 :])
        except IOError:
            # source not available for this frame (e.g. REPL); keep looking
            continue
        if re.search(r"\bclass\b\s+\b{}\b".format(class_name), src):
            reader = six.StringIO(src).readline
            tokens = tokenize.generate_tokens(reader)
            source_tokens = []
            indent_level = 0
            base_indent_level = 0
            has_base_level = False
            # accumulate tokens until the class body dedents back to (or
            # below) the indentation level the class started at
            for token, value, _, _, _ in tokens:  # type: ignore
                source_tokens.append((token, value))
                if token == tokenize.INDENT:
                    indent_level += 1
                elif token == tokenize.DEDENT:
                    indent_level -= 1
                    if has_base_level and indent_level <= base_indent_level:
                        return (
                            tokenize.untokenize(source_tokens),
                            frame_info[0].f_globals,
                            frame_info[0].f_locals,
                        )
                elif not has_base_level:
                    has_base_level = True
                    base_indent_level = indent_level
    raise TypeError(
        'Unable to retrieve source for class "{}"'.format(class_name)
    )
| 860,093 |
Determine if an attribute can be replaced with a property.
Args:
names: The complete list of all attribute names for the class.
attrs: The attribute dict returned by __prepare__.
annotations: A mapping of all defined annotations for the class.
attr: The attribute to test.
Returns:
True if the attribute can be replaced with a property; else False.
|
def _is_propertyable(
    names,  # type: List[str]
    attrs,  # type: Dict[str, Any]
    annotations,  # type: Dict[str, type]
    attr,  # Dict[str, Any]
):
    # type: (...) -> bool
    """Determine if an attribute can be replaced with a property.

    Args:
        names: The complete list of all attribute names for the class.
        attrs: The attribute dict returned by __prepare__.
        annotations: A mapping of all defined annotations for the class.
        attr: The attribute to test.

    Returns:
        True if the attribute can be replaced with a property; else False.
    """
    # An attribute is propertyable when it is annotated, public,
    # not a constant, has no "__<name>" override, and is not a method.
    # NOTE(review): `getattr(attrs, attr, None)` on a dict looks up dict
    # *methods*, not the entry — should this be `attrs.get(attr)`? Confirm.
    return (
        attr in annotations
        and not attr.startswith("_")
        and not attr.isupper()
        and "__{}".format(attr) not in names
        and not isinstance(getattr(attrs, attr, None), types.MethodType)
    )
| 860,094 |
Create a property setter method for the attribute.
Args:
_: The name of the attribute to set. Unused.
private_attr: The name of the attribute that will store any data
related to the attribute.
type_: The annotated type defining what values can be stored in the
attribute.
Returns:
A method that takes self and a value and stores that value on self
in the private attribute iff the value is an instance of type_.
|
def _strict_object_meta_fset(_, private_attr, type_):
    # type: (str, str, Type[_T]) -> Callable[[_T], None]
    """Build a type-checked property setter.

    The returned setter stores the value under *private_attr* in the
    instance __dict__, but only after verifying that it is an instance of
    *type_* (resolving TypeVars against the owner's generic arguments).
    """
    def _fset(self, value):
        # type: (...) -> None
        expected = type_
        if isinstance(type_, TypeVar):
            # Map the TypeVar to its concrete argument, e.g. Box[int]
            # resolves T -> int via __orig_class__.
            mapping = dict(
                zip(self.__parameters__, self.__orig_class__.__args__)
            )
            expected = mapping[type_]
        if not is_instance(value, expected):
            raise TypeError(
                "Cannot assign type of {} to attribute of type {}.".format(
                    _get_type_name(type(value)), _get_type_name(expected)
                )
            )
        vars(self)[private_attr] = value

    return _fset
| 860,096 |
Test if two objects of the same base class are equal.
If the objects are not of the same class, Python will default to
comparison-by-ID.
Args:
other: The object to compare for equality.
Returns:
True if the objects are equal; else False.
|
def __eq__(self, other):
    """Equality: compare typed properties of two same-class instances.

    Returning NotImplemented lets Python fall back to its default
    comparison when the classes differ.
    """
    if self.__class__ is not other.__class__:
        return NotImplemented
    mine = self._tp__get_typed_properties()
    theirs = other._tp__get_typed_properties()
    return mine == theirs
| 860,101 |
Test if self is less than an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is less than other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
|
def __lt__(self, other):
    """Less-than: order same-class instances by their typed properties."""
    if self.__class__ is not other.__class__:
        return NotImplemented
    mine = self._tp__get_typed_properties()
    theirs = other._tp__get_typed_properties()
    return mine < theirs
| 860,102 |
Test if self is greater than an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is greater than other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
|
def __gt__(self, other):
    """Greater-than, derived from <= so all orderings stay consistent."""
    if self.__class__ is not other.__class__:
        return NotImplemented
    return not (self <= other)
| 860,103 |
Test if self is greater than or equal an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is greater than or equal to other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
|
def __ge__(self, other):
    """Greater-or-equal, derived from < so all orderings stay consistent."""
    if self.__class__ is not other.__class__:
        return NotImplemented
    return not (self < other)
| 860,104 |
Return value parsed from output.
Args:
canary(str): This string must exist in the target line.
split_offset(int): Split offset for target value in string.
kal_out (str): Captured text output from kal.
|
def extract_value_from_output(canary, split_offset, kal_out):
    """Return the value parsed from kal output, or None when absent.

    Args:
        canary (str): Marker string that must appear in the target line.
        split_offset (int): Whitespace-token index of the value in that line.
        kal_out (str): Raw stdout captured from kal.

    Returns:
        str or None: Token at *split_offset* from the last line containing
        *canary*, or None when no line matches.
    """
    # The original wrapped this scan in a `while retval == ""` loop that
    # could only ever run once; a single pass is equivalent.
    retval = None
    for line in kal_out.splitlines():
        if canary in line:
            # Keep the last match, mirroring the original scan order.
            retval = str(line.split()[split_offset])
    return retval
| 860,466 |
Create the class using all metaclasses.
Args:
metaclasses: A tuple of metaclasses that will be used to generate and
replace a specified class.
Returns:
A decorator that will recreate the class using the specified
metaclasses.
|
def metaclass(*metaclasses):
    # type: (*type) -> Callable[[type], type]
    """Class decorator that rebuilds *cls* with a combined metaclass.

    Merges the requested metaclasses with the class's existing metaclass
    into a single derived metaclass, then recreates the class with it.
    """
    def _inner(cls):
        # pragma pylint: disable=unused-variable
        # Deduplicate while preserving order: requested metaclasses first,
        # then the class's current metaclass.
        metabases = tuple(
            collections.OrderedDict(  # noqa: F841
                (c, None) for c in (metaclasses + (type(cls),))
            ).keys()
        )
        # pragma pylint: enable=unused-variable
        _Meta = metabases[0]
        # Fold the bases left-to-right into a single metaclass; each step
        # derives from the next base plus the accumulated _Meta.
        for base in metabases[1:]:
            class _Meta(base, _Meta):  # pylint: disable=function-redefined
                pass
        return six.add_metaclass(_Meta)(cls)
    return _inner
| 860,616 |
Using this feature you can register a customer’s credit card data and get a token sequential number.
Args:
payer_id:
name:
identification_number:
payment_method:
number:
expiration_date:
Returns:
|
def create_single_token(self, *, payer_id, name, identification_number, payment_method, number, expiration_date):
    """Register a customer's credit card and obtain a reusable token.

    All arguments are forwarded verbatim inside the "creditCardToken"
    section of the PayU request payload.
    """
    card = {
        "payerId": payer_id,
        "name": name,
        "identificationNumber": identification_number,
        "paymentMethod": payment_method,
        "number": number,
        "expirationDate": expiration_date
    }
    payload = {
        "language": self.client.language.value,
        "command": PaymentCommand.CREATE_TOKEN.value,
        "merchant": {
            "apiLogin": self.client.api_login,
            "apiKey": self.client.api_key
        },
        "creditCardToken": card,
        "test": self.client.is_test
    }
    return self.client._post(self.url, json=payload)
| 860,964 |
With this functionality you can query previously the Credit Cards Token.
Args:
payer_id:
credit_card_token_id:
start_date:
end_date:
Returns:
|
def get_tokens(self, *, payer_id, credit_card_token_id, start_date, end_date):
    """Look up previously created credit-card tokens within a date window."""
    token_info = {
        "payerId": payer_id,
        "creditCardTokenId": credit_card_token_id,
        "startDate": start_date.strftime('%Y-%m-%dT%H:%M:%S'),
        "endDate": end_date.strftime('%Y-%m-%dT%H:%M:%S')
    }
    payload = {
        "language": self.client.language.value,
        "command": PaymentCommand.GET_TOKENS.value,
        "merchant": {
            "apiLogin": self.client.api_login,
            "apiKey": self.client.api_key
        },
        "creditCardTokenInformation": token_info,
        "test": self.client.is_test
    }
    return self.client._post(self.url, json=payload)
| 860,966 |
This feature allows you to delete a tokenized credit card register.
Args:
payer_id:
credit_card_token_id:
Returns:
|
def remove_token(self, *, payer_id, credit_card_token_id):
    """Delete a tokenized credit card register."""
    merchant = {
        "apiLogin": self.client.api_login,
        "apiKey": self.client.api_key
    }
    token_ref = {
        "payerId": payer_id,
        "creditCardTokenId": credit_card_token_id
    }
    payload = {
        "language": self.client.language.value,
        "command": PaymentCommand.REMOVE_TOKEN.value,
        "merchant": merchant,
        "removeCreditCardToken": token_ref,
        "test": self.client.is_test
    }
    return self.client._post(self.url, json=payload)
| 860,967 |
Decode a base-62 encoded string into a number.
Arguments:
- `string`: The encoded string
- alphabet (fixed internally): digits, then lowercase, then uppercase letters
Stolen from: http://stackoverflow.com/a/1119769/1144479
|
def base62_decode(string):
    """Decode a base-62 string (digits, lowercase, uppercase) into an int.

    Raises ValueError (from str.index) on characters outside the alphabet.
    """
    alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    base = len(alphabet)
    num = 0
    # Horner's method: fold digits left-to-right, most significant first.
    for char in string:
        num = num * base + alphabet.index(char)
    return int(num)
| 861,303 |
Normally the connection guarantees response times of 3 seconds on average,
if there is an abnormal situation, the maximum response time is 1 minute.
It is highly recommended that you set “timeouts” when you connect with PayU.
Args:
method:
url:
headers:
**kwargs:
Returns:
|
def _request(self, method, url, headers=None, **kwargs):
    """Issue an HTTP request with JSON defaults and a 60 s timeout.

    PayU recommends explicit timeouts; abnormal responses can take up to
    a minute, so the hard limit is 60 seconds.
    """
    merged = {
        'Accept': 'application/json',
        'Content-Type': 'application/json'
    }
    if headers:
        merged.update(headers)
    if self.is_debug:
        # Logs the caller-supplied headers (not the merged defaults).
        self.logger.debug('{} {} {} {}'.format(method, url, headers, kwargs))
    raw = requests.request(method, url, headers=merged, timeout=60, **kwargs)
    return self._parse(raw)
| 862,074 |
Check all the information of a plan for subscriptions associated with the merchant.
Args:
plan_code: Plan’s identification code for the merchant.
Returns:
|
def get_plan(self, plan_code):
    """Fetch a subscription plan by its merchant plan code."""
    endpoint = self.url + 'plans/{}'.format(plan_code)
    return self.client._get(endpoint, headers=self.get_headers())
| 862,209 |
Delete an entire subscription plan associated with the merchant.
Args:
plan_code: Plan’s identification code for the merchant.
Returns:
|
def delete_plan(self, plan_code):
    """Delete an entire subscription plan by its merchant plan code."""
    endpoint = self.url + 'plans/{}'.format(plan_code)
    return self.client._delete(endpoint, headers=self.get_headers())
| 862,210 |
Creation of a customer in the system.
Args:
full_name: Customer's complete name.
Alphanumeric. Max: 255.
email: Customer's email address.
Alphanumeric. Max: 255.
Returns:
|
def create_customer(self, *, full_name, email):
    """Create a customer record from a full name and an email address."""
    body = {
        "fullName": full_name,
        "email": email
    }
    return self.client._post(self.url + 'customers', json=body, headers=self.get_headers())
| 862,211 |
Queries the information related to the customer.
Args:
customer_id: Identifier of the client from which you want to find the associated information.
Returns:
|
def get_customer(self, customer_id):
    """Fetch the information associated with a customer."""
    endpoint = self.url + 'customers/{}'.format(customer_id)
    return self.client._get(endpoint, headers=self.get_headers())
| 862,212 |
Removes a user from the system.
Args:
customer_id: Identifier of the client to be deleted.
Returns:
|
def delete_customer(self, customer_id):
    """Remove a customer from the system."""
    endpoint = self.url + 'customers/{}'.format(customer_id)
    return self.client._delete(endpoint, headers=self.get_headers())
| 862,213 |
Check the information of a credit card (Token) data identifier.
Args:
credit_card_id: Credit Card Token you want to consult.
Returns:
|
def get_credit_card(self, credit_card_id):
    """Fetch the data of a tokenized credit card by its token identifier."""
    endpoint = self.url + 'creditCards/{}'.format(credit_card_id)
    return self.client._get(endpoint, headers=self.get_headers())
| 862,215 |
Delete a credit card (Token) associated with a user.
Args:
customer_id: Identifier of the client of whom you are going to delete the token.
credit_card_id: Identifier of the token to be deleted.
Returns:
|
def delete_credit_card(self, *, customer_id, credit_card_id):
    """Delete a credit card token belonging to a customer."""
    endpoint = '{}customers/{}/creditCards/{}'.format(self.url, customer_id, credit_card_id)
    return self.client._delete(endpoint, headers=self.get_headers())
| 862,216 |
Check the basic information associated with the specified subscription.
Args:
subscription_id: Identification of the subscription.
Returns:
|
def get_subscription(self, subscription_id):
    """Retrieve the basic information of a subscription.

    Bug fix: this read-only lookup previously issued a PUT request;
    it now issues a GET like every other query in this client.

    Args:
        subscription_id: Identification of the subscription.

    Returns:
        The parsed API response.
    """
    return self.client._get(self.url + 'subscriptions/{}'.format(subscription_id), headers=self.get_headers())
| 862,218 |
Update information associated with the specified subscription. At the moment it is only possible
to update the token of the credit card to which the charge of the subscription is made.
Args:
subscription_id: Identification of the subscription.
credit_card_token:
Returns:
|
def update_subscription(self, *, subscription_id, credit_card_token):
    """Point an existing subscription at a new credit card token.

    Only the token can currently be changed through this endpoint.
    """
    endpoint = self.url + 'subscriptions/{}'.format(subscription_id)
    body = {"creditCardToken": credit_card_token}
    return self.client._put(endpoint, json=body, headers=self.get_headers())
| 862,219 |
Unsubscribe, delete the relationship of the customer with the plan.
Args:
subscription_id: Identification of the subscription.
Returns:
|
def delete_subscription(self, subscription_id):
    """Unsubscribe: remove the relationship between customer and plan."""
    endpoint = self.url + 'subscriptions/{}'.format(subscription_id)
    return self.client._delete(endpoint, headers=self.get_headers())
| 862,220 |
Adds extra charges to the respective invoice for the current period.
Args:
subscription_id: Identification of the subscription
description:
plan_value:
plan_tax:
plan_tax_return_base:
currency:
Returns:
|
def create_additional_charge(self, *, subscription_id, description, plan_value, plan_tax, plan_tax_return_base,
                             currency):
    """Add an extra charge to the current period's invoice of a subscription.

    The value, tax and tax-return-base items all share *currency*.
    """
    items = [
        ("ITEM_VALUE", plan_value),
        ("ITEM_TAX", plan_tax),
        ("ITEM_TAX_RETURN_BASE", plan_tax_return_base),
    ]
    payload = {
        "description": description,
        "additionalValues": [
            {"name": name, "value": value, "currency": currency}
            for name, value in items
        ]
    }
    endpoint = self.url + 'subscriptions/{}/recurringBillItems'.format(subscription_id)
    return self.client._post(endpoint, json=payload, headers=self.get_headers())
| 862,221 |
Query extra charge information of an invoice from its identifier.
Args:
recurring_billing_id: Identifier of the additional charge.
Returns:
|
def get_additional_charge_by_identifier(self, recurring_billing_id):
    """Fetch one additional (extra) charge of an invoice by identifier."""
    endpoint = self.url + 'recurringBillItems/{}'.format(recurring_billing_id)
    return self.client._get(endpoint, headers=self.get_headers())
| 862,222 |
Query extra charges of shop’s invoices that meet the stipulated filters.
Args:
description: Description entered in the extra charge.
Returns:
|
def get_additional_charge_by_description(self, description):
    """List extra charges whose description matches the given filter."""
    query = {"description": description}
    return self.client._get(self.url + 'recurringBillItems/', params=query, headers=self.get_headers())
| 862,223 |
Updates the information from an additional charge in an invoice.
Args:
recurring_billing_id: Identifier of the additional charge.
description:
plan_value:
plan_tax:
plan_tax_return_base:
currency:
Returns:
|
def update_additional_charge(self, *, recurring_billing_id, description, plan_value, plan_tax, plan_tax_return_base,
                             currency):
    """Update the information of an additional charge on an invoice.

    Args:
        recurring_billing_id: Identifier of the additional charge.
        description: New description for the charge.
        plan_value: Item value.
        plan_tax: Item tax.
        plan_tax_return_base: Item tax return base.
        currency: Currency code shared by the three items above.

    Returns:
        The parsed API response.
    """
    payload = {
        "description": description,
        "additionalValues": [
            {
                "name": "ITEM_VALUE",
                "value": plan_value,
                "currency": currency
            },
            {
                "name": "ITEM_TAX",
                "value": plan_tax,
                "currency": currency
            },
            {
                "name": "ITEM_TAX_RETURN_BASE",
                "value": plan_tax_return_base,
                "currency": currency
            }
        ]
    }
    fmt = 'recurringBillItems/{}'.format(recurring_billing_id)
    # Bug fix: the body was previously passed as ``payload=`` (an unknown
    # keyword for the HTTP layer); send it as ``json=`` like every other
    # write operation in this client.
    return self.client._put(self.url + fmt, json=payload, headers=self.get_headers())
| 862,224 |
Remove an extra charge from an invoice.
Args:
recurring_billing_id: Identifier of the additional charge.
Returns:
|
def delete_additional_charge(self, recurring_billing_id):
    """Remove an extra charge from an invoice."""
    endpoint = self.url + 'recurringBillItems/{}'.format(recurring_billing_id)
    return self.client._delete(endpoint, headers=self.get_headers())
| 862,225 |
Query the invoices that are paid or pending payment. The query can be made
by customer, by subscription, or by date range.
Args:
customer_id:
date_begin:
date_final:
Returns:
|
def get_recurring_bill_by_client(self, *, customer_id, date_begin=None, date_final=None):
    """List invoices (paid or pending) for a customer, optionally by date range."""
    query = {"customerId": customer_id}
    # Both bounds must be supplied for the date filter to apply.
    if date_begin and date_final:
        query['dateBegin'] = date_begin.strftime('%Y-%m-%d')
        query['dateFinal'] = date_final.strftime('%Y-%m-%d')
    return self.client._get(self.url + 'recurringBill', params=query, headers=self.get_headers())
| 862,226 |
Query the invoices that are paid or pending payment. The query can be made
by customer, by subscription, or by date range.
Args:
subscription_id:
Returns:
|
def get_recurring_bill_by_subscription(self, subscription_id):
    """List invoices (paid or pending) attached to a subscription."""
    query = {"subscriptionId": subscription_id}
    return self.client._get(self.url + 'recurringBill', params=query, headers=self.get_headers())
| 862,227 |
Alternate constructor intended for using JSON format of private key.
Args:
key (dict) - Parsed JSON with service account credentials.
scopes (Union[str, collections.Iterable[str]]) -
List of permissions that the application requests.
subject (str) - The email address of the user for which
the application is requesting delegated access.
Returns:
ServiceAccount
|
def from_json(cls, key, scopes, subject=None):
    """Alternate constructor from parsed JSON service-account credentials.

    Validates the credential type, loads the PEM private key, and builds
    the account with the embedded client email.
    """
    credentials_type = key['type']
    if credentials_type != 'service_account':
        raise ValueError('key: expected type service_account '
                         '(got %s)' % credentials_type)
    private_key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,
                                                 key['private_key'])
    return cls(key=private_key, email=key['client_email'],
               scopes=scopes, subject=subject)
| 862,792 |
Parse the result line of a phenomizer request.
Arguments:
line (str): A raw output line from phenomizer
Returns:
result (dict): A dictionary with the phenomizer info:
{
'p_value': float,
'gene_symbols': list(str),
'disease_nr': int,
'disease_source': str,
'description': str,
'raw_line': str
}
|
def parse_result(line):
    """Parse one phenomizer output line into a result dict.

    Args:
        line (str): A raw output line from phenomizer.

    Returns:
        dict: With keys p_value (float or None), gene_symbols (list of
        str), disease_nr (int or None), disease_source (str or None),
        description (str or None) and raw_line (str, rstripped).

    Raises:
        RuntimeError: If phenomizer reports a login problem.
    """
    if line.startswith("Problem"):
        raise RuntimeError("Login credentials seems to be wrong")
    result = {
        'p_value': None,
        'gene_symbols': [],
        'disease_nr': None,
        'disease_source': None,
        'description': None,
        # Fix: raw_line was previously assigned twice (raw, then
        # rstripped); assign the rstripped form once.
        'raw_line': line.rstrip(),
    }
    result_line = line.rstrip().split('\t')
    try:
        result['p_value'] = float(result_line[0])
    except ValueError:
        pass
    try:
        medical_litterature = result_line[2].split(':')
        result['disease_source'] = medical_litterature[0]
        result['disease_nr'] = int(medical_litterature[1])
    except (IndexError, ValueError):
        # Robustness fix: a non-numeric disease number previously crashed
        # (only IndexError was caught); tolerate malformed fields instead.
        pass
    try:
        result['description'] = result_line[3]
    except IndexError:
        pass
    if len(result_line) > 4:
        result['gene_symbols'] = [
            gene_symbol.strip() for gene_symbol in result_line[4].split(',')
        ]
    return result
| 863,199 |
Query the phenomizer web tool
Arguments:
usr (str): A username for phenomizer
pwd (str): A password for phenomizer
hpo_terms (list): A list with hpo terms
Returns:
raw_answer : The raw result from phenomizer
|
def query_phenomizer(usr, pwd, *hpo_terms):
    """Query the phenomizer web tool and return the raw HTTP response.

    Raises:
        RuntimeError: On timeout or a non-OK HTTP status code.
    """
    base_string = 'http://compbio.charite.de/phenomizer/phenomizer/PhenomizerServiceURI'
    questions = {
        'mobilequery': 'true',
        'terms': ','.join(hpo_terms),
        'username': usr,
        'password': pwd,
    }
    try:
        r = requests.get(base_string, params=questions, timeout=10)
    except requests.exceptions.Timeout:
        raise RuntimeError("The request timed out.")
    if r.status_code != requests.codes.ok:
        raise RuntimeError("Phenomizer returned a bad status code: %s" % r.status_code)
    # Force UTF-8 so downstream text parsing is deterministic.
    r.encoding = 'utf-8'
    return r
| 863,200 |
Query the phenomizer web tool
Arguments:
usr (str): A username for phenomizer
pwd (str): A password for phenomizer
hpo_terms (list): A list with hpo terms
yields:
parsed_term (dict): A dictionary with the parsed information
from phenomizer
|
def query(usr, pwd, *hpo_terms):
    """Yield parsed phenomizer results for the given HPO terms."""
    raw_result = query_phenomizer(usr, pwd, *hpo_terms)
    for line in raw_result.text.split('\n'):
        # Skip blank/one-char lines and '#' comment lines.
        if len(line) > 1 and not line.startswith('#'):
            yield parse_result(line)
| 863,201 |
Validate if the HPO term exists.
Check if there are any result when querying phenomizer.
Arguments:
usr (str): A username for phenomizer
pwd (str): A password for phenomizer
hpo_term (string): Represents the hpo term
Returns:
result (boolean): True if the term exists; otherwise a RuntimeError is raised by the query
|
def validate_term(usr, pwd, hpo_term):
    """Check that *hpo_term* is accepted by phenomizer.

    The query generator is fully consumed so that any failure surfaces.
    Note: despite the original docstring, this never returns False —
    invalid credentials or a failed query raise RuntimeError instead.

    Args:
        usr (str): A username for phenomizer.
        pwd (str): A password for phenomizer.
        hpo_term (str): The HPO term to validate.

    Returns:
        bool: True when the query completes without error.

    Raises:
        RuntimeError: Propagated from the underlying query.
    """
    # Cleanup: the original wrapped this in a try/except that only
    # re-raised the same exception, and never set its result to False.
    for _ in query(usr, pwd, hpo_term):
        pass
    return True
| 863,202 |
Original doc: get a string from the user
Arguments:
title -- the dialog title
prompt -- the label text
**kw -- see SimpleDialog class
Return value is a string
|
def askstring(title, prompt, **kw):
    """Ask the user for a string via psidialogs.

    *kw* is accepted for SimpleDialog compatibility but ignored.
    """
    return psidialogs.ask_string(message=prompt, title=title)
| 863,431 |
Get a URL and return its JSON response.
Args:
url (str): URL to be requested.
json_callback (func): Custom JSON loader function. Defaults
to :meth:`json.loads`.
kwargs (dict): Additional arguments to pass through to the
request.
Returns:
response body returned by :func:`json_callback` function.
|
async def get_json(self, url, json_callback=None, **kwargs):
    """GET *url* and return the body parsed by *json_callback*.

    Defaults to :func:`json.loads` when no callback is given; extra
    kwargs are forwarded to the underlying request.
    """
    loader = json_callback or json.loads
    response = await self.request(method='get', url=url, **kwargs)
    return loader(response)
| 863,593 |
Aggregate data from all pages of an API query.
Args:
url (str): Google API endpoint URL.
params (dict): (optional) URL query parameters.
Returns:
list: Parsed JSON query response results.
|
async def get_all(self, url, params=None):
    """Collect the JSON response of every page of a paginated API query.

    Each page's parsed response dict is appended as-is; pagination
    follows the 'nextPageToken' field until it is absent.
    """
    params = params or {}
    pages = []
    token = None
    while True:
        if token:
            params['pageToken'] = token
        page = await self.get_json(url, params=params)
        pages.append(page)
        token = page.get('nextPageToken')
        if not token:
            return pages
| 863,594 |
Parse message according to schema.
`message` should already be validated against the given schema.
See :ref:`schemadef` for more information.
Args:
message (dict): message data to parse.
schema (str): valid message schema.
Returns:
(dict): parsed message
|
def parse(self, message, schema):
    """Dispatch *message* to the parser registered for *schema*.

    Raises KeyError for an unknown schema, as before.
    """
    parsers = {
        'audit-log': self._parse_audit_log_msg,
        'event': self._parse_event_msg,
    }
    return parsers[schema](message)
| 863,843 |
Get all resource record sets for a managed zone, using the DNS zone.
Args:
dns_zone (str): Desired DNS zone to query.
params (dict): (optional) Additional query parameters for HTTP
requests to the GDNS API.
Returns:
list of dicts representing rrsets.
|
async def get_records_for_zone(self, dns_zone, params=None):
    """Fetch every resource record set for *dns_zone*, following pages."""
    managed_zone = self.get_managed_zone(dns_zone)
    url = f'{self._base_url}/managedZones/{managed_zone}/rrsets'
    params = params or {}
    # Restrict the response to the fields this code actually consumes.
    params.setdefault('fields', ('rrsets/name,rrsets/kind,rrsets/rrdatas,'
                                 'rrsets/type,rrsets/ttl,nextPageToken'))
    records = []
    page_token = None
    while True:
        if page_token:
            params['pageToken'] = page_token
        response = await self.get_json(url, params=params)
        records.extend(response['rrsets'])
        page_token = response.get('nextPageToken')
        if not page_token:
            break
    logging.info(f'Found {len(records)} rrsets for zone "{dns_zone}".')
    return records
| 864,284 |
Check if a DNS change has completed.
Args:
zone (str): DNS zone of the change.
change_id (str): Identifier of the change.
Returns:
Boolean
|
async def is_change_done(self, zone, change_id):
    """Return True once the DNS change has reached the 'done' status."""
    zone_id = self.get_managed_zone(zone)
    url = f'{self._base_url}/managedZones/{zone_id}/changes/{change_id}'
    response = await self.get_json(url)
    status = response['status']
    return status == self.DNS_CHANGES_DONE
| 864,285 |
Post changes to a zone.
Args:
zone (str): DNS zone of the change.
changes (dict): JSON compatible dict of a `Change
<https://cloud.google.com/dns/api/v1/changes>`_.
Returns:
string identifier of the change.
|
async def publish_changes(self, zone, changes):
    """POST a Change body to *zone* and return the change identifier."""
    zone_id = self.get_managed_zone(zone)
    url = f'{self._base_url}/managedZones/{zone_id}/changes'
    raw = await self.request('post', url, json=changes)
    parsed = json.loads(raw)
    return parsed['id']
| 864,286 |
Decorator used to output prettified JSON.
``response.content_type`` is set to ``application/json; charset=utf-8``.
Args:
fn (fn pointer): Function returning any basic python data structure.
Returns:
str: Data converted to prettified JSON.
|
def pretty_dump(fn):
    """Decorator: serialize *fn*'s return value as prettified JSON.

    Sets ``response.content_type`` to ``application/json; charset=utf-8``
    before returning the encoded body.
    """
    @wraps(fn)
    def pretty_dump_wrapper(*args, **kwargs):
        response.content_type = "application/json; charset=utf-8"
        data = fn(*args, **kwargs)
        return json.dumps(data, indent=4, separators=(',', ': '))

    return pretty_dump_wrapper
| 864,339 |
Return prettified JSON `data`, set ``response.content_type`` to
``application/json; charset=utf-8``.
Args:
data (any): Any basic python data structure.
Returns:
str: Data converted to prettified JSON.
|
def encode_json_body(data):
    """Return *data* as prettified JSON; pass file-like objects through.

    Sets ``response.content_type`` to ``application/json; charset=utf-8``
    when encoding.
    """
    # StringIO / file-like objects are returned unchanged for streaming.
    if hasattr(data, "read"):
        return data
    response.content_type = "application/json; charset=utf-8"
    return json.dumps(data, indent=4, separators=(',', ': '))
| 864,341 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.