Dataset columns:
  repo              string (length 7 to 55)
  path              string (length 4 to 223)
  url               string (length 87 to 315)
  code              string (length 75 to 104k)
  code_tokens       list
  docstring         string (length 1 to 46.9k)
  docstring_tokens  list
  language          string (1 distinct value)
  partition         string (3 distinct values)
  avg_line_len      float64 (7.91 to 980)
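These columns match the CodeSearchNet-style record layout (one record per Python function, with the function body, its token list, and its docstring stored side by side). A minimal sketch of how such a corpus is typically loaded and inspected, assuming the Hugging Face `datasets` library and a placeholder dataset identifier (the actual identifier of this dump is not given here):

import datasets

# Hypothetical dataset id; substitute the real identifier for this corpus.
ds = datasets.load_dataset("your-org/python-code-docstring-corpus", split="train")
print(ds.column_names)      # expected: repo, path, url, code, code_tokens, docstring, ...
row = ds[0]
print(row["repo"], row["path"])
print(row["avg_line_len"])  # per-sample average line length, as in the schema above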
icometrix/dicom2nifti
dicom2nifti/common.py
https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/common.py#L638-L660
def validate_slice_increment(dicoms):
    """
    Validate that the distance between all slices is equal (or very close to)

    :param dicoms: list of dicoms
    """
    first_image_position = numpy.array(dicoms[0].ImagePositionPatient)
    previous_image_position = numpy.array(dicoms[1].ImagePositionPatient)
    increment = first_image_position - previous_image_position
    for dicom_ in dicoms[2:]:
        current_image_position = numpy.array(dicom_.ImagePositionPatient)
        current_increment = previous_image_position - current_image_position
        if not numpy.allclose(increment, current_increment, rtol=0.05, atol=0.1):
            logger.warning('Slice increment not consistent through all slices')
            logger.warning('---------------------------------------------------------')
            logger.warning('%s %s' % (previous_image_position, increment))
            logger.warning('%s %s' % (current_image_position, current_increment))
            if 'InstanceNumber' in dicom_:
                logger.warning('Instance Number: %s' % dicom_.InstanceNumber)
            logger.warning('---------------------------------------------------------')
            raise ConversionValidationError('SLICE_INCREMENT_INCONSISTENT')
        previous_image_position = current_image_position
[ "def", "validate_slice_increment", "(", "dicoms", ")", ":", "first_image_position", "=", "numpy", ".", "array", "(", "dicoms", "[", "0", "]", ".", "ImagePositionPatient", ")", "previous_image_position", "=", "numpy", ".", "array", "(", "dicoms", "[", "1", "]", ".", "ImagePositionPatient", ")", "increment", "=", "first_image_position", "-", "previous_image_position", "for", "dicom_", "in", "dicoms", "[", "2", ":", "]", ":", "current_image_position", "=", "numpy", ".", "array", "(", "dicom_", ".", "ImagePositionPatient", ")", "current_increment", "=", "previous_image_position", "-", "current_image_position", "if", "not", "numpy", ".", "allclose", "(", "increment", ",", "current_increment", ",", "rtol", "=", "0.05", ",", "atol", "=", "0.1", ")", ":", "logger", ".", "warning", "(", "'Slice increment not consistent through all slices'", ")", "logger", ".", "warning", "(", "'---------------------------------------------------------'", ")", "logger", ".", "warning", "(", "'%s %s'", "%", "(", "previous_image_position", ",", "increment", ")", ")", "logger", ".", "warning", "(", "'%s %s'", "%", "(", "current_image_position", ",", "current_increment", ")", ")", "if", "'InstanceNumber'", "in", "dicom_", ":", "logger", ".", "warning", "(", "'Instance Number: %s'", "%", "dicom_", ".", "InstanceNumber", ")", "logger", ".", "warning", "(", "'---------------------------------------------------------'", ")", "raise", "ConversionValidationError", "(", "'SLICE_INCREMENT_INCONSISTENT'", ")", "previous_image_position", "=", "current_image_position" ]
Validate that the distance between all slices is equal (or very close to) :param dicoms: list of dicoms
[ "Validate", "that", "the", "distance", "between", "all", "slices", "is", "equal", "(", "or", "very", "close", "to", ")" ]
python
train
55.782609
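As an aside on the record above: the consistency test boils down to a single numpy.allclose call with rtol=0.05 and atol=0.1. A self-contained toy illustration of that tolerance, assuming only numpy (the slice positions below are made up):

import numpy as np

first = np.array([0.0, 0.0, 0.0])     # ImagePositionPatient of slice 0 (made-up values)
second = np.array([0.0, 0.0, 2.0])    # slice 1: 2 mm further along z
third = np.array([0.0, 0.0, 4.05])    # slice 2: 2.05 mm further along z

increment = first - second            # reference increment: [0, 0, -2.0]
current = second - third              # observed increment:  [0, 0, -2.05]

# 0.05 mm of jitter is within atol=0.1, so this spacing still passes the check.
print(np.allclose(increment, current, rtol=0.05, atol=0.1))  # True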
marrow/mongo
marrow/mongo/core/trait/collection.py
https://github.com/marrow/mongo/blob/2066dc73e281b8a46cb5fc965267d6b8e1b18467/marrow/mongo/core/trait/collection.py#L70-L82
def bind(cls, target):
    """Bind a copy of the collection to the class, modified per our class' settings.

    The given target (and eventual collection returned) must be safe within the context
    the document sublcass being bound is constructed within. E.g. at the module scope
    this binding must be thread-safe.
    """
    if cls.__bound__ is not None:
        return cls

    cls.__bound__ = cls.get_collection(target)

    return cls
[ "def", "bind", "(", "cls", ",", "target", ")", ":", "if", "cls", ".", "__bound__", "is", "not", "None", ":", "return", "cls", "cls", ".", "__bound__", "=", "cls", ".", "get_collection", "(", "target", ")", "return", "cls" ]
Bind a copy of the collection to the class, modified per our class' settings. The given target (and eventual collection returned) must be safe within the context the document sublcass being bound is constructed within. E.g. at the module scope this binding must be thread-safe.
[ "Bind", "a", "copy", "of", "the", "collection", "to", "the", "class", "modified", "per", "our", "class", "settings", ".", "The", "given", "target", "(", "and", "eventual", "collection", "returned", ")", "must", "be", "safe", "within", "the", "context", "the", "document", "sublcass", "being", "bound", "is", "constructed", "within", ".", "E", ".", "g", ".", "at", "the", "module", "scope", "this", "binding", "must", "be", "thread", "-", "safe", "." ]
python
train
32.230769
apple/turicreate
src/unity/python/turicreate/data_structures/sarray.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/sarray.py#L3296-L3493
def unpack(self, column_name_prefix = "X", column_types=None, na_value=None, limit=None):
    """
    Convert an SArray of list, array, or dict type to an SFrame with
    multiple columns.

    `unpack` expands an SArray using the values of each list/array/dict as
    elements in a new SFrame of multiple columns. For example, an SArray of
    lists each of length 4 will be expanded into an SFrame of 4 columns,
    one for each list element. An SArray of lists/arrays of varying size
    will be expand to a number of columns equal to the longest list/array.
    An SArray of dictionaries will be expanded into as many columns as
    there are keys.

    When unpacking an SArray of list or array type, new columns are named:
    `column_name_prefix`.0, `column_name_prefix`.1, etc. If unpacking a column
    of dict type, unpacked columns are named
    `column_name_prefix`.key1, `column_name_prefix`.key2, etc.

    When unpacking an SArray of list or dictionary types, missing values in
    the original element remain as missing values in the resultant columns.
    If the `na_value` parameter is specified, all values equal to this
    given value are also replaced with missing values. In an SArray of
    array.array type, NaN is interpreted as a missing value.

    :py:func:`turicreate.SFrame.pack_columns()` is the reverse effect of unpack

    Parameters
    ----------
    column_name_prefix: str, optional
        If provided, unpacked column names would start with the given prefix.

    column_types: list[type], optional
        Column types for the unpacked columns. If not provided, column
        types are automatically inferred from first 100 rows. Defaults to None.

    na_value: optional
        Convert all values that are equal to `na_value` to
        missing value if specified.

    limit: list, optional
        Limits the set of list/array/dict keys to unpack.
        For list/array SArrays, 'limit' must contain integer indices.
        For dict SArray, 'limit' must contain dictionary keys.

    Returns
    -------
    out : SFrame
        A new SFrame that contains all unpacked columns

    Examples
    --------
    To unpack a dict SArray

    >>> sa = SArray([{ 'word': 'a',     'count': 1},
    ...              { 'word': 'cat',   'count': 2},
    ...              { 'word': 'is',    'count': 3},
    ...              { 'word': 'coming','count': 4}])

    Normal case of unpacking SArray of type dict:

    >>> sa.unpack(column_name_prefix=None)
    Columns:
        count   int
        word    str
    <BLANKLINE>
    Rows: 4
    <BLANKLINE>
    Data:
    +-------+--------+
    | count |  word  |
    +-------+--------+
    |   1   |   a    |
    |   2   |  cat   |
    |   3   |   is   |
    |   4   | coming |
    +-------+--------+
    [4 rows x 2 columns]
    <BLANKLINE>

    Unpack only keys with 'word':

    >>> sa.unpack(limit=['word'])
    Columns:
        X.word  str
    <BLANKLINE>
    Rows: 4
    <BLANKLINE>
    Data:
    +--------+
    | X.word |
    +--------+
    |   a    |
    |  cat   |
    |   is   |
    | coming |
    +--------+
    [4 rows x 1 columns]
    <BLANKLINE>

    >>> sa2 = SArray([
    ...            [1, 0, 1],
    ...            [1, 1, 1],
    ...            [0, 1]])

    Convert all zeros to missing values:

    >>> sa2.unpack(column_types=[int, int, int], na_value=0)
    Columns:
        X.0     int
        X.1     int
        X.2     int
    <BLANKLINE>
    Rows: 3
    <BLANKLINE>
    Data:
    +------+------+------+
    | X.0  | X.1  | X.2  |
    +------+------+------+
    |  1   | None |  1   |
    |  1   |  1   |  1   |
    | None |  1   | None |
    +------+------+------+
    [3 rows x 3 columns]
    <BLANKLINE>
    """
    from .sframe import SFrame as _SFrame

    if self.dtype not in [dict, array.array, list]:
        raise TypeError("Only SArray of dict/list/array type supports unpack")

    if column_name_prefix is None:
        column_name_prefix = ""
    if not(isinstance(column_name_prefix, six.string_types)):
        raise TypeError("'column_name_prefix' must be a string")

    # validate 'limit'
    if limit is not None:
        if (not _is_non_string_iterable(limit)):
            raise TypeError("'limit' must be a list")

        name_types = set([type(i) for i in limit])
        if (len(name_types) != 1):
            raise TypeError("'limit' contains values that are different types")

        # limit value should be numeric if unpacking sarray.array value
        if (self.dtype != dict) and (name_types.pop() != int):
            raise TypeError("'limit' must contain integer values.")

        if len(set(limit)) != len(limit):
            raise ValueError("'limit' contains duplicate values")

    if (column_types is not None):
        if not _is_non_string_iterable(column_types):
            raise TypeError("column_types must be a list")
        for column_type in column_types:
            if (column_type not in (int, float, str, list, dict, array.array)):
                raise TypeError("column_types contains unsupported types. Supported types are ['float', 'int', 'list', 'dict', 'str', 'array.array']")

        if limit is not None:
            if len(limit) != len(column_types):
                raise ValueError("limit and column_types do not have the same length")
        elif self.dtype == dict:
            raise ValueError("if 'column_types' is given, 'limit' has to be provided to unpack dict type.")
        else:
            limit = range(len(column_types))

    else:
        head_rows = self.head(100).dropna()
        lengths = [len(i) for i in head_rows]
        if len(lengths) == 0 or max(lengths) == 0:
            raise RuntimeError("Cannot infer number of items from the SArray, SArray may be empty. please explicitly provide column types")

        # infer column types for dict type at server side, for list and array, infer from client side
        if self.dtype != dict:
            length = max(lengths)
            if limit is None:
                limit = range(length)
            else:
                # adjust the length
                length = len(limit)

            if self.dtype == array.array:
                column_types = [float for i in range(length)]
            else:
                column_types = list()
                for i in limit:
                    t = [(x[i] if ((x is not None) and len(x) > i) else None) for x in head_rows]
                    column_types.append(infer_type_of_list(t))

    with cython_context():
        if (self.dtype == dict and column_types is None):
            limit = limit if limit is not None else []
            return _SFrame(_proxy=self.__proxy__.unpack_dict(column_name_prefix.encode('utf-8'), limit, na_value))
        else:
            return _SFrame(_proxy=self.__proxy__.unpack(column_name_prefix.encode('utf-8'), limit, column_types, na_value))
[ "def", "unpack", "(", "self", ",", "column_name_prefix", "=", "\"X\"", ",", "column_types", "=", "None", ",", "na_value", "=", "None", ",", "limit", "=", "None", ")", ":", "from", ".", "sframe", "import", "SFrame", "as", "_SFrame", "if", "self", ".", "dtype", "not", "in", "[", "dict", ",", "array", ".", "array", ",", "list", "]", ":", "raise", "TypeError", "(", "\"Only SArray of dict/list/array type supports unpack\"", ")", "if", "column_name_prefix", "is", "None", ":", "column_name_prefix", "=", "\"\"", "if", "not", "(", "isinstance", "(", "column_name_prefix", ",", "six", ".", "string_types", ")", ")", ":", "raise", "TypeError", "(", "\"'column_name_prefix' must be a string\"", ")", "# validate 'limit'", "if", "limit", "is", "not", "None", ":", "if", "(", "not", "_is_non_string_iterable", "(", "limit", ")", ")", ":", "raise", "TypeError", "(", "\"'limit' must be a list\"", ")", "name_types", "=", "set", "(", "[", "type", "(", "i", ")", "for", "i", "in", "limit", "]", ")", "if", "(", "len", "(", "name_types", ")", "!=", "1", ")", ":", "raise", "TypeError", "(", "\"'limit' contains values that are different types\"", ")", "# limit value should be numeric if unpacking sarray.array value", "if", "(", "self", ".", "dtype", "!=", "dict", ")", "and", "(", "name_types", ".", "pop", "(", ")", "!=", "int", ")", ":", "raise", "TypeError", "(", "\"'limit' must contain integer values.\"", ")", "if", "len", "(", "set", "(", "limit", ")", ")", "!=", "len", "(", "limit", ")", ":", "raise", "ValueError", "(", "\"'limit' contains duplicate values\"", ")", "if", "(", "column_types", "is", "not", "None", ")", ":", "if", "not", "_is_non_string_iterable", "(", "column_types", ")", ":", "raise", "TypeError", "(", "\"column_types must be a list\"", ")", "for", "column_type", "in", "column_types", ":", "if", "(", "column_type", "not", "in", "(", "int", ",", "float", ",", "str", ",", "list", ",", "dict", ",", "array", ".", "array", ")", ")", ":", "raise", "TypeError", "(", "\"column_types contains unsupported types. Supported types are ['float', 'int', 'list', 'dict', 'str', 'array.array']\"", ")", "if", "limit", "is", "not", "None", ":", "if", "len", "(", "limit", ")", "!=", "len", "(", "column_types", ")", ":", "raise", "ValueError", "(", "\"limit and column_types do not have the same length\"", ")", "elif", "self", ".", "dtype", "==", "dict", ":", "raise", "ValueError", "(", "\"if 'column_types' is given, 'limit' has to be provided to unpack dict type.\"", ")", "else", ":", "limit", "=", "range", "(", "len", "(", "column_types", ")", ")", "else", ":", "head_rows", "=", "self", ".", "head", "(", "100", ")", ".", "dropna", "(", ")", "lengths", "=", "[", "len", "(", "i", ")", "for", "i", "in", "head_rows", "]", "if", "len", "(", "lengths", ")", "==", "0", "or", "max", "(", "lengths", ")", "==", "0", ":", "raise", "RuntimeError", "(", "\"Cannot infer number of items from the SArray, SArray may be empty. 
please explicitly provide column types\"", ")", "# infer column types for dict type at server side, for list and array, infer from client side", "if", "self", ".", "dtype", "!=", "dict", ":", "length", "=", "max", "(", "lengths", ")", "if", "limit", "is", "None", ":", "limit", "=", "range", "(", "length", ")", "else", ":", "# adjust the length", "length", "=", "len", "(", "limit", ")", "if", "self", ".", "dtype", "==", "array", ".", "array", ":", "column_types", "=", "[", "float", "for", "i", "in", "range", "(", "length", ")", "]", "else", ":", "column_types", "=", "list", "(", ")", "for", "i", "in", "limit", ":", "t", "=", "[", "(", "x", "[", "i", "]", "if", "(", "(", "x", "is", "not", "None", ")", "and", "len", "(", "x", ")", ">", "i", ")", "else", "None", ")", "for", "x", "in", "head_rows", "]", "column_types", ".", "append", "(", "infer_type_of_list", "(", "t", ")", ")", "with", "cython_context", "(", ")", ":", "if", "(", "self", ".", "dtype", "==", "dict", "and", "column_types", "is", "None", ")", ":", "limit", "=", "limit", "if", "limit", "is", "not", "None", "else", "[", "]", "return", "_SFrame", "(", "_proxy", "=", "self", ".", "__proxy__", ".", "unpack_dict", "(", "column_name_prefix", ".", "encode", "(", "'utf-8'", ")", ",", "limit", ",", "na_value", ")", ")", "else", ":", "return", "_SFrame", "(", "_proxy", "=", "self", ".", "__proxy__", ".", "unpack", "(", "column_name_prefix", ".", "encode", "(", "'utf-8'", ")", ",", "limit", ",", "column_types", ",", "na_value", ")", ")" ]
Convert an SArray of list, array, or dict type to an SFrame with multiple columns. `unpack` expands an SArray using the values of each list/array/dict as elements in a new SFrame of multiple columns. For example, an SArray of lists each of length 4 will be expanded into an SFrame of 4 columns, one for each list element. An SArray of lists/arrays of varying size will be expand to a number of columns equal to the longest list/array. An SArray of dictionaries will be expanded into as many columns as there are keys. When unpacking an SArray of list or array type, new columns are named: `column_name_prefix`.0, `column_name_prefix`.1, etc. If unpacking a column of dict type, unpacked columns are named `column_name_prefix`.key1, `column_name_prefix`.key2, etc. When unpacking an SArray of list or dictionary types, missing values in the original element remain as missing values in the resultant columns. If the `na_value` parameter is specified, all values equal to this given value are also replaced with missing values. In an SArray of array.array type, NaN is interpreted as a missing value. :py:func:`turicreate.SFrame.pack_columns()` is the reverse effect of unpack Parameters ---------- column_name_prefix: str, optional If provided, unpacked column names would start with the given prefix. column_types: list[type], optional Column types for the unpacked columns. If not provided, column types are automatically inferred from first 100 rows. Defaults to None. na_value: optional Convert all values that are equal to `na_value` to missing value if specified. limit: list, optional Limits the set of list/array/dict keys to unpack. For list/array SArrays, 'limit' must contain integer indices. For dict SArray, 'limit' must contain dictionary keys. Returns ------- out : SFrame A new SFrame that contains all unpacked columns Examples -------- To unpack a dict SArray >>> sa = SArray([{ 'word': 'a', 'count': 1}, ... { 'word': 'cat', 'count': 2}, ... { 'word': 'is', 'count': 3}, ... { 'word': 'coming','count': 4}]) Normal case of unpacking SArray of type dict: >>> sa.unpack(column_name_prefix=None) Columns: count int word str <BLANKLINE> Rows: 4 <BLANKLINE> Data: +-------+--------+ | count | word | +-------+--------+ | 1 | a | | 2 | cat | | 3 | is | | 4 | coming | +-------+--------+ [4 rows x 2 columns] <BLANKLINE> Unpack only keys with 'word': >>> sa.unpack(limit=['word']) Columns: X.word str <BLANKLINE> Rows: 4 <BLANKLINE> Data: +--------+ | X.word | +--------+ | a | | cat | | is | | coming | +--------+ [4 rows x 1 columns] <BLANKLINE> >>> sa2 = SArray([ ... [1, 0, 1], ... [1, 1, 1], ... [0, 1]]) Convert all zeros to missing values: >>> sa2.unpack(column_types=[int, int, int], na_value=0) Columns: X.0 int X.1 int X.2 int <BLANKLINE> Rows: 3 <BLANKLINE> Data: +------+------+------+ | X.0 | X.1 | X.2 | +------+------+------+ | 1 | None | 1 | | 1 | 1 | 1 | | None | 1 | None | +------+------+------+ [3 rows x 3 columns] <BLANKLINE>
[ "Convert", "an", "SArray", "of", "list", "array", "or", "dict", "type", "to", "an", "SFrame", "with", "multiple", "columns", "." ]
python
train
37.156566
JoelBender/bacpypes
samples/ReadPropertyMultiple25.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/samples/ReadPropertyMultiple25.py#L43-L181
def do_read(self, args):
    """read <addr> ( <objid> ( <prop> [ <indx> ] )... )..."""
    args = args.split()
    if _debug: ReadPropertyMultipleConsoleCmd._debug("do_read %r", args)

    try:
        i = 0
        addr = args[i]
        i += 1

        read_access_spec_list = []
        while i < len(args):
            obj_id = ObjectIdentifier(args[i]).value
            i += 1

            prop_reference_list = []
            while i < len(args):
                prop_id = args[i]
                if prop_id not in PropertyIdentifier.enumerations:
                    break
                i += 1

                if prop_id in ('all', 'required', 'optional'):
                    pass
                else:
                    datatype = get_datatype(obj_id[0], prop_id)
                    if not datatype:
                        raise ValueError("invalid property for object type")

                # build a property reference
                prop_reference = PropertyReference(
                    propertyIdentifier=prop_id,
                    )

                # check for an array index
                if (i < len(args)) and args[i].isdigit():
                    prop_reference.propertyArrayIndex = int(args[i])
                    i += 1

                # add it to the list
                prop_reference_list.append(prop_reference)

            # check for at least one property
            if not prop_reference_list:
                raise ValueError("provide at least one property")

            # build a read access specification
            read_access_spec = ReadAccessSpecification(
                objectIdentifier=obj_id,
                listOfPropertyReferences=prop_reference_list,
                )

            # add it to the list
            read_access_spec_list.append(read_access_spec)

        # check for at least one
        if not read_access_spec_list:
            raise RuntimeError("at least one read access specification required")

        # build the request
        request = ReadPropertyMultipleRequest(
            listOfReadAccessSpecs=read_access_spec_list,
            )
        request.pduDestination = Address(addr)
        if _debug: ReadPropertyMultipleConsoleCmd._debug(" - request: %r", request)

        # make an IOCB
        iocb = IOCB(request)
        if _debug: ReadPropertyMultipleConsoleCmd._debug(" - iocb: %r", iocb)

        # give it to the application
        deferred(this_application.request_io, iocb)

        # wait for it to complete
        iocb.wait()

        # do something for success
        if iocb.ioResponse:
            apdu = iocb.ioResponse

            # should be an ack
            if not isinstance(apdu, ReadPropertyMultipleACK):
                if _debug: ReadPropertyMultipleConsoleCmd._debug(" - not an ack")
                return

            # loop through the results
            for result in apdu.listOfReadAccessResults:
                # here is the object identifier
                objectIdentifier = result.objectIdentifier
                if _debug: ReadPropertyMultipleConsoleCmd._debug(" - objectIdentifier: %r", objectIdentifier)

                # now come the property values per object
                for element in result.listOfResults:
                    # get the property and array index
                    propertyIdentifier = element.propertyIdentifier
                    if _debug: ReadPropertyMultipleConsoleCmd._debug(" - propertyIdentifier: %r", propertyIdentifier)
                    propertyArrayIndex = element.propertyArrayIndex
                    if _debug: ReadPropertyMultipleConsoleCmd._debug(" - propertyArrayIndex: %r", propertyArrayIndex)

                    # here is the read result
                    readResult = element.readResult

                    sys.stdout.write(propertyIdentifier)
                    if propertyArrayIndex is not None:
                        sys.stdout.write("[" + str(propertyArrayIndex) + "]")

                    # check for an error
                    if readResult.propertyAccessError is not None:
                        sys.stdout.write(" ! " + str(readResult.propertyAccessError) + '\n')
                    else:
                        # here is the value
                        propertyValue = readResult.propertyValue

                        # find the datatype
                        datatype = get_datatype(objectIdentifier[0], propertyIdentifier)
                        if _debug: ReadPropertyMultipleConsoleCmd._debug(" - datatype: %r", datatype)
                        if not datatype:
                            raise TypeError("unknown datatype")

                        # special case for array parts, others are managed by cast_out
                        if issubclass(datatype, Array) and (propertyArrayIndex is not None):
                            if propertyArrayIndex == 0:
                                value = propertyValue.cast_out(Unsigned)
                            else:
                                value = propertyValue.cast_out(datatype.subtype)
                        else:
                            value = propertyValue.cast_out(datatype)
                        if _debug: ReadPropertyMultipleConsoleCmd._debug(" - value: %r", value)

                        sys.stdout.write(" = " + str(value) + '\n')
                    sys.stdout.flush()

        # do something for error/reject/abort
        if iocb.ioError:
            sys.stdout.write(str(iocb.ioError) + '\n')

    except Exception, error:
        ReadPropertyMultipleConsoleCmd._exception("exception: %r", error)
[ "def", "do_read", "(", "self", ",", "args", ")", ":", "args", "=", "args", ".", "split", "(", ")", "if", "_debug", ":", "ReadPropertyMultipleConsoleCmd", ".", "_debug", "(", "\"do_read %r\"", ",", "args", ")", "try", ":", "i", "=", "0", "addr", "=", "args", "[", "i", "]", "i", "+=", "1", "read_access_spec_list", "=", "[", "]", "while", "i", "<", "len", "(", "args", ")", ":", "obj_id", "=", "ObjectIdentifier", "(", "args", "[", "i", "]", ")", ".", "value", "i", "+=", "1", "prop_reference_list", "=", "[", "]", "while", "i", "<", "len", "(", "args", ")", ":", "prop_id", "=", "args", "[", "i", "]", "if", "prop_id", "not", "in", "PropertyIdentifier", ".", "enumerations", ":", "break", "i", "+=", "1", "if", "prop_id", "in", "(", "'all'", ",", "'required'", ",", "'optional'", ")", ":", "pass", "else", ":", "datatype", "=", "get_datatype", "(", "obj_id", "[", "0", "]", ",", "prop_id", ")", "if", "not", "datatype", ":", "raise", "ValueError", "(", "\"invalid property for object type\"", ")", "# build a property reference", "prop_reference", "=", "PropertyReference", "(", "propertyIdentifier", "=", "prop_id", ",", ")", "# check for an array index", "if", "(", "i", "<", "len", "(", "args", ")", ")", "and", "args", "[", "i", "]", ".", "isdigit", "(", ")", ":", "prop_reference", ".", "propertyArrayIndex", "=", "int", "(", "args", "[", "i", "]", ")", "i", "+=", "1", "# add it to the list", "prop_reference_list", ".", "append", "(", "prop_reference", ")", "# check for at least one property", "if", "not", "prop_reference_list", ":", "raise", "ValueError", "(", "\"provide at least one property\"", ")", "# build a read access specification", "read_access_spec", "=", "ReadAccessSpecification", "(", "objectIdentifier", "=", "obj_id", ",", "listOfPropertyReferences", "=", "prop_reference_list", ",", ")", "# add it to the list", "read_access_spec_list", ".", "append", "(", "read_access_spec", ")", "# check for at least one", "if", "not", "read_access_spec_list", ":", "raise", "RuntimeError", "(", "\"at least one read access specification required\"", ")", "# build the request", "request", "=", "ReadPropertyMultipleRequest", "(", "listOfReadAccessSpecs", "=", "read_access_spec_list", ",", ")", "request", ".", "pduDestination", "=", "Address", "(", "addr", ")", "if", "_debug", ":", "ReadPropertyMultipleConsoleCmd", ".", "_debug", "(", "\" - request: %r\"", ",", "request", ")", "# make an IOCB", "iocb", "=", "IOCB", "(", "request", ")", "if", "_debug", ":", "ReadPropertyMultipleConsoleCmd", ".", "_debug", "(", "\" - iocb: %r\"", ",", "iocb", ")", "# give it to the application", "deferred", "(", "this_application", ".", "request_io", ",", "iocb", ")", "# wait for it to complete", "iocb", ".", "wait", "(", ")", "# do something for success", "if", "iocb", ".", "ioResponse", ":", "apdu", "=", "iocb", ".", "ioResponse", "# should be an ack", "if", "not", "isinstance", "(", "apdu", ",", "ReadPropertyMultipleACK", ")", ":", "if", "_debug", ":", "ReadPropertyMultipleConsoleCmd", ".", "_debug", "(", "\" - not an ack\"", ")", "return", "# loop through the results", "for", "result", "in", "apdu", ".", "listOfReadAccessResults", ":", "# here is the object identifier", "objectIdentifier", "=", "result", ".", "objectIdentifier", "if", "_debug", ":", "ReadPropertyMultipleConsoleCmd", ".", "_debug", "(", "\" - objectIdentifier: %r\"", ",", "objectIdentifier", ")", "# now come the property values per object", "for", "element", "in", "result", ".", "listOfResults", ":", "# get the property and array index", "propertyIdentifier", 
"=", "element", ".", "propertyIdentifier", "if", "_debug", ":", "ReadPropertyMultipleConsoleCmd", ".", "_debug", "(", "\" - propertyIdentifier: %r\"", ",", "propertyIdentifier", ")", "propertyArrayIndex", "=", "element", ".", "propertyArrayIndex", "if", "_debug", ":", "ReadPropertyMultipleConsoleCmd", ".", "_debug", "(", "\" - propertyArrayIndex: %r\"", ",", "propertyArrayIndex", ")", "# here is the read result", "readResult", "=", "element", ".", "readResult", "sys", ".", "stdout", ".", "write", "(", "propertyIdentifier", ")", "if", "propertyArrayIndex", "is", "not", "None", ":", "sys", ".", "stdout", ".", "write", "(", "\"[\"", "+", "str", "(", "propertyArrayIndex", ")", "+", "\"]\"", ")", "# check for an error", "if", "readResult", ".", "propertyAccessError", "is", "not", "None", ":", "sys", ".", "stdout", ".", "write", "(", "\" ! \"", "+", "str", "(", "readResult", ".", "propertyAccessError", ")", "+", "'\\n'", ")", "else", ":", "# here is the value", "propertyValue", "=", "readResult", ".", "propertyValue", "# find the datatype", "datatype", "=", "get_datatype", "(", "objectIdentifier", "[", "0", "]", ",", "propertyIdentifier", ")", "if", "_debug", ":", "ReadPropertyMultipleConsoleCmd", ".", "_debug", "(", "\" - datatype: %r\"", ",", "datatype", ")", "if", "not", "datatype", ":", "raise", "TypeError", "(", "\"unknown datatype\"", ")", "# special case for array parts, others are managed by cast_out", "if", "issubclass", "(", "datatype", ",", "Array", ")", "and", "(", "propertyArrayIndex", "is", "not", "None", ")", ":", "if", "propertyArrayIndex", "==", "0", ":", "value", "=", "propertyValue", ".", "cast_out", "(", "Unsigned", ")", "else", ":", "value", "=", "propertyValue", ".", "cast_out", "(", "datatype", ".", "subtype", ")", "else", ":", "value", "=", "propertyValue", ".", "cast_out", "(", "datatype", ")", "if", "_debug", ":", "ReadPropertyMultipleConsoleCmd", ".", "_debug", "(", "\" - value: %r\"", ",", "value", ")", "sys", ".", "stdout", ".", "write", "(", "\" = \"", "+", "str", "(", "value", ")", "+", "'\\n'", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "# do something for error/reject/abort", "if", "iocb", ".", "ioError", ":", "sys", ".", "stdout", ".", "write", "(", "str", "(", "iocb", ".", "ioError", ")", "+", "'\\n'", ")", "except", "Exception", ",", "error", ":", "ReadPropertyMultipleConsoleCmd", ".", "_exception", "(", "\"exception: %r\"", ",", "error", ")" ]
read <addr> ( <objid> ( <prop> [ <indx> ] )... )...
[ "read", "<addr", ">", "(", "<objid", ">", "(", "<prop", ">", "[", "<indx", ">", "]", ")", "...", ")", "..." ]
python
train
42.726619
etcher-be/epab
epab/cmd/_release.py
https://github.com/etcher-be/epab/blob/024cde74d058281aa66e6e4b7b71dccbe803b1c1/epab/cmd/_release.py#L23-L36
def _clean():
    """
    Cleans up build dir
    """
    LOGGER.info('Cleaning project directory...')
    folders_to_cleanup = [
        '.eggs',
        'build',
        f'{config.PACKAGE_NAME()}.egg-info',
    ]
    for folder in folders_to_cleanup:
        if os.path.exists(folder):
            LOGGER.info('\tremoving: %s', folder)
            shutil.rmtree(folder)
[ "def", "_clean", "(", ")", ":", "LOGGER", ".", "info", "(", "'Cleaning project directory...'", ")", "folders_to_cleanup", "=", "[", "'.eggs'", ",", "'build'", ",", "f'{config.PACKAGE_NAME()}.egg-info'", ",", "]", "for", "folder", "in", "folders_to_cleanup", ":", "if", "os", ".", "path", ".", "exists", "(", "folder", ")", ":", "LOGGER", ".", "info", "(", "'\\tremoving: %s'", ",", "folder", ")", "shutil", ".", "rmtree", "(", "folder", ")" ]
Cleans up build dir
[ "Cleans", "up", "build", "dir" ]
python
train
25.571429
wummel/linkchecker
linkcheck/bookmarks/safari.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/bookmarks/safari.py#L34-L49
def find_bookmark_file ():
    """Return the bookmark file of the Default profile.

    Returns absolute filename if found, or empty string if no
    bookmark file could be found.
    """
    if sys.platform != 'darwin':
        return u""
    try:
        dirname = get_profile_dir()
        if os.path.isdir(dirname):
            fname = os.path.join(dirname, u"Bookmarks.plist")
            if os.path.isfile(fname):
                return fname
    except Exception:
        pass
    return u""
[ "def", "find_bookmark_file", "(", ")", ":", "if", "sys", ".", "platform", "!=", "'darwin'", ":", "return", "u\"\"", "try", ":", "dirname", "=", "get_profile_dir", "(", ")", "if", "os", ".", "path", ".", "isdir", "(", "dirname", ")", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "dirname", ",", "u\"Bookmarks.plist\"", ")", "if", "os", ".", "path", ".", "isfile", "(", "fname", ")", ":", "return", "fname", "except", "Exception", ":", "pass", "return", "u\"\"" ]
Return the bookmark file of the Default profile. Returns absolute filename if found, or empty string if no bookmark file could be found.
[ "Return", "the", "bookmark", "file", "of", "the", "Default", "profile", ".", "Returns", "absolute", "filename", "if", "found", "or", "empty", "string", "if", "no", "bookmark", "file", "could", "be", "found", "." ]
python
train
30.125
ellethee/argparseinator
argparseinator/utils.py
https://github.com/ellethee/argparseinator/blob/05e9c00dfaa938b9c4ee2aadc6206f5e0918e24e/argparseinator/utils.py#L156-L171
def has_argument(arg, arguments):
    """
    Verifica se ci sono argument con la classe.
    """
    try:
        if not isinstance(arguments, list):
            arguments = arguments.__arguments__
        for idx, (args, kwargs) in enumerate(arguments):
            arg_name = kwargs.get(
                'dest', args[-1].lstrip('-').replace('-', '_'))
            if arg_name == arg:
                return idx
        idx = False
    except (ValueError, AttributeError):
        idx = False
    return idx
[ "def", "has_argument", "(", "arg", ",", "arguments", ")", ":", "try", ":", "if", "not", "isinstance", "(", "arguments", ",", "list", ")", ":", "arguments", "=", "arguments", ".", "__arguments__", "for", "idx", ",", "(", "args", ",", "kwargs", ")", "in", "enumerate", "(", "arguments", ")", ":", "arg_name", "=", "kwargs", ".", "get", "(", "'dest'", ",", "args", "[", "-", "1", "]", ".", "lstrip", "(", "'-'", ")", ".", "replace", "(", "'-'", ",", "'_'", ")", ")", "if", "arg_name", "==", "arg", ":", "return", "idx", "idx", "=", "False", "except", "(", "ValueError", ",", "AttributeError", ")", ":", "idx", "=", "False", "return", "idx" ]
Verifica se ci sono argument con la classe.
[ "Verifica", "se", "ci", "sono", "argument", "con", "la", "classe", "." ]
python
train
30.875
morepath/more.jwtauth
more/jwtauth/main.py
https://github.com/morepath/more.jwtauth/blob/1c3c5731612069a092e44cf612641c05edf1f083/more/jwtauth/main.py#L335-L352
def get_jwt(self, request):
    """Extract the JWT token from the authorisation header of the request.

    Returns the JWT token or None, if the token cannot be extracted.

    :param request: request object.
    :type request: :class:`morepath.Request`
    """
    try:
        authorization = request.authorization
    except ValueError:
        return None
    if authorization is None:
        return None
    authtype, token = authorization
    if authtype.lower() != self.auth_header_prefix.lower():
        return None
    return token
[ "def", "get_jwt", "(", "self", ",", "request", ")", ":", "try", ":", "authorization", "=", "request", ".", "authorization", "except", "ValueError", ":", "return", "None", "if", "authorization", "is", "None", ":", "return", "None", "authtype", ",", "token", "=", "authorization", "if", "authtype", ".", "lower", "(", ")", "!=", "self", ".", "auth_header_prefix", ".", "lower", "(", ")", ":", "return", "None", "return", "token" ]
Extract the JWT token from the authorisation header of the request. Returns the JWT token or None, if the token cannot be extracted. :param request: request object. :type request: :class:`morepath.Request`
[ "Extract", "the", "JWT", "token", "from", "the", "authorisation", "header", "of", "the", "request", "." ]
python
train
32.555556
dcaune/perseus-lib-python-common
majormode/perseus/utils/rdbms.py
https://github.com/dcaune/perseus-lib-python-common/blob/ba48fe0fd9bb4a75b53e7d10c41ada36a72d4496/majormode/perseus/utils/rdbms.py#L426-L479
def acquire_connection(settings, tag=None, logger_name=None, auto_commit=False):
    """
    Return a connection to a Relational DataBase Management System (RDBMS)
    the most appropriate for the service requesting this connection.

    @param settings: a dictionary of connection properties::

        {
          None: {
            'rdbms_hostname': "...",
            'rdbms_port': ...,
            'rdbms_database_name': "...",
            'rdbms_account_username': '...'
            'rdbms_account_password': '...'
          },
          'tag': {
            'rdbms_hostname': "...",
            'rdbms_port': ...,
            'rdbms_database_name': "...",
            'rdbms_account_username': '...'
            'rdbms_account_password': '...'
          },
          ...
        }

        The key ``None`` is the default tag.

    @param tag: a tag that specifies which particular connection properties
        has to be used.

    @param logger_name: name of the logger for debug information.

    @param auto_commit: indicate whether the transaction needs to be
        committed at the end of the session.

    @return: a ``RdbmsConnection`` instance to be used supporting the
        Python clause ``with ...:``.

    @raise DefaultConnectionPropertiesSettingException: if the specified tag
        is not defined in the dictionary of connection properties, and when
        no default connection properties is defined either (tag ``None``).
    """
    try:
        connection_properties = settings.get(tag, settings[None])
    except KeyError:
        raise RdbmsConnection.DefaultConnectionPropertiesSettingException()

    return RdbmsConnection(
        connection_properties['rdbms_hostname'],
        connection_properties['rdbms_port'],
        connection_properties['rdbms_database_name'],
        connection_properties['rdbms_account_username'],
        connection_properties['rdbms_account_password'],
        logger_name=logger_name,
        auto_commit=auto_commit)
[ "def", "acquire_connection", "(", "settings", ",", "tag", "=", "None", ",", "logger_name", "=", "None", ",", "auto_commit", "=", "False", ")", ":", "try", ":", "connection_properties", "=", "settings", ".", "get", "(", "tag", ",", "settings", "[", "None", "]", ")", "except", "KeyError", ":", "raise", "RdbmsConnection", ".", "DefaultConnectionPropertiesSettingException", "(", ")", "return", "RdbmsConnection", "(", "connection_properties", "[", "'rdbms_hostname'", "]", ",", "connection_properties", "[", "'rdbms_port'", "]", ",", "connection_properties", "[", "'rdbms_database_name'", "]", ",", "connection_properties", "[", "'rdbms_account_username'", "]", ",", "connection_properties", "[", "'rdbms_account_password'", "]", ",", "logger_name", "=", "logger_name", ",", "auto_commit", "=", "auto_commit", ")" ]
Return a connection to a Relational DataBase Management System (RDBMS) the most appropriate for the service requesting this connection. @param settings: a dictionary of connection properties:: { None: { 'rdbms_hostname': "...", 'rdbms_port': ..., 'rdbms_database_name': "...", 'rdbms_account_username': '...' 'rdbms_account_password': '...' }, 'tag': { 'rdbms_hostname': "...", 'rdbms_port': ..., 'rdbms_database_name': "...", 'rdbms_account_username': '...' 'rdbms_account_password': '...' }, ... } The key ``None`` is the default tag. @param tag: a tag that specifies which particular connection properties has to be used. @param logger_name: name of the logger for debug information. @param auto_commit: indicate whether the transaction needs to be committed at the end of the session. @return: a ``RdbmsConnection`` instance to be used supporting the Python clause ``with ...:``. @raise DefaultConnectionPropertiesSettingException: if the specified tag is not defined in the dictionary of connection properties, and when no default connection properties is defined either (tag ``None``).
[ "Return", "a", "connection", "to", "a", "Relational", "DataBase", "Management", "System", "(", "RDBMS", ")", "the", "most", "appropriate", "for", "the", "service", "requesting", "this", "connection", "." ]
python
train
41.814815
gsi-upm/soil
examples/pubcrawl/pubcrawl.py
https://github.com/gsi-upm/soil/blob/a3ea434f237f039c3cadbc2e0a83ae626d77b818/examples/pubcrawl/pubcrawl.py#L43-L53
def exit(self, pub_id, *node_ids):
    '''Agents will notify the pub they want to leave'''
    try:
        pub = self['pubs'][pub_id]
    except KeyError:
        raise ValueError('Pub {} is not available'.format(pub_id))
    for node_id in node_ids:
        node = self.get_agent(node_id)
        if pub_id == node['pub']:
            del node['pub']
            pub['occupancy'] -= 1
[ "def", "exit", "(", "self", ",", "pub_id", ",", "*", "node_ids", ")", ":", "try", ":", "pub", "=", "self", "[", "'pubs'", "]", "[", "pub_id", "]", "except", "KeyError", ":", "raise", "ValueError", "(", "'Pub {} is not available'", ".", "format", "(", "pub_id", ")", ")", "for", "node_id", "in", "node_ids", ":", "node", "=", "self", ".", "get_agent", "(", "node_id", ")", "if", "pub_id", "==", "node", "[", "'pub'", "]", ":", "del", "node", "[", "'pub'", "]", "pub", "[", "'occupancy'", "]", "-=", "1" ]
Agents will notify the pub they want to leave
[ "Agents", "will", "notify", "the", "pub", "they", "want", "to", "leave" ]
python
train
37.818182
fastai/fastai
fastai/vision/image.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/vision/image.py#L409-L414
def rle_encode(img:NPArrayMask)->str:
    "Return run-length encoding string from `img`."
    pixels = np.concatenate([[0], img.flatten() , [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return ' '.join(str(x) for x in runs)
[ "def", "rle_encode", "(", "img", ":", "NPArrayMask", ")", "->", "str", ":", "pixels", "=", "np", ".", "concatenate", "(", "[", "[", "0", "]", ",", "img", ".", "flatten", "(", ")", ",", "[", "0", "]", "]", ")", "runs", "=", "np", ".", "where", "(", "pixels", "[", "1", ":", "]", "!=", "pixels", "[", ":", "-", "1", "]", ")", "[", "0", "]", "+", "1", "runs", "[", "1", ":", ":", "2", "]", "-=", "runs", "[", ":", ":", "2", "]", "return", "' '", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "runs", ")" ]
Return run-length encoding string from `img`.
[ "Return", "run", "-", "length", "encoding", "string", "from", "img", "." ]
python
train
44
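As an aside on the `rle_encode` record above, a toy check of the encoding it produces (assuming only numpy and the function exactly as shown; run starts are 1-based pixel positions, emitted as start/length pairs):

import numpy as np

mask = np.array([[0, 1, 1],
                 [0, 1, 0]])
# Flattened mask: 0 1 1 0 1 0 -> a run of two 1s starting at pixel 2,
# then a run of one 1 starting at pixel 5 (1-based positions).
print(rle_encode(mask))  # '2 2 5 1'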
basho/riak-python-client
riak/transports/http/connection.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/http/connection.py#L101-L115
def _security_auth_headers(self, username, password, headers):
    """
    Add in the requisite HTTP Authentication Headers

    :param username: Riak Security Username
    :type str
    :param password: Riak Security Password
    :type str
    :param headers: Dictionary of headers
    :type dict
    """
    userColonPassword = username + ":" + password
    b64UserColonPassword = base64. \
        b64encode(str_to_bytes(userColonPassword)).decode("ascii")
    headers['Authorization'] = 'Basic %s' % b64UserColonPassword
[ "def", "_security_auth_headers", "(", "self", ",", "username", ",", "password", ",", "headers", ")", ":", "userColonPassword", "=", "username", "+", "\":\"", "+", "password", "b64UserColonPassword", "=", "base64", ".", "b64encode", "(", "str_to_bytes", "(", "userColonPassword", ")", ")", ".", "decode", "(", "\"ascii\"", ")", "headers", "[", "'Authorization'", "]", "=", "'Basic %s'", "%", "b64UserColonPassword" ]
Add in the requisite HTTP Authentication Headers :param username: Riak Security Username :type str :param password: Riak Security Password :type str :param headers: Dictionary of headers :type dict
[ "Add", "in", "the", "requisite", "HTTP", "Authentication", "Headers" ]
python
train
37.466667
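As an aside on the `_security_auth_headers` record above, the header it builds is ordinary HTTP Basic authentication; a standalone sketch using only the standard library (the credentials below are made up):

import base64

username, password = "riak", "pass"  # made-up credentials
token = base64.b64encode(("%s:%s" % (username, password)).encode()).decode("ascii")
headers = {}
headers['Authorization'] = 'Basic %s' % token
print(headers)  # {'Authorization': 'Basic cmlhazpwYXNz'}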
pybel/pybel
src/pybel/struct/filters/node_filters.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/filters/node_filters.py#L84-L86
def count_passed_node_filter(graph: BELGraph, node_predicates: NodePredicates) -> int:
    """Count how many nodes pass a given set of node predicates."""
    return sum(1 for _ in filter_nodes(graph, node_predicates=node_predicates))
[ "def", "count_passed_node_filter", "(", "graph", ":", "BELGraph", ",", "node_predicates", ":", "NodePredicates", ")", "->", "int", ":", "return", "sum", "(", "1", "for", "_", "in", "filter_nodes", "(", "graph", ",", "node_predicates", "=", "node_predicates", ")", ")" ]
Count how many nodes pass a given set of node predicates.
[ "Count", "how", "many", "nodes", "pass", "a", "given", "set", "of", "node", "predicates", "." ]
python
train
77.333333
thombashi/SimpleSQLite
simplesqlite/core.py
https://github.com/thombashi/SimpleSQLite/blob/b16f212132b9b98773e68bf7395abc2f60f56fe5/simplesqlite/core.py#L595-L623
def update(self, table_name, set_query, where=None):
    """Execute an UPDATE query.

    Args:
        table_name (|str|):
            Table name of executing the query.
        set_query (|str|):
            ``SET`` clause for the update query.
        where (|arg_where_type| , optional):
            ``WHERE`` clause for the update query.
            Defaults to |None|.

    Raises:
        IOError:
            |raises_write_permission|
        simplesqlite.NullDatabaseConnectionError:
            |raises_check_connection|
        simplesqlite.TableNotFoundError:
            |raises_verify_table_existence|
        simplesqlite.OperationalError:
            |raises_operational_error|
    """

    self.validate_access_permission(["w", "a"])
    self.verify_table_existence(table_name)

    query = SqlQuery.make_update(table_name, set_query, where)

    return self.execute_query(query, logging.getLogger().findCaller())
[ "def", "update", "(", "self", ",", "table_name", ",", "set_query", ",", "where", "=", "None", ")", ":", "self", ".", "validate_access_permission", "(", "[", "\"w\"", ",", "\"a\"", "]", ")", "self", ".", "verify_table_existence", "(", "table_name", ")", "query", "=", "SqlQuery", ".", "make_update", "(", "table_name", ",", "set_query", ",", "where", ")", "return", "self", ".", "execute_query", "(", "query", ",", "logging", ".", "getLogger", "(", ")", ".", "findCaller", "(", ")", ")" ]
Execute an UPDATE query. Args: table_name (|str|): Table name of executing the query. set_query (|str|): ``SET`` clause for the update query. where (|arg_where_type| , optional): ``WHERE`` clause for the update query. Defaults to |None|. Raises: IOError: |raises_write_permission| simplesqlite.NullDatabaseConnectionError: |raises_check_connection| simplesqlite.TableNotFoundError: |raises_verify_table_existence| simplesqlite.OperationalError: |raises_operational_error|
[ "Execute", "an", "UPDATE", "query", "." ]
python
train
34.275862
bretth/woven
woven/webservers.py
https://github.com/bretth/woven/blob/ec1da7b401a335f43129e7115fe7a4d145649f1e/woven/webservers.py#L277-L284
def webserver_list():
    """
    list of webserver packages
    """
    p = set(get_packages())
    w = set(['apache2','gunicorn','uwsgi','nginx'])
    installed = p & w
    return list(installed)
[ "def", "webserver_list", "(", ")", ":", "p", "=", "set", "(", "get_packages", "(", ")", ")", "w", "=", "set", "(", "[", "'apache2'", ",", "'gunicorn'", ",", "'uwsgi'", ",", "'nginx'", "]", ")", "installed", "=", "p", "&", "w", "return", "list", "(", "installed", ")" ]
list of webserver packages
[ "list", "of", "webserver", "packages" ]
python
train
23.75
genomoncology/related
src/related/fields.py
https://github.com/genomoncology/related/blob/be47c0081e60fc60afcde3a25f00ebcad5d18510/src/related/fields.py#L259-L274
def UUIDField(default=NOTHING, required=False, repr=True, cmp=True, key=None):
    """
    Create new UUID field on a model.

    :param default: any value
    :param bool required: whether or not the object is invalid if not provided.
    :param bool repr: include this field should appear in object's repr.
    :param bool cmp: include this field in generated comparison.
    :param string key: override name of the value when converted to dict.
    """
    cls = UUID
    default = _init_fields.init_default(required, default, uuid4)
    validator = _init_fields.init_validator(required, cls)
    return attrib(default=default, converter=converters.str_to_uuid,
                  validator=validator, repr=repr, cmp=cmp,
                  metadata=dict(key=key))
[ "def", "UUIDField", "(", "default", "=", "NOTHING", ",", "required", "=", "False", ",", "repr", "=", "True", ",", "cmp", "=", "True", ",", "key", "=", "None", ")", ":", "cls", "=", "UUID", "default", "=", "_init_fields", ".", "init_default", "(", "required", ",", "default", ",", "uuid4", ")", "validator", "=", "_init_fields", ".", "init_validator", "(", "required", ",", "cls", ")", "return", "attrib", "(", "default", "=", "default", ",", "converter", "=", "converters", ".", "str_to_uuid", ",", "validator", "=", "validator", ",", "repr", "=", "repr", ",", "cmp", "=", "cmp", ",", "metadata", "=", "dict", "(", "key", "=", "key", ")", ")" ]
Create new UUID field on a model. :param default: any value :param bool required: whether or not the object is invalid if not provided. :param bool repr: include this field should appear in object's repr. :param bool cmp: include this field in generated comparison. :param string key: override name of the value when converted to dict.
[ "Create", "new", "UUID", "field", "on", "a", "model", "." ]
python
train
46.875
globus/globus-cli
globus_cli/parsing/shared_options.py
https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/parsing/shared_options.py#L664-L753
def server_add_and_update_opts(*args, **kwargs):
    """
    shared collection of options for `globus transfer endpoint server add`
    and `globus transfer endpoint server update`.
    Accepts a toggle to know if it's being used as `add` or `update`.

    usage:

    >>> @server_add_and_update_opts
    >>> def command_func(subject, port, scheme, hostname):
    >>>     ...

    or

    >>> @server_add_and_update_opts(add=True)
    >>> def command_func(subject, port, scheme, hostname):
    >>>     ...
    """

    def port_range_callback(ctx, param, value):
        if not value:
            return None

        value = value.lower().strip()
        if value == "unspecified":
            return None, None
        if value == "unrestricted":
            return 1024, 65535

        try:
            lower, upper = map(int, value.split("-"))
        except ValueError:  # too many/few values from split or non-integer(s)
            raise click.BadParameter(
                "must specify as 'unspecified', "
                "'unrestricted', or as range separated "
                "by a hyphen (e.g. '50000-51000')"
            )

        if not 1024 <= lower <= 65535 or not 1024 <= upper <= 65535:
            raise click.BadParameter("must be within the 1024-65535 range")

        return (lower, upper) if lower <= upper else (upper, lower)

    def inner_decorator(f, add=False):
        f = click.option("--hostname", required=add, help="Server Hostname.")(f)

        default_scheme = "gsiftp" if add else None
        f = click.option(
            "--scheme",
            help="Scheme for the Server.",
            type=CaseInsensitiveChoice(("gsiftp", "ftp")),
            default=default_scheme,
            show_default=add,
        )(f)

        default_port = 2811 if add else None
        f = click.option(
            "--port",
            help="Port for Globus control channel connections.",
            type=int,
            default=default_port,
            show_default=add,
        )(f)

        f = click.option(
            "--subject",
            help=(
                "Subject of the X509 Certificate of the server. When "
                "unspecified, the CN must match the server hostname."
            ),
        )(f)

        for adjective, our_preposition, their_preposition in [
            ("incoming", "to", "from"),
            ("outgoing", "from", "to"),
        ]:
            f = click.option(
                "--{}-data-ports".format(adjective),
                callback=port_range_callback,
                help="Indicate to firewall administrators at other sites how to "
                "allow {} traffic {} this server {} their own. Specify as "
                "either 'unspecified', 'unrestricted', or as range of "
                "ports separated by a hyphen (e.g. '50000-51000') within "
                "the 1024-65535 range.".format(
                    adjective, our_preposition, their_preposition
                ),
            )(f)

        return f

    return detect_and_decorate(inner_decorator, args, kwargs)
[ "def", "server_add_and_update_opts", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "port_range_callback", "(", "ctx", ",", "param", ",", "value", ")", ":", "if", "not", "value", ":", "return", "None", "value", "=", "value", ".", "lower", "(", ")", ".", "strip", "(", ")", "if", "value", "==", "\"unspecified\"", ":", "return", "None", ",", "None", "if", "value", "==", "\"unrestricted\"", ":", "return", "1024", ",", "65535", "try", ":", "lower", ",", "upper", "=", "map", "(", "int", ",", "value", ".", "split", "(", "\"-\"", ")", ")", "except", "ValueError", ":", "# too many/few values from split or non-integer(s)", "raise", "click", ".", "BadParameter", "(", "\"must specify as 'unspecified', \"", "\"'unrestricted', or as range separated \"", "\"by a hyphen (e.g. '50000-51000')\"", ")", "if", "not", "1024", "<=", "lower", "<=", "65535", "or", "not", "1024", "<=", "upper", "<=", "65535", ":", "raise", "click", ".", "BadParameter", "(", "\"must be within the 1024-65535 range\"", ")", "return", "(", "lower", ",", "upper", ")", "if", "lower", "<=", "upper", "else", "(", "upper", ",", "lower", ")", "def", "inner_decorator", "(", "f", ",", "add", "=", "False", ")", ":", "f", "=", "click", ".", "option", "(", "\"--hostname\"", ",", "required", "=", "add", ",", "help", "=", "\"Server Hostname.\"", ")", "(", "f", ")", "default_scheme", "=", "\"gsiftp\"", "if", "add", "else", "None", "f", "=", "click", ".", "option", "(", "\"--scheme\"", ",", "help", "=", "\"Scheme for the Server.\"", ",", "type", "=", "CaseInsensitiveChoice", "(", "(", "\"gsiftp\"", ",", "\"ftp\"", ")", ")", ",", "default", "=", "default_scheme", ",", "show_default", "=", "add", ",", ")", "(", "f", ")", "default_port", "=", "2811", "if", "add", "else", "None", "f", "=", "click", ".", "option", "(", "\"--port\"", ",", "help", "=", "\"Port for Globus control channel connections.\"", ",", "type", "=", "int", ",", "default", "=", "default_port", ",", "show_default", "=", "add", ",", ")", "(", "f", ")", "f", "=", "click", ".", "option", "(", "\"--subject\"", ",", "help", "=", "(", "\"Subject of the X509 Certificate of the server. When \"", "\"unspecified, the CN must match the server hostname.\"", ")", ",", ")", "(", "f", ")", "for", "adjective", ",", "our_preposition", ",", "their_preposition", "in", "[", "(", "\"incoming\"", ",", "\"to\"", ",", "\"from\"", ")", ",", "(", "\"outgoing\"", ",", "\"from\"", ",", "\"to\"", ")", ",", "]", ":", "f", "=", "click", ".", "option", "(", "\"--{}-data-ports\"", ".", "format", "(", "adjective", ")", ",", "callback", "=", "port_range_callback", ",", "help", "=", "\"Indicate to firewall administrators at other sites how to \"", "\"allow {} traffic {} this server {} their own. Specify as \"", "\"either 'unspecified', 'unrestricted', or as range of \"", "\"ports separated by a hyphen (e.g. '50000-51000') within \"", "\"the 1024-65535 range.\"", ".", "format", "(", "adjective", ",", "our_preposition", ",", "their_preposition", ")", ",", ")", "(", "f", ")", "return", "f", "return", "detect_and_decorate", "(", "inner_decorator", ",", "args", ",", "kwargs", ")" ]
shared collection of options for `globus transfer endpoint server add` and `globus transfer endpoint server update`. Accepts a toggle to know if it's being used as `add` or `update`. usage: >>> @server_add_and_update_opts >>> def command_func(subject, port, scheme, hostname): >>> ... or >>> @server_add_and_update_opts(add=True) >>> def command_func(subject, port, scheme, hostname): >>> ...
[ "shared", "collection", "of", "options", "for", "globus", "transfer", "endpoint", "server", "add", "and", "globus", "transfer", "endpoint", "server", "update", ".", "Accepts", "a", "toggle", "to", "know", "if", "it", "s", "being", "used", "as", "add", "or", "update", "." ]
python
train
33.144444
SoCo/SoCo
soco/snapshot.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/snapshot.py#L255-L274
def _save_queue(self):
    """Save the current state of the queue."""
    if self.queue is not None:
        # Maximum batch is 486, anything larger will still only
        # return 486
        batch_size = 400
        total = 0
        num_return = batch_size

        # Need to get all the tracks in batches, but Only get the next
        # batch if all the items requested were in the last batch
        while num_return == batch_size:
            queue_items = self.device.get_queue(total, batch_size)
            # Check how many entries were returned
            num_return = len(queue_items)
            # Make sure the queue is not empty
            if num_return > 0:
                self.queue.append(queue_items)
            # Update the total that have been processed
            total = total + num_return
[ "def", "_save_queue", "(", "self", ")", ":", "if", "self", ".", "queue", "is", "not", "None", ":", "# Maximum batch is 486, anything larger will still only", "# return 486", "batch_size", "=", "400", "total", "=", "0", "num_return", "=", "batch_size", "# Need to get all the tracks in batches, but Only get the next", "# batch if all the items requested were in the last batch", "while", "num_return", "==", "batch_size", ":", "queue_items", "=", "self", ".", "device", ".", "get_queue", "(", "total", ",", "batch_size", ")", "# Check how many entries were returned", "num_return", "=", "len", "(", "queue_items", ")", "# Make sure the queue is not empty", "if", "num_return", ">", "0", ":", "self", ".", "queue", ".", "append", "(", "queue_items", ")", "# Update the total that have been processed", "total", "=", "total", "+", "num_return" ]
Save the current state of the queue.
[ "Save", "the", "current", "state", "of", "the", "queue", "." ]
python
train
43.55
numenta/nupic
src/nupic/algorithms/anomaly_likelihood.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/anomaly_likelihood.py#L648-L684
def _anomalyScoreMovingAverage(anomalyScores,
                               windowSize=10,
                               verbosity=0,
                               ):
    """
    Given a list of anomaly scores return a list of averaged records.
    anomalyScores is assumed to be a list of records of the form:
                  [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0]

    Each record in the returned list list contains:
                [datetime, value, averagedScore]

    *Note:* we only average the anomaly score.
    """
    historicalValues = []
    total = 0.0
    averagedRecordList = []  # Aggregated records
    for record in anomalyScores:

        # Skip (but log) records without correct number of entries
        if not isinstance(record, (list, tuple)) or len(record) != 3:
            if verbosity >= 1:
                print("Malformed record:", record)
            continue

        avg, historicalValues, total = (
            MovingAverage.compute(historicalValues, total, record[2], windowSize)
            )

        averagedRecordList.append( [record[0], record[1], avg] )

        if verbosity > 2:
            print("Aggregating input record:", record)
            print("Result:", [record[0], record[1], avg])

    return averagedRecordList, historicalValues, total
[ "def", "_anomalyScoreMovingAverage", "(", "anomalyScores", ",", "windowSize", "=", "10", ",", "verbosity", "=", "0", ",", ")", ":", "historicalValues", "=", "[", "]", "total", "=", "0.0", "averagedRecordList", "=", "[", "]", "# Aggregated records", "for", "record", "in", "anomalyScores", ":", "# Skip (but log) records without correct number of entries", "if", "not", "isinstance", "(", "record", ",", "(", "list", ",", "tuple", ")", ")", "or", "len", "(", "record", ")", "!=", "3", ":", "if", "verbosity", ">=", "1", ":", "print", "(", "\"Malformed record:\"", ",", "record", ")", "continue", "avg", ",", "historicalValues", ",", "total", "=", "(", "MovingAverage", ".", "compute", "(", "historicalValues", ",", "total", ",", "record", "[", "2", "]", ",", "windowSize", ")", ")", "averagedRecordList", ".", "append", "(", "[", "record", "[", "0", "]", ",", "record", "[", "1", "]", ",", "avg", "]", ")", "if", "verbosity", ">", "2", ":", "print", "(", "\"Aggregating input record:\"", ",", "record", ")", "print", "(", "\"Result:\"", ",", "[", "record", "[", "0", "]", ",", "record", "[", "1", "]", ",", "avg", "]", ")", "return", "averagedRecordList", ",", "historicalValues", ",", "total" ]
Given a list of anomaly scores, return a list of averaged records. anomalyScores is assumed to be a list of records of the form: [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0] Each record in the returned list contains: [datetime, value, averagedScore] *Note:* we only average the anomaly score.
[ "Given", "a", "list", "of", "anomaly", "scores", "return", "a", "list", "of", "averaged", "records", ".", "anomalyScores", "is", "assumed", "to", "be", "a", "list", "of", "records", "of", "the", "form", ":", "[", "datetime", ".", "datetime", "(", "2013", "8", "10", "23", "0", ")", "6", ".", "0", "1", ".", "0", "]" ]
python
valid
31.702703
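A hedged illustration of what `_anomalyScoreMovingAverage` produces. nupic's `MovingAverage.compute` is replaced here by a plain windowed mean, an assumption made purely so the example is self-contained:

```python
import datetime
from collections import deque

def moving_average_records(records, window_size=10):
    # Average only the third field (the anomaly score), as in the original.
    history = deque(maxlen=window_size)
    out = []
    for dt, value, score in records:
        history.append(score)
        out.append([dt, value, sum(history) / len(history)])
    return out

records = [
    [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0],
    [datetime.datetime(2013, 8, 10, 23, 5), 7.0, 0.5],
    [datetime.datetime(2013, 8, 10, 23, 10), 5.0, 0.0],
]
print(moving_average_records(records, window_size=2))
# averaged scores: 1.0, 0.75, 0.25 -- timestamps and values pass through untouched
```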
yakupadakli/python-unsplash
unsplash/stat.py
https://github.com/yakupadakli/python-unsplash/blob/6e43dce3225237e1b8111fd475fb98b1ea33972c/unsplash/stat.py#L21-L29
def month(self): """ Get the overall Unsplash stats for the past 30 days. :return [Stat]: The Unsplash Stat. """ url = "/stats/month" result = self._get(url) return StatModel.parse(result)
[ "def", "month", "(", "self", ")", ":", "url", "=", "\"/stats/month\"", "result", "=", "self", ".", "_get", "(", "url", ")", "return", "StatModel", ".", "parse", "(", "result", ")" ]
Get the overall Unsplash stats for the past 30 days. :return [Stat]: The Unsplash Stat.
[ "Get", "the", "overall", "Unsplash", "stats", "for", "the", "past", "30", "days", "." ]
python
train
26.333333
jeffh/rpi_courses
rpi_courses/sis_parser/features.py
https://github.com/jeffh/rpi_courses/blob/c97176f73f866f112c785910ebf3ff8a790e8e9a/rpi_courses/sis_parser/features.py#L32-L52
def semester_feature(catalog, soup): """The year and semester information that this xml file hold courses for. """ raw = _text(soup.findAll('h3')).split('\n')[1] match = RE_SEMESTER_RANGE.match(raw) catalog.year = int(match.group('year')) #month_mapping = {'Spring': 1, 'Summer': 5, 'Fall': 9} month_mapping = {'january': 1, 'may': 5, 'august': 9} catalog.month = month_mapping[match.group('start_month').lower()] if catalog.url: match = RE_SEMESTER_URL.match(catalog.url) if match: catalog.year = int(match.group('year')) catalog.month = int(match.group('month')) semester_mapping = {1: 'Spring', 5: 'Summer', 9: 'Fall'} catalog.semester = semester_mapping[catalog.month] catalog.name = '%s %d' % (catalog.semester, catalog.year) logger.info('Catalog type: %s' % catalog.name)
[ "def", "semester_feature", "(", "catalog", ",", "soup", ")", ":", "raw", "=", "_text", "(", "soup", ".", "findAll", "(", "'h3'", ")", ")", ".", "split", "(", "'\\n'", ")", "[", "1", "]", "match", "=", "RE_SEMESTER_RANGE", ".", "match", "(", "raw", ")", "catalog", ".", "year", "=", "int", "(", "match", ".", "group", "(", "'year'", ")", ")", "#month_mapping = {'Spring': 1, 'Summer': 5, 'Fall': 9}", "month_mapping", "=", "{", "'january'", ":", "1", ",", "'may'", ":", "5", ",", "'august'", ":", "9", "}", "catalog", ".", "month", "=", "month_mapping", "[", "match", ".", "group", "(", "'start_month'", ")", ".", "lower", "(", ")", "]", "if", "catalog", ".", "url", ":", "match", "=", "RE_SEMESTER_URL", ".", "match", "(", "catalog", ".", "url", ")", "if", "match", ":", "catalog", ".", "year", "=", "int", "(", "match", ".", "group", "(", "'year'", ")", ")", "catalog", ".", "month", "=", "int", "(", "match", ".", "group", "(", "'month'", ")", ")", "semester_mapping", "=", "{", "1", ":", "'Spring'", ",", "5", ":", "'Summer'", ",", "9", ":", "'Fall'", "}", "catalog", ".", "semester", "=", "semester_mapping", "[", "catalog", ".", "month", "]", "catalog", ".", "name", "=", "'%s %d'", "%", "(", "catalog", ".", "semester", ",", "catalog", ".", "year", ")", "logger", ".", "info", "(", "'Catalog type: %s'", "%", "catalog", ".", "name", ")" ]
The year and semester information for which this xml file holds courses.
[ "The", "year", "and", "semester", "information", "that", "this", "xml", "file", "hold", "courses", "for", "." ]
python
train
40.571429
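The core of `semester_feature` above is two small lookup tables; the snippet below replays that mapping on a made-up year to show the resulting catalog name:

```python
month_mapping = {'january': 1, 'may': 5, 'august': 9}
semester_mapping = {1: 'Spring', 5: 'Summer', 9: 'Fall'}

month = month_mapping['august']                      # start month parsed from the page
print('%s %d' % (semester_mapping[month], 2024))     # -> 'Fall 2024'
```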
mitsei/dlkit
dlkit/primordium/locale/types/time.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/primordium/locale/types/time.py#L70-L100
def get_type_data(name): """Return dictionary representation of type. Can be used to initialize primordium.type.primitives.Type """ name = name.upper() if name in CELESTIAL_TIME_TYPES: namespace = 'time' domain = 'Celestial Time Systems' time_name = CELESTIAL_TIME_TYPES[name] elif name in EARTH_TIME_TYPES: namespace = 'time' domain = 'Earth Time Systems' time_name = EARTH_TIME_TYPES[name] elif name in SUPER_FUN_TIME_TYPES: namespace = 'time' domain = 'Alternative Time Systems' time_name = SUPER_FUN_TIME_TYPES[name] else: raise NotFound('Time Type: ' + name) return { 'authority': 'okapia.net', 'namespace': namespace, 'identifier': name, 'domain': domain, 'display_name': time_name + ' Time Type', 'display_label': time_name, 'description': ('The time type for ' + time_name + ' time.') }
[ "def", "get_type_data", "(", "name", ")", ":", "name", "=", "name", ".", "upper", "(", ")", "if", "name", "in", "CELESTIAL_TIME_TYPES", ":", "namespace", "=", "'time'", "domain", "=", "'Celestial Time Systems'", "time_name", "=", "CELESTIAL_TIME_TYPES", "[", "name", "]", "elif", "name", "in", "EARTH_TIME_TYPES", ":", "namespace", "=", "'time'", "domain", "=", "'Earth Time Systems'", "time_name", "=", "EARTH_TIME_TYPES", "[", "name", "]", "elif", "name", "in", "SUPER_FUN_TIME_TYPES", ":", "namespace", "=", "'time'", "domain", "=", "'Alternative Time Systems'", "time_name", "=", "SUPER_FUN_TIME_TYPES", "[", "name", "]", "else", ":", "raise", "NotFound", "(", "'Time Type: '", "+", "name", ")", "return", "{", "'authority'", ":", "'okapia.net'", ",", "'namespace'", ":", "namespace", ",", "'identifier'", ":", "name", ",", "'domain'", ":", "domain", ",", "'display_name'", ":", "time_name", "+", "' Time Type'", ",", "'display_label'", ":", "time_name", ",", "'description'", ":", "(", "'The time type for '", "+", "time_name", "+", "' time.'", ")", "}" ]
Return dictionary representation of type. Can be used to initialize primordium.type.primitives.Type
[ "Return", "dictionary", "representation", "of", "type", "." ]
python
train
30.419355
carpedm20/fbchat
fbchat/_client.py
https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L941-L1003
def fetchThreadInfo(self, *thread_ids): """ Get threads' info from IDs, unordered .. warning:: Sends two requests if users or pages are present, to fetch all available info! :param thread_ids: One or more thread ID(s) to query :return: :class:`models.Thread` objects, labeled by their ID :rtype: dict :raises: FBchatException if request failed """ queries = [] for thread_id in thread_ids: params = { "id": thread_id, "message_limit": 0, "load_messages": False, "load_read_receipts": False, "before": None, } queries.append(GraphQL(doc_id="2147762685294928", params=params)) j = self.graphql_requests(*queries) for i, entry in enumerate(j): if entry.get("message_thread") is None: # If you don't have an existing thread with this person, attempt to retrieve user data anyways j[i]["message_thread"] = { "thread_key": {"other_user_id": thread_ids[i]}, "thread_type": "ONE_TO_ONE", } pages_and_user_ids = [ k["message_thread"]["thread_key"]["other_user_id"] for k in j if k["message_thread"].get("thread_type") == "ONE_TO_ONE" ] pages_and_users = {} if len(pages_and_user_ids) != 0: pages_and_users = self._fetchInfo(*pages_and_user_ids) rtn = {} for i, entry in enumerate(j): entry = entry["message_thread"] if entry.get("thread_type") == "GROUP": _id = entry["thread_key"]["thread_fbid"] rtn[_id] = Group._from_graphql(entry) elif entry.get("thread_type") == "ONE_TO_ONE": _id = entry["thread_key"]["other_user_id"] if pages_and_users.get(_id) is None: raise FBchatException("Could not fetch thread {}".format(_id)) entry.update(pages_and_users[_id]) if entry["type"] == ThreadType.USER: rtn[_id] = User._from_graphql(entry) else: rtn[_id] = Page._from_graphql(entry) else: raise FBchatException( "{} had an unknown thread type: {}".format(thread_ids[i], entry) ) return rtn
[ "def", "fetchThreadInfo", "(", "self", ",", "*", "thread_ids", ")", ":", "queries", "=", "[", "]", "for", "thread_id", "in", "thread_ids", ":", "params", "=", "{", "\"id\"", ":", "thread_id", ",", "\"message_limit\"", ":", "0", ",", "\"load_messages\"", ":", "False", ",", "\"load_read_receipts\"", ":", "False", ",", "\"before\"", ":", "None", ",", "}", "queries", ".", "append", "(", "GraphQL", "(", "doc_id", "=", "\"2147762685294928\"", ",", "params", "=", "params", ")", ")", "j", "=", "self", ".", "graphql_requests", "(", "*", "queries", ")", "for", "i", ",", "entry", "in", "enumerate", "(", "j", ")", ":", "if", "entry", ".", "get", "(", "\"message_thread\"", ")", "is", "None", ":", "# If you don't have an existing thread with this person, attempt to retrieve user data anyways", "j", "[", "i", "]", "[", "\"message_thread\"", "]", "=", "{", "\"thread_key\"", ":", "{", "\"other_user_id\"", ":", "thread_ids", "[", "i", "]", "}", ",", "\"thread_type\"", ":", "\"ONE_TO_ONE\"", ",", "}", "pages_and_user_ids", "=", "[", "k", "[", "\"message_thread\"", "]", "[", "\"thread_key\"", "]", "[", "\"other_user_id\"", "]", "for", "k", "in", "j", "if", "k", "[", "\"message_thread\"", "]", ".", "get", "(", "\"thread_type\"", ")", "==", "\"ONE_TO_ONE\"", "]", "pages_and_users", "=", "{", "}", "if", "len", "(", "pages_and_user_ids", ")", "!=", "0", ":", "pages_and_users", "=", "self", ".", "_fetchInfo", "(", "*", "pages_and_user_ids", ")", "rtn", "=", "{", "}", "for", "i", ",", "entry", "in", "enumerate", "(", "j", ")", ":", "entry", "=", "entry", "[", "\"message_thread\"", "]", "if", "entry", ".", "get", "(", "\"thread_type\"", ")", "==", "\"GROUP\"", ":", "_id", "=", "entry", "[", "\"thread_key\"", "]", "[", "\"thread_fbid\"", "]", "rtn", "[", "_id", "]", "=", "Group", ".", "_from_graphql", "(", "entry", ")", "elif", "entry", ".", "get", "(", "\"thread_type\"", ")", "==", "\"ONE_TO_ONE\"", ":", "_id", "=", "entry", "[", "\"thread_key\"", "]", "[", "\"other_user_id\"", "]", "if", "pages_and_users", ".", "get", "(", "_id", ")", "is", "None", ":", "raise", "FBchatException", "(", "\"Could not fetch thread {}\"", ".", "format", "(", "_id", ")", ")", "entry", ".", "update", "(", "pages_and_users", "[", "_id", "]", ")", "if", "entry", "[", "\"type\"", "]", "==", "ThreadType", ".", "USER", ":", "rtn", "[", "_id", "]", "=", "User", ".", "_from_graphql", "(", "entry", ")", "else", ":", "rtn", "[", "_id", "]", "=", "Page", ".", "_from_graphql", "(", "entry", ")", "else", ":", "raise", "FBchatException", "(", "\"{} had an unknown thread type: {}\"", ".", "format", "(", "thread_ids", "[", "i", "]", ",", "entry", ")", ")", "return", "rtn" ]
Get threads' info from IDs, unordered .. warning:: Sends two requests if users or pages are present, to fetch all available info! :param thread_ids: One or more thread ID(s) to query :return: :class:`models.Thread` objects, labeled by their ID :rtype: dict :raises: FBchatException if request failed
[ "Get", "threads", "info", "from", "IDs", "unordered" ]
python
train
38.396825
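Hedged usage sketch for `fetchThreadInfo`; the credentials and the thread id below are placeholders, and the return shape (a dict of thread objects keyed by id) is taken from the docstring above:

```python
from fbchat import Client   # assumes the fbchat package layout

client = Client('<email>', '<password>')          # placeholders, not real credentials
threads = client.fetchThreadInfo('1234567890')    # dict keyed by thread id
thread = threads['1234567890']
print(thread)                                     # a models.Thread subclass (User/Group/Page)
client.logout()
```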
morpframework/morpfw
morpfw/authn/pas/user/view.py
https://github.com/morpframework/morpfw/blob/803fbf29714e6f29456482f1cfbdbd4922b020b0/morpfw/authn/pas/user/view.py#L60-L89
def process_login(context, request): """Authenticate username and password and log in user""" username = request.json['username'] password = request.json['password'] # Do the password validation. user = context.authenticate(username, password) if not user: @request.after def adjust_status(response): response.status = 401 return { 'status': 'error', 'error': { 'code': 401, 'message': 'Invalid Username / Password' } } @request.after def remember(response): """Remember the identity of the user logged in.""" # We pass the extra info to the identity object. response.headers.add('Access-Control-Expose-Headers', 'Authorization') identity = user.identity request.app.remember_identity(response, request, identity) return { 'status': 'success' }
[ "def", "process_login", "(", "context", ",", "request", ")", ":", "username", "=", "request", ".", "json", "[", "'username'", "]", "password", "=", "request", ".", "json", "[", "'password'", "]", "# Do the password validation.", "user", "=", "context", ".", "authenticate", "(", "username", ",", "password", ")", "if", "not", "user", ":", "@", "request", ".", "after", "def", "adjust_status", "(", "response", ")", ":", "response", ".", "status", "=", "401", "return", "{", "'status'", ":", "'error'", ",", "'error'", ":", "{", "'code'", ":", "401", ",", "'message'", ":", "'Invalid Username / Password'", "}", "}", "@", "request", ".", "after", "def", "remember", "(", "response", ")", ":", "\"\"\"Remember the identity of the user logged in.\"\"\"", "# We pass the extra info to the identity object.", "response", ".", "headers", ".", "add", "(", "'Access-Control-Expose-Headers'", ",", "'Authorization'", ")", "identity", "=", "user", ".", "identity", "request", ".", "app", ".", "remember_identity", "(", "response", ",", "request", ",", "identity", ")", "return", "{", "'status'", ":", "'success'", "}" ]
Authenticate username and password and log in user
[ "Authenticate", "username", "and", "password", "and", "log", "in", "user" ]
python
train
30.6
Azure/azure-event-hubs-python
azure/eventhub/common.py
https://github.com/Azure/azure-event-hubs-python/blob/737c5f966557ada2cf10fa0d8f3c19671ae96348/azure/eventhub/common.py#L217-L236
def body_as_str(self, encoding='UTF-8'): """ The body of the event data as a string if the data is of a compatible type. :param encoding: The encoding to use for decoding message data. Default is 'UTF-8' :rtype: str or unicode """ data = self.body try: return "".join(b.decode(encoding) for b in data) except TypeError: return six.text_type(data) except: # pylint: disable=bare-except pass try: return data.decode(encoding) except Exception as e: raise TypeError("Message data is not compatible with string type: {}".format(e))
[ "def", "body_as_str", "(", "self", ",", "encoding", "=", "'UTF-8'", ")", ":", "data", "=", "self", ".", "body", "try", ":", "return", "\"\"", ".", "join", "(", "b", ".", "decode", "(", "encoding", ")", "for", "b", "in", "data", ")", "except", "TypeError", ":", "return", "six", ".", "text_type", "(", "data", ")", "except", ":", "# pylint: disable=bare-except", "pass", "try", ":", "return", "data", ".", "decode", "(", "encoding", ")", "except", "Exception", "as", "e", ":", "raise", "TypeError", "(", "\"Message data is not compatible with string type: {}\"", ".", "format", "(", "e", ")", ")" ]
The body of the event data as a string if the data is of a compatible type. :param encoding: The encoding to use for decoding message data. Default is 'UTF-8' :rtype: str or unicode
[ "The", "body", "of", "the", "event", "data", "as", "a", "string", "if", "the", "data", "is", "of", "a", "compatible", "type", "." ]
python
train
33.75
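A short, hedged usage of `body_as_str`; constructing `EventData` directly from a bytes payload is assumed to be supported by this version of the SDK:

```python
from azure.eventhub import EventData

event = EventData(b'{"temperature": 21.5}')
print(event.body_as_str())           # '{"temperature": 21.5}'
print(event.body_as_str('ascii'))    # same payload decoded with another codec
```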
pyviz/holoviews
holoviews/plotting/bokeh/element.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/bokeh/element.py#L519-L539
def _set_active_tools(self, plot): "Activates the list of active tools" for tool in self.active_tools: if isinstance(tool, util.basestring): tool_type = TOOL_TYPES[tool] matching = [t for t in plot.toolbar.tools if isinstance(t, tool_type)] if not matching: self.param.warning('Tool of type %r could not be found ' 'and could not be activated by default.' % tool) continue tool = matching[0] if isinstance(tool, tools.Drag): plot.toolbar.active_drag = tool if isinstance(tool, tools.Scroll): plot.toolbar.active_scroll = tool if isinstance(tool, tools.Tap): plot.toolbar.active_tap = tool if isinstance(tool, tools.Inspection): plot.toolbar.active_inspect.append(tool)
[ "def", "_set_active_tools", "(", "self", ",", "plot", ")", ":", "for", "tool", "in", "self", ".", "active_tools", ":", "if", "isinstance", "(", "tool", ",", "util", ".", "basestring", ")", ":", "tool_type", "=", "TOOL_TYPES", "[", "tool", "]", "matching", "=", "[", "t", "for", "t", "in", "plot", ".", "toolbar", ".", "tools", "if", "isinstance", "(", "t", ",", "tool_type", ")", "]", "if", "not", "matching", ":", "self", ".", "param", ".", "warning", "(", "'Tool of type %r could not be found '", "'and could not be activated by default.'", "%", "tool", ")", "continue", "tool", "=", "matching", "[", "0", "]", "if", "isinstance", "(", "tool", ",", "tools", ".", "Drag", ")", ":", "plot", ".", "toolbar", ".", "active_drag", "=", "tool", "if", "isinstance", "(", "tool", ",", "tools", ".", "Scroll", ")", ":", "plot", ".", "toolbar", ".", "active_scroll", "=", "tool", "if", "isinstance", "(", "tool", ",", "tools", ".", "Tap", ")", ":", "plot", ".", "toolbar", ".", "active_tap", "=", "tool", "if", "isinstance", "(", "tool", ",", "tools", ".", "Inspection", ")", ":", "plot", ".", "toolbar", ".", "active_inspect", ".", "append", "(", "tool", ")" ]
Activates the list of active tools
[ "Activates", "the", "list", "of", "active", "tools" ]
python
train
47.52381
bykof/billomapy
billomapy/billomapy.py
https://github.com/bykof/billomapy/blob/a28ba69fd37654fa145d0411d52c200e7f8984ab/billomapy/billomapy.py#L4165-L4177
def update_template(self, template_id, template_dict): """ Updates a template :param template_id: the template id :param template_dict: dict :return: dict """ return self._create_put_request( resource=TEMPLATES, billomat_id=template_id, send_data=template_dict )
[ "def", "update_template", "(", "self", ",", "template_id", ",", "template_dict", ")", ":", "return", "self", ".", "_create_put_request", "(", "resource", "=", "TEMPLATES", ",", "billomat_id", "=", "template_id", ",", "send_data", "=", "template_dict", ")" ]
Updates a template :param template_id: the template id :param template_dict: dict :return: dict
[ "Updates", "a", "template" ]
python
train
27
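Hedged usage of `update_template`; the constructor arguments and the shape of `template_dict` follow the Billomat API only loosely and should be treated as assumptions:

```python
from billomapy import Billomapy

client = Billomapy('<billomat_id>', '<api_key>', '<app_id>', '<app_secret>')
updated = client.update_template(
    template_id=123,
    template_dict={'template': {'name': 'Updated invoice template'}},
)
print(updated)
```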
globus/globus-cli
globus_cli/commands/endpoint/server/delete.py
https://github.com/globus/globus-cli/blob/336675ff24da64c5ee487243f39ae39fc49a7e14/globus_cli/commands/endpoint/server/delete.py#L64-L105
def server_delete(endpoint_id, server): """ Executor for `globus endpoint server show` """ client = get_client() mode = _detect_mode(server) # list (even if not necessary) in order to make errors more consistent when # mode='id' endpoint, server_list = get_endpoint_w_server_list(endpoint_id) if server_list == "S3": raise click.UsageError("You cannot delete servers from S3 endpoints.") # we don't *have to* raise an error in the GCP case, since the API would # deny it too, but doing so makes our errors a little bit more consistent # with deletes against S3 endpoints and shares if endpoint["is_globus_connect"]: raise click.UsageError( "You cannot delete servers from Globus Connect Personal endpoints" ) if mode != "id": matches = _spec_to_matches(server_list, server, mode) if not matches: raise click.UsageError('No server was found matching "{}"'.format(server)) elif len(matches) > 1: raise click.UsageError( dedent( """\ Multiple servers matched "{}": {} """ ).format(server, [x["id"] for x in matches]) ) else: server = matches[0]["id"] response = client.delete_endpoint_server(endpoint_id, server) formatted_print(response, text_format=FORMAT_TEXT_RAW, response_key="message")
[ "def", "server_delete", "(", "endpoint_id", ",", "server", ")", ":", "client", "=", "get_client", "(", ")", "mode", "=", "_detect_mode", "(", "server", ")", "# list (even if not necessary) in order to make errors more consistent when", "# mode='id'", "endpoint", ",", "server_list", "=", "get_endpoint_w_server_list", "(", "endpoint_id", ")", "if", "server_list", "==", "\"S3\"", ":", "raise", "click", ".", "UsageError", "(", "\"You cannot delete servers from S3 endpoints.\"", ")", "# we don't *have to* raise an error in the GCP case, since the API would", "# deny it too, but doing so makes our errors a little bit more consistent", "# with deletes against S3 endpoints and shares", "if", "endpoint", "[", "\"is_globus_connect\"", "]", ":", "raise", "click", ".", "UsageError", "(", "\"You cannot delete servers from Globus Connect Personal endpoints\"", ")", "if", "mode", "!=", "\"id\"", ":", "matches", "=", "_spec_to_matches", "(", "server_list", ",", "server", ",", "mode", ")", "if", "not", "matches", ":", "raise", "click", ".", "UsageError", "(", "'No server was found matching \"{}\"'", ".", "format", "(", "server", ")", ")", "elif", "len", "(", "matches", ")", ">", "1", ":", "raise", "click", ".", "UsageError", "(", "dedent", "(", "\"\"\"\\\n Multiple servers matched \"{}\":\n {}\n \"\"\"", ")", ".", "format", "(", "server", ",", "[", "x", "[", "\"id\"", "]", "for", "x", "in", "matches", "]", ")", ")", "else", ":", "server", "=", "matches", "[", "0", "]", "[", "\"id\"", "]", "response", "=", "client", ".", "delete_endpoint_server", "(", "endpoint_id", ",", "server", ")", "formatted_print", "(", "response", ",", "text_format", "=", "FORMAT_TEXT_RAW", ",", "response_key", "=", "\"message\"", ")" ]
Executor for `globus endpoint server delete`
[ "Executor", "for", "globus", "endpoint", "server", "show" ]
python
train
34.142857
numenta/nupic
src/nupic/database/connection.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/connection.py#L212-L235
def _createDefaultPolicy(cls): """ [private] Create the default database connection policy instance Parameters: ---------------------------------------------------------------- retval: The default database connection policy instance """ logger = _getLogger(cls) logger.debug( "Creating database connection policy: platform=%r; pymysql.VERSION=%r", platform.system(), pymysql.VERSION) if platform.system() == "Java": # NOTE: PooledDB doesn't seem to work under Jython # NOTE: not appropriate for multi-threaded applications. # TODO: this was fixed in Webware DBUtils r8228, so once # we pick up a realease with this fix, we should use # PooledConnectionPolicy for both Jython and Python. policy = SingleSharedConnectionPolicy() else: policy = PooledConnectionPolicy() return policy
[ "def", "_createDefaultPolicy", "(", "cls", ")", ":", "logger", "=", "_getLogger", "(", "cls", ")", "logger", ".", "debug", "(", "\"Creating database connection policy: platform=%r; pymysql.VERSION=%r\"", ",", "platform", ".", "system", "(", ")", ",", "pymysql", ".", "VERSION", ")", "if", "platform", ".", "system", "(", ")", "==", "\"Java\"", ":", "# NOTE: PooledDB doesn't seem to work under Jython", "# NOTE: not appropriate for multi-threaded applications.", "# TODO: this was fixed in Webware DBUtils r8228, so once", "# we pick up a realease with this fix, we should use", "# PooledConnectionPolicy for both Jython and Python.", "policy", "=", "SingleSharedConnectionPolicy", "(", ")", "else", ":", "policy", "=", "PooledConnectionPolicy", "(", ")", "return", "policy" ]
[private] Create the default database connection policy instance Parameters: ---------------------------------------------------------------- retval: The default database connection policy instance
[ "[", "private", "]", "Create", "the", "default", "database", "connection", "policy", "instance" ]
python
valid
36.583333
radjkarl/imgProcessor
imgProcessor/measure/SNR/SNR_hinken.py
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/measure/SNR/SNR_hinken.py#L8-L44
def SNR_hinken(imgs, bg=0, roi=None): ''' signal-to-noise ratio (SNR) as mean(images) / std(images) as defined in Hinken et.al. 2011 (DOI: 10.1063/1.3541766) works on unloaded images no memory overload if too many images are given ''' mean = None M = len(imgs) if bg is not 0: bg = imread(bg)[roi] if roi is not None: bg = bg[roi] #calc mean: for i in imgs: img = imread(i).asfarray() if roi is not None: img = img[roi] img -= bg if mean is None: #init mean = np.zeros_like(img) std = np.zeros_like(img) mean += img del img mean /= M #calc std of mean: for i in imgs: img = imread(i).asfarray() if roi is not None: img = img[roi] img -= bg std += (mean - img)**2 del img std = (std / M)**0.5 return mean.mean() / std.mean()
[ "def", "SNR_hinken", "(", "imgs", ",", "bg", "=", "0", ",", "roi", "=", "None", ")", ":", "mean", "=", "None", "M", "=", "len", "(", "imgs", ")", "if", "bg", "is", "not", "0", ":", "bg", "=", "imread", "(", "bg", ")", "[", "roi", "]", "if", "roi", "is", "not", "None", ":", "bg", "=", "bg", "[", "roi", "]", "#calc mean:\r", "for", "i", "in", "imgs", ":", "img", "=", "imread", "(", "i", ")", ".", "asfarray", "(", ")", "if", "roi", "is", "not", "None", ":", "img", "=", "img", "[", "roi", "]", "img", "-=", "bg", "if", "mean", "is", "None", ":", "#init\r", "mean", "=", "np", ".", "zeros_like", "(", "img", ")", "std", "=", "np", ".", "zeros_like", "(", "img", ")", "mean", "+=", "img", "del", "img", "mean", "/=", "M", "#calc std of mean:\r", "for", "i", "in", "imgs", ":", "img", "=", "imread", "(", "i", ")", ".", "asfarray", "(", ")", "if", "roi", "is", "not", "None", ":", "img", "=", "img", "[", "roi", "]", "img", "-=", "bg", "std", "+=", "(", "mean", "-", "img", ")", "**", "2", "del", "img", "std", "=", "(", "std", "/", "M", ")", "**", "0.5", "return", "mean", ".", "mean", "(", ")", "/", "std", ".", "mean", "(", ")" ]
Signal-to-noise ratio (SNR) as mean(images) / std(images), as defined in Hinken et al. 2011 (DOI: 10.1063/1.3541766). Works on unloaded images; no memory overload if too many images are given.
[ "signal", "-", "to", "-", "noise", "ratio", "(", "SNR", ")", "as", "mean", "(", "images", ")", "/", "std", "(", "images", ")", "as", "defined", "in", "Hinken", "et", ".", "al", ".", "2011", "(", "DOI", ":", "10", ".", "1063", "/", "1", ".", "3541766", ")", "works", "on", "unloaded", "images", "no", "memory", "overload", "if", "too", "many", "images", "are", "given" ]
python
train
26.162162
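The same SNR = mean(images) / std(images) computation, rewritten as a two-pass sketch over images that are already in memory (the original streams them from disk with `imread` to keep memory use low):

```python
import numpy as np

def snr_in_memory(imgs, bg=0):
    imgs = [np.asarray(i, dtype=float) - bg for i in imgs]
    mean = sum(imgs) / len(imgs)
    std = (sum((mean - i) ** 2 for i in imgs) / len(imgs)) ** 0.5
    return mean.mean() / std.mean()

rng = np.random.default_rng(0)
stack = [100 + rng.normal(0, 5, size=(32, 32)) for _ in range(20)]
print(snr_in_memory(stack))   # on the order of 100 / 5 = 20 for this synthetic stack
```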
pypa/pipenv
pipenv/patched/notpip/_internal/utils/misc.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/utils/misc.py#L295-L310
def renames(old, new): # type: (str, str) -> None """Like os.renames(), but handles renaming across devices.""" # Implementation borrowed from os.renames(). head, tail = os.path.split(new) if head and tail and not os.path.exists(head): os.makedirs(head) shutil.move(old, new) head, tail = os.path.split(old) if head and tail: try: os.removedirs(head) except OSError: pass
[ "def", "renames", "(", "old", ",", "new", ")", ":", "# type: (str, str) -> None", "# Implementation borrowed from os.renames().", "head", ",", "tail", "=", "os", ".", "path", ".", "split", "(", "new", ")", "if", "head", "and", "tail", "and", "not", "os", ".", "path", ".", "exists", "(", "head", ")", ":", "os", ".", "makedirs", "(", "head", ")", "shutil", ".", "move", "(", "old", ",", "new", ")", "head", ",", "tail", "=", "os", ".", "path", ".", "split", "(", "old", ")", "if", "head", "and", "tail", ":", "try", ":", "os", ".", "removedirs", "(", "head", ")", "except", "OSError", ":", "pass" ]
Like os.renames(), but handles renaming across devices.
[ "Like", "os", ".", "renames", "()", "but", "handles", "renaming", "across", "devices", "." ]
python
train
27.375
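Self-contained demonstration of `renames` above: the destination directory tree is created on demand and the now-empty source directory is pruned afterwards. The helper is copied from the record so the demo runs standalone.

```python
import os
import shutil
import tempfile

def renames(old, new):
    # copy of the helper above, reproduced so the demo runs standalone
    head, tail = os.path.split(new)
    if head and tail and not os.path.exists(head):
        os.makedirs(head)
    shutil.move(old, new)
    head, tail = os.path.split(old)
    if head and tail:
        try:
            os.removedirs(head)
        except OSError:
            pass

base = tempfile.mkdtemp()
src = os.path.join(base, 'a', 'file.txt')
os.makedirs(os.path.dirname(src))
with open(src, 'w') as fh:
    fh.write('hello')

dst = os.path.join(base, 'b', 'c', 'file.txt')
renames(src, dst)
print(os.path.exists(dst), os.path.exists(os.path.dirname(src)))   # True False
```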
limpyd/redis-limpyd
limpyd/indexes.py
https://github.com/limpyd/redis-limpyd/blob/3c745dde1390a0bd09690b77a089dcc08c6c7e43/limpyd/indexes.py#L827-L847
def remove(self, *args): """Remove the instance tied to the field for the given "value" (via `args`) from the index For the parameters, see ``BaseIndex.remove`` Notes ----- This method calls the ``unstore`` method that should be overridden in subclasses to remove data from the index sorted-set key """ key = self.get_storage_key(*args) args = list(args) value = args[-1] pk = self.instance.pk.get() logger.debug("removing %s from index %s" % (pk, key)) self.unstore(key, pk, self.prepare_value_for_storage(value, pk)) self._deindexed_values.add(tuple(args))
[ "def", "remove", "(", "self", ",", "*", "args", ")", ":", "key", "=", "self", ".", "get_storage_key", "(", "*", "args", ")", "args", "=", "list", "(", "args", ")", "value", "=", "args", "[", "-", "1", "]", "pk", "=", "self", ".", "instance", ".", "pk", ".", "get", "(", ")", "logger", ".", "debug", "(", "\"removing %s from index %s\"", "%", "(", "pk", ",", "key", ")", ")", "self", ".", "unstore", "(", "key", ",", "pk", ",", "self", ".", "prepare_value_for_storage", "(", "value", ",", "pk", ")", ")", "self", ".", "_deindexed_values", ".", "add", "(", "tuple", "(", "args", ")", ")" ]
Remove the instance tied to the field for the given "value" (via `args`) from the index For the parameters, see ``BaseIndex.remove`` Notes ----- This method calls the ``unstore`` method that should be overridden in subclasses to remove data from the index sorted-set key
[ "Remove", "the", "instance", "tied", "to", "the", "field", "for", "the", "given", "value", "(", "via", "args", ")", "from", "the", "index" ]
python
train
31.238095
gatagat/lap
lap/lapmod.py
https://github.com/gatagat/lap/blob/c2b6309ba246d18205a71228cdaea67210e1a039/lap/lapmod.py#L273-L341
def lapmod(n, cc, ii, kk, fast=True, return_cost=True, fp_version=FP_DYNAMIC): """Solve sparse linear assignment problem using Jonker-Volgenant algorithm. n: number of rows of the assignment cost matrix cc: 1D array of all finite elements of the assignement cost matrix ii: 1D array of indices of the row starts in cc. The following must hold: ii[0] = 0 and ii[n+1] = len(cc). kk: 1D array of the column indices so that: cost[i, kk[ii[i] + k]] == cc[ii[i] + k]. Indices within one row must be sorted. extend_cost: whether or not extend a non-square matrix [default: False] cost_limit: an upper limit for a cost of a single assignment [default: np.inf] return_cost: whether or not to return the assignment cost Returns (opt, x, y) where: opt: cost of the assignment x: vector of columns assigned to rows y: vector of rows assigned to columns or (x, y) if return_cost is not True. When extend_cost and/or cost_limit is set, all unmatched entries will be marked by -1 in x/y. """ # log = logging.getLogger('lapmod') check_cost(n, cc, ii, kk) if fast is True: # log.debug('[----CR & RT & ARR & augmentation ----]') x, y = _lapmod(n, cc, ii, kk, fp_version=fp_version) else: cc = np.ascontiguousarray(cc, dtype=np.float64) ii = np.ascontiguousarray(ii, dtype=np.int32) kk = np.ascontiguousarray(kk, dtype=np.int32) x = np.empty((n,), dtype=np.int32) y = np.empty((n,), dtype=np.int32) v = np.empty((n,), dtype=np.float64) free_rows = np.empty((n,), dtype=np.int32) # log.debug('[----Column reduction & reduction transfer----]') n_free_rows = _pycrrt(n, cc, ii, kk, free_rows, x, y, v) # log.debug( # 'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v) if n_free_rows == 0: # log.info('Reduction solved it.') if return_cost is True: return get_cost(n, cc, ii, kk, x), x, y else: return x, y for it in range(2): # log.debug('[---Augmenting row reduction (iteration: %d)---]', it) n_free_rows = _pyarr( n, cc, ii, kk, n_free_rows, free_rows, x, y, v) # log.debug( # 'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v) if n_free_rows == 0: # log.info('Augmenting row reduction solved it.') if return_cost is True: return get_cost(n, cc, ii, kk, x), x, y else: return x, y # log.info('[----Augmentation----]') _pya(n, cc, ii, kk, n_free_rows, free_rows, x, y, v) # log.debug('x, y, v: %s %s %s', x, y, v) if return_cost is True: return get_cost(n, cc, ii, kk, x), x, y else: return x, y
[ "def", "lapmod", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "fast", "=", "True", ",", "return_cost", "=", "True", ",", "fp_version", "=", "FP_DYNAMIC", ")", ":", "# log = logging.getLogger('lapmod')", "check_cost", "(", "n", ",", "cc", ",", "ii", ",", "kk", ")", "if", "fast", "is", "True", ":", "# log.debug('[----CR & RT & ARR & augmentation ----]')", "x", ",", "y", "=", "_lapmod", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "fp_version", "=", "fp_version", ")", "else", ":", "cc", "=", "np", ".", "ascontiguousarray", "(", "cc", ",", "dtype", "=", "np", ".", "float64", ")", "ii", "=", "np", ".", "ascontiguousarray", "(", "ii", ",", "dtype", "=", "np", ".", "int32", ")", "kk", "=", "np", ".", "ascontiguousarray", "(", "kk", ",", "dtype", "=", "np", ".", "int32", ")", "x", "=", "np", ".", "empty", "(", "(", "n", ",", ")", ",", "dtype", "=", "np", ".", "int32", ")", "y", "=", "np", ".", "empty", "(", "(", "n", ",", ")", ",", "dtype", "=", "np", ".", "int32", ")", "v", "=", "np", ".", "empty", "(", "(", "n", ",", ")", ",", "dtype", "=", "np", ".", "float64", ")", "free_rows", "=", "np", ".", "empty", "(", "(", "n", ",", ")", ",", "dtype", "=", "np", ".", "int32", ")", "# log.debug('[----Column reduction & reduction transfer----]')", "n_free_rows", "=", "_pycrrt", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "free_rows", ",", "x", ",", "y", ",", "v", ")", "# log.debug(", "# 'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v)", "if", "n_free_rows", "==", "0", ":", "# log.info('Reduction solved it.')", "if", "return_cost", "is", "True", ":", "return", "get_cost", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "x", ")", ",", "x", ",", "y", "else", ":", "return", "x", ",", "y", "for", "it", "in", "range", "(", "2", ")", ":", "# log.debug('[---Augmenting row reduction (iteration: %d)---]', it)", "n_free_rows", "=", "_pyarr", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "n_free_rows", ",", "free_rows", ",", "x", ",", "y", ",", "v", ")", "# log.debug(", "# 'free, x, y, v: %s %s %s %s', free_rows[:n_free_rows], x, y, v)", "if", "n_free_rows", "==", "0", ":", "# log.info('Augmenting row reduction solved it.')", "if", "return_cost", "is", "True", ":", "return", "get_cost", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "x", ")", ",", "x", ",", "y", "else", ":", "return", "x", ",", "y", "# log.info('[----Augmentation----]')", "_pya", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "n_free_rows", ",", "free_rows", ",", "x", ",", "y", ",", "v", ")", "# log.debug('x, y, v: %s %s %s', x, y, v)", "if", "return_cost", "is", "True", ":", "return", "get_cost", "(", "n", ",", "cc", ",", "ii", ",", "kk", ",", "x", ")", ",", "x", ",", "y", "else", ":", "return", "x", ",", "y" ]
Solve sparse linear assignment problem using Jonker-Volgenant algorithm. n: number of rows of the assignment cost matrix cc: 1D array of all finite elements of the assignement cost matrix ii: 1D array of indices of the row starts in cc. The following must hold: ii[0] = 0 and ii[n+1] = len(cc). kk: 1D array of the column indices so that: cost[i, kk[ii[i] + k]] == cc[ii[i] + k]. Indices within one row must be sorted. extend_cost: whether or not extend a non-square matrix [default: False] cost_limit: an upper limit for a cost of a single assignment [default: np.inf] return_cost: whether or not to return the assignment cost Returns (opt, x, y) where: opt: cost of the assignment x: vector of columns assigned to rows y: vector of rows assigned to columns or (x, y) if return_cost is not True. When extend_cost and/or cost_limit is set, all unmatched entries will be marked by -1 in x/y.
[ "Solve", "sparse", "linear", "assignment", "problem", "using", "Jonker", "-", "Volgenant", "algorithm", "." ]
python
train
41.913043
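Hedged usage of `lapmod` on a small, fully dense 3x3 problem written out in the sparse (cc, ii, kk) form the docstring describes; the import path assumes the published `lap` package:

```python
import numpy as np
from lap import lapmod   # assumes the lap package is installed

cost = np.array([[4., 1., 3.],
                 [2., 0., 5.],
                 [3., 2., 2.]])
n = 3
cc = cost.ravel()                               # all finite costs, row by row
ii = np.array([0, 3, 6, 9])                     # row start offsets into cc
kk = np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])      # column index for each entry
opt, x, y = lapmod(n, cc, ii, kk)
print(opt, x)   # optimal cost 5.0 here: rows assigned to columns 1, 0, 2
```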
JosuaKrause/quick_server
quick_server/quick_server.py
https://github.com/JosuaKrause/quick_server/blob/55dc7c5fe726a341f8476f749fe0f9da156fc1cb/quick_server/quick_server.py#L1189-L1202
def log_request(self, code='-', size='-'): """Logs the current request.""" print_size = getattr(thread_local, 'size', -1) if size != '-': size_str = ' (%s)' % size elif print_size >= 0: size_str = self.log_size_string(print_size) + ' ' else: size_str = '' if not self.server.suppress_noise or (code != 200 and code != 304): self.log_message( '%s"%s" %s', size_str, self.requestline, str(code)) if print_size >= 0: thread_local.size = -1
[ "def", "log_request", "(", "self", ",", "code", "=", "'-'", ",", "size", "=", "'-'", ")", ":", "print_size", "=", "getattr", "(", "thread_local", ",", "'size'", ",", "-", "1", ")", "if", "size", "!=", "'-'", ":", "size_str", "=", "' (%s)'", "%", "size", "elif", "print_size", ">=", "0", ":", "size_str", "=", "self", ".", "log_size_string", "(", "print_size", ")", "+", "' '", "else", ":", "size_str", "=", "''", "if", "not", "self", ".", "server", ".", "suppress_noise", "or", "(", "code", "!=", "200", "and", "code", "!=", "304", ")", ":", "self", ".", "log_message", "(", "'%s\"%s\" %s'", ",", "size_str", ",", "self", ".", "requestline", ",", "str", "(", "code", ")", ")", "if", "print_size", ">=", "0", ":", "thread_local", ".", "size", "=", "-", "1" ]
Logs the current request.
[ "Logs", "the", "current", "request", "." ]
python
train
39.642857
cdeboever3/cdpybio
cdpybio/star.py
https://github.com/cdeboever3/cdpybio/blob/38efdf0e11d01bc00a135921cb91a19c03db5d5c/cdpybio/star.py#L786-L838
def make_logs_df(fns, define_sample_name=None): """Make pandas DataFrame from multiple STAR Log.final.out files. Parameters ---------- fns : string List of paths to Log.final.out files. define_sample_name : function that takes string as input Function mapping filename to sample name. For instance, you may have the sample name in the path and use a regex to extract it. The sample names will be used as the column names. If this is not provided, the columns will be named as the input files. Returns ------- df : pandas.DataFrame DataFrame with info from log file. """ dfs = [] for fn in fns: dfs.append(_read_log(fn, define_sample_name=define_sample_name)) df = pd.concat(dfs,axis=1) df = df.T for label in [ 'Mapping speed, Million of reads per hour', 'Number of input reads', 'Average input read length', 'Uniquely mapped reads number', 'Average mapped length', 'Number of splices: Total', 'Number of splices: GT/AG', 'Number of splices: GC/AG', 'Number of splices: AT/AC', 'Number of splices: Non-canonical', 'Number of reads mapped to multiple loci', 'Number of reads mapped to too many loci']: df[label] = df[label].astype(float) for label in [ 'Uniquely mapped reads %', 'Mismatch rate per base, %', 'Deletion rate per base', 'Insertion rate per base', '% of reads mapped to multiple loci', '% of reads mapped to too many loci', '% of reads unmapped: too many mismatches', '% of reads unmapped: too short', '% of reads unmapped: other']: df[label] = df[label].apply(lambda x: x.strip('%')).astype(float) return df
[ "def", "make_logs_df", "(", "fns", ",", "define_sample_name", "=", "None", ")", ":", "dfs", "=", "[", "]", "for", "fn", "in", "fns", ":", "dfs", ".", "append", "(", "_read_log", "(", "fn", ",", "define_sample_name", "=", "define_sample_name", ")", ")", "df", "=", "pd", ".", "concat", "(", "dfs", ",", "axis", "=", "1", ")", "df", "=", "df", ".", "T", "for", "label", "in", "[", "'Mapping speed, Million of reads per hour'", ",", "'Number of input reads'", ",", "'Average input read length'", ",", "'Uniquely mapped reads number'", ",", "'Average mapped length'", ",", "'Number of splices: Total'", ",", "'Number of splices: GT/AG'", ",", "'Number of splices: GC/AG'", ",", "'Number of splices: AT/AC'", ",", "'Number of splices: Non-canonical'", ",", "'Number of reads mapped to multiple loci'", ",", "'Number of reads mapped to too many loci'", "]", ":", "df", "[", "label", "]", "=", "df", "[", "label", "]", ".", "astype", "(", "float", ")", "for", "label", "in", "[", "'Uniquely mapped reads %'", ",", "'Mismatch rate per base, %'", ",", "'Deletion rate per base'", ",", "'Insertion rate per base'", ",", "'% of reads mapped to multiple loci'", ",", "'% of reads mapped to too many loci'", ",", "'% of reads unmapped: too many mismatches'", ",", "'% of reads unmapped: too short'", ",", "'% of reads unmapped: other'", "]", ":", "df", "[", "label", "]", "=", "df", "[", "label", "]", ".", "apply", "(", "lambda", "x", ":", "x", ".", "strip", "(", "'%'", ")", ")", ".", "astype", "(", "float", ")", "return", "df" ]
Make pandas DataFrame from multiple STAR Log.final.out files. Parameters ---------- fns : string List of paths to Log.final.out files. define_sample_name : function that takes string as input Function mapping filename to sample name. For instance, you may have the sample name in the path and use a regex to extract it. The sample names will be used as the column names. If this is not provided, the columns will be named as the input files. Returns ------- df : pandas.DataFrame DataFrame with info from log file.
[ "Make", "pandas", "DataFrame", "from", "multiple", "STAR", "Log", ".", "final", ".", "out", "files", "." ]
python
train
35.037736
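Hedged usage of `make_logs_df`; the directory layout and the sample-name regex are assumptions about how the STAR `Log.final.out` files are organised on disk:

```python
import glob
import re

fns = glob.glob('star_output/*/Log.final.out')
df = make_logs_df(
    fns,
    define_sample_name=lambda fn: re.search(r'star_output/([^/]+)/', fn).group(1),
)
print(df['Uniquely mapped reads %'].describe())
```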
samirelanduk/quickplots
quickplots/series.py
https://github.com/samirelanduk/quickplots/blob/59f5e6ff367b2c1c24ba7cf1805d03552034c6d8/quickplots/series.py#L164-L174
def remove_data_point(self, x, y): """Removes the given data point from the series. :param x: The numerical x value of the data point to be removed. :param y: The numerical y value of the data point to be removed. :raises ValueError: if you try to remove the last data point from\ a series.""" if len(self._data) == 1: raise ValueError("You cannot remove a Series' last data point") self._data.remove((x, y))
[ "def", "remove_data_point", "(", "self", ",", "x", ",", "y", ")", ":", "if", "len", "(", "self", ".", "_data", ")", "==", "1", ":", "raise", "ValueError", "(", "\"You cannot remove a Series' last data point\"", ")", "self", ".", "_data", ".", "remove", "(", "(", "x", ",", "y", ")", ")" ]
Removes the given data point from the series. :param x: The numerical x value of the data point to be removed. :param y: The numerical y value of the data point to be removed. :raises ValueError: if you try to remove the last data point from\ a series.
[ "Removes", "the", "given", "data", "point", "from", "the", "series", "." ]
python
train
42.545455
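Hedged usage of `remove_data_point`; constructing the `Series` from positional (x, y) tuples is an assumption about the quickplots API rather than something this record documents:

```python
from quickplots.series import Series   # import path assumed

s = Series((1, 1), (2, 4), (3, 9))
s.remove_data_point(2, 4)      # drops one point
s.remove_data_point(1, 1)
# s.remove_data_point(3, 9) would now raise ValueError -- a series must keep
# at least one data point, as the docstring above states.
```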
carpedm20/fbchat
fbchat/_client.py
https://github.com/carpedm20/fbchat/blob/f480d68b5773473e6daba7f66075ee30e8d737a8/fbchat/_client.py#L108-L118
def _generatePayload(self, query): """Adds the following defaults to the payload: __rev, __user, __a, ttstamp, fb_dtsg, __req """ payload = self._payload_default.copy() if query: payload.update(query) payload["__req"] = str_base(self._req_counter, 36) payload["seq"] = self._seq self._req_counter += 1 return payload
[ "def", "_generatePayload", "(", "self", ",", "query", ")", ":", "payload", "=", "self", ".", "_payload_default", ".", "copy", "(", ")", "if", "query", ":", "payload", ".", "update", "(", "query", ")", "payload", "[", "\"__req\"", "]", "=", "str_base", "(", "self", ".", "_req_counter", ",", "36", ")", "payload", "[", "\"seq\"", "]", "=", "self", ".", "_seq", "self", ".", "_req_counter", "+=", "1", "return", "payload" ]
Adds the following defaults to the payload: __rev, __user, __a, ttstamp, fb_dtsg, __req
[ "Adds", "the", "following", "defaults", "to", "the", "payload", ":", "__rev", "__user", "__a", "ttstamp", "fb_dtsg", "__req" ]
python
train
35.636364
PyCQA/pyflakes
pyflakes/checker.py
https://github.com/PyCQA/pyflakes/blob/232cb1d27ee134bf96adc8f37e53589dc259b159/pyflakes/checker.py#L1091-L1097
def isDocstring(self, node): """ Determine if the given node is a docstring, as long as it is at the correct place in the node tree. """ return isinstance(node, ast.Str) or (isinstance(node, ast.Expr) and isinstance(node.value, ast.Str))
[ "def", "isDocstring", "(", "self", ",", "node", ")", ":", "return", "isinstance", "(", "node", ",", "ast", ".", "Str", ")", "or", "(", "isinstance", "(", "node", ",", "ast", ".", "Expr", ")", "and", "isinstance", "(", "node", ".", "value", ",", "ast", ".", "Str", ")", ")" ]
Determine if the given node is a docstring, as long as it is at the correct place in the node tree.
[ "Determine", "if", "the", "given", "node", "is", "a", "docstring", "as", "long", "as", "it", "is", "at", "the", "correct", "place", "in", "the", "node", "tree", "." ]
python
train
45.142857
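Standalone illustration of the docstring test above, applying the same `isinstance` checks to the first statement of a parsed module (note that `ast.Str` is deprecated and later removed on newer Pythons, so the exact check is version-dependent):

```python
import ast

module = ast.parse('"""module docstring"""\nx = 1\n')
first = module.body[0]
is_doc = isinstance(first, ast.Str) or (
    isinstance(first, ast.Expr) and isinstance(first.value, ast.Str))
print(is_doc)   # True on interpreters where ast.Str is still available
```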
geopy/geopy
geopy/units.py
https://github.com/geopy/geopy/blob/02c838d965e76497f3c3d61f53808c86b5c58224/geopy/units.py#L24-L32
def radians(degrees=0, arcminutes=0, arcseconds=0): """ TODO docs. """ if arcminutes: degrees += arcminutes / arcmin(degrees=1.) if arcseconds: degrees += arcseconds / arcsec(degrees=1.) return math.radians(degrees)
[ "def", "radians", "(", "degrees", "=", "0", ",", "arcminutes", "=", "0", ",", "arcseconds", "=", "0", ")", ":", "if", "arcminutes", ":", "degrees", "+=", "arcminutes", "/", "arcmin", "(", "degrees", "=", "1.", ")", "if", "arcseconds", ":", "degrees", "+=", "arcseconds", "/", "arcsec", "(", "degrees", "=", "1.", ")", "return", "math", ".", "radians", "(", "degrees", ")" ]
Convert an angle given in degrees, arcminutes and arcseconds to radians.
[ "TODO", "docs", "." ]
python
train
27.444444
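Worked example of the conversion performed by `radians` above. The helper is re-expressed here with arcminutes and arcseconds inlined as 1/60 and 1/3600 of a degree, which is what geopy's `arcmin`/`arcsec` unit helpers amount to (an assumption, since they are not shown in this record):

```python
import math

def to_radians(degrees=0, arcminutes=0, arcseconds=0):
    degrees += arcminutes / 60.0 + arcseconds / 3600.0
    return math.radians(degrees)

print(round(to_radians(30, 15), 6))   # 30 deg 15' = 30.25 deg ~ 0.527962 rad
```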
IAMconsortium/pyam
pyam/core.py
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/core.py#L1334-L1338
def _aggregate(df, by): """Aggregate `df` by specified column(s), return indexed `pd.Series`""" by = [by] if isstr(by) else by cols = [c for c in list(df.columns) if c not in ['value'] + by] return df.groupby(cols).sum()['value']
[ "def", "_aggregate", "(", "df", ",", "by", ")", ":", "by", "=", "[", "by", "]", "if", "isstr", "(", "by", ")", "else", "by", "cols", "=", "[", "c", "for", "c", "in", "list", "(", "df", ".", "columns", ")", "if", "c", "not", "in", "[", "'value'", "]", "+", "by", "]", "return", "df", ".", "groupby", "(", "cols", ")", ".", "sum", "(", ")", "[", "'value'", "]" ]
Aggregate `df` by specified column(s), return indexed `pd.Series`
[ "Aggregate", "df", "by", "specified", "column", "(", "s", ")", "return", "indexed", "pd", ".", "Series" ]
python
train
48.2
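Toy illustration of what `_aggregate` computes: group on every column except 'value' and the ones being aggregated away, then sum. The column names mirror pyam's IAMC layout; the numbers are made up:

```python
import pandas as pd

df = pd.DataFrame({
    'model': ['m'] * 4, 'scenario': ['s'] * 4,
    'region': ['EU', 'EU', 'US', 'US'], 'year': [2020] * 4,
    'variable': ['Emissions|CO2|Energy', 'Emissions|CO2|Industry'] * 2,
    'value': [1.0, 2.0, 3.0, 4.0],
})
by = ['variable']
cols = [c for c in df.columns if c not in ['value'] + by]
print(df.groupby(cols)['value'].sum())
# EU -> 3.0, US -> 7.0: the two CO2 components are summed per region/year
```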
IAMconsortium/pyam
pyam/plotting.py
https://github.com/IAMconsortium/pyam/blob/4077929ca6e7be63a0e3ecf882c5f1da97b287bf/pyam/plotting.py#L188-L205
def read_shapefile(fname, region_col=None, **kwargs): """Read a shapefile for use in regional plots. Shapefiles must have a column denoted as "region". Parameters ---------- fname : string path to shapefile to be read by geopandas region_col : string, default None if provided, rename a column in the shapefile to "region" """ gdf = gpd.read_file(fname, **kwargs) if region_col is not None: gdf = gdf.rename(columns={region_col: 'region'}) if 'region' not in gdf.columns: raise IOError('Must provide a region column') gdf['region'] = gdf['region'].str.upper() return gdf
[ "def", "read_shapefile", "(", "fname", ",", "region_col", "=", "None", ",", "*", "*", "kwargs", ")", ":", "gdf", "=", "gpd", ".", "read_file", "(", "fname", ",", "*", "*", "kwargs", ")", "if", "region_col", "is", "not", "None", ":", "gdf", "=", "gdf", ".", "rename", "(", "columns", "=", "{", "region_col", ":", "'region'", "}", ")", "if", "'region'", "not", "in", "gdf", ".", "columns", ":", "raise", "IOError", "(", "'Must provide a region column'", ")", "gdf", "[", "'region'", "]", "=", "gdf", "[", "'region'", "]", ".", "str", ".", "upper", "(", ")", "return", "gdf" ]
Read a shapefile for use in regional plots. Shapefiles must have a column denoted as "region". Parameters ---------- fname : string path to shapefile to be read by geopandas region_col : string, default None if provided, rename a column in the shapefile to "region"
[ "Read", "a", "shapefile", "for", "use", "in", "regional", "plots", ".", "Shapefiles", "must", "have", "a", "column", "denoted", "as", "region", "." ]
python
train
35.222222
nschloe/colorio
experiments/new-cs.py
https://github.com/nschloe/colorio/blob/357d6001b3cf30f752e23726bf429dc1d1c60b3a/experiments/new-cs.py#L423-L458
def apply_M(self, ax, ay): """Linear operator that converts ax, ay to abcd. """ jac = numpy.array( [[self.dx.dot(ax), self.dy.dot(ax)], [self.dx.dot(ay), self.dy.dot(ay)]] ) # jacs and J are of shape (2, 2, k). M must be of the same shape and # contain the result of the k 2x2 dot products. Perhaps there's a # dot() for this. M = numpy.einsum("ijl,jkl->ikl", jac, self.J) # M = numpy.array([ # [ # jac[0][0]*self.J[0][0] + jac[0][1]*self.J[1][0], # jac[0][0]*self.J[0][1] + jac[0][1]*self.J[1][1], # ], # [ # jac[1][0]*self.J[0][0] + jac[1][1]*self.J[1][0], # jac[1][0]*self.J[0][1] + jac[1][1]*self.J[1][1], # ], # ]) # One could use # # M = numpy.moveaxis(M, -1, 0) # _, sigma, _ = numpy.linalg.svd(M) # # but computing the singular values explicitly via # <https://scicomp.stackexchange.com/a/14103/3980> is faster and more # explicit. a = (M[0, 0] + M[1, 1]) / 2 b = (M[0, 0] - M[1, 1]) / 2 c = (M[1, 0] + M[0, 1]) / 2 d = (M[1, 0] - M[0, 1]) / 2 return a, b, c, d
[ "def", "apply_M", "(", "self", ",", "ax", ",", "ay", ")", ":", "jac", "=", "numpy", ".", "array", "(", "[", "[", "self", ".", "dx", ".", "dot", "(", "ax", ")", ",", "self", ".", "dy", ".", "dot", "(", "ax", ")", "]", ",", "[", "self", ".", "dx", ".", "dot", "(", "ay", ")", ",", "self", ".", "dy", ".", "dot", "(", "ay", ")", "]", "]", ")", "# jacs and J are of shape (2, 2, k). M must be of the same shape and", "# contain the result of the k 2x2 dot products. Perhaps there's a", "# dot() for this.", "M", "=", "numpy", ".", "einsum", "(", "\"ijl,jkl->ikl\"", ",", "jac", ",", "self", ".", "J", ")", "# M = numpy.array([", "# [", "# jac[0][0]*self.J[0][0] + jac[0][1]*self.J[1][0],", "# jac[0][0]*self.J[0][1] + jac[0][1]*self.J[1][1],", "# ],", "# [", "# jac[1][0]*self.J[0][0] + jac[1][1]*self.J[1][0],", "# jac[1][0]*self.J[0][1] + jac[1][1]*self.J[1][1],", "# ],", "# ])", "# One could use", "#", "# M = numpy.moveaxis(M, -1, 0)", "# _, sigma, _ = numpy.linalg.svd(M)", "#", "# but computing the singular values explicitly via", "# <https://scicomp.stackexchange.com/a/14103/3980> is faster and more", "# explicit.", "a", "=", "(", "M", "[", "0", ",", "0", "]", "+", "M", "[", "1", ",", "1", "]", ")", "/", "2", "b", "=", "(", "M", "[", "0", ",", "0", "]", "-", "M", "[", "1", ",", "1", "]", ")", "/", "2", "c", "=", "(", "M", "[", "1", ",", "0", "]", "+", "M", "[", "0", ",", "1", "]", ")", "/", "2", "d", "=", "(", "M", "[", "1", ",", "0", "]", "-", "M", "[", "0", ",", "1", "]", ")", "/", "2", "return", "a", ",", "b", ",", "c", ",", "d" ]
Linear operator that converts ax, ay to abcd.
[ "Linear", "operator", "that", "converts", "ax", "ay", "to", "abcd", "." ]
python
train
34.916667
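Numerical check of the (a, b, c, d) decomposition used in `apply_M` above: for a 2x2 matrix the singular values are hypot(a, d) +/- hypot(b, c), matching the explicit-SVD reference cited in the code comments.

```python
import numpy as np

M = np.array([[3.0, 1.0],
              [2.0, -1.5]])
a = (M[0, 0] + M[1, 1]) / 2
b = (M[0, 0] - M[1, 1]) / 2
c = (M[1, 0] + M[0, 1]) / 2
d = (M[1, 0] - M[0, 1]) / 2
s_max = np.hypot(a, d) + np.hypot(b, c)
s_min = abs(np.hypot(a, d) - np.hypot(b, c))
print(sorted([s_min, s_max]))                        # ~[1.803, 3.606]
print(sorted(np.linalg.svd(M, compute_uv=False)))    # same values from numpy
```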
metagriffin/morph
morph/__init__.py
https://github.com/metagriffin/morph/blob/907f169b0155712c466d3aac29f0907d0f36b443/morph/__init__.py#L95-L111
def tolist(obj, flat=True, split=True): ''' Returns `obj` as a list: if it is falsy, returns an empty list; if it is a string and `split` is truthy, then it is split into substrings using Unix shell semantics; if it is sequence-like, a list is returned optionally flattened if `flat` is truthy (see :func:`flatten`). ''' # todo: it would be "pretty awesome" if this could auto-detect # comma-separation rather than space-separation if not obj: return [] if isseq(obj): return flatten(obj) if flat else list(obj) if isstr(obj) and split: return shlex.split(obj) return [obj]
[ "def", "tolist", "(", "obj", ",", "flat", "=", "True", ",", "split", "=", "True", ")", ":", "# todo: it would be \"pretty awesome\" if this could auto-detect", "# comma-separation rather than space-separation", "if", "not", "obj", ":", "return", "[", "]", "if", "isseq", "(", "obj", ")", ":", "return", "flatten", "(", "obj", ")", "if", "flat", "else", "list", "(", "obj", ")", "if", "isstr", "(", "obj", ")", "and", "split", ":", "return", "shlex", ".", "split", "(", "obj", ")", "return", "[", "obj", "]" ]
Returns `obj` as a list: if it is falsy, returns an empty list; if it is a string and `split` is truthy, then it is split into substrings using Unix shell semantics; if it is sequence-like, a list is returned optionally flattened if `flat` is truthy (see :func:`flatten`).
[ "Returns", "obj", "as", "a", "list", ":", "if", "it", "is", "falsy", "returns", "an", "empty", "list", ";", "if", "it", "is", "a", "string", "and", "split", "is", "truthy", "then", "it", "is", "split", "into", "substrings", "using", "Unix", "shell", "semantics", ";", "if", "it", "is", "sequence", "-", "like", "a", "list", "is", "returned", "optionally", "flattened", "if", "flat", "is", "truthy", "(", "see", ":", "func", ":", "flatten", ")", "." ]
python
train
35.352941
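Behaviour sketch for `tolist` above; each output follows directly from the branches in the function, assuming morph's `flatten` performs a deep flatten as its name suggests:

```python
print(tolist(None))                      # -> []                    (falsy input)
print(tolist('a b "c d"'))               # -> ['a', 'b', 'c d']     (shell-style split)
print(tolist([1, [2, 3]]))               # -> [1, 2, 3]             (flattened)
print(tolist([1, [2, 3]], flat=False))   # -> [1, [2, 3]]
print(tolist(42))                        # -> [42]
```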
choderalab/pymbar
pymbar/timeseries.py
https://github.com/choderalab/pymbar/blob/69d1f0ff680e9ac1c6a51a5a207ea28f3ed86740/pymbar/timeseries.py#L632-L736
def subsampleCorrelatedData(A_t, g=None, fast=False, conservative=False, verbose=False): """Determine the indices of an uncorrelated subsample of the data. Parameters ---------- A_t : np.ndarray A_t[t] is the t-th value of timeseries A(t). Length is deduced from vector. g : float, optional if provided, the statistical inefficiency g is used to subsample the timeseries -- otherwise it will be computed (default: None) fast : bool, optional, default=False fast can be set to True to give a less accurate but very quick estimate (default: False) conservative : bool, optional, default=False if set to True, uniformly-spaced indices are chosen with interval ceil(g), where g is the statistical inefficiency. Otherwise, indices are chosen non-uniformly with interval of approximately g in order to end up with approximately T/g total indices verbose : bool, optional, default=False if True, some output is printed Returns ------- indices : list of int the indices of an uncorrelated subsample of the data Notes ----- The statistical inefficiency is computed with the function computeStatisticalInefficiency(). ToDo ---- Instead of using regular stride, use irregular stride so more data can be fit in when g is non-integral. Examples -------- Subsample a correlated timeseries to extract an effectively uncorrelated dataset. >>> from pymbar import testsystems >>> A_t = testsystems.correlated_timeseries_example(N=10000, tau=5.0) # generate a test correlated timeseries >>> indices = subsampleCorrelatedData(A_t) # compute indices of uncorrelated timeseries >>> A_n = A_t[indices] # extract uncorrelated samples Extract uncorrelated samples from multiple timeseries data from the same process. >>> # Generate multiple correlated timeseries data of different lengths. >>> T_k = [1000, 2000, 3000, 4000, 5000] >>> K = len(T_k) # number of timeseries >>> tau = 5.0 # exponential relaxation time >>> A_kt = [ testsystems.correlated_timeseries_example(N=T, tau=tau) for T in T_k ] # A_kt[k] is correlated timeseries k >>> # Estimate statistical inefficiency from all timeseries data. >>> g = statisticalInefficiencyMultiple(A_kt) >>> # Count number of uncorrelated samples in each timeseries. >>> N_k = np.array([ len(subsampleCorrelatedData(A_t, g=g)) for A_t in A_kt ]) # N_k[k] is the number of uncorrelated samples in timeseries k >>> N = N_k.sum() # total number of uncorrelated samples >>> # Subsample all trajectories to produce uncorrelated samples >>> A_kn = [ A_t[subsampleCorrelatedData(A_t, g=g)] for A_t in A_kt ] # A_kn[k] is uncorrelated subset of trajectory A_kt[t] >>> # Concatenate data into one timeseries. >>> A_n = np.zeros([N], np.float32) # A_n[n] is nth sample in concatenated set of uncorrelated samples >>> A_n[0:N_k[0]] = A_kn[0] >>> for k in range(1,K): A_n[N_k[0:k].sum():N_k[0:k+1].sum()] = A_kn[k] """ # Create np copy of arrays. A_t = np.array(A_t) # Get the length of the timeseries. T = A_t.size # Compute the statistical inefficiency for the timeseries. if not g: if verbose: print("Computing statistical inefficiency...") g = statisticalInefficiency(A_t, A_t, fast=fast) if verbose: print("g = %f" % g) if conservative: # Round g up to determine the stride we can use to pick out regularly-spaced uncorrelated samples. stride = int(math.ceil(g)) if verbose: print("conservative subsampling: using stride of %d" % stride) # Assemble list of indices of uncorrelated snapshots. indices = range(0, T, stride) else: # Choose indices as floor(n*g), with n = 0,1,2,..., until we run out of data. 
indices = [] n = 0 while int(round(n * g)) < T: t = int(round(n * g)) # ensure we don't sample the same point twice if (n == 0) or (t != indices[n - 1]): indices.append(t) n += 1 if verbose: print("standard subsampling: using average stride of %f" % g) # Number of samples in subsampled timeseries. N = len(indices) if verbose: print("The resulting subsampled set has %d samples (original timeseries had %d)." % (N, T)) # Return the list of indices of uncorrelated snapshots. return indices
[ "def", "subsampleCorrelatedData", "(", "A_t", ",", "g", "=", "None", ",", "fast", "=", "False", ",", "conservative", "=", "False", ",", "verbose", "=", "False", ")", ":", "# Create np copy of arrays.", "A_t", "=", "np", ".", "array", "(", "A_t", ")", "# Get the length of the timeseries.", "T", "=", "A_t", ".", "size", "# Compute the statistical inefficiency for the timeseries.", "if", "not", "g", ":", "if", "verbose", ":", "print", "(", "\"Computing statistical inefficiency...\"", ")", "g", "=", "statisticalInefficiency", "(", "A_t", ",", "A_t", ",", "fast", "=", "fast", ")", "if", "verbose", ":", "print", "(", "\"g = %f\"", "%", "g", ")", "if", "conservative", ":", "# Round g up to determine the stride we can use to pick out regularly-spaced uncorrelated samples.", "stride", "=", "int", "(", "math", ".", "ceil", "(", "g", ")", ")", "if", "verbose", ":", "print", "(", "\"conservative subsampling: using stride of %d\"", "%", "stride", ")", "# Assemble list of indices of uncorrelated snapshots.", "indices", "=", "range", "(", "0", ",", "T", ",", "stride", ")", "else", ":", "# Choose indices as floor(n*g), with n = 0,1,2,..., until we run out of data.", "indices", "=", "[", "]", "n", "=", "0", "while", "int", "(", "round", "(", "n", "*", "g", ")", ")", "<", "T", ":", "t", "=", "int", "(", "round", "(", "n", "*", "g", ")", ")", "# ensure we don't sample the same point twice", "if", "(", "n", "==", "0", ")", "or", "(", "t", "!=", "indices", "[", "n", "-", "1", "]", ")", ":", "indices", ".", "append", "(", "t", ")", "n", "+=", "1", "if", "verbose", ":", "print", "(", "\"standard subsampling: using average stride of %f\"", "%", "g", ")", "# Number of samples in subsampled timeseries.", "N", "=", "len", "(", "indices", ")", "if", "verbose", ":", "print", "(", "\"The resulting subsampled set has %d samples (original timeseries had %d).\"", "%", "(", "N", ",", "T", ")", ")", "# Return the list of indices of uncorrelated snapshots.", "return", "indices" ]
Determine the indices of an uncorrelated subsample of the data. Parameters ---------- A_t : np.ndarray A_t[t] is the t-th value of timeseries A(t). Length is deduced from vector. g : float, optional if provided, the statistical inefficiency g is used to subsample the timeseries -- otherwise it will be computed (default: None) fast : bool, optional, default=False fast can be set to True to give a less accurate but very quick estimate (default: False) conservative : bool, optional, default=False if set to True, uniformly-spaced indices are chosen with interval ceil(g), where g is the statistical inefficiency. Otherwise, indices are chosen non-uniformly with interval of approximately g in order to end up with approximately T/g total indices verbose : bool, optional, default=False if True, some output is printed Returns ------- indices : list of int the indices of an uncorrelated subsample of the data Notes ----- The statistical inefficiency is computed with the function computeStatisticalInefficiency(). ToDo ---- Instead of using regular stride, use irregular stride so more data can be fit in when g is non-integral. Examples -------- Subsample a correlated timeseries to extract an effectively uncorrelated dataset. >>> from pymbar import testsystems >>> A_t = testsystems.correlated_timeseries_example(N=10000, tau=5.0) # generate a test correlated timeseries >>> indices = subsampleCorrelatedData(A_t) # compute indices of uncorrelated timeseries >>> A_n = A_t[indices] # extract uncorrelated samples Extract uncorrelated samples from multiple timeseries data from the same process. >>> # Generate multiple correlated timeseries data of different lengths. >>> T_k = [1000, 2000, 3000, 4000, 5000] >>> K = len(T_k) # number of timeseries >>> tau = 5.0 # exponential relaxation time >>> A_kt = [ testsystems.correlated_timeseries_example(N=T, tau=tau) for T in T_k ] # A_kt[k] is correlated timeseries k >>> # Estimate statistical inefficiency from all timeseries data. >>> g = statisticalInefficiencyMultiple(A_kt) >>> # Count number of uncorrelated samples in each timeseries. >>> N_k = np.array([ len(subsampleCorrelatedData(A_t, g=g)) for A_t in A_kt ]) # N_k[k] is the number of uncorrelated samples in timeseries k >>> N = N_k.sum() # total number of uncorrelated samples >>> # Subsample all trajectories to produce uncorrelated samples >>> A_kn = [ A_t[subsampleCorrelatedData(A_t, g=g)] for A_t in A_kt ] # A_kn[k] is uncorrelated subset of trajectory A_kt[t] >>> # Concatenate data into one timeseries. >>> A_n = np.zeros([N], np.float32) # A_n[n] is nth sample in concatenated set of uncorrelated samples >>> A_n[0:N_k[0]] = A_kn[0] >>> for k in range(1,K): A_n[N_k[0:k].sum():N_k[0:k+1].sum()] = A_kn[k]
[ "Determine", "the", "indices", "of", "an", "uncorrelated", "subsample", "of", "the", "data", "." ]
python
train
42.209524
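A minimal standalone sketch of the non-conservative index selection described in the record above: indices are taken at (approximately) integer multiples of the statistical inefficiency g, skipping duplicates. It simplifies the duplicate check to compare against the last accepted index; T and g are assumed inputs rather than values from any particular dataset.

def pick_uncorrelated_indices(T, g):
    """Pick indices near n*g for n = 0, 1, 2, ... without repeats."""
    indices = []
    n = 0
    while int(round(n * g)) < T:
        t = int(round(n * g))
        if not indices or t != indices[-1]:
            indices.append(t)
        n += 1
    return indices

# A series of length 100 with statistical inefficiency ~7.3 yields roughly
# 100 / 7.3 (about 14) approximately evenly spaced indices.
print(pick_uncorrelated_indices(100, 7.3))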
attm2x/m2x-python
m2x/v2/metadata.py
https://github.com/attm2x/m2x-python/blob/df83f590114692b1f96577148b7ba260065905bb/m2x/v2/metadata.py#L39-L55
def update_metadata(self, params): """ Generic method for a resource's Update Metadata endpoint. Example endpoints: * `Update Device Metadata <https://m2x.att.com/developer/documentation/v2/device#Update-Device-Metadata>`_ * `Update Distribution Metadata <https://m2x.att.com/developer/documentation/v2/distribution#Update-Distribution-Metadata>`_ * `Update Collection Metadata <https://m2x.att.com/developer/documentation/v2/collections#Update-Collection-Metadata>`_ :param params: The metadata being updated :return: The API response, see M2X API docs for details :rtype: dict :raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request """ return self.api.put(self.metadata_path(), data=params)
[ "def", "update_metadata", "(", "self", ",", "params", ")", ":", "return", "self", ".", "api", ".", "put", "(", "self", ".", "metadata_path", "(", ")", ",", "data", "=", "params", ")" ]
Generic method for a resource's Update Metadata endpoint. Example endpoints: * `Update Device Metadata <https://m2x.att.com/developer/documentation/v2/device#Update-Device-Metadata>`_ * `Update Distribution Metadata <https://m2x.att.com/developer/documentation/v2/distribution#Update-Distribution-Metadata>`_ * `Update Collection Metadata <https://m2x.att.com/developer/documentation/v2/collections#Update-Collection-Metadata>`_ :param params: The metadata being updated :return: The API response, see M2X API docs for details :rtype: dict :raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
[ "Generic", "method", "for", "a", "resource", "s", "Update", "Metadata", "endpoint", "." ]
python
test
45.176471
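A hedged usage sketch for the Update Metadata passthrough above. The client construction and the device lookup follow the usual m2x-python pattern from memory and should be treated as assumptions; the API key, device id and metadata values are placeholders.

from m2x.client import M2XClient

client = M2XClient(key='<M2X-API-KEY>')                       # assumption: standard client entry point
device = client.device('<DEVICE-ID>')                         # assumption: device lookup helper
device.update_metadata({'location_note': 'rack 3, row 2'})    # method shown in the record above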
Nachtfeuer/pipeline
spline/application.py
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/application.py#L130-L160
def run(self, definition): """Processing the pipeline.""" self.logger.info("Running with Python %s", sys.version.replace("\n", "")) self.logger.info("Running on platform %s", platform.platform()) self.logger.info("Current cpu count is %d", multiprocessing.cpu_count()) self.logger.info("Processing pipeline definition '%s'", definition) document = self.validate_document(definition) if self.options.validate_only: self.logger.info("Stopping after validation as requested!") return [] self.provide_temporary_scripts_path() versions = VersionsCheck().process(document) VersionsReport().process(versions) collector = Application.create_and_run_collector(document, self.options) matrix = find_matrix(document) output = [] if len(matrix) == 0: model = {} if 'model' not in document else document['model'] pipeline = Pipeline(model=model, options=self.options) pipeline.hooks = Hooks(document) result = pipeline.process(document['pipeline']) else: result = self.run_matrix(matrix, document) output = result['output'] self.shutdown(collector, success=result['success']) return output
[ "def", "run", "(", "self", ",", "definition", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Running with Python %s\"", ",", "sys", ".", "version", ".", "replace", "(", "\"\\n\"", ",", "\"\"", ")", ")", "self", ".", "logger", ".", "info", "(", "\"Running on platform %s\"", ",", "platform", ".", "platform", "(", ")", ")", "self", ".", "logger", ".", "info", "(", "\"Current cpu count is %d\"", ",", "multiprocessing", ".", "cpu_count", "(", ")", ")", "self", ".", "logger", ".", "info", "(", "\"Processing pipeline definition '%s'\"", ",", "definition", ")", "document", "=", "self", ".", "validate_document", "(", "definition", ")", "if", "self", ".", "options", ".", "validate_only", ":", "self", ".", "logger", ".", "info", "(", "\"Stopping after validation as requested!\"", ")", "return", "[", "]", "self", ".", "provide_temporary_scripts_path", "(", ")", "versions", "=", "VersionsCheck", "(", ")", ".", "process", "(", "document", ")", "VersionsReport", "(", ")", ".", "process", "(", "versions", ")", "collector", "=", "Application", ".", "create_and_run_collector", "(", "document", ",", "self", ".", "options", ")", "matrix", "=", "find_matrix", "(", "document", ")", "output", "=", "[", "]", "if", "len", "(", "matrix", ")", "==", "0", ":", "model", "=", "{", "}", "if", "'model'", "not", "in", "document", "else", "document", "[", "'model'", "]", "pipeline", "=", "Pipeline", "(", "model", "=", "model", ",", "options", "=", "self", ".", "options", ")", "pipeline", ".", "hooks", "=", "Hooks", "(", "document", ")", "result", "=", "pipeline", ".", "process", "(", "document", "[", "'pipeline'", "]", ")", "else", ":", "result", "=", "self", ".", "run_matrix", "(", "matrix", ",", "document", ")", "output", "=", "result", "[", "'output'", "]", "self", ".", "shutdown", "(", "collector", ",", "success", "=", "result", "[", "'success'", "]", ")", "return", "output" ]
Processing the pipeline.
[ "Processing", "the", "pipeline", "." ]
python
train
41.258065
sci-bots/mpm
mpm/api.py
https://github.com/sci-bots/mpm/blob/a69651cda4b37ee6b17df4fe0809249e7f4dc536/mpm/api.py#L94-L124
def _remove_broken_links(): ''' Remove broken links in `<conda prefix>/etc/microdrop/plugins/enabled/`. Returns ------- list List of links removed (if any). ''' enabled_dir = MICRODROP_CONDA_PLUGINS.joinpath('enabled') if not enabled_dir.isdir(): return [] broken_links = [] for dir_i in enabled_dir.walkdirs(errors='ignore'): if platform.system() == 'Windows': if dir_i.isjunction() and not dir_i.readlink().isdir(): # Junction/link target no longer exists. broken_links.append(dir_i) else: raise NotImplementedError('Unsupported platform') removed_links = [] for link_i in broken_links: try: link_i.unlink() except: pass else: removed_links.append(link_i) return removed_links
[ "def", "_remove_broken_links", "(", ")", ":", "enabled_dir", "=", "MICRODROP_CONDA_PLUGINS", ".", "joinpath", "(", "'enabled'", ")", "if", "not", "enabled_dir", ".", "isdir", "(", ")", ":", "return", "[", "]", "broken_links", "=", "[", "]", "for", "dir_i", "in", "enabled_dir", ".", "walkdirs", "(", "errors", "=", "'ignore'", ")", ":", "if", "platform", ".", "system", "(", ")", "==", "'Windows'", ":", "if", "dir_i", ".", "isjunction", "(", ")", "and", "not", "dir_i", ".", "readlink", "(", ")", ".", "isdir", "(", ")", ":", "# Junction/link target no longer exists.", "broken_links", ".", "append", "(", "dir_i", ")", "else", ":", "raise", "NotImplementedError", "(", "'Unsupported platform'", ")", "removed_links", "=", "[", "]", "for", "link_i", "in", "broken_links", ":", "try", ":", "link_i", ".", "unlink", "(", ")", "except", ":", "pass", "else", ":", "removed_links", ".", "append", "(", "link_i", ")", "return", "removed_links" ]
Remove broken links in `<conda prefix>/etc/microdrop/plugins/enabled/`. Returns ------- list List of links removed (if any).
[ "Remove", "broken", "links", "in", "<conda", "prefix", ">", "/", "etc", "/", "microdrop", "/", "plugins", "/", "enabled", "/", "." ]
python
train
27.451613
gwastro/pycbc
pycbc/workflow/core.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/core.py#L926-L940
def add_multiifo_output_list_opt(self, opt, outputs): """ Add an option that determines a list of outputs from multiple detectors. Files will be supplied as --opt ifo1:input1 ifo2:input2 ..... """ # NOTE: Here we have to use the raw arguments functionality as the # file and ifo are not space separated. self.add_raw_arg(opt) self.add_raw_arg(' ') for outfile in outputs: self.add_raw_arg(outfile.ifo) self.add_raw_arg(':') self.add_raw_arg(outfile.name) self.add_raw_arg(' ') self._add_output(outfile)
[ "def", "add_multiifo_output_list_opt", "(", "self", ",", "opt", ",", "outputs", ")", ":", "# NOTE: Here we have to use the raw arguments functionality as the", "# file and ifo are not space separated.", "self", ".", "add_raw_arg", "(", "opt", ")", "self", ".", "add_raw_arg", "(", "' '", ")", "for", "outfile", "in", "outputs", ":", "self", ".", "add_raw_arg", "(", "outfile", ".", "ifo", ")", "self", ".", "add_raw_arg", "(", "':'", ")", "self", ".", "add_raw_arg", "(", "outfile", ".", "name", ")", "self", ".", "add_raw_arg", "(", "' '", ")", "self", ".", "_add_output", "(", "outfile", ")" ]
Add an option that determines a list of outputs from multiple detectors. Files will be supplied as --opt ifo1:input1 ifo2:input2 .....
[ "Add", "an", "option", "that", "determines", "a", "list", "of", "outputs", "from", "multiple", "detectors", ".", "Files", "will", "be", "supplied", "as", "--", "opt", "ifo1", ":", "input1", "ifo2", ":", "input2", "....." ]
python
train
42.266667
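The raw-argument loop above renders the option as space-separated ifo:filename pairs. A tiny sketch of the resulting command-line fragment, with a hypothetical option name and hypothetical detector and file names:

outputs = [("H1", "H1-INSPIRAL.xml"), ("L1", "L1-INSPIRAL.xml"), ("V1", "V1-INSPIRAL.xml")]
fragment = "--output-files " + " ".join("%s:%s" % (ifo, name) for ifo, name in outputs)
print(fragment)   # --output-files H1:H1-INSPIRAL.xml L1:L1-INSPIRAL.xml V1:V1-INSPIRAL.xml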
quora/qcore
qcore/asserts.py
https://github.com/quora/qcore/blob/fa5cd438eea554db35fd29cbc8dfbde69f09961c/qcore/asserts.py#L104-L130
def assert_eq(expected, actual, message=None, tolerance=None, extra=None): """Raises an AssertionError if expected != actual. If tolerance is specified, raises an AssertionError if either - expected or actual isn't a number, or - the difference between expected and actual is larger than the tolerance. """ if tolerance is None: assert expected == actual, _assert_fail_message( message, expected, actual, "!=", extra ) else: assert isinstance(tolerance, _number_types), ( "tolerance parameter to assert_eq must be a number: %r" % tolerance ) assert isinstance(expected, _number_types) and isinstance( actual, _number_types ), ( "parameters must be numbers when tolerance is specified: %r, %r" % (expected, actual) ) diff = abs(expected - actual) assert diff <= tolerance, _assert_fail_message( message, expected, actual, "is more than %r away from" % tolerance, extra )
[ "def", "assert_eq", "(", "expected", ",", "actual", ",", "message", "=", "None", ",", "tolerance", "=", "None", ",", "extra", "=", "None", ")", ":", "if", "tolerance", "is", "None", ":", "assert", "expected", "==", "actual", ",", "_assert_fail_message", "(", "message", ",", "expected", ",", "actual", ",", "\"!=\"", ",", "extra", ")", "else", ":", "assert", "isinstance", "(", "tolerance", ",", "_number_types", ")", ",", "(", "\"tolerance parameter to assert_eq must be a number: %r\"", "%", "tolerance", ")", "assert", "isinstance", "(", "expected", ",", "_number_types", ")", "and", "isinstance", "(", "actual", ",", "_number_types", ")", ",", "(", "\"parameters must be numbers when tolerance is specified: %r, %r\"", "%", "(", "expected", ",", "actual", ")", ")", "diff", "=", "abs", "(", "expected", "-", "actual", ")", "assert", "diff", "<=", "tolerance", ",", "_assert_fail_message", "(", "message", ",", "expected", ",", "actual", ",", "\"is more than %r away from\"", "%", "tolerance", ",", "extra", ")" ]
Raises an AssertionError if expected != actual. If tolerance is specified, raises an AssertionError if either - expected or actual isn't a number, or - the difference between expected and actual is larger than the tolerance.
[ "Raises", "an", "AssertionError", "if", "expected", "!", "=", "actual", "." ]
python
train
38
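A short usage sketch of assert_eq as defined above; the values are invented for illustration.

from qcore.asserts import assert_eq

# Exact comparison: raises AssertionError because 4 != 5.
try:
    assert_eq(4, 5, message="unexpected result")
except AssertionError as exc:
    print(exc)

# Numeric comparison with tolerance: passes, since |1.0 - 1.004| <= 0.01.
assert_eq(1.0, 1.004, tolerance=0.01)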
funilrys/PyFunceble
PyFunceble/production.py
https://github.com/funilrys/PyFunceble/blob/cdf69cbde120199171f7158e1c33635753e6e2f5/PyFunceble/production.py#L477-L520
def _update_setup_py(self): """ Update :code:`setup.py` so that it always have the right name. """ # We initiate the path to the file we have to filter. setup_py_path = PyFunceble.CURRENT_DIRECTORY + "setup.py" if self.is_dev_version(): # The current version is the `dev` version. # We map what we have to replace. # Format: {match:replacement} regexes = { 'name="PyFunceble-dev"': r'name=".*"', '"Development Status :: 4 - Beta"': r'"Development\sStatus\s::.*"', } elif self.is_master_version(): # The current version is the `dev` version. # We map what we have to replace. regexes = { 'name="PyFunceble"': r'name=".*"', '"Development Status :: 5 - Production/Stable"': r'"Development\sStatus\s::.*"', } else: # The current version is not the `dev` nor the `master` version. # We raise an exception to the user, the current branch is not meant for # production. raise Exception("Please switch to `dev` or `master` branch.") # We get the file content. to_update = File(setup_py_path).read() for replacement, regex in regexes.items(): # We loop through our map. # And we process the replacement. to_update = Regex(to_update, regex, replace_with=replacement).replace() # We finally replace the content of the file with the filtered # version. File(setup_py_path).write(to_update, overwrite=True)
[ "def", "_update_setup_py", "(", "self", ")", ":", "# We initiate the path to the file we have to filter.", "setup_py_path", "=", "PyFunceble", ".", "CURRENT_DIRECTORY", "+", "\"setup.py\"", "if", "self", ".", "is_dev_version", "(", ")", ":", "# The current version is the `dev` version.", "# We map what we have to replace.", "# Format: {match:replacement}", "regexes", "=", "{", "'name=\"PyFunceble-dev\"'", ":", "r'name=\".*\"'", ",", "'\"Development Status :: 4 - Beta\"'", ":", "r'\"Development\\sStatus\\s::.*\"'", ",", "}", "elif", "self", ".", "is_master_version", "(", ")", ":", "# The current version is the `dev` version.", "# We map what we have to replace.", "regexes", "=", "{", "'name=\"PyFunceble\"'", ":", "r'name=\".*\"'", ",", "'\"Development Status :: 5 - Production/Stable\"'", ":", "r'\"Development\\sStatus\\s::.*\"'", ",", "}", "else", ":", "# The current version is not the `dev` nor the `master` version.", "# We raise an exception to the user, the current branch is not meant for", "# production.", "raise", "Exception", "(", "\"Please switch to `dev` or `master` branch.\"", ")", "# We get the file content.", "to_update", "=", "File", "(", "setup_py_path", ")", ".", "read", "(", ")", "for", "replacement", ",", "regex", "in", "regexes", ".", "items", "(", ")", ":", "# We loop through our map.", "# And we process the replacement.", "to_update", "=", "Regex", "(", "to_update", ",", "regex", ",", "replace_with", "=", "replacement", ")", ".", "replace", "(", ")", "# We finally replace the content of the file with the filtered", "# version.", "File", "(", "setup_py_path", ")", ".", "write", "(", "to_update", ",", "overwrite", "=", "True", ")" ]
Update :code:`setup.py` so that it always have the right name.
[ "Update", ":", "code", ":", "setup", ".", "py", "so", "that", "it", "always", "have", "the", "right", "name", "." ]
python
test
36.977273
Erotemic/utool
utool/util_cplat.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_cplat.py#L1075-L1104
def get_flops(): """ # DOESNT WORK """ from sys import stdout from re import compile filename = "linpack.out" fpnum = r'\d+\.\d+E[+-]\d\d' fpnum_1 = fpnum + r' +' pattern = compile(r'^ *' + fpnum_1 + fpnum_1 + fpnum_1 + r'(' + fpnum + r') +' + fpnum_1 + fpnum + r' *\n$') speeds = [0.0, 1.0e75, 0.0] file = open(filename) count = 0 while file : line = file.readline() if not line : break if pattern.match(line) : count = count + 1 x = float(pattern.sub(r'\1', line)) if x < 1.0 : print(count) speeds[0] = speeds[0] + x speeds[1] = min(speeds[1], x) speeds[2] = max(speeds[2], x) file.close() if count != 0 : speeds[0] = speeds[0] / count stdout.write("%6.1f MFlops (%d from %.1f to %.1f)\n" % (speeds[0], count, speeds[1], speeds[2]))
[ "def", "get_flops", "(", ")", ":", "from", "sys", "import", "stdout", "from", "re", "import", "compile", "filename", "=", "\"linpack.out\"", "fpnum", "=", "r'\\d+\\.\\d+E[+-]\\d\\d'", "fpnum_1", "=", "fpnum", "+", "r' +'", "pattern", "=", "compile", "(", "r'^ *'", "+", "fpnum_1", "+", "fpnum_1", "+", "fpnum_1", "+", "r'('", "+", "fpnum", "+", "r') +'", "+", "fpnum_1", "+", "fpnum", "+", "r' *\\n$'", ")", "speeds", "=", "[", "0.0", ",", "1.0e75", ",", "0.0", "]", "file", "=", "open", "(", "filename", ")", "count", "=", "0", "while", "file", ":", "line", "=", "file", ".", "readline", "(", ")", "if", "not", "line", ":", "break", "if", "pattern", ".", "match", "(", "line", ")", ":", "count", "=", "count", "+", "1", "x", "=", "float", "(", "pattern", ".", "sub", "(", "r'\\1'", ",", "line", ")", ")", "if", "x", "<", "1.0", ":", "print", "(", "count", ")", "speeds", "[", "0", "]", "=", "speeds", "[", "0", "]", "+", "x", "speeds", "[", "1", "]", "=", "min", "(", "speeds", "[", "1", "]", ",", "x", ")", "speeds", "[", "2", "]", "=", "max", "(", "speeds", "[", "2", "]", ",", "x", ")", "file", ".", "close", "(", ")", "if", "count", "!=", "0", ":", "speeds", "[", "0", "]", "=", "speeds", "[", "0", "]", "/", "count", "stdout", ".", "write", "(", "\"%6.1f MFlops (%d from %.1f to %.1f)\\n\"", "%", "(", "speeds", "[", "0", "]", ",", "count", ",", "speeds", "[", "1", "]", ",", "speeds", "[", "2", "]", ")", ")" ]
# DOESNT WORK
[ "#", "DOESNT", "WORK" ]
python
train
29.866667
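The docstring above flags this routine as not working; one visible problem is the `while file :` loop, which never reflects end-of-file and relies on an odd readline pattern. Below is a hedged sketch of the same linpack.out parsing using a conventional loop over lines; the file name and the expected output format are carried over from the original as assumptions.

import re

def get_flops(filename="linpack.out"):
    fpnum = r'\d+\.\d+E[+-]\d\d'
    fpnum_1 = fpnum + r' +'
    pattern = re.compile(r'^ *' + fpnum_1 + fpnum_1 + fpnum_1 +
                         r'(' + fpnum + r') +' + fpnum_1 + fpnum + r' *\n$')
    total, lowest, highest, count = 0.0, 1.0e75, 0.0, 0
    with open(filename) as handle:
        for line in handle:                    # iterate lines instead of `while file:`
            match = pattern.match(line)
            if match:
                count += 1
                x = float(match.group(1))      # the captured MFlops figure
                total += x
                lowest = min(lowest, x)
                highest = max(highest, x)
    if count:
        print("%6.1f MFlops (%d from %.1f to %.1f)" % (total / count, count, lowest, highest))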
mitsei/dlkit
dlkit/services/commenting.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/services/commenting.py#L526-L534
def get_books(self): """Pass through to provider BookLookupSession.get_books""" # Implemented from kitosid template for - # osid.resource.BinLookupSession.get_bins_template catalogs = self._get_provider_session('book_lookup_session').get_books() cat_list = [] for cat in catalogs: cat_list.append(Book(self._provider_manager, cat, self._runtime, self._proxy)) return BookList(cat_list)
[ "def", "get_books", "(", "self", ")", ":", "# Implemented from kitosid template for -", "# osid.resource.BinLookupSession.get_bins_template", "catalogs", "=", "self", ".", "_get_provider_session", "(", "'book_lookup_session'", ")", ".", "get_books", "(", ")", "cat_list", "=", "[", "]", "for", "cat", "in", "catalogs", ":", "cat_list", ".", "append", "(", "Book", "(", "self", ".", "_provider_manager", ",", "cat", ",", "self", ".", "_runtime", ",", "self", ".", "_proxy", ")", ")", "return", "BookList", "(", "cat_list", ")" ]
Pass through to provider BookLookupSession.get_books
[ "Pass", "through", "to", "provider", "BookLookupSession", ".", "get_books" ]
python
train
49.444444
pysal/giddy
giddy/markov.py
https://github.com/pysal/giddy/blob/13fae6c18933614be78e91a6b5060693bea33a04/giddy/markov.py#L811-L830
def summary(self, file_name=None): """ A summary method to call the Markov homogeneity test to test for temporally lagged spatial dependence. To learn more about the properties of the tests, refer to :cite:`Rey2016a` and :cite:`Kang2018`. """ class_names = ["C%d" % i for i in range(self.k)] regime_names = ["LAG%d" % i for i in range(self.k)] ht = homogeneity(self.T, class_names=class_names, regime_names=regime_names) title = "Spatial Markov Test" if self.variable_name: title = title + ": " + self.variable_name if file_name: ht.summary(file_name=file_name, title=title) else: ht.summary(title=title)
[ "def", "summary", "(", "self", ",", "file_name", "=", "None", ")", ":", "class_names", "=", "[", "\"C%d\"", "%", "i", "for", "i", "in", "range", "(", "self", ".", "k", ")", "]", "regime_names", "=", "[", "\"LAG%d\"", "%", "i", "for", "i", "in", "range", "(", "self", ".", "k", ")", "]", "ht", "=", "homogeneity", "(", "self", ".", "T", ",", "class_names", "=", "class_names", ",", "regime_names", "=", "regime_names", ")", "title", "=", "\"Spatial Markov Test\"", "if", "self", ".", "variable_name", ":", "title", "=", "title", "+", "\": \"", "+", "self", ".", "variable_name", "if", "file_name", ":", "ht", ".", "summary", "(", "file_name", "=", "file_name", ",", "title", "=", "title", ")", "else", ":", "ht", ".", "summary", "(", "title", "=", "title", ")" ]
A summary method to call the Markov homogeneity test to test for temporally lagged spatial dependence. To learn more about the properties of the tests, refer to :cite:`Rey2016a` and :cite:`Kang2018`.
[ "A", "summary", "method", "to", "call", "the", "Markov", "homogeneity", "test", "to", "test", "for", "temporally", "lagged", "spatial", "dependence", "." ]
python
train
37.6
bachya/pyflunearyou
pyflunearyou/cdc.py
https://github.com/bachya/pyflunearyou/blob/16a2f839c8df851e925e010a6b5c5708386febac/pyflunearyou/cdc.py#L58-L67
async def status_by_state(self, state: str) -> dict: """Return the CDC status for the specified state.""" data = await self.raw_cdc_data() try: info = next((v for k, v in data.items() if state in k)) except StopIteration: return {} return adjust_status(info)
[ "async", "def", "status_by_state", "(", "self", ",", "state", ":", "str", ")", "->", "dict", ":", "data", "=", "await", "self", ".", "raw_cdc_data", "(", ")", "try", ":", "info", "=", "next", "(", "(", "v", "for", "k", ",", "v", "in", "data", ".", "items", "(", ")", "if", "state", "in", "k", ")", ")", "except", "StopIteration", ":", "return", "{", "}", "return", "adjust_status", "(", "info", ")" ]
Return the CDC status for the specified state.
[ "Return", "the", "CDC", "status", "for", "the", "specified", "state", "." ]
python
train
31.5
konstantint/PassportEye
passporteye/mrz/image.py
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/image.py#L328-L343
def read_mrz(file, save_roi=False, extra_cmdline_params=''): """The main interface function to this module, encapsulating the recognition pipeline. Given an image filename, runs MRZPipeline on it, returning the parsed MRZ object. :param file: A filename or a stream to read the file data from. :param save_roi: when this is True, the .aux['roi'] field will contain the Region of Interest where the MRZ was parsed from. :param extra_cmdline_params:extra parameters to the ocr.py """ p = MRZPipeline(file, extra_cmdline_params) mrz = p.result if mrz is not None: mrz.aux['text'] = p['text'] if save_roi: mrz.aux['roi'] = p['roi'] return mrz
[ "def", "read_mrz", "(", "file", ",", "save_roi", "=", "False", ",", "extra_cmdline_params", "=", "''", ")", ":", "p", "=", "MRZPipeline", "(", "file", ",", "extra_cmdline_params", ")", "mrz", "=", "p", ".", "result", "if", "mrz", "is", "not", "None", ":", "mrz", ".", "aux", "[", "'text'", "]", "=", "p", "[", "'text'", "]", "if", "save_roi", ":", "mrz", ".", "aux", "[", "'roi'", "]", "=", "p", "[", "'roi'", "]", "return", "mrz" ]
The main interface function to this module, encapsulating the recognition pipeline. Given an image filename, runs MRZPipeline on it, returning the parsed MRZ object. :param file: A filename or a stream to read the file data from. :param save_roi: when this is True, the .aux['roi'] field will contain the Region of Interest where the MRZ was parsed from. :param extra_cmdline_params:extra parameters to the ocr.py
[ "The", "main", "interface", "function", "to", "this", "module", "encapsulating", "the", "recognition", "pipeline", ".", "Given", "an", "image", "filename", "runs", "MRZPipeline", "on", "it", "returning", "the", "parsed", "MRZ", "object", "." ]
python
train
43.5
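A brief usage sketch following the record above; the top-level import is the package's documented entry point and the file name is hypothetical.

from passporteye import read_mrz

mrz = read_mrz('passport_scan.png', save_roi=True)
if mrz is not None:
    print(mrz.to_dict())          # parsed MRZ fields as a plain dictionary
    roi = mrz.aux['roi']          # image region the MRZ was parsed from (save_roi=True)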
StackStorm/pybind
pybind/slxos/v17s_1_02/mpls_state/rsvp/sessions/psbs/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/mpls_state/rsvp/sessions/psbs/__init__.py#L2844-L2867
def _set_session_style(self, v, load=False): """ Setter method for session_style, mapped from YANG variable /mpls_state/rsvp/sessions/psbs/session_style (session-reservation-style) If this variable is read-only (config: false) in the source YANG file, then _set_session_style is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_session_style() directly. YANG Description: Style of session """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'reservation-style-shared-explicit': {'value': 2}, u'reservation-style-wildcard-filter': {'value': 0}, u'reservation-style-unknown': {'value': 3}, u'reservation-style-fixed-filter': {'value': 1}},), is_leaf=True, yang_name="session-style", rest_name="session-style", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='session-reservation-style', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """session_style must be of a type compatible with session-reservation-style""", 'defined-type': "brocade-mpls-operational:session-reservation-style", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'reservation-style-shared-explicit': {'value': 2}, u'reservation-style-wildcard-filter': {'value': 0}, u'reservation-style-unknown': {'value': 3}, u'reservation-style-fixed-filter': {'value': 1}},), is_leaf=True, yang_name="session-style", rest_name="session-style", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='session-reservation-style', is_config=False)""", }) self.__session_style = t if hasattr(self, '_set'): self._set()
[ "def", "_set_session_style", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "RestrictedClassType", "(", "base_type", "=", "unicode", ",", "restriction_type", "=", "\"dict_key\"", ",", "restriction_arg", "=", "{", "u'reservation-style-shared-explicit'", ":", "{", "'value'", ":", "2", "}", ",", "u'reservation-style-wildcard-filter'", ":", "{", "'value'", ":", "0", "}", ",", "u'reservation-style-unknown'", ":", "{", "'value'", ":", "3", "}", ",", "u'reservation-style-fixed-filter'", ":", "{", "'value'", ":", "1", "}", "}", ",", ")", ",", "is_leaf", "=", "True", ",", "yang_name", "=", "\"session-style\"", ",", "rest_name", "=", "\"session-style\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-mpls-operational'", ",", "defining_module", "=", "'brocade-mpls-operational'", ",", "yang_type", "=", "'session-reservation-style'", ",", "is_config", "=", "False", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"session_style must be of a type compatible with session-reservation-style\"\"\"", ",", "'defined-type'", ":", "\"brocade-mpls-operational:session-reservation-style\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'reservation-style-shared-explicit': {'value': 2}, u'reservation-style-wildcard-filter': {'value': 0}, u'reservation-style-unknown': {'value': 3}, u'reservation-style-fixed-filter': {'value': 1}},), is_leaf=True, yang_name=\"session-style\", rest_name=\"session-style\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='session-reservation-style', is_config=False)\"\"\"", ",", "}", ")", "self", ".", "__session_style", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for session_style, mapped from YANG variable /mpls_state/rsvp/sessions/psbs/session_style (session-reservation-style) If this variable is read-only (config: false) in the source YANG file, then _set_session_style is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_session_style() directly. YANG Description: Style of session
[ "Setter", "method", "for", "session_style", "mapped", "from", "YANG", "variable", "/", "mpls_state", "/", "rsvp", "/", "sessions", "/", "psbs", "/", "session_style", "(", "session", "-", "reservation", "-", "style", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_session_style", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_session_style", "()", "directly", "." ]
python
train
95.916667
softlayer/softlayer-python
SoftLayer/managers/hardware.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/hardware.py#L616-L639
def reflash_firmware(self, hardware_id, ipmi=True, raid_controller=True, bios=True): """Reflash hardware firmware. This will cause the server to be unavailable for ~60 minutes. The firmware will not be upgraded but rather reflashed to the version installed. :param int hardware_id: The ID of the hardware to have its firmware reflashed. :param bool ipmi: Reflash the ipmi firmware. :param bool raid_controller: Reflash the raid controller firmware. :param bool bios: Reflash the bios firmware. Example:: # Check the servers active transactions to see progress result = mgr.reflash_firmware(hardware_id=1234) """ return self.hardware.createFirmwareReflashTransaction( bool(ipmi), bool(raid_controller), bool(bios), id=hardware_id)
[ "def", "reflash_firmware", "(", "self", ",", "hardware_id", ",", "ipmi", "=", "True", ",", "raid_controller", "=", "True", ",", "bios", "=", "True", ")", ":", "return", "self", ".", "hardware", ".", "createFirmwareReflashTransaction", "(", "bool", "(", "ipmi", ")", ",", "bool", "(", "raid_controller", ")", ",", "bool", "(", "bios", ")", ",", "id", "=", "hardware_id", ")" ]
Reflash hardware firmware. This will cause the server to be unavailable for ~60 minutes. The firmware will not be upgraded but rather reflashed to the version installed. :param int hardware_id: The ID of the hardware to have its firmware reflashed. :param bool ipmi: Reflash the ipmi firmware. :param bool raid_controller: Reflash the raid controller firmware. :param bool bios: Reflash the bios firmware. Example:: # Check the servers active transactions to see progress result = mgr.reflash_firmware(hardware_id=1234)
[ "Reflash", "hardware", "firmware", "." ]
python
train
39.958333
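An expanded version of the docstring's example above, assuming the usual softlayer-python bootstrap with credentials read from the environment; the hardware id is a placeholder.

import SoftLayer

client = SoftLayer.create_client_from_env()
mgr = SoftLayer.HardwareManager(client)

# Reflash IPMI and BIOS firmware only, leaving the RAID controller untouched.
result = mgr.reflash_firmware(hardware_id=1234, raid_controller=False)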
idlesign/django-siteprefs
siteprefs/toolbox.py
https://github.com/idlesign/django-siteprefs/blob/3d6bf5e64220fe921468a36fce68e15d7947cf92/siteprefs/toolbox.py#L183-L193
def unpatch_locals(depth=3): """Restores the original values of module variables considered preferences if they are still PatchedLocal and not PrefProxy. """ for name, locals_dict in traverse_local_prefs(depth): if isinstance(locals_dict[name], PatchedLocal): locals_dict[name] = locals_dict[name].val del get_frame_locals(depth)[__PATCHED_LOCALS_SENTINEL]
[ "def", "unpatch_locals", "(", "depth", "=", "3", ")", ":", "for", "name", ",", "locals_dict", "in", "traverse_local_prefs", "(", "depth", ")", ":", "if", "isinstance", "(", "locals_dict", "[", "name", "]", ",", "PatchedLocal", ")", ":", "locals_dict", "[", "name", "]", "=", "locals_dict", "[", "name", "]", ".", "val", "del", "get_frame_locals", "(", "depth", ")", "[", "__PATCHED_LOCALS_SENTINEL", "]" ]
Restores the original values of module variables considered preferences if they are still PatchedLocal and not PrefProxy.
[ "Restores", "the", "original", "values", "of", "module", "variables", "considered", "preferences", "if", "they", "are", "still", "PatchedLocal", "and", "not", "PrefProxy", "." ]
python
valid
35.636364
OpenKMIP/PyKMIP
kmip/core/messages/payloads/signature_verify.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/messages/payloads/signature_verify.py#L323-L381
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0): """ Write the data encoding the SignatureVerify request payload to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the data attribute is not defined. """ local_stream = utils.BytearrayStream() if self._unique_identifier: self._unique_identifier.write( local_stream, kmip_version=kmip_version ) if self._cryptographic_parameters: self._cryptographic_parameters.write( local_stream, kmip_version=kmip_version ) if self._data: self._data.write(local_stream, kmip_version=kmip_version) if self._digested_data: self._digested_data.write(local_stream, kmip_version=kmip_version) if self._signature_data: self._signature_data.write( local_stream, kmip_version=kmip_version ) if self._correlation_value: self._correlation_value.write( local_stream, kmip_version=kmip_version ) if self._init_indicator: self._init_indicator.write( local_stream, kmip_version=kmip_version ) if self._final_indicator: self._final_indicator.write( local_stream, kmip_version=kmip_version ) self.length = local_stream.length() super(SignatureVerifyRequestPayload, self).write( output_stream, kmip_version=kmip_version ) output_stream.write(local_stream.buffer)
[ "def", "write", "(", "self", ",", "output_stream", ",", "kmip_version", "=", "enums", ".", "KMIPVersion", ".", "KMIP_1_0", ")", ":", "local_stream", "=", "utils", ".", "BytearrayStream", "(", ")", "if", "self", ".", "_unique_identifier", ":", "self", ".", "_unique_identifier", ".", "write", "(", "local_stream", ",", "kmip_version", "=", "kmip_version", ")", "if", "self", ".", "_cryptographic_parameters", ":", "self", ".", "_cryptographic_parameters", ".", "write", "(", "local_stream", ",", "kmip_version", "=", "kmip_version", ")", "if", "self", ".", "_data", ":", "self", ".", "_data", ".", "write", "(", "local_stream", ",", "kmip_version", "=", "kmip_version", ")", "if", "self", ".", "_digested_data", ":", "self", ".", "_digested_data", ".", "write", "(", "local_stream", ",", "kmip_version", "=", "kmip_version", ")", "if", "self", ".", "_signature_data", ":", "self", ".", "_signature_data", ".", "write", "(", "local_stream", ",", "kmip_version", "=", "kmip_version", ")", "if", "self", ".", "_correlation_value", ":", "self", ".", "_correlation_value", ".", "write", "(", "local_stream", ",", "kmip_version", "=", "kmip_version", ")", "if", "self", ".", "_init_indicator", ":", "self", ".", "_init_indicator", ".", "write", "(", "local_stream", ",", "kmip_version", "=", "kmip_version", ")", "if", "self", ".", "_final_indicator", ":", "self", ".", "_final_indicator", ".", "write", "(", "local_stream", ",", "kmip_version", "=", "kmip_version", ")", "self", ".", "length", "=", "local_stream", ".", "length", "(", ")", "super", "(", "SignatureVerifyRequestPayload", ",", "self", ")", ".", "write", "(", "output_stream", ",", "kmip_version", "=", "kmip_version", ")", "output_stream", ".", "write", "(", "local_stream", ".", "buffer", ")" ]
Write the data encoding the SignatureVerify request payload to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the data attribute is not defined.
[ "Write", "the", "data", "encoding", "the", "SignatureVerify", "request", "payload", "to", "a", "stream", "." ]
python
test
34.813559
lmjohns3/theanets
theanets/layers/base.py
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/layers/base.py#L148-L154
def input_size(self): '''Size of layer input (for layers with one input).''' shape = self.input_shape if shape is None: raise util.ConfigurationError( 'undefined input size for layer "{}"'.format(self.name)) return shape[-1]
[ "def", "input_size", "(", "self", ")", ":", "shape", "=", "self", ".", "input_shape", "if", "shape", "is", "None", ":", "raise", "util", ".", "ConfigurationError", "(", "'undefined input size for layer \"{}\"'", ".", "format", "(", "self", ".", "name", ")", ")", "return", "shape", "[", "-", "1", "]" ]
Size of layer input (for layers with one input).
[ "Size", "of", "layer", "input", "(", "for", "layers", "with", "one", "input", ")", "." ]
python
test
39.714286
google/grr
grr/client_builder/grr_response_client_builder/build.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client_builder/grr_response_client_builder/build.py#L872-L1000
def MakeDeployableBinary(self, template_path, output_path): """This will add the config to the client template and create a .rpm.""" rpmbuild_binary = "/usr/bin/rpmbuild" if not os.path.exists(rpmbuild_binary): logging.error("rpmbuild not found, unable to repack client.") return with utils.TempDirectory() as tmp_dir: template_dir = os.path.join(tmp_dir, "dist") utils.EnsureDirExists(template_dir) zf = zipfile.ZipFile(template_path) for name in zf.namelist(): dirname = os.path.dirname(name) utils.EnsureDirExists(os.path.join(template_dir, dirname)) with open(os.path.join(template_dir, name), "wb") as fd: fd.write(zf.read(name)) # Set up a RPM building environment. rpm_root_dir = os.path.join(tmp_dir, "rpmbuild") rpm_build_dir = os.path.join(rpm_root_dir, "BUILD") utils.EnsureDirExists(rpm_build_dir) rpm_buildroot_dir = os.path.join(rpm_root_dir, "BUILDROOT") utils.EnsureDirExists(rpm_buildroot_dir) rpm_rpms_dir = os.path.join(rpm_root_dir, "RPMS") utils.EnsureDirExists(rpm_rpms_dir) rpm_specs_dir = os.path.join(rpm_root_dir, "SPECS") utils.EnsureDirExists(rpm_specs_dir) template_binary_dir = os.path.join(tmp_dir, "dist/rpmbuild/grr-client") target_binary_dir = "%s%s" % ( rpm_build_dir, config.CONFIG.Get("ClientBuilder.target_dir", context=self.context)) utils.EnsureDirExists(os.path.dirname(target_binary_dir)) try: shutil.rmtree(target_binary_dir) except OSError: pass # TODO(user):pytype: incorrect move() definition in typeshed. # pytype: disable=wrong-arg-types shutil.move(template_binary_dir, target_binary_dir) # pytype: enable=wrong-arg-types client_name = config.CONFIG.Get("Client.name", context=self.context) client_binary_name = config.CONFIG.Get( "Client.binary_name", context=self.context) if client_binary_name != "grr-client": # TODO(user):pytype: incorrect move() definition in typeshed. # pytype: disable=wrong-arg-types shutil.move( os.path.join(target_binary_dir, "grr-client"), os.path.join(target_binary_dir, client_binary_name)) # pytype: enable=wrong-arg-types if config.CONFIG.Get("Client.fleetspeak_enabled", context=self.context): self._GenerateFleetspeakConfig(template_dir, rpm_build_dir) if not config.CONFIG.Get( "Client.fleetspeak_service_name", context=self.context): # The Fleetspeak service name is required when generating the RPM # spec file. raise BuildError("Client.fleetspeak_service_name is not set.") else: self._GenerateInitConfigs(template_dir, rpm_build_dir) # Generate spec spec_filename = os.path.join(rpm_specs_dir, "%s.spec" % client_name) self.GenerateFile( os.path.join(tmp_dir, "dist/rpmbuild/grr.spec.in"), spec_filename) # Generate prelinking blacklist file prelink_target_filename = os.path.join(rpm_build_dir, "etc/prelink.conf.d", "%s.conf" % client_name) utils.EnsureDirExists(os.path.dirname(prelink_target_filename)) self.GenerateFile( os.path.join(tmp_dir, "dist/rpmbuild/prelink_blacklist.conf.in"), prelink_target_filename) # Create a client config. client_context = ["Client Context"] + self.context client_config_content = self.GetClientConfig(client_context) with open( os.path.join( target_binary_dir, config.CONFIG.Get( "ClientBuilder.config_filename", context=self.context)), "wb") as fd: fd.write(client_config_content) # Set the daemon to executable. 
os.chmod(os.path.join(target_binary_dir, client_binary_name), 0o755) client_arch = config.CONFIG.Get("Template.arch", context=self.context) if client_arch == "amd64": client_arch = "x86_64" command = [ rpmbuild_binary, "--define", "_topdir " + rpm_root_dir, "--target", client_arch, "--buildroot", rpm_buildroot_dir, "-bb", spec_filename ] try: subprocess.check_output(command, stderr=subprocess.STDOUT) except subprocess.CalledProcessError as e: logging.error("Error calling %s.", command) logging.error(e.output) raise client_version = config.CONFIG.Get( "Template.version_string", context=self.context) rpm_filename = os.path.join( rpm_rpms_dir, client_arch, "%s-%s-1.%s.rpm" % (client_name, client_version, client_arch)) utils.EnsureDirExists(os.path.dirname(output_path)) shutil.move(rpm_filename, output_path) logging.info("Created package %s", output_path) self.Sign(output_path) return output_path
[ "def", "MakeDeployableBinary", "(", "self", ",", "template_path", ",", "output_path", ")", ":", "rpmbuild_binary", "=", "\"/usr/bin/rpmbuild\"", "if", "not", "os", ".", "path", ".", "exists", "(", "rpmbuild_binary", ")", ":", "logging", ".", "error", "(", "\"rpmbuild not found, unable to repack client.\"", ")", "return", "with", "utils", ".", "TempDirectory", "(", ")", "as", "tmp_dir", ":", "template_dir", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "\"dist\"", ")", "utils", ".", "EnsureDirExists", "(", "template_dir", ")", "zf", "=", "zipfile", ".", "ZipFile", "(", "template_path", ")", "for", "name", "in", "zf", ".", "namelist", "(", ")", ":", "dirname", "=", "os", ".", "path", ".", "dirname", "(", "name", ")", "utils", ".", "EnsureDirExists", "(", "os", ".", "path", ".", "join", "(", "template_dir", ",", "dirname", ")", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "template_dir", ",", "name", ")", ",", "\"wb\"", ")", "as", "fd", ":", "fd", ".", "write", "(", "zf", ".", "read", "(", "name", ")", ")", "# Set up a RPM building environment.", "rpm_root_dir", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "\"rpmbuild\"", ")", "rpm_build_dir", "=", "os", ".", "path", ".", "join", "(", "rpm_root_dir", ",", "\"BUILD\"", ")", "utils", ".", "EnsureDirExists", "(", "rpm_build_dir", ")", "rpm_buildroot_dir", "=", "os", ".", "path", ".", "join", "(", "rpm_root_dir", ",", "\"BUILDROOT\"", ")", "utils", ".", "EnsureDirExists", "(", "rpm_buildroot_dir", ")", "rpm_rpms_dir", "=", "os", ".", "path", ".", "join", "(", "rpm_root_dir", ",", "\"RPMS\"", ")", "utils", ".", "EnsureDirExists", "(", "rpm_rpms_dir", ")", "rpm_specs_dir", "=", "os", ".", "path", ".", "join", "(", "rpm_root_dir", ",", "\"SPECS\"", ")", "utils", ".", "EnsureDirExists", "(", "rpm_specs_dir", ")", "template_binary_dir", "=", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "\"dist/rpmbuild/grr-client\"", ")", "target_binary_dir", "=", "\"%s%s\"", "%", "(", "rpm_build_dir", ",", "config", ".", "CONFIG", ".", "Get", "(", "\"ClientBuilder.target_dir\"", ",", "context", "=", "self", ".", "context", ")", ")", "utils", ".", "EnsureDirExists", "(", "os", ".", "path", ".", "dirname", "(", "target_binary_dir", ")", ")", "try", ":", "shutil", ".", "rmtree", "(", "target_binary_dir", ")", "except", "OSError", ":", "pass", "# TODO(user):pytype: incorrect move() definition in typeshed.", "# pytype: disable=wrong-arg-types", "shutil", ".", "move", "(", "template_binary_dir", ",", "target_binary_dir", ")", "# pytype: enable=wrong-arg-types", "client_name", "=", "config", ".", "CONFIG", ".", "Get", "(", "\"Client.name\"", ",", "context", "=", "self", ".", "context", ")", "client_binary_name", "=", "config", ".", "CONFIG", ".", "Get", "(", "\"Client.binary_name\"", ",", "context", "=", "self", ".", "context", ")", "if", "client_binary_name", "!=", "\"grr-client\"", ":", "# TODO(user):pytype: incorrect move() definition in typeshed.", "# pytype: disable=wrong-arg-types", "shutil", ".", "move", "(", "os", ".", "path", ".", "join", "(", "target_binary_dir", ",", "\"grr-client\"", ")", ",", "os", ".", "path", ".", "join", "(", "target_binary_dir", ",", "client_binary_name", ")", ")", "# pytype: enable=wrong-arg-types", "if", "config", ".", "CONFIG", ".", "Get", "(", "\"Client.fleetspeak_enabled\"", ",", "context", "=", "self", ".", "context", ")", ":", "self", ".", "_GenerateFleetspeakConfig", "(", "template_dir", ",", "rpm_build_dir", ")", "if", "not", "config", ".", "CONFIG", ".", "Get", "(", 
"\"Client.fleetspeak_service_name\"", ",", "context", "=", "self", ".", "context", ")", ":", "# The Fleetspeak service name is required when generating the RPM", "# spec file.", "raise", "BuildError", "(", "\"Client.fleetspeak_service_name is not set.\"", ")", "else", ":", "self", ".", "_GenerateInitConfigs", "(", "template_dir", ",", "rpm_build_dir", ")", "# Generate spec", "spec_filename", "=", "os", ".", "path", ".", "join", "(", "rpm_specs_dir", ",", "\"%s.spec\"", "%", "client_name", ")", "self", ".", "GenerateFile", "(", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "\"dist/rpmbuild/grr.spec.in\"", ")", ",", "spec_filename", ")", "# Generate prelinking blacklist file", "prelink_target_filename", "=", "os", ".", "path", ".", "join", "(", "rpm_build_dir", ",", "\"etc/prelink.conf.d\"", ",", "\"%s.conf\"", "%", "client_name", ")", "utils", ".", "EnsureDirExists", "(", "os", ".", "path", ".", "dirname", "(", "prelink_target_filename", ")", ")", "self", ".", "GenerateFile", "(", "os", ".", "path", ".", "join", "(", "tmp_dir", ",", "\"dist/rpmbuild/prelink_blacklist.conf.in\"", ")", ",", "prelink_target_filename", ")", "# Create a client config.", "client_context", "=", "[", "\"Client Context\"", "]", "+", "self", ".", "context", "client_config_content", "=", "self", ".", "GetClientConfig", "(", "client_context", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "target_binary_dir", ",", "config", ".", "CONFIG", ".", "Get", "(", "\"ClientBuilder.config_filename\"", ",", "context", "=", "self", ".", "context", ")", ")", ",", "\"wb\"", ")", "as", "fd", ":", "fd", ".", "write", "(", "client_config_content", ")", "# Set the daemon to executable.", "os", ".", "chmod", "(", "os", ".", "path", ".", "join", "(", "target_binary_dir", ",", "client_binary_name", ")", ",", "0o755", ")", "client_arch", "=", "config", ".", "CONFIG", ".", "Get", "(", "\"Template.arch\"", ",", "context", "=", "self", ".", "context", ")", "if", "client_arch", "==", "\"amd64\"", ":", "client_arch", "=", "\"x86_64\"", "command", "=", "[", "rpmbuild_binary", ",", "\"--define\"", ",", "\"_topdir \"", "+", "rpm_root_dir", ",", "\"--target\"", ",", "client_arch", ",", "\"--buildroot\"", ",", "rpm_buildroot_dir", ",", "\"-bb\"", ",", "spec_filename", "]", "try", ":", "subprocess", ".", "check_output", "(", "command", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "except", "subprocess", ".", "CalledProcessError", "as", "e", ":", "logging", ".", "error", "(", "\"Error calling %s.\"", ",", "command", ")", "logging", ".", "error", "(", "e", ".", "output", ")", "raise", "client_version", "=", "config", ".", "CONFIG", ".", "Get", "(", "\"Template.version_string\"", ",", "context", "=", "self", ".", "context", ")", "rpm_filename", "=", "os", ".", "path", ".", "join", "(", "rpm_rpms_dir", ",", "client_arch", ",", "\"%s-%s-1.%s.rpm\"", "%", "(", "client_name", ",", "client_version", ",", "client_arch", ")", ")", "utils", ".", "EnsureDirExists", "(", "os", ".", "path", ".", "dirname", "(", "output_path", ")", ")", "shutil", ".", "move", "(", "rpm_filename", ",", "output_path", ")", "logging", ".", "info", "(", "\"Created package %s\"", ",", "output_path", ")", "self", ".", "Sign", "(", "output_path", ")", "return", "output_path" ]
This will add the config to the client template and create a .rpm.
[ "This", "will", "add", "the", "config", "to", "the", "client", "template", "and", "create", "a", ".", "rpm", "." ]
python
train
37.844961
synw/dataswim
dataswim/charts/__init__.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/charts/__init__.py#L83-L96
def sbar_(self, stack_index=None, label=None, style=None, opts=None, options={}): """ Get a stacked bar chart """ self.opts(dict(stack_index=stack_index, color_index=stack_index)) try: if stack_index is None: self.err(self.sbar_, "Please provide a stack index parameter") options["stack_index"] = stack_index return self._get_chart("bar", style=style, opts=opts, label=label, options=options) except Exception as e: self.err(e, self.sbar_, "Can not draw stacked bar chart")
[ "def", "sbar_", "(", "self", ",", "stack_index", "=", "None", ",", "label", "=", "None", ",", "style", "=", "None", ",", "opts", "=", "None", ",", "options", "=", "{", "}", ")", ":", "self", ".", "opts", "(", "dict", "(", "stack_index", "=", "stack_index", ",", "color_index", "=", "stack_index", ")", ")", "try", ":", "if", "stack_index", "is", "None", ":", "self", ".", "err", "(", "self", ".", "sbar_", ",", "\"Please provide a stack index parameter\"", ")", "options", "[", "\"stack_index\"", "]", "=", "stack_index", "return", "self", ".", "_get_chart", "(", "\"bar\"", ",", "style", "=", "style", ",", "opts", "=", "opts", ",", "label", "=", "label", ",", "options", "=", "options", ")", "except", "Exception", "as", "e", ":", "self", ".", "err", "(", "e", ",", "self", ".", "sbar_", ",", "\"Can not draw stacked bar chart\"", ")" ]
Get a stacked bar chart
[ "Get", "a", "stacked", "bar", "chart" ]
python
train
36
twisted/vertex
vertex/q2q.py
https://github.com/twisted/vertex/blob/feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca/vertex/q2q.py#L1134-L1158
def _write(self, body, id): """ Respond to a WRITE command, sending some data over a virtual channel created by VIRTUAL. The answer is simply an acknowledgement, as it is simply meant to note that the write went through without errors. An occurrence of I{Write} on the wire, together with the response generated by this method, might have this apperance:: C: -Command: Write C: -Ask: 1 C: -Length: 13 C: Id: [email protected]>[email protected]:q2q-example:0 C: C: HELLO WORLD C: S: -Answer: 1 S: """ if id not in self.connections: raise error.ConnectionDone() connection = self.connections[id] connection.dataReceived(body) return {}
[ "def", "_write", "(", "self", ",", "body", ",", "id", ")", ":", "if", "id", "not", "in", "self", ".", "connections", ":", "raise", "error", ".", "ConnectionDone", "(", ")", "connection", "=", "self", ".", "connections", "[", "id", "]", "connection", ".", "dataReceived", "(", "body", ")", "return", "{", "}" ]
Respond to a WRITE command, sending some data over a virtual channel created by VIRTUAL. The answer is simply an acknowledgement, as it is simply meant to note that the write went through without errors. An occurrence of I{Write} on the wire, together with the response generated by this method, might have this apperance:: C: -Command: Write C: -Ask: 1 C: -Length: 13 C: Id: [email protected]>[email protected]:q2q-example:0 C: C: HELLO WORLD C: S: -Answer: 1 S:
[ "Respond", "to", "a", "WRITE", "command", "sending", "some", "data", "over", "a", "virtual", "channel", "created", "by", "VIRTUAL", ".", "The", "answer", "is", "simply", "an", "acknowledgement", "as", "it", "is", "simply", "meant", "to", "note", "that", "the", "write", "went", "through", "without", "errors", "." ]
python
train
33.08
ryanvarley/ExoData
exodata/astroclasses.py
https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/astroclasses.py#L645-L657
def isTransiting(self): """ Checks the the istransiting tag to see if the planet transits. Note that this only works as of catalogue version ee12343381ae4106fd2db908e25ffc537a2ee98c (11th March 2014) where the istransiting tag was implemented """ try: isTransiting = self.params['istransiting'] except KeyError: return False if isTransiting == '1': return True else: return False
[ "def", "isTransiting", "(", "self", ")", ":", "try", ":", "isTransiting", "=", "self", ".", "params", "[", "'istransiting'", "]", "except", "KeyError", ":", "return", "False", "if", "isTransiting", "==", "'1'", ":", "return", "True", "else", ":", "return", "False" ]
Checks the the istransiting tag to see if the planet transits. Note that this only works as of catalogue version ee12343381ae4106fd2db908e25ffc537a2ee98c (11th March 2014) where the istransiting tag was implemented
[ "Checks", "the", "the", "istransiting", "tag", "to", "see", "if", "the", "planet", "transits", ".", "Note", "that", "this", "only", "works", "as", "of", "catalogue", "version", "ee12343381ae4106fd2db908e25ffc537a2ee98c", "(", "11th", "March", "2014", ")", "where", "the", "istransiting", "tag", "was", "implemented" ]
python
train
36.384615
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/util/event.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/util/event.py#L375-L389
def disconnect(self, callback=None): """Disconnect a callback from this emitter. If no callback is specified, then *all* callbacks are removed. If the callback was not already connected, then the call does nothing. """ if callback is None: self._callbacks = [] self._callback_refs = [] else: callback = self._normalize_cb(callback) if callback in self._callbacks: idx = self._callbacks.index(callback) self._callbacks.pop(idx) self._callback_refs.pop(idx)
[ "def", "disconnect", "(", "self", ",", "callback", "=", "None", ")", ":", "if", "callback", "is", "None", ":", "self", ".", "_callbacks", "=", "[", "]", "self", ".", "_callback_refs", "=", "[", "]", "else", ":", "callback", "=", "self", ".", "_normalize_cb", "(", "callback", ")", "if", "callback", "in", "self", ".", "_callbacks", ":", "idx", "=", "self", ".", "_callbacks", ".", "index", "(", "callback", ")", "self", ".", "_callbacks", ".", "pop", "(", "idx", ")", "self", ".", "_callback_refs", ".", "pop", "(", "idx", ")" ]
Disconnect a callback from this emitter. If no callback is specified, then *all* callbacks are removed. If the callback was not already connected, then the call does nothing.
[ "Disconnect", "a", "callback", "from", "this", "emitter", "." ]
python
train
39.066667
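A small sketch of connecting, emitting and disconnecting a callback; it imports EventEmitter from upstream vispy rather than the vendored copy referenced above, which is an assumption about the installed package.

from vispy.util.event import EventEmitter

def on_event(event):
    print("received", event.type)

emitter = EventEmitter(type='demo')
emitter.connect(on_event)
emitter()                      # emit once, calling on_event
emitter.disconnect(on_event)   # remove just this callback
emitter.disconnect()           # or remove all callbacks at once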
bububa/pyTOP
pyTOP/packages/requests/utils.py
https://github.com/bububa/pyTOP/blob/1e48009bcfe886be392628244b370e6374e1f2b2/pyTOP/packages/requests/utils.py#L166-L178
def randombytes(n): """Return n random bytes.""" # Use /dev/urandom if it is available. Fall back to random module # if not. It might be worthwhile to extend this function to use # other platform-specific mechanisms for getting random bytes. if os.path.exists("/dev/urandom"): f = open("/dev/urandom") s = f.read(n) f.close() return s else: L = [chr(random.randrange(0, 256)) for i in range(n)] return "".join(L)
[ "def", "randombytes", "(", "n", ")", ":", "# Use /dev/urandom if it is available. Fall back to random module", "# if not. It might be worthwhile to extend this function to use", "# other platform-specific mechanisms for getting random bytes.", "if", "os", ".", "path", ".", "exists", "(", "\"/dev/urandom\"", ")", ":", "f", "=", "open", "(", "\"/dev/urandom\"", ")", "s", "=", "f", ".", "read", "(", "n", ")", "f", ".", "close", "(", ")", "return", "s", "else", ":", "L", "=", "[", "chr", "(", "random", ".", "randrange", "(", "0", ",", "256", ")", ")", "for", "i", "in", "range", "(", "n", ")", "]", "return", "\"\"", ".", "join", "(", "L", ")" ]
Return n random bytes.
[ "Return", "n", "random", "bytes", "." ]
python
train
36.461538
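An editorial aside on the randombytes record above (not part of the original data): on modern Pythons the standard library already exposes OS-level randomness directly, which is what the /dev/urandom branch is emulating. A minimal sketch:

import os

token = os.urandom(16)   # 16 random bytes straight from the operating system
print(len(token))        # 16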
inveniosoftware-attic/invenio-utils
invenio_utils/url.py
https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/url.py#L123-L130
def get_safe_redirect_target(arg='next'): """Get URL to redirect to and ensure that it is local.""" for target in request.args.get(arg), request.referrer: if not target: continue if is_local_url(target): return target return None
[ "def", "get_safe_redirect_target", "(", "arg", "=", "'next'", ")", ":", "for", "target", "in", "request", ".", "args", ".", "get", "(", "arg", ")", ",", "request", ".", "referrer", ":", "if", "not", "target", ":", "continue", "if", "is_local_url", "(", "target", ")", ":", "return", "target", "return", "None" ]
Get URL to redirect to and ensure that it is local.
[ "Get", "URL", "to", "redirect", "to", "and", "ensure", "that", "it", "is", "local", "." ]
python
train
34.25
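A short usage sketch for the get_safe_redirect_target record above, added for illustration and not part of the original data. It assumes the function is importable from invenio_utils.url (per the record's path field) and that the view runs inside a Flask request context; the route and fallback URL are made up.

from flask import Flask, redirect
from invenio_utils.url import get_safe_redirect_target  # import path assumed from the record

app = Flask(__name__)

@app.route('/after-login')
def after_login():
    # Follow ?next= (or the referrer) only when it points back to this site,
    # otherwise fall back to a known-safe default.
    target = get_safe_redirect_target('next')
    return redirect(target or '/')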
sbg/sevenbridges-python
sevenbridges/meta/transformer.py
https://github.com/sbg/sevenbridges-python/blob/f62640d1018d959f0b686f2dbe5e183085336607/sevenbridges/meta/transformer.py#L22-L35
def to_project(project): """Serializes project to id string :param project: object to serialize :return: string id """ from sevenbridges.models.project import Project if not project: raise SbgError('Project is required!') elif isinstance(project, Project): return project.id elif isinstance(project, six.string_types): return project else: raise SbgError('Invalid project parameter!')
[ "def", "to_project", "(", "project", ")", ":", "from", "sevenbridges", ".", "models", ".", "project", "import", "Project", "if", "not", "project", ":", "raise", "SbgError", "(", "'Project is required!'", ")", "elif", "isinstance", "(", "project", ",", "Project", ")", ":", "return", "project", ".", "id", "elif", "isinstance", "(", "project", ",", "six", ".", "string_types", ")", ":", "return", "project", "else", ":", "raise", "SbgError", "(", "'Invalid project parameter!'", ")" ]
Serializes project to id string :param project: object to serialize :return: string id
[ "Serializes", "project", "to", "id", "string", ":", "param", "project", ":", "object", "to", "serialize", ":", "return", ":", "string", "id" ]
python
train
35.071429
ThreatConnect-Inc/tcex
tcex/tcex_ti/mappings/tcex_ti_mappings.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_ti/mappings/tcex_ti_mappings.py#L150-L164
def create(self): """ Creates the Indicator/Group/Victim or Security Label given Owner Args: """ if not self.can_create(): self._tcex.handle_error(905, [self.type]) response = self.tc_requests.create(self.api_type, self.api_sub_type, self._data, self.owner) if self.tc_requests.success(response): self._set_unique_id(response.json().get('data').get(self.api_entity)) return response
[ "def", "create", "(", "self", ")", ":", "if", "not", "self", ".", "can_create", "(", ")", ":", "self", ".", "_tcex", ".", "handle_error", "(", "905", ",", "[", "self", ".", "type", "]", ")", "response", "=", "self", ".", "tc_requests", ".", "create", "(", "self", ".", "api_type", ",", "self", ".", "api_sub_type", ",", "self", ".", "_data", ",", "self", ".", "owner", ")", "if", "self", ".", "tc_requests", ".", "success", "(", "response", ")", ":", "self", ".", "_set_unique_id", "(", "response", ".", "json", "(", ")", ".", "get", "(", "'data'", ")", ".", "get", "(", "self", ".", "api_entity", ")", ")", "return", "response" ]
Creates the Indicator/Group/Victim or Security Label given Owner Args:
[ "Creates", "the", "Indicator", "/", "Group", "/", "Victim", "or", "Security", "Label", "given", "Owner" ]
python
train
30.666667
FactoryBoy/factory_boy
factory/django.py
https://github.com/FactoryBoy/factory_boy/blob/edaa7c7f5a14065b229927903bd7989cc93cd069/factory/django.py#L179-L185
def _create(cls, model_class, *args, **kwargs): """Create an instance of the model, and save it to the database.""" if cls._meta.django_get_or_create: return cls._get_or_create(model_class, *args, **kwargs) manager = cls._get_manager(model_class) return manager.create(*args, **kwargs)
[ "def", "_create", "(", "cls", ",", "model_class", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "cls", ".", "_meta", ".", "django_get_or_create", ":", "return", "cls", ".", "_get_or_create", "(", "model_class", ",", "*", "args", ",", "*", "*", "kwargs", ")", "manager", "=", "cls", ".", "_get_manager", "(", "model_class", ")", "return", "manager", ".", "create", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Create an instance of the model, and save it to the database.
[ "Create", "an", "instance", "of", "the", "model", "and", "save", "it", "to", "the", "database", "." ]
python
train
46.285714
SHTOOLS/SHTOOLS
pyshtools/shclasses/shtensor.py
https://github.com/SHTOOLS/SHTOOLS/blob/9a115cf83002df2ddec6b7f41aeb6be688e285de/pyshtools/shclasses/shtensor.py#L1510-L1594
def plot_eigh(self, colorbar=True, cb_orientation='vertical', tick_interval=[60, 60], minor_tick_interval=[20, 20], xlabel='Longitude', ylabel='Latitude', axes_labelsize=9, tick_labelsize=8, show=True, fname=None, **kwargs): """ Plot the two eigenvalues and maximum absolute value eigenvalue of the horizontal tensor. Usage ----- x.plot_eigh([tick_interval, minor_tick_interval, xlabel, ylabel, colorbar, cb_orientation, cb_label, axes_labelsize, tick_labelsize, show, fname, **kwargs]) Parameters ---------- tick_interval : list or tuple, optional, default = [60, 60] Intervals to use when plotting the major x and y ticks. If set to None, major ticks will not be plotted. minor_tick_interval : list or tuple, optional, default = [20, 20] Intervals to use when plotting the minor x and y ticks. If set to None, minor ticks will not be plotted. xlabel : str, optional, default = 'Longitude' Label for the longitude axis. ylabel : str, optional, default = 'Latitude' Label for the latitude axis. colorbar : bool, optional, default = True If True, plot a colorbar. cb_orientation : str, optional, default = 'vertical' Orientation of the colorbar: either 'vertical' or 'horizontal'. cb_label : str, optional, default = None Text label for the colorbar. axes_labelsize : int, optional, default = 9 The font size for the x and y axes labels. tick_labelsize : int, optional, default = 8 The font size for the x and y tick labels. show : bool, optional, default = True If True, plot the image to the screen. fname : str, optional, default = None If present, and if axes is not specified, save the image to the specified file. kwargs : optional Keyword arguements that will be sent to the SHGrid.plot() and plt.imshow() methods. """ if colorbar is True: if cb_orientation == 'horizontal': scale = 2.3 else: scale = 1.4 else: scale = 1.65 figsize = (_mpl.rcParams['figure.figsize'][0], _mpl.rcParams['figure.figsize'][0] * scale) fig, ax = _plt.subplots(3, 1, figsize=figsize) self.plot_eigh1(colorbar=colorbar, cb_orientation=cb_orientation, ax=ax.flat[0], xlabel=xlabel, ylabel=ylabel, tick_interval=tick_interval, tick_labelsize=tick_labelsize, minor_tick_interval=minor_tick_interval, **kwargs) self.plot_eigh2(colorbar=colorbar, cb_orientation=cb_orientation, ax=ax.flat[1], xlabel=xlabel, ylabel=ylabel, tick_interval=tick_interval, tick_labelsize=tick_labelsize, minor_tick_interval=minor_tick_interval, **kwargs) self.plot_eighh(colorbar=colorbar, cb_orientation=cb_orientation, ax=ax.flat[2], xlabel=xlabel, ylabel=ylabel, tick_interval=tick_interval, tick_labelsize=tick_labelsize, minor_tick_interval=minor_tick_interval, **kwargs) fig.tight_layout(pad=0.5) if show: fig.show() if fname is not None: fig.savefig(fname) return fig, ax
[ "def", "plot_eigh", "(", "self", ",", "colorbar", "=", "True", ",", "cb_orientation", "=", "'vertical'", ",", "tick_interval", "=", "[", "60", ",", "60", "]", ",", "minor_tick_interval", "=", "[", "20", ",", "20", "]", ",", "xlabel", "=", "'Longitude'", ",", "ylabel", "=", "'Latitude'", ",", "axes_labelsize", "=", "9", ",", "tick_labelsize", "=", "8", ",", "show", "=", "True", ",", "fname", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "colorbar", "is", "True", ":", "if", "cb_orientation", "==", "'horizontal'", ":", "scale", "=", "2.3", "else", ":", "scale", "=", "1.4", "else", ":", "scale", "=", "1.65", "figsize", "=", "(", "_mpl", ".", "rcParams", "[", "'figure.figsize'", "]", "[", "0", "]", ",", "_mpl", ".", "rcParams", "[", "'figure.figsize'", "]", "[", "0", "]", "*", "scale", ")", "fig", ",", "ax", "=", "_plt", ".", "subplots", "(", "3", ",", "1", ",", "figsize", "=", "figsize", ")", "self", ".", "plot_eigh1", "(", "colorbar", "=", "colorbar", ",", "cb_orientation", "=", "cb_orientation", ",", "ax", "=", "ax", ".", "flat", "[", "0", "]", ",", "xlabel", "=", "xlabel", ",", "ylabel", "=", "ylabel", ",", "tick_interval", "=", "tick_interval", ",", "tick_labelsize", "=", "tick_labelsize", ",", "minor_tick_interval", "=", "minor_tick_interval", ",", "*", "*", "kwargs", ")", "self", ".", "plot_eigh2", "(", "colorbar", "=", "colorbar", ",", "cb_orientation", "=", "cb_orientation", ",", "ax", "=", "ax", ".", "flat", "[", "1", "]", ",", "xlabel", "=", "xlabel", ",", "ylabel", "=", "ylabel", ",", "tick_interval", "=", "tick_interval", ",", "tick_labelsize", "=", "tick_labelsize", ",", "minor_tick_interval", "=", "minor_tick_interval", ",", "*", "*", "kwargs", ")", "self", ".", "plot_eighh", "(", "colorbar", "=", "colorbar", ",", "cb_orientation", "=", "cb_orientation", ",", "ax", "=", "ax", ".", "flat", "[", "2", "]", ",", "xlabel", "=", "xlabel", ",", "ylabel", "=", "ylabel", ",", "tick_interval", "=", "tick_interval", ",", "tick_labelsize", "=", "tick_labelsize", ",", "minor_tick_interval", "=", "minor_tick_interval", ",", "*", "*", "kwargs", ")", "fig", ".", "tight_layout", "(", "pad", "=", "0.5", ")", "if", "show", ":", "fig", ".", "show", "(", ")", "if", "fname", "is", "not", "None", ":", "fig", ".", "savefig", "(", "fname", ")", "return", "fig", ",", "ax" ]
Plot the two eigenvalues and maximum absolute value eigenvalue of the horizontal tensor. Usage ----- x.plot_eigh([tick_interval, minor_tick_interval, xlabel, ylabel, colorbar, cb_orientation, cb_label, axes_labelsize, tick_labelsize, show, fname, **kwargs]) Parameters ---------- tick_interval : list or tuple, optional, default = [60, 60] Intervals to use when plotting the major x and y ticks. If set to None, major ticks will not be plotted. minor_tick_interval : list or tuple, optional, default = [20, 20] Intervals to use when plotting the minor x and y ticks. If set to None, minor ticks will not be plotted. xlabel : str, optional, default = 'Longitude' Label for the longitude axis. ylabel : str, optional, default = 'Latitude' Label for the latitude axis. colorbar : bool, optional, default = True If True, plot a colorbar. cb_orientation : str, optional, default = 'vertical' Orientation of the colorbar: either 'vertical' or 'horizontal'. cb_label : str, optional, default = None Text label for the colorbar. axes_labelsize : int, optional, default = 9 The font size for the x and y axes labels. tick_labelsize : int, optional, default = 8 The font size for the x and y tick labels. show : bool, optional, default = True If True, plot the image to the screen. fname : str, optional, default = None If present, and if axes is not specified, save the image to the specified file. kwargs : optional Keyword arguements that will be sent to the SHGrid.plot() and plt.imshow() methods.
[ "Plot", "the", "two", "eigenvalues", "and", "maximum", "absolute", "value", "eigenvalue", "of", "the", "horizontal", "tensor", "." ]
python
train
43.435294
robotpy/pyfrc
docs/conf.py
https://github.com/robotpy/pyfrc/blob/7672ea3f17c8d4b702a9f18a7372d95feee7e37d/docs/conf.py#L146-L172
def process_child(node): """This function changes class references to not have the intermediate module name by hacking at the doctree""" # Edit descriptions to be nicer if isinstance(node, sphinx.addnodes.desc_addname): if len(node.children) == 1: child = node.children[0] text = child.astext() if text.startswith("wpilib.") and text.endswith("."): # remove the last element text = ".".join(text.split(".")[:-2]) + "." node.children[0] = docutils.nodes.Text(text) # Edit literals to be nicer elif isinstance(node, docutils.nodes.literal): child = node.children[0] text = child.astext() # Remove the imported module name if text.startswith("wpilib."): stext = text.split(".") text = ".".join(stext[:-2] + [stext[-1]]) node.children[0] = docutils.nodes.Text(text) for child in node.children: process_child(child)
[ "def", "process_child", "(", "node", ")", ":", "# Edit descriptions to be nicer", "if", "isinstance", "(", "node", ",", "sphinx", ".", "addnodes", ".", "desc_addname", ")", ":", "if", "len", "(", "node", ".", "children", ")", "==", "1", ":", "child", "=", "node", ".", "children", "[", "0", "]", "text", "=", "child", ".", "astext", "(", ")", "if", "text", ".", "startswith", "(", "\"wpilib.\"", ")", "and", "text", ".", "endswith", "(", "\".\"", ")", ":", "# remove the last element", "text", "=", "\".\"", ".", "join", "(", "text", ".", "split", "(", "\".\"", ")", "[", ":", "-", "2", "]", ")", "+", "\".\"", "node", ".", "children", "[", "0", "]", "=", "docutils", ".", "nodes", ".", "Text", "(", "text", ")", "# Edit literals to be nicer", "elif", "isinstance", "(", "node", ",", "docutils", ".", "nodes", ".", "literal", ")", ":", "child", "=", "node", ".", "children", "[", "0", "]", "text", "=", "child", ".", "astext", "(", ")", "# Remove the imported module name", "if", "text", ".", "startswith", "(", "\"wpilib.\"", ")", ":", "stext", "=", "text", ".", "split", "(", "\".\"", ")", "text", "=", "\".\"", ".", "join", "(", "stext", "[", ":", "-", "2", "]", "+", "[", "stext", "[", "-", "1", "]", "]", ")", "node", ".", "children", "[", "0", "]", "=", "docutils", ".", "nodes", ".", "Text", "(", "text", ")", "for", "child", "in", "node", ".", "children", ":", "process_child", "(", "child", ")" ]
This function changes class references to not have the intermediate module name by hacking at the doctree
[ "This", "function", "changes", "class", "references", "to", "not", "have", "the", "intermediate", "module", "name", "by", "hacking", "at", "the", "doctree" ]
python
train
36.555556
cimm-kzn/CGRtools
CGRtools/containers/common.py
https://github.com/cimm-kzn/CGRtools/blob/15a19b04f6e4e1d0dab8e0d32a0877c7f7d70f34/CGRtools/containers/common.py#L128-L135
def environment(self, atom): """ pairs of (bond, atom) connected to atom :param atom: number :return: list """ return tuple((bond, self._node[n]) for n, bond in self._adj[atom].items())
[ "def", "environment", "(", "self", ",", "atom", ")", ":", "return", "tuple", "(", "(", "bond", ",", "self", ".", "_node", "[", "n", "]", ")", "for", "n", ",", "bond", "in", "self", ".", "_adj", "[", "atom", "]", ".", "items", "(", ")", ")" ]
pairs of (bond, atom) connected to atom :param atom: number :return: list
[ "pairs", "of", "(", "bond", "atom", ")", "connected", "to", "atom" ]
python
train
28.375
saltstack/salt
salt/utils/stringutils.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/stringutils.py#L594-L609
def camel_to_snake_case(camel_input): ''' Converts camelCase (or CamelCase) to snake_case. From https://codereview.stackexchange.com/questions/185966/functions-to-convert-camelcase-strings-to-snake-case :param str camel_input: The camelcase or CamelCase string to convert to snake_case :return str ''' res = camel_input[0].lower() for i, letter in enumerate(camel_input[1:], 1): if letter.isupper(): if camel_input[i-1].islower() or (i != len(camel_input)-1 and camel_input[i+1].islower()): res += '_' res += letter.lower() return res
[ "def", "camel_to_snake_case", "(", "camel_input", ")", ":", "res", "=", "camel_input", "[", "0", "]", ".", "lower", "(", ")", "for", "i", ",", "letter", "in", "enumerate", "(", "camel_input", "[", "1", ":", "]", ",", "1", ")", ":", "if", "letter", ".", "isupper", "(", ")", ":", "if", "camel_input", "[", "i", "-", "1", "]", ".", "islower", "(", ")", "or", "(", "i", "!=", "len", "(", "camel_input", ")", "-", "1", "and", "camel_input", "[", "i", "+", "1", "]", ".", "islower", "(", ")", ")", ":", "res", "+=", "'_'", "res", "+=", "letter", ".", "lower", "(", ")", "return", "res" ]
Converts camelCase (or CamelCase) to snake_case. From https://codereview.stackexchange.com/questions/185966/functions-to-convert-camelcase-strings-to-snake-case :param str camel_input: The camelcase or CamelCase string to convert to snake_case :return str
[ "Converts", "camelCase", "(", "or", "CamelCase", ")", "to", "snake_case", ".", "From", "https", ":", "//", "codereview", ".", "stackexchange", ".", "com", "/", "questions", "/", "185966", "/", "functions", "-", "to", "-", "convert", "-", "camelcase", "-", "strings", "-", "to", "-", "snake", "-", "case" ]
python
train
37.5625
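A few illustrative calls for the camel_to_snake_case record above (added here, not doctests from the source). The expected outputs follow from the rule of inserting '_' before an upper-case letter that begins a new word; the import path is taken from the record's path field.

from salt.utils.stringutils import camel_to_snake_case

print(camel_to_snake_case('camelCase'))         # camel_case
print(camel_to_snake_case('CamelCase'))         # camel_case
print(camel_to_snake_case('HTTPResponseCode'))  # http_response_code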
fabiocaccamo/django-maintenance-mode
maintenance_mode/http.py
https://github.com/fabiocaccamo/django-maintenance-mode/blob/008221a6b8a687667c2480fa799c7a4228598441/maintenance_mode/http.py#L68-L204
def need_maintenance_response(request): """ Tells if the given request needs a maintenance response or not. """ try: view_match = resolve(request.path) view_func = view_match[0] view_dict = view_func.__dict__ view_force_maintenance_mode_off = view_dict.get( 'force_maintenance_mode_off', False) if view_force_maintenance_mode_off: # view has 'force_maintenance_mode_off' decorator return False view_force_maintenance_mode_on = view_dict.get( 'force_maintenance_mode_on', False) if view_force_maintenance_mode_on: # view has 'force_maintenance_mode_on' decorator return True except Resolver404: pass if not get_maintenance_mode(): return False try: url_off = reverse('maintenance_mode_off') resolve(url_off) if url_off == request.path_info: return False except NoReverseMatch: # maintenance_mode.urls not added pass if hasattr(request, 'user'): if django.VERSION < (1, 10): if settings.MAINTENANCE_MODE_IGNORE_ANONYMOUS_USER \ and request.user.is_anonymous(): return False if settings.MAINTENANCE_MODE_IGNORE_AUTHENTICATED_USER \ and request.user.is_authenticated(): return False else: if settings.MAINTENANCE_MODE_IGNORE_ANONYMOUS_USER \ and request.user.is_anonymous: return False if settings.MAINTENANCE_MODE_IGNORE_AUTHENTICATED_USER \ and request.user.is_authenticated: return False if settings.MAINTENANCE_MODE_IGNORE_STAFF \ and request.user.is_staff: return False if settings.MAINTENANCE_MODE_IGNORE_SUPERUSER \ and request.user.is_superuser: return False if settings.MAINTENANCE_MODE_IGNORE_ADMIN_SITE: try: request_path = request.path if request.path else '' if not request_path.endswith('/'): request_path += '/' admin_url = reverse('admin:index') if request_path.startswith(admin_url): return False except NoReverseMatch: # admin.urls not added pass if settings.MAINTENANCE_MODE_IGNORE_TESTS: is_testing = False if (len(sys.argv) > 0 and 'runtests' in sys.argv[0]) \ or (len(sys.argv) > 1 and sys.argv[1] == 'test'): # python runtests.py | python manage.py test | python # setup.py test | django-admin.py test is_testing = True if is_testing: return False if settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES: if settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS: try: get_client_ip_address_func = import_string( settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS) except ImportError: raise ImproperlyConfigured( 'settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS ' 'is not a valid function path.') else: client_ip_address = get_client_ip_address_func(request) else: client_ip_address = get_client_ip_address(request) for ip_address in settings.MAINTENANCE_MODE_IGNORE_IP_ADDRESSES: ip_address_re = re.compile(ip_address) if ip_address_re.match(client_ip_address): return False if settings.MAINTENANCE_MODE_IGNORE_URLS: for url in settings.MAINTENANCE_MODE_IGNORE_URLS: if not isinstance(url, pattern_class): url = str(url) url_re = re.compile(url) if url_re.match(request.path_info): return False if settings.MAINTENANCE_MODE_REDIRECT_URL: redirect_url_re = re.compile( settings.MAINTENANCE_MODE_REDIRECT_URL) if redirect_url_re.match(request.path_info): return False return True
[ "def", "need_maintenance_response", "(", "request", ")", ":", "try", ":", "view_match", "=", "resolve", "(", "request", ".", "path", ")", "view_func", "=", "view_match", "[", "0", "]", "view_dict", "=", "view_func", ".", "__dict__", "view_force_maintenance_mode_off", "=", "view_dict", ".", "get", "(", "'force_maintenance_mode_off'", ",", "False", ")", "if", "view_force_maintenance_mode_off", ":", "# view has 'force_maintenance_mode_off' decorator", "return", "False", "view_force_maintenance_mode_on", "=", "view_dict", ".", "get", "(", "'force_maintenance_mode_on'", ",", "False", ")", "if", "view_force_maintenance_mode_on", ":", "# view has 'force_maintenance_mode_on' decorator", "return", "True", "except", "Resolver404", ":", "pass", "if", "not", "get_maintenance_mode", "(", ")", ":", "return", "False", "try", ":", "url_off", "=", "reverse", "(", "'maintenance_mode_off'", ")", "resolve", "(", "url_off", ")", "if", "url_off", "==", "request", ".", "path_info", ":", "return", "False", "except", "NoReverseMatch", ":", "# maintenance_mode.urls not added", "pass", "if", "hasattr", "(", "request", ",", "'user'", ")", ":", "if", "django", ".", "VERSION", "<", "(", "1", ",", "10", ")", ":", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_ANONYMOUS_USER", "and", "request", ".", "user", ".", "is_anonymous", "(", ")", ":", "return", "False", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_AUTHENTICATED_USER", "and", "request", ".", "user", ".", "is_authenticated", "(", ")", ":", "return", "False", "else", ":", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_ANONYMOUS_USER", "and", "request", ".", "user", ".", "is_anonymous", ":", "return", "False", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_AUTHENTICATED_USER", "and", "request", ".", "user", ".", "is_authenticated", ":", "return", "False", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_STAFF", "and", "request", ".", "user", ".", "is_staff", ":", "return", "False", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_SUPERUSER", "and", "request", ".", "user", ".", "is_superuser", ":", "return", "False", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_ADMIN_SITE", ":", "try", ":", "request_path", "=", "request", ".", "path", "if", "request", ".", "path", "else", "''", "if", "not", "request_path", ".", "endswith", "(", "'/'", ")", ":", "request_path", "+=", "'/'", "admin_url", "=", "reverse", "(", "'admin:index'", ")", "if", "request_path", ".", "startswith", "(", "admin_url", ")", ":", "return", "False", "except", "NoReverseMatch", ":", "# admin.urls not added", "pass", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_TESTS", ":", "is_testing", "=", "False", "if", "(", "len", "(", "sys", ".", "argv", ")", ">", "0", "and", "'runtests'", "in", "sys", ".", "argv", "[", "0", "]", ")", "or", "(", "len", "(", "sys", ".", "argv", ")", ">", "1", "and", "sys", ".", "argv", "[", "1", "]", "==", "'test'", ")", ":", "# python runtests.py | python manage.py test | python", "# setup.py test | django-admin.py test", "is_testing", "=", "True", "if", "is_testing", ":", "return", "False", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_IP_ADDRESSES", ":", "if", "settings", ".", "MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS", ":", "try", ":", "get_client_ip_address_func", "=", "import_string", "(", "settings", ".", "MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS", ")", "except", "ImportError", ":", "raise", "ImproperlyConfigured", "(", "'settings.MAINTENANCE_MODE_GET_CLIENT_IP_ADDRESS '", "'is not a valid function path.'", ")", "else", ":", "client_ip_address", "=", 
"get_client_ip_address_func", "(", "request", ")", "else", ":", "client_ip_address", "=", "get_client_ip_address", "(", "request", ")", "for", "ip_address", "in", "settings", ".", "MAINTENANCE_MODE_IGNORE_IP_ADDRESSES", ":", "ip_address_re", "=", "re", ".", "compile", "(", "ip_address", ")", "if", "ip_address_re", ".", "match", "(", "client_ip_address", ")", ":", "return", "False", "if", "settings", ".", "MAINTENANCE_MODE_IGNORE_URLS", ":", "for", "url", "in", "settings", ".", "MAINTENANCE_MODE_IGNORE_URLS", ":", "if", "not", "isinstance", "(", "url", ",", "pattern_class", ")", ":", "url", "=", "str", "(", "url", ")", "url_re", "=", "re", ".", "compile", "(", "url", ")", "if", "url_re", ".", "match", "(", "request", ".", "path_info", ")", ":", "return", "False", "if", "settings", ".", "MAINTENANCE_MODE_REDIRECT_URL", ":", "redirect_url_re", "=", "re", ".", "compile", "(", "settings", ".", "MAINTENANCE_MODE_REDIRECT_URL", ")", "if", "redirect_url_re", ".", "match", "(", "request", ".", "path_info", ")", ":", "return", "False", "return", "True" ]
Tells if the given request needs a maintenance response or not.
[ "Tells", "if", "the", "given", "request", "needs", "a", "maintenance", "response", "or", "not", "." ]
python
train
29.635036
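A sketch of the attribute convention the first branch of need_maintenance_response relies on: the resolved view function must carry 'force_maintenance_mode_off' in its __dict__. The real package ships its own decorators; the stand-in below is only an illustration of how such a flag could end up on a view.

import functools

def force_maintenance_mode_off(view_func):
    @functools.wraps(view_func)
    def wrapped(request, *args, **kwargs):
        return view_func(request, *args, **kwargs)
    # Flag the wrapper so the resolver-based check finds the attribute.
    wrapped.force_maintenance_mode_off = True
    return wrapped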
brainiak/brainiak
brainiak/isc.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/isc.py#L1237-L1383
def timeshift_isc(data, pairwise=False, summary_statistic='median', n_shifts=1000, tolerate_nans=True, random_state=None): """Circular time-shift randomization for one-sample ISC test For each voxel or ROI, compute the actual ISC and p-values from a null distribution of ISCs where response time series are first circularly shifted by random intervals. If pairwise, apply time-shift randomization to each subjects and compute pairwise ISCs. If leave-one-out approach is used (pairwise=False), apply the random time-shift to only the left-out subject in each iteration of the leave-one-out procedure. Input data should be a list where each item is a time-points by voxels ndarray for a given subject. Multiple input ndarrays must be the same shape. If a single ndarray is supplied, the last dimension is assumed to correspond to subjects. When using leave-one-out approach, NaNs are ignored when computing mean time series of N-1 subjects (default: tolerate_nans=True). Alternatively, you may supply a float between 0 and 1 indicating a threshold proportion of N subjects with non-NaN values required when computing the average time series for a given voxel. For example, if tolerate_nans=.8, ISCs will be computed for any voxel where >= 80% of subjects have non-NaN values, while voxels with < 80% non-NaN values will be assigned NaNs. If set to False, NaNs are not tolerated and voxels with one or more NaNs among the N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False will not affect the pairwise approach; however, if a threshold float is provided, voxels that do not reach this threshold will be excluded. Note that accommodating NaNs may be notably slower than setting tolerate_nans to False. Returns the observed ISC and p-values (two-tailed test), as well as the null distribution of ISCs computed on randomly time-shifted data. The implementation is based on the work in [Kauppi2010]_ and [Kauppi2014]_. .. [Kauppi2010] "Inter-subject correlation of brain hemodynamic responses during watching a movie: localization in space and frequency.", J. P. Kauppi, I. P. Jääskeläinen, M. Sams, J. Tohka, 2010, Frontiers in Neuroinformatics, 4, 5. 
https://doi.org/10.3389/fninf.2010.00005 Parameters ---------- data : list or ndarray (n_TRs x n_voxels x n_subjects) fMRI data for which to compute ISFC pairwise : bool, default: False Whether to use pairwise (True) or leave-one-out (False) approach summary_statistic : str, default: 'median' Summary statistic, either 'median' (default) or 'mean' n_shifts : int, default: 1000 Number of randomly shifted samples tolerate_nans : bool or float, default: True Accommodate NaNs (when averaging in leave-one-out approach) random_state = int, None, or np.random.RandomState, default: None Initial random seed Returns ------- observed : float, observed ISC (without time-shifting) Actual ISCs p : float, p-value p-value based on time-shifting randomization test distribution : ndarray, time-shifts by voxels (optional) Time-shifted null distribution if return_bootstrap=True """ # Check response time series input format data, n_TRs, n_voxels, n_subjects = _check_timeseries_input(data) # Get actual observed ISC observed = isc(data, pairwise=pairwise, summary_statistic=summary_statistic, tolerate_nans=tolerate_nans) # Roll axis to get subjects in first dimension for loop if pairwise: data = np.rollaxis(data, 2, 0) # Iterate through randomized shifts to create null distribution distribution = [] for i in np.arange(n_shifts): # Random seed to be deterministically re-randomized at each iteration if isinstance(random_state, np.random.RandomState): prng = random_state else: prng = np.random.RandomState(random_state) # Get a random set of shifts based on number of TRs, shifts = prng.choice(np.arange(n_TRs), size=n_subjects, replace=True) # In pairwise approach, apply all shifts then compute pairwise ISCs if pairwise: # Apply circular shift to each subject's time series shifted_data = [] for subject, shift in zip(data, shifts): shifted_data.append(np.concatenate( (subject[-shift:, :], subject[:-shift, :]))) shifted_data = np.dstack(shifted_data) # Compute null ISC on shifted data for pairwise approach shifted_isc = isc(shifted_data, pairwise=pairwise, summary_statistic=summary_statistic, tolerate_nans=tolerate_nans) # In leave-one-out, apply shift only to each left-out participant elif not pairwise: shifted_isc = [] for s, shift in enumerate(shifts): shifted_subject = np.concatenate((data[-shift:, :, s], data[:-shift, :, s])) nonshifted_mean = np.mean(np.delete(data, s, 2), axis=2) loo_isc = isc(np.dstack((shifted_subject, nonshifted_mean)), pairwise=False, summary_statistic=None, tolerate_nans=tolerate_nans) shifted_isc.append(loo_isc) # Get summary statistics across left-out subjects shifted_isc = compute_summary_statistic( np.dstack(shifted_isc), summary_statistic=summary_statistic, axis=2) distribution.append(shifted_isc) # Update random state for next iteration random_state = np.random.RandomState(prng.randint(0, MAX_RANDOM_SEED)) # Convert distribution to numpy array distribution = np.vstack(distribution) # Get p-value for actual median from shifted distribution p = p_from_null(observed, distribution, side='two-sided', exact=False, axis=0) return observed, p, distribution
[ "def", "timeshift_isc", "(", "data", ",", "pairwise", "=", "False", ",", "summary_statistic", "=", "'median'", ",", "n_shifts", "=", "1000", ",", "tolerate_nans", "=", "True", ",", "random_state", "=", "None", ")", ":", "# Check response time series input format", "data", ",", "n_TRs", ",", "n_voxels", ",", "n_subjects", "=", "_check_timeseries_input", "(", "data", ")", "# Get actual observed ISC", "observed", "=", "isc", "(", "data", ",", "pairwise", "=", "pairwise", ",", "summary_statistic", "=", "summary_statistic", ",", "tolerate_nans", "=", "tolerate_nans", ")", "# Roll axis to get subjects in first dimension for loop", "if", "pairwise", ":", "data", "=", "np", ".", "rollaxis", "(", "data", ",", "2", ",", "0", ")", "# Iterate through randomized shifts to create null distribution", "distribution", "=", "[", "]", "for", "i", "in", "np", ".", "arange", "(", "n_shifts", ")", ":", "# Random seed to be deterministically re-randomized at each iteration", "if", "isinstance", "(", "random_state", ",", "np", ".", "random", ".", "RandomState", ")", ":", "prng", "=", "random_state", "else", ":", "prng", "=", "np", ".", "random", ".", "RandomState", "(", "random_state", ")", "# Get a random set of shifts based on number of TRs,", "shifts", "=", "prng", ".", "choice", "(", "np", ".", "arange", "(", "n_TRs", ")", ",", "size", "=", "n_subjects", ",", "replace", "=", "True", ")", "# In pairwise approach, apply all shifts then compute pairwise ISCs", "if", "pairwise", ":", "# Apply circular shift to each subject's time series", "shifted_data", "=", "[", "]", "for", "subject", ",", "shift", "in", "zip", "(", "data", ",", "shifts", ")", ":", "shifted_data", ".", "append", "(", "np", ".", "concatenate", "(", "(", "subject", "[", "-", "shift", ":", ",", ":", "]", ",", "subject", "[", ":", "-", "shift", ",", ":", "]", ")", ")", ")", "shifted_data", "=", "np", ".", "dstack", "(", "shifted_data", ")", "# Compute null ISC on shifted data for pairwise approach", "shifted_isc", "=", "isc", "(", "shifted_data", ",", "pairwise", "=", "pairwise", ",", "summary_statistic", "=", "summary_statistic", ",", "tolerate_nans", "=", "tolerate_nans", ")", "# In leave-one-out, apply shift only to each left-out participant", "elif", "not", "pairwise", ":", "shifted_isc", "=", "[", "]", "for", "s", ",", "shift", "in", "enumerate", "(", "shifts", ")", ":", "shifted_subject", "=", "np", ".", "concatenate", "(", "(", "data", "[", "-", "shift", ":", ",", ":", ",", "s", "]", ",", "data", "[", ":", "-", "shift", ",", ":", ",", "s", "]", ")", ")", "nonshifted_mean", "=", "np", ".", "mean", "(", "np", ".", "delete", "(", "data", ",", "s", ",", "2", ")", ",", "axis", "=", "2", ")", "loo_isc", "=", "isc", "(", "np", ".", "dstack", "(", "(", "shifted_subject", ",", "nonshifted_mean", ")", ")", ",", "pairwise", "=", "False", ",", "summary_statistic", "=", "None", ",", "tolerate_nans", "=", "tolerate_nans", ")", "shifted_isc", ".", "append", "(", "loo_isc", ")", "# Get summary statistics across left-out subjects", "shifted_isc", "=", "compute_summary_statistic", "(", "np", ".", "dstack", "(", "shifted_isc", ")", ",", "summary_statistic", "=", "summary_statistic", ",", "axis", "=", "2", ")", "distribution", ".", "append", "(", "shifted_isc", ")", "# Update random state for next iteration", "random_state", "=", "np", ".", "random", ".", "RandomState", "(", "prng", ".", "randint", "(", "0", ",", "MAX_RANDOM_SEED", ")", ")", "# Convert distribution to numpy array", "distribution", "=", "np", ".", "vstack", "(", "distribution", ")", "# Get 
p-value for actual median from shifted distribution", "p", "=", "p_from_null", "(", "observed", ",", "distribution", ",", "side", "=", "'two-sided'", ",", "exact", "=", "False", ",", "axis", "=", "0", ")", "return", "observed", ",", "p", ",", "distribution" ]
Circular time-shift randomization for one-sample ISC test For each voxel or ROI, compute the actual ISC and p-values from a null distribution of ISCs where response time series are first circularly shifted by random intervals. If pairwise, apply time-shift randomization to each subjects and compute pairwise ISCs. If leave-one-out approach is used (pairwise=False), apply the random time-shift to only the left-out subject in each iteration of the leave-one-out procedure. Input data should be a list where each item is a time-points by voxels ndarray for a given subject. Multiple input ndarrays must be the same shape. If a single ndarray is supplied, the last dimension is assumed to correspond to subjects. When using leave-one-out approach, NaNs are ignored when computing mean time series of N-1 subjects (default: tolerate_nans=True). Alternatively, you may supply a float between 0 and 1 indicating a threshold proportion of N subjects with non-NaN values required when computing the average time series for a given voxel. For example, if tolerate_nans=.8, ISCs will be computed for any voxel where >= 80% of subjects have non-NaN values, while voxels with < 80% non-NaN values will be assigned NaNs. If set to False, NaNs are not tolerated and voxels with one or more NaNs among the N-1 subjects will be assigned NaN. Setting tolerate_nans to True or False will not affect the pairwise approach; however, if a threshold float is provided, voxels that do not reach this threshold will be excluded. Note that accommodating NaNs may be notably slower than setting tolerate_nans to False. Returns the observed ISC and p-values (two-tailed test), as well as the null distribution of ISCs computed on randomly time-shifted data. The implementation is based on the work in [Kauppi2010]_ and [Kauppi2014]_. .. [Kauppi2010] "Inter-subject correlation of brain hemodynamic responses during watching a movie: localization in space and frequency.", J. P. Kauppi, I. P. Jääskeläinen, M. Sams, J. Tohka, 2010, Frontiers in Neuroinformatics, 4, 5. https://doi.org/10.3389/fninf.2010.00005 Parameters ---------- data : list or ndarray (n_TRs x n_voxels x n_subjects) fMRI data for which to compute ISFC pairwise : bool, default: False Whether to use pairwise (True) or leave-one-out (False) approach summary_statistic : str, default: 'median' Summary statistic, either 'median' (default) or 'mean' n_shifts : int, default: 1000 Number of randomly shifted samples tolerate_nans : bool or float, default: True Accommodate NaNs (when averaging in leave-one-out approach) random_state = int, None, or np.random.RandomState, default: None Initial random seed Returns ------- observed : float, observed ISC (without time-shifting) Actual ISCs p : float, p-value p-value based on time-shifting randomization test distribution : ndarray, time-shifts by voxels (optional) Time-shifted null distribution if return_bootstrap=True
[ "Circular", "time", "-", "shift", "randomization", "for", "one", "-", "sample", "ISC", "test" ]
python
train
42.952381
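A toy illustration of the circular time-shift applied to each left-out subject in the timeshift_isc record above: a (n_TRs, n_voxels) array rolled by a shift drawn from np.arange(n_TRs). The numbers are made up for the example.

import numpy as np

ts = np.arange(10).reshape(10, 1)                           # 10 TRs, 1 voxel
shift = 3
shifted = np.concatenate((ts[-shift:, :], ts[:-shift, :]))  # same expression as in the code
print(shifted.ravel())                                      # [7 8 9 0 1 2 3 4 5 6]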
alex-kostirin/pyatomac
atomac/ldtpd/value.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/ldtpd/value.py#L30-L50
def verifyscrollbarvertical(self, window_name, object_name): """ Verify scrollbar is vertical @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @return: 1 on success. @rtype: integer """ try: object_handle = self._get_object_handle(window_name, object_name) if object_handle.AXOrientation == "AXVerticalOrientation": return 1 except: pass return 0
[ "def", "verifyscrollbarvertical", "(", "self", ",", "window_name", ",", "object_name", ")", ":", "try", ":", "object_handle", "=", "self", ".", "_get_object_handle", "(", "window_name", ",", "object_name", ")", "if", "object_handle", ".", "AXOrientation", "==", "\"AXVerticalOrientation\"", ":", "return", "1", "except", ":", "pass", "return", "0" ]
Verify scrollbar is vertical @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @return: 1 on success. @rtype: integer
[ "Verify", "scrollbar", "is", "vertical" ]
python
valid
33.285714
HPENetworking/PYHPEIMC
pyhpeimc/plat/device.py
https://github.com/HPENetworking/PYHPEIMC/blob/4fba31827573587e03a6233c7db60f188038c8e5/pyhpeimc/plat/device.py#L780-L829
def set_inteface_up(ifindex, auth, url, devid=None, devip=None): """ function takest devid and ifindex of specific device and interface and issues a RESTFUL call to "undo shut" the specified interface on the target device. :param devid: int or str value of the target device :param devip: ipv4 address of the target devices :param ifindex: int or str value of the target interface :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: HTTP status code 204 with no values. :rype: int >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.device import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> int_down_response = set_interface_down( '9', auth.creds, auth.url, devid = '10') 204 >>> int_up_response = set_inteface_up( '9', auth.creds, auth.url, devid = '10') >>> int_down_response = set_interface_down( '9', auth.creds, auth.url, devid = '10') 204 >>> int_up_response = set_inteface_up('9', auth.creds, auth.url, devip = '10.101.0.221') >>> assert type(int_up_response) is int >>> assert int_up_response is 204 """ if devip is not None: devid = get_dev_details(devip, auth, url)['id'] set_int_up_url = "/imcrs/plat/res/device/" + str(devid) + "/interface/" + str(ifindex) + "/up" f_url = url + set_int_up_url try: response = requests.put(f_url, auth=auth, headers=HEADERS) if response.status_code == 204: return response.status_code except requests.exceptions.RequestException as error: return "Error:\n" + str(error) + " set_inteface_up: An Error has occured"
[ "def", "set_inteface_up", "(", "ifindex", ",", "auth", ",", "url", ",", "devid", "=", "None", ",", "devip", "=", "None", ")", ":", "if", "devip", "is", "not", "None", ":", "devid", "=", "get_dev_details", "(", "devip", ",", "auth", ",", "url", ")", "[", "'id'", "]", "set_int_up_url", "=", "\"/imcrs/plat/res/device/\"", "+", "str", "(", "devid", ")", "+", "\"/interface/\"", "+", "str", "(", "ifindex", ")", "+", "\"/up\"", "f_url", "=", "url", "+", "set_int_up_url", "try", ":", "response", "=", "requests", ".", "put", "(", "f_url", ",", "auth", "=", "auth", ",", "headers", "=", "HEADERS", ")", "if", "response", ".", "status_code", "==", "204", ":", "return", "response", ".", "status_code", "except", "requests", ".", "exceptions", ".", "RequestException", "as", "error", ":", "return", "\"Error:\\n\"", "+", "str", "(", "error", ")", "+", "\" set_inteface_up: An Error has occured\"" ]
function takest devid and ifindex of specific device and interface and issues a RESTFUL call to "undo shut" the specified interface on the target device. :param devid: int or str value of the target device :param devip: ipv4 address of the target devices :param ifindex: int or str value of the target interface :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass :return: HTTP status code 204 with no values. :rype: int >>> from pyhpeimc.auth import * >>> from pyhpeimc.plat.device import * >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin") >>> int_down_response = set_interface_down( '9', auth.creds, auth.url, devid = '10') 204 >>> int_up_response = set_inteface_up( '9', auth.creds, auth.url, devid = '10') >>> int_down_response = set_interface_down( '9', auth.creds, auth.url, devid = '10') 204 >>> int_up_response = set_inteface_up('9', auth.creds, auth.url, devip = '10.101.0.221') >>> assert type(int_up_response) is int >>> assert int_up_response is 204
[ "function", "takest", "devid", "and", "ifindex", "of", "specific", "device", "and", "interface", "and", "issues", "a", "RESTFUL", "call", "to", "undo", "shut", "the", "specified", "interface", "on", "the", "target", "device", "." ]
python
train
34.86
gbowerman/azurerm
examples/delete_rg.py
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/examples/delete_rg.py#L9-L38
def main(): '''Main routine.''' # check for single command argument if len(sys.argv) != 2: sys.exit('Usage: python ' + sys.argv[0] + ' rg_name') rgname = sys.argv[1] # if in Azure cloud shell, authenticate using the MSI endpoint if 'ACC_CLOUD' in os.environ and 'MSI_ENDPOINT' in os.environ: access_token = azurerm.get_access_token_from_cli() subscription_id = azurerm.get_subscription_from_cli() else: # load service principal details from a config file try: with open('azurermconfig.json') as configfile: configdata = json.load(configfile) except FileNotFoundError: sys.exit('Error: Expecting azurermconfig.json in current folder') tenant_id = configdata['tenantId'] app_id = configdata['appId'] app_secret = configdata['appSecret'] subscription_id = configdata['subscriptionId'] # authenticate access_token = azurerm.get_access_token(tenant_id, app_id, app_secret) # delete a resource group rgreturn = azurerm.delete_resource_group(access_token, subscription_id, rgname) print(rgreturn)
[ "def", "main", "(", ")", ":", "# check for single command argument", "if", "len", "(", "sys", ".", "argv", ")", "!=", "2", ":", "sys", ".", "exit", "(", "'Usage: python '", "+", "sys", ".", "argv", "[", "0", "]", "+", "' rg_name'", ")", "rgname", "=", "sys", ".", "argv", "[", "1", "]", "# if in Azure cloud shell, authenticate using the MSI endpoint", "if", "'ACC_CLOUD'", "in", "os", ".", "environ", "and", "'MSI_ENDPOINT'", "in", "os", ".", "environ", ":", "access_token", "=", "azurerm", ".", "get_access_token_from_cli", "(", ")", "subscription_id", "=", "azurerm", ".", "get_subscription_from_cli", "(", ")", "else", ":", "# load service principal details from a config file ", "try", ":", "with", "open", "(", "'azurermconfig.json'", ")", "as", "configfile", ":", "configdata", "=", "json", ".", "load", "(", "configfile", ")", "except", "FileNotFoundError", ":", "sys", ".", "exit", "(", "'Error: Expecting azurermconfig.json in current folder'", ")", "tenant_id", "=", "configdata", "[", "'tenantId'", "]", "app_id", "=", "configdata", "[", "'appId'", "]", "app_secret", "=", "configdata", "[", "'appSecret'", "]", "subscription_id", "=", "configdata", "[", "'subscriptionId'", "]", "# authenticate", "access_token", "=", "azurerm", ".", "get_access_token", "(", "tenant_id", ",", "app_id", ",", "app_secret", ")", "# delete a resource group", "rgreturn", "=", "azurerm", ".", "delete_resource_group", "(", "access_token", ",", "subscription_id", ",", "rgname", ")", "print", "(", "rgreturn", ")" ]
Main routine.
[ "Main", "routine", "." ]
python
train
38.033333
Nike-Inc/cerberus-python-client
cerberus/client.py
https://github.com/Nike-Inc/cerberus-python-client/blob/ef38356822e722fcb6a6ed4a1b38a5b493e753ae/cerberus/client.py#L587-L601
def _get_all_secret_version_ids(self, secure_data_path, limit=None): """ Convenience function that returns a generator that will paginate over the secret version ids secure_data_path -- full path to the key in the safety deposit box limit -- Default(100), limits how many records to be returned from the api at once. """ offset = 0 # Prime the versions dictionary so that all the logic can happen in the loop versions = {'has_next': True, 'next_offset': 0} while (versions['has_next']): offset = versions['next_offset'] versions = self.get_secret_versions(secure_data_path, limit, offset) for summary in versions['secure_data_version_summaries']: yield summary
[ "def", "_get_all_secret_version_ids", "(", "self", ",", "secure_data_path", ",", "limit", "=", "None", ")", ":", "offset", "=", "0", "# Prime the versions dictionary so that all the logic can happen in the loop", "versions", "=", "{", "'has_next'", ":", "True", ",", "'next_offset'", ":", "0", "}", "while", "(", "versions", "[", "'has_next'", "]", ")", ":", "offset", "=", "versions", "[", "'next_offset'", "]", "versions", "=", "self", ".", "get_secret_versions", "(", "secure_data_path", ",", "limit", ",", "offset", ")", "for", "summary", "in", "versions", "[", "'secure_data_version_summaries'", "]", ":", "yield", "summary" ]
Convenience function that returns a generator that will paginate over the secret version ids secure_data_path -- full path to the key in the safety deposit box limit -- Default(100), limits how many records to be returned from the api at once.
[ "Convenience", "function", "that", "returns", "a", "generator", "that", "will", "paginate", "over", "the", "secret", "version", "ids", "secure_data_path", "--", "full", "path", "to", "the", "key", "in", "the", "safety", "deposit", "box", "limit", "--", "Default", "(", "100", ")", "limits", "how", "many", "records", "to", "be", "returned", "from", "the", "api", "at", "once", "." ]
python
train
51.4
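A self-contained sketch of the keep-paging-while-has_next pattern used by _get_all_secret_version_ids above; a fake API stands in for get_secret_versions so the snippet runs on its own, and its field names mirror the ones the generator reads.

def fake_get_versions(limit, offset, total=7):
    end = min(offset + limit, total)
    return {
        'secure_data_version_summaries': list(range(offset, end)),
        'has_next': end < total,
        'next_offset': end,
    }

def iter_all_versions(limit=3):
    versions = {'has_next': True, 'next_offset': 0}
    while versions['has_next']:
        versions = fake_get_versions(limit, versions['next_offset'])
        for summary in versions['secure_data_version_summaries']:
            yield summary

print(list(iter_all_versions()))  # [0, 1, 2, 3, 4, 5, 6]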
saltstack/salt
salt/modules/match.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/match.py#L258-L285
def pcre(tgt, minion_id=None): ''' Return True if the minion ID matches the given pcre target minion_id Specify the minion ID to match against the target expression .. versionadded:: 2014.7.0 CLI Example: .. code-block:: bash salt '*' match.pcre '.*' ''' if minion_id is not None: opts = copy.copy(__opts__) if not isinstance(minion_id, six.string_types): minion_id = six.text_type(minion_id) opts['id'] = minion_id else: opts = __opts__ matchers = salt.loader.matchers(opts) try: return matchers['pcre_match.match'](tgt, opts=__opts__) except Exception as exc: log.exception(exc) return False
[ "def", "pcre", "(", "tgt", ",", "minion_id", "=", "None", ")", ":", "if", "minion_id", "is", "not", "None", ":", "opts", "=", "copy", ".", "copy", "(", "__opts__", ")", "if", "not", "isinstance", "(", "minion_id", ",", "six", ".", "string_types", ")", ":", "minion_id", "=", "six", ".", "text_type", "(", "minion_id", ")", "opts", "[", "'id'", "]", "=", "minion_id", "else", ":", "opts", "=", "__opts__", "matchers", "=", "salt", ".", "loader", ".", "matchers", "(", "opts", ")", "try", ":", "return", "matchers", "[", "'pcre_match.match'", "]", "(", "tgt", ",", "opts", "=", "__opts__", ")", "except", "Exception", "as", "exc", ":", "log", ".", "exception", "(", "exc", ")", "return", "False" ]
Return True if the minion ID matches the given pcre target minion_id Specify the minion ID to match against the target expression .. versionadded:: 2014.7.0 CLI Example: .. code-block:: bash salt '*' match.pcre '.*'
[ "Return", "True", "if", "the", "minion", "ID", "matches", "the", "given", "pcre", "target" ]
python
train
25.25
axltxl/m2bk
m2bk/log.py
https://github.com/axltxl/m2bk/blob/980083dfd17e6e783753a946e9aa809714551141/m2bk/log.py#L112-L120
def msg_err(message): """ Log an error message :param message: the message to be logged """ to_stdout(" !!! {message}".format(message=message), colorf=red, bold=True) if _logger: _logger.error(message)
[ "def", "msg_err", "(", "message", ")", ":", "to_stdout", "(", "\" !!! {message}\"", ".", "format", "(", "message", "=", "message", ")", ",", "colorf", "=", "red", ",", "bold", "=", "True", ")", "if", "_logger", ":", "_logger", ".", "error", "(", "message", ")" ]
Log an error message :param message: the message to be logged
[ "Log", "an", "error", "message" ]
python
train
25.111111
peri-source/peri
peri/opt/optimize.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1228-L1231
def calc_grad(self): """The gradient of the cost w.r.t. the parameters.""" residuals = self.calc_residuals() return 2*np.dot(self.J, residuals)
[ "def", "calc_grad", "(", "self", ")", ":", "residuals", "=", "self", ".", "calc_residuals", "(", ")", "return", "2", "*", "np", ".", "dot", "(", "self", ".", "J", ",", "residuals", ")" ]
The gradient of the cost w.r.t. the parameters.
[ "The", "gradient", "of", "the", "cost", "w", ".", "r", ".", "t", ".", "the", "parameters", "." ]
python
valid
41
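A quick numeric check of the calc_grad formula above, under the assumption that the cost is the sum of squared residuals and that J is stored with parameters along the first axis (so np.dot(J, residuals) is the 2 * J @ r gradient); the toy linear model is made up.

import numpy as np

A = np.array([[1.0, 2.0], [0.5, -1.0], [3.0, 0.0]])
b = np.array([1.0, 0.0, 2.0])
p = np.array([0.3, -0.7])

residuals = A @ p - b
J = A.T                                  # (n_params, n_data): J[i, j] = d residuals[j] / d p[i]
grad = 2 * np.dot(J, residuals)

# Forward-difference check of d(sum(residuals**2))/dp.
eps = 1e-6
numeric = np.array([
    (np.sum((A @ (p + eps * np.eye(2)[k]) - b) ** 2) - np.sum(residuals ** 2)) / eps
    for k in range(2)
])
print(np.allclose(grad, numeric, atol=1e-4))  # True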
sloria/pypi-cli
pypi_cli.py
https://github.com/sloria/pypi-cli/blob/beb007bf2bdd285209876ce2758982b5d8b54d5d/pypi_cli.py#L451-L456
def min_version(self): """Version with the fewest downloads.""" data = self.version_downloads if not data: return (None, 0) return min(data.items(), key=lambda item: item[1])
[ "def", "min_version", "(", "self", ")", ":", "data", "=", "self", ".", "version_downloads", "if", "not", "data", ":", "return", "(", "None", ",", "0", ")", "return", "min", "(", "data", ".", "items", "(", ")", ",", "key", "=", "lambda", "item", ":", "item", "[", "1", "]", ")" ]
Version with the fewest downloads.
[ "Version", "with", "the", "fewest", "downloads", "." ]
python
train
35.5
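The min(..., key=...) idiom from the min_version record above, shown on a made-up downloads dict:

downloads = {'1.0': 120, '1.1': 45, '2.0': 300}
print(min(downloads.items(), key=lambda item: item[1]))  # ('1.1', 45)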
sanger-pathogens/Fastaq
pyfastaq/sequences.py
https://github.com/sanger-pathogens/Fastaq/blob/2c775c846d2491678a9637daa320592e02c26c72/pyfastaq/sequences.py#L237-L248
def add_insertions(self, skip=10, window=1, test=False): '''Adds a random base within window bases around every skip bases. e.g. skip=10, window=1 means a random base added somwhere in theintervals [9,11], [19,21] ... ''' assert 2 * window < skip new_seq = list(self.seq) for i in range(len(self) - skip, 0, -skip): pos = random.randrange(i - window, i + window + 1) base = random.choice(['A', 'C', 'G', 'T']) if test: base = 'N' new_seq.insert(pos, base) self.seq = ''.join(new_seq)
[ "def", "add_insertions", "(", "self", ",", "skip", "=", "10", ",", "window", "=", "1", ",", "test", "=", "False", ")", ":", "assert", "2", "*", "window", "<", "skip", "new_seq", "=", "list", "(", "self", ".", "seq", ")", "for", "i", "in", "range", "(", "len", "(", "self", ")", "-", "skip", ",", "0", ",", "-", "skip", ")", ":", "pos", "=", "random", ".", "randrange", "(", "i", "-", "window", ",", "i", "+", "window", "+", "1", ")", "base", "=", "random", ".", "choice", "(", "[", "'A'", ",", "'C'", ",", "'G'", ",", "'T'", "]", ")", "if", "test", ":", "base", "=", "'N'", "new_seq", ".", "insert", "(", "pos", ",", "base", ")", "self", ".", "seq", "=", "''", ".", "join", "(", "new_seq", ")" ]
Adds a random base within window bases around every skip bases. e.g. skip=10, window=1 means a random base added somewhere in the intervals [9,11], [19,21] ...
[ "Adds", "a", "random", "base", "within", "window", "bases", "around", "every", "skip", "bases", ".", "e", ".", "g", ".", "skip", "=", "10", "window", "=", "1", "means", "a", "random", "base", "added", "somwhere", "in", "theintervals", "[", "9", "11", "]", "[", "19", "21", "]", "..." ]
python
valid
48.083333
radjkarl/imgProcessor
imgProcessor/camera/flatField/vignettingFromRandomSteps.py
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/flatField/vignettingFromRandomSteps.py#L169-L197
def error(self, nCells=15): ''' calculate the standard deviation of all fitted images, averaged to a grid ''' s0, s1 = self.fits[0].shape aR = s0 / s1 if aR > 1: ss0 = int(nCells) ss1 = int(ss0 / aR) else: ss1 = int(nCells) ss0 = int(ss1 * aR) L = len(self.fits) arr = np.array(self.fits) arr[np.array(self._fit_masks)] = np.nan avg = np.tile(np.nanmean(arr, axis=0), (L, 1, 1)) arr = (arr - avg) / avg out = np.empty(shape=(L, ss0, ss1)) with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) for n, f in enumerate(arr): out[n] = subCell2DFnArray(f, np.nanmean, (ss0, ss1)) return np.nanmean(out**2)**0.5
[ "def", "error", "(", "self", ",", "nCells", "=", "15", ")", ":", "s0", ",", "s1", "=", "self", ".", "fits", "[", "0", "]", ".", "shape", "aR", "=", "s0", "/", "s1", "if", "aR", ">", "1", ":", "ss0", "=", "int", "(", "nCells", ")", "ss1", "=", "int", "(", "ss0", "/", "aR", ")", "else", ":", "ss1", "=", "int", "(", "nCells", ")", "ss0", "=", "int", "(", "ss1", "*", "aR", ")", "L", "=", "len", "(", "self", ".", "fits", ")", "arr", "=", "np", ".", "array", "(", "self", ".", "fits", ")", "arr", "[", "np", ".", "array", "(", "self", ".", "_fit_masks", ")", "]", "=", "np", ".", "nan", "avg", "=", "np", ".", "tile", "(", "np", ".", "nanmean", "(", "arr", ",", "axis", "=", "0", ")", ",", "(", "L", ",", "1", ",", "1", ")", ")", "arr", "=", "(", "arr", "-", "avg", ")", "/", "avg", "out", "=", "np", ".", "empty", "(", "shape", "=", "(", "L", ",", "ss0", ",", "ss1", ")", ")", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ",", "category", "=", "RuntimeWarning", ")", "for", "n", ",", "f", "in", "enumerate", "(", "arr", ")", ":", "out", "[", "n", "]", "=", "subCell2DFnArray", "(", "f", ",", "np", ".", "nanmean", ",", "(", "ss0", ",", "ss1", ")", ")", "return", "np", ".", "nanmean", "(", "out", "**", "2", ")", "**", "0.5" ]
calculate the standard deviation of all fitted images, averaged to a grid
[ "calculate", "the", "standard", "deviation", "of", "all", "fitted", "images", "averaged", "to", "a", "grid" ]
python
train
29.689655
ranaroussi/ezibpy
ezibpy/utils.py
https://github.com/ranaroussi/ezibpy/blob/1a9d4bf52018abd2a01af7c991d7cf00cda53e0c/ezibpy/utils.py#L204-L207
def order_to_dict(order): """Convert an IBPy Order object to a dict containing any non-default values.""" default = Order() return {field: val for field, val in vars(order).items() if val != getattr(default, field, None)}
[ "def", "order_to_dict", "(", "order", ")", ":", "default", "=", "Order", "(", ")", "return", "{", "field", ":", "val", "for", "field", ",", "val", "in", "vars", "(", "order", ")", ".", "items", "(", ")", "if", "val", "!=", "getattr", "(", "default", ",", "field", ",", "None", ")", "}" ]
Convert an IBPy Order object to a dict containing any non-default values.
[ "Convert", "an", "IBPy", "Order", "object", "to", "a", "dict", "containing", "any", "non", "-", "default", "values", "." ]
python
train
57.5
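A minimal dependency-free sketch of the order_to_dict() idea; the Order class below is only a stand-in for IBPy's ib.ext.Order that ezibpy actually uses.

class Order:                      # stand-in for IBPy's ib.ext.Order.Order
    def __init__(self):
        self.m_action = "BUY"
        self.m_totalQuantity = 0
        self.m_orderType = "LMT"
        self.m_lmtPrice = 0.0

def order_to_dict(order):
    # Keep only the fields that differ from a freshly constructed Order.
    default = Order()
    return {f: v for f, v in vars(order).items() if v != getattr(default, f, None)}

o = Order()
o.m_totalQuantity = 100
o.m_lmtPrice = 9.99
print(order_to_dict(o))           # {'m_totalQuantity': 100, 'm_lmtPrice': 9.99}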
GoogleCloudPlatform/compute-image-packages
packages/python-google-compute-engine/google_compute_engine/boto/compute_auth.py
https://github.com/GoogleCloudPlatform/compute-image-packages/blob/53ea8cd069fb4d9a1984d1c167e54c133033f8da/packages/python-google-compute-engine/google_compute_engine/boto/compute_auth.py#L50-L57
def _GetGsScopes(self): """Return all Google Storage scopes available on this VM.""" service_accounts = self.watcher.GetMetadata(metadata_key=self.metadata_key) try: scopes = service_accounts[self.service_account]['scopes'] return list(GS_SCOPES.intersection(set(scopes))) if scopes else None except KeyError: return None
[ "def", "_GetGsScopes", "(", "self", ")", ":", "service_accounts", "=", "self", ".", "watcher", ".", "GetMetadata", "(", "metadata_key", "=", "self", ".", "metadata_key", ")", "try", ":", "scopes", "=", "service_accounts", "[", "self", ".", "service_account", "]", "[", "'scopes'", "]", "return", "list", "(", "GS_SCOPES", ".", "intersection", "(", "set", "(", "scopes", ")", ")", ")", "if", "scopes", "else", "None", "except", "KeyError", ":", "return", "None" ]
Return all Google Storage scopes available on this VM.
[ "Return", "all", "Google", "Storage", "scopes", "available", "on", "this", "VM", "." ]
python
train
43.5
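The scope-intersection logic of _GetGsScopes(), shown stand-alone with illustrative metadata; the real method obtains service_accounts from the GCE metadata server, and the exact GS_SCOPES set shipped in the package may differ.

GS_SCOPES = {
    'https://www.googleapis.com/auth/devstorage.read_only',
    'https://www.googleapis.com/auth/devstorage.read_write',
    'https://www.googleapis.com/auth/devstorage.full_control',
}

service_accounts = {
    'default': {'scopes': [
        'https://www.googleapis.com/auth/devstorage.read_write',
        'https://www.googleapis.com/auth/logging.write',
    ]},
}

def get_gs_scopes(service_accounts, account='default'):
    try:
        scopes = service_accounts[account]['scopes']
        return list(GS_SCOPES.intersection(set(scopes))) if scopes else None
    except KeyError:
        return None

print(get_gs_scopes(service_accounts))   # only the devstorage.read_write scope survives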
siznax/wptools
wptools/query.py
https://github.com/siznax/wptools/blob/100eaea585c34aa9ad87a9eda8982bb4898f6ec9/wptools/query.py#L370-L392
def wikidata(self, title, wikibase=None): """ Returns Wikidata query string """ self.domain = 'www.wikidata.org' self.uri = self.wiki_uri(self.domain) query = self.WIKIDATA.substitute( WIKI=self.uri, ENDPOINT=self.endpoint, LANG=self.variant or self.lang, PROPS="aliases|info|claims|descriptions|labels|sitelinks") if wikibase: query += "&ids=%s" % wikibase elif title: title = safequote(title) query += "&sites=%swiki" % self.lang query += "&titles=%s" % title self.set_status('wikidata', wikibase or title) return query
[ "def", "wikidata", "(", "self", ",", "title", ",", "wikibase", "=", "None", ")", ":", "self", ".", "domain", "=", "'www.wikidata.org'", "self", ".", "uri", "=", "self", ".", "wiki_uri", "(", "self", ".", "domain", ")", "query", "=", "self", ".", "WIKIDATA", ".", "substitute", "(", "WIKI", "=", "self", ".", "uri", ",", "ENDPOINT", "=", "self", ".", "endpoint", ",", "LANG", "=", "self", ".", "variant", "or", "self", ".", "lang", ",", "PROPS", "=", "\"aliases|info|claims|descriptions|labels|sitelinks\"", ")", "if", "wikibase", ":", "query", "+=", "\"&ids=%s\"", "%", "wikibase", "elif", "title", ":", "title", "=", "safequote", "(", "title", ")", "query", "+=", "\"&sites=%swiki\"", "%", "self", ".", "lang", "query", "+=", "\"&titles=%s\"", "%", "title", "self", ".", "set_status", "(", "'wikidata'", ",", "wikibase", "or", "title", ")", "return", "query" ]
Returns Wikidata query string
[ "Returns", "Wikidata", "query", "string" ]
python
train
29.521739
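A hedged usage sketch of the public wptools API that ends up building this Wikidata query; it assumes wptools is installed and that network access is available, and the printed key is illustrative.

import wptools

page = wptools.page(wikibase='Q42')   # Douglas Adams, addressed by Wikidata item id
page.get_wikidata()                   # issues the query assembled by query.wikidata()
print(page.data.get('label'))         # e.g. 'Douglas Adams'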
saltstack/salt
salt/cli/daemons.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cli/daemons.py#L130-L190
def prepare(self): ''' Run the preparation sequence required to start a salt master server. If sub-classed, don't **ever** forget to run: super(YourSubClass, self).prepare() ''' super(Master, self).prepare() try: if self.config['verify_env']: v_dirs = [ self.config['pki_dir'], os.path.join(self.config['pki_dir'], 'minions'), os.path.join(self.config['pki_dir'], 'minions_pre'), os.path.join(self.config['pki_dir'], 'minions_denied'), os.path.join(self.config['pki_dir'], 'minions_autosign'), os.path.join(self.config['pki_dir'], 'minions_rejected'), self.config['cachedir'], os.path.join(self.config['cachedir'], 'jobs'), os.path.join(self.config['cachedir'], 'proc'), self.config['sock_dir'], self.config['token_dir'], self.config['syndic_dir'], self.config['sqlite_queue_dir'], ] verify_env( v_dirs, self.config['user'], permissive=self.config['permissive_pki_access'], root_dir=self.config['root_dir'], pki_dir=self.config['pki_dir'], ) # Clear out syndics from cachedir for syndic_file in os.listdir(self.config['syndic_dir']): os.remove(os.path.join(self.config['syndic_dir'], syndic_file)) except OSError as error: self.environment_failure(error) self.setup_logfile_logger() verify_log(self.config) self.action_log_info('Setting up') # TODO: AIO core is separate from transport if not verify_socket(self.config['interface'], self.config['publish_port'], self.config['ret_port']): self.shutdown(4, 'The ports are not available to bind') self.config['interface'] = ip_bracket(self.config['interface']) migrations.migrate_paths(self.config) # Late import so logging works correctly import salt.master self.master = salt.master.Master(self.config) self.daemonize_if_required() self.set_pidfile() salt.utils.process.notify_systemd()
[ "def", "prepare", "(", "self", ")", ":", "super", "(", "Master", ",", "self", ")", ".", "prepare", "(", ")", "try", ":", "if", "self", ".", "config", "[", "'verify_env'", "]", ":", "v_dirs", "=", "[", "self", ".", "config", "[", "'pki_dir'", "]", ",", "os", ".", "path", ".", "join", "(", "self", ".", "config", "[", "'pki_dir'", "]", ",", "'minions'", ")", ",", "os", ".", "path", ".", "join", "(", "self", ".", "config", "[", "'pki_dir'", "]", ",", "'minions_pre'", ")", ",", "os", ".", "path", ".", "join", "(", "self", ".", "config", "[", "'pki_dir'", "]", ",", "'minions_denied'", ")", ",", "os", ".", "path", ".", "join", "(", "self", ".", "config", "[", "'pki_dir'", "]", ",", "'minions_autosign'", ")", ",", "os", ".", "path", ".", "join", "(", "self", ".", "config", "[", "'pki_dir'", "]", ",", "'minions_rejected'", ")", ",", "self", ".", "config", "[", "'cachedir'", "]", ",", "os", ".", "path", ".", "join", "(", "self", ".", "config", "[", "'cachedir'", "]", ",", "'jobs'", ")", ",", "os", ".", "path", ".", "join", "(", "self", ".", "config", "[", "'cachedir'", "]", ",", "'proc'", ")", ",", "self", ".", "config", "[", "'sock_dir'", "]", ",", "self", ".", "config", "[", "'token_dir'", "]", ",", "self", ".", "config", "[", "'syndic_dir'", "]", ",", "self", ".", "config", "[", "'sqlite_queue_dir'", "]", ",", "]", "verify_env", "(", "v_dirs", ",", "self", ".", "config", "[", "'user'", "]", ",", "permissive", "=", "self", ".", "config", "[", "'permissive_pki_access'", "]", ",", "root_dir", "=", "self", ".", "config", "[", "'root_dir'", "]", ",", "pki_dir", "=", "self", ".", "config", "[", "'pki_dir'", "]", ",", ")", "# Clear out syndics from cachedir", "for", "syndic_file", "in", "os", ".", "listdir", "(", "self", ".", "config", "[", "'syndic_dir'", "]", ")", ":", "os", ".", "remove", "(", "os", ".", "path", ".", "join", "(", "self", ".", "config", "[", "'syndic_dir'", "]", ",", "syndic_file", ")", ")", "except", "OSError", "as", "error", ":", "self", ".", "environment_failure", "(", "error", ")", "self", ".", "setup_logfile_logger", "(", ")", "verify_log", "(", "self", ".", "config", ")", "self", ".", "action_log_info", "(", "'Setting up'", ")", "# TODO: AIO core is separate from transport", "if", "not", "verify_socket", "(", "self", ".", "config", "[", "'interface'", "]", ",", "self", ".", "config", "[", "'publish_port'", "]", ",", "self", ".", "config", "[", "'ret_port'", "]", ")", ":", "self", ".", "shutdown", "(", "4", ",", "'The ports are not available to bind'", ")", "self", ".", "config", "[", "'interface'", "]", "=", "ip_bracket", "(", "self", ".", "config", "[", "'interface'", "]", ")", "migrations", ".", "migrate_paths", "(", "self", ".", "config", ")", "# Late import so logging works correctly", "import", "salt", ".", "master", "self", ".", "master", "=", "salt", ".", "master", ".", "Master", "(", "self", ".", "config", ")", "self", ".", "daemonize_if_required", "(", ")", "self", ".", "set_pidfile", "(", ")", "salt", ".", "utils", ".", "process", ".", "notify_systemd", "(", ")" ]
Run the preparation sequence required to start a salt master server. If sub-classed, don't **ever** forget to run: super(YourSubClass, self).prepare()
[ "Run", "the", "preparation", "sequence", "required", "to", "start", "a", "salt", "master", "server", "." ]
python
train
41.868852
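A minimal subclassing sketch for the point the docstring stresses; it assumes salt is importable, and everything beyond the super() call is left to the subclass.

import salt.cli.daemons

class CustomMaster(salt.cli.daemons.Master):
    def prepare(self):
        super(CustomMaster, self).prepare()   # the one thing the docstring insists on
        # ... additional, subclass-specific setup would go here ...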
aiogram/aiogram
aiogram/types/user.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/types/user.py#L48-L58
def locale(self) -> babel.core.Locale or None: """ Get user's locale :return: :class:`babel.core.Locale` """ if not self.language_code: return None if not hasattr(self, '_locale'): setattr(self, '_locale', babel.core.Locale.parse(self.language_code, sep='-')) return getattr(self, '_locale')
[ "def", "locale", "(", "self", ")", "->", "babel", ".", "core", ".", "Locale", "or", "None", ":", "if", "not", "self", ".", "language_code", ":", "return", "None", "if", "not", "hasattr", "(", "self", ",", "'_locale'", ")", ":", "setattr", "(", "self", ",", "'_locale'", ",", "babel", ".", "core", ".", "Locale", ".", "parse", "(", "self", ".", "language_code", ",", "sep", "=", "'-'", ")", ")", "return", "getattr", "(", "self", ",", "'_locale'", ")" ]
Get user's locale :return: :class:`babel.core.Locale`
[ "Get", "user", "s", "locale" ]
python
train
32.909091
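What the locale property does on first access, shown stand-alone; it assumes the babel package, which aiogram relies on here, is installed.

import babel.core

language_code = 'en-US'                    # as delivered by the Telegram Bot API
locale = babel.core.Locale.parse(language_code, sep='-')
print(locale.language, locale.territory)   # en US
# User.locale caches the parsed Locale on the instance (self._locale)
# and returns None when the user has no language_code at all.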
scheibler/khard
khard/khard.py
https://github.com/scheibler/khard/blob/0f69430c2680f1ff5f073a977a3c5b753b96cc17/khard/khard.py#L567-L593
def load_address_books(names, config, search_queries): """Load all address books with the given names from the config. :param names: the address books to load :type names: list(str) :param config: the config instance to use when looking up address books :type config: config.Config :param search_queries: a mapping of address book names to search queries :type search_queries: dict :yields: the loaded address books :ytype: addressbook.AddressBook """ all_names = {str(book) for book in config.abooks} if not names: names = all_names elif not all_names.issuperset(names): sys.exit('Error: The entered address books "{}" do not exist.\n' 'Possible values are: {}'.format( '", "'.join(set(names) - all_names), ', '.join(all_names))) # load address books which are defined in the configuration file for name in names: address_book = config.abook.get_abook(name) address_book.load(search_queries[address_book.name], search_in_source_files=config.search_in_source_files()) yield address_book
[ "def", "load_address_books", "(", "names", ",", "config", ",", "search_queries", ")", ":", "all_names", "=", "{", "str", "(", "book", ")", "for", "book", "in", "config", ".", "abooks", "}", "if", "not", "names", ":", "names", "=", "all_names", "elif", "not", "all_names", ".", "issuperset", "(", "names", ")", ":", "sys", ".", "exit", "(", "'Error: The entered address books \"{}\" do not exist.\\n'", "'Possible values are: {}'", ".", "format", "(", "'\", \"'", ".", "join", "(", "set", "(", "names", ")", "-", "all_names", ")", ",", "', '", ".", "join", "(", "all_names", ")", ")", ")", "# load address books which are defined in the configuration file", "for", "name", "in", "names", ":", "address_book", "=", "config", ".", "abook", ".", "get_abook", "(", "name", ")", "address_book", ".", "load", "(", "search_queries", "[", "address_book", ".", "name", "]", ",", "search_in_source_files", "=", "config", ".", "search_in_source_files", "(", ")", ")", "yield", "address_book" ]
Load all address books with the given names from the config. :param names: the address books to load :type names: list(str) :param config: the config instance to use when looking up address books :type config: config.Config :param search_queries: a mapping of address book names to search queries :type search_queries: dict :yields: the loaded address books :ytype: addressbook.AddressBook
[ "Load", "all", "address", "books", "with", "the", "given", "names", "from", "the", "config", "." ]
python
test
42.111111
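The validation branch of load_address_books(), shown stand-alone with plain sets; the address-book names are made up.

all_names = {'private', 'work', 'family'}      # books defined in the khard config
names = ['work', 'spam']                       # what the user asked for on the CLI
missing = set(names) - all_names
if missing:
    # same message load_address_books() would exit with
    print('Error: The entered address books "{}" do not exist.\n'
          'Possible values are: {}'.format('", "'.join(missing), ', '.join(all_names)))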
RRZE-HPC/kerncraft
kerncraft/models/roofline.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/models/roofline.py#L62-L161
def calculate_cache_access(self): """Apply cache prediction to generate cache access behaviour.""" self.results = {'misses': self.predictor.get_misses(), 'hits': self.predictor.get_hits(), 'evicts': self.predictor.get_evicts(), 'verbose infos': self.predictor.get_infos(), # only for verbose outputs 'bottleneck level': 0, 'mem bottlenecks': []} element_size = self.kernel.datatypes_size[self.kernel.datatype] cacheline_size = float(self.machine['cacheline size']) elements_per_cacheline = int(cacheline_size // element_size) total_flops = sum(self.kernel._flops.values())*elements_per_cacheline # TODO let user choose threads_per_core: threads_per_core = 1 # Compile relevant information # CPU-L1 stats (in bytes!) # We compile CPU-L1 stats on our own, because cacheprediction only works on cache lines read_offsets, write_offsets = zip(*list(self.kernel.compile_global_offsets( iteration=range(0, elements_per_cacheline)))) read_offsets = set([item for sublist in read_offsets if sublist is not None for item in sublist]) write_offsets = set([item for sublist in write_offsets if sublist is not None for item in sublist]) write_streams = len(write_offsets) read_streams = len(read_offsets) + write_streams # write-allocate total_loads = read_streams * element_size # total_evicts = write_streams * element_size bw, measurement_kernel = self.machine.get_bandwidth( 0, read_streams - write_streams, # no write-allocate in L1 write_streams, threads_per_core, cores=self.cores) # Calculate performance (arithmetic intensity * bandwidth with # arithmetic intensity = flops / bytes loaded ) if total_loads == 0: # This happens in case of full-caching arith_intens = None performance = None else: arith_intens = float(total_flops)/total_loads performance = PrefixedUnit(arith_intens * float(bw), 'FLOP/s') self.results['mem bottlenecks'].append({ 'performance': self.conv_perf(PrefixedUnit(performance, 'FLOP/s')), 'level': self.machine['memory hierarchy'][0]['level'], 'arithmetic intensity': arith_intens, 'bw kernel': measurement_kernel, 'bandwidth': bw, 'bytes transfered': total_loads}) self.results['bottleneck level'] = len(self.results['mem bottlenecks'])-1 self.results['min performance'] = self.conv_perf(performance) # for other cache and memory levels: for cache_level, cache_info in list(enumerate(self.machine['memory hierarchy']))[:-1]: # Compiling stats (in bytes!) 
total_misses = self.results['misses'][cache_level]*cacheline_size total_evicts = self.results['evicts'][cache_level]*cacheline_size # choose bw according to cache level and problem # first, compile stream counts at current cache level # write-allocate is allready resolved above read_streams = self.results['misses'][cache_level] write_streams = self.results['evicts'][cache_level] # second, try to find best fitting kernel (closest to stream seen stream counts): bw, measurement_kernel = self.machine.get_bandwidth( cache_level+1, read_streams, write_streams, threads_per_core, cores=self.cores) # Calculate performance (arithmetic intensity * bandwidth with # arithmetic intensity = flops / bytes transfered) bytes_transfered = total_misses + total_evicts if bytes_transfered == 0: # This happens in case of full-caching arith_intens = float('inf') performance = PrefixedUnit(float('inf'), 'FLOP/s') else: arith_intens = float(total_flops)/bytes_transfered performance = PrefixedUnit(arith_intens * float(bw), 'FLOP/s') self.results['mem bottlenecks'].append({ 'performance': self.conv_perf(performance), 'level': (self.machine['memory hierarchy'][cache_level + 1]['level']), 'arithmetic intensity': arith_intens, 'bw kernel': measurement_kernel, 'bandwidth': bw, 'bytes transfered': bytes_transfered}) if performance < self.results.get('min performance', {'FLOP/s': performance})['FLOP/s']: self.results['bottleneck level'] = len(self.results['mem bottlenecks'])-1 self.results['min performance'] = self.conv_perf(performance) return self.results
[ "def", "calculate_cache_access", "(", "self", ")", ":", "self", ".", "results", "=", "{", "'misses'", ":", "self", ".", "predictor", ".", "get_misses", "(", ")", ",", "'hits'", ":", "self", ".", "predictor", ".", "get_hits", "(", ")", ",", "'evicts'", ":", "self", ".", "predictor", ".", "get_evicts", "(", ")", ",", "'verbose infos'", ":", "self", ".", "predictor", ".", "get_infos", "(", ")", ",", "# only for verbose outputs", "'bottleneck level'", ":", "0", ",", "'mem bottlenecks'", ":", "[", "]", "}", "element_size", "=", "self", ".", "kernel", ".", "datatypes_size", "[", "self", ".", "kernel", ".", "datatype", "]", "cacheline_size", "=", "float", "(", "self", ".", "machine", "[", "'cacheline size'", "]", ")", "elements_per_cacheline", "=", "int", "(", "cacheline_size", "//", "element_size", ")", "total_flops", "=", "sum", "(", "self", ".", "kernel", ".", "_flops", ".", "values", "(", ")", ")", "*", "elements_per_cacheline", "# TODO let user choose threads_per_core:", "threads_per_core", "=", "1", "# Compile relevant information", "# CPU-L1 stats (in bytes!)", "# We compile CPU-L1 stats on our own, because cacheprediction only works on cache lines", "read_offsets", ",", "write_offsets", "=", "zip", "(", "*", "list", "(", "self", ".", "kernel", ".", "compile_global_offsets", "(", "iteration", "=", "range", "(", "0", ",", "elements_per_cacheline", ")", ")", ")", ")", "read_offsets", "=", "set", "(", "[", "item", "for", "sublist", "in", "read_offsets", "if", "sublist", "is", "not", "None", "for", "item", "in", "sublist", "]", ")", "write_offsets", "=", "set", "(", "[", "item", "for", "sublist", "in", "write_offsets", "if", "sublist", "is", "not", "None", "for", "item", "in", "sublist", "]", ")", "write_streams", "=", "len", "(", "write_offsets", ")", "read_streams", "=", "len", "(", "read_offsets", ")", "+", "write_streams", "# write-allocate", "total_loads", "=", "read_streams", "*", "element_size", "# total_evicts = write_streams * element_size", "bw", ",", "measurement_kernel", "=", "self", ".", "machine", ".", "get_bandwidth", "(", "0", ",", "read_streams", "-", "write_streams", ",", "# no write-allocate in L1", "write_streams", ",", "threads_per_core", ",", "cores", "=", "self", ".", "cores", ")", "# Calculate performance (arithmetic intensity * bandwidth with", "# arithmetic intensity = flops / bytes loaded )", "if", "total_loads", "==", "0", ":", "# This happens in case of full-caching", "arith_intens", "=", "None", "performance", "=", "None", "else", ":", "arith_intens", "=", "float", "(", "total_flops", ")", "/", "total_loads", "performance", "=", "PrefixedUnit", "(", "arith_intens", "*", "float", "(", "bw", ")", ",", "'FLOP/s'", ")", "self", ".", "results", "[", "'mem bottlenecks'", "]", ".", "append", "(", "{", "'performance'", ":", "self", ".", "conv_perf", "(", "PrefixedUnit", "(", "performance", ",", "'FLOP/s'", ")", ")", ",", "'level'", ":", "self", ".", "machine", "[", "'memory hierarchy'", "]", "[", "0", "]", "[", "'level'", "]", ",", "'arithmetic intensity'", ":", "arith_intens", ",", "'bw kernel'", ":", "measurement_kernel", ",", "'bandwidth'", ":", "bw", ",", "'bytes transfered'", ":", "total_loads", "}", ")", "self", ".", "results", "[", "'bottleneck level'", "]", "=", "len", "(", "self", ".", "results", "[", "'mem bottlenecks'", "]", ")", "-", "1", "self", ".", "results", "[", "'min performance'", "]", "=", "self", ".", "conv_perf", "(", "performance", ")", "# for other cache and memory levels:", "for", "cache_level", ",", "cache_info", "in", "list", "(", "enumerate", 
"(", "self", ".", "machine", "[", "'memory hierarchy'", "]", ")", ")", "[", ":", "-", "1", "]", ":", "# Compiling stats (in bytes!)", "total_misses", "=", "self", ".", "results", "[", "'misses'", "]", "[", "cache_level", "]", "*", "cacheline_size", "total_evicts", "=", "self", ".", "results", "[", "'evicts'", "]", "[", "cache_level", "]", "*", "cacheline_size", "# choose bw according to cache level and problem", "# first, compile stream counts at current cache level", "# write-allocate is allready resolved above", "read_streams", "=", "self", ".", "results", "[", "'misses'", "]", "[", "cache_level", "]", "write_streams", "=", "self", ".", "results", "[", "'evicts'", "]", "[", "cache_level", "]", "# second, try to find best fitting kernel (closest to stream seen stream counts):", "bw", ",", "measurement_kernel", "=", "self", ".", "machine", ".", "get_bandwidth", "(", "cache_level", "+", "1", ",", "read_streams", ",", "write_streams", ",", "threads_per_core", ",", "cores", "=", "self", ".", "cores", ")", "# Calculate performance (arithmetic intensity * bandwidth with", "# arithmetic intensity = flops / bytes transfered)", "bytes_transfered", "=", "total_misses", "+", "total_evicts", "if", "bytes_transfered", "==", "0", ":", "# This happens in case of full-caching", "arith_intens", "=", "float", "(", "'inf'", ")", "performance", "=", "PrefixedUnit", "(", "float", "(", "'inf'", ")", ",", "'FLOP/s'", ")", "else", ":", "arith_intens", "=", "float", "(", "total_flops", ")", "/", "bytes_transfered", "performance", "=", "PrefixedUnit", "(", "arith_intens", "*", "float", "(", "bw", ")", ",", "'FLOP/s'", ")", "self", ".", "results", "[", "'mem bottlenecks'", "]", ".", "append", "(", "{", "'performance'", ":", "self", ".", "conv_perf", "(", "performance", ")", ",", "'level'", ":", "(", "self", ".", "machine", "[", "'memory hierarchy'", "]", "[", "cache_level", "+", "1", "]", "[", "'level'", "]", ")", ",", "'arithmetic intensity'", ":", "arith_intens", ",", "'bw kernel'", ":", "measurement_kernel", ",", "'bandwidth'", ":", "bw", ",", "'bytes transfered'", ":", "bytes_transfered", "}", ")", "if", "performance", "<", "self", ".", "results", ".", "get", "(", "'min performance'", ",", "{", "'FLOP/s'", ":", "performance", "}", ")", "[", "'FLOP/s'", "]", ":", "self", ".", "results", "[", "'bottleneck level'", "]", "=", "len", "(", "self", ".", "results", "[", "'mem bottlenecks'", "]", ")", "-", "1", "self", ".", "results", "[", "'min performance'", "]", "=", "self", ".", "conv_perf", "(", "performance", ")", "return", "self", ".", "results" ]
Apply cache prediction to generate cache access behaviour.
[ "Apply", "cache", "prediction", "to", "generate", "cache", "access", "behaviour", "." ]
python
test
49.01
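A stand-alone sketch of the roofline bottleneck search this method performs: for every memory level, attainable performance is estimated as arithmetic intensity times bandwidth, and the minimum wins. All numbers below are invented; the real code derives them from the cache predictor and the machine model.

levels = [                          # (name, bytes transferred, bandwidth in B/s)
    ('L1',  64.0,  80e9),
    ('L2', 128.0,  40e9),
    ('MEM', 64.0,  20e9),
]
total_flops = 16.0                  # FLOPs per cache line of work

bottleneck = None
for name, bytes_transferred, bw in levels:
    arith_intensity = total_flops / bytes_transferred   # FLOP per byte
    perf = arith_intensity * bw                         # attainable FLOP/s at this level
    if bottleneck is None or perf < bottleneck[1]:
        bottleneck = (name, perf)
print('bottleneck: %s at %.1f GFLOP/s' % (bottleneck[0], bottleneck[1] / 1e9))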
DLR-RM/RAFCON
source/rafcon/utils/execution_log.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/utils/execution_log.py#L78-L302
def log_to_collapsed_structure(execution_history_items, throw_on_pickle_error=True, include_erroneous_data_ports=False, full_next=False): """ Collapsed structure means that all history items belonging to the same state execution are merged together into one object (e.g. CallItem and ReturnItem of an ExecutionState). This is based on the log structure in which all Items which belong together have the same run_id. The collapsed items hold input as well as output data (direct and scoped), and the outcome the state execution. :param dict execution_history_items: history items, in the simplest case directly the opened shelve log file :param bool throw_on_pickle_error: flag if an error is thrown if an object cannot be un-pickled :param bool include_erroneous_data_ports: flag if to include erroneous data ports :param bool full_next: flag to indicate if the next relationship has also to be created at the end of container states :return: start_item, the StateMachineStartItem of the log file next_, a dict mapping run_id --> run_id of the next executed state on the same hierarchy level concurrent, a dict mapping run_id --> []list of run_ids of the concurrent next executed states (if present) hierarchy, a dict mapping run_id --> run_id of the next executed state on the deeper hierarchy level (the start state within that HierarchyState) items, a dict mapping run_id --> collapsed representation of the execution of the state with that run_id :rtype: tuple """ # for debugging purposes # execution_history_items_dict = dict() # for k, v in execution_history_items.items(): # execution_history_items_dict[k] = v start_item, previous, next_, concurrent, grouped = log_to_raw_structure(execution_history_items) start_item = None collapsed_next = {} collapsed_concurrent ={} collapsed_hierarchy = {} collapsed_items = {} # single state executions are not supported if len(next_) == 0 or len(next_) == 1: for rid, gitems in grouped.items(): if gitems[0]['item_type'] == 'StateMachineStartItem': item = gitems[0] execution_item = {} ## add base properties will throw if not existing for l in ['description', 'path_by_name', 'state_name', 'run_id', 'state_type', 'path', 'timestamp', 'root_state_storage_id', 'state_machine_version', 'used_rafcon_version', 'creation_time', 'last_update', 'os_environment']: try: execution_item[l] = item[l] except KeyError: logger.warning("Key {} not in history start item".format(str(l))) ## add extended properties (added in later rafcon versions), ## will add default value if not existing instead for l, default in [('semantic_data', {}), ('is_library', None), ('library_state_name', None), ('library_name', None), ('library_path', None)]: execution_item[l] = item.get(l, default) start_item = execution_item return start_item, collapsed_next, collapsed_concurrent, collapsed_hierarchy, collapsed_items # build collapsed items for rid, gitems in grouped.items(): if gitems[0]['item_type'] == 'StateMachineStartItem': item = gitems[0] execution_item = {} ## add base properties will throw if not existing for l in ['description', 'path_by_name', 'state_name', 'run_id', 'state_type', 'path', 'timestamp', 'root_state_storage_id', 'state_machine_version', 'used_rafcon_version', 'creation_time', 'last_update', 'os_environment']: try: execution_item[l] = item[l] except KeyError: logger.warning("Key {} not in history start item".format(str(l))) ## add extended properties (added in later rafcon versions), ## will add default value if not existing instead for l, default in [('semantic_data', {}), ('is_library', 
None), ('library_state_name', None), ('library_name', None), ('library_path', None)]: execution_item[l] = item.get(l, default) start_item = execution_item collapsed_next[rid] = execution_history_items[next_[gitems[0]['history_item_id']]]['run_id'] collapsed_items[rid] = execution_item elif gitems[0]['state_type'] == 'ExecutionState' or \ gitems[0]['state_type'] == 'HierarchyState' or \ gitems[0]['state_type'] == 'LibraryState' or \ 'Concurrency' in gitems[0]['state_type']: # for item in gitems: # if item["description"] is not None: # print(item["item_type"], item["call_type"], item["state_type"], item["state_name"]) # print(item["description"]) # select call and return items for this state try: call_item = gitems[[gitems[i]['item_type'] == 'CallItem' and \ gitems[i]['call_type'] == 'EXECUTE' \ for i in range(len(gitems))].index(True)] except ValueError: # fall back to container call, should only happen for root state try: call_item = gitems[[gitems[i]['item_type'] == 'CallItem' and \ gitems[i]['call_type'] == 'CONTAINER' \ for i in range(len(gitems))].index(True)] except ValueError: logger.warning('Could not find a CallItem in run_id group %s\nThere will probably be log information missing on this execution branch!' % str(rid)) ## create dummy returnitem with the properties referenced later in this code call_item = dict(description=None, history_item_id=None, path_by_name=None, state_name=None, run_id=None, state_type=None, path=None, timestamp=None, input_output_data={}, scoped_data={}) try: return_item = gitems[[gitems[i]['item_type'] == 'ReturnItem' and \ gitems[i]['call_type'] == 'EXECUTE' \ for i in range(len(gitems))].index(True)] except ValueError: # fall back to container call, should only happen for root state try: return_item = gitems[[gitems[i]['item_type'] == 'ReturnItem' and \ gitems[i]['call_type'] == 'CONTAINER' \ for i in range(len(gitems))].index(True)] except ValueError: logger.warning('Could not find a ReturnItem in run_id group %s\nThere will probably be log information missing on this execution branch!' 
% str(rid)) ## create dummy returnitem with the properties referenced later in this code return_item = dict(history_item_id=None, outcome_name=None, outcome_id=None, timestamp=None, input_output_data={}, scoped_data={}) # next item (on same hierarchy level) is always after return item if return_item['history_item_id'] in next_: # no next relationship at the end of containers if execution_history_items[next_[return_item['history_item_id']]]['state_type'] == 'HierarchyState' and execution_history_items[next_[return_item['history_item_id']]]['item_type'] == 'ReturnItem' and execution_history_items[next_[return_item['history_item_id']]]['call_type'] == 'CONTAINER': if full_next: collapsed_next[rid] = execution_history_items[next_[return_item['history_item_id']]]['run_id'] else: pass else: collapsed_next[rid] = execution_history_items[next_[return_item['history_item_id']]]['run_id'] # treat hierarchy level if call_item['history_item_id'] in previous: if execution_history_items[previous[call_item['history_item_id']]]['state_type'] == 'HierarchyState' and execution_history_items[previous[call_item['history_item_id']]]['item_type'] == 'CallItem': prev_rid = execution_history_items[previous[call_item['history_item_id']]]['run_id'] collapsed_hierarchy[prev_rid] = rid # treat concurrency level if execution_history_items[previous[call_item['history_item_id']]]['item_type'] == 'ConcurrencyItem': prev_rid = execution_history_items[previous[call_item['history_item_id']]]['run_id'] if prev_rid in collapsed_concurrent: collapsed_concurrent[prev_rid].append(rid) else: collapsed_concurrent[prev_rid] = [rid] # assemble grouped item execution_item = {} ## add base properties will throw if not existing for l in ['description', 'path_by_name', 'state_name', 'run_id', 'state_type', 'path']: execution_item[l] = call_item[l] ## add extended properties (added in later rafcon versions), ## will add default value if not existing instead for l, default in [('semantic_data', {}), ('is_library', None), ('library_state_name', None), ('library_name', None), ('library_path', None)]: execution_item[l] = return_item.get(l, default) for l in ['outcome_name', 'outcome_id']: execution_item[l] = return_item[l] for l in ['timestamp']: execution_item[l+'_call'] = call_item[l] execution_item[l+'_return'] = return_item[l] def unpickle_data(data_dict): r = dict() # support backward compatibility if isinstance(data_dict, string_types): # formerly data dict was a json string r = json.loads(data_dict) else: for k, v in data_dict.items(): if not k.startswith('!'): # ! indicates storage error try: r[k] = pickle.loads(v) except Exception as e: if throw_on_pickle_error: raise elif include_erroneous_data_ports: r['!' + k] = (str(e), v) else: pass # ignore elif include_erroneous_data_ports: r[k] = v return r execution_item['data_ins'] = unpickle_data(call_item['input_output_data']) execution_item['data_outs'] = unpickle_data(return_item['input_output_data']) execution_item['scoped_data_ins'] = unpickle_data(call_item['scoped_data']) execution_item['scoped_data_outs'] = unpickle_data(return_item['scoped_data']) execution_item['semantic_data'] = unpickle_data(execution_item['semantic_data']) collapsed_items[rid] = execution_item return start_item, collapsed_next, collapsed_concurrent, collapsed_hierarchy, collapsed_items
[ "def", "log_to_collapsed_structure", "(", "execution_history_items", ",", "throw_on_pickle_error", "=", "True", ",", "include_erroneous_data_ports", "=", "False", ",", "full_next", "=", "False", ")", ":", "# for debugging purposes", "# execution_history_items_dict = dict()", "# for k, v in execution_history_items.items():", "# execution_history_items_dict[k] = v", "start_item", ",", "previous", ",", "next_", ",", "concurrent", ",", "grouped", "=", "log_to_raw_structure", "(", "execution_history_items", ")", "start_item", "=", "None", "collapsed_next", "=", "{", "}", "collapsed_concurrent", "=", "{", "}", "collapsed_hierarchy", "=", "{", "}", "collapsed_items", "=", "{", "}", "# single state executions are not supported", "if", "len", "(", "next_", ")", "==", "0", "or", "len", "(", "next_", ")", "==", "1", ":", "for", "rid", ",", "gitems", "in", "grouped", ".", "items", "(", ")", ":", "if", "gitems", "[", "0", "]", "[", "'item_type'", "]", "==", "'StateMachineStartItem'", ":", "item", "=", "gitems", "[", "0", "]", "execution_item", "=", "{", "}", "## add base properties will throw if not existing", "for", "l", "in", "[", "'description'", ",", "'path_by_name'", ",", "'state_name'", ",", "'run_id'", ",", "'state_type'", ",", "'path'", ",", "'timestamp'", ",", "'root_state_storage_id'", ",", "'state_machine_version'", ",", "'used_rafcon_version'", ",", "'creation_time'", ",", "'last_update'", ",", "'os_environment'", "]", ":", "try", ":", "execution_item", "[", "l", "]", "=", "item", "[", "l", "]", "except", "KeyError", ":", "logger", ".", "warning", "(", "\"Key {} not in history start item\"", ".", "format", "(", "str", "(", "l", ")", ")", ")", "## add extended properties (added in later rafcon versions),", "## will add default value if not existing instead", "for", "l", ",", "default", "in", "[", "(", "'semantic_data'", ",", "{", "}", ")", ",", "(", "'is_library'", ",", "None", ")", ",", "(", "'library_state_name'", ",", "None", ")", ",", "(", "'library_name'", ",", "None", ")", ",", "(", "'library_path'", ",", "None", ")", "]", ":", "execution_item", "[", "l", "]", "=", "item", ".", "get", "(", "l", ",", "default", ")", "start_item", "=", "execution_item", "return", "start_item", ",", "collapsed_next", ",", "collapsed_concurrent", ",", "collapsed_hierarchy", ",", "collapsed_items", "# build collapsed items", "for", "rid", ",", "gitems", "in", "grouped", ".", "items", "(", ")", ":", "if", "gitems", "[", "0", "]", "[", "'item_type'", "]", "==", "'StateMachineStartItem'", ":", "item", "=", "gitems", "[", "0", "]", "execution_item", "=", "{", "}", "## add base properties will throw if not existing", "for", "l", "in", "[", "'description'", ",", "'path_by_name'", ",", "'state_name'", ",", "'run_id'", ",", "'state_type'", ",", "'path'", ",", "'timestamp'", ",", "'root_state_storage_id'", ",", "'state_machine_version'", ",", "'used_rafcon_version'", ",", "'creation_time'", ",", "'last_update'", ",", "'os_environment'", "]", ":", "try", ":", "execution_item", "[", "l", "]", "=", "item", "[", "l", "]", "except", "KeyError", ":", "logger", ".", "warning", "(", "\"Key {} not in history start item\"", ".", "format", "(", "str", "(", "l", ")", ")", ")", "## add extended properties (added in later rafcon versions),", "## will add default value if not existing instead", "for", "l", ",", "default", "in", "[", "(", "'semantic_data'", ",", "{", "}", ")", ",", "(", "'is_library'", ",", "None", ")", ",", "(", "'library_state_name'", ",", "None", ")", ",", "(", "'library_name'", ",", "None", ")", ",", "(", 
"'library_path'", ",", "None", ")", "]", ":", "execution_item", "[", "l", "]", "=", "item", ".", "get", "(", "l", ",", "default", ")", "start_item", "=", "execution_item", "collapsed_next", "[", "rid", "]", "=", "execution_history_items", "[", "next_", "[", "gitems", "[", "0", "]", "[", "'history_item_id'", "]", "]", "]", "[", "'run_id'", "]", "collapsed_items", "[", "rid", "]", "=", "execution_item", "elif", "gitems", "[", "0", "]", "[", "'state_type'", "]", "==", "'ExecutionState'", "or", "gitems", "[", "0", "]", "[", "'state_type'", "]", "==", "'HierarchyState'", "or", "gitems", "[", "0", "]", "[", "'state_type'", "]", "==", "'LibraryState'", "or", "'Concurrency'", "in", "gitems", "[", "0", "]", "[", "'state_type'", "]", ":", "# for item in gitems:", "# if item[\"description\"] is not None:", "# print(item[\"item_type\"], item[\"call_type\"], item[\"state_type\"], item[\"state_name\"])", "# print(item[\"description\"])", "# select call and return items for this state", "try", ":", "call_item", "=", "gitems", "[", "[", "gitems", "[", "i", "]", "[", "'item_type'", "]", "==", "'CallItem'", "and", "gitems", "[", "i", "]", "[", "'call_type'", "]", "==", "'EXECUTE'", "for", "i", "in", "range", "(", "len", "(", "gitems", ")", ")", "]", ".", "index", "(", "True", ")", "]", "except", "ValueError", ":", "# fall back to container call, should only happen for root state", "try", ":", "call_item", "=", "gitems", "[", "[", "gitems", "[", "i", "]", "[", "'item_type'", "]", "==", "'CallItem'", "and", "gitems", "[", "i", "]", "[", "'call_type'", "]", "==", "'CONTAINER'", "for", "i", "in", "range", "(", "len", "(", "gitems", ")", ")", "]", ".", "index", "(", "True", ")", "]", "except", "ValueError", ":", "logger", ".", "warning", "(", "'Could not find a CallItem in run_id group %s\\nThere will probably be log information missing on this execution branch!'", "%", "str", "(", "rid", ")", ")", "## create dummy returnitem with the properties referenced later in this code", "call_item", "=", "dict", "(", "description", "=", "None", ",", "history_item_id", "=", "None", ",", "path_by_name", "=", "None", ",", "state_name", "=", "None", ",", "run_id", "=", "None", ",", "state_type", "=", "None", ",", "path", "=", "None", ",", "timestamp", "=", "None", ",", "input_output_data", "=", "{", "}", ",", "scoped_data", "=", "{", "}", ")", "try", ":", "return_item", "=", "gitems", "[", "[", "gitems", "[", "i", "]", "[", "'item_type'", "]", "==", "'ReturnItem'", "and", "gitems", "[", "i", "]", "[", "'call_type'", "]", "==", "'EXECUTE'", "for", "i", "in", "range", "(", "len", "(", "gitems", ")", ")", "]", ".", "index", "(", "True", ")", "]", "except", "ValueError", ":", "# fall back to container call, should only happen for root state", "try", ":", "return_item", "=", "gitems", "[", "[", "gitems", "[", "i", "]", "[", "'item_type'", "]", "==", "'ReturnItem'", "and", "gitems", "[", "i", "]", "[", "'call_type'", "]", "==", "'CONTAINER'", "for", "i", "in", "range", "(", "len", "(", "gitems", ")", ")", "]", ".", "index", "(", "True", ")", "]", "except", "ValueError", ":", "logger", ".", "warning", "(", "'Could not find a ReturnItem in run_id group %s\\nThere will probably be log information missing on this execution branch!'", "%", "str", "(", "rid", ")", ")", "## create dummy returnitem with the properties referenced later in this code", "return_item", "=", "dict", "(", "history_item_id", "=", "None", ",", "outcome_name", "=", "None", ",", "outcome_id", "=", "None", ",", "timestamp", "=", "None", ",", "input_output_data", "=", 
"{", "}", ",", "scoped_data", "=", "{", "}", ")", "# next item (on same hierarchy level) is always after return item", "if", "return_item", "[", "'history_item_id'", "]", "in", "next_", ":", "# no next relationship at the end of containers", "if", "execution_history_items", "[", "next_", "[", "return_item", "[", "'history_item_id'", "]", "]", "]", "[", "'state_type'", "]", "==", "'HierarchyState'", "and", "execution_history_items", "[", "next_", "[", "return_item", "[", "'history_item_id'", "]", "]", "]", "[", "'item_type'", "]", "==", "'ReturnItem'", "and", "execution_history_items", "[", "next_", "[", "return_item", "[", "'history_item_id'", "]", "]", "]", "[", "'call_type'", "]", "==", "'CONTAINER'", ":", "if", "full_next", ":", "collapsed_next", "[", "rid", "]", "=", "execution_history_items", "[", "next_", "[", "return_item", "[", "'history_item_id'", "]", "]", "]", "[", "'run_id'", "]", "else", ":", "pass", "else", ":", "collapsed_next", "[", "rid", "]", "=", "execution_history_items", "[", "next_", "[", "return_item", "[", "'history_item_id'", "]", "]", "]", "[", "'run_id'", "]", "# treat hierarchy level", "if", "call_item", "[", "'history_item_id'", "]", "in", "previous", ":", "if", "execution_history_items", "[", "previous", "[", "call_item", "[", "'history_item_id'", "]", "]", "]", "[", "'state_type'", "]", "==", "'HierarchyState'", "and", "execution_history_items", "[", "previous", "[", "call_item", "[", "'history_item_id'", "]", "]", "]", "[", "'item_type'", "]", "==", "'CallItem'", ":", "prev_rid", "=", "execution_history_items", "[", "previous", "[", "call_item", "[", "'history_item_id'", "]", "]", "]", "[", "'run_id'", "]", "collapsed_hierarchy", "[", "prev_rid", "]", "=", "rid", "# treat concurrency level", "if", "execution_history_items", "[", "previous", "[", "call_item", "[", "'history_item_id'", "]", "]", "]", "[", "'item_type'", "]", "==", "'ConcurrencyItem'", ":", "prev_rid", "=", "execution_history_items", "[", "previous", "[", "call_item", "[", "'history_item_id'", "]", "]", "]", "[", "'run_id'", "]", "if", "prev_rid", "in", "collapsed_concurrent", ":", "collapsed_concurrent", "[", "prev_rid", "]", ".", "append", "(", "rid", ")", "else", ":", "collapsed_concurrent", "[", "prev_rid", "]", "=", "[", "rid", "]", "# assemble grouped item", "execution_item", "=", "{", "}", "## add base properties will throw if not existing", "for", "l", "in", "[", "'description'", ",", "'path_by_name'", ",", "'state_name'", ",", "'run_id'", ",", "'state_type'", ",", "'path'", "]", ":", "execution_item", "[", "l", "]", "=", "call_item", "[", "l", "]", "## add extended properties (added in later rafcon versions),", "## will add default value if not existing instead", "for", "l", ",", "default", "in", "[", "(", "'semantic_data'", ",", "{", "}", ")", ",", "(", "'is_library'", ",", "None", ")", ",", "(", "'library_state_name'", ",", "None", ")", ",", "(", "'library_name'", ",", "None", ")", ",", "(", "'library_path'", ",", "None", ")", "]", ":", "execution_item", "[", "l", "]", "=", "return_item", ".", "get", "(", "l", ",", "default", ")", "for", "l", "in", "[", "'outcome_name'", ",", "'outcome_id'", "]", ":", "execution_item", "[", "l", "]", "=", "return_item", "[", "l", "]", "for", "l", "in", "[", "'timestamp'", "]", ":", "execution_item", "[", "l", "+", "'_call'", "]", "=", "call_item", "[", "l", "]", "execution_item", "[", "l", "+", "'_return'", "]", "=", "return_item", "[", "l", "]", "def", "unpickle_data", "(", "data_dict", ")", ":", "r", "=", "dict", "(", ")", "# support backward 
compatibility", "if", "isinstance", "(", "data_dict", ",", "string_types", ")", ":", "# formerly data dict was a json string", "r", "=", "json", ".", "loads", "(", "data_dict", ")", "else", ":", "for", "k", ",", "v", "in", "data_dict", ".", "items", "(", ")", ":", "if", "not", "k", ".", "startswith", "(", "'!'", ")", ":", "# ! indicates storage error", "try", ":", "r", "[", "k", "]", "=", "pickle", ".", "loads", "(", "v", ")", "except", "Exception", "as", "e", ":", "if", "throw_on_pickle_error", ":", "raise", "elif", "include_erroneous_data_ports", ":", "r", "[", "'!'", "+", "k", "]", "=", "(", "str", "(", "e", ")", ",", "v", ")", "else", ":", "pass", "# ignore", "elif", "include_erroneous_data_ports", ":", "r", "[", "k", "]", "=", "v", "return", "r", "execution_item", "[", "'data_ins'", "]", "=", "unpickle_data", "(", "call_item", "[", "'input_output_data'", "]", ")", "execution_item", "[", "'data_outs'", "]", "=", "unpickle_data", "(", "return_item", "[", "'input_output_data'", "]", ")", "execution_item", "[", "'scoped_data_ins'", "]", "=", "unpickle_data", "(", "call_item", "[", "'scoped_data'", "]", ")", "execution_item", "[", "'scoped_data_outs'", "]", "=", "unpickle_data", "(", "return_item", "[", "'scoped_data'", "]", ")", "execution_item", "[", "'semantic_data'", "]", "=", "unpickle_data", "(", "execution_item", "[", "'semantic_data'", "]", ")", "collapsed_items", "[", "rid", "]", "=", "execution_item", "return", "start_item", ",", "collapsed_next", ",", "collapsed_concurrent", ",", "collapsed_hierarchy", ",", "collapsed_items" ]
Collapsed structure means that all history items belonging to the same state execution are merged together into one object (e.g. CallItem and ReturnItem of an ExecutionState). This is based on the log structure in which all Items which belong together have the same run_id. The collapsed items hold input as well as output data (direct and scoped), and the outcome the state execution. :param dict execution_history_items: history items, in the simplest case directly the opened shelve log file :param bool throw_on_pickle_error: flag if an error is thrown if an object cannot be un-pickled :param bool include_erroneous_data_ports: flag if to include erroneous data ports :param bool full_next: flag to indicate if the next relationship has also to be created at the end of container states :return: start_item, the StateMachineStartItem of the log file next_, a dict mapping run_id --> run_id of the next executed state on the same hierarchy level concurrent, a dict mapping run_id --> []list of run_ids of the concurrent next executed states (if present) hierarchy, a dict mapping run_id --> run_id of the next executed state on the deeper hierarchy level (the start state within that HierarchyState) items, a dict mapping run_id --> collapsed representation of the execution of the state with that run_id :rtype: tuple
[ "Collapsed", "structure", "means", "that", "all", "history", "items", "belonging", "to", "the", "same", "state", "execution", "are", "merged", "together", "into", "one", "object", "(", "e", ".", "g", ".", "CallItem", "and", "ReturnItem", "of", "an", "ExecutionState", ")", ".", "This", "is", "based", "on", "the", "log", "structure", "in", "which", "all", "Items", "which", "belong", "together", "have", "the", "same", "run_id", ".", "The", "collapsed", "items", "hold", "input", "as", "well", "as", "output", "data", "(", "direct", "and", "scoped", ")", "and", "the", "outcome", "the", "state", "execution", ".", ":", "param", "dict", "execution_history_items", ":", "history", "items", "in", "the", "simplest", "case", "directly", "the", "opened", "shelve", "log", "file", ":", "param", "bool", "throw_on_pickle_error", ":", "flag", "if", "an", "error", "is", "thrown", "if", "an", "object", "cannot", "be", "un", "-", "pickled", ":", "param", "bool", "include_erroneous_data_ports", ":", "flag", "if", "to", "include", "erroneous", "data", "ports", ":", "param", "bool", "full_next", ":", "flag", "to", "indicate", "if", "the", "next", "relationship", "has", "also", "to", "be", "created", "at", "the", "end", "of", "container", "states", ":", "return", ":", "start_item", "the", "StateMachineStartItem", "of", "the", "log", "file", "next_", "a", "dict", "mapping", "run_id", "--", ">", "run_id", "of", "the", "next", "executed", "state", "on", "the", "same", "hierarchy", "level", "concurrent", "a", "dict", "mapping", "run_id", "--", ">", "[]", "list", "of", "run_ids", "of", "the", "concurrent", "next", "executed", "states", "(", "if", "present", ")", "hierarchy", "a", "dict", "mapping", "run_id", "--", ">", "run_id", "of", "the", "next", "executed", "state", "on", "the", "deeper", "hierarchy", "level", "(", "the", "start", "state", "within", "that", "HierarchyState", ")", "items", "a", "dict", "mapping", "run_id", "--", ">", "collapsed", "representation", "of", "the", "execution", "of", "the", "state", "with", "that", "run_id", ":", "rtype", ":", "tuple" ]
python
train
55.528889
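A hedged usage sketch of the "simplest case" the docstring mentions: passing an opened shelve log straight to log_to_collapsed_structure(). The file path is hypothetical.

import shelve

from rafcon.utils import execution_log

history_items = shelve.open('/tmp/rafcon_execution_log.shelve')   # hypothetical log file
start, next_, concurrent, hierarchy, items = execution_log.log_to_collapsed_structure(
    history_items, throw_on_pickle_error=False)
for run_id, item in items.items():
    print(run_id, item['state_name'], item['outcome_name'])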
assemblerflow/flowcraft
flowcraft/generator/recipe.py
https://github.com/assemblerflow/flowcraft/blob/fc3f4bddded1efc76006600016dc71a06dd908c0/flowcraft/generator/recipe.py#L528-L561
def brew_innuendo(args): """Brews a given list of processes according to the recipe Parameters ---------- args : argparse.Namespace The arguments passed through argparser that will be used to check the the recipe, tasks and brew the process Returns ------- str The final pipeline string, ready for the engine. list List of process strings. """ # Create recipe class instance automatic_pipeline = Innuendo() if not args.tasks: input_processes = " ".join( automatic_pipeline.process_descriptions.keys()) else: input_processes = args.tasks # Validate the provided pipeline processes validated = automatic_pipeline.validate_pipeline(input_processes) if not validated: sys.exit(1) # Get the final pipeline string pipeline_string = automatic_pipeline.run_auto_pipeline(input_processes) return pipeline_string
[ "def", "brew_innuendo", "(", "args", ")", ":", "# Create recipe class instance", "automatic_pipeline", "=", "Innuendo", "(", ")", "if", "not", "args", ".", "tasks", ":", "input_processes", "=", "\" \"", ".", "join", "(", "automatic_pipeline", ".", "process_descriptions", ".", "keys", "(", ")", ")", "else", ":", "input_processes", "=", "args", ".", "tasks", "# Validate the provided pipeline processes", "validated", "=", "automatic_pipeline", ".", "validate_pipeline", "(", "input_processes", ")", "if", "not", "validated", ":", "sys", ".", "exit", "(", "1", ")", "# Get the final pipeline string", "pipeline_string", "=", "automatic_pipeline", ".", "run_auto_pipeline", "(", "input_processes", ")", "return", "pipeline_string" ]
Brews a given list of processes according to the recipe Parameters ---------- args : argparse.Namespace The arguments passed through argparser that will be used to check the the recipe, tasks and brew the process Returns ------- str The final pipeline string, ready for the engine. list List of process strings.
[ "Brews", "a", "given", "list", "of", "processes", "according", "to", "the", "recipe" ]
python
test
27.029412
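A small hedged usage sketch; brew_innuendo() only reads args.tasks, so a bare argparse.Namespace is enough for illustration (assumes flowcraft is installed).

import argparse

from flowcraft.generator.recipe import brew_innuendo

args = argparse.Namespace(tasks=None)      # None -> include every known process
pipeline_string = brew_innuendo(args)
print(pipeline_string)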
onnx/onnxmltools
onnxmltools/convert/sparkml/_parse.py
https://github.com/onnx/onnxmltools/blob/d4e4c31990fc2d9fd1f92139f497d360914c9df2/onnxmltools/convert/sparkml/_parse.py#L15-L37
def _get_variable_for_input(scope, input_name, global_inputs, output_dict): ''' Find the corresponding Variable for a given raw operator (model) name The variable is either supplied as graph/global inputs or has been generated as output by previous ops :param input_name: :param global_inputs: :param output_dict: :return: ''' if input_name in output_dict: value = output_dict[input_name] ref_count = value[0] variable = value[1] output_dict[input_name] = [ref_count+1, variable] return variable matches = [x for x in global_inputs if x.raw_name == input_name] if matches: return matches[0] # # create a new Var # return scope.declare_local_variable(input_name)
[ "def", "_get_variable_for_input", "(", "scope", ",", "input_name", ",", "global_inputs", ",", "output_dict", ")", ":", "if", "input_name", "in", "output_dict", ":", "value", "=", "output_dict", "[", "input_name", "]", "ref_count", "=", "value", "[", "0", "]", "variable", "=", "value", "[", "1", "]", "output_dict", "[", "input_name", "]", "=", "[", "ref_count", "+", "1", ",", "variable", "]", "return", "variable", "matches", "=", "[", "x", "for", "x", "in", "global_inputs", "if", "x", ".", "raw_name", "==", "input_name", "]", "if", "matches", ":", "return", "matches", "[", "0", "]", "#", "# create a new Var", "#", "return", "scope", ".", "declare_local_variable", "(", "input_name", ")" ]
Find the corresponding Variable for a given raw operator (model) name The variable is either supplied as graph/global inputs or has been generated as output by previous ops :param input_name: :param global_inputs: :param output_dict: :return:
[ "Find", "the", "corresponding", "Variable", "for", "a", "given", "raw", "operator", "(", "model", ")", "name", "The", "variable", "is", "either", "supplied", "as", "graph", "/", "global", "inputs", "or", "has", "been", "generated", "as", "output", "by", "previous", "ops", ":", "param", "input_name", ":", ":", "param", "global_inputs", ":", ":", "param", "output_dict", ":", ":", "return", ":" ]
python
train
32.521739
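The reference-counting behaviour of _get_variable_for_input(), sketched with plain dicts and strings instead of real Variable objects.

output_dict = {'probabilities': [0, 'var_probabilities']}   # name -> [ref_count, variable]

def lookup(name, output_dict):
    if name in output_dict:
        ref_count, variable = output_dict[name]
        output_dict[name] = [ref_count + 1, variable]        # bump the usage counter
        return variable
    return None    # real code falls back to global inputs, then declares a new variable

print(lookup('probabilities', output_dict))   # var_probabilities
print(output_dict['probabilities'][0])        # 1  (one consumer recorded so far)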
redhat-cip/python-dciclient
dciclient/v1/shell_commands/purge.py
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/shell_commands/purge.py#L29-L87
def purge(context, resource, force): """purge(context, resource, force) Purge soft-deleted resources. >>> dcictl purge --resource remotecis """ resources = ['components', 'topics', 'tests', 'teams', 'feeders', 'remotecis', 'jobs', 'files', 'users', 'products'] l_resources = resources if resource is None else resource.split(',') wrong_resources = [res for res in l_resources if res not in resources] test_auth = base.purge(context, 'users', **{'force': False}) if len(wrong_resources) > 0: msg = 'Unkown resource have been specified: %s' % wrong_resources if context.format == 'json': utils.print_json(msg) else: click.echo(msg) elif test_auth.status_code == 401: utils.format_output(test_auth, context.format) else: purged = {} if force: # If in force mode. First we retrieve the number of items to be # purged and then we purge them. This allows to presents meaningful # informations to the user that used this command. for res in l_resources: item_purged = base.purge(context, res, **{'force': False}) \ .json()['_meta']['count'] if item_purged and \ base.purge(context, res, **{'force': True}).status_code == 204: purged[res] = '%s item(s) purged' % item_purged if len(purged.keys()): utils.print_json(purged) else: utils.print_json({'message': 'No item to be purged'}) else: # If not in force mode. The various endpoints are queried for the # informations about the resources to be purged and displayed. for res in l_resources: resource_to_delete = base.purge(context, res, **{'force': force}) if resource_to_delete.json()['_meta']['count'] > 0: purged[res] = resource_to_delete.json() if len(purged.keys()): for item in purged.keys(): if len(l_resources) > 1: click.echo('\n%s:\n' % item) utils.format_output(purged[item][item], context.format) else: utils.format_output({}, context.format)
[ "def", "purge", "(", "context", ",", "resource", ",", "force", ")", ":", "resources", "=", "[", "'components'", ",", "'topics'", ",", "'tests'", ",", "'teams'", ",", "'feeders'", ",", "'remotecis'", ",", "'jobs'", ",", "'files'", ",", "'users'", ",", "'products'", "]", "l_resources", "=", "resources", "if", "resource", "is", "None", "else", "resource", ".", "split", "(", "','", ")", "wrong_resources", "=", "[", "res", "for", "res", "in", "l_resources", "if", "res", "not", "in", "resources", "]", "test_auth", "=", "base", ".", "purge", "(", "context", ",", "'users'", ",", "*", "*", "{", "'force'", ":", "False", "}", ")", "if", "len", "(", "wrong_resources", ")", ">", "0", ":", "msg", "=", "'Unkown resource have been specified: %s'", "%", "wrong_resources", "if", "context", ".", "format", "==", "'json'", ":", "utils", ".", "print_json", "(", "msg", ")", "else", ":", "click", ".", "echo", "(", "msg", ")", "elif", "test_auth", ".", "status_code", "==", "401", ":", "utils", ".", "format_output", "(", "test_auth", ",", "context", ".", "format", ")", "else", ":", "purged", "=", "{", "}", "if", "force", ":", "# If in force mode. First we retrieve the number of items to be", "# purged and then we purge them. This allows to presents meaningful", "# informations to the user that used this command.", "for", "res", "in", "l_resources", ":", "item_purged", "=", "base", ".", "purge", "(", "context", ",", "res", ",", "*", "*", "{", "'force'", ":", "False", "}", ")", ".", "json", "(", ")", "[", "'_meta'", "]", "[", "'count'", "]", "if", "item_purged", "and", "base", ".", "purge", "(", "context", ",", "res", ",", "*", "*", "{", "'force'", ":", "True", "}", ")", ".", "status_code", "==", "204", ":", "purged", "[", "res", "]", "=", "'%s item(s) purged'", "%", "item_purged", "if", "len", "(", "purged", ".", "keys", "(", ")", ")", ":", "utils", ".", "print_json", "(", "purged", ")", "else", ":", "utils", ".", "print_json", "(", "{", "'message'", ":", "'No item to be purged'", "}", ")", "else", ":", "# If not in force mode. The various endpoints are queried for the", "# informations about the resources to be purged and displayed.", "for", "res", "in", "l_resources", ":", "resource_to_delete", "=", "base", ".", "purge", "(", "context", ",", "res", ",", "*", "*", "{", "'force'", ":", "force", "}", ")", "if", "resource_to_delete", ".", "json", "(", ")", "[", "'_meta'", "]", "[", "'count'", "]", ">", "0", ":", "purged", "[", "res", "]", "=", "resource_to_delete", ".", "json", "(", ")", "if", "len", "(", "purged", ".", "keys", "(", ")", ")", ":", "for", "item", "in", "purged", ".", "keys", "(", ")", ":", "if", "len", "(", "l_resources", ")", ">", "1", ":", "click", ".", "echo", "(", "'\\n%s:\\n'", "%", "item", ")", "utils", ".", "format_output", "(", "purged", "[", "item", "]", "[", "item", "]", ",", "context", ".", "format", ")", "else", ":", "utils", ".", "format_output", "(", "{", "}", ",", "context", ".", "format", ")" ]
purge(context, resource, force) Purge soft-deleted resources. >>> dcictl purge --resource remotecis
[ "purge", "(", "context", "resource", "force", ")" ]
python
train
40.40678
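The resource-name validation that purge() performs, shown stand-alone; the resource list is copied from the command, the requested names are invented.

resources = ['components', 'topics', 'tests', 'teams', 'feeders',
             'remotecis', 'jobs', 'files', 'users', 'products']
requested = 'remotecis,jobs,bogus'.split(',')
wrong_resources = [res for res in requested if res not in resources]
if wrong_resources:
    print('Unknown resources have been specified: %s' % wrong_resources)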
jsocol/pystatsd
statsd/client/base.py
https://github.com/jsocol/pystatsd/blob/006a86394c44ff71e6e8e52529daa3c0fdcc93fb/statsd/client/base.py#L37-L39
def decr(self, stat, count=1, rate=1): """Decrement a stat by `count`.""" self.incr(stat, -count, rate)
[ "def", "decr", "(", "self", ",", "stat", ",", "count", "=", "1", ",", "rate", "=", "1", ")", ":", "self", ".", "incr", "(", "stat", ",", "-", "count", ",", "rate", ")" ]
Decrement a stat by `count`.
[ "Decrement", "a", "stat", "by", "count", "." ]
python
train
39
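A short usage sketch against pystatsd's client API; host and port are illustrative.

import statsd

client = statsd.StatsClient('localhost', 8125)
client.incr('queue.depth')                       # +1
client.decr('queue.depth')                       # -1, i.e. incr() with the sign flipped
client.decr('queue.depth', count=5, rate=0.1)    # -5, sampled at 10%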
gwastro/pycbc-glue
pycbc_glue/ligolw/utils/process.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/ligolw/utils/process.py#L204-L209
def doc_includes_process(xmldoc, program): """ Return True if the process table in xmldoc includes entries for a program named program. """ return program in lsctables.ProcessTable.get_table(xmldoc).getColumnByName(u"program")
[ "def", "doc_includes_process", "(", "xmldoc", ",", "program", ")", ":", "return", "program", "in", "lsctables", ".", "ProcessTable", ".", "get_table", "(", "xmldoc", ")", ".", "getColumnByName", "(", "u\"program\"", ")" ]
Return True if the process table in xmldoc includes entries for a program named program.
[ "Return", "True", "if", "the", "process", "table", "in", "xmldoc", "includes", "entries", "for", "a", "program", "named", "program", "." ]
python
train
37.666667
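A hedged usage sketch; it assumes an existing LIGO_LW XML file and that load_filename() is available under pycbc_glue.ligolw.utils as in glue — exact helper signatures can vary between versions.

from pycbc_glue.ligolw import utils as ligolw_utils
from pycbc_glue.ligolw.utils import process as ligolw_process

xmldoc = ligolw_utils.load_filename('bank.xml.gz')            # hypothetical input file
if ligolw_process.doc_includes_process(xmldoc, u'lalapps_tmpltbank'):
    print('document carries process-table entries from lalapps_tmpltbank')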
thombashi/pytablewriter
pytablewriter/writer/text/_jsonlines.py
https://github.com/thombashi/pytablewriter/blob/52ea85ed8e89097afa64f137c6a1b3acdfefdbda/pytablewriter/writer/text/_jsonlines.py#L32-L48
def write_table(self): """ |write_table| with `Line-delimited JSON(LDJSON) <https://en.wikipedia.org/wiki/JSON_streaming#Line-delimited_JSON>`__ /NDJSON/JSON Lines format. :raises pytablewriter.EmptyHeaderError: If the |headers| is empty. :Example: :ref:`example-jsonl-writer` """ with self._logger: self._verify_property() self._preprocess() for values in self._table_value_matrix: self._write_line(json.dumps(values))
[ "def", "write_table", "(", "self", ")", ":", "with", "self", ".", "_logger", ":", "self", ".", "_verify_property", "(", ")", "self", ".", "_preprocess", "(", ")", "for", "values", "in", "self", ".", "_table_value_matrix", ":", "self", ".", "_write_line", "(", "json", ".", "dumps", "(", "values", ")", ")" ]
|write_table| with `Line-delimited JSON(LDJSON) <https://en.wikipedia.org/wiki/JSON_streaming#Line-delimited_JSON>`__ /NDJSON/JSON Lines format. :raises pytablewriter.EmptyHeaderError: If the |headers| is empty. :Example: :ref:`example-jsonl-writer`
[ "|write_table|", "with", "Line", "-", "delimited", "JSON", "(", "LDJSON", ")", "<https", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "JSON_streaming#Line", "-", "delimited_JSON", ">", "__", "/", "NDJSON", "/", "JSON", "Lines", "format", "." ]
python
train
31.411765
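A short usage sketch of the JSON Lines writer this method belongs to; the attribute names (headers, value_matrix) follow current pytablewriter documentation and may differ in older releases.

import pytablewriter

writer = pytablewriter.JsonLinesTableWriter()
writer.headers = ['id', 'name', 'score']
writer.value_matrix = [[1, 'alice', 0.5], [2, 'bob', 0.75]]
writer.write_table()   # writes one JSON object per row, e.g. {"id": 1, "name": "alice", "score": 0.5}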