Dataset schema (each record below repeats these fields in order):

  repo              string   7 to 54 characters
  path              string   4 to 192 characters
  url               string   87 to 284 characters
  code              string   78 to 104k characters
  code_tokens       list
  docstring         string   1 to 46.9k characters
  docstring_tokens  list
  language          categorical, 1 distinct value
  partition         categorical, 3 distinct values
TkTech/Jawa
jawa/util/flags.py
https://github.com/TkTech/Jawa/blob/94c8424e699029ac33fbc0e866fff0ecb2742289/jawa/util/flags.py#L40-L46
def set(self, name, value): """ Sets the value of the field `name` to `value`, which is `True` or `False`. """ flag = self.flags[name] self._value = (self.value | flag) if value else (self.value & ~flag)
[ "def", "set", "(", "self", ",", "name", ",", "value", ")", ":", "flag", "=", "self", ".", "flags", "[", "name", "]", "self", ".", "_value", "=", "(", "self", ".", "value", "|", "flag", ")", "if", "value", "else", "(", "self", ".", "value", "&", "~", "flag", ")" ]
Sets the value of the field `name` to `value`, which is `True` or `False`.
[ "Sets", "the", "value", "of", "the", "field", "name", "to", "value", "which", "is", "True", "or", "False", "." ]
python
train
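The record above stores a bit-flag setter: it ORs a named flag in when `value` is true and ANDs it out with the flag's complement otherwise. A minimal self-contained sketch of the same bitmask update (the flag names and values here are illustrative, not taken from jawa):

```python
# Illustrative bitmask container mirroring the update in Flags.set above;
# the flag names and mask values are made up for the example.
class Flags:
    def __init__(self, flags):
        self.flags = flags          # name -> bit mask
        self._value = 0

    @property
    def value(self):
        return self._value

    def set(self, name, value):
        flag = self.flags[name]
        # OR the bit in when value is True, clear it with the complement otherwise.
        self._value = (self.value | flag) if value else (self.value & ~flag)


acc = Flags({'acc_public': 0x0001, 'acc_final': 0x0010})
acc.set('acc_public', True)
acc.set('acc_final', True)
acc.set('acc_public', False)
assert acc.value == 0x0010
```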
timothyb0912/pylogit
pylogit/pylogit.py
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/pylogit.py#L83-L225
def create_choice_model(data, alt_id_col, obs_id_col, choice_col, specification, model_type, intercept_ref_pos=None, shape_ref_pos=None, names=None, intercept_names=None, shape_names=None, nest_spec=None, mixing_id_col=None, mixing_vars=None): """ Parameters ---------- data : string or pandas dataframe. If `data` is a string, it should be an absolute or relative path to a CSV file containing the long format data for this choice model. Note long format has one row per available alternative for each observation. If `data` is a pandas dataframe, `data` should already be in long format. alt_id_col : string. Should denote the column in data that contains the alternative identifiers for each row. obs_id_col : string. Should denote the column in data that contains the observation identifiers for each row. choice_col : string. Should denote the column in data which contains the ones and zeros that denote whether or not the given row corresponds to the chosen alternative for the given individual. specification : OrderedDict. Keys are a proper subset of the columns in `long_form_df`. Values are either a list or a single string, `all_diff` or `all_same`. If a list, the elements should be: 1) single objects that are within the alternative ID column of `long_form_df` 2) lists of objects that are within the alternative ID column of `long_form_df`. For each single object in the list, a unique column will be created (i.e. there will be a unique coefficient for that variable in the corresponding utility equation of the corresponding alternative). For lists within the `specification_dict` values, a single column will be created for all the alternatives within iterable (i.e. there will be one common coefficient for the variables in the iterable). model_type : string. Denotes the model type of the choice_model being instantiated. Should be one of the following values: - "MNL" - "Asym" - "Cloglog" - "Scobit" - "Uneven" - "Nested Logit" - "Mixed Logit" intercept_ref_pos : int, optional. Valid only when the intercepts being estimated are not part of the index. Specifies the alternative in the ordered array of unique alternative ids whose intercept or alternative-specific constant is not estimated, to ensure model identifiability. Default == None. shape_ref_pos : int, optional. Specifies the alternative in the ordered array of unique alternative ids whose shape parameter is not estimated, to ensure model identifiability. Default == None. names : OrderedDict or None, optional. Should have the same keys as `specification`. For each key: - if the corresponding value in `specification` is "all_same", then there should be a single string as the value in names. - if the corresponding value in `specification` is "all_diff", then there should be a list of strings as the value in names. There should be one string in the value in names for each possible alternative. - if the corresponding value in `specification` is a list, then there should be a list of strings as the value in names. There should be one string the value in names per item in the value in `specification`. Default == None. intercept_names : list of strings or None, optional. If a list is passed, then the list should have the same number of elements as there are possible alternatives in data, minus 1. Each element of the list should be the name of the corresponding alternative's intercept term, in sorted order of the possible alternative IDs. 
If None is passed, the resulting names that are shown in the estimation results will be ["Outside_ASC_{}".format(x) for x in shape_names]. Default = None. shape_names : list of strings or None, optional. If a list is passed, then the list should have the same number of elements as there are possible alternative IDs in data. Each element of the list should be a string denoting the name of the corresponding alternative, in sorted order of the possible alternative IDs. The resulting names which are shown in the estimation results will be ["shape_{}".format(x) for x in shape_names]. Default = None. nest_spec : OrderedDict or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id only be associated with a single nest! Default == None. mixing_id_col : str, or None, optional. Should be a column heading in `data`. Should denote the column in `data` which contains the identifiers of the units of observation over which the coefficients of the model are thought to be randomly distributed. If `model_type == "Mixed Logit"`, then `mixing_id_col` must be passed. Default == None. mixing_vars : list, or None, optional. All elements of the list should be strings. Each string should be present in the values of `names.values()` and they're associated variables should only be index variables (i.e. part of the design matrix). If `model_type == "Mixed Logit"`, then `mixing_vars` must be passed. Default == None. Returns ------- model_obj : instantiation of the Choice Model Class corresponding to the model type passed as the function argument. The returned object will have been instantiated with the arguments passed to this function. """ # Make sure the model type is valid ensure_valid_model_type(model_type, valid_model_types) # Carry out the appropriate instantiation process for the chosen # choice model model_kwargs = {"intercept_ref_pos": intercept_ref_pos, "shape_ref_pos": shape_ref_pos, "names": names, "intercept_names": intercept_names, "shape_names": shape_names, "nest_spec": nest_spec, "mixing_id_col": mixing_id_col, "mixing_vars": mixing_vars} return model_type_to_class[model_type](data, alt_id_col, obs_id_col, choice_col, specification, **model_kwargs)
[ "def", "create_choice_model", "(", "data", ",", "alt_id_col", ",", "obs_id_col", ",", "choice_col", ",", "specification", ",", "model_type", ",", "intercept_ref_pos", "=", "None", ",", "shape_ref_pos", "=", "None", ",", "names", "=", "None", ",", "intercept_names", "=", "None", ",", "shape_names", "=", "None", ",", "nest_spec", "=", "None", ",", "mixing_id_col", "=", "None", ",", "mixing_vars", "=", "None", ")", ":", "# Make sure the model type is valid", "ensure_valid_model_type", "(", "model_type", ",", "valid_model_types", ")", "# Carry out the appropriate instantiation process for the chosen", "# choice model", "model_kwargs", "=", "{", "\"intercept_ref_pos\"", ":", "intercept_ref_pos", ",", "\"shape_ref_pos\"", ":", "shape_ref_pos", ",", "\"names\"", ":", "names", ",", "\"intercept_names\"", ":", "intercept_names", ",", "\"shape_names\"", ":", "shape_names", ",", "\"nest_spec\"", ":", "nest_spec", ",", "\"mixing_id_col\"", ":", "mixing_id_col", ",", "\"mixing_vars\"", ":", "mixing_vars", "}", "return", "model_type_to_class", "[", "model_type", "]", "(", "data", ",", "alt_id_col", ",", "obs_id_col", ",", "choice_col", ",", "specification", ",", "*", "*", "model_kwargs", ")" ]
Parameters ---------- data : string or pandas dataframe. If `data` is a string, it should be an absolute or relative path to a CSV file containing the long format data for this choice model. Note long format has one row per available alternative for each observation. If `data` is a pandas dataframe, `data` should already be in long format. alt_id_col : string. Should denote the column in data that contains the alternative identifiers for each row. obs_id_col : string. Should denote the column in data that contains the observation identifiers for each row. choice_col : string. Should denote the column in data which contains the ones and zeros that denote whether or not the given row corresponds to the chosen alternative for the given individual. specification : OrderedDict. Keys are a proper subset of the columns in `long_form_df`. Values are either a list or a single string, `all_diff` or `all_same`. If a list, the elements should be: 1) single objects that are within the alternative ID column of `long_form_df` 2) lists of objects that are within the alternative ID column of `long_form_df`. For each single object in the list, a unique column will be created (i.e. there will be a unique coefficient for that variable in the corresponding utility equation of the corresponding alternative). For lists within the `specification_dict` values, a single column will be created for all the alternatives within iterable (i.e. there will be one common coefficient for the variables in the iterable). model_type : string. Denotes the model type of the choice_model being instantiated. Should be one of the following values: - "MNL" - "Asym" - "Cloglog" - "Scobit" - "Uneven" - "Nested Logit" - "Mixed Logit" intercept_ref_pos : int, optional. Valid only when the intercepts being estimated are not part of the index. Specifies the alternative in the ordered array of unique alternative ids whose intercept or alternative-specific constant is not estimated, to ensure model identifiability. Default == None. shape_ref_pos : int, optional. Specifies the alternative in the ordered array of unique alternative ids whose shape parameter is not estimated, to ensure model identifiability. Default == None. names : OrderedDict or None, optional. Should have the same keys as `specification`. For each key: - if the corresponding value in `specification` is "all_same", then there should be a single string as the value in names. - if the corresponding value in `specification` is "all_diff", then there should be a list of strings as the value in names. There should be one string in the value in names for each possible alternative. - if the corresponding value in `specification` is a list, then there should be a list of strings as the value in names. There should be one string the value in names per item in the value in `specification`. Default == None. intercept_names : list of strings or None, optional. If a list is passed, then the list should have the same number of elements as there are possible alternatives in data, minus 1. Each element of the list should be the name of the corresponding alternative's intercept term, in sorted order of the possible alternative IDs. If None is passed, the resulting names that are shown in the estimation results will be ["Outside_ASC_{}".format(x) for x in shape_names]. Default = None. shape_names : list of strings or None, optional. If a list is passed, then the list should have the same number of elements as there are possible alternative IDs in data. 
Each element of the list should be a string denoting the name of the corresponding alternative, in sorted order of the possible alternative IDs. The resulting names which are shown in the estimation results will be ["shape_{}".format(x) for x in shape_names]. Default = None. nest_spec : OrderedDict or None, optional. Keys are strings that define the name of the nests. Values are lists of alternative ids, denoting which alternatives belong to which nests. Each alternative id only be associated with a single nest! Default == None. mixing_id_col : str, or None, optional. Should be a column heading in `data`. Should denote the column in `data` which contains the identifiers of the units of observation over which the coefficients of the model are thought to be randomly distributed. If `model_type == "Mixed Logit"`, then `mixing_id_col` must be passed. Default == None. mixing_vars : list, or None, optional. All elements of the list should be strings. Each string should be present in the values of `names.values()` and they're associated variables should only be index variables (i.e. part of the design matrix). If `model_type == "Mixed Logit"`, then `mixing_vars` must be passed. Default == None. Returns ------- model_obj : instantiation of the Choice Model Class corresponding to the model type passed as the function argument. The returned object will have been instantiated with the arguments passed to this function.
[ "Parameters", "----------", "data", ":", "string", "or", "pandas", "dataframe", ".", "If", "data", "is", "a", "string", "it", "should", "be", "an", "absolute", "or", "relative", "path", "to", "a", "CSV", "file", "containing", "the", "long", "format", "data", "for", "this", "choice", "model", ".", "Note", "long", "format", "has", "one", "row", "per", "available", "alternative", "for", "each", "observation", ".", "If", "data", "is", "a", "pandas", "dataframe", "data", "should", "already", "be", "in", "long", "format", ".", "alt_id_col", ":", "string", ".", "Should", "denote", "the", "column", "in", "data", "that", "contains", "the", "alternative", "identifiers", "for", "each", "row", ".", "obs_id_col", ":", "string", ".", "Should", "denote", "the", "column", "in", "data", "that", "contains", "the", "observation", "identifiers", "for", "each", "row", ".", "choice_col", ":", "string", ".", "Should", "denote", "the", "column", "in", "data", "which", "contains", "the", "ones", "and", "zeros", "that", "denote", "whether", "or", "not", "the", "given", "row", "corresponds", "to", "the", "chosen", "alternative", "for", "the", "given", "individual", ".", "specification", ":", "OrderedDict", ".", "Keys", "are", "a", "proper", "subset", "of", "the", "columns", "in", "long_form_df", ".", "Values", "are", "either", "a", "list", "or", "a", "single", "string", "all_diff", "or", "all_same", ".", "If", "a", "list", "the", "elements", "should", "be", ":", "1", ")", "single", "objects", "that", "are", "within", "the", "alternative", "ID", "column", "of", "long_form_df", "2", ")", "lists", "of", "objects", "that", "are", "within", "the", "alternative", "ID", "column", "of", "long_form_df", ".", "For", "each", "single", "object", "in", "the", "list", "a", "unique", "column", "will", "be", "created", "(", "i", ".", "e", ".", "there", "will", "be", "a", "unique", "coefficient", "for", "that", "variable", "in", "the", "corresponding", "utility", "equation", "of", "the", "corresponding", "alternative", ")", ".", "For", "lists", "within", "the", "specification_dict", "values", "a", "single", "column", "will", "be", "created", "for", "all", "the", "alternatives", "within", "iterable", "(", "i", ".", "e", ".", "there", "will", "be", "one", "common", "coefficient", "for", "the", "variables", "in", "the", "iterable", ")", ".", "model_type", ":", "string", ".", "Denotes", "the", "model", "type", "of", "the", "choice_model", "being", "instantiated", ".", "Should", "be", "one", "of", "the", "following", "values", ":" ]
python
train
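The `create_choice_model` record documents a long keyword signature; a hedged usage sketch may make the `specification` format easier to see. The CSV path, column names, and alternative ids below are hypothetical; only the argument structure follows the docstring:

```python
# Hypothetical call to pylogit.create_choice_model; column names, ids and the
# CSV path are placeholders, the argument layout follows the docstring above.
from collections import OrderedDict
import pylogit as pl

specification = OrderedDict()
specification["intercept"] = [1, 2]        # one ASC per listed alternative id
specification["travel_time"] = "all_same"  # one coefficient shared by all alternatives

model = pl.create_choice_model(data="long_format_choices.csv",
                               alt_id_col="mode_id",
                               obs_id_col="person_id",
                               choice_col="choice",
                               specification=specification,
                               model_type="MNL")
```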
gwastro/pycbc
pycbc/workflow/pegasus_workflow.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/workflow/pegasus_workflow.py#L141-L149
def add_raw_arg(self, arg): """ Add an argument to the command line of this job, but do *NOT* add white space between arguments. This can be added manually by adding ' ' if needed """ if not isinstance(arg, File): arg = str(arg) self._raw_options += [arg]
[ "def", "add_raw_arg", "(", "self", ",", "arg", ")", ":", "if", "not", "isinstance", "(", "arg", ",", "File", ")", ":", "arg", "=", "str", "(", "arg", ")", "self", ".", "_raw_options", "+=", "[", "arg", "]" ]
Add an argument to the command line of this job, but do *NOT* add white space between arguments. This can be added manually by adding ' ' if needed
[ "Add", "an", "argument", "to", "the", "command", "line", "of", "this", "job", "but", "do", "*", "NOT", "*", "add", "white", "space", "between", "arguments", ".", "This", "can", "be", "added", "manually", "by", "adding", "if", "needed" ]
python
train
gwpy/gwpy
gwpy/types/array.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/types/array.py#L441-L474
def flatten(self, order='C'): """Return a copy of the array collapsed into one dimension. Any index information is removed as part of the flattening, and the result is returned as a `~astropy.units.Quantity` array. Parameters ---------- order : {'C', 'F', 'A', 'K'}, optional 'C' means to flatten in row-major (C-style) order. 'F' means to flatten in column-major (Fortran- style) order. 'A' means to flatten in column-major order if `a` is Fortran *contiguous* in memory, row-major order otherwise. 'K' means to flatten `a` in the order the elements occur in memory. The default is 'C'. Returns ------- y : `~astropy.units.Quantity` A copy of the input array, flattened to one dimension. See Also -------- ravel : Return a flattened array. flat : A 1-D flat iterator over the array. Examples -------- >>> a = Array([[1,2], [3,4]], unit='m', name='Test') >>> a.flatten() <Quantity [1., 2., 3., 4.] m> """ return super(Array, self).flatten(order=order).view(Quantity)
[ "def", "flatten", "(", "self", ",", "order", "=", "'C'", ")", ":", "return", "super", "(", "Array", ",", "self", ")", ".", "flatten", "(", "order", "=", "order", ")", ".", "view", "(", "Quantity", ")" ]
Return a copy of the array collapsed into one dimension. Any index information is removed as part of the flattening, and the result is returned as a `~astropy.units.Quantity` array. Parameters ---------- order : {'C', 'F', 'A', 'K'}, optional 'C' means to flatten in row-major (C-style) order. 'F' means to flatten in column-major (Fortran- style) order. 'A' means to flatten in column-major order if `a` is Fortran *contiguous* in memory, row-major order otherwise. 'K' means to flatten `a` in the order the elements occur in memory. The default is 'C'. Returns ------- y : `~astropy.units.Quantity` A copy of the input array, flattened to one dimension. See Also -------- ravel : Return a flattened array. flat : A 1-D flat iterator over the array. Examples -------- >>> a = Array([[1,2], [3,4]], unit='m', name='Test') >>> a.flatten() <Quantity [1., 2., 3., 4.] m>
[ "Return", "a", "copy", "of", "the", "array", "collapsed", "into", "one", "dimension", "." ]
python
train
saltstack/salt
salt/modules/inspectlib/fsdb.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/fsdb.py#L231-L255
def delete(self, obj, matches=None, mt=None, lt=None, eq=None): ''' Delete object from the database. :param obj: :param matches: :param mt: :param lt: :param eq: :return: ''' deleted = False objects = list() for _obj in self.get(obj): if not self.__criteria(_obj, matches=matches, mt=mt, lt=lt, eq=eq): objects.append(_obj) else: deleted = True self.flush(obj._TABLE) self.create_table_from_object(obj()) for _obj in objects: self.store(_obj) return deleted
[ "def", "delete", "(", "self", ",", "obj", ",", "matches", "=", "None", ",", "mt", "=", "None", ",", "lt", "=", "None", ",", "eq", "=", "None", ")", ":", "deleted", "=", "False", "objects", "=", "list", "(", ")", "for", "_obj", "in", "self", ".", "get", "(", "obj", ")", ":", "if", "not", "self", ".", "__criteria", "(", "_obj", ",", "matches", "=", "matches", ",", "mt", "=", "mt", ",", "lt", "=", "lt", ",", "eq", "=", "eq", ")", ":", "objects", ".", "append", "(", "_obj", ")", "else", ":", "deleted", "=", "True", "self", ".", "flush", "(", "obj", ".", "_TABLE", ")", "self", ".", "create_table_from_object", "(", "obj", "(", ")", ")", "for", "_obj", "in", "objects", ":", "self", ".", "store", "(", "_obj", ")", "return", "deleted" ]
Delete object from the database. :param obj: :param matches: :param mt: :param lt: :param eq: :return:
[ "Delete", "object", "from", "the", "database", "." ]
python
train
trevisanj/a99
a99/fileio.py
https://github.com/trevisanj/a99/blob/193e6e3c9b3e4f4a0ba7eb3eece846fe7045c539/a99/fileio.py#L206-L222
def create_symlink(source, link_name): """ Creates symbolic link for either operating system. http://stackoverflow.com/questions/6260149/os-symlink-support-in-windows """ os_symlink = getattr(os, "symlink", None) if isinstance(os_symlink, collections.Callable): os_symlink(source, link_name) else: import ctypes csl = ctypes.windll.kernel32.CreateSymbolicLinkW csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32) csl.restype = ctypes.c_ubyte flags = 1 if os.path.isdir(source) else 0 if csl(link_name, source, flags) == 0: raise ctypes.WinError()
[ "def", "create_symlink", "(", "source", ",", "link_name", ")", ":", "os_symlink", "=", "getattr", "(", "os", ",", "\"symlink\"", ",", "None", ")", "if", "isinstance", "(", "os_symlink", ",", "collections", ".", "Callable", ")", ":", "os_symlink", "(", "source", ",", "link_name", ")", "else", ":", "import", "ctypes", "csl", "=", "ctypes", ".", "windll", ".", "kernel32", ".", "CreateSymbolicLinkW", "csl", ".", "argtypes", "=", "(", "ctypes", ".", "c_wchar_p", ",", "ctypes", ".", "c_wchar_p", ",", "ctypes", ".", "c_uint32", ")", "csl", ".", "restype", "=", "ctypes", ".", "c_ubyte", "flags", "=", "1", "if", "os", ".", "path", ".", "isdir", "(", "source", ")", "else", "0", "if", "csl", "(", "link_name", ",", "source", ",", "flags", ")", "==", "0", ":", "raise", "ctypes", ".", "WinError", "(", ")" ]
Creates a symbolic link on either operating system. http://stackoverflow.com/questions/6260149/os-symlink-support-in-windows
[ "Creates", "symbolic", "link", "for", "either", "operating", "system", ".", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "6260149", "/", "os", "-", "symlink", "-", "support", "-", "in", "-", "windows" ]
python
train
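A short usage sketch for the cross-platform symlink helper above; the paths are hypothetical and the import assumes the module path shown in the record:

```python
# Hypothetical use of a99.fileio.create_symlink; on POSIX it delegates to
# os.symlink, on Windows it falls back to kernel32.CreateSymbolicLinkW.
from a99.fileio import create_symlink

create_symlink("data/config-2024.txt", "config-current.txt")
```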
DemocracyClub/uk-election-ids
uk_election_ids/election_ids.py
https://github.com/DemocracyClub/uk-election-ids/blob/566895e15b539e8a7fa3bebb680d5cd326cf6b6b/uk_election_ids/election_ids.py#L62-L78
def with_subtype(self, subtype): """Add a subtype segment Args: subtype (str): May be one of ``['a', 'c', 'r']``. See the `Reference Definition <https://elections.democracyclub.org.uk/reference_definition>`_. for valid election type/subtype combinations. Returns: IdBuilder Raises: ValueError """ self._validate_subtype(subtype) self.subtype = subtype return self
[ "def", "with_subtype", "(", "self", ",", "subtype", ")", ":", "self", ".", "_validate_subtype", "(", "subtype", ")", "self", ".", "subtype", "=", "subtype", "return", "self" ]
Add a subtype segment Args: subtype (str): May be one of ``['a', 'c', 'r']``. See the `Reference Definition <https://elections.democracyclub.org.uk/reference_definition>`_. for valid election type/subtype combinations. Returns: IdBuilder Raises: ValueError
[ "Add", "a", "subtype", "segment" ]
python
train
ppinard/matplotlib-scalebar
matplotlib_scalebar/dimension.py
https://github.com/ppinard/matplotlib-scalebar/blob/ba8ca4df7d5a4efc43a394e4fe88b8e5e517abf4/matplotlib_scalebar/dimension.py#L29-L50
def add_units(self, units, factor, latexrepr=None): """ Add new possible units. :arg units: units :type units: :class:`str` :arg factor: multiplication factor to convert new units into base units :type factor: :class:`float` :arg latexrepr: LaTeX representation of units (if ``None``, use *units) :type latexrepr: :class:`str` """ if units in self._units: raise ValueError('%s already defined' % units) if factor == 1: raise ValueError('Factor cannot be equal to 1') if latexrepr is None: latexrepr = units self._units[units] = factor self._latexrepr[units] = latexrepr
[ "def", "add_units", "(", "self", ",", "units", ",", "factor", ",", "latexrepr", "=", "None", ")", ":", "if", "units", "in", "self", ".", "_units", ":", "raise", "ValueError", "(", "'%s already defined'", "%", "units", ")", "if", "factor", "==", "1", ":", "raise", "ValueError", "(", "'Factor cannot be equal to 1'", ")", "if", "latexrepr", "is", "None", ":", "latexrepr", "=", "units", "self", ".", "_units", "[", "units", "]", "=", "factor", "self", ".", "_latexrepr", "[", "units", "]", "=", "latexrepr" ]
Add new possible units. :arg units: units :type units: :class:`str` :arg factor: multiplication factor to convert new units into base units :type factor: :class:`float` :arg latexrepr: LaTeX representation of units (if ``None``, use *units) :type latexrepr: :class:`str`
[ "Add", "new", "possible", "units", ".", ":", "arg", "units", ":", "units", ":", "type", "units", ":", ":", "class", ":", "str", ":", "arg", "factor", ":", "multiplication", "factor", "to", "convert", "new", "units", "into", "base", "units", ":", "type", "factor", ":", ":", "class", ":", "float", ":", "arg", "latexrepr", ":", "LaTeX", "representation", "of", "units", "(", "if", "None", "use", "*", "units", ")", ":", "type", "latexrepr", ":", ":", "class", ":", "str" ]
python
train
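A hedged example of registering a derived unit with `add_units`; the `SILengthDimension` class and its metre base unit are recalled from matplotlib-scalebar and should be treated as assumptions here:

```python
# Assumed: SILengthDimension exists and uses the metre as its base unit.
from matplotlib_scalebar.dimension import SILengthDimension

dim = SILengthDimension()
# Register miles: 1 mi = 1609.344 base units; a factor of exactly 1 would raise.
dim.add_units('mi', 1609.344, latexrepr=r'\mathrm{mi}')
```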
ontio/ontology-python-sdk
ontology/io/binary_reader.py
https://github.com/ontio/ontology-python-sdk/blob/ac88bdda941896c5d2ced08422a9c5179d3f9b19/ontology/io/binary_reader.py#L335-L358
def read_serializable_array(self, class_name, max_size=sys.maxsize): """ Deserialize a stream into the object specific by `class_name`. Args: class_name (str): a full path to the class to be deserialized into. e.g. 'neo.Core.Block.Block' max_size (int): (Optional) maximum number of bytes to read. Returns: list: list of `class_name` objects deserialized from the stream. """ module = '.'.join(class_name.split('.')[:-1]) class_name = class_name.split('.')[-1] class_attr = getattr(importlib.import_module(module), class_name) length = self.read_var_int(max_size=max_size) items = [] try: for _ in range(0, length): item = class_attr() item.Deserialize(self) items.append(item) except Exception as e: raise SDKException(ErrorCode.param_err("Couldn't deserialize %s" % e)) return items
[ "def", "read_serializable_array", "(", "self", ",", "class_name", ",", "max_size", "=", "sys", ".", "maxsize", ")", ":", "module", "=", "'.'", ".", "join", "(", "class_name", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ")", "class_name", "=", "class_name", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "class_attr", "=", "getattr", "(", "importlib", ".", "import_module", "(", "module", ")", ",", "class_name", ")", "length", "=", "self", ".", "read_var_int", "(", "max_size", "=", "max_size", ")", "items", "=", "[", "]", "try", ":", "for", "_", "in", "range", "(", "0", ",", "length", ")", ":", "item", "=", "class_attr", "(", ")", "item", ".", "Deserialize", "(", "self", ")", "items", ".", "append", "(", "item", ")", "except", "Exception", "as", "e", ":", "raise", "SDKException", "(", "ErrorCode", ".", "param_err", "(", "\"Couldn't deserialize %s\"", "%", "e", ")", ")", "return", "items" ]
Deserialize a stream into the object specified by `class_name`. Args: class_name (str): a full path to the class to be deserialized into, e.g. 'neo.Core.Block.Block' max_size (int): (Optional) maximum number of bytes to read. Returns: list: list of `class_name` objects deserialized from the stream.
[ "Deserialize", "a", "stream", "into", "the", "object", "specific", "by", "class_name", "." ]
python
train
lemieuxl/pyGenClean
pyGenClean/run_data_clean_up.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/run_data_clean_up.py#L270-L493
def run_duplicated_samples(in_prefix, in_type, out_prefix, base_dir, options): """Runs step1 (duplicated samples). :param in_prefix: the prefix of the input files. :param in_type: the type of the input files. :param out_prefix: the output prefix. :param base_dir: the output directory. :param options: the options needed. :type in_prefix: str :type in_type: str :type out_prefix: str :type base_dir: str :type options: list :returns: a tuple containing the prefix of the output files (the input prefix for the next script) and the type of the output files (``tfile``). This function calls the :py:mod:`pyGenClean.DupSamples.duplicated_samples` module. The required file type for this module is ``tfile``, hence the need to use the :py:func:`check_input_files` to check if the file input file type is the good one, or to create it if needed. """ # Creating the output directory os.mkdir(out_prefix) # We know we need tfile required_type = "tfile" check_input_files(in_prefix, in_type, required_type) # We need to inject the name of the input file and the name of the output # prefix script_prefix = os.path.join(out_prefix, "dup_samples") options += ["--{}".format(required_type), in_prefix, "--out", script_prefix] # We run the script try: duplicated_samples.main(options) except duplicated_samples.ProgramError as e: msg = "duplicated_samples: {}".format(e) raise ProgramError(msg) # Reading the number of duplicated samples duplicated_count = defaultdict(int) if os.path.isfile(script_prefix + ".duplicated_samples.tfam"): with open(script_prefix + ".duplicated_samples.tfam", "r") as i_file: duplicated_count = Counter([ tuple(createRowFromPlinkSpacedOutput(line)[:2]) for line in i_file ]) # Counting the number of zeroed out genotypes per duplicated sample zeroed_out = defaultdict(int) if os.path.isfile(script_prefix + ".zeroed_out"): with open(script_prefix + ".zeroed_out", "r") as i_file: zeroed_out = Counter([ tuple(line.rstrip("\r\n").split("\t")[:2]) for line in i_file.read().splitlines()[1:] ]) nb_zeroed_out = sum(zeroed_out.values()) # Checking the not good enough samples not_good_enough = set() if os.path.isfile(script_prefix + ".not_good_enough"): with open(script_prefix + ".not_good_enough", "r") as i_file: not_good_enough = { tuple(line.rstrip("\r\n").split("\t")[:4]) for line in i_file.read().splitlines()[1:] } # Checking which samples were chosen chosen_sample = set() if os.path.isfile(script_prefix + ".chosen_samples.info"): with open(script_prefix + ".chosen_samples.info", "r") as i_file: chosen_sample = { tuple(line.rstrip("\r\n").split("\t")) for line in i_file.read().splitlines()[1:] } # Finding if some 'not_good_enough' samples were chosen not_good_still = {s[2:] for s in chosen_sample & not_good_enough} # We create a LaTeX summary latex_file = os.path.join(script_prefix + ".summary.tex") try: with open(latex_file, "w") as o_file: print >>o_file, latex_template.subsection( duplicated_samples.pretty_name ) text = ( "A total of {:,d} duplicated sample{} {} found.".format( len(duplicated_count), "s" if len(duplicated_count) > 1 else "", "were" if len(duplicated_count) > 1 else "was", ) ) print >>o_file, latex_template.wrap_lines(text) if len(duplicated_count) > 0: text = ( "While merging duplicates, a total of {:,d} genotype{} {} " "zeroed out. 
A total of {:,d} sample{} {} found to be not " "good enough for duplicate completion.".format( nb_zeroed_out, "s" if nb_zeroed_out > 1 else "", "were" if nb_zeroed_out > 1 else "was", len(not_good_enough), "s" if len(not_good_enough) > 1 else "", "were" if len(not_good_enough) > 1 else "was", ) ) print >>o_file, latex_template.wrap_lines(text) table_label = re.sub( r"[/\\]", "_", script_prefix, ) + "_dup_samples" text = ( r"Table~\ref{" + table_label + "} summarizes the number " "of each duplicated sample with some characteristics." ) print >>o_file, latex_template.wrap_lines(text) if len(not_good_still) > 0: text = latex_template.textbf( "There {} {:,d} sample{} that {} not good due to low " "completion or concordance, but {} still selected as " "the best duplicate (see Table~{}).".format( "were" if len(not_good_still) > 1 else "was", len(not_good_still), "s" if len(not_good_still) > 1 else "", "were" if len(not_good_still) > 1 else "was", "were" if len(not_good_still) > 1 else "was", r"~\ref{" + table_label + "}", ) ) print >>o_file, latex_template.wrap_lines(text) # Getting the template longtable_template = latex_template.jinja2_env.get_template( "longtable_template.tex", ) # The table caption table_caption = ( "Summary of the {:,d} duplicated sample{}. The number of " "duplicates and the total number of zeroed out genotypes " "are shown.".format( len(duplicated_count), "s" if len(duplicated_count) > 1 else "", ) ) if len(not_good_still) > 0: table_caption += ( " A total of {:,d} sample{} (highlighted) {} not good " "enough for completion, but {} chosen as the best " "duplicate, and {} still in the final " "dataset).".format( len(not_good_still), "s" if len(not_good_still) > 1 else "", "were" if len(not_good_still) > 1 else "was", "were" if len(not_good_still) > 1 else "was", "are" if len(not_good_still) > 1 else "is", ) ) duplicated_samples_list = duplicated_count.most_common() print >>o_file, longtable_template.render( table_caption=table_caption, table_label=table_label, nb_col=4, col_alignments="llrr", text_size="scriptsize", header_data=[("FID", 1), ("IID", 1), ("Nb Duplicate", 1), ("Nb Zeroed", 1)], tabular_data=[ [latex_template.sanitize_tex(fid), latex_template.sanitize_tex(iid), "{:,d}".format(nb), "{:,d}".format(zeroed_out[(fid, iid)])] for (fid, iid), nb in duplicated_samples_list ], highlighted=[ (fid, iid) in not_good_still for fid, iid in [i[0] for i in duplicated_samples_list] ], ) except IOError: msg = "{}: cannot write LaTeX summary".format(latex_file) raise ProgramError(msg) # Writing the summary results with open(os.path.join(base_dir, "results_summary.txt"), "a") as o_file: print >>o_file, "# {}".format(script_prefix) counter = Counter(duplicated_count.values()).most_common() if counter: print >>o_file, "Number of replicated samples" else: print >>o_file, "Number of replicated samples\t0" for rep_type, rep_count in counter: print >>o_file, " - x{}\t{:,d}\t\t-{:,d}".format( rep_type, rep_count, (rep_count * rep_type) - rep_count, ) print >>o_file, ("Poorly chosen replicated " "samples\t{:,d}".format(len(not_good_still))) print >>o_file, "---" # We know this step does produce a new data set (tfile), so we return it return _StepResult( next_file=os.path.join(out_prefix, "dup_samples.final"), next_file_type="tfile", latex_summary=latex_file, description=duplicated_samples.desc, long_description=duplicated_samples.long_desc, graph_path=None, )
[ "def", "run_duplicated_samples", "(", "in_prefix", ",", "in_type", ",", "out_prefix", ",", "base_dir", ",", "options", ")", ":", "# Creating the output directory", "os", ".", "mkdir", "(", "out_prefix", ")", "# We know we need tfile", "required_type", "=", "\"tfile\"", "check_input_files", "(", "in_prefix", ",", "in_type", ",", "required_type", ")", "# We need to inject the name of the input file and the name of the output", "# prefix", "script_prefix", "=", "os", ".", "path", ".", "join", "(", "out_prefix", ",", "\"dup_samples\"", ")", "options", "+=", "[", "\"--{}\"", ".", "format", "(", "required_type", ")", ",", "in_prefix", ",", "\"--out\"", ",", "script_prefix", "]", "# We run the script", "try", ":", "duplicated_samples", ".", "main", "(", "options", ")", "except", "duplicated_samples", ".", "ProgramError", "as", "e", ":", "msg", "=", "\"duplicated_samples: {}\"", ".", "format", "(", "e", ")", "raise", "ProgramError", "(", "msg", ")", "# Reading the number of duplicated samples", "duplicated_count", "=", "defaultdict", "(", "int", ")", "if", "os", ".", "path", ".", "isfile", "(", "script_prefix", "+", "\".duplicated_samples.tfam\"", ")", ":", "with", "open", "(", "script_prefix", "+", "\".duplicated_samples.tfam\"", ",", "\"r\"", ")", "as", "i_file", ":", "duplicated_count", "=", "Counter", "(", "[", "tuple", "(", "createRowFromPlinkSpacedOutput", "(", "line", ")", "[", ":", "2", "]", ")", "for", "line", "in", "i_file", "]", ")", "# Counting the number of zeroed out genotypes per duplicated sample", "zeroed_out", "=", "defaultdict", "(", "int", ")", "if", "os", ".", "path", ".", "isfile", "(", "script_prefix", "+", "\".zeroed_out\"", ")", ":", "with", "open", "(", "script_prefix", "+", "\".zeroed_out\"", ",", "\"r\"", ")", "as", "i_file", ":", "zeroed_out", "=", "Counter", "(", "[", "tuple", "(", "line", ".", "rstrip", "(", "\"\\r\\n\"", ")", ".", "split", "(", "\"\\t\"", ")", "[", ":", "2", "]", ")", "for", "line", "in", "i_file", ".", "read", "(", ")", ".", "splitlines", "(", ")", "[", "1", ":", "]", "]", ")", "nb_zeroed_out", "=", "sum", "(", "zeroed_out", ".", "values", "(", ")", ")", "# Checking the not good enough samples", "not_good_enough", "=", "set", "(", ")", "if", "os", ".", "path", ".", "isfile", "(", "script_prefix", "+", "\".not_good_enough\"", ")", ":", "with", "open", "(", "script_prefix", "+", "\".not_good_enough\"", ",", "\"r\"", ")", "as", "i_file", ":", "not_good_enough", "=", "{", "tuple", "(", "line", ".", "rstrip", "(", "\"\\r\\n\"", ")", ".", "split", "(", "\"\\t\"", ")", "[", ":", "4", "]", ")", "for", "line", "in", "i_file", ".", "read", "(", ")", ".", "splitlines", "(", ")", "[", "1", ":", "]", "}", "# Checking which samples were chosen", "chosen_sample", "=", "set", "(", ")", "if", "os", ".", "path", ".", "isfile", "(", "script_prefix", "+", "\".chosen_samples.info\"", ")", ":", "with", "open", "(", "script_prefix", "+", "\".chosen_samples.info\"", ",", "\"r\"", ")", "as", "i_file", ":", "chosen_sample", "=", "{", "tuple", "(", "line", ".", "rstrip", "(", "\"\\r\\n\"", ")", ".", "split", "(", "\"\\t\"", ")", ")", "for", "line", "in", "i_file", ".", "read", "(", ")", ".", "splitlines", "(", ")", "[", "1", ":", "]", "}", "# Finding if some 'not_good_enough' samples were chosen", "not_good_still", "=", "{", "s", "[", "2", ":", "]", "for", "s", "in", "chosen_sample", "&", "not_good_enough", "}", "# We create a LaTeX summary", "latex_file", "=", "os", ".", "path", ".", "join", "(", "script_prefix", "+", "\".summary.tex\"", ")", "try", ":", "with", "open", 
"(", "latex_file", ",", "\"w\"", ")", "as", "o_file", ":", "print", ">>", "o_file", ",", "latex_template", ".", "subsection", "(", "duplicated_samples", ".", "pretty_name", ")", "text", "=", "(", "\"A total of {:,d} duplicated sample{} {} found.\"", ".", "format", "(", "len", "(", "duplicated_count", ")", ",", "\"s\"", "if", "len", "(", "duplicated_count", ")", ">", "1", "else", "\"\"", ",", "\"were\"", "if", "len", "(", "duplicated_count", ")", ">", "1", "else", "\"was\"", ",", ")", ")", "print", ">>", "o_file", ",", "latex_template", ".", "wrap_lines", "(", "text", ")", "if", "len", "(", "duplicated_count", ")", ">", "0", ":", "text", "=", "(", "\"While merging duplicates, a total of {:,d} genotype{} {} \"", "\"zeroed out. A total of {:,d} sample{} {} found to be not \"", "\"good enough for duplicate completion.\"", ".", "format", "(", "nb_zeroed_out", ",", "\"s\"", "if", "nb_zeroed_out", ">", "1", "else", "\"\"", ",", "\"were\"", "if", "nb_zeroed_out", ">", "1", "else", "\"was\"", ",", "len", "(", "not_good_enough", ")", ",", "\"s\"", "if", "len", "(", "not_good_enough", ")", ">", "1", "else", "\"\"", ",", "\"were\"", "if", "len", "(", "not_good_enough", ")", ">", "1", "else", "\"was\"", ",", ")", ")", "print", ">>", "o_file", ",", "latex_template", ".", "wrap_lines", "(", "text", ")", "table_label", "=", "re", ".", "sub", "(", "r\"[/\\\\]\"", ",", "\"_\"", ",", "script_prefix", ",", ")", "+", "\"_dup_samples\"", "text", "=", "(", "r\"Table~\\ref{\"", "+", "table_label", "+", "\"} summarizes the number \"", "\"of each duplicated sample with some characteristics.\"", ")", "print", ">>", "o_file", ",", "latex_template", ".", "wrap_lines", "(", "text", ")", "if", "len", "(", "not_good_still", ")", ">", "0", ":", "text", "=", "latex_template", ".", "textbf", "(", "\"There {} {:,d} sample{} that {} not good due to low \"", "\"completion or concordance, but {} still selected as \"", "\"the best duplicate (see Table~{}).\"", ".", "format", "(", "\"were\"", "if", "len", "(", "not_good_still", ")", ">", "1", "else", "\"was\"", ",", "len", "(", "not_good_still", ")", ",", "\"s\"", "if", "len", "(", "not_good_still", ")", ">", "1", "else", "\"\"", ",", "\"were\"", "if", "len", "(", "not_good_still", ")", ">", "1", "else", "\"was\"", ",", "\"were\"", "if", "len", "(", "not_good_still", ")", ">", "1", "else", "\"was\"", ",", "r\"~\\ref{\"", "+", "table_label", "+", "\"}\"", ",", ")", ")", "print", ">>", "o_file", ",", "latex_template", ".", "wrap_lines", "(", "text", ")", "# Getting the template", "longtable_template", "=", "latex_template", ".", "jinja2_env", ".", "get_template", "(", "\"longtable_template.tex\"", ",", ")", "# The table caption", "table_caption", "=", "(", "\"Summary of the {:,d} duplicated sample{}. 
The number of \"", "\"duplicates and the total number of zeroed out genotypes \"", "\"are shown.\"", ".", "format", "(", "len", "(", "duplicated_count", ")", ",", "\"s\"", "if", "len", "(", "duplicated_count", ")", ">", "1", "else", "\"\"", ",", ")", ")", "if", "len", "(", "not_good_still", ")", ">", "0", ":", "table_caption", "+=", "(", "\" A total of {:,d} sample{} (highlighted) {} not good \"", "\"enough for completion, but {} chosen as the best \"", "\"duplicate, and {} still in the final \"", "\"dataset).\"", ".", "format", "(", "len", "(", "not_good_still", ")", ",", "\"s\"", "if", "len", "(", "not_good_still", ")", ">", "1", "else", "\"\"", ",", "\"were\"", "if", "len", "(", "not_good_still", ")", ">", "1", "else", "\"was\"", ",", "\"were\"", "if", "len", "(", "not_good_still", ")", ">", "1", "else", "\"was\"", ",", "\"are\"", "if", "len", "(", "not_good_still", ")", ">", "1", "else", "\"is\"", ",", ")", ")", "duplicated_samples_list", "=", "duplicated_count", ".", "most_common", "(", ")", "print", ">>", "o_file", ",", "longtable_template", ".", "render", "(", "table_caption", "=", "table_caption", ",", "table_label", "=", "table_label", ",", "nb_col", "=", "4", ",", "col_alignments", "=", "\"llrr\"", ",", "text_size", "=", "\"scriptsize\"", ",", "header_data", "=", "[", "(", "\"FID\"", ",", "1", ")", ",", "(", "\"IID\"", ",", "1", ")", ",", "(", "\"Nb Duplicate\"", ",", "1", ")", ",", "(", "\"Nb Zeroed\"", ",", "1", ")", "]", ",", "tabular_data", "=", "[", "[", "latex_template", ".", "sanitize_tex", "(", "fid", ")", ",", "latex_template", ".", "sanitize_tex", "(", "iid", ")", ",", "\"{:,d}\"", ".", "format", "(", "nb", ")", ",", "\"{:,d}\"", ".", "format", "(", "zeroed_out", "[", "(", "fid", ",", "iid", ")", "]", ")", "]", "for", "(", "fid", ",", "iid", ")", ",", "nb", "in", "duplicated_samples_list", "]", ",", "highlighted", "=", "[", "(", "fid", ",", "iid", ")", "in", "not_good_still", "for", "fid", ",", "iid", "in", "[", "i", "[", "0", "]", "for", "i", "in", "duplicated_samples_list", "]", "]", ",", ")", "except", "IOError", ":", "msg", "=", "\"{}: cannot write LaTeX summary\"", ".", "format", "(", "latex_file", ")", "raise", "ProgramError", "(", "msg", ")", "# Writing the summary results", "with", "open", "(", "os", ".", "path", ".", "join", "(", "base_dir", ",", "\"results_summary.txt\"", ")", ",", "\"a\"", ")", "as", "o_file", ":", "print", ">>", "o_file", ",", "\"# {}\"", ".", "format", "(", "script_prefix", ")", "counter", "=", "Counter", "(", "duplicated_count", ".", "values", "(", ")", ")", ".", "most_common", "(", ")", "if", "counter", ":", "print", ">>", "o_file", ",", "\"Number of replicated samples\"", "else", ":", "print", ">>", "o_file", ",", "\"Number of replicated samples\\t0\"", "for", "rep_type", ",", "rep_count", "in", "counter", ":", "print", ">>", "o_file", ",", "\" - x{}\\t{:,d}\\t\\t-{:,d}\"", ".", "format", "(", "rep_type", ",", "rep_count", ",", "(", "rep_count", "*", "rep_type", ")", "-", "rep_count", ",", ")", "print", ">>", "o_file", ",", "(", "\"Poorly chosen replicated \"", "\"samples\\t{:,d}\"", ".", "format", "(", "len", "(", "not_good_still", ")", ")", ")", "print", ">>", "o_file", ",", "\"---\"", "# We know this step does produce a new data set (tfile), so we return it", "return", "_StepResult", "(", "next_file", "=", "os", ".", "path", ".", "join", "(", "out_prefix", ",", "\"dup_samples.final\"", ")", ",", "next_file_type", "=", "\"tfile\"", ",", "latex_summary", "=", "latex_file", ",", "description", "=", "duplicated_samples", ".", "desc", ",", 
"long_description", "=", "duplicated_samples", ".", "long_desc", ",", "graph_path", "=", "None", ",", ")" ]
Runs step1 (duplicated samples). :param in_prefix: the prefix of the input files. :param in_type: the type of the input files. :param out_prefix: the output prefix. :param base_dir: the output directory. :param options: the options needed. :type in_prefix: str :type in_type: str :type out_prefix: str :type base_dir: str :type options: list :returns: a tuple containing the prefix of the output files (the input prefix for the next script) and the type of the output files (``tfile``). This function calls the :py:mod:`pyGenClean.DupSamples.duplicated_samples` module. The required file type for this module is ``tfile``, hence the need to use the :py:func:`check_input_files` to check if the file input file type is the good one, or to create it if needed.
[ "Runs", "step1", "(", "duplicated", "samples", ")", "." ]
python
train
mitsei/dlkit
dlkit/handcar/repository/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/repository/managers.py#L815-L845
def get_asset_admin_session_for_repository(self, repository_id=None, *args, **kwargs): """Gets an asset administration session for the given repository. arg: repository_id (osid.id.Id): the Id of the repository return: (osid.repository.AssetAdminSession) - an AssetAdminSession raise: NotFound - repository_id not found raise: NullArgument - repository_id is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_asset_admin() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_asset_admin() and supports_visible_federation() are true. """ if not repository_id: raise NullArgument() if not self.supports_asset_admin(): raise Unimplemented() try: from . import sessions except ImportError: raise OperationFailed('import error') try: session = sessions.AssetAdminSession(repository_id, proxy=self._proxy, runtime=self._runtime, **kwargs) except AttributeError: raise OperationFailed('attribute error') return session
[ "def", "get_asset_admin_session_for_repository", "(", "self", ",", "repository_id", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "repository_id", ":", "raise", "NullArgument", "(", ")", "if", "not", "self", ".", "supports_asset_admin", "(", ")", ":", "raise", "Unimplemented", "(", ")", "try", ":", "from", ".", "import", "sessions", "except", "ImportError", ":", "raise", "OperationFailed", "(", "'import error'", ")", "try", ":", "session", "=", "sessions", ".", "AssetAdminSession", "(", "repository_id", ",", "proxy", "=", "self", ".", "_proxy", ",", "runtime", "=", "self", ".", "_runtime", ",", "*", "*", "kwargs", ")", "except", "AttributeError", ":", "raise", "OperationFailed", "(", "'attribute error'", ")", "return", "session" ]
Gets an asset administration session for the given repository. arg: repository_id (osid.id.Id): the Id of the repository return: (osid.repository.AssetAdminSession) - an AssetAdminSession raise: NotFound - repository_id not found raise: NullArgument - repository_id is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_asset_admin() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_asset_admin() and supports_visible_federation() are true.
[ "Gets", "an", "asset", "administration", "session", "for", "the", "given", "repository", "." ]
python
train
acutesoftware/AIKIF
aikif/cls_file_mapping.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/cls_file_mapping.py#L27-L39
def load_data_subject_areas(subject_file): """ reads the subject file to a list, to confirm config is setup """ lst = [] if os.path.exists(subject_file): with open(subject_file, 'r') as f: for line in f: lst.append(line.strip()) else: print('MISSING DATA FILE (subject_file) ' , subject_file) print('update your config.py or config.txt') return lst
[ "def", "load_data_subject_areas", "(", "subject_file", ")", ":", "lst", "=", "[", "]", "if", "os", ".", "path", ".", "exists", "(", "subject_file", ")", ":", "with", "open", "(", "subject_file", ",", "'r'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "lst", ".", "append", "(", "line", ".", "strip", "(", ")", ")", "else", ":", "print", "(", "'MISSING DATA FILE (subject_file) '", ",", "subject_file", ")", "print", "(", "'update your config.py or config.txt'", ")", "return", "lst" ]
reads the subject file into a list, to confirm config is set up
[ "reads", "the", "subject", "file", "to", "a", "list", "to", "confirm", "config", "is", "setup" ]
python
train
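A minimal illustration of the config reader above; the subject file name and its contents are made up:

```python
# Hypothetical subject file; load_data_subject_areas strips each line into a list.
with open("subject_areas.txt", "w") as f:
    f.write("biology\nfinance\nmusic\n")

from aikif.cls_file_mapping import load_data_subject_areas

subjects = load_data_subject_areas("subject_areas.txt")
print(subjects)  # ['biology', 'finance', 'music']
```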
tchellomello/python-amcrest
src/amcrest/system.py
https://github.com/tchellomello/python-amcrest/blob/ed842139e234de2eaf6ee8fb480214711cde1249/src/amcrest/system.py#L26-L45
def current_time(self, date): """ According with API: The time format is "Y-M-D H-m-S". It is not be effected by Locales. TimeFormat in SetLocalesConfig Params: date = "Y-M-D H-m-S" Example: 2016-10-28 13:48:00 Return: True """ ret = self.command( 'global.cgi?action=setCurrentTime&time={0}'.format(date) ) if "ok" in ret.content.decode('utf-8').lower(): return True return False
[ "def", "current_time", "(", "self", ",", "date", ")", ":", "ret", "=", "self", ".", "command", "(", "'global.cgi?action=setCurrentTime&time={0}'", ".", "format", "(", "date", ")", ")", "if", "\"ok\"", "in", "ret", ".", "content", ".", "decode", "(", "'utf-8'", ")", ".", "lower", "(", ")", ":", "return", "True", "return", "False" ]
According to the API: The time format is "Y-M-D H-m-S". It is not affected by Locales. TimeFormat in SetLocalesConfig Params: date = "Y-M-D H-m-S" Example: 2016-10-28 13:48:00 Return: True
[ "According", "with", "API", ":", "The", "time", "format", "is", "Y", "-", "M", "-", "D", "H", "-", "m", "-", "S", ".", "It", "is", "not", "be", "effected", "by", "Locales", ".", "TimeFormat", "in", "SetLocalesConfig" ]
python
train
neo4j-drivers/neobolt
neobolt/impl/python/routing.py
https://github.com/neo4j-drivers/neobolt/blob/724569d76e85777c4f5e30e8d0a18116bda4d8cd/neobolt/impl/python/routing.py#L222-L260
def fetch_routing_info(self, address): """ Fetch raw routing info from a given router address. :param address: router address :return: list of routing records or None if no connection could be established :raise ServiceUnavailable: if the server does not support routing or if routing support is broken """ metadata = {} records = [] def fail(md): if md.get("code") == "Neo.ClientError.Procedure.ProcedureNotFound": raise RoutingProtocolError("Server {!r} does not support routing".format(address)) else: raise RoutingProtocolError("Routing support broken on server {!r}".format(address)) try: with self.acquire_direct(address) as cx: _, _, server_version = (cx.server.agent or "").partition("/") # TODO 2.0: remove old routing procedure if server_version and Version.parse(server_version) >= Version((3, 2)): log_debug("[#%04X] C: <ROUTING> query=%r", cx.local_port, self.routing_context or {}) cx.run("CALL dbms.cluster.routing.getRoutingTable({context})", {"context": self.routing_context}, on_success=metadata.update, on_failure=fail) else: log_debug("[#%04X] C: <ROUTING> query={}", cx.local_port) cx.run("CALL dbms.cluster.routing.getServers", {}, on_success=metadata.update, on_failure=fail) cx.pull_all(on_success=metadata.update, on_records=records.extend) cx.sync() routing_info = [dict(zip(metadata.get("fields", ()), values)) for values in records] log_debug("[#%04X] S: <ROUTING> info=%r", cx.local_port, routing_info) return routing_info except RoutingProtocolError as error: raise ServiceUnavailable(*error.args) except ServiceUnavailable: self.deactivate(address) return None
[ "def", "fetch_routing_info", "(", "self", ",", "address", ")", ":", "metadata", "=", "{", "}", "records", "=", "[", "]", "def", "fail", "(", "md", ")", ":", "if", "md", ".", "get", "(", "\"code\"", ")", "==", "\"Neo.ClientError.Procedure.ProcedureNotFound\"", ":", "raise", "RoutingProtocolError", "(", "\"Server {!r} does not support routing\"", ".", "format", "(", "address", ")", ")", "else", ":", "raise", "RoutingProtocolError", "(", "\"Routing support broken on server {!r}\"", ".", "format", "(", "address", ")", ")", "try", ":", "with", "self", ".", "acquire_direct", "(", "address", ")", "as", "cx", ":", "_", ",", "_", ",", "server_version", "=", "(", "cx", ".", "server", ".", "agent", "or", "\"\"", ")", ".", "partition", "(", "\"/\"", ")", "# TODO 2.0: remove old routing procedure", "if", "server_version", "and", "Version", ".", "parse", "(", "server_version", ")", ">=", "Version", "(", "(", "3", ",", "2", ")", ")", ":", "log_debug", "(", "\"[#%04X] C: <ROUTING> query=%r\"", ",", "cx", ".", "local_port", ",", "self", ".", "routing_context", "or", "{", "}", ")", "cx", ".", "run", "(", "\"CALL dbms.cluster.routing.getRoutingTable({context})\"", ",", "{", "\"context\"", ":", "self", ".", "routing_context", "}", ",", "on_success", "=", "metadata", ".", "update", ",", "on_failure", "=", "fail", ")", "else", ":", "log_debug", "(", "\"[#%04X] C: <ROUTING> query={}\"", ",", "cx", ".", "local_port", ")", "cx", ".", "run", "(", "\"CALL dbms.cluster.routing.getServers\"", ",", "{", "}", ",", "on_success", "=", "metadata", ".", "update", ",", "on_failure", "=", "fail", ")", "cx", ".", "pull_all", "(", "on_success", "=", "metadata", ".", "update", ",", "on_records", "=", "records", ".", "extend", ")", "cx", ".", "sync", "(", ")", "routing_info", "=", "[", "dict", "(", "zip", "(", "metadata", ".", "get", "(", "\"fields\"", ",", "(", ")", ")", ",", "values", ")", ")", "for", "values", "in", "records", "]", "log_debug", "(", "\"[#%04X] S: <ROUTING> info=%r\"", ",", "cx", ".", "local_port", ",", "routing_info", ")", "return", "routing_info", "except", "RoutingProtocolError", "as", "error", ":", "raise", "ServiceUnavailable", "(", "*", "error", ".", "args", ")", "except", "ServiceUnavailable", ":", "self", ".", "deactivate", "(", "address", ")", "return", "None" ]
Fetch raw routing info from a given router address. :param address: router address :return: list of routing records or None if no connection could be established :raise ServiceUnavailable: if the server does not support routing or if routing support is broken
[ "Fetch", "raw", "routing", "info", "from", "a", "given", "router", "address", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/utils/yellowfin.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/yellowfin.py#L398-L408
def _get_mu_tensor(self): """Get the min mu which minimize the surrogate. Returns: The mu_t. """ root = self._get_cubic_root() dr = self._h_max / self._h_min mu = tf.maximum( root**2, ((tf.sqrt(dr) - 1) / (tf.sqrt(dr) + 1))**2) return mu
[ "def", "_get_mu_tensor", "(", "self", ")", ":", "root", "=", "self", ".", "_get_cubic_root", "(", ")", "dr", "=", "self", ".", "_h_max", "/", "self", ".", "_h_min", "mu", "=", "tf", ".", "maximum", "(", "root", "**", "2", ",", "(", "(", "tf", ".", "sqrt", "(", "dr", ")", "-", "1", ")", "/", "(", "tf", ".", "sqrt", "(", "dr", ")", "+", "1", ")", ")", "**", "2", ")", "return", "mu" ]
Get the min mu which minimizes the surrogate. Returns: The mu_t.
[ "Get", "the", "min", "mu", "which", "minimize", "the", "surrogate", "." ]
python
train
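The `_get_mu_tensor` record computes the momentum as the larger of the squared cubic root and the squared ratio `((sqrt(dr) - 1) / (sqrt(dr) + 1))**2`. A plain-NumPy restatement with made-up curvature numbers:

```python
# Same formula as _get_mu_tensor, outside TensorFlow; h_max, h_min and root are
# invented numbers purely for illustration.
import numpy as np

h_max, h_min = 100.0, 1.0   # assumed largest / smallest curvature estimates
root = 0.5                  # assumed solution of the cubic-root subproblem

dr = h_max / h_min
mu = np.maximum(root**2, ((np.sqrt(dr) - 1) / (np.sqrt(dr) + 1))**2)
print(mu)  # ~0.669 with these numbers
```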
thombashi/DataProperty
examples/py/to_column_dp_list.py
https://github.com/thombashi/DataProperty/blob/1d1a4c6abee87264c2f870a932c0194895d80a18/examples/py/to_column_dp_list.py#L16-L24
def display_col_dp(dp_list, attr_name): """ show a value assocciated with an attribute for each DataProperty instance in the dp_list """ print() print("---------- {:s} ----------".format(attr_name)) print([getattr(dp, attr_name) for dp in dp_list])
[ "def", "display_col_dp", "(", "dp_list", ",", "attr_name", ")", ":", "print", "(", ")", "print", "(", "\"---------- {:s} ----------\"", ".", "format", "(", "attr_name", ")", ")", "print", "(", "[", "getattr", "(", "dp", ",", "attr_name", ")", "for", "dp", "in", "dp_list", "]", ")" ]
show the value associated with an attribute for each DataProperty instance in the dp_list
[ "show", "a", "value", "assocciated", "with", "an", "attribute", "for", "each", "DataProperty", "instance", "in", "the", "dp_list" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_maps_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_maps_ext.py#L122-L134
def maps_get_rules_output_rules_value(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") maps_get_rules = ET.Element("maps_get_rules") config = maps_get_rules output = ET.SubElement(maps_get_rules, "output") rules = ET.SubElement(output, "rules") value = ET.SubElement(rules, "value") value.text = kwargs.pop('value') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "maps_get_rules_output_rules_value", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "maps_get_rules", "=", "ET", ".", "Element", "(", "\"maps_get_rules\"", ")", "config", "=", "maps_get_rules", "output", "=", "ET", ".", "SubElement", "(", "maps_get_rules", ",", "\"output\"", ")", "rules", "=", "ET", ".", "SubElement", "(", "output", ",", "\"rules\"", ")", "value", "=", "ET", ".", "SubElement", "(", "rules", ",", "\"value\"", ")", "value", ".", "text", "=", "kwargs", ".", "pop", "(", "'value'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
svartalf/python-2gis
dgis/__init__.py
https://github.com/svartalf/python-2gis/blob/6eccd6073c99494b7abf20b38a5455cbd55d6420/dgis/__init__.py#L62-L82
def search(self, **kwargs): """Firms search http://api.2gis.ru/doc/firms/searches/search/ """ point = kwargs.pop('point', False) if point: kwargs['point'] = '%s,%s' % (point[0], point[1]) bound = kwargs.pop('bound', False) if bound: kwargs['bound[point1]'] = bound[0] kwargs['bound[point2]'] = bound[1] filters = kwargs.pop('filters', False) if filters: for k, v in filters.items(): kwargs['filters[%s]' % k] = v return self._search(**kwargs)
[ "def", "search", "(", "self", ",", "*", "*", "kwargs", ")", ":", "point", "=", "kwargs", ".", "pop", "(", "'point'", ",", "False", ")", "if", "point", ":", "kwargs", "[", "'point'", "]", "=", "'%s,%s'", "%", "(", "point", "[", "0", "]", ",", "point", "[", "1", "]", ")", "bound", "=", "kwargs", ".", "pop", "(", "'bound'", ",", "False", ")", "if", "bound", ":", "kwargs", "[", "'bound[point1]'", "]", "=", "bound", "[", "0", "]", "kwargs", "[", "'bound[point2]'", "]", "=", "bound", "[", "1", "]", "filters", "=", "kwargs", ".", "pop", "(", "'filters'", ",", "False", ")", "if", "filters", ":", "for", "k", ",", "v", "in", "filters", ".", "items", "(", ")", ":", "kwargs", "[", "'filters[%s]'", "%", "k", "]", "=", "v", "return", "self", ".", "_search", "(", "*", "*", "kwargs", ")" ]
Firms search http://api.2gis.ru/doc/firms/searches/search/
[ "Firms", "search" ]
python
train
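The useful detail in this method is how tuple and dict keyword arguments are flattened into the flat key style the 2GIS API expects. A self-contained sketch of that rewriting follows; the extra parameter names ('what', 'worktime') are illustrative assumptions.

def flatten_search_kwargs(**kwargs):
    # Standalone copy of the kwargs rewriting done by search() above.
    point = kwargs.pop('point', False)
    if point:
        kwargs['point'] = '%s,%s' % (point[0], point[1])
    bound = kwargs.pop('bound', False)
    if bound:
        kwargs['bound[point1]'] = bound[0]
        kwargs['bound[point2]'] = bound[1]
    filters = kwargs.pop('filters', False)
    if filters:
        for k, v in filters.items():
            kwargs['filters[%s]' % k] = v
    return kwargs

print(flatten_search_kwargs(what="coffee", point=(54.98, 82.89), filters={"worktime": "now"}))
# {'what': 'coffee', 'point': '54.98,82.89', 'filters[worktime]': 'now'}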
joshspeagle/dynesty
dynesty/dynamicsampler.py
https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/dynamicsampler.py#L62-L151
def weight_function(results, args=None, return_weights=False): """ The default weight function utilized by :class:`DynamicSampler`. Zipped parameters are passed to the function via :data:`args`. Assigns each point a weight based on a weighted average of the posterior and evidence information content:: weight = pfrac * pweight + (1. - pfrac) * zweight where `pfrac` is the fractional importance placed on the posterior, the evidence weight `zweight` is based on the estimated remaining posterior mass, and the posterior weight `pweight` is the sample's importance weight. Returns a set of log-likelihood bounds set by the earliest/latest samples where `weight > maxfrac * max(weight)`, with additional left/right padding based on `pad`. Parameters ---------- results : :class:`Results` instance :class:`Results` instance. args : dictionary of keyword arguments, optional Arguments used to set the log-likelihood bounds used for sampling, as described above. Default values are `pfrac = 0.8`, `maxfrac = 0.8`, and `pad = 1`. return_weights : bool, optional Whether to return the individual weights (and their components) used to compute the log-likelihood bounds. Default is `False`. Returns ------- logl_bounds : tuple with shape (2,) Log-likelihood bounds `(logl_min, logl_max)` determined by the weights. weights : tuple with shape (3,), optional The individual weights `(pweight, zweight, weight)` used to determine `logl_bounds`. """ # Initialize hyperparameters. if args is None: args = dict({}) pfrac = args.get('pfrac', 0.8) if not 0. <= pfrac <= 1.: raise ValueError("The provided `pfrac` {0} is not between 0. and 1." .format(pfrac)) maxfrac = args.get('maxfrac', 0.8) if not 0. <= maxfrac <= 1.: raise ValueError("The provided `maxfrac` {0} is not between 0. and 1." .format(maxfrac)) lpad = args.get('pad', 1) if lpad < 0: raise ValueError("`lpad` {0} is less than zero.".format(lpad)) # Derive evidence weights. logz = results.logz # final ln(evidence) logz_remain = results.logl[-1] + results.logvol[-1] # remainder logz_tot = np.logaddexp(logz[-1], logz_remain) # estimated upper bound lzones = np.ones_like(logz) logzin = misc.logsumexp([lzones * logz_tot, logz], axis=0, b=[lzones, -lzones]) # ln(remaining evidence) logzweight = logzin - np.log(results.samples_n) # ln(evidence weight) logzweight -= misc.logsumexp(logzweight) # normalize zweight = np.exp(logzweight) # convert to linear scale # Derive posterior weights. pweight = np.exp(results.logwt - results.logz[-1]) # importance weight pweight /= sum(pweight) # normalize # Compute combined weights. weight = (1. - pfrac) * zweight + pfrac * pweight # Compute logl bounds nsamps = len(logz) bounds = np.arange(nsamps)[weight > maxfrac * max(weight)] bounds = (min(bounds) - lpad, min(max(bounds) + lpad, nsamps - 1)) if bounds[0] < 0: logl_min = -np.inf else: logl_min = results.logl[bounds[0]] logl_max = results.logl[bounds[1]] if return_weights: return (logl_min, logl_max), (pweight, zweight, weight) else: return (logl_min, logl_max)
[ "def", "weight_function", "(", "results", ",", "args", "=", "None", ",", "return_weights", "=", "False", ")", ":", "# Initialize hyperparameters.", "if", "args", "is", "None", ":", "args", "=", "dict", "(", "{", "}", ")", "pfrac", "=", "args", ".", "get", "(", "'pfrac'", ",", "0.8", ")", "if", "not", "0.", "<=", "pfrac", "<=", "1.", ":", "raise", "ValueError", "(", "\"The provided `pfrac` {0} is not between 0. and 1.\"", ".", "format", "(", "pfrac", ")", ")", "maxfrac", "=", "args", ".", "get", "(", "'maxfrac'", ",", "0.8", ")", "if", "not", "0.", "<=", "maxfrac", "<=", "1.", ":", "raise", "ValueError", "(", "\"The provided `maxfrac` {0} is not between 0. and 1.\"", ".", "format", "(", "maxfrac", ")", ")", "lpad", "=", "args", ".", "get", "(", "'pad'", ",", "1", ")", "if", "lpad", "<", "0", ":", "raise", "ValueError", "(", "\"`lpad` {0} is less than zero.\"", ".", "format", "(", "lpad", ")", ")", "# Derive evidence weights.", "logz", "=", "results", ".", "logz", "# final ln(evidence)", "logz_remain", "=", "results", ".", "logl", "[", "-", "1", "]", "+", "results", ".", "logvol", "[", "-", "1", "]", "# remainder", "logz_tot", "=", "np", ".", "logaddexp", "(", "logz", "[", "-", "1", "]", ",", "logz_remain", ")", "# estimated upper bound", "lzones", "=", "np", ".", "ones_like", "(", "logz", ")", "logzin", "=", "misc", ".", "logsumexp", "(", "[", "lzones", "*", "logz_tot", ",", "logz", "]", ",", "axis", "=", "0", ",", "b", "=", "[", "lzones", ",", "-", "lzones", "]", ")", "# ln(remaining evidence)", "logzweight", "=", "logzin", "-", "np", ".", "log", "(", "results", ".", "samples_n", ")", "# ln(evidence weight)", "logzweight", "-=", "misc", ".", "logsumexp", "(", "logzweight", ")", "# normalize", "zweight", "=", "np", ".", "exp", "(", "logzweight", ")", "# convert to linear scale", "# Derive posterior weights.", "pweight", "=", "np", ".", "exp", "(", "results", ".", "logwt", "-", "results", ".", "logz", "[", "-", "1", "]", ")", "# importance weight", "pweight", "/=", "sum", "(", "pweight", ")", "# normalize", "# Compute combined weights.", "weight", "=", "(", "1.", "-", "pfrac", ")", "*", "zweight", "+", "pfrac", "*", "pweight", "# Compute logl bounds", "nsamps", "=", "len", "(", "logz", ")", "bounds", "=", "np", ".", "arange", "(", "nsamps", ")", "[", "weight", ">", "maxfrac", "*", "max", "(", "weight", ")", "]", "bounds", "=", "(", "min", "(", "bounds", ")", "-", "lpad", ",", "min", "(", "max", "(", "bounds", ")", "+", "lpad", ",", "nsamps", "-", "1", ")", ")", "if", "bounds", "[", "0", "]", "<", "0", ":", "logl_min", "=", "-", "np", ".", "inf", "else", ":", "logl_min", "=", "results", ".", "logl", "[", "bounds", "[", "0", "]", "]", "logl_max", "=", "results", ".", "logl", "[", "bounds", "[", "1", "]", "]", "if", "return_weights", ":", "return", "(", "logl_min", ",", "logl_max", ")", ",", "(", "pweight", ",", "zweight", ",", "weight", ")", "else", ":", "return", "(", "logl_min", ",", "logl_max", ")" ]
The default weight function utilized by :class:`DynamicSampler`. Zipped parameters are passed to the function via :data:`args`. Assigns each point a weight based on a weighted average of the posterior and evidence information content:: weight = pfrac * pweight + (1. - pfrac) * zweight where `pfrac` is the fractional importance placed on the posterior, the evidence weight `zweight` is based on the estimated remaining posterior mass, and the posterior weight `pweight` is the sample's importance weight. Returns a set of log-likelihood bounds set by the earliest/latest samples where `weight > maxfrac * max(weight)`, with additional left/right padding based on `pad`. Parameters ---------- results : :class:`Results` instance :class:`Results` instance. args : dictionary of keyword arguments, optional Arguments used to set the log-likelihood bounds used for sampling, as described above. Default values are `pfrac = 0.8`, `maxfrac = 0.8`, and `pad = 1`. return_weights : bool, optional Whether to return the individual weights (and their components) used to compute the log-likelihood bounds. Default is `False`. Returns ------- logl_bounds : tuple with shape (2,) Log-likelihood bounds `(logl_min, logl_max)` determined by the weights. weights : tuple with shape (3,), optional The individual weights `(pweight, zweight, weight)` used to determine `logl_bounds`.
[ "The", "default", "weight", "function", "utilized", "by", ":", "class", ":", "DynamicSampler", ".", "Zipped", "parameters", "are", "passed", "to", "the", "function", "via", ":", "data", ":", "args", ".", "Assigns", "each", "point", "a", "weight", "based", "on", "a", "weighted", "average", "of", "the", "posterior", "and", "evidence", "information", "content", "::" ]
python
train
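A small NumPy illustration of the combined weight and the resulting index bounds, using made-up normalized weight arrays rather than a real Results object; only the arithmetic mirrors the function above.

import numpy as np

pfrac, maxfrac, pad = 0.8, 0.8, 1
pweight = np.array([0.1, 0.2, 0.3, 0.4])   # toy posterior importance weights
zweight = np.array([0.4, 0.3, 0.2, 0.1])   # toy evidence weights
weight = (1. - pfrac) * zweight + pfrac * pweight   # ~ [0.16, 0.22, 0.28, 0.34]

idx = np.arange(len(weight))[weight > maxfrac * weight.max()]
bounds = (int(idx.min()) - pad, min(int(idx.max()) + pad, len(weight) - 1))
print(weight, bounds)   # bounds -> (1, 3): the logl values at those samples become the limits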
pypa/pipenv
pipenv/vendor/passa/models/lockers.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/passa/models/lockers.py#L124-L163
def lock(self): """Lock specified (abstract) requirements into (concrete) candidates. The locking procedure consists of four stages: * Resolve versions and dependency graph (powered by ResolveLib). * Walk the graph to determine "why" each candidate came to be, i.e. what top-level requirements result in a given candidate. * Populate hashes for resolved candidates. * Populate markers based on dependency specifications of each candidate, and the dependency graph. """ provider = self.get_provider() reporter = self.get_reporter() resolver = resolvelib.Resolver(provider, reporter) with vistir.cd(self.project.root): state = resolver.resolve(self.requirements) traces = trace_graph(state.graph) hash_cache = HashCache() for r in state.mapping.values(): if not r.hashes: r.hashes = get_hashes(hash_cache, r) set_metadata( state.mapping, traces, provider.fetched_dependencies, provider.collected_requires_pythons, ) lockfile = plette.Lockfile.with_meta_from(self.project.pipfile) lockfile["default"] = _collect_derived_entries( state, traces, self.default_requirements, ) lockfile["develop"] = _collect_derived_entries( state, traces, self.develop_requirements, ) self.project.lockfile = lockfile
[ "def", "lock", "(", "self", ")", ":", "provider", "=", "self", ".", "get_provider", "(", ")", "reporter", "=", "self", ".", "get_reporter", "(", ")", "resolver", "=", "resolvelib", ".", "Resolver", "(", "provider", ",", "reporter", ")", "with", "vistir", ".", "cd", "(", "self", ".", "project", ".", "root", ")", ":", "state", "=", "resolver", ".", "resolve", "(", "self", ".", "requirements", ")", "traces", "=", "trace_graph", "(", "state", ".", "graph", ")", "hash_cache", "=", "HashCache", "(", ")", "for", "r", "in", "state", ".", "mapping", ".", "values", "(", ")", ":", "if", "not", "r", ".", "hashes", ":", "r", ".", "hashes", "=", "get_hashes", "(", "hash_cache", ",", "r", ")", "set_metadata", "(", "state", ".", "mapping", ",", "traces", ",", "provider", ".", "fetched_dependencies", ",", "provider", ".", "collected_requires_pythons", ",", ")", "lockfile", "=", "plette", ".", "Lockfile", ".", "with_meta_from", "(", "self", ".", "project", ".", "pipfile", ")", "lockfile", "[", "\"default\"", "]", "=", "_collect_derived_entries", "(", "state", ",", "traces", ",", "self", ".", "default_requirements", ",", ")", "lockfile", "[", "\"develop\"", "]", "=", "_collect_derived_entries", "(", "state", ",", "traces", ",", "self", ".", "develop_requirements", ",", ")", "self", ".", "project", ".", "lockfile", "=", "lockfile" ]
Lock specified (abstract) requirements into (concrete) candidates. The locking procedure consists of four stages: * Resolve versions and dependency graph (powered by ResolveLib). * Walk the graph to determine "why" each candidate came to be, i.e. what top-level requirements result in a given candidate. * Populate hashes for resolved candidates. * Populate markers based on dependency specifications of each candidate, and the dependency graph.
[ "Lock", "specified", "(", "abstract", ")", "requirements", "into", "(", "concrete", ")", "candidates", "." ]
python
train
UCSBarchlab/PyRTL
pyrtl/rtllib/multipliers.py
https://github.com/UCSBarchlab/PyRTL/blob/0988e5c9c10ededd5e1f58d5306603f9edf4b3e2/pyrtl/rtllib/multipliers.py#L67-L102
def complex_mult(A, B, shifts, start): """ Generate shift-and-add multiplier that can shift and add multiple bits per clock cycle. Uses substantially more space than `simple_mult()` but is much faster. :param WireVector A, B: two input wires for the multiplication :param int shifts: number of spaces Register is to be shifted per clk cycle (cannot be greater than the length of `A` or `B`) :param bool start: start signal :returns: Register containing the product; the "done" signal """ alen = len(A) blen = len(B) areg = pyrtl.Register(alen) breg = pyrtl.Register(alen + blen) accum = pyrtl.Register(alen + blen) done = (areg == 0) # Multiplication is finished when a becomes 0 if (shifts > alen) or (shifts > blen): raise pyrtl.PyrtlError("shift is larger than one or both of the parameters A or B," "please choose smaller shift") # During multiplication, shift a right every cycle 'shift' times, # shift b left every cycle 'shift' times with pyrtl.conditional_assignment: with start: # initialization areg.next |= A breg.next |= B accum.next |= 0 with ~done: # don't run when there's no work to do # "Multiply" shifted breg by LSB of areg by cond. adding areg.next |= libutils._shifted_reg_next(areg, 'r', shifts) # right shift breg.next |= libutils._shifted_reg_next(breg, 'l', shifts) # left shift accum.next |= accum + _one_cycle_mult(areg, breg, shifts) return accum, done
[ "def", "complex_mult", "(", "A", ",", "B", ",", "shifts", ",", "start", ")", ":", "alen", "=", "len", "(", "A", ")", "blen", "=", "len", "(", "B", ")", "areg", "=", "pyrtl", ".", "Register", "(", "alen", ")", "breg", "=", "pyrtl", ".", "Register", "(", "alen", "+", "blen", ")", "accum", "=", "pyrtl", ".", "Register", "(", "alen", "+", "blen", ")", "done", "=", "(", "areg", "==", "0", ")", "# Multiplication is finished when a becomes 0", "if", "(", "shifts", ">", "alen", ")", "or", "(", "shifts", ">", "blen", ")", ":", "raise", "pyrtl", ".", "PyrtlError", "(", "\"shift is larger than one or both of the parameters A or B,\"", "\"please choose smaller shift\"", ")", "# During multiplication, shift a right every cycle 'shift' times,", "# shift b left every cycle 'shift' times", "with", "pyrtl", ".", "conditional_assignment", ":", "with", "start", ":", "# initialization", "areg", ".", "next", "|=", "A", "breg", ".", "next", "|=", "B", "accum", ".", "next", "|=", "0", "with", "~", "done", ":", "# don't run when there's no work to do", "# \"Multiply\" shifted breg by LSB of areg by cond. adding", "areg", ".", "next", "|=", "libutils", ".", "_shifted_reg_next", "(", "areg", ",", "'r'", ",", "shifts", ")", "# right shift", "breg", ".", "next", "|=", "libutils", ".", "_shifted_reg_next", "(", "breg", ",", "'l'", ",", "shifts", ")", "# left shift", "accum", ".", "next", "|=", "accum", "+", "_one_cycle_mult", "(", "areg", ",", "breg", ",", "shifts", ")", "return", "accum", ",", "done" ]
Generate shift-and-add multiplier that can shift and add multiple bits per clock cycle. Uses substantially more space than `simple_mult()` but is much faster. :param WireVector A, B: two input wires for the multiplication :param int shifts: number of spaces Register is to be shifted per clk cycle (cannot be greater than the length of `A` or `B`) :param bool start: start signal :returns: Register containing the product; the "done" signal
[ "Generate", "shift", "-", "and", "-", "add", "multiplier", "that", "can", "shift", "and", "add", "multiple", "bits", "per", "clock", "cycle", ".", "Uses", "substantially", "more", "space", "than", "simple_mult", "()", "but", "is", "much", "faster", "." ]
python
train
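A hedged usage sketch for the generator above: build a 3-bits-per-cycle multiplier for two 8-bit inputs and expose the product register and done flag. Wire names and widths are chosen here for illustration; only complex_mult's signature comes from the code above.

import pyrtl
from pyrtl.rtllib import multipliers

pyrtl.reset_working_block()
a, b = pyrtl.Input(8, 'a'), pyrtl.Input(8, 'b')
start = pyrtl.Input(1, 'start')
product, done = pyrtl.Output(16, 'product'), pyrtl.Output(1, 'done')

# accumulator register and done wire come back from the generator above
accum, done_wire = multipliers.complex_mult(a, b, 3, start)
product <<= accum
done <<= done_wire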
MacHu-GWU/macro-project
macro/bot.py
https://github.com/MacHu-GWU/macro-project/blob/dae909d2d28acbfa2be623aa2dffe988f3882d4d/macro/bot.py#L372-L381
def left(self, n=1, interval=0, pre_dl=None, post_dl=None): """Press left key n times **中文文档** 按左方向键 n 次。 """ self.delay(pre_dl) self.k.tap_key(self.k.left_key, n, interval) self.delay(post_dl)
[ "def", "left", "(", "self", ",", "n", "=", "1", ",", "interval", "=", "0", ",", "pre_dl", "=", "None", ",", "post_dl", "=", "None", ")", ":", "self", ".", "delay", "(", "pre_dl", ")", "self", ".", "k", ".", "tap_key", "(", "self", ".", "k", ".", "left_key", ",", "n", ",", "interval", ")", "self", ".", "delay", "(", "post_dl", ")" ]
Press left key n times **Chinese documentation** Press the left arrow key n times.
[ "Press", "left", "key", "n", "times" ]
python
train
ioos/compliance-checker
compliance_checker/suite.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/suite.py#L393-L407
def serialize(self, o): ''' Returns a safe serializable object that can be serialized into JSON. @param o Python object to serialize ''' if isinstance(o, (list, tuple)): return [self.serialize(i) for i in o] if isinstance(o, dict): return {k: self.serialize(v) for k, v in o.items()} if isinstance(o, datetime): return o.isoformat() if isinstance(o, Result): return self.serialize(o.serialize()) return o
[ "def", "serialize", "(", "self", ",", "o", ")", ":", "if", "isinstance", "(", "o", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "[", "self", ".", "serialize", "(", "i", ")", "for", "i", "in", "o", "]", "if", "isinstance", "(", "o", ",", "dict", ")", ":", "return", "{", "k", ":", "self", ".", "serialize", "(", "v", ")", "for", "k", ",", "v", "in", "o", ".", "items", "(", ")", "}", "if", "isinstance", "(", "o", ",", "datetime", ")", ":", "return", "o", ".", "isoformat", "(", ")", "if", "isinstance", "(", "o", ",", "Result", ")", ":", "return", "self", ".", "serialize", "(", "o", ".", "serialize", "(", ")", ")", "return", "o" ]
Returns a safe serializable object that can be serialized into JSON. @param o Python object to serialize
[ "Returns", "a", "safe", "serializable", "object", "that", "can", "be", "serialized", "into", "JSON", "." ]
python
train
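The recursion above is easy to check in isolation. A standalone copy without the Result branch, run on a small mixed structure:

from datetime import datetime

def to_jsonable(o):
    # Same recursive walk as serialize() above, minus the Result-specific branch.
    if isinstance(o, (list, tuple)):
        return [to_jsonable(i) for i in o]
    if isinstance(o, dict):
        return {k: to_jsonable(v) for k, v in o.items()}
    if isinstance(o, datetime):
        return o.isoformat()
    return o

print(to_jsonable({'ran_at': datetime(2020, 1, 1, 12, 0), 'scores': [(1, 2)]}))
# {'ran_at': '2020-01-01T12:00:00', 'scores': [[1, 2]]}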
numenta/nupic
src/nupic/frameworks/opf/htm_prediction_model.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/frameworks/opf/htm_prediction_model.py#L983-L1001
def getRuntimeStats(self): """ Only returns data for a stat called ``numRunCalls``. :return: """ ret = {"numRunCalls" : self.__numRunCalls} #-------------------------------------------------- # Query temporal network stats temporalStats = dict() if self._hasTP: for stat in self._netInfo.statsCollectors: sdict = stat.getStats() temporalStats.update(sdict) ret[InferenceType.getLabel(InferenceType.TemporalNextStep)] = temporalStats return ret
[ "def", "getRuntimeStats", "(", "self", ")", ":", "ret", "=", "{", "\"numRunCalls\"", ":", "self", ".", "__numRunCalls", "}", "#--------------------------------------------------", "# Query temporal network stats", "temporalStats", "=", "dict", "(", ")", "if", "self", ".", "_hasTP", ":", "for", "stat", "in", "self", ".", "_netInfo", ".", "statsCollectors", ":", "sdict", "=", "stat", ".", "getStats", "(", ")", "temporalStats", ".", "update", "(", "sdict", ")", "ret", "[", "InferenceType", ".", "getLabel", "(", "InferenceType", ".", "TemporalNextStep", ")", "]", "=", "temporalStats", "return", "ret" ]
Only returns data for a stat called ``numRunCalls``. :return:
[ "Only", "returns", "data", "for", "a", "stat", "called", "numRunCalls", ".", ":", "return", ":" ]
python
valid
Terrance/SkPy
skpy/util.py
https://github.com/Terrance/SkPy/blob/0f9489c94e8ec4d3effab4314497428872a80ad1/skpy/util.py#L235-L259
def exhaust(fn, transform=None, *args, **kwargs): """ Repeatedly call a function, starting with init, until false-y, yielding each item in turn. The ``transform`` parameter can be used to map a collection to another format, for example iterating over a :class:`dict` by value rather than key. Use with state-synced functions to retrieve all results. Args: fn (method): function to call transform (method): secondary function to convert result into an iterable args (list): positional arguments to pass to ``fn`` kwargs (dict): keyword arguments to pass to ``fn`` Returns: generator: generator of objects produced from the method """ while True: iterRes = fn(*args, **kwargs) if iterRes: for item in transform(iterRes) if transform else iterRes: yield item else: break
[ "def", "exhaust", "(", "fn", ",", "transform", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "while", "True", ":", "iterRes", "=", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "iterRes", ":", "for", "item", "in", "transform", "(", "iterRes", ")", "if", "transform", "else", "iterRes", ":", "yield", "item", "else", ":", "break" ]
Repeatedly call a function, starting with init, until false-y, yielding each item in turn. The ``transform`` parameter can be used to map a collection to another format, for example iterating over a :class:`dict` by value rather than key. Use with state-synced functions to retrieve all results. Args: fn (method): function to call transform (method): secondary function to convert result into an iterable args (list): positional arguments to pass to ``fn`` kwargs (dict): keyword arguments to pass to ``fn`` Returns: generator: generator of objects produced from the method
[ "Repeatedly", "call", "a", "function", "starting", "with", "init", "until", "false", "-", "y", "yielding", "each", "item", "in", "turn", "." ]
python
test
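Assuming the exhaust helper above is in scope, a tiny fake pager shows both the plain and the transform-based usage; the page contents are made up.

pages = [[1, 2], [3], []]
print(list(exhaust(lambda: pages.pop(0))))      # [1, 2, 3]

dict_pages = [{"a": 1, "b": 2}, {"c": 3}, {}]
print(list(exhaust(lambda: dict_pages.pop(0), transform=lambda d: d.values())))  # [1, 2, 3]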
intel-analytics/BigDL
pyspark/bigdl/models/lenet/utils.py
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/models/lenet/utils.py#L65-L75
def validate_optimizer(optimizer, test_data, options): """ Set validation and checkpoint for distributed optimizer. """ optimizer.set_validation( batch_size=options.batchSize, val_rdd=test_data, trigger=EveryEpoch(), val_method=[Top1Accuracy()] ) optimizer.set_checkpoint(EveryEpoch(), options.checkpointPath)
[ "def", "validate_optimizer", "(", "optimizer", ",", "test_data", ",", "options", ")", ":", "optimizer", ".", "set_validation", "(", "batch_size", "=", "options", ".", "batchSize", ",", "val_rdd", "=", "test_data", ",", "trigger", "=", "EveryEpoch", "(", ")", ",", "val_method", "=", "[", "Top1Accuracy", "(", ")", "]", ")", "optimizer", ".", "set_checkpoint", "(", "EveryEpoch", "(", ")", ",", "options", ".", "checkpointPath", ")" ]
Set validation and checkpoint for distributed optimizer.
[ "Set", "validation", "and", "checkpoint", "for", "distributed", "optimizer", "." ]
python
test
googleapis/oauth2client
oauth2client/_pure_python_crypt.py
https://github.com/googleapis/oauth2client/blob/50d20532a748f18e53f7d24ccbe6647132c979a9/oauth2client/_pure_python_crypt.py#L75-L92
def verify(self, message, signature): """Verifies a message against a signature. Args: message: string or bytes, The message to verify. If string, will be encoded to bytes as utf-8. signature: string or bytes, The signature on the message. If string, will be encoded to bytes as utf-8. Returns: True if message was signed by the private key associated with the public key that this object was constructed with. """ message = _helpers._to_bytes(message, encoding='utf-8') try: return rsa.pkcs1.verify(message, signature, self._pubkey) except (ValueError, rsa.pkcs1.VerificationError): return False
[ "def", "verify", "(", "self", ",", "message", ",", "signature", ")", ":", "message", "=", "_helpers", ".", "_to_bytes", "(", "message", ",", "encoding", "=", "'utf-8'", ")", "try", ":", "return", "rsa", ".", "pkcs1", ".", "verify", "(", "message", ",", "signature", ",", "self", ".", "_pubkey", ")", "except", "(", "ValueError", ",", "rsa", ".", "pkcs1", ".", "VerificationError", ")", ":", "return", "False" ]
Verifies a message against a signature. Args: message: string or bytes, The message to verify. If string, will be encoded to bytes as utf-8. signature: string or bytes, The signature on the message. If string, will be encoded to bytes as utf-8. Returns: True if message was signed by the private key associated with the public key that this object was constructed with.
[ "Verifies", "a", "message", "against", "a", "signature", "." ]
python
valid
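The verifier above delegates to the rsa package, so exercising that call directly shows why exceptions are normalized to False; the key size and message here are arbitrary.

import rsa

pub, priv = rsa.newkeys(1024)
message = 'some message'.encode('utf-8')
signature = rsa.sign(message, priv, 'SHA-256')

# rsa.pkcs1.verify raises VerificationError on a bad signature rather than
# returning False, which is what the wrapper above maps to a boolean.
try:
    rsa.pkcs1.verify(message, signature, pub)
    print(True)
except rsa.pkcs1.VerificationError:
    print(False)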
biocore/burrito-fillings
bfillings/formatdb.py
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/formatdb.py#L60-L80
def _get_result_paths(self, data): """ Build the dict of result filepaths """ # access data through self.Parameters so we know it's been cast # to a FilePath wd = self.WorkingDir db_name = self.Parameters['-n'].Value log_name = self.Parameters['-l'].Value result = {} result['log'] = ResultPath(Path=wd + log_name, IsWritten=True) if self.Parameters['-p'].Value == 'F': extensions = ['nhr', 'nin', 'nsq', 'nsd', 'nsi'] else: extensions = ['phr', 'pin', 'psq', 'psd', 'psi'] for extension in extensions: for file_path in glob(wd + (db_name + '*' + extension)): # this will match e.g. nr.01.psd and nr.psd key = file_path.split(db_name + '.')[1] result_path = ResultPath(Path=file_path, IsWritten=True) result[key] = result_path return result
[ "def", "_get_result_paths", "(", "self", ",", "data", ")", ":", "# access data through self.Parameters so we know it's been cast", "# to a FilePath", "wd", "=", "self", ".", "WorkingDir", "db_name", "=", "self", ".", "Parameters", "[", "'-n'", "]", ".", "Value", "log_name", "=", "self", ".", "Parameters", "[", "'-l'", "]", ".", "Value", "result", "=", "{", "}", "result", "[", "'log'", "]", "=", "ResultPath", "(", "Path", "=", "wd", "+", "log_name", ",", "IsWritten", "=", "True", ")", "if", "self", ".", "Parameters", "[", "'-p'", "]", ".", "Value", "==", "'F'", ":", "extensions", "=", "[", "'nhr'", ",", "'nin'", ",", "'nsq'", ",", "'nsd'", ",", "'nsi'", "]", "else", ":", "extensions", "=", "[", "'phr'", ",", "'pin'", ",", "'psq'", ",", "'psd'", ",", "'psi'", "]", "for", "extension", "in", "extensions", ":", "for", "file_path", "in", "glob", "(", "wd", "+", "(", "db_name", "+", "'*'", "+", "extension", ")", ")", ":", "# this will match e.g. nr.01.psd and nr.psd", "key", "=", "file_path", ".", "split", "(", "db_name", "+", "'.'", ")", "[", "1", "]", "result_path", "=", "ResultPath", "(", "Path", "=", "file_path", ",", "IsWritten", "=", "True", ")", "result", "[", "key", "]", "=", "result_path", "return", "result" ]
Build the dict of result filepaths
[ "Build", "the", "dict", "of", "result", "filepaths" ]
python
train
Nic30/hwt
hwt/pyUtils/arrayQuery.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/pyUtils/arrayQuery.py#L102-L114
def iter_with_last(iterable): """ :return: generator of tuples (isLastFlag, item) """ # Ensure it's an iterator and get the first field iterable = iter(iterable) prev = next(iterable) for item in iterable: # Lag by one item so I know I'm not at the end yield False, prev prev = item # Last item yield True, prev
[ "def", "iter_with_last", "(", "iterable", ")", ":", "# Ensure it's an iterator and get the first field", "iterable", "=", "iter", "(", "iterable", ")", "prev", "=", "next", "(", "iterable", ")", "for", "item", "in", "iterable", ":", "# Lag by one item so I know I'm not at the end", "yield", "False", ",", "prev", "prev", "=", "item", "# Last item", "yield", "True", ",", "prev" ]
:return: generator of tuples (isLastFlag, item)
[ ":", "return", ":", "generator", "of", "tuples", "(", "isLastFlag", "item", ")" ]
python
test
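With the generator above in scope, the flag ordering is easy to see: only the final element comes back marked True.

for is_last, item in iter_with_last(['a', 'b', 'c']):
    print(is_last, item)
# False a
# False b
# True c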
Grunny/zap-cli
zapcli/commands/scripts.py
https://github.com/Grunny/zap-cli/blob/d58d4850ecfc5467badfac5e5bcc841d064bd419/zapcli/commands/scripts.py#L83-L92
def remove_script(zap_helper, script_name): """Remove a script.""" with zap_error_handler(): console.debug('Removing script "{0}"'.format(script_name)) result = zap_helper.zap.script.remove(script_name) if result != 'OK': raise ZAPError('Error removing script: {0}'.format(result)) console.info('Script "{0}" removed'.format(script_name))
[ "def", "remove_script", "(", "zap_helper", ",", "script_name", ")", ":", "with", "zap_error_handler", "(", ")", ":", "console", ".", "debug", "(", "'Removing script \"{0}\"'", ".", "format", "(", "script_name", ")", ")", "result", "=", "zap_helper", ".", "zap", ".", "script", ".", "remove", "(", "script_name", ")", "if", "result", "!=", "'OK'", ":", "raise", "ZAPError", "(", "'Error removing script: {0}'", ".", "format", "(", "result", ")", ")", "console", ".", "info", "(", "'Script \"{0}\" removed'", ".", "format", "(", "script_name", ")", ")" ]
Remove a script.
[ "Remove", "a", "script", "." ]
python
train
kushaldas/retask
retask/queue.py
https://github.com/kushaldas/retask/blob/5c955b8386653d3f0591ca2f4b1a213ff4b5a018/retask/queue.py#L299-L322
def wait(self, wait_time=0): """ Blocking call to check if the worker returns the result. One can use job.result after this call returns ``True``. :arg wait_time: Time in seconds to wait, default is infinite. :return: `True` or `False`. .. note:: This is a blocking call, you can specity wait_time argument for timeout. """ if self.__result: return True data = self.rdb.brpop(self.urn, wait_time) if data: self.rdb.delete(self.urn) data = json.loads(data[1]) self.__result = data return True else: return False
[ "def", "wait", "(", "self", ",", "wait_time", "=", "0", ")", ":", "if", "self", ".", "__result", ":", "return", "True", "data", "=", "self", ".", "rdb", ".", "brpop", "(", "self", ".", "urn", ",", "wait_time", ")", "if", "data", ":", "self", ".", "rdb", ".", "delete", "(", "self", ".", "urn", ")", "data", "=", "json", ".", "loads", "(", "data", "[", "1", "]", ")", "self", ".", "__result", "=", "data", "return", "True", "else", ":", "return", "False" ]
Blocking call to check if the worker returns the result. One can use job.result after this call returns ``True``. :arg wait_time: Time in seconds to wait, default is infinite. :return: `True` or `False`. .. note:: This is a blocking call, you can specify the wait_time argument for a timeout.
[ "Blocking", "call", "to", "check", "if", "the", "worker", "returns", "the", "result", ".", "One", "can", "use", "job", ".", "result", "after", "this", "call", "returns", "True", "." ]
python
train
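A hedged producer-side sketch of how wait() is typically used. The Queue/Task import paths, a running local Redis, and a worker that eventually publishes a result are assumptions here, not things guaranteed by this record.

from retask.queue import Queue
from retask.task import Task

queue = Queue('example')
queue.connect()
job = queue.enqueue(Task({'numbers': [1, 2, 3]}))

if job.wait(wait_time=30):     # block for at most 30 seconds
    print(job.result)
else:
    print('no result within 30 seconds')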
jplusplus/statscraper
statscraper/base_scraper.py
https://github.com/jplusplus/statscraper/blob/932ec048b23d15b3dbdaf829facc55fd78ec0109/statscraper/base_scraper.py#L579-L588
def move_to(self, id_): """Select a child item by id (str), reference or index.""" if self.items: try: self.current_item = self.items[id_] except (StopIteration, IndexError, NoSuchItem): raise NoSuchItem for f in self._hooks["select"]: f(self, id_) return self
[ "def", "move_to", "(", "self", ",", "id_", ")", ":", "if", "self", ".", "items", ":", "try", ":", "self", ".", "current_item", "=", "self", ".", "items", "[", "id_", "]", "except", "(", "StopIteration", ",", "IndexError", ",", "NoSuchItem", ")", ":", "raise", "NoSuchItem", "for", "f", "in", "self", ".", "_hooks", "[", "\"select\"", "]", ":", "f", "(", "self", ",", "id_", ")", "return", "self" ]
Select a child item by id (str), reference or index.
[ "Select", "a", "child", "item", "by", "id", "(", "str", ")", "reference", "or", "index", "." ]
python
train
peergradeio/flask-mongo-profiler
flask_mongo_profiler/contrib/flask_admin/formatters/polymorphic_relations.py
https://github.com/peergradeio/flask-mongo-profiler/blob/a267eeb49fea07c9a24fb370bd9d7a90ed313ccf/flask_mongo_profiler/contrib/flask_admin/formatters/polymorphic_relations.py#L27-L65
def generic_ref_formatter(view, context, model, name, lazy=False): """ For GenericReferenceField and LazyGenericReferenceField See Also -------- diff_formatter """ try: if lazy: rel_model = getattr(model, name).fetch() else: rel_model = getattr(model, name) except (mongoengine.DoesNotExist, AttributeError) as e: # custom_field_type_formatters seems to fix the issue of stale references # crashing pages, since it intercepts the display of all ReferenceField's. return Markup( '<span class="label label-danger">Error</span> <small>%s</small>' % e ) if rel_model is None: return '' try: return Markup( '<a href="%s">%s</a>' % ( url_for( # Flask-Admin creates URL's namespaced w/ model class name, lowercase. '%s.details_view' % rel_model.__class__.__name__.lower(), id=rel_model.id, ), rel_model, ) ) except werkzeug.routing.BuildError as e: return Markup( '<span class="label label-danger">Error</span> <small>%s</small>' % e )
[ "def", "generic_ref_formatter", "(", "view", ",", "context", ",", "model", ",", "name", ",", "lazy", "=", "False", ")", ":", "try", ":", "if", "lazy", ":", "rel_model", "=", "getattr", "(", "model", ",", "name", ")", ".", "fetch", "(", ")", "else", ":", "rel_model", "=", "getattr", "(", "model", ",", "name", ")", "except", "(", "mongoengine", ".", "DoesNotExist", ",", "AttributeError", ")", "as", "e", ":", "# custom_field_type_formatters seems to fix the issue of stale references", "# crashing pages, since it intercepts the display of all ReferenceField's.", "return", "Markup", "(", "'<span class=\"label label-danger\">Error</span> <small>%s</small>'", "%", "e", ")", "if", "rel_model", "is", "None", ":", "return", "''", "try", ":", "return", "Markup", "(", "'<a href=\"%s\">%s</a>'", "%", "(", "url_for", "(", "# Flask-Admin creates URL's namespaced w/ model class name, lowercase.", "'%s.details_view'", "%", "rel_model", ".", "__class__", ".", "__name__", ".", "lower", "(", ")", ",", "id", "=", "rel_model", ".", "id", ",", ")", ",", "rel_model", ",", ")", ")", "except", "werkzeug", ".", "routing", ".", "BuildError", "as", "e", ":", "return", "Markup", "(", "'<span class=\"label label-danger\">Error</span> <small>%s</small>'", "%", "e", ")" ]
For GenericReferenceField and LazyGenericReferenceField See Also -------- diff_formatter
[ "For", "GenericReferenceField", "and", "LazyGenericReferenceField" ]
python
train
CityOfZion/neo-python
neo/Core/TX/RegisterTransaction.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/TX/RegisterTransaction.py#L129-L148
def ToJson(self): """ Convert object members to a dictionary that can be parsed as JSON. Returns: dict: """ jsn = super(RegisterTransaction, self).ToJson() asset = { 'type': self.AssetType, 'name': self.Name.decode('utf-8'), 'amount': self.Amount.value, 'precision': self.Precision if type(self.Precision) is int else self.Precision.decode('utf-8'), 'owner': self.Owner.ToString(), 'admin': Crypto.ToAddress(self.Admin) } jsn['asset'] = asset return jsn
[ "def", "ToJson", "(", "self", ")", ":", "jsn", "=", "super", "(", "RegisterTransaction", ",", "self", ")", ".", "ToJson", "(", ")", "asset", "=", "{", "'type'", ":", "self", ".", "AssetType", ",", "'name'", ":", "self", ".", "Name", ".", "decode", "(", "'utf-8'", ")", ",", "'amount'", ":", "self", ".", "Amount", ".", "value", ",", "'precision'", ":", "self", ".", "Precision", "if", "type", "(", "self", ".", "Precision", ")", "is", "int", "else", "self", ".", "Precision", ".", "decode", "(", "'utf-8'", ")", ",", "'owner'", ":", "self", ".", "Owner", ".", "ToString", "(", ")", ",", "'admin'", ":", "Crypto", ".", "ToAddress", "(", "self", ".", "Admin", ")", "}", "jsn", "[", "'asset'", "]", "=", "asset", "return", "jsn" ]
Convert object members to a dictionary that can be parsed as JSON. Returns: dict:
[ "Convert", "object", "members", "to", "a", "dictionary", "that", "can", "be", "parsed", "as", "JSON", "." ]
python
train
Azure/azure-sdk-for-python
azure-mgmt-storage/azure/mgmt/storage/storage_management_client.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-mgmt-storage/azure/mgmt/storage/storage_management_client.py#L147-L163
def blob_containers(self): """Instance depends on the API version: * 2018-02-01: :class:`BlobContainersOperations<azure.mgmt.storage.v2018_02_01.operations.BlobContainersOperations>` * 2018-03-01-preview: :class:`BlobContainersOperations<azure.mgmt.storage.v2018_03_01_preview.operations.BlobContainersOperations>` * 2018-07-01: :class:`BlobContainersOperations<azure.mgmt.storage.v2018_07_01.operations.BlobContainersOperations>` """ api_version = self._get_api_version('blob_containers') if api_version == '2018-02-01': from .v2018_02_01.operations import BlobContainersOperations as OperationClass elif api_version == '2018-03-01-preview': from .v2018_03_01_preview.operations import BlobContainersOperations as OperationClass elif api_version == '2018-07-01': from .v2018_07_01.operations import BlobContainersOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
[ "def", "blob_containers", "(", "self", ")", ":", "api_version", "=", "self", ".", "_get_api_version", "(", "'blob_containers'", ")", "if", "api_version", "==", "'2018-02-01'", ":", "from", ".", "v2018_02_01", ".", "operations", "import", "BlobContainersOperations", "as", "OperationClass", "elif", "api_version", "==", "'2018-03-01-preview'", ":", "from", ".", "v2018_03_01_preview", ".", "operations", "import", "BlobContainersOperations", "as", "OperationClass", "elif", "api_version", "==", "'2018-07-01'", ":", "from", ".", "v2018_07_01", ".", "operations", "import", "BlobContainersOperations", "as", "OperationClass", "else", ":", "raise", "NotImplementedError", "(", "\"APIVersion {} is not available\"", ".", "format", "(", "api_version", ")", ")", "return", "OperationClass", "(", "self", ".", "_client", ",", "self", ".", "config", ",", "Serializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ",", "Deserializer", "(", "self", ".", "_models_dict", "(", "api_version", ")", ")", ")" ]
Instance depends on the API version: * 2018-02-01: :class:`BlobContainersOperations<azure.mgmt.storage.v2018_02_01.operations.BlobContainersOperations>` * 2018-03-01-preview: :class:`BlobContainersOperations<azure.mgmt.storage.v2018_03_01_preview.operations.BlobContainersOperations>` * 2018-07-01: :class:`BlobContainersOperations<azure.mgmt.storage.v2018_07_01.operations.BlobContainersOperations>`
[ "Instance", "depends", "on", "the", "API", "version", ":" ]
python
test
Azure/azure-sdk-for-python
azure-keyvault/azure/keyvault/_internal.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-keyvault/azure/keyvault/_internal.py#L131-L140
def _b64_to_bstr(b64str): """Deserialize base64 encoded string into string. :param str b64str: response string to be deserialized. :rtype: bytearray :raises: TypeError if string format invalid. """ padding = '=' * (3 - (len(b64str) + 3) % 4) b64str = b64str + padding encoded = b64str.replace('-', '+').replace('_', '/') return b64decode(encoded)
[ "def", "_b64_to_bstr", "(", "b64str", ")", ":", "padding", "=", "'='", "*", "(", "3", "-", "(", "len", "(", "b64str", ")", "+", "3", ")", "%", "4", ")", "b64str", "=", "b64str", "+", "padding", "encoded", "=", "b64str", ".", "replace", "(", "'-'", ",", "'+'", ")", ".", "replace", "(", "'_'", ",", "'/'", ")", "return", "b64decode", "(", "encoded", ")" ]
Deserialize base64 encoded string into string. :param str b64str: response string to be deserialized. :rtype: bytearray :raises: TypeError if string format invalid.
[ "Deserialize", "base64", "encoded", "string", "into", "string", ".", ":", "param", "str", "b64str", ":", "response", "string", "to", "be", "deserialized", ".", ":", "rtype", ":", "bytearray", ":", "raises", ":", "TypeError", "if", "string", "format", "invalid", "." ]
python
test
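A standalone copy of the same re-padding and alphabet swap, runnable without any Key Vault client, shows what the helper does with an unpadded URL-safe string; the sample input is arbitrary.

import base64

def b64_to_bytes(b64str):
    # Re-pad to a multiple of 4, swap the URL-safe alphabet back, then decode.
    padding = '=' * (3 - (len(b64str) + 3) % 4)
    encoded = (b64str + padding).replace('-', '+').replace('_', '/')
    return base64.b64decode(encoded)

print(b64_to_bytes('aGVsbG8_d29ybGQ'))   # b'hello?world'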
sephii/taxi-zebra
taxi_zebra/backend.py
https://github.com/sephii/taxi-zebra/blob/36affa22d4167e7ce5a8c7c6eaf5adc4cbfcfb5d/taxi_zebra/backend.py#L139-L148
def update_alias_mapping(settings, alias, new_mapping): """ Override `alias` mapping in the user configuration file with the given `new_mapping`, which should be a tuple with 2 or 3 elements (in the form `(project_id, activity_id, role_id)`). """ mapping = aliases_database[alias] new_mapping = Mapping(mapping=new_mapping, backend=mapping.backend) aliases_database[alias] = new_mapping settings.add_alias(alias, new_mapping) settings.write_config()
[ "def", "update_alias_mapping", "(", "settings", ",", "alias", ",", "new_mapping", ")", ":", "mapping", "=", "aliases_database", "[", "alias", "]", "new_mapping", "=", "Mapping", "(", "mapping", "=", "new_mapping", ",", "backend", "=", "mapping", ".", "backend", ")", "aliases_database", "[", "alias", "]", "=", "new_mapping", "settings", ".", "add_alias", "(", "alias", ",", "new_mapping", ")", "settings", ".", "write_config", "(", ")" ]
Override `alias` mapping in the user configuration file with the given `new_mapping`, which should be a tuple with 2 or 3 elements (in the form `(project_id, activity_id, role_id)`).
[ "Override", "alias", "mapping", "in", "the", "user", "configuration", "file", "with", "the", "given", "new_mapping", "which", "should", "be", "a", "tuple", "with", "2", "or", "3", "elements", "(", "in", "the", "form", "(", "project_id", "activity_id", "role_id", ")", ")", "." ]
python
valid
sripathikrishnan/redis-rdb-tools
rdbtools/encodehelpers.py
https://github.com/sripathikrishnan/redis-rdb-tools/blob/543a73e84702e911ddcd31325ecfde77d7fd230b/rdbtools/encodehelpers.py#L96-L123
def bytes_to_unicode(byte_data, escape, skip_printable=False): """ Decode given bytes using specified escaping method. :param byte_data: The byte-like object with bytes to decode. :param escape: The escape method to use. :param skip_printable: If True, don't escape byte_data with all 'printable ASCII' bytes. Defaults to False. :return: New unicode string, escaped with the specified method if needed. """ if isnumber(byte_data): if skip_printable: return num2unistr(byte_data) else: byte_data = num2bytes(byte_data) else: assert (isinstance(byte_data, type(b''))) if skip_printable and all(0x20 <= bval(ch) <= 0x7E for ch in byte_data): escape = STRING_ESCAPE_RAW if escape == STRING_ESCAPE_RAW: return byte_data.decode('latin-1') elif escape == STRING_ESCAPE_PRINT: return escape_ascii(byte_data) elif escape == STRING_ESCAPE_UTF8: return escape_utf8(byte_data) elif escape == STRING_ESCAPE_BASE64: return codecs.decode(base64.b64encode(byte_data), 'latin-1') else: raise UnicodeEncodeError("Unknown escape option")
[ "def", "bytes_to_unicode", "(", "byte_data", ",", "escape", ",", "skip_printable", "=", "False", ")", ":", "if", "isnumber", "(", "byte_data", ")", ":", "if", "skip_printable", ":", "return", "num2unistr", "(", "byte_data", ")", "else", ":", "byte_data", "=", "num2bytes", "(", "byte_data", ")", "else", ":", "assert", "(", "isinstance", "(", "byte_data", ",", "type", "(", "b''", ")", ")", ")", "if", "skip_printable", "and", "all", "(", "0x20", "<=", "bval", "(", "ch", ")", "<=", "0x7E", "for", "ch", "in", "byte_data", ")", ":", "escape", "=", "STRING_ESCAPE_RAW", "if", "escape", "==", "STRING_ESCAPE_RAW", ":", "return", "byte_data", ".", "decode", "(", "'latin-1'", ")", "elif", "escape", "==", "STRING_ESCAPE_PRINT", ":", "return", "escape_ascii", "(", "byte_data", ")", "elif", "escape", "==", "STRING_ESCAPE_UTF8", ":", "return", "escape_utf8", "(", "byte_data", ")", "elif", "escape", "==", "STRING_ESCAPE_BASE64", ":", "return", "codecs", ".", "decode", "(", "base64", ".", "b64encode", "(", "byte_data", ")", ",", "'latin-1'", ")", "else", ":", "raise", "UnicodeEncodeError", "(", "\"Unknown escape option\"", ")" ]
Decode given bytes using specified escaping method. :param byte_data: The byte-like object with bytes to decode. :param escape: The escape method to use. :param skip_printable: If True, don't escape byte_data with all 'printable ASCII' bytes. Defaults to False. :return: New unicode string, escaped with the specified method if needed.
[ "Decode", "given", "bytes", "using", "specified", "escaping", "method", ".", ":", "param", "byte_data", ":", "The", "byte", "-", "like", "object", "with", "bytes", "to", "decode", ".", ":", "param", "escape", ":", "The", "escape", "method", "to", "use", ".", ":", "param", "skip_printable", ":", "If", "True", "don", "t", "escape", "byte_data", "with", "all", "printable", "ASCII", "bytes", ".", "Defaults", "to", "False", ".", ":", "return", ":", "New", "unicode", "string", "escaped", "with", "the", "specified", "method", "if", "needed", "." ]
python
train
Capitains/MyCapytain
MyCapytain/resources/collections/dts/_resolver.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resources/collections/dts/_resolver.py#L187-L218
def parse_member( cls, obj: dict, collection: "HttpResolverDtsCollection", direction: str, **additional_parameters): """ Parse the member value of a Collection response and returns the list of object while setting the graph relationship based on `direction` :param obj: PyLD parsed JSON+LD :param collection: Collection attached to the member property :param direction: Direction of the member (children, parent) """ members = [] # Start pagination check here hydra_members = obj.get(str(_hyd.member), []) if hydra_members: for member in hydra_members: subcollection = cls.parse(member, metadata_parsed=False, **additional_parameters) if direction == "children": subcollection.parents.set({collection}) members.append(subcollection) if "https://www.w3.org/ns/hydra/core#view" not in obj or \ (direction == "children" and collection.size == 0): collection._parsed[direction] = True return members
[ "def", "parse_member", "(", "cls", ",", "obj", ":", "dict", ",", "collection", ":", "\"HttpResolverDtsCollection\"", ",", "direction", ":", "str", ",", "*", "*", "additional_parameters", ")", ":", "members", "=", "[", "]", "# Start pagination check here", "hydra_members", "=", "obj", ".", "get", "(", "str", "(", "_hyd", ".", "member", ")", ",", "[", "]", ")", "if", "hydra_members", ":", "for", "member", "in", "hydra_members", ":", "subcollection", "=", "cls", ".", "parse", "(", "member", ",", "metadata_parsed", "=", "False", ",", "*", "*", "additional_parameters", ")", "if", "direction", "==", "\"children\"", ":", "subcollection", ".", "parents", ".", "set", "(", "{", "collection", "}", ")", "members", ".", "append", "(", "subcollection", ")", "if", "\"https://www.w3.org/ns/hydra/core#view\"", "not", "in", "obj", "or", "(", "direction", "==", "\"children\"", "and", "collection", ".", "size", "==", "0", ")", ":", "collection", ".", "_parsed", "[", "direction", "]", "=", "True", "return", "members" ]
Parse the member value of a Collection response and return the list of objects while setting the graph relationship based on `direction` :param obj: PyLD parsed JSON+LD :param collection: Collection attached to the member property :param direction: Direction of the member (children, parent)
[ "Parse", "the", "member", "value", "of", "a", "Collection", "response", "and", "return", "the", "list", "of", "objects", "while", "setting", "the", "graph", "relationship", "based", "on", "direction" ]
python
train
zalando/patroni
patroni/scripts/wale_restore.py
https://github.com/zalando/patroni/blob/f6d29081c90af52064b981cdd877a07338d86038/patroni/scripts/wale_restore.py#L131-L158
def run(self): """ Creates a new replica using WAL-E Returns ------- ExitCode 0 = Success 1 = Error, try again 2 = Error, don't try again """ if self.init_error: logger.error('init error: %r did not exist at initialization time', self.wal_e.env_dir) return ExitCode.FAIL try: should_use_s3 = self.should_use_s3_to_create_replica() if should_use_s3 is None: # Need to retry return ExitCode.RETRY_LATER elif should_use_s3: return self.create_replica_with_s3() elif not should_use_s3: return ExitCode.FAIL except Exception: logger.exception("Unhandled exception when running WAL-E restore") return ExitCode.FAIL
[ "def", "run", "(", "self", ")", ":", "if", "self", ".", "init_error", ":", "logger", ".", "error", "(", "'init error: %r did not exist at initialization time'", ",", "self", ".", "wal_e", ".", "env_dir", ")", "return", "ExitCode", ".", "FAIL", "try", ":", "should_use_s3", "=", "self", ".", "should_use_s3_to_create_replica", "(", ")", "if", "should_use_s3", "is", "None", ":", "# Need to retry", "return", "ExitCode", ".", "RETRY_LATER", "elif", "should_use_s3", ":", "return", "self", ".", "create_replica_with_s3", "(", ")", "elif", "not", "should_use_s3", ":", "return", "ExitCode", ".", "FAIL", "except", "Exception", ":", "logger", ".", "exception", "(", "\"Unhandled exception when running WAL-E restore\"", ")", "return", "ExitCode", ".", "FAIL" ]
Creates a new replica using WAL-E Returns ------- ExitCode 0 = Success 1 = Error, try again 2 = Error, don't try again
[ "Creates", "a", "new", "replica", "using", "WAL", "-", "E" ]
python
train
bitesofcode/projexui
projexui/widgets/xstackedwidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xstackedwidget.py#L66-L76
def _finishAnimation(self): """ Cleans up post-animation. """ self.setCurrentIndex(self._nextIndex) self.widget(self._lastIndex).hide() self.widget(self._lastIndex).move(self._lastPoint) self._active = False if not self.signalsBlocked(): self.animationFinished.emit()
[ "def", "_finishAnimation", "(", "self", ")", ":", "self", ".", "setCurrentIndex", "(", "self", ".", "_nextIndex", ")", "self", ".", "widget", "(", "self", ".", "_lastIndex", ")", ".", "hide", "(", ")", "self", ".", "widget", "(", "self", ".", "_lastIndex", ")", ".", "move", "(", "self", ".", "_lastPoint", ")", "self", ".", "_active", "=", "False", "if", "not", "self", ".", "signalsBlocked", "(", ")", ":", "self", ".", "animationFinished", ".", "emit", "(", ")" ]
Cleans up post-animation.
[ "Cleans", "up", "post", "-", "animation", "." ]
python
train
egineering-llc/egat
egat/loggers/html_logger.py
https://github.com/egineering-llc/egat/blob/63a172276b554ae1c7d0f13ba305881201c49d55/egat/loggers/html_logger.py#L28-L36
def copy_resources_to_log_dir(log_dir): """Copies the necessary static assets to the log_dir and returns the path of the main css file.""" css_path = resource_filename(Requirement.parse("egat"), "/egat/data/default.css") header_path = resource_filename(Requirement.parse("egat"), "/egat/data/egat_header.png") shutil.copyfile(css_path, log_dir + "/style.css") shutil.copyfile(header_path, log_dir + "/egat_header.png") return log_dir + os.sep + "style.css"
[ "def", "copy_resources_to_log_dir", "(", "log_dir", ")", ":", "css_path", "=", "resource_filename", "(", "Requirement", ".", "parse", "(", "\"egat\"", ")", ",", "\"/egat/data/default.css\"", ")", "header_path", "=", "resource_filename", "(", "Requirement", ".", "parse", "(", "\"egat\"", ")", ",", "\"/egat/data/egat_header.png\"", ")", "shutil", ".", "copyfile", "(", "css_path", ",", "log_dir", "+", "\"/style.css\"", ")", "shutil", ".", "copyfile", "(", "header_path", ",", "log_dir", "+", "\"/egat_header.png\"", ")", "return", "log_dir", "+", "os", ".", "sep", "+", "\"style.css\"" ]
Copies the necessary static assets to the log_dir and returns the path of the main css file.
[ "Copies", "the", "necessary", "static", "assets", "to", "the", "log_dir", "and", "returns", "the", "path", "of", "the", "main", "css", "file", "." ]
python
train
kubernetes-client/python
kubernetes/client/apis/storage_v1beta1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/storage_v1beta1_api.py#L1074-L1099
def delete_csi_node(self, name, **kwargs): """ delete a CSINode This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_csi_node(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the CSINode (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_csi_node_with_http_info(name, **kwargs) else: (data) = self.delete_csi_node_with_http_info(name, **kwargs) return data
[ "def", "delete_csi_node", "(", "self", ",", "name", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "delete_csi_node_with_http_info", "(", "name", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "self", ".", "delete_csi_node_with_http_info", "(", "name", ",", "*", "*", "kwargs", ")", "return", "data" ]
delete a CSINode This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_csi_node(name, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the CSINode (required) :param str pretty: If 'true', then the output is pretty printed. :param V1DeleteOptions body: :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately. :param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both. :param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground. :return: V1Status If the method is called asynchronously, returns the request thread.
[ "delete", "a", "CSINode", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", ".", "delete_csi_node", "(", "name", "async_req", "=", "True", ")", ">>>", "result", "=", "thread", ".", "get", "()" ]
python
train
Pylons/plaster_pastedeploy
src/plaster_pastedeploy/__init__.py
https://github.com/Pylons/plaster_pastedeploy/blob/72a08f3fb6d11a0b039f381ade83f045668cfcb0/src/plaster_pastedeploy/__init__.py#L61-L107
def get_settings(self, section=None, defaults=None): """ Gets a named section from the configuration source. :param section: a :class:`str` representing the section you want to retrieve from the configuration source. If ``None`` this will fallback to the :attr:`plaster.PlasterURL.fragment`. :param defaults: a :class:`dict` that will get passed to :class:`configparser.ConfigParser` and will populate the ``DEFAULT`` section. :return: A :class:`plaster_pastedeploy.ConfigDict` of key/value pairs. """ # This is a partial reimplementation of # ``paste.deploy.loadwsgi.ConfigLoader:get_context`` which supports # "set" and "get" options and filters out any other globals section = self._maybe_get_default_name(section) if self.filepath is None: return {} parser = self._get_parser(defaults) defaults = parser.defaults() try: raw_items = parser.items(section) except NoSectionError: return {} local_conf = OrderedDict() get_from_globals = {} for option, value in raw_items: if option.startswith("set "): name = option[4:].strip() defaults[name] = value elif option.startswith("get "): name = option[4:].strip() get_from_globals[name] = value # insert a value into local_conf to preserve the order local_conf[name] = None else: # annoyingly pastedeploy filters out all defaults unless # "get foo" is used to pull it in if option in defaults: continue local_conf[option] = value for option, global_option in get_from_globals.items(): local_conf[option] = defaults[global_option] return ConfigDict(local_conf, defaults, self)
[ "def", "get_settings", "(", "self", ",", "section", "=", "None", ",", "defaults", "=", "None", ")", ":", "# This is a partial reimplementation of", "# ``paste.deploy.loadwsgi.ConfigLoader:get_context`` which supports", "# \"set\" and \"get\" options and filters out any other globals", "section", "=", "self", ".", "_maybe_get_default_name", "(", "section", ")", "if", "self", ".", "filepath", "is", "None", ":", "return", "{", "}", "parser", "=", "self", ".", "_get_parser", "(", "defaults", ")", "defaults", "=", "parser", ".", "defaults", "(", ")", "try", ":", "raw_items", "=", "parser", ".", "items", "(", "section", ")", "except", "NoSectionError", ":", "return", "{", "}", "local_conf", "=", "OrderedDict", "(", ")", "get_from_globals", "=", "{", "}", "for", "option", ",", "value", "in", "raw_items", ":", "if", "option", ".", "startswith", "(", "\"set \"", ")", ":", "name", "=", "option", "[", "4", ":", "]", ".", "strip", "(", ")", "defaults", "[", "name", "]", "=", "value", "elif", "option", ".", "startswith", "(", "\"get \"", ")", ":", "name", "=", "option", "[", "4", ":", "]", ".", "strip", "(", ")", "get_from_globals", "[", "name", "]", "=", "value", "# insert a value into local_conf to preserve the order", "local_conf", "[", "name", "]", "=", "None", "else", ":", "# annoyingly pastedeploy filters out all defaults unless", "# \"get foo\" is used to pull it in", "if", "option", "in", "defaults", ":", "continue", "local_conf", "[", "option", "]", "=", "value", "for", "option", ",", "global_option", "in", "get_from_globals", ".", "items", "(", ")", ":", "local_conf", "[", "option", "]", "=", "defaults", "[", "global_option", "]", "return", "ConfigDict", "(", "local_conf", ",", "defaults", ",", "self", ")" ]
Gets a named section from the configuration source. :param section: a :class:`str` representing the section you want to retrieve from the configuration source. If ``None`` this will fallback to the :attr:`plaster.PlasterURL.fragment`. :param defaults: a :class:`dict` that will get passed to :class:`configparser.ConfigParser` and will populate the ``DEFAULT`` section. :return: A :class:`plaster_pastedeploy.ConfigDict` of key/value pairs.
[ "Gets", "a", "named", "section", "from", "the", "configuration", "source", "." ]
python
train
PyGithub/PyGithub
github/PullRequest.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/PullRequest.py#L404-L419
def create_issue_comment(self, body): """ :calls: `POST /repos/:owner/:repo/issues/:number/comments <http://developer.github.com/v3/issues/comments>`_ :param body: string :rtype: :class:`github.IssueComment.IssueComment` """ assert isinstance(body, (str, unicode)), body post_parameters = { "body": body, } headers, data = self._requester.requestJsonAndCheck( "POST", self.issue_url + "/comments", input=post_parameters ) return github.IssueComment.IssueComment(self._requester, headers, data, completed=True)
[ "def", "create_issue_comment", "(", "self", ",", "body", ")", ":", "assert", "isinstance", "(", "body", ",", "(", "str", ",", "unicode", ")", ")", ",", "body", "post_parameters", "=", "{", "\"body\"", ":", "body", ",", "}", "headers", ",", "data", "=", "self", ".", "_requester", ".", "requestJsonAndCheck", "(", "\"POST\"", ",", "self", ".", "issue_url", "+", "\"/comments\"", ",", "input", "=", "post_parameters", ")", "return", "github", ".", "IssueComment", ".", "IssueComment", "(", "self", ".", "_requester", ",", "headers", ",", "data", ",", "completed", "=", "True", ")" ]
:calls: `POST /repos/:owner/:repo/issues/:number/comments <http://developer.github.com/v3/issues/comments>`_ :param body: string :rtype: :class:`github.IssueComment.IssueComment`
[ ":", "calls", ":", "POST", "/", "repos", "/", ":", "owner", "/", ":", "repo", "/", "issues", "/", ":", "number", "/", "comments", "<http", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "issues", "/", "comments", ">", "_", ":", "param", "body", ":", "string", ":", "rtype", ":", ":", "class", ":", "github", ".", "IssueComment", ".", "IssueComment" ]
python
train
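A minimal usage sketch for create_issue_comment() above; the access token, repository name and pull-request number are placeholders, not taken from the record:

from github import Github

gh = Github("<access-token>")                  # placeholder credentials
repo = gh.get_repo("octocat/Hello-World")      # hypothetical repository
pull = repo.get_pull(1)                        # hypothetical pull request number
comment = pull.create_issue_comment("Looks good to me!")
print(comment.body)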
pantsbuild/pants
src/python/pants/build_graph/import_remote_sources_mixin.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/build_graph/import_remote_sources_mixin.py#L87-L107
def imported_targets(self): """ :returns: target instances for specs referenced by imported_target_specs. :rtype: list of Target """ libs = [] for spec in self.imported_target_specs(payload=self.payload): resolved_target = self._build_graph.get_target_from_spec(spec, relative_to=self.address.spec_path) if not resolved_target: raise self.UnresolvedImportError( 'Could not find target {spec} referenced from {relative_to}' .format(spec=spec, relative_to=self.address.spec)) try: libs.append(self.expected_target_constraint.validate_satisfied_by(resolved_target)) except TypeConstraintError as e: raise self.WrongTargetTypeError( 'Wrong target type {spec} referenced from remote sources target {relative_to}: {err}' .format(spec=spec, relative_to=self.address.spec, err=str(e)), e) return libs
[ "def", "imported_targets", "(", "self", ")", ":", "libs", "=", "[", "]", "for", "spec", "in", "self", ".", "imported_target_specs", "(", "payload", "=", "self", ".", "payload", ")", ":", "resolved_target", "=", "self", ".", "_build_graph", ".", "get_target_from_spec", "(", "spec", ",", "relative_to", "=", "self", ".", "address", ".", "spec_path", ")", "if", "not", "resolved_target", ":", "raise", "self", ".", "UnresolvedImportError", "(", "'Could not find target {spec} referenced from {relative_to}'", ".", "format", "(", "spec", "=", "spec", ",", "relative_to", "=", "self", ".", "address", ".", "spec", ")", ")", "try", ":", "libs", ".", "append", "(", "self", ".", "expected_target_constraint", ".", "validate_satisfied_by", "(", "resolved_target", ")", ")", "except", "TypeConstraintError", "as", "e", ":", "raise", "self", ".", "WrongTargetTypeError", "(", "'Wrong target type {spec} referenced from remote sources target {relative_to}: {err}'", ".", "format", "(", "spec", "=", "spec", ",", "relative_to", "=", "self", ".", "address", ".", "spec", ",", "err", "=", "str", "(", "e", ")", ")", ",", "e", ")", "return", "libs" ]
:returns: target instances for specs referenced by imported_target_specs. :rtype: list of Target
[ ":", "returns", ":", "target", "instances", "for", "specs", "referenced", "by", "imported_target_specs", ".", ":", "rtype", ":", "list", "of", "Target" ]
python
train
djgagne/hagelslag
hagelslag/processing/TrackModeler.py
https://github.com/djgagne/hagelslag/blob/6fb6c3df90bf4867e13a97d3460b14471d107df1/hagelslag/processing/TrackModeler.py#L193-L292
def fit_condition_threshold_models(self, model_names, model_objs, input_columns, output_column="Matched", output_threshold=0.5, num_folds=5, threshold_score="ets"): """ Fit models to predict hail/no-hail and use cross-validation to determine the probaility threshold that maximizes a skill score. Args: model_names: List of machine learning model names model_objs: List of Scikit-learn ML models input_columns: List of input variables in the training data output_column: Column used for prediction output_threshold: Values exceeding this threshold are considered positive events; below are nulls num_folds: Number of folds in the cross-validation procedure threshold_score: Score available in ContingencyTable used for determining the best probability threshold Returns: None """ print("Fitting condition models") groups = self.data["train"]["member"][self.group_col].unique() weights=None for group in groups: print(group) group_data = self.data["train"]["combo"].iloc[ np.where(self.data["train"]["combo"][self.group_col] == group)[0]] if self.sector: lon_obj = group_data.loc[:,'Centroid_Lon'] lat_obj = group_data.loc[:,'Centroid_Lat'] conus_lat_lon_points = zip(lon_obj.values.ravel(),lat_obj.values.ravel()) center_lon, center_lat = self.proj_dict["lon_0"],self.proj_dict["lat_0"] distances = np.array([np.sqrt((x-center_lon)**2+\ (y-center_lat)**2) for (x, y) in conus_lat_lon_points]) min_dist, max_minus_min = min(distances),max(distances)-min(distances) distance_0_1 = [1.0-((d - min_dist)/(max_minus_min)) for d in distances] weights = np.array(distance_0_1) output_data = np.where(group_data.loc[:, output_column] > output_threshold, 1, 0) ones = np.count_nonzero(output_data > 0) print("Ones: ", ones, "Zeros: ", np.count_nonzero(output_data == 0)) self.condition_models[group] = {} num_elements = group_data[input_columns].shape[0] for m, model_name in enumerate(model_names): print(model_name) roc = DistributedROC(thresholds=np.arange(0, 1.1, 0.01)) self.condition_models[group][model_name] = deepcopy(model_objs[m]) try: kf = KFold(n_splits=num_folds) for train_index, test_index in kf.split(group_data[input_columns].values): if np.count_nonzero(output_data[train_index]) > 0: try: self.condition_models[group][model_name].fit( group_data.iloc[train_index][input_columns], output_data[train_index],sample_weight=weights[train_index]) except: self.condition_models[group][model_name].fit( group_data.iloc[train_index][input_columns], output_data[train_index]) cv_preds = self.condition_models[group][model_name].predict_proba( group_data.iloc[test_index][input_columns])[:,1] roc.update(cv_preds, output_data[test_index]) else: continue except TypeError: kf = KFold(num_elements,n_folds=num_folds) for train_index, test_index in kf: if np.count_nonzero(output_data[train_index]) > 0: try: self.condition_models[group][model_name].fit( group_data.iloc[train_index][input_columns], output_data[train_index],sample_weight=weights[train_index]) except: self.condition_models[group][model_name].fit( group_data.iloc[train_index][input_columns], output_data[train_index]) cv_preds = self.condition_models[group][model_name].predict_proba( group_data.iloc[test_index][input_columns])[:, 1] roc.update(cv_preds, output_data[test_index]) else: continue self.condition_models[group][ model_name + "_condition_threshold"], _ = roc.max_threshold_score(threshold_score) print(model_name + " condition threshold: {0:0.3f}".format( self.condition_models[group][model_name + "_condition_threshold"])) 
self.condition_models[group][model_name].fit(group_data[input_columns], output_data)
[ "def", "fit_condition_threshold_models", "(", "self", ",", "model_names", ",", "model_objs", ",", "input_columns", ",", "output_column", "=", "\"Matched\"", ",", "output_threshold", "=", "0.5", ",", "num_folds", "=", "5", ",", "threshold_score", "=", "\"ets\"", ")", ":", "print", "(", "\"Fitting condition models\"", ")", "groups", "=", "self", ".", "data", "[", "\"train\"", "]", "[", "\"member\"", "]", "[", "self", ".", "group_col", "]", ".", "unique", "(", ")", "weights", "=", "None", "for", "group", "in", "groups", ":", "print", "(", "group", ")", "group_data", "=", "self", ".", "data", "[", "\"train\"", "]", "[", "\"combo\"", "]", ".", "iloc", "[", "np", ".", "where", "(", "self", ".", "data", "[", "\"train\"", "]", "[", "\"combo\"", "]", "[", "self", ".", "group_col", "]", "==", "group", ")", "[", "0", "]", "]", "if", "self", ".", "sector", ":", "lon_obj", "=", "group_data", ".", "loc", "[", ":", ",", "'Centroid_Lon'", "]", "lat_obj", "=", "group_data", ".", "loc", "[", ":", ",", "'Centroid_Lat'", "]", "conus_lat_lon_points", "=", "zip", "(", "lon_obj", ".", "values", ".", "ravel", "(", ")", ",", "lat_obj", ".", "values", ".", "ravel", "(", ")", ")", "center_lon", ",", "center_lat", "=", "self", ".", "proj_dict", "[", "\"lon_0\"", "]", ",", "self", ".", "proj_dict", "[", "\"lat_0\"", "]", "distances", "=", "np", ".", "array", "(", "[", "np", ".", "sqrt", "(", "(", "x", "-", "center_lon", ")", "**", "2", "+", "(", "y", "-", "center_lat", ")", "**", "2", ")", "for", "(", "x", ",", "y", ")", "in", "conus_lat_lon_points", "]", ")", "min_dist", ",", "max_minus_min", "=", "min", "(", "distances", ")", ",", "max", "(", "distances", ")", "-", "min", "(", "distances", ")", "distance_0_1", "=", "[", "1.0", "-", "(", "(", "d", "-", "min_dist", ")", "/", "(", "max_minus_min", ")", ")", "for", "d", "in", "distances", "]", "weights", "=", "np", ".", "array", "(", "distance_0_1", ")", "output_data", "=", "np", ".", "where", "(", "group_data", ".", "loc", "[", ":", ",", "output_column", "]", ">", "output_threshold", ",", "1", ",", "0", ")", "ones", "=", "np", ".", "count_nonzero", "(", "output_data", ">", "0", ")", "print", "(", "\"Ones: \"", ",", "ones", ",", "\"Zeros: \"", ",", "np", ".", "count_nonzero", "(", "output_data", "==", "0", ")", ")", "self", ".", "condition_models", "[", "group", "]", "=", "{", "}", "num_elements", "=", "group_data", "[", "input_columns", "]", ".", "shape", "[", "0", "]", "for", "m", ",", "model_name", "in", "enumerate", "(", "model_names", ")", ":", "print", "(", "model_name", ")", "roc", "=", "DistributedROC", "(", "thresholds", "=", "np", ".", "arange", "(", "0", ",", "1.1", ",", "0.01", ")", ")", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", "=", "deepcopy", "(", "model_objs", "[", "m", "]", ")", "try", ":", "kf", "=", "KFold", "(", "n_splits", "=", "num_folds", ")", "for", "train_index", ",", "test_index", "in", "kf", ".", "split", "(", "group_data", "[", "input_columns", "]", ".", "values", ")", ":", "if", "np", ".", "count_nonzero", "(", "output_data", "[", "train_index", "]", ")", ">", "0", ":", "try", ":", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "fit", "(", "group_data", ".", "iloc", "[", "train_index", "]", "[", "input_columns", "]", ",", "output_data", "[", "train_index", "]", ",", "sample_weight", "=", "weights", "[", "train_index", "]", ")", "except", ":", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "fit", "(", "group_data", ".", "iloc", "[", 
"train_index", "]", "[", "input_columns", "]", ",", "output_data", "[", "train_index", "]", ")", "cv_preds", "=", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "predict_proba", "(", "group_data", ".", "iloc", "[", "test_index", "]", "[", "input_columns", "]", ")", "[", ":", ",", "1", "]", "roc", ".", "update", "(", "cv_preds", ",", "output_data", "[", "test_index", "]", ")", "else", ":", "continue", "except", "TypeError", ":", "kf", "=", "KFold", "(", "num_elements", ",", "n_folds", "=", "num_folds", ")", "for", "train_index", ",", "test_index", "in", "kf", ":", "if", "np", ".", "count_nonzero", "(", "output_data", "[", "train_index", "]", ")", ">", "0", ":", "try", ":", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "fit", "(", "group_data", ".", "iloc", "[", "train_index", "]", "[", "input_columns", "]", ",", "output_data", "[", "train_index", "]", ",", "sample_weight", "=", "weights", "[", "train_index", "]", ")", "except", ":", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "fit", "(", "group_data", ".", "iloc", "[", "train_index", "]", "[", "input_columns", "]", ",", "output_data", "[", "train_index", "]", ")", "cv_preds", "=", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "predict_proba", "(", "group_data", ".", "iloc", "[", "test_index", "]", "[", "input_columns", "]", ")", "[", ":", ",", "1", "]", "roc", ".", "update", "(", "cv_preds", ",", "output_data", "[", "test_index", "]", ")", "else", ":", "continue", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "+", "\"_condition_threshold\"", "]", ",", "_", "=", "roc", ".", "max_threshold_score", "(", "threshold_score", ")", "print", "(", "model_name", "+", "\" condition threshold: {0:0.3f}\"", ".", "format", "(", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "+", "\"_condition_threshold\"", "]", ")", ")", "self", ".", "condition_models", "[", "group", "]", "[", "model_name", "]", ".", "fit", "(", "group_data", "[", "input_columns", "]", ",", "output_data", ")" ]
Fit models to predict hail/no-hail and use cross-validation to determine the probability threshold
        that maximizes a skill score.

        Args:
            model_names: List of machine learning model names
            model_objs: List of Scikit-learn ML models
            input_columns: List of input variables in the training data
            output_column: Column used for prediction
            output_threshold: Values exceeding this threshold are considered positive events; below are nulls
            num_folds: Number of folds in the cross-validation procedure
            threshold_score: Score available in ContingencyTable used for determining the best probability threshold

        Returns:
            None
[ "Fit", "models", "to", "predict", "hail", "/", "no", "-", "hail", "and", "use", "cross", "-", "validation", "to", "determine", "the", "probaility", "threshold", "that", "maximizes", "a", "skill", "score", "." ]
python
train
coghost/izen
izen/icfg.py
https://github.com/coghost/izen/blob/432db017f99dd2ba809e1ba1792145ab6510263d/izen/icfg.py#L82-L95
def __spawn(self): """通过手动方式, 指定字段类型与默认值, - 如果配置文件有变动, 需要手动在这里添加 - 如果配置字段未在这里指出, 则默认为 string, 使用时需要手动转换 """ dat = { 'log.enabled': False, 'log.file_pth': '{}/{}.log'.format(self._pth, self._cfg_name), 'log.file_backups': 3, 'log.file_size': 5, 'log.level': 10, 'log.symbol': '☰☷☳☴☵☲☶☱', } self.__do_init(dat)
[ "def", "__spawn", "(", "self", ")", ":", "dat", "=", "{", "'log.enabled'", ":", "False", ",", "'log.file_pth'", ":", "'{}/{}.log'", ".", "format", "(", "self", ".", "_pth", ",", "self", ".", "_cfg_name", ")", ",", "'log.file_backups'", ":", "3", ",", "'log.file_size'", ":", "5", ",", "'log.level'", ":", "10", ",", "'log.symbol'", ":", "'☰☷☳☴☵☲☶☱',", "", "}", "self", ".", "__do_init", "(", "dat", ")" ]
Manually specify the field types and default values.

        - If the configuration file changes, the new fields must be added here by hand
        - If a configuration field is not listed here, it defaults to string and must be converted manually when used
[ "通过手动方式", "指定字段类型与默认值", "-", "如果配置文件有变动", "需要手动在这里添加", "-", "如果配置字段未在这里指出", "则默认为", "string", "使用时需要手动转换" ]
python
train
crossbario/txaio
txaio/tx.py
https://github.com/crossbario/txaio/blob/29c77ff1210cabd4cc03f16f34672612e7eef704/txaio/tx.py#L376-L387
def failure_message(self, fail): """ :param fail: must be an IFailedFuture returns a unicode error-message """ try: return u'{0}: {1}'.format( fail.value.__class__.__name__, fail.getErrorMessage(), ) except Exception: return 'Failed to produce failure message for "{0}"'.format(fail)
[ "def", "failure_message", "(", "self", ",", "fail", ")", ":", "try", ":", "return", "u'{0}: {1}'", ".", "format", "(", "fail", ".", "value", ".", "__class__", ".", "__name__", ",", "fail", ".", "getErrorMessage", "(", ")", ",", ")", "except", "Exception", ":", "return", "'Failed to produce failure message for \"{0}\"'", ".", "format", "(", "fail", ")" ]
:param fail: must be an IFailedFuture returns a unicode error-message
[ ":", "param", "fail", ":", "must", "be", "an", "IFailedFuture", "returns", "a", "unicode", "error", "-", "message" ]
python
train
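A small sketch of failure_message() with the Twisted backend selected; it assumes txaio re-exposes the method at module level once use_twisted() has been called, and the exception is made up:

import txaio
txaio.use_twisted()                         # select the Twisted implementation

from twisted.python.failure import Failure

try:
    raise ValueError("boom")
except ValueError:
    fail = Failure()                        # wraps the active exception

print(txaio.failure_message(fail))          # -> "ValueError: boom"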
tensorflow/probability
tensorflow_probability/python/internal/dtype_util.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/dtype_util.py#L98-L103
def is_floating(dtype): """Returns whether this is a (non-quantized, real) floating point type.""" dtype = tf.as_dtype(dtype) if hasattr(dtype, 'is_floating'): return dtype.is_floating return np.issubdtype(np.dtype(dtype), np.float)
[ "def", "is_floating", "(", "dtype", ")", ":", "dtype", "=", "tf", ".", "as_dtype", "(", "dtype", ")", "if", "hasattr", "(", "dtype", ",", "'is_floating'", ")", ":", "return", "dtype", ".", "is_floating", "return", "np", ".", "issubdtype", "(", "np", ".", "dtype", "(", "dtype", ")", ",", "np", ".", "float", ")" ]
Returns whether this is a (non-quantized, real) floating point type.
[ "Returns", "whether", "this", "is", "a", "(", "non", "-", "quantized", "real", ")", "floating", "point", "type", "." ]
python
test
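A quick check of is_floating(); the import path mirrors the record's path (an internal module) and the dtypes are ordinary TensorFlow/NumPy types:

import numpy as np
import tensorflow as tf
from tensorflow_probability.python.internal import dtype_util  # internal module, path from the record

print(dtype_util.is_floating(tf.float32))   # True
print(dtype_util.is_floating(np.float64))   # True
print(dtype_util.is_floating(tf.int32))     # False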
manns/pyspread
pyspread/src/lib/_string_helpers.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/_string_helpers.py#L35-L57
def quote(code): """Returns quoted code if not already quoted and if possible Parameters ---------- code: String \tCode thta is quoted """ try: code = code.rstrip() except AttributeError: # code is not a string, may be None --> There is no code to quote return code if code and code[0] + code[-1] not in ('""', "''", "u'", '"') \ and '"' not in code: return 'u"' + code + '"' else: return code
[ "def", "quote", "(", "code", ")", ":", "try", ":", "code", "=", "code", ".", "rstrip", "(", ")", "except", "AttributeError", ":", "# code is not a string, may be None --> There is no code to quote", "return", "code", "if", "code", "and", "code", "[", "0", "]", "+", "code", "[", "-", "1", "]", "not", "in", "(", "'\"\"'", ",", "\"''\"", ",", "\"u'\"", ",", "'\"'", ")", "and", "'\"'", "not", "in", "code", ":", "return", "'u\"'", "+", "code", "+", "'\"'", "else", ":", "return", "code" ]
Returns quoted code if not already quoted and if possible

    Parameters
    ----------
    code: String
    \tCode that is quoted
[ "Returns", "quoted", "code", "if", "not", "already", "quoted", "and", "if", "possible" ]
python
train
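A short behaviour sketch for quote() above; the import path is taken from the record and the sample strings are made up:

from pyspread.src.lib._string_helpers import quote  # module path from the record above

print(quote('hello'))      # -> u"hello"   (unquoted text gets wrapped in u"...")
print(quote('"hello"'))    # -> "hello"    (already quoted, returned unchanged)
print(quote(None))         # -> None       (non-strings fall through the AttributeError branch)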
JnyJny/Geometry
Geometry/ellipse.py
https://github.com/JnyJny/Geometry/blob/3500f815fa56c535b36d1b6fd0afe69ce5d055be/Geometry/ellipse.py#L416-L446
def circumcircleForTriangle(cls, triangle): ''' :param: triangle - Triangle class :return: Circle class Returns the circle where every vertex in the input triangle is on the radius of that circle. ''' if triangle.isRight: # circumcircle origin is the midpoint of the hypotenues o = triangle.hypotenuse.midpoint r = o.distance(triangle.A) return cls(o, r) # otherwise # 1. find the normals to two sides # 2. translate them to the midpoints of those two sides # 3. intersect those lines for center of circumcircle # 4. radius is distance from center to any vertex in the triangle abn = triangle.AB.normal abn += triangle.AB.midpoint acn = triangle.AC.normal acn += triangle.AC.midpoint o = abn.intersection(acn) r = o.distance(triangle.A) return cls(o, r)
[ "def", "circumcircleForTriangle", "(", "cls", ",", "triangle", ")", ":", "if", "triangle", ".", "isRight", ":", "# circumcircle origin is the midpoint of the hypotenues", "o", "=", "triangle", ".", "hypotenuse", ".", "midpoint", "r", "=", "o", ".", "distance", "(", "triangle", ".", "A", ")", "return", "cls", "(", "o", ",", "r", ")", "# otherwise", "# 1. find the normals to two sides", "# 2. translate them to the midpoints of those two sides", "# 3. intersect those lines for center of circumcircle", "# 4. radius is distance from center to any vertex in the triangle", "abn", "=", "triangle", ".", "AB", ".", "normal", "abn", "+=", "triangle", ".", "AB", ".", "midpoint", "acn", "=", "triangle", ".", "AC", ".", "normal", "acn", "+=", "triangle", ".", "AC", ".", "midpoint", "o", "=", "abn", ".", "intersection", "(", "acn", ")", "r", "=", "o", ".", "distance", "(", "triangle", ".", "A", ")", "return", "cls", "(", "o", ",", "r", ")" ]
:param: triangle - Triangle class
        :return: Circle class

        Returns the circle where every vertex in the input triangle lies on
        the circumference of that circle.
[ ":", "param", ":", "triangle", "-", "Triangle", "class", ":", "return", ":", "Circle", "class" ]
python
train
log2timeline/dfdatetime
dfdatetime/cocoa_time.py
https://github.com/log2timeline/dfdatetime/blob/141ca4ef1eff3d354b5deaac3d81cb08506f98d6/dfdatetime/cocoa_time.py#L106-L126
def CopyToDateTimeString(self): """Copies the Cocoa timestamp to a date and time string. Returns: str: date and time value formatted as: YYYY-MM-DD hh:mm:ss.###### or None if the timestamp cannot be copied to a date and time string. """ if self._timestamp is None: return None number_of_days, hours, minutes, seconds = self._GetTimeValues( int(self._timestamp)) year, month, day_of_month = self._GetDateValuesWithEpoch( number_of_days, self._EPOCH) microseconds = int( (self._timestamp % 1) * definitions.MICROSECONDS_PER_SECOND) return '{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:06d}'.format( year, month, day_of_month, hours, minutes, seconds, microseconds)
[ "def", "CopyToDateTimeString", "(", "self", ")", ":", "if", "self", ".", "_timestamp", "is", "None", ":", "return", "None", "number_of_days", ",", "hours", ",", "minutes", ",", "seconds", "=", "self", ".", "_GetTimeValues", "(", "int", "(", "self", ".", "_timestamp", ")", ")", "year", ",", "month", ",", "day_of_month", "=", "self", ".", "_GetDateValuesWithEpoch", "(", "number_of_days", ",", "self", ".", "_EPOCH", ")", "microseconds", "=", "int", "(", "(", "self", ".", "_timestamp", "%", "1", ")", "*", "definitions", ".", "MICROSECONDS_PER_SECOND", ")", "return", "'{0:04d}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}.{6:06d}'", ".", "format", "(", "year", ",", "month", ",", "day_of_month", ",", "hours", ",", "minutes", ",", "seconds", ",", "microseconds", ")" ]
Copies the Cocoa timestamp to a date and time string. Returns: str: date and time value formatted as: YYYY-MM-DD hh:mm:ss.###### or None if the timestamp cannot be copied to a date and time string.
[ "Copies", "the", "Cocoa", "timestamp", "to", "a", "date", "and", "time", "string", "." ]
python
train
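A usage sketch for CopyToDateTimeString(); it assumes CocoaTime accepts a timestamp keyword argument, and the timestamp value is made up:

from dfdatetime import cocoa_time

# Cocoa timestamps count seconds from the 2001-01-01 00:00:00 epoch.
date_time = cocoa_time.CocoaTime(timestamp=394993845.0)   # illustrative value (mid-2013)
print(date_time.CopyToDateTimeString())                   # 'YYYY-MM-DD hh:mm:ss.######' string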
project-rig/rig
rig/routing_table/utils.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/rig/routing_table/utils.py#L234-L282
def expand_entry(entry, ignore_xs=0x0): """Turn all Xs which are not marked in `ignore_xs` into ``0``\ s and ``1``\ s. The following will expand any Xs in bits ``1..3``\ :: >>> entry = RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100) >>> list(expand_entry(entry, 0xfffffff1)) == [ ... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1110), # 010X ... RoutingTableEntry(set(), 0b0110, 0xfffffff0 | 0b1110), # 011X ... ] True Parameters ---------- entry : :py:class:`~rig.routing_table.RoutingTableEntry` or similar The entry to expand. ignore_xs : int Bit-mask of Xs which should not be expanded. Yields ------ :py:class:`~rig.routing_table.RoutingTableEntry` Routing table entries which represent the original entry but with all Xs not masked off by `ignore_xs` replaced with 1s and 0s. """ # Get all the Xs in the entry that are not ignored xs = (~entry.key & ~entry.mask) & ~ignore_xs # Find the most significant X for bit in (1 << i for i in range(31, -1, -1)): if bit & xs: # Yield all the entries with this bit set as 0 entry_0 = RoutingTableEntry(entry.route, entry.key, entry.mask | bit, entry.sources) for new_entry in expand_entry(entry_0, ignore_xs): yield new_entry # And yield all the entries with this bit set as 1 entry_1 = RoutingTableEntry(entry.route, entry.key | bit, entry.mask | bit, entry.sources) for new_entry in expand_entry(entry_1, ignore_xs): yield new_entry # Stop looking for Xs break else: # If there are no Xs then yield the entry we were given. yield entry
[ "def", "expand_entry", "(", "entry", ",", "ignore_xs", "=", "0x0", ")", ":", "# Get all the Xs in the entry that are not ignored", "xs", "=", "(", "~", "entry", ".", "key", "&", "~", "entry", ".", "mask", ")", "&", "~", "ignore_xs", "# Find the most significant X", "for", "bit", "in", "(", "1", "<<", "i", "for", "i", "in", "range", "(", "31", ",", "-", "1", ",", "-", "1", ")", ")", ":", "if", "bit", "&", "xs", ":", "# Yield all the entries with this bit set as 0", "entry_0", "=", "RoutingTableEntry", "(", "entry", ".", "route", ",", "entry", ".", "key", ",", "entry", ".", "mask", "|", "bit", ",", "entry", ".", "sources", ")", "for", "new_entry", "in", "expand_entry", "(", "entry_0", ",", "ignore_xs", ")", ":", "yield", "new_entry", "# And yield all the entries with this bit set as 1", "entry_1", "=", "RoutingTableEntry", "(", "entry", ".", "route", ",", "entry", ".", "key", "|", "bit", ",", "entry", ".", "mask", "|", "bit", ",", "entry", ".", "sources", ")", "for", "new_entry", "in", "expand_entry", "(", "entry_1", ",", "ignore_xs", ")", ":", "yield", "new_entry", "# Stop looking for Xs", "break", "else", ":", "# If there are no Xs then yield the entry we were given.", "yield", "entry" ]
Turn all Xs which are not marked in `ignore_xs` into ``0``\ s and ``1``\ s. The following will expand any Xs in bits ``1..3``\ :: >>> entry = RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1100) >>> list(expand_entry(entry, 0xfffffff1)) == [ ... RoutingTableEntry(set(), 0b0100, 0xfffffff0 | 0b1110), # 010X ... RoutingTableEntry(set(), 0b0110, 0xfffffff0 | 0b1110), # 011X ... ] True Parameters ---------- entry : :py:class:`~rig.routing_table.RoutingTableEntry` or similar The entry to expand. ignore_xs : int Bit-mask of Xs which should not be expanded. Yields ------ :py:class:`~rig.routing_table.RoutingTableEntry` Routing table entries which represent the original entry but with all Xs not masked off by `ignore_xs` replaced with 1s and 0s.
[ "Turn", "all", "Xs", "which", "are", "not", "marked", "in", "ignore_xs", "into", "0", "\\", "s", "and", "1", "\\", "s", "." ]
python
train
bodylabs/lace
lace/geometry.py
https://github.com/bodylabs/lace/blob/b68f4a60a4cac66c0607ffbae38ef9d07d37f459/lace/geometry.py#L52-L64
def predict_body_units(self): ''' There is no prediction for united states unit system. This may fail when a mesh is not axis-aligned ''' longest_dist = np.max(np.max(self.v, axis=0) - np.min(self.v, axis=0)) if round(longest_dist / 1000) > 0: return 'mm' if round(longest_dist / 100) > 0: return 'cm' if round(longest_dist / 10) > 0: return 'dm' return 'm'
[ "def", "predict_body_units", "(", "self", ")", ":", "longest_dist", "=", "np", ".", "max", "(", "np", ".", "max", "(", "self", ".", "v", ",", "axis", "=", "0", ")", "-", "np", ".", "min", "(", "self", ".", "v", ",", "axis", "=", "0", ")", ")", "if", "round", "(", "longest_dist", "/", "1000", ")", ">", "0", ":", "return", "'mm'", "if", "round", "(", "longest_dist", "/", "100", ")", ">", "0", ":", "return", "'cm'", "if", "round", "(", "longest_dist", "/", "10", ")", ">", "0", ":", "return", "'dm'", "return", "'m'" ]
There is no prediction for united states unit system. This may fail when a mesh is not axis-aligned
[ "There", "is", "no", "prediction", "for", "united", "states", "unit", "system", ".", "This", "may", "fail", "when", "a", "mesh", "is", "not", "axis", "-", "aligned" ]
python
train
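The unit heuristic above can be checked with plain numpy; the vertex values below are made up to represent a roughly 1.7 m tall body stored in millimetres:

import numpy as np

verts = np.array([[0.0, 0.0, 0.0], [300.0, 400.0, 1700.0]])      # hypothetical bounding vertices
longest = np.max(np.max(verts, axis=0) - np.min(verts, axis=0))  # longest bounding-box edge
print(round(longest / 1000) > 0)                                 # True -> reported as 'mm'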
xapple/fasta
fasta/__init__.py
https://github.com/xapple/fasta/blob/a827c3138812d555203be45187ffae1277dd0d76/fasta/__init__.py#L138-L142
def flush(self): """Empty the buffer.""" for seq in self.buffer: SeqIO.write(seq, self.handle, self.format) self.buffer = []
[ "def", "flush", "(", "self", ")", ":", "for", "seq", "in", "self", ".", "buffer", ":", "SeqIO", ".", "write", "(", "seq", ",", "self", ".", "handle", ",", "self", ".", "format", ")", "self", ".", "buffer", "=", "[", "]" ]
Empty the buffer.
[ "Empty", "the", "buffer", "." ]
python
train
mitsei/dlkit
dlkit/json_/cataloging/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/cataloging/sessions.py#L915-L935
def get_root_catalogs(self): """Gets the root catalogs in the catalog hierarchy. A node with no parents is an orphan. While all catalog ``Ids`` are known to the hierarchy, an orphan does not appear in the hierarchy unless explicitly added as a root node or child of another node. return: (osid.cataloging.CatalogList) - the root catalogs raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method is must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.get_root_bins if self._catalog_session is not None: return self._catalog_session.get_root_catalogs() return CatalogLookupSession( self._proxy, self._runtime).get_catalogs_by_ids(list(self.get_root_catalog_ids()))
[ "def", "get_root_catalogs", "(", "self", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.get_root_bins", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "get_root_catalogs", "(", ")", "return", "CatalogLookupSession", "(", "self", ".", "_proxy", ",", "self", ".", "_runtime", ")", ".", "get_catalogs_by_ids", "(", "list", "(", "self", ".", "get_root_catalog_ids", "(", ")", ")", ")" ]
Gets the root catalogs in the catalog hierarchy.

        A node with no parents is an orphan. While all catalog ``Ids`` are known
        to the hierarchy, an orphan does not appear in the hierarchy unless
        explicitly added as a root node or child of another node.

        return: (osid.cataloging.CatalogList) - the root catalogs
        raise: OperationFailed - unable to complete request
        raise: PermissionDenied - authorization failure
        *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "root", "catalogs", "in", "the", "catalog", "hierarchy", "." ]
python
train
astraw38/lint
lint/validators/validation_factory.py
https://github.com/astraw38/lint/blob/162ceefcb812f07d18544aaa887b9ec4f102cfb1/lint/validators/validation_factory.py#L32-L42
def register_validator(validator): """ Register a Validator class for file verification. :param validator: :return: """ if hasattr(validator, "EXTS") and hasattr(validator, "run"): ValidatorFactory.PLUGINS.append(validator) else: raise ValidatorException("Validator does not have 'run' method or EXTS variable!")
[ "def", "register_validator", "(", "validator", ")", ":", "if", "hasattr", "(", "validator", ",", "\"EXTS\"", ")", "and", "hasattr", "(", "validator", ",", "\"run\"", ")", ":", "ValidatorFactory", ".", "PLUGINS", ".", "append", "(", "validator", ")", "else", ":", "raise", "ValidatorException", "(", "\"Validator does not have 'run' method or EXTS variable!\"", ")" ]
Register a Validator class for file verification. :param validator: :return:
[ "Register", "a", "Validator", "class", "for", "file", "verification", "." ]
python
train
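A sketch of registering a plugin with the factory above; the validator class, its run() signature and return value are hypothetical, since the check only requires an EXTS attribute and a run attribute:

class PyLintValidator(object):
    """Hypothetical validator plugin."""
    EXTS = ['.py']

    @staticmethod
    def run(old_data, new_data):    # signature assumed for illustration
        return 0, []

ValidatorFactory.register_validator(PyLintValidator)   # appended to ValidatorFactory.PLUGINS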
roll/interest-py
interest/logger/logger.py
https://github.com/roll/interest-py/blob/e6e1def4f2999222aac2fb1d290ae94250673b89/interest/logger/logger.py#L91-L96
def debug(self, message, *args, **kwargs): """Log debug event. Compatible with logging.debug signature. """ self.system.debug(message, *args, **kwargs)
[ "def", "debug", "(", "self", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "system", ".", "debug", "(", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Log debug event. Compatible with logging.debug signature.
[ "Log", "debug", "event", "." ]
python
train
project-generator/project_generator
project_generator/project.py
https://github.com/project-generator/project_generator/blob/a361be16eeb5a8829ff5cd26850ddd4b264296fe/project_generator/project.py#L157-L171
def get_project_template(name="Default", output_type='exe', debugger=None, build_dir='build'): """ Project data (+ data) """ project_template = { 'build_dir' : build_dir, # Build output path 'debugger' : debugger, # Debugger 'export_dir': '', # Export directory path 'name': name, # project name 'output_type': output_type, # output type, default - exe 'target': '', # target 'tools_supported': [], # Tools which are supported, } project_template.update(ProjectTemplate._get_common_data_template()) project_template.update(ProjectTemplate._get_tool_specific_data_template()) return project_template
[ "def", "get_project_template", "(", "name", "=", "\"Default\"", ",", "output_type", "=", "'exe'", ",", "debugger", "=", "None", ",", "build_dir", "=", "'build'", ")", ":", "project_template", "=", "{", "'build_dir'", ":", "build_dir", ",", "# Build output path", "'debugger'", ":", "debugger", ",", "# Debugger", "'export_dir'", ":", "''", ",", "# Export directory path", "'name'", ":", "name", ",", "# project name", "'output_type'", ":", "output_type", ",", "# output type, default - exe", "'target'", ":", "''", ",", "# target", "'tools_supported'", ":", "[", "]", ",", "# Tools which are supported,", "}", "project_template", ".", "update", "(", "ProjectTemplate", ".", "_get_common_data_template", "(", ")", ")", "project_template", ".", "update", "(", "ProjectTemplate", ".", "_get_tool_specific_data_template", "(", ")", ")", "return", "project_template" ]
Project data (+ data)
[ "Project", "data", "(", "+", "data", ")" ]
python
train
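A sketch of filling the template returned above; get_project_template is assumed to be reachable on ProjectTemplate as the body suggests, and the target and tool names are hypothetical:

template = ProjectTemplate.get_project_template(name='blinky', output_type='exe')
template['target'] = 'frdm-k64f'           # hypothetical target name
template['tools_supported'] = ['uvision']  # hypothetical tool
template['export_dir'] = 'generated_projects'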
aiogram/aiogram
aiogram/types/input_media.py
https://github.com/aiogram/aiogram/blob/2af930149ce2482547721e2c8755c10307295e48/aiogram/types/input_media.py#L218-L248
def attach(self, media: typing.Union[InputMedia, typing.Dict]): """ Attach media :param media: """ if isinstance(media, dict): if 'type' not in media: raise ValueError(f"Invalid media!") media_type = media['type'] if media_type == 'photo': media = InputMediaPhoto(**media) elif media_type == 'video': media = InputMediaVideo(**media) # elif media_type == 'document': # media = InputMediaDocument(**media) # elif media_type == 'audio': # media = InputMediaAudio(**media) # elif media_type == 'animation': # media = InputMediaAnimation(**media) else: raise TypeError(f"Invalid media type '{media_type}'!") elif not isinstance(media, InputMedia): raise TypeError(f"Media must be an instance of InputMedia or dict, not {type(media).__name__}") elif media.type in ['document', 'audio', 'animation']: raise ValueError(f"This type of media is not supported by media groups!") self.media.append(media)
[ "def", "attach", "(", "self", ",", "media", ":", "typing", ".", "Union", "[", "InputMedia", ",", "typing", ".", "Dict", "]", ")", ":", "if", "isinstance", "(", "media", ",", "dict", ")", ":", "if", "'type'", "not", "in", "media", ":", "raise", "ValueError", "(", "f\"Invalid media!\"", ")", "media_type", "=", "media", "[", "'type'", "]", "if", "media_type", "==", "'photo'", ":", "media", "=", "InputMediaPhoto", "(", "*", "*", "media", ")", "elif", "media_type", "==", "'video'", ":", "media", "=", "InputMediaVideo", "(", "*", "*", "media", ")", "# elif media_type == 'document':", "# media = InputMediaDocument(**media)", "# elif media_type == 'audio':", "# media = InputMediaAudio(**media)", "# elif media_type == 'animation':", "# media = InputMediaAnimation(**media)", "else", ":", "raise", "TypeError", "(", "f\"Invalid media type '{media_type}'!\"", ")", "elif", "not", "isinstance", "(", "media", ",", "InputMedia", ")", ":", "raise", "TypeError", "(", "f\"Media must be an instance of InputMedia or dict, not {type(media).__name__}\"", ")", "elif", "media", ".", "type", "in", "[", "'document'", ",", "'audio'", ",", "'animation'", "]", ":", "raise", "ValueError", "(", "f\"This type of media is not supported by media groups!\"", ")", "self", ".", "media", ".", "append", "(", "media", ")" ]
Attach media :param media:
[ "Attach", "media" ]
python
train
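A usage sketch for attach() on a media group; the image URLs are placeholders:

from aiogram.types import InputMediaPhoto, MediaGroup

album = MediaGroup()
album.attach(InputMediaPhoto(media='https://example.com/a.jpg', caption='first'))
album.attach({'type': 'photo', 'media': 'https://example.com/b.jpg'})   # dicts are converted to InputMediaPhoto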
nerdvegas/rez
src/rez/solver.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/solver.py#L956-L994
def intersect(self, range_): """Intersect this scope with a package range. Returns: A new copy of this scope, with variants whos version fall outside of the given range removed. If there were no removals, self is returned. If all variants were removed, None is returned. """ new_slice = None if self.package_request.conflict: if self.package_request.range is None: new_slice = self.solver._get_variant_slice( self.package_name, range_) else: new_range = range_ - self.package_request.range if new_range is not None: new_slice = self.solver._get_variant_slice( self.package_name, new_range) else: new_slice = self.variant_slice.intersect(range_) # intersection reduced the scope to nothing if new_slice is None: if self.pr: self.pr("%s intersected with range '%s' resulted in no packages", self, range_) return None # intersection narrowed the scope if new_slice is not self.variant_slice: scope = self._copy(new_slice) if self.pr: self.pr("%s was intersected to %s by range '%s'", self, scope, range_) return scope # intersection did not change the scope return self
[ "def", "intersect", "(", "self", ",", "range_", ")", ":", "new_slice", "=", "None", "if", "self", ".", "package_request", ".", "conflict", ":", "if", "self", ".", "package_request", ".", "range", "is", "None", ":", "new_slice", "=", "self", ".", "solver", ".", "_get_variant_slice", "(", "self", ".", "package_name", ",", "range_", ")", "else", ":", "new_range", "=", "range_", "-", "self", ".", "package_request", ".", "range", "if", "new_range", "is", "not", "None", ":", "new_slice", "=", "self", ".", "solver", ".", "_get_variant_slice", "(", "self", ".", "package_name", ",", "new_range", ")", "else", ":", "new_slice", "=", "self", ".", "variant_slice", ".", "intersect", "(", "range_", ")", "# intersection reduced the scope to nothing", "if", "new_slice", "is", "None", ":", "if", "self", ".", "pr", ":", "self", ".", "pr", "(", "\"%s intersected with range '%s' resulted in no packages\"", ",", "self", ",", "range_", ")", "return", "None", "# intersection narrowed the scope", "if", "new_slice", "is", "not", "self", ".", "variant_slice", ":", "scope", "=", "self", ".", "_copy", "(", "new_slice", ")", "if", "self", ".", "pr", ":", "self", ".", "pr", "(", "\"%s was intersected to %s by range '%s'\"", ",", "self", ",", "scope", ",", "range_", ")", "return", "scope", "# intersection did not change the scope", "return", "self" ]
Intersect this scope with a package range.

        Returns:
            A new copy of this scope, with variants whose versions fall
            outside of the given range removed. If there were no removals,
            self is returned. If all variants were removed, None is
            returned.
[ "Intersect", "this", "scope", "with", "a", "package", "range", "." ]
python
train
pantsbuild/pants
src/python/pants/util/contextutil.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/util/contextutil.py#L98-L107
def hermetic_environment_as(**kwargs): """Set the environment to the supplied values from an empty state.""" old_environment = os.environ.copy() if PY3 else _copy_and_decode_env(os.environ) _purge_env() try: with environment_as(**kwargs): yield finally: _purge_env() _restore_env(old_environment)
[ "def", "hermetic_environment_as", "(", "*", "*", "kwargs", ")", ":", "old_environment", "=", "os", ".", "environ", ".", "copy", "(", ")", "if", "PY3", "else", "_copy_and_decode_env", "(", "os", ".", "environ", ")", "_purge_env", "(", ")", "try", ":", "with", "environment_as", "(", "*", "*", "kwargs", ")", ":", "yield", "finally", ":", "_purge_env", "(", ")", "_restore_env", "(", "old_environment", ")" ]
Set the environment to the supplied values from an empty state.
[ "Set", "the", "environment", "to", "the", "supplied", "values", "from", "an", "empty", "state", "." ]
python
train
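A sketch of hermetic_environment_as(), assuming it is imported from pants.util.contextutil as in the record's path; the variable values are made up:

import os
from pants.util.contextutil import hermetic_environment_as   # path from the record

with hermetic_environment_as(PATH='/usr/bin', LANG='C'):
    assert os.environ['PATH'] == '/usr/bin'
    assert 'HOME' not in os.environ     # everything not supplied was purged
# the previous environment is restored on exit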
gem/oq-engine
openquake/calculators/reportwriter.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/reportwriter.py#L119-L122
def save(self, fname): """Save the report""" with open(fname, 'wb') as f: f.write(encode(self.text))
[ "def", "save", "(", "self", ",", "fname", ")", ":", "with", "open", "(", "fname", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "encode", "(", "self", ".", "text", ")", ")" ]
Save the report
[ "Save", "the", "report" ]
python
train
seleniumbase/SeleniumBase
seleniumbase/fixtures/base_case.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/fixtures/base_case.py#L967-L985
def create_bootstrap_tour(self, name=None): """ Creates a Bootstrap tour for a website. @Params name - If creating multiple tours at the same time, use this to select the tour you wish to add steps to. """ if not name: name = "default" new_tour = ( """ // Bootstrap Tour var tour = new Tour({ }); tour.addSteps([ """) self._tour_steps[name] = [] self._tour_steps[name].append(new_tour)
[ "def", "create_bootstrap_tour", "(", "self", ",", "name", "=", "None", ")", ":", "if", "not", "name", ":", "name", "=", "\"default\"", "new_tour", "=", "(", "\"\"\"\n // Bootstrap Tour\n var tour = new Tour({\n });\n tour.addSteps([\n \"\"\"", ")", "self", ".", "_tour_steps", "[", "name", "]", "=", "[", "]", "self", ".", "_tour_steps", "[", "name", "]", ".", "append", "(", "new_tour", ")" ]
Creates a Bootstrap tour for a website. @Params name - If creating multiple tours at the same time, use this to select the tour you wish to add steps to.
[ "Creates", "a", "Bootstrap", "tour", "for", "a", "website", "." ]
python
train
andreikop/qutepart
qutepart/__init__.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/__init__.py#L335-L348
def terminate(self): """ Terminate Qutepart instance. This method MUST be called before application stop to avoid crashes and some other interesting effects Call it on close to free memory and stop background highlighting """ self.text = '' self._completer.terminate() if self._highlighter is not None: self._highlighter.terminate() if self._vim is not None: self._vim.terminate()
[ "def", "terminate", "(", "self", ")", ":", "self", ".", "text", "=", "''", "self", ".", "_completer", ".", "terminate", "(", ")", "if", "self", ".", "_highlighter", "is", "not", "None", ":", "self", ".", "_highlighter", ".", "terminate", "(", ")", "if", "self", ".", "_vim", "is", "not", "None", ":", "self", ".", "_vim", ".", "terminate", "(", ")" ]
Terminate Qutepart instance.
        This method MUST be called before the application stops, to avoid crashes and
        some other interesting effects.
        Call it on close to free memory and stop background highlighting
[ "Terminate", "Qutepart", "instance", ".", "This", "method", "MUST", "be", "called", "before", "application", "stop", "to", "avoid", "crashes", "and", "some", "other", "interesting", "effects", "Call", "it", "on", "close", "to", "free", "memory", "and", "stop", "background", "highlighting" ]
python
train
wdecoster/nanoplotter
nanoplotter/nanoplotter_main.py
https://github.com/wdecoster/nanoplotter/blob/80908dd1be585f450da5a66989de9de4d544ec85/nanoplotter/nanoplotter_main.py#L209-L220
def contains_variance(arrays, names): """ Make sure both arrays for bivariate ("scatter") plot have a stddev > 0 """ for ar, name in zip(arrays, names): if np.std(ar) == 0: sys.stderr.write( "No variation in '{}', skipping bivariate plots.\n".format(name.lower())) logging.info("Nanoplotter: No variation in {}, skipping bivariate plot".format(name)) return False else: return True
[ "def", "contains_variance", "(", "arrays", ",", "names", ")", ":", "for", "ar", ",", "name", "in", "zip", "(", "arrays", ",", "names", ")", ":", "if", "np", ".", "std", "(", "ar", ")", "==", "0", ":", "sys", ".", "stderr", ".", "write", "(", "\"No variation in '{}', skipping bivariate plots.\\n\"", ".", "format", "(", "name", ".", "lower", "(", ")", ")", ")", "logging", ".", "info", "(", "\"Nanoplotter: No variation in {}, skipping bivariate plot\"", ".", "format", "(", "name", ")", ")", "return", "False", "else", ":", "return", "True" ]
Make sure both arrays for bivariate ("scatter") plot have a stddev > 0
[ "Make", "sure", "both", "arrays", "for", "bivariate", "(", "scatter", ")", "plot", "have", "a", "stddev", ">", "0" ]
python
train
chaoss/grimoirelab-perceval
perceval/backends/core/launchpad.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/launchpad.py#L192-L219
def _fetch_issues(self, from_date): """Fetch the issues from a project (distribution/package)""" issues_groups = self.client.issues(start=from_date) for raw_issues in issues_groups: issues = json.loads(raw_issues)['entries'] for issue in issues: issue = self.__init_extra_issue_fields(issue) issue_id = self.__extract_issue_id(issue['bug_link']) for field in TARGET_ISSUE_FIELDS: if not issue[field]: continue if field == 'bug_link': issue['bug_data'] = self.__fetch_issue_data(issue_id) issue['activity_data'] = [activity for activity in self.__fetch_issue_activities(issue_id)] issue['messages_data'] = [message for message in self.__fetch_issue_messages(issue_id)] issue['attachments_data'] = [attachment for attachment in self.__fetch_issue_attachments(issue_id)] elif field == 'assignee_link': issue['assignee_data'] = self.__fetch_user_data('{ASSIGNEE}', issue[field]) elif field == 'owner_link': issue['owner_data'] = self.__fetch_user_data('{OWNER}', issue[field]) yield issue
[ "def", "_fetch_issues", "(", "self", ",", "from_date", ")", ":", "issues_groups", "=", "self", ".", "client", ".", "issues", "(", "start", "=", "from_date", ")", "for", "raw_issues", "in", "issues_groups", ":", "issues", "=", "json", ".", "loads", "(", "raw_issues", ")", "[", "'entries'", "]", "for", "issue", "in", "issues", ":", "issue", "=", "self", ".", "__init_extra_issue_fields", "(", "issue", ")", "issue_id", "=", "self", ".", "__extract_issue_id", "(", "issue", "[", "'bug_link'", "]", ")", "for", "field", "in", "TARGET_ISSUE_FIELDS", ":", "if", "not", "issue", "[", "field", "]", ":", "continue", "if", "field", "==", "'bug_link'", ":", "issue", "[", "'bug_data'", "]", "=", "self", ".", "__fetch_issue_data", "(", "issue_id", ")", "issue", "[", "'activity_data'", "]", "=", "[", "activity", "for", "activity", "in", "self", ".", "__fetch_issue_activities", "(", "issue_id", ")", "]", "issue", "[", "'messages_data'", "]", "=", "[", "message", "for", "message", "in", "self", ".", "__fetch_issue_messages", "(", "issue_id", ")", "]", "issue", "[", "'attachments_data'", "]", "=", "[", "attachment", "for", "attachment", "in", "self", ".", "__fetch_issue_attachments", "(", "issue_id", ")", "]", "elif", "field", "==", "'assignee_link'", ":", "issue", "[", "'assignee_data'", "]", "=", "self", ".", "__fetch_user_data", "(", "'{ASSIGNEE}'", ",", "issue", "[", "field", "]", ")", "elif", "field", "==", "'owner_link'", ":", "issue", "[", "'owner_data'", "]", "=", "self", ".", "__fetch_user_data", "(", "'{OWNER}'", ",", "issue", "[", "field", "]", ")", "yield", "issue" ]
Fetch the issues from a project (distribution/package)
[ "Fetch", "the", "issues", "from", "a", "project", "(", "distribution", "/", "package", ")" ]
python
test
opencobra/memote
memote/experimental/experimental_base.py
https://github.com/opencobra/memote/blob/276630fcd4449fb7b914186edfd38c239e7052df/memote/experimental/experimental_base.py#L72-L88
def load(self, dtype_conversion=None): """ Load the data table and corresponding validation schema. Parameters ---------- dtype_conversion : dict Column names as keys and corresponding type for loading the data. Please take a look at the `pandas documentation <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__ for detailed explanations. """ self.data = read_tabular(self.filename, dtype_conversion) with open_text(memote.experimental.schemata, self.SCHEMA, encoding="utf-8") as file_handle: self.schema = json.load(file_handle)
[ "def", "load", "(", "self", ",", "dtype_conversion", "=", "None", ")", ":", "self", ".", "data", "=", "read_tabular", "(", "self", ".", "filename", ",", "dtype_conversion", ")", "with", "open_text", "(", "memote", ".", "experimental", ".", "schemata", ",", "self", ".", "SCHEMA", ",", "encoding", "=", "\"utf-8\"", ")", "as", "file_handle", ":", "self", ".", "schema", "=", "json", ".", "load", "(", "file_handle", ")" ]
Load the data table and corresponding validation schema. Parameters ---------- dtype_conversion : dict Column names as keys and corresponding type for loading the data. Please take a look at the `pandas documentation <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__ for detailed explanations.
[ "Load", "the", "data", "table", "and", "corresponding", "validation", "schema", "." ]
python
train
semente/django-smuggler
smuggler/views.py
https://github.com/semente/django-smuggler/blob/3be76f4e94e50e927a55a60741fac1a793df83de/smuggler/views.py#L63-L71
def dump_data(request): """Exports data from whole project. """ # Try to grab app_label data app_label = request.GET.get('app_label', []) if app_label: app_label = app_label.split(',') return dump_to_response(request, app_label=app_label, exclude=settings.SMUGGLER_EXCLUDE_LIST)
[ "def", "dump_data", "(", "request", ")", ":", "# Try to grab app_label data", "app_label", "=", "request", ".", "GET", ".", "get", "(", "'app_label'", ",", "[", "]", ")", "if", "app_label", ":", "app_label", "=", "app_label", ".", "split", "(", "','", ")", "return", "dump_to_response", "(", "request", ",", "app_label", "=", "app_label", ",", "exclude", "=", "settings", ".", "SMUGGLER_EXCLUDE_LIST", ")" ]
Exports data from whole project.
[ "Exports", "data", "from", "whole", "project", "." ]
python
train
shad7/tvrenamer
tvrenamer/services/tvdb.py
https://github.com/shad7/tvrenamer/blob/7fb59cb02669357e73b7acb92dcb6d74fdff4654/tvrenamer/services/tvdb.py#L46-L57
def get_series_by_name(self, series_name): """Perform lookup for series :param str series_name: series name found within filename :returns: instance of series :rtype: object """ try: return self.api.search_series(name=series_name), None except exceptions.TVDBRequestException as err: LOG.exception('search for series %s failed', series_name) return None, _as_str(err)
[ "def", "get_series_by_name", "(", "self", ",", "series_name", ")", ":", "try", ":", "return", "self", ".", "api", ".", "search_series", "(", "name", "=", "series_name", ")", ",", "None", "except", "exceptions", ".", "TVDBRequestException", "as", "err", ":", "LOG", ".", "exception", "(", "'search for series %s failed'", ",", "series_name", ")", "return", "None", ",", "_as_str", "(", "err", ")" ]
Perform lookup for series :param str series_name: series name found within filename :returns: instance of series :rtype: object
[ "Perform", "lookup", "for", "series" ]
python
train
python-openxml/python-docx
docx/section.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/section.py#L419-L422
def _definition(self): """|HeaderPart| object containing content of this header.""" headerReference = self._sectPr.get_headerReference(self._hdrftr_index) return self._document_part.header_part(headerReference.rId)
[ "def", "_definition", "(", "self", ")", ":", "headerReference", "=", "self", ".", "_sectPr", ".", "get_headerReference", "(", "self", ".", "_hdrftr_index", ")", "return", "self", ".", "_document_part", ".", "header_part", "(", "headerReference", ".", "rId", ")" ]
|HeaderPart| object containing content of this header.
[ "|HeaderPart|", "object", "containing", "content", "of", "this", "header", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/inputsplitter.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/inputsplitter.py#L206-L227
def has_comment(src): """Indicate whether an input line has (i.e. ends in, or is) a comment. This uses tokenize, so it can distinguish comments from # inside strings. Parameters ---------- src : string A single line input string. Returns ------- Boolean: True if source has a comment. """ readline = StringIO(src).readline toktypes = set() try: for t in tokenize.generate_tokens(readline): toktypes.add(t[0]) except tokenize.TokenError: pass return(tokenize.COMMENT in toktypes)
[ "def", "has_comment", "(", "src", ")", ":", "readline", "=", "StringIO", "(", "src", ")", ".", "readline", "toktypes", "=", "set", "(", ")", "try", ":", "for", "t", "in", "tokenize", ".", "generate_tokens", "(", "readline", ")", ":", "toktypes", ".", "add", "(", "t", "[", "0", "]", ")", "except", "tokenize", ".", "TokenError", ":", "pass", "return", "(", "tokenize", ".", "COMMENT", "in", "toktypes", ")" ]
Indicate whether an input line has (i.e. ends in, or is) a comment. This uses tokenize, so it can distinguish comments from # inside strings. Parameters ---------- src : string A single line input string. Returns ------- Boolean: True if source has a comment.
[ "Indicate", "whether", "an", "input", "line", "has", "(", "i", ".", "e", ".", "ends", "in", "or", "is", ")", "a", "comment", ".", "This", "uses", "tokenize", "so", "it", "can", "distinguish", "comments", "from", "#", "inside", "strings", ".", "Parameters", "----------", "src", ":", "string", "A", "single", "line", "input", "string", ".", "Returns", "-------", "Boolean", ":", "True", "if", "source", "has", "a", "comment", "." ]
python
test
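A minimal standalone sketch of the same stdlib-based check (using io.StringIO directly rather than IPython's imports), showing that a '#' inside a string literal is not reported as a comment:

import io
import tokenize

def has_comment_sketch(src):
    # Collect token types; tokenize distinguishes real comments from '#' inside strings.
    toktypes = set()
    try:
        for tok in tokenize.generate_tokens(io.StringIO(src).readline):
            toktypes.add(tok[0])
    except tokenize.TokenError:
        pass
    return tokenize.COMMENT in toktypes

print(has_comment_sketch("x = 1  # set x"))         # True
print(has_comment_sketch("s = '# not a comment'"))  # False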
danielperna84/pyhomematic
pyhomematic/devicetypes/generic.py
https://github.com/danielperna84/pyhomematic/blob/8b91f3e84c83f05d289c740d507293a0d6759d8e/pyhomematic/devicetypes/generic.py#L270-L278
def UNREACH(self):
    """ Returns true if the device or any children is not reachable """
    if self._VALUES.get(PARAM_UNREACH, False):
        return True
    else:
        for device in self._hmchannels.values():
            if device.UNREACH:
                return True
    return False
[ "def", "UNREACH", "(", "self", ")", ":", "if", "self", ".", "_VALUES", ".", "get", "(", "PARAM_UNREACH", ",", "False", ")", ":", "return", "True", "else", ":", "for", "device", "in", "self", ".", "_hmchannels", ".", "values", "(", ")", ":", "if", "device", ".", "UNREACH", ":", "return", "True", "return", "False" ]
Returns true if the device or any children is not reachable
[ "Returns", "true", "if", "the", "device", "or", "any", "children", "is", "not", "reachable" ]
python
train
aestrivex/bctpy
bct/algorithms/core.py
https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/core.py#L300-L365
def kcore_bu(CIJ, k, peel=False):
    '''
    The k-core is the largest subnetwork comprising nodes of degree at
    least k. This function computes the k-core for a given binary
    undirected connection matrix by recursively peeling off nodes with
    degree lower than k, until no such nodes remain.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        binary undirected connection matrix
    k : int
        level of k-core
    peel : bool
        If True, additionally calculates peelorder and peellevel.
        Defaults to False.

    Returns
    -------
    CIJkcore : NxN np.ndarray
        connection matrix of the k-core. This matrix only contains nodes of
        degree at least k.
    kn : int
        size of k-core
    peelorder : Nx1 np.ndarray
        indices in the order in which they were peeled away during k-core
        decomposition. only returned if peel is specified.
    peellevel : Nx1 np.ndarray
        corresponding level - nodes in at the same level have been peeled
        away at the same time. only return if peel is specified

    Notes
    -----
    'peelorder' and 'peellevel' are similar the the k-core sub-shells
    described in Modha and Singh (2010).
    '''
    if peel:
        peelorder, peellevel = ([], [])
    iter = 0
    CIJkcore = CIJ.copy()

    while True:
        deg = degrees_und(CIJkcore)  # get degrees of matrix

        # find nodes with degree <k
        ff, = np.where(np.logical_and(deg < k, deg > 0))

        if ff.size == 0:
            break  # if none found -> stop

        # else peel away found nodes
        iter += 1
        CIJkcore[ff, :] = 0
        CIJkcore[:, ff] = 0

        if peel:
            peelorder.append(ff)
        if peel:
            peellevel.append(iter * np.ones((len(ff),)))

    kn = np.sum(deg > 0)

    if peel:
        return CIJkcore, kn, peelorder, peellevel
    else:
        return CIJkcore, kn
[ "def", "kcore_bu", "(", "CIJ", ",", "k", ",", "peel", "=", "False", ")", ":", "if", "peel", ":", "peelorder", ",", "peellevel", "=", "(", "[", "]", ",", "[", "]", ")", "iter", "=", "0", "CIJkcore", "=", "CIJ", ".", "copy", "(", ")", "while", "True", ":", "deg", "=", "degrees_und", "(", "CIJkcore", ")", "# get degrees of matrix", "# find nodes with degree <k", "ff", ",", "=", "np", ".", "where", "(", "np", ".", "logical_and", "(", "deg", "<", "k", ",", "deg", ">", "0", ")", ")", "if", "ff", ".", "size", "==", "0", ":", "break", "# if none found -> stop", "# else peel away found nodes", "iter", "+=", "1", "CIJkcore", "[", "ff", ",", ":", "]", "=", "0", "CIJkcore", "[", ":", ",", "ff", "]", "=", "0", "if", "peel", ":", "peelorder", ".", "append", "(", "ff", ")", "if", "peel", ":", "peellevel", ".", "append", "(", "iter", "*", "np", ".", "ones", "(", "(", "len", "(", "ff", ")", ",", ")", ")", ")", "kn", "=", "np", ".", "sum", "(", "deg", ">", "0", ")", "if", "peel", ":", "return", "CIJkcore", ",", "kn", ",", "peelorder", ",", "peellevel", "else", ":", "return", "CIJkcore", ",", "kn" ]
The k-core is the largest subnetwork comprising nodes of degree at least k. This function computes the k-core for a given binary undirected connection matrix by recursively peeling off nodes with degree lower than k, until no such nodes remain. Parameters ---------- CIJ : NxN np.ndarray binary undirected connection matrix k : int level of k-core peel : bool If True, additionally calculates peelorder and peellevel. Defaults to False. Returns ------- CIJkcore : NxN np.ndarray connection matrix of the k-core. This matrix only contains nodes of degree at least k. kn : int size of k-core peelorder : Nx1 np.ndarray indices in the order in which they were peeled away during k-core decomposition. only returned if peel is specified. peellevel : Nx1 np.ndarray corresponding level - nodes in at the same level have been peeled away at the same time. only return if peel is specified Notes ----- 'peelorder' and 'peellevel' are similar the the k-core sub-shells described in Modha and Singh (2010).
[ "The", "k", "-", "core", "is", "the", "largest", "subnetwork", "comprising", "nodes", "of", "degree", "at", "least", "k", ".", "This", "function", "computes", "the", "k", "-", "core", "for", "a", "given", "binary", "undirected", "connection", "matrix", "by", "recursively", "peeling", "off", "nodes", "with", "degree", "lower", "than", "k", "until", "no", "such", "nodes", "remain", "." ]
python
train
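A minimal NumPy-only sketch of the same peeling loop (degrees taken as column sums of the binary matrix, standing in for degrees_und), run on a toy network where a 4-node clique survives as the 3-core:

import numpy as np

# Toy 5-node undirected binary network: nodes 0-3 form a clique, node 4 hangs off node 0.
CIJ = np.zeros((5, 5), dtype=int)
for i in (0, 1, 2, 3):
    for j in (0, 1, 2, 3):
        if i != j:
            CIJ[i, j] = 1
CIJ[0, 4] = CIJ[4, 0] = 1

k = 3
CIJkcore = CIJ.copy()
while True:
    deg = CIJkcore.sum(axis=0)               # degrees of the binary undirected matrix
    ff = np.where((deg < k) & (deg > 0))[0]  # nodes with degree below k
    if ff.size == 0:
        break
    CIJkcore[ff, :] = 0                      # peel away low-degree nodes
    CIJkcore[:, ff] = 0

kn = int(np.sum(CIJkcore.sum(axis=0) > 0))
print(kn)  # 4 -> the 3-core is the four-node clique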
kdeldycke/maildir-deduplicate
maildir_deduplicate/mail.py
https://github.com/kdeldycke/maildir-deduplicate/blob/f1c6ff25b80c6c1a4dc2dc7a65b34d808b0b7733/maildir_deduplicate/mail.py#L128-L138
def hash_key(self):
    """ Returns the canonical hash of a mail. """
    if self.conf.message_id:
        message_id = self.message.get('Message-Id')
        if message_id:
            return message_id.strip()
        logger.error(
            "No Message-ID in {}: {}".format(self.path, self.header_text))
        raise MissingMessageID
    return hashlib.sha224(self.canonical_headers).hexdigest()
[ "def", "hash_key", "(", "self", ")", ":", "if", "self", ".", "conf", ".", "message_id", ":", "message_id", "=", "self", ".", "message", ".", "get", "(", "'Message-Id'", ")", "if", "message_id", ":", "return", "message_id", ".", "strip", "(", ")", "logger", ".", "error", "(", "\"No Message-ID in {}: {}\"", ".", "format", "(", "self", ".", "path", ",", "self", ".", "header_text", ")", ")", "raise", "MissingMessageID", "return", "hashlib", ".", "sha224", "(", "self", ".", "canonical_headers", ")", ".", "hexdigest", "(", ")" ]
Returns the canonical hash of a mail.
[ "Returns", "the", "canonical", "hash", "of", "a", "mail", "." ]
python
train
python-openxml/python-docx
docx/oxml/table.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/oxml/table.py#L511-L517
def _add_width_of(self, other_tc):
    """
    Add the width of *other_tc* to this cell. Does nothing if either this
    tc or *other_tc* does not have a specified width.
    """
    if self.width and other_tc.width:
        self.width += other_tc.width
[ "def", "_add_width_of", "(", "self", ",", "other_tc", ")", ":", "if", "self", ".", "width", "and", "other_tc", ".", "width", ":", "self", ".", "width", "+=", "other_tc", ".", "width" ]
Add the width of *other_tc* to this cell. Does nothing if either this tc or *other_tc* does not have a specified width.
[ "Add", "the", "width", "of", "*", "other_tc", "*", "to", "this", "cell", ".", "Does", "nothing", "if", "either", "this", "tc", "or", "*", "other_tc", "*", "does", "not", "have", "a", "specified", "width", "." ]
python
train
candango/firenado
firenado/config.py
https://github.com/candango/firenado/blob/4b1f628e485b521e161d64169c46a9818f26949f/firenado/config.py#L124-L143
def process_config(config, config_data):
    """ Populates config with data from the configuration data dict. It
    handles components, data, log, management and session sections from the
    configuration data.

    :param config: The config reference of the object that will hold the
    configuration data from the config_data.
    :param config_data: The configuration data loaded from a configuration
    file.
    """
    if 'components' in config_data:
        process_components_config_section(config, config_data['components'])
    if 'data' in config_data:
        process_data_config_section(config, config_data['data'])
    if 'log' in config_data:
        process_log_config_section(config, config_data['log'])
    if 'management' in config_data:
        process_management_config_section(config, config_data['management'])
    if 'session' in config_data:
        process_session_config_section(config, config_data['session'])
[ "def", "process_config", "(", "config", ",", "config_data", ")", ":", "if", "'components'", "in", "config_data", ":", "process_components_config_section", "(", "config", ",", "config_data", "[", "'components'", "]", ")", "if", "'data'", "in", "config_data", ":", "process_data_config_section", "(", "config", ",", "config_data", "[", "'data'", "]", ")", "if", "'log'", "in", "config_data", ":", "process_log_config_section", "(", "config", ",", "config_data", "[", "'log'", "]", ")", "if", "'management'", "in", "config_data", ":", "process_management_config_section", "(", "config", ",", "config_data", "[", "'management'", "]", ")", "if", "'session'", "in", "config_data", ":", "process_session_config_section", "(", "config", ",", "config_data", "[", "'session'", "]", ")" ]
Populates config with data from the configuration data dict. It handles components, data, log, management and session sections from the configuration data. :param config: The config reference of the object that will hold the configuration data from the config_data. :param config_data: The configuration data loaded from a configuration file.
[ "Populates", "config", "with", "data", "from", "the", "configuration", "data", "dict", ".", "It", "handles", "components", "data", "log", "management", "and", "session", "sections", "from", "the", "configuration", "data", "." ]
python
train
junzis/pyModeS
pyModeS/decoder/bds/bds44.py
https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/decoder/bds/bds44.py#L92-L118
def temp44(msg):
    """Static air temperature.

    Args:
        msg (String): 28 bytes hexadecimal message string

    Returns:
        float, float: temperature and alternative temperature in Celsius degree.
            Note: Two values returns due to what seems to be an inconsistancy
            error in ICAO 9871 (2008) Appendix A-67.

    """
    d = hex2bin(data(msg))

    sign = int(d[23])
    value = bin2int(d[24:34])

    if sign:
        value = value - 1024

    temp = value * 0.25  # celsius
    temp = round(temp, 2)

    temp_alternative = value * 0.125  # celsius
    temp_alternative = round(temp, 3)

    return temp, temp_alternative
[ "def", "temp44", "(", "msg", ")", ":", "d", "=", "hex2bin", "(", "data", "(", "msg", ")", ")", "sign", "=", "int", "(", "d", "[", "23", "]", ")", "value", "=", "bin2int", "(", "d", "[", "24", ":", "34", "]", ")", "if", "sign", ":", "value", "=", "value", "-", "1024", "temp", "=", "value", "*", "0.25", "# celsius", "temp", "=", "round", "(", "temp", ",", "2", ")", "temp_alternative", "=", "value", "*", "0.125", "# celsius", "temp_alternative", "=", "round", "(", "temp", ",", "3", ")", "return", "temp", ",", "temp_alternative" ]
Static air temperature. Args: msg (String): 28 bytes hexadecimal message string Returns: float, float: temperature and alternative temperature in Celsius degree. Note: Two values returns due to what seems to be an inconsistancy error in ICAO 9871 (2008) Appendix A-67.
[ "Static", "air", "temperature", "." ]
python
train
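A small standalone sketch of the sign/scale arithmetic shown above, with hypothetical inputs (a sign bit plus the 10-bit magnitude) rather than a real Mode S message:

def decode_temp(sign_bit, value10):
    # 10-bit magnitude with a separate sign bit, 0.25 degC resolution.
    value = value10 - 1024 if sign_bit else value10
    return round(value * 0.25, 2)

print(decode_temp(0, 100))  # 25.0 degC
print(decode_temp(1, 924))  # -25.0 degC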
6809/MC6809
MC6809/components/mc6809_base.py
https://github.com/6809/MC6809/blob/6ba2f5106df46689017b5d0b6d84d43b7ee6a240/MC6809/components/mc6809_base.py#L259-L265
def add_sync_callback(self, callback_cycles, callback):
    """ Add a CPU cycle triggered callback """
    self.sync_callbacks_cyles[callback] = 0
    self.sync_callbacks.append([callback_cycles, callback])
    if self.quickest_sync_callback_cycles is None or \
            self.quickest_sync_callback_cycles > callback_cycles:
        self.quickest_sync_callback_cycles = callback_cycles
[ "def", "add_sync_callback", "(", "self", ",", "callback_cycles", ",", "callback", ")", ":", "self", ".", "sync_callbacks_cyles", "[", "callback", "]", "=", "0", "self", ".", "sync_callbacks", ".", "append", "(", "[", "callback_cycles", ",", "callback", "]", ")", "if", "self", ".", "quickest_sync_callback_cycles", "is", "None", "or", "self", ".", "quickest_sync_callback_cycles", ">", "callback_cycles", ":", "self", ".", "quickest_sync_callback_cycles", "=", "callback_cycles" ]
Add a CPU cycle triggered callback
[ "Add", "a", "CPU", "cycle", "triggered", "callback" ]
python
train
yamcs/yamcs-python
yamcs-client/yamcs/storage/client.py
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/yamcs/storage/client.py#L63-L73
def create_bucket(self, instance, bucket_name):
    """
    Create a new bucket in the specified instance.

    :param str instance: A Yamcs instance name.
    :param str bucket_name: The name of the bucket.
    """
    req = rest_pb2.CreateBucketRequest()
    req.name = bucket_name
    url = '/buckets/{}'.format(instance)
    self._client.post_proto(url, data=req.SerializeToString())
[ "def", "create_bucket", "(", "self", ",", "instance", ",", "bucket_name", ")", ":", "req", "=", "rest_pb2", ".", "CreateBucketRequest", "(", ")", "req", ".", "name", "=", "bucket_name", "url", "=", "'/buckets/{}'", ".", "format", "(", "instance", ")", "self", ".", "_client", ".", "post_proto", "(", "url", ",", "data", "=", "req", ".", "SerializeToString", "(", ")", ")" ]
Create a new bucket in the specified instance. :param str instance: A Yamcs instance name. :param str bucket_name: The name of the bucket.
[ "Create", "a", "new", "bucket", "in", "the", "specified", "instance", "." ]
python
train
thiagopbueno/rddl2tf
rddl2tf/compiler.py
https://github.com/thiagopbueno/rddl2tf/blob/f7c03d3a74d2663807c1e23e04eeed2e85166b71/rddl2tf/compiler.py#L340-L377
def compile_action_bound_constraints(self,
                                     state: Sequence[tf.Tensor]) -> Dict[str, Bounds]:
    '''Compiles all actions bounds for the given `state`.

    Args:
        state (Sequence[tf.Tensor]): The current state fluents.

    Returns:
        A mapping from action names to a pair of
        :obj:`rddl2tf.fluent.TensorFluent` representing
        its lower and upper bounds.
    '''
    scope = self.action_precondition_scope(state)

    lower_bounds = self.rddl.domain.action_lower_bound_constraints
    upper_bounds = self.rddl.domain.action_upper_bound_constraints

    with self.graph.as_default():
        with tf.name_scope('action_bound_constraints'):

            bounds = {}
            for name in self.rddl.domain.action_fluent_ordering:

                lower_expr = lower_bounds.get(name)
                lower = None
                if lower_expr is not None:
                    with tf.name_scope('lower_bound'):
                        lower = self._compile_expression(lower_expr, scope)

                upper_expr = upper_bounds.get(name)
                upper = None
                if upper_expr is not None:
                    with tf.name_scope('upper_bound'):
                        upper = self._compile_expression(upper_expr, scope)

                bounds[name] = (lower, upper)

            return bounds
[ "def", "compile_action_bound_constraints", "(", "self", ",", "state", ":", "Sequence", "[", "tf", ".", "Tensor", "]", ")", "->", "Dict", "[", "str", ",", "Bounds", "]", ":", "scope", "=", "self", ".", "action_precondition_scope", "(", "state", ")", "lower_bounds", "=", "self", ".", "rddl", ".", "domain", ".", "action_lower_bound_constraints", "upper_bounds", "=", "self", ".", "rddl", ".", "domain", ".", "action_upper_bound_constraints", "with", "self", ".", "graph", ".", "as_default", "(", ")", ":", "with", "tf", ".", "name_scope", "(", "'action_bound_constraints'", ")", ":", "bounds", "=", "{", "}", "for", "name", "in", "self", ".", "rddl", ".", "domain", ".", "action_fluent_ordering", ":", "lower_expr", "=", "lower_bounds", ".", "get", "(", "name", ")", "lower", "=", "None", "if", "lower_expr", "is", "not", "None", ":", "with", "tf", ".", "name_scope", "(", "'lower_bound'", ")", ":", "lower", "=", "self", ".", "_compile_expression", "(", "lower_expr", ",", "scope", ")", "upper_expr", "=", "upper_bounds", ".", "get", "(", "name", ")", "upper", "=", "None", "if", "upper_expr", "is", "not", "None", ":", "with", "tf", ".", "name_scope", "(", "'upper_bound'", ")", ":", "upper", "=", "self", ".", "_compile_expression", "(", "upper_expr", ",", "scope", ")", "bounds", "[", "name", "]", "=", "(", "lower", ",", "upper", ")", "return", "bounds" ]
Compiles all actions bounds for the given `state`. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from action names to a pair of :obj:`rddl2tf.fluent.TensorFluent` representing its lower and upper bounds.
[ "Compiles", "all", "actions", "bounds", "for", "the", "given", "state", "." ]
python
train
nutechsoftware/alarmdecoder
alarmdecoder/decoder.py
https://github.com/nutechsoftware/alarmdecoder/blob/b0c014089e24455228cb4402cf30ba98157578cd/alarmdecoder/decoder.py#L483-L497
def _handle_expander_message(self, data):
    """
    Handle expander messages.

    :param data: expander message to parse
    :type data: string

    :returns: :py:class:`~alarmdecoder.messages.ExpanderMessage`
    """
    msg = ExpanderMessage(data)

    self._update_internal_states(msg)
    self.on_expander_message(message=msg)

    return msg
[ "def", "_handle_expander_message", "(", "self", ",", "data", ")", ":", "msg", "=", "ExpanderMessage", "(", "data", ")", "self", ".", "_update_internal_states", "(", "msg", ")", "self", ".", "on_expander_message", "(", "message", "=", "msg", ")", "return", "msg" ]
Handle expander messages. :param data: expander message to parse :type data: string :returns: :py:class:`~alarmdecoder.messages.ExpanderMessage`
[ "Handle", "expander", "messages", "." ]
python
train
mediawiki-utilities/python-mwpersistence
mwpersistence/state.py
https://github.com/mediawiki-utilities/python-mwpersistence/blob/2b98847fb8acaca38b3cbf94bde3fd7e27d2b67d/mwpersistence/state.py#L113-L135
def update(self, text, revision=None):
    """
    Modifies the internal state based a change to the content and returns
    the sets of words added and removed.

    :Parameters:
        text : str
            The text content of a revision
        revision : `mixed`
            Revision metadata

    :Returns:
        A triple of lists:

        current_tokens : `list` ( :class:`~mwpersistence.Token` )
            A sequence of Tokens representing the revision that was just
            processed.
        tokens_added : `list` ( :class:`~mwpersistence.Token` )
            Tokens that were added while updating state.
        tokens_removed : `list` ( :class:`~mwpersistence.Token` )
            Tokens that were removed while updating state.
    """
    return self._update(text=text, revision=revision)
[ "def", "update", "(", "self", ",", "text", ",", "revision", "=", "None", ")", ":", "return", "self", ".", "_update", "(", "text", "=", "text", ",", "revision", "=", "revision", ")" ]
Modifies the internal state based a change to the content and returns the sets of words added and removed. :Parameters: text : str The text content of a revision revision : `mixed` Revision metadata :Returns: A triple of lists: current_tokens : `list` ( :class:`~mwpersistence.Token` ) A sequence of Tokens representing the revision that was just processed. tokens_added : `list` ( :class:`~mwpersistence.Token` ) Tokens that were added while updating state. tokens_removed : `list` ( :class:`~mwpersistence.Token` ) Tokens that were removed while updating state.
[ "Modifies", "the", "internal", "state", "based", "a", "change", "to", "the", "content", "and", "returns", "the", "sets", "of", "words", "added", "and", "removed", "." ]
python
train
SmokinCaterpillar/pypet
pypet/storageservice.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/storageservice.py#L1929-L2014
def _trj_load_meta_data(self, traj, load_data, as_new, with_run_information, force): """Loads meta information about the trajectory Checks if the version number does not differ from current pypet version Loads, comment, timestamp, name, version from disk in case trajectory is not loaded as new. Updates the run information as well. """ metatable = self._overview_group.info metarow = metatable[0] try: version = metarow['version'].decode('utf-8') except (IndexError, ValueError) as ke: self._logger.error('Could not check version due to: %s' % str(ke)) version = '`COULD NOT BE LOADED`' try: python = metarow['python'].decode('utf-8') except (IndexError, ValueError) as ke: self._logger.error('Could not check version due to: %s' % str(ke)) python = '`COULD NOT BE LOADED`' self._trj_check_version(version, python, force) # Load the skeleton information self._grp_load_group(traj, load_data=load_data, with_links=False, recursive=False, _traj=traj, _as_new=as_new, _hdf5_group=self._trajectory_group) if as_new: length = int(metarow['length']) for irun in range(length): traj._add_run_info(irun) else: traj._comment = metarow['comment'].decode('utf-8') traj._timestamp = float(metarow['timestamp']) traj._trajectory_timestamp = traj._timestamp traj._time = metarow['time'].decode('utf-8') traj._trajectory_time = traj._time traj._name = metarow['name'].decode('utf-8') traj._trajectory_name = traj._name traj._version = version traj._python = python single_run_table = self._overview_group.runs if with_run_information: for row in single_run_table.iterrows(): name = row['name'].decode('utf-8') idx = int(row['idx']) timestamp = float(row['timestamp']) time_ = row['time'].decode('utf-8') completed = int(row['completed']) summary = row['parameter_summary'].decode('utf-8') hexsha = row['short_environment_hexsha'].decode('utf-8') # To allow backwards compatibility we need this try catch block try: runtime = row['runtime'].decode('utf-8') finish_timestamp = float(row['finish_timestamp']) except (IndexError, ValueError) as ke: runtime = '' finish_timestamp = 0.0 self._logger.debug('Could not load runtime, ' + repr(ke)) info_dict = {'idx': idx, 'timestamp': timestamp, 'finish_timestamp': finish_timestamp, 'runtime': runtime, 'time': time_, 'completed': completed, 'name': name, 'parameter_summary': summary, 'short_environment_hexsha': hexsha} traj._add_run_info(**info_dict) else: traj._length = single_run_table.nrows # Load explorations self._trj_load_exploration(traj) # Load the hdf5 config data: self._srvc_load_hdf5_settings()
[ "def", "_trj_load_meta_data", "(", "self", ",", "traj", ",", "load_data", ",", "as_new", ",", "with_run_information", ",", "force", ")", ":", "metatable", "=", "self", ".", "_overview_group", ".", "info", "metarow", "=", "metatable", "[", "0", "]", "try", ":", "version", "=", "metarow", "[", "'version'", "]", ".", "decode", "(", "'utf-8'", ")", "except", "(", "IndexError", ",", "ValueError", ")", "as", "ke", ":", "self", ".", "_logger", ".", "error", "(", "'Could not check version due to: %s'", "%", "str", "(", "ke", ")", ")", "version", "=", "'`COULD NOT BE LOADED`'", "try", ":", "python", "=", "metarow", "[", "'python'", "]", ".", "decode", "(", "'utf-8'", ")", "except", "(", "IndexError", ",", "ValueError", ")", "as", "ke", ":", "self", ".", "_logger", ".", "error", "(", "'Could not check version due to: %s'", "%", "str", "(", "ke", ")", ")", "python", "=", "'`COULD NOT BE LOADED`'", "self", ".", "_trj_check_version", "(", "version", ",", "python", ",", "force", ")", "# Load the skeleton information", "self", ".", "_grp_load_group", "(", "traj", ",", "load_data", "=", "load_data", ",", "with_links", "=", "False", ",", "recursive", "=", "False", ",", "_traj", "=", "traj", ",", "_as_new", "=", "as_new", ",", "_hdf5_group", "=", "self", ".", "_trajectory_group", ")", "if", "as_new", ":", "length", "=", "int", "(", "metarow", "[", "'length'", "]", ")", "for", "irun", "in", "range", "(", "length", ")", ":", "traj", ".", "_add_run_info", "(", "irun", ")", "else", ":", "traj", ".", "_comment", "=", "metarow", "[", "'comment'", "]", ".", "decode", "(", "'utf-8'", ")", "traj", ".", "_timestamp", "=", "float", "(", "metarow", "[", "'timestamp'", "]", ")", "traj", ".", "_trajectory_timestamp", "=", "traj", ".", "_timestamp", "traj", ".", "_time", "=", "metarow", "[", "'time'", "]", ".", "decode", "(", "'utf-8'", ")", "traj", ".", "_trajectory_time", "=", "traj", ".", "_time", "traj", ".", "_name", "=", "metarow", "[", "'name'", "]", ".", "decode", "(", "'utf-8'", ")", "traj", ".", "_trajectory_name", "=", "traj", ".", "_name", "traj", ".", "_version", "=", "version", "traj", ".", "_python", "=", "python", "single_run_table", "=", "self", ".", "_overview_group", ".", "runs", "if", "with_run_information", ":", "for", "row", "in", "single_run_table", ".", "iterrows", "(", ")", ":", "name", "=", "row", "[", "'name'", "]", ".", "decode", "(", "'utf-8'", ")", "idx", "=", "int", "(", "row", "[", "'idx'", "]", ")", "timestamp", "=", "float", "(", "row", "[", "'timestamp'", "]", ")", "time_", "=", "row", "[", "'time'", "]", ".", "decode", "(", "'utf-8'", ")", "completed", "=", "int", "(", "row", "[", "'completed'", "]", ")", "summary", "=", "row", "[", "'parameter_summary'", "]", ".", "decode", "(", "'utf-8'", ")", "hexsha", "=", "row", "[", "'short_environment_hexsha'", "]", ".", "decode", "(", "'utf-8'", ")", "# To allow backwards compatibility we need this try catch block", "try", ":", "runtime", "=", "row", "[", "'runtime'", "]", ".", "decode", "(", "'utf-8'", ")", "finish_timestamp", "=", "float", "(", "row", "[", "'finish_timestamp'", "]", ")", "except", "(", "IndexError", ",", "ValueError", ")", "as", "ke", ":", "runtime", "=", "''", "finish_timestamp", "=", "0.0", "self", ".", "_logger", ".", "debug", "(", "'Could not load runtime, '", "+", "repr", "(", "ke", ")", ")", "info_dict", "=", "{", "'idx'", ":", "idx", ",", "'timestamp'", ":", "timestamp", ",", "'finish_timestamp'", ":", "finish_timestamp", ",", "'runtime'", ":", "runtime", ",", "'time'", ":", "time_", ",", "'completed'", ":", 
"completed", ",", "'name'", ":", "name", ",", "'parameter_summary'", ":", "summary", ",", "'short_environment_hexsha'", ":", "hexsha", "}", "traj", ".", "_add_run_info", "(", "*", "*", "info_dict", ")", "else", ":", "traj", ".", "_length", "=", "single_run_table", ".", "nrows", "# Load explorations", "self", ".", "_trj_load_exploration", "(", "traj", ")", "# Load the hdf5 config data:", "self", ".", "_srvc_load_hdf5_settings", "(", ")" ]
Loads meta information about the trajectory Checks if the version number does not differ from current pypet version Loads, comment, timestamp, name, version from disk in case trajectory is not loaded as new. Updates the run information as well.
[ "Loads", "meta", "information", "about", "the", "trajectory" ]
python
test
raphaelvallat/pingouin
pingouin/correlation.py
https://github.com/raphaelvallat/pingouin/blob/58b19fa4fffbfe09d58b456e3926a148249e4d9b/pingouin/correlation.py#L664-L758
def rm_corr(data=None, x=None, y=None, subject=None, tail='two-sided'): """Repeated measures correlation. Parameters ---------- data : pd.DataFrame Dataframe. x, y : string Name of columns in ``data`` containing the two dependent variables. subject : string Name of column in ``data`` containing the subject indicator. tail : string Specify whether to return 'one-sided' or 'two-sided' p-value. Returns ------- stats : pandas DataFrame Test summary :: 'r' : Repeated measures correlation coefficient 'dof' : Degrees of freedom 'pval' : one or two tailed p-value 'CI95' : 95% parametric confidence intervals 'power' : achieved power of the test (= 1 - type II error). Notes ----- Repeated measures correlation (rmcorr) is a statistical technique for determining the common within-individual association for paired measures assessed on two or more occasions for multiple individuals. From Bakdash and Marusich (2017): "Rmcorr accounts for non-independence among observations using analysis of covariance (ANCOVA) to statistically adjust for inter-individual variability. By removing measured variance between-participants, rmcorr provides the best linear fit for each participant using parallel regression lines (the same slope) with varying intercepts. Like a Pearson correlation coefficient, the rmcorr coefficient is bounded by − 1 to 1 and represents the strength of the linear association between two variables." Results have been tested against the `rmcorr` R package. Please note that NaN are automatically removed from the dataframe (listwise deletion). References ---------- .. [1] Bakdash, J.Z., Marusich, L.R., 2017. Repeated Measures Correlation. Front. Psychol. 8, 456. https://doi.org/10.3389/fpsyg.2017.00456 .. [2] Bland, J. M., & Altman, D. G. (1995). Statistics notes: Calculating correlation coefficients with repeated observations: Part 1—correlation within subjects. Bmj, 310(6977), 446. .. [3] https://github.com/cran/rmcorr Examples -------- >>> import pingouin as pg >>> df = pg.read_dataset('rm_corr') >>> pg.rm_corr(data=df, x='pH', y='PacO2', subject='Subject') r dof pval CI95% power rm_corr -0.507 38 0.000847 [-0.71, -0.23] 0.93 """ from pingouin import ancova, power_corr # Safety checks assert isinstance(data, pd.DataFrame), 'Data must be a DataFrame' assert x in data, 'The %s column is not in data.' % x assert y in data, 'The %s column is not in data.' % y assert subject in data, 'The %s column is not in data.' % subject if data[subject].nunique() < 3: raise ValueError('rm_corr requires at least 3 unique subjects.') # Remove missing values data = data[[x, y, subject]].dropna(axis=0) # Using PINGOUIN aov, bw = ancova(dv=y, covar=x, between=subject, data=data, return_bw=True) sign = np.sign(bw) dof = int(aov.loc[2, 'DF']) n = dof + 2 ssfactor = aov.loc[1, 'SS'] sserror = aov.loc[2, 'SS'] rm = sign * np.sqrt(ssfactor / (ssfactor + sserror)) pval = aov.loc[1, 'p-unc'] pval *= 0.5 if tail == 'one-sided' else 1 ci = compute_esci(stat=rm, nx=n, eftype='pearson').tolist() pwr = power_corr(r=rm, n=n, tail=tail) # Convert to Dataframe stats = pd.DataFrame({"r": round(rm, 3), "dof": int(dof), "pval": pval, "CI95%": str(ci), "power": round(pwr, 3)}, index=["rm_corr"]) return stats
[ "def", "rm_corr", "(", "data", "=", "None", ",", "x", "=", "None", ",", "y", "=", "None", ",", "subject", "=", "None", ",", "tail", "=", "'two-sided'", ")", ":", "from", "pingouin", "import", "ancova", ",", "power_corr", "# Safety checks", "assert", "isinstance", "(", "data", ",", "pd", ".", "DataFrame", ")", ",", "'Data must be a DataFrame'", "assert", "x", "in", "data", ",", "'The %s column is not in data.'", "%", "x", "assert", "y", "in", "data", ",", "'The %s column is not in data.'", "%", "y", "assert", "subject", "in", "data", ",", "'The %s column is not in data.'", "%", "subject", "if", "data", "[", "subject", "]", ".", "nunique", "(", ")", "<", "3", ":", "raise", "ValueError", "(", "'rm_corr requires at least 3 unique subjects.'", ")", "# Remove missing values", "data", "=", "data", "[", "[", "x", ",", "y", ",", "subject", "]", "]", ".", "dropna", "(", "axis", "=", "0", ")", "# Using PINGOUIN", "aov", ",", "bw", "=", "ancova", "(", "dv", "=", "y", ",", "covar", "=", "x", ",", "between", "=", "subject", ",", "data", "=", "data", ",", "return_bw", "=", "True", ")", "sign", "=", "np", ".", "sign", "(", "bw", ")", "dof", "=", "int", "(", "aov", ".", "loc", "[", "2", ",", "'DF'", "]", ")", "n", "=", "dof", "+", "2", "ssfactor", "=", "aov", ".", "loc", "[", "1", ",", "'SS'", "]", "sserror", "=", "aov", ".", "loc", "[", "2", ",", "'SS'", "]", "rm", "=", "sign", "*", "np", ".", "sqrt", "(", "ssfactor", "/", "(", "ssfactor", "+", "sserror", ")", ")", "pval", "=", "aov", ".", "loc", "[", "1", ",", "'p-unc'", "]", "pval", "*=", "0.5", "if", "tail", "==", "'one-sided'", "else", "1", "ci", "=", "compute_esci", "(", "stat", "=", "rm", ",", "nx", "=", "n", ",", "eftype", "=", "'pearson'", ")", ".", "tolist", "(", ")", "pwr", "=", "power_corr", "(", "r", "=", "rm", ",", "n", "=", "n", ",", "tail", "=", "tail", ")", "# Convert to Dataframe", "stats", "=", "pd", ".", "DataFrame", "(", "{", "\"r\"", ":", "round", "(", "rm", ",", "3", ")", ",", "\"dof\"", ":", "int", "(", "dof", ")", ",", "\"pval\"", ":", "pval", ",", "\"CI95%\"", ":", "str", "(", "ci", ")", ",", "\"power\"", ":", "round", "(", "pwr", ",", "3", ")", "}", ",", "index", "=", "[", "\"rm_corr\"", "]", ")", "return", "stats" ]
Repeated measures correlation. Parameters ---------- data : pd.DataFrame Dataframe. x, y : string Name of columns in ``data`` containing the two dependent variables. subject : string Name of column in ``data`` containing the subject indicator. tail : string Specify whether to return 'one-sided' or 'two-sided' p-value. Returns ------- stats : pandas DataFrame Test summary :: 'r' : Repeated measures correlation coefficient 'dof' : Degrees of freedom 'pval' : one or two tailed p-value 'CI95' : 95% parametric confidence intervals 'power' : achieved power of the test (= 1 - type II error). Notes ----- Repeated measures correlation (rmcorr) is a statistical technique for determining the common within-individual association for paired measures assessed on two or more occasions for multiple individuals. From Bakdash and Marusich (2017): "Rmcorr accounts for non-independence among observations using analysis of covariance (ANCOVA) to statistically adjust for inter-individual variability. By removing measured variance between-participants, rmcorr provides the best linear fit for each participant using parallel regression lines (the same slope) with varying intercepts. Like a Pearson correlation coefficient, the rmcorr coefficient is bounded by − 1 to 1 and represents the strength of the linear association between two variables." Results have been tested against the `rmcorr` R package. Please note that NaN are automatically removed from the dataframe (listwise deletion). References ---------- .. [1] Bakdash, J.Z., Marusich, L.R., 2017. Repeated Measures Correlation. Front. Psychol. 8, 456. https://doi.org/10.3389/fpsyg.2017.00456 .. [2] Bland, J. M., & Altman, D. G. (1995). Statistics notes: Calculating correlation coefficients with repeated observations: Part 1—correlation within subjects. Bmj, 310(6977), 446. .. [3] https://github.com/cran/rmcorr Examples -------- >>> import pingouin as pg >>> df = pg.read_dataset('rm_corr') >>> pg.rm_corr(data=df, x='pH', y='PacO2', subject='Subject') r dof pval CI95% power rm_corr -0.507 38 0.000847 [-0.71, -0.23] 0.93
[ "Repeated", "measures", "correlation", "." ]
python
train
dsoprea/PyEasyArchive
libarchive/adapters/archive_read.py
https://github.com/dsoprea/PyEasyArchive/blob/50414b9fa9a1055435499b5b2e4b2a336a40dff6/libarchive/adapters/archive_read.py#L399-L406
def file_pour(filepath, block_size=10240, *args, **kwargs):
    """Write physical files from entries."""
    def opener(archive_res):
        _LOGGER.debug("Opening from file (file_pour): %s", filepath)
        _archive_read_open_filename(archive_res, filepath, block_size)

    return _pour(opener, *args, flags=0, **kwargs)
[ "def", "file_pour", "(", "filepath", ",", "block_size", "=", "10240", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "def", "opener", "(", "archive_res", ")", ":", "_LOGGER", ".", "debug", "(", "\"Opening from file (file_pour): %s\"", ",", "filepath", ")", "_archive_read_open_filename", "(", "archive_res", ",", "filepath", ",", "block_size", ")", "return", "_pour", "(", "opener", ",", "*", "args", ",", "flags", "=", "0", ",", "*", "*", "kwargs", ")" ]
Write physical files from entries.
[ "Write", "physical", "files", "from", "entries", "." ]
python
train
inveniosoftware/invenio-stats
invenio_stats/contrib/registrations.py
https://github.com/inveniosoftware/invenio-stats/blob/d877ae5462084abb4a28a20f1ebb3d636769c1bc/invenio_stats/contrib/registrations.py#L87-L121
def register_queries():
    """Register queries."""
    return [
        dict(
            query_name='bucket-file-download-histogram',
            query_class=ESDateHistogramQuery,
            query_config=dict(
                index='stats-file-download',
                doc_type='file-download-day-aggregation',
                copy_fields=dict(
                    bucket_id='bucket_id',
                    file_key='file_key',
                ),
                required_filters=dict(
                    bucket_id='bucket_id',
                    file_key='file_key',
                )
            )
        ),
        dict(
            query_name='bucket-file-download-total',
            query_class=ESTermsQuery,
            query_config=dict(
                index='stats-file-download',
                doc_type='file-download-day-aggregation',
                copy_fields=dict(
                    # bucket_id='bucket_id',
                ),
                required_filters=dict(
                    bucket_id='bucket_id',
                ),
                aggregated_fields=['file_key']
            )
        ),
    ]
[ "def", "register_queries", "(", ")", ":", "return", "[", "dict", "(", "query_name", "=", "'bucket-file-download-histogram'", ",", "query_class", "=", "ESDateHistogramQuery", ",", "query_config", "=", "dict", "(", "index", "=", "'stats-file-download'", ",", "doc_type", "=", "'file-download-day-aggregation'", ",", "copy_fields", "=", "dict", "(", "bucket_id", "=", "'bucket_id'", ",", "file_key", "=", "'file_key'", ",", ")", ",", "required_filters", "=", "dict", "(", "bucket_id", "=", "'bucket_id'", ",", "file_key", "=", "'file_key'", ",", ")", ")", ")", ",", "dict", "(", "query_name", "=", "'bucket-file-download-total'", ",", "query_class", "=", "ESTermsQuery", ",", "query_config", "=", "dict", "(", "index", "=", "'stats-file-download'", ",", "doc_type", "=", "'file-download-day-aggregation'", ",", "copy_fields", "=", "dict", "(", "# bucket_id='bucket_id',", ")", ",", "required_filters", "=", "dict", "(", "bucket_id", "=", "'bucket_id'", ",", ")", ",", "aggregated_fields", "=", "[", "'file_key'", "]", ")", ")", ",", "]" ]
Register queries.
[ "Register", "queries", "." ]
python
valid
DataDog/integrations-core
openstack_controller/datadog_checks/openstack_controller/api.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/openstack_controller/datadog_checks/openstack_controller/api.py#L556-L572
def _get_user_identity(user):
    """
    Parse user identity out of init_config

    To guarantee a uniquely identifiable user, expects
    {"user": {"name": "my_username", "password": "my_password",
              "domain": {"id": "my_domain_id"}
              }
    }
    """
    if not (
        user and user.get('name') and user.get('password') and user.get("domain") and user.get("domain").get("id")
    ):
        raise IncompleteIdentity()

    identity = {"methods": ['password'], "password": {"user": user}}
    return identity
[ "def", "_get_user_identity", "(", "user", ")", ":", "if", "not", "(", "user", "and", "user", ".", "get", "(", "'name'", ")", "and", "user", ".", "get", "(", "'password'", ")", "and", "user", ".", "get", "(", "\"domain\"", ")", "and", "user", ".", "get", "(", "\"domain\"", ")", ".", "get", "(", "\"id\"", ")", ")", ":", "raise", "IncompleteIdentity", "(", ")", "identity", "=", "{", "\"methods\"", ":", "[", "'password'", "]", ",", "\"password\"", ":", "{", "\"user\"", ":", "user", "}", "}", "return", "identity" ]
Parse user identity out of init_config To guarantee a uniquely identifiable user, expects {"user": {"name": "my_username", "password": "my_password", "domain": {"id": "my_domain_id"} } }
[ "Parse", "user", "identity", "out", "of", "init_config" ]
python
train
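The expected input shape and the resulting identity payload, using hypothetical credential values:

user = {
    "name": "my_username",
    "password": "my_password",
    "domain": {"id": "my_domain_id"},  # the nested domain id is required
}
identity = {"methods": ["password"], "password": {"user": user}}
print(identity)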
henzk/featuremonkey
featuremonkey/composer.py
https://github.com/henzk/featuremonkey/blob/e44414fc68427bcd71ad33ec2d816da0dd78eefa/featuremonkey/composer.py#L219-L228
def _compose_pair(self, role, base):
    '''
    composes onto base by applying the role
    '''
    # apply transformations in role to base
    for attrname in dir(role):
        transformation = getattr(role, attrname)
        self._apply_transformation(role, base, transformation, attrname)

    return base
[ "def", "_compose_pair", "(", "self", ",", "role", ",", "base", ")", ":", "# apply transformations in role to base", "for", "attrname", "in", "dir", "(", "role", ")", ":", "transformation", "=", "getattr", "(", "role", ",", "attrname", ")", "self", ".", "_apply_transformation", "(", "role", ",", "base", ",", "transformation", ",", "attrname", ")", "return", "base" ]
composes onto base by applying the role
[ "composes", "onto", "base", "by", "applying", "the", "role" ]
python
train
log2timeline/plaso
plaso/cli/storage_media_tool.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/cli/storage_media_tool.py#L902-L943
def _ScanEncryptedVolume(self, scan_context, scan_node):
  """Scans an encrypted volume scan node for volume and file systems.

  Args:
    scan_context (SourceScannerContext): source scanner context.
    scan_node (SourceScanNode): volume scan node.

  Raises:
    SourceScannerError: if the format of or within the source is not
        supported, the scan node is invalid or there are no credentials
        defined for the format.
  """
  if not scan_node or not scan_node.path_spec:
    raise errors.SourceScannerError('Invalid or missing scan node.')

  credentials = credentials_manager.CredentialsManager.GetCredentials(
      scan_node.path_spec)
  if not credentials:
    raise errors.SourceScannerError('Missing credentials for scan node.')

  credentials_dict = {
      credential_type: credential_data
      for credential_type, credential_data in self._credentials}

  is_unlocked = False
  for credential_type in credentials.CREDENTIALS:
    credential_data = credentials_dict.get(credential_type, None)
    if not credential_data:
      continue

    is_unlocked = self._source_scanner.Unlock(
        scan_context, scan_node.path_spec, credential_type, credential_data)
    if is_unlocked:
      break

  if not is_unlocked:
    is_unlocked = self._PromptUserForEncryptedVolumeCredential(
        scan_context, scan_node, credentials)

  if is_unlocked:
    self._source_scanner.Scan(
        scan_context, scan_path_spec=scan_node.path_spec)
[ "def", "_ScanEncryptedVolume", "(", "self", ",", "scan_context", ",", "scan_node", ")", ":", "if", "not", "scan_node", "or", "not", "scan_node", ".", "path_spec", ":", "raise", "errors", ".", "SourceScannerError", "(", "'Invalid or missing scan node.'", ")", "credentials", "=", "credentials_manager", ".", "CredentialsManager", ".", "GetCredentials", "(", "scan_node", ".", "path_spec", ")", "if", "not", "credentials", ":", "raise", "errors", ".", "SourceScannerError", "(", "'Missing credentials for scan node.'", ")", "credentials_dict", "=", "{", "credential_type", ":", "credential_data", "for", "credential_type", ",", "credential_data", "in", "self", ".", "_credentials", "}", "is_unlocked", "=", "False", "for", "credential_type", "in", "credentials", ".", "CREDENTIALS", ":", "credential_data", "=", "credentials_dict", ".", "get", "(", "credential_type", ",", "None", ")", "if", "not", "credential_data", ":", "continue", "is_unlocked", "=", "self", ".", "_source_scanner", ".", "Unlock", "(", "scan_context", ",", "scan_node", ".", "path_spec", ",", "credential_type", ",", "credential_data", ")", "if", "is_unlocked", ":", "break", "if", "not", "is_unlocked", ":", "is_unlocked", "=", "self", ".", "_PromptUserForEncryptedVolumeCredential", "(", "scan_context", ",", "scan_node", ",", "credentials", ")", "if", "is_unlocked", ":", "self", ".", "_source_scanner", ".", "Scan", "(", "scan_context", ",", "scan_path_spec", "=", "scan_node", ".", "path_spec", ")" ]
Scans an encrypted volume scan node for volume and file systems. Args: scan_context (SourceScannerContext): source scanner context. scan_node (SourceScanNode): volume scan node. Raises: SourceScannerError: if the format of or within the source is not supported, the scan node is invalid or there are no credentials defined for the format.
[ "Scans", "an", "encrypted", "volume", "scan", "node", "for", "volume", "and", "file", "systems", "." ]
python
train
quintusdias/glymur
glymur/lib/openjp2.py
https://github.com/quintusdias/glymur/blob/8b8fb091130fff00f1028dc82219e69e3f9baf6d/glymur/lib/openjp2.py#L1275-L1287
def stream_destroy(stream):
    """Wraps openjp2 library function opj_stream_destroy.

    Destroys the stream created by create_stream.

    Parameters
    ----------
    stream : STREAM_TYPE_P
        The file stream.
    """
    OPENJP2.opj_stream_destroy.argtypes = [STREAM_TYPE_P]
    OPENJP2.opj_stream_destroy.restype = ctypes.c_void_p
    OPENJP2.opj_stream_destroy(stream)
[ "def", "stream_destroy", "(", "stream", ")", ":", "OPENJP2", ".", "opj_stream_destroy", ".", "argtypes", "=", "[", "STREAM_TYPE_P", "]", "OPENJP2", ".", "opj_stream_destroy", ".", "restype", "=", "ctypes", ".", "c_void_p", "OPENJP2", ".", "opj_stream_destroy", "(", "stream", ")" ]
Wraps openjp2 library function opj_stream_destroy. Destroys the stream created by create_stream. Parameters ---------- stream : STREAM_TYPE_P The file stream.
[ "Wraps", "openjp2", "library", "function", "opj_stream_destroy", "." ]
python
train
antevens/listen
listen/signal_handler.py
https://github.com/antevens/listen/blob/d3ddff8e7fbfb672c5bd7f6f4febeb5e921d8c67/listen/signal_handler.py#L65-L92
def default_handler(self, signum, frame):
    """ Default handler, a generic callback method for signal processing"""
    self.log.debug("Signal handler called with signal: {0}".format(signum))

    # 1. If signal is HUP restart the python process
    # 2. If signal is TERM, INT or QUIT we try to cleanup then exit with -1
    # 3. If signal is STOP or TSTP we pause
    # 4. If signal is CONT or USR1 we continue
    # 5. If signal is INFO we print status
    # 6. If signal is USR2 we we abort and then exit with -1
    if signum in self.restart_signals:
        self.set_handler(self.handled_signals, self.pseudo_handler)
        self._cleanup()
        os.execl('python', 'python', * sys.argv)
    elif signum in self.abort_signals:
        self.abort(signum)
    elif signum in self.pause_signals:
        self.pause(signum)
    elif signum in self.resume_signals:
        self.resume(signum)
    elif signum in self.status_signals:
        self.status(signum)
    elif signum in self.error_signals:
        self.log.error('Signal handler received error signal from an external process, aborting')
        self.abort(signum)
    else:
        self.log.error("Unhandled signal received: {0}".format(signum))
        raise
[ "def", "default_handler", "(", "self", ",", "signum", ",", "frame", ")", ":", "self", ".", "log", ".", "debug", "(", "\"Signal handler called with signal: {0}\"", ".", "format", "(", "signum", ")", ")", "# 1. If signal is HUP restart the python process", "# 2. If signal is TERM, INT or QUIT we try to cleanup then exit with -1", "# 3. If signal is STOP or TSTP we pause", "# 4. If signal is CONT or USR1 we continue", "# 5. If signal is INFO we print status", "# 6. If signal is USR2 we we abort and then exit with -1", "if", "signum", "in", "self", ".", "restart_signals", ":", "self", ".", "set_handler", "(", "self", ".", "handled_signals", ",", "self", ".", "pseudo_handler", ")", "self", ".", "_cleanup", "(", ")", "os", ".", "execl", "(", "'python'", ",", "'python'", ",", "*", "sys", ".", "argv", ")", "elif", "signum", "in", "self", ".", "abort_signals", ":", "self", ".", "abort", "(", "signum", ")", "elif", "signum", "in", "self", ".", "pause_signals", ":", "self", ".", "pause", "(", "signum", ")", "elif", "signum", "in", "self", ".", "resume_signals", ":", "self", ".", "resume", "(", "signum", ")", "elif", "signum", "in", "self", ".", "status_signals", ":", "self", ".", "status", "(", "signum", ")", "elif", "signum", "in", "self", ".", "error_signals", ":", "self", ".", "log", ".", "error", "(", "'Signal handler received error signal from an external process, aborting'", ")", "self", ".", "abort", "(", "signum", ")", "else", ":", "self", ".", "log", ".", "error", "(", "\"Unhandled signal received: {0}\"", ".", "format", "(", "signum", ")", ")", "raise" ]
Default handler, a generic callback method for signal processing
[ "Default", "handler", "a", "generic", "callback", "method", "for", "signal", "processing" ]
python
test
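A minimal sketch of wiring such a dispatcher with the standard signal module (POSIX signals assumed; the signal groups here are hypothetical stand-ins for the handler's configured sets):

import signal

ABORT_SIGNALS = (signal.SIGTERM, signal.SIGINT)
RESUME_SIGNALS = (signal.SIGUSR1,)

def dispatcher(signum, frame):
    # Branch on the received signal number, mirroring the callback above.
    if signum in ABORT_SIGNALS:
        print("abort requested:", signum)
    elif signum in RESUME_SIGNALS:
        print("resume requested:", signum)
    else:
        print("unhandled signal:", signum)

for sig in ABORT_SIGNALS + RESUME_SIGNALS:
    signal.signal(sig, dispatcher)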
ml4ai/delphi
delphi/translators/for2py/arrays.py
https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/translators/for2py/arrays.py#L165-L176
def idx2subs(idx_list):
    """Given a list idx_list of index values for each dimension of an array,
    idx2subs() returns a list of the tuples of subscripts for all of the
    array elements specified by those index values.

    Note: This code adapted from that posted by jfs at
    https://stackoverflow.com/questions/533905/get-the-cartesian-product-
    of-a-series-of-lists"""
    if not idx_list:
        return [()]
    return [items + (item,)
            for items in idx2subs(idx_list[:-1]) for item in idx_list[-1]]
[ "def", "idx2subs", "(", "idx_list", ")", ":", "if", "not", "idx_list", ":", "return", "[", "(", ")", "]", "return", "[", "items", "+", "(", "item", ",", ")", "for", "items", "in", "idx2subs", "(", "idx_list", "[", ":", "-", "1", "]", ")", "for", "item", "in", "idx_list", "[", "-", "1", "]", "]" ]
Given a list idx_list of index values for each dimension of an array, idx2subs() returns a list of the tuples of subscripts for all of the array elements specified by those index values. Note: This code adapted from that posted by jfs at https://stackoverflow.com/questions/533905/get-the-cartesian-product- of-a-series-of-lists
[ "Given", "a", "list", "idx_list", "of", "index", "values", "for", "each", "dimension", "of", "an", "array", "idx2subs", "()", "returns", "a", "list", "of", "the", "tuples", "of", "subscripts", "for", "all", "of", "the", "array", "elements", "specified", "by", "those", "index", "values", "." ]
python
train
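A quick demonstration of the recursion on two index lists (output order matches the nested comprehension):

def idx2subs(idx_list):
    if not idx_list:
        return [()]
    return [items + (item,)
            for items in idx2subs(idx_list[:-1]) for item in idx_list[-1]]

print(idx2subs([[1, 2], [5, 6, 7]]))
# [(1, 5), (1, 6), (1, 7), (2, 5), (2, 6), (2, 7)]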
pandas-dev/pandas
pandas/io/packers.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/packers.py#L714-L725
def pack(o, default=encode,
         encoding='utf-8', unicode_errors='strict', use_single_float=False,
         autoreset=1, use_bin_type=1):
    """
    Pack an object and return the packed bytes.
    """

    return Packer(default=default, encoding=encoding,
                  unicode_errors=unicode_errors,
                  use_single_float=use_single_float,
                  autoreset=autoreset,
                  use_bin_type=use_bin_type).pack(o)
[ "def", "pack", "(", "o", ",", "default", "=", "encode", ",", "encoding", "=", "'utf-8'", ",", "unicode_errors", "=", "'strict'", ",", "use_single_float", "=", "False", ",", "autoreset", "=", "1", ",", "use_bin_type", "=", "1", ")", ":", "return", "Packer", "(", "default", "=", "default", ",", "encoding", "=", "encoding", ",", "unicode_errors", "=", "unicode_errors", ",", "use_single_float", "=", "use_single_float", ",", "autoreset", "=", "autoreset", ",", "use_bin_type", "=", "use_bin_type", ")", ".", "pack", "(", "o", ")" ]
Pack an object and return the packed bytes.
[ "Pack", "an", "object", "and", "return", "the", "packed", "bytes", "." ]
python
train
topic2k/pygcgen
pygcgen/generator.py
https://github.com/topic2k/pygcgen/blob/c41701815df2c8c3a57fd5f7b8babe702127c8a1/pygcgen/generator.py#L935-L948
def version_of_first_item(self):
    """
    Try to detect the newest tag from self.options.base, otherwise
    return a special value indicating the creation of the repo.

    :rtype: str
    :return: Tag name to use as 'oldest' tag. May be special value,
             indicating the creation of the repo.
    """

    try:
        sections = read_changelog(self.options)
        return sections[0]["version"]
    except(IOError, TypeError):
        return self.get_temp_tag_for_repo_creation()
[ "def", "version_of_first_item", "(", "self", ")", ":", "try", ":", "sections", "=", "read_changelog", "(", "self", ".", "options", ")", "return", "sections", "[", "0", "]", "[", "\"version\"", "]", "except", "(", "IOError", ",", "TypeError", ")", ":", "return", "self", ".", "get_temp_tag_for_repo_creation", "(", ")" ]
Try to detect the newest tag from self.options.base, otherwise return a special value indicating the creation of the repo. :rtype: str :return: Tag name to use as 'oldest' tag. May be special value, indicating the creation of the repo.
[ "Try", "to", "detect", "the", "newest", "tag", "from", "self", ".", "options", ".", "base", "otherwise", "return", "a", "special", "value", "indicating", "the", "creation", "of", "the", "repo", "." ]
python
valid
RiotGames/cloud-inquisitor
plugins/public/cinq-auditor-vpc-flowlogs/cinq_auditor_vpc_flowlogs/__init__.py
https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/plugins/public/cinq-auditor-vpc-flowlogs/cinq_auditor_vpc_flowlogs/__init__.py#L185-L225
def create_vpc_flow_logs(self, account, region, vpc_id, iam_role_arn):
    """Create a new VPC Flow log

    Args:
        account (:obj:`Account`): Account to create the flow in
        region (`str`): Region to create the flow in
        vpc_id (`str`): ID of the VPC to create the flow for
        iam_role_arn (`str`): ARN of the IAM role used to post logs to the log group

    Returns:
        `None`
    """
    try:
        flow = self.session.client('ec2', region)
        flow.create_flow_logs(
            ResourceIds=[vpc_id],
            ResourceType='VPC',
            TrafficType='ALL',
            LogGroupName=vpc_id,
            DeliverLogsPermissionArn=iam_role_arn
        )
        fvpc = VPC.get(vpc_id)
        fvpc.set_property('vpc_flow_logs_status', 'ACTIVE')

        self.log.info('Enabled VPC Logging {}/{}/{}'.format(account, region, vpc_id))
        auditlog(
            event='vpc_flow_logs.create_vpc_flow',
            actor=self.ns,
            data={
                'account': account.account_name,
                'region': region,
                'vpcId': vpc_id,
                'arn': iam_role_arn
            }
        )
    except Exception:
        self.log.exception('Failed creating VPC Flow Logs for {}/{}/{}.'.format(
            account, region, vpc_id
        ))
[ "def", "create_vpc_flow_logs", "(", "self", ",", "account", ",", "region", ",", "vpc_id", ",", "iam_role_arn", ")", ":", "try", ":", "flow", "=", "self", ".", "session", ".", "client", "(", "'ec2'", ",", "region", ")", "flow", ".", "create_flow_logs", "(", "ResourceIds", "=", "[", "vpc_id", "]", ",", "ResourceType", "=", "'VPC'", ",", "TrafficType", "=", "'ALL'", ",", "LogGroupName", "=", "vpc_id", ",", "DeliverLogsPermissionArn", "=", "iam_role_arn", ")", "fvpc", "=", "VPC", ".", "get", "(", "vpc_id", ")", "fvpc", ".", "set_property", "(", "'vpc_flow_logs_status'", ",", "'ACTIVE'", ")", "self", ".", "log", ".", "info", "(", "'Enabled VPC Logging {}/{}/{}'", ".", "format", "(", "account", ",", "region", ",", "vpc_id", ")", ")", "auditlog", "(", "event", "=", "'vpc_flow_logs.create_vpc_flow'", ",", "actor", "=", "self", ".", "ns", ",", "data", "=", "{", "'account'", ":", "account", ".", "account_name", ",", "'region'", ":", "region", ",", "'vpcId'", ":", "vpc_id", ",", "'arn'", ":", "iam_role_arn", "}", ")", "except", "Exception", ":", "self", ".", "log", ".", "exception", "(", "'Failed creating VPC Flow Logs for {}/{}/{}.'", ".", "format", "(", "account", ",", "region", ",", "vpc_id", ")", ")" ]
Create a new VPC Flow log Args: account (:obj:`Account`): Account to create the flow in region (`str`): Region to create the flow in vpc_id (`str`): ID of the VPC to create the flow for iam_role_arn (`str`): ARN of the IAM role used to post logs to the log group Returns: `None`
[ "Create", "a", "new", "VPC", "Flow", "log" ]
python
train
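A boto3-only sketch of the EC2 call made above, with hypothetical VPC and role identifiers (the audit-log and VPC-property bookkeeping are project internals and omitted here):

import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")
ec2.create_flow_logs(
    ResourceIds=["vpc-0123456789abcdef0"],   # hypothetical VPC id
    ResourceType="VPC",
    TrafficType="ALL",
    LogGroupName="vpc-0123456789abcdef0",
    DeliverLogsPermissionArn="arn:aws:iam::123456789012:role/flow-logs",  # hypothetical role ARN
)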
mbedmicro/pyOCD
pyocd/probe/stlink/detect/darwin.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/darwin.py#L113-L123
def _mount_points(self):
    """ Returns map {volume_id: mount_point} """
    diskutil_ls = subprocess.Popen(
        ["diskutil", "list", "-plist"], stdout=subprocess.PIPE
    )
    disks = _plist_from_popen(diskutil_ls)

    return {
        disk["DeviceIdentifier"]: disk.get("MountPoint", None)
        for disk in disks["AllDisksAndPartitions"]
    }
[ "def", "_mount_points", "(", "self", ")", ":", "diskutil_ls", "=", "subprocess", ".", "Popen", "(", "[", "\"diskutil\"", ",", "\"list\"", ",", "\"-plist\"", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "disks", "=", "_plist_from_popen", "(", "diskutil_ls", ")", "return", "{", "disk", "[", "\"DeviceIdentifier\"", "]", ":", "disk", ".", "get", "(", "\"MountPoint\"", ",", "None", ")", "for", "disk", "in", "disks", "[", "\"AllDisksAndPartitions\"", "]", "}" ]
Returns map {volume_id: mount_point}
[ "Returns", "map", "{", "volume_id", ":", "mount_point", "}" ]
python
train
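A standalone macOS-only sketch of the same lookup, using the stdlib plistlib in place of the module's _plist_from_popen helper:

import plistlib
import subprocess

out = subprocess.run(
    ["diskutil", "list", "-plist"], capture_output=True, check=True
).stdout
disks = plistlib.loads(out)
mounts = {
    disk["DeviceIdentifier"]: disk.get("MountPoint")
    for disk in disks["AllDisksAndPartitions"]
}
print(mounts)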
GetBlimp/django-rest-framework-jwt
rest_framework_jwt/views.py
https://github.com/GetBlimp/django-rest-framework-jwt/blob/0a0bd402ec21fd6b9a5f715d114411836fbb2923/rest_framework_jwt/views.py#L45-L52
def get_serializer(self, *args, **kwargs):
    """
    Return the serializer instance that should be used for validating and
    deserializing input, and for serializing output.
    """
    serializer_class = self.get_serializer_class()
    kwargs['context'] = self.get_serializer_context()
    return serializer_class(*args, **kwargs)
[ "def", "get_serializer", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "serializer_class", "=", "self", ".", "get_serializer_class", "(", ")", "kwargs", "[", "'context'", "]", "=", "self", ".", "get_serializer_context", "(", ")", "return", "serializer_class", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Return the serializer instance that should be used for validating and deserializing input, and for serializing output.
[ "Return", "the", "serializer", "instance", "that", "should", "be", "used", "for", "validating", "and", "deserializing", "input", "and", "for", "serializing", "output", "." ]
python
train
pip-services3-python/pip-services3-commons-python
pip_services3_commons/data/AnyValueMap.py
https://github.com/pip-services3-python/pip-services3-commons-python/blob/22cbbb3e91e49717f65c083d36147fdb07ba9e3b/pip_services3_commons/data/AnyValueMap.py#L395-L404
def get_as_array(self, key):
    """
    Converts map element into an AnyValueMap or returns empty AnyValueMap if conversion is not possible.

    :param key: an index of element to get.

    :return: AnyValueMap value of the element or empty AnyValueMap if conversion is not supported.
    """
    value = self.get(key)
    return AnyValueMap.from_value(value)
[ "def", "get_as_array", "(", "self", ",", "key", ")", ":", "value", "=", "self", ".", "get", "(", "key", ")", "return", "AnyValueMap", ".", "from_value", "(", "value", ")" ]
Converts map element into an AnyValueMap or returns empty AnyValueMap if conversion is not possible. :param key: an index of element to get. :return: AnyValueMap value of the element or empty AnyValueMap if conversion is not supported.
[ "Converts", "map", "element", "into", "an", "AnyValueMap", "or", "returns", "empty", "AnyValueMap", "if", "conversion", "is", "not", "possible", "." ]
python
train