Dataset schema (one row per record; string columns show min-max lengths):

repo              string, 7-54 chars
path              string, 4-192 chars
url               string, 87-284 chars
code              string, 78-104k chars
code_tokens       sequence
docstring         string, 1-46.9k chars
docstring_tokens  sequence
language          string, 1 distinct value
partition         string, 3 distinct values
CalebBell/ht
ht/insulation.py
https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/insulation.py#L580-L625
def nearest_material(name, complete=False):
    r'''Returns the nearest hit to a given name from dictionaries of
    building, insulating, or refractory material from tables in [1]_, [2]_,
    and [3]_. Function will pick the closest match based on a fuzzy search.
    If `complete` is True, only hits with all three of density, heat
    capacity, and thermal conductivity available are returned.

    Parameters
    ----------
    name : str
        Search keywords to be used by difflib function
    complete : bool, optional
        If True, returns only hits with all parameters available

    Returns
    -------
    ID : str
        A key to one of the dictionaries mentioned above

    Examples
    --------
    >>> nearest_material('stainless steel')
    'Metals, stainless steel'

    References
    ----------
    .. [1] ASHRAE Handbook: Fundamentals. American Society of Heating,
       Refrigerating and Air-Conditioning Engineers, Incorporated, 2013.
    .. [2] DIN EN 12524 (2000-07) Building Materials and Products
       Hygrothermal Properties - Tabulated Design Values; English Version
       of DIN EN 12524.
    .. [3] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
       Berlin; New York: Springer, 2010.
    '''
    if complete:
        hits = difflib.get_close_matches(name, materials_dict.keys(),
                                         n=1000, cutoff=0)
        for hit in hits:
            if (materials_dict[hit] == 1 or materials_dict[hit] == 3
                    or (ASHRAE[hit][0] and ASHRAE[hit][1])):
                return hit
    else:
        ID = difflib.get_close_matches(name, materials_dict.keys(),
                                       n=1, cutoff=0.6)
        if not ID:
            ID = difflib.get_close_matches(name, materials_dict.keys(),
                                           n=1, cutoff=0.3)
        if not ID:
            ID = difflib.get_close_matches(name, materials_dict.keys(),
                                           n=1, cutoff=0)
        return ID[0]
[ "def", "nearest_material", "(", "name", ",", "complete", "=", "False", ")", ":", "if", "complete", ":", "hits", "=", "difflib", ".", "get_close_matches", "(", "name", ",", "materials_dict", ".", "keys", "(", ")", ",", "n", "=", "1000", ",", "cutoff", "=", "0", ")", "for", "hit", "in", "hits", ":", "if", "materials_dict", "[", "hit", "]", "==", "1", "or", "materials_dict", "[", "hit", "]", "==", "3", "or", "(", "ASHRAE", "[", "hit", "]", "[", "0", "]", "and", "ASHRAE", "[", "hit", "]", "[", "1", "]", ")", ":", "return", "hit", "else", ":", "ID", "=", "difflib", ".", "get_close_matches", "(", "name", ",", "materials_dict", ".", "keys", "(", ")", ",", "n", "=", "1", ",", "cutoff", "=", "0.6", ")", "if", "not", "ID", ":", "ID", "=", "difflib", ".", "get_close_matches", "(", "name", ",", "materials_dict", ".", "keys", "(", ")", ",", "n", "=", "1", ",", "cutoff", "=", "0.3", ")", "if", "not", "ID", ":", "ID", "=", "difflib", ".", "get_close_matches", "(", "name", ",", "materials_dict", ".", "keys", "(", ")", ",", "n", "=", "1", ",", "cutoff", "=", "0", ")", "return", "ID", "[", "0", "]" ]
r'''Returns the nearest hit to a given name from dictionaries of building,
insulating, or refractory material from tables in [1]_, [2]_, and [3]_.
Function will pick the closest match based on a fuzzy search. If `complete`
is True, only hits with all three of density, heat capacity, and thermal
conductivity available are returned.

Parameters
----------
name : str
    Search keywords to be used by difflib function
complete : bool, optional
    If True, returns only hits with all parameters available

Returns
-------
ID : str
    A key to one of the dictionaries mentioned above

Examples
--------
>>> nearest_material('stainless steel')
'Metals, stainless steel'

References
----------
.. [1] ASHRAE Handbook: Fundamentals. American Society of Heating,
   Refrigerating and Air-Conditioning Engineers, Incorporated, 2013.
.. [2] DIN EN 12524 (2000-07) Building Materials and Products
   Hygrothermal Properties - Tabulated Design Values; English Version
   of DIN EN 12524.
.. [3] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd edition.
   Berlin; New York: Springer, 2010.
[ "r", "Returns", "the", "nearest", "hit", "to", "a", "given", "name", "from", "from", "dictionaries", "of", "building", "insulating", "or", "refractory", "material", "from", "tables", "in", "[", "1", "]", "_", "[", "2", "]", "_", "and", "[", "3", "]", "_", ".", "Function", "will", "pick", "the", "closest", "match", "based", "on", "a", "fuzzy", "search", ".", "if", "complete", "is", "True", "will", "only", "return", "hits", "with", "all", "three", "of", "density", "heat", "capacity", "and", "thermal", "conductivity", "available", "." ]
python
train
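The lookup above funnels everything through the standard library's difflib with progressively looser cutoffs (0.6, then 0.3, then 0). A minimal self-contained sketch of that cascade, with an illustrative materials list standing in for the real tables:

import difflib

materials = ['Metals, stainless steel', 'Metals, carbon steel',
             'Brick, fired clay']  # illustrative stand-in for materials_dict

def nearest(name):
    # Same cascading-cutoff pattern as nearest_material above.
    for cutoff in (0.6, 0.3, 0):
        hits = difflib.get_close_matches(name, materials, n=1, cutoff=cutoff)
        if hits:
            return hits[0]

print(nearest('stainless steel'))  # -> 'Metals, stainless steel'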
RedHatInsights/insights-core
insights/client/data_collector.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/client/data_collector.py#L51-L72
def _run_pre_command(self, pre_cmd):
    '''
    Run a pre command to get external args for a command
    '''
    logger.debug('Executing pre-command: %s', pre_cmd)
    try:
        pre_proc = Popen(pre_cmd, stdout=PIPE, stderr=STDOUT, shell=True)
    except OSError as err:
        if err.errno == errno.ENOENT:
            logger.debug('Command %s not found', pre_cmd)
        return
    stdout, stderr = pre_proc.communicate()
    the_return_code = pre_proc.poll()
    logger.debug("Pre-command results:")
    logger.debug("STDOUT: %s", stdout)
    logger.debug("STDERR: %s", stderr)
    logger.debug("Return Code: %s", the_return_code)
    if the_return_code != 0:
        return []
    if six.PY3:
        stdout = stdout.decode('utf-8')
    return stdout.splitlines()
[ "def", "_run_pre_command", "(", "self", ",", "pre_cmd", ")", ":", "logger", ".", "debug", "(", "'Executing pre-command: %s'", ",", "pre_cmd", ")", "try", ":", "pre_proc", "=", "Popen", "(", "pre_cmd", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "STDOUT", ",", "shell", "=", "True", ")", "except", "OSError", "as", "err", ":", "if", "err", ".", "errno", "==", "errno", ".", "ENOENT", ":", "logger", ".", "debug", "(", "'Command %s not found'", ",", "pre_cmd", ")", "return", "stdout", ",", "stderr", "=", "pre_proc", ".", "communicate", "(", ")", "the_return_code", "=", "pre_proc", ".", "poll", "(", ")", "logger", ".", "debug", "(", "\"Pre-command results:\"", ")", "logger", ".", "debug", "(", "\"STDOUT: %s\"", ",", "stdout", ")", "logger", ".", "debug", "(", "\"STDERR: %s\"", ",", "stderr", ")", "logger", ".", "debug", "(", "\"Return Code: %s\"", ",", "the_return_code", ")", "if", "the_return_code", "!=", "0", ":", "return", "[", "]", "if", "six", ".", "PY3", ":", "stdout", "=", "stdout", ".", "decode", "(", "'utf-8'", ")", "return", "stdout", ".", "splitlines", "(", ")" ]
Run a pre command to get external args for a command
[ "Run", "a", "pre", "command", "to", "get", "external", "args", "for", "a", "command" ]
python
train
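For reference, a Python-3-only sketch of the same pre-command pattern without the six shim and logging (the sample command is illustrative):

import errno
from subprocess import Popen, PIPE, STDOUT

def run_pre_command(pre_cmd):
    try:
        proc = Popen(pre_cmd, stdout=PIPE, stderr=STDOUT, shell=True)
    except OSError as err:
        if err.errno == errno.ENOENT:
            return  # command not found: mirror the silent return above
        raise
    stdout, _ = proc.communicate()
    if proc.poll() != 0:
        return []  # non-zero exit: no args
    return stdout.decode('utf-8').splitlines()

print(run_pre_command('echo one; echo two'))  # -> ['one', 'two']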
mitsei/dlkit
dlkit/handcar/osid/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/handcar/osid/managers.py#L210-L242
def initialize(self, runtime=None):
    """Initializes this manager.

    A manager is initialized once at the time of creation.

    arg:    runtime (osid.OsidRuntimeManager): the runtime environment
    raise:  CONFIGURATION_ERROR - an error with implementation
            configuration
    raise:  ILLEGAL_STATE - this manager has already been initialized by
            the OsidRuntime
    raise:  NullArgument - runtime is null
    raise:  OperationFailed - unable to complete request
    compliance: mandatory - This method must be implemented.
    implementation notes: In addition to loading its runtime configuration
    an implementation may create shared resources such as connection pools
    to be shared among all sessions of this service and released when this
    manager is closed. Providers must thread-protect any data stored in
    the manager. To maximize interoperability, providers should not honor
    a second call to initialize() and must set an ILLEGAL_STATE error.

    """
    if self._runtime is not None:
        raise IllegalState()
    self._runtime = runtime
    config = runtime.get_configuration()
    parameter_id = Id('parameter:hostName@dlkit_service')
    host = config.get_value_by_parameter(parameter_id).get_string_value()
    if host is not None:
        self._host = host
    parameter_id = Id('parameter:appKey@dlkit_service')
    app_key = config.get_value_by_parameter(parameter_id).get_string_value()
    if app_key is not None:
        self._app_key = app_key
[ "def", "initialize", "(", "self", ",", "runtime", "=", "None", ")", ":", "if", "self", ".", "_runtime", "is", "not", "None", ":", "raise", "IllegalState", "(", ")", "self", ".", "_runtime", "=", "runtime", "config", "=", "runtime", ".", "get_configuration", "(", ")", "parameter_id", "=", "Id", "(", "'parameter:hostName@dlkit_service'", ")", "host", "=", "config", ".", "get_value_by_parameter", "(", "parameter_id", ")", ".", "get_string_value", "(", ")", "if", "host", "is", "not", "None", ":", "self", ".", "_host", "=", "host", "parameter_id", "=", "Id", "(", "'parameter:appKey@dlkit_service'", ")", "app_key", "=", "config", ".", "get_value_by_parameter", "(", "parameter_id", ")", ".", "get_string_value", "(", ")", "if", "app_key", "is", "not", "None", ":", "self", ".", "_app_key", "=", "app_key" ]
Initializes this manager.

A manager is initialized once at the time of creation.

arg:    runtime (osid.OsidRuntimeManager): the runtime environment
raise:  CONFIGURATION_ERROR - an error with implementation configuration
raise:  ILLEGAL_STATE - this manager has already been initialized by the
        OsidRuntime
raise:  NullArgument - runtime is null
raise:  OperationFailed - unable to complete request
compliance: mandatory - This method must be implemented.
implementation notes: In addition to loading its runtime configuration an
implementation may create shared resources such as connection pools to be
shared among all sessions of this service and released when this manager
is closed. Providers must thread-protect any data stored in the manager.
To maximize interoperability, providers should not honor a second call to
initialize() and must set an ILLEGAL_STATE error.
[ "Initializes", "this", "manager", ".", "A", "manager", "is", "initialized", "once", "at", "the", "time", "of", "creation", ".", "arg", ":", "runtime", "(", "osid", ".", "OsidRuntimeManager", ")", ":", "the", "runtime", "environment", "raise", ":", "CONFIGURATION_ERROR", "-", "an", "error", "with", "implementation", "configuration", "raise", ":", "ILLEGAL_STATE", "-", "this", "manager", "has", "already", "been", "initialized", "by", "the", "OsidRuntime", "raise", ":", "NullArgument", "-", "runtime", "is", "null", "raise", ":", "OperationFailed", "-", "unable", "to", "complete", "request", "compliance", ":", "mandatory", "-", "This", "method", "must", "be", "implemented", ".", "implementation", "notes", ":", "In", "addition", "to", "loading", "its", "runtime", "configuration", "an", "implementation", "may", "create", "shared", "resources", "such", "as", "connection", "pools", "to", "be", "shared", "among", "all", "sessions", "of", "this", "service", "and", "released", "when", "this", "manager", "is", "closed", ".", "Providers", "must", "thread", "-", "protect", "any", "data", "stored", "in", "the", "manager", ".", "To", "maximize", "interoperability", "providers", "should", "not", "honor", "a", "second", "call", "to", "initialize", "()", "and", "must", "set", "an", "ILLEGAL_STATE", "error", "." ]
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/ApiGenericClient.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiGenericClient.py#L233-L246
def _parse(self, content):
    """
    Parse request content into Python data.

    @param content: Context of request.

    @raise ParseError:
    """
    if content:
        stream = BytesIO(str(content))
        data = json.loads(stream.getvalue())
        return data
[ "def", "_parse", "(", "self", ",", "content", ")", ":", "if", "content", ":", "stream", "=", "BytesIO", "(", "str", "(", "content", ")", ")", "data", "=", "json", ".", "loads", "(", "stream", ".", "getvalue", "(", ")", ")", "return", "data" ]
Parse request content into Python data.

@param content: Context of request.

@raise ParseError:
[ "Parse", "data", "request", "to", "data", "from", "python", "." ]
python
train
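Note that BytesIO(str(content)) is a Python 2 idiom; on Python 3, BytesIO rejects str. A sketch of an equivalent that works on Python 3 (the sample payload is made up):

import json
from io import BytesIO

def parse(content):
    if content:
        raw = content.encode('utf-8') if isinstance(content, str) else content
        stream = BytesIO(raw)  # kept only to mirror the original's shape
        return json.loads(stream.getvalue())

print(parse('{"kind": "vlan", "id": 3}'))  # -> {'kind': 'vlan', 'id': 3}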
jepegit/cellpy
cellpy/utils/batch_tools/batch_experiments.py
https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/utils/batch_tools/batch_experiments.py#L268-L315
def link(self):
    """Ensure that an appropriate link to the cellpy-files exists for
    each cell.

    The experiment will then contain a CellpyData object for each cell
    (in the cell_data_frames attribute) with only the step-table stored.

    Remark that running update persists the summary frames instead (or
    everything in case you specify all_in_memory=True). This might be
    considered "a strange and unexpected behaviour". Sorry for that (but
    the authors of this package are also a bit strange...).
    """
    logging.info("[establishing links]")
    logging.debug("checking and establishing link to data")
    cell_data_frames = dict()
    counter = 0
    errors = []
    try:
        for indx, row in self.journal.pages.iterrows():
            counter += 1
            l_txt = "starting to process file # %i (index=%s)" % (counter, indx)
            logging.debug(l_txt)
            logging.info(f"linking cellpy-file: {row.cellpy_file_names}")
            if not os.path.isfile(row.cellpy_file_names):
                logging.error("File does not exist")
                raise IOError
            cell_data_frames[indx] = cellreader.CellpyData(initialize=True)
            step_table = helper.look_up_and_get(
                row.cellpy_file_names, "step_table"
            )
            cell_data_frames[indx].dataset.step_table = step_table
        self.cell_data_frames = cell_data_frames
    except IOError as e:
        logging.warning(e)
        e_txt = "links not established - try update"
        logging.warning(e_txt)
        errors.append(e_txt)
    self.errors["link"] = errors
[ "def", "link", "(", "self", ")", ":", "logging", ".", "info", "(", "\"[estblishing links]\"", ")", "logging", ".", "debug", "(", "\"checking and establishing link to data\"", ")", "cell_data_frames", "=", "dict", "(", ")", "counter", "=", "0", "errors", "=", "[", "]", "try", ":", "for", "indx", ",", "row", "in", "self", ".", "journal", ".", "pages", ".", "iterrows", "(", ")", ":", "counter", "+=", "1", "l_txt", "=", "\"starting to process file # %i (index=%s)\"", "%", "(", "counter", ",", "indx", ")", "logging", ".", "debug", "(", "l_txt", ")", "logging", ".", "info", "(", "f\"linking cellpy-file: {row.cellpy_file_names}\"", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "row", ".", "cellpy_file_names", ")", ":", "logging", ".", "error", "(", "\"File does not exist\"", ")", "raise", "IOError", "cell_data_frames", "[", "indx", "]", "=", "cellreader", ".", "CellpyData", "(", "initialize", "=", "True", ")", "step_table", "=", "helper", ".", "look_up_and_get", "(", "row", ".", "cellpy_file_names", ",", "\"step_table\"", ")", "cell_data_frames", "[", "indx", "]", ".", "dataset", ".", "step_table", "=", "step_table", "self", ".", "cell_data_frames", "=", "cell_data_frames", "except", "IOError", "as", "e", ":", "logging", ".", "warning", "(", "e", ")", "e_txt", "=", "\"links not established - try update\"", "logging", ".", "warning", "(", "e_txt", ")", "errors", ".", "append", "(", "e_txt", ")", "self", ".", "errors", "[", "\"link\"", "]", "=", "errors" ]
Ensure that an appropriate link to the cellpy-files exists for each cell.

The experiment will then contain a CellpyData object for each cell (in the
cell_data_frames attribute) with only the step-table stored.

Remark that running update persists the summary frames instead (or
everything in case you specify all_in_memory=True). This might be
considered "a strange and unexpected behaviour". Sorry for that (but the
authors of this package are also a bit strange...).
[ "Ensure", "that", "an", "appropriate", "link", "to", "the", "cellpy", "-", "files", "exists", "for", "each", "cell", "." ]
python
train
cbclab/MOT
mot/mcmc_diagnostics.py
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/mcmc_diagnostics.py#L444-L462
def monte_carlo_standard_error(chain, batch_size_generator=None, compute_method=None):
    """Compute Monte Carlo standard errors for the expectations

    This is a convenience function that calls the compute method for each
    batch size and returns the lowest standard error over the used batch
    sizes.

    Args:
        chain (ndarray): the Markov chain
        batch_size_generator (UniVariateESSBatchSizeGenerator): the method
            that generates the batch sizes we will use. Per default it
            uses the :class:`SquareRootSingleBatch` method.
        compute_method (ComputeMonteCarloStandardError): the method used
            to compute the standard error. By default we will use the
            :class:`BatchMeansMCSE` method
    """
    batch_size_generator = batch_size_generator or SquareRootSingleBatch()
    compute_method = compute_method or BatchMeansMCSE()
    batch_sizes = batch_size_generator.get_univariate_ess_batch_sizes(len(chain))
    return np.min(list(compute_method.compute_standard_error(chain, b)
                       for b in batch_sizes))
[ "def", "monte_carlo_standard_error", "(", "chain", ",", "batch_size_generator", "=", "None", ",", "compute_method", "=", "None", ")", ":", "batch_size_generator", "=", "batch_size_generator", "or", "SquareRootSingleBatch", "(", ")", "compute_method", "=", "compute_method", "or", "BatchMeansMCSE", "(", ")", "batch_sizes", "=", "batch_size_generator", ".", "get_univariate_ess_batch_sizes", "(", "len", "(", "chain", ")", ")", "return", "np", ".", "min", "(", "list", "(", "compute_method", ".", "compute_standard_error", "(", "chain", ",", "b", ")", "for", "b", "in", "batch_sizes", ")", ")" ]
Compute Monte Carlo standard errors for the expectations

This is a convenience function that calls the compute method for each
batch size and returns the lowest standard error over the used batch
sizes.

Args:
    chain (ndarray): the Markov chain
    batch_size_generator (UniVariateESSBatchSizeGenerator): the method
        that generates the batch sizes we will use. Per default it uses
        the :class:`SquareRootSingleBatch` method.
    compute_method (ComputeMonteCarloStandardError): the method used to
        compute the standard error. By default we will use the
        :class:`BatchMeansMCSE` method
[ "Compute", "Monte", "Carlo", "standard", "errors", "for", "the", "expectations" ]
python
train
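For context, a numpy sketch of the textbook batch-means estimator that a class like BatchMeansMCSE presumably implements (this is the standard formula, not MOT's actual code): split the chain into contiguous batches of size b and estimate the standard error of the mean from the spread of the batch means.

import numpy as np

def batch_means_mcse(chain, batch_size):
    # Drop the ragged tail, form batch means, then MCSE = sqrt(b * var / n).
    n = (len(chain) // batch_size) * batch_size
    batch_means = chain[:n].reshape(-1, batch_size).mean(axis=1)
    return np.sqrt(batch_size * batch_means.var(ddof=1) / n)

chain = np.random.default_rng(0).normal(size=10_000)  # i.i.d. toy "chain"
print(batch_means_mcse(chain, 100))  # close to 1/sqrt(10000) = 0.01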
7sDream/zhihu-py3
zhihu/author.py
https://github.com/7sDream/zhihu-py3/blob/bcb4aa8325f8b54d3b44bd0bdc959edd9761fcfc/zhihu/author.py#L66-L73
def id(self):
    """Get the user id, i.e. the last part of the profile URL.

    :return: user id
    :rtype: str
    """
    return re.match(r'^.*/([^/]+)/$', self.url).group(1) \
        if self.url is not None else ''
[ "def", "id", "(", "self", ")", ":", "return", "re", ".", "match", "(", "r'^.*/([^/]+)/$'", ",", "self", ".", "url", ")", ".", "group", "(", "1", ")", "if", "self", ".", "url", "is", "not", "None", "else", "''" ]
Get the user id, i.e. the last part of the profile URL.

:return: user id
:rtype: str
[ "获取用户id,就是网址最后那一部分", "." ]
python
train
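The regex anchors on the trailing slash and captures the last path segment. A quick demonstration (the URL is made up):

import re

url = 'https://www.zhihu.com/people/example-id/'
print(re.match(r'^.*/([^/]+)/$', url).group(1))  # -> 'example-id'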
christian-oudard/htmltreediff
htmltreediff/text.py
https://github.com/christian-oudard/htmltreediff/blob/0e28f56492ae7e69bb0f74f9a79a8909a5ad588d/htmltreediff/text.py#L140-L146
def match_length(self):
    """ Find the total length of all words that match between the two sequences."""
    length = 0
    for match in self.get_matching_blocks():
        a, b, size = match
        length += self._text_length(self.a[a:a+size])
    return length
[ "def", "match_length", "(", "self", ")", ":", "length", "=", "0", "for", "match", "in", "self", ".", "get_matching_blocks", "(", ")", ":", "a", ",", "b", ",", "size", "=", "match", "length", "+=", "self", ".", "_text_length", "(", "self", ".", "a", "[", "a", ":", "a", "+", "size", "]", ")", "return", "length" ]
Find the total length of all words that match between the two sequences.
[ "Find", "the", "total", "length", "of", "all", "words", "that", "match", "between", "the", "two", "sequences", "." ]
python
train
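The class above builds on difflib's matching blocks; a standalone sketch with plain word lists, assuming _text_length simply sums the word lengths:

from difflib import SequenceMatcher

a = 'the quick brown fox'.split()
b = 'the slow brown fox'.split()

length = 0
for i, j, size in SequenceMatcher(a=a, b=b).get_matching_blocks():
    length += sum(len(word) for word in a[i:i + size])

print(length)  # 3 + 5 + 3 = 11, for 'the', 'brown', 'fox'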
Galarzaa90/tibia.py
tibiapy/world.py
https://github.com/Galarzaa90/tibia.py/blob/02ba1a8f1e18177ef5c7dcd44affc8d761d59e12/tibiapy/world.py#L306-L352
def _parse_world_info(self, world_info_table):
    """
    Parses the World Information table from Tibia.com and adds the found
    values to the object.

    Parameters
    ----------
    world_info_table: :class:`list`[:class:`bs4.Tag`]
    """
    world_info = {}
    for row in world_info_table:
        cols_raw = row.find_all('td')
        cols = [ele.text.strip() for ele in cols_raw]
        field, value = cols
        field = field.replace("\xa0", "_").replace(" ", "_").replace(":", "").lower()
        value = value.replace("\xa0", " ")
        world_info[field] = value
    try:
        self.online_count = int(world_info.pop("players_online"))
    except KeyError:
        self.online_count = 0
    self.location = try_enum(WorldLocation, world_info.pop("location"))
    self.pvp_type = try_enum(PvpType, world_info.pop("pvp_type"))
    self.transfer_type = try_enum(TransferType,
                                  world_info.pop("transfer_type", None),
                                  TransferType.REGULAR)
    m = record_regexp.match(world_info.pop("online_record"))
    if m:
        self.record_count = int(m.group("count"))
        self.record_date = parse_tibia_datetime(m.group("date"))
    if "world_quest_titles" in world_info:
        self.world_quest_titles = [q.strip() for q in
                                   world_info.pop("world_quest_titles").split(",")]
    self.experimental = world_info.pop("game_world_type") != "Regular"
    self._parse_battleye_status(world_info.pop("battleye_status"))
    self.premium_only = "premium_type" in world_info

    month, year = world_info.pop("creation_date").split("/")
    month = int(month)
    year = int(year)
    if year > 90:
        year += 1900
    else:
        year += 2000
    self.creation_date = "%d-%02d" % (year, month)

    for k, v in world_info.items():
        try:
            setattr(self, k, v)
        except AttributeError:
            pass
[ "def", "_parse_world_info", "(", "self", ",", "world_info_table", ")", ":", "world_info", "=", "{", "}", "for", "row", "in", "world_info_table", ":", "cols_raw", "=", "row", ".", "find_all", "(", "'td'", ")", "cols", "=", "[", "ele", ".", "text", ".", "strip", "(", ")", "for", "ele", "in", "cols_raw", "]", "field", ",", "value", "=", "cols", "field", "=", "field", ".", "replace", "(", "\"\\xa0\"", ",", "\"_\"", ")", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", ".", "replace", "(", "\":\"", ",", "\"\"", ")", ".", "lower", "(", ")", "value", "=", "value", ".", "replace", "(", "\"\\xa0\"", ",", "\" \"", ")", "world_info", "[", "field", "]", "=", "value", "try", ":", "self", ".", "online_count", "=", "int", "(", "world_info", ".", "pop", "(", "\"players_online\"", ")", ")", "except", "KeyError", ":", "self", ".", "online_count", "=", "0", "self", ".", "location", "=", "try_enum", "(", "WorldLocation", ",", "world_info", ".", "pop", "(", "\"location\"", ")", ")", "self", ".", "pvp_type", "=", "try_enum", "(", "PvpType", ",", "world_info", ".", "pop", "(", "\"pvp_type\"", ")", ")", "self", ".", "transfer_type", "=", "try_enum", "(", "TransferType", ",", "world_info", ".", "pop", "(", "\"transfer_type\"", ",", "None", ")", ",", "TransferType", ".", "REGULAR", ")", "m", "=", "record_regexp", ".", "match", "(", "world_info", ".", "pop", "(", "\"online_record\"", ")", ")", "if", "m", ":", "self", ".", "record_count", "=", "int", "(", "m", ".", "group", "(", "\"count\"", ")", ")", "self", ".", "record_date", "=", "parse_tibia_datetime", "(", "m", ".", "group", "(", "\"date\"", ")", ")", "if", "\"world_quest_titles\"", "in", "world_info", ":", "self", ".", "world_quest_titles", "=", "[", "q", ".", "strip", "(", ")", "for", "q", "in", "world_info", ".", "pop", "(", "\"world_quest_titles\"", ")", ".", "split", "(", "\",\"", ")", "]", "self", ".", "experimental", "=", "world_info", ".", "pop", "(", "\"game_world_type\"", ")", "!=", "\"Regular\"", "self", ".", "_parse_battleye_status", "(", "world_info", ".", "pop", "(", "\"battleye_status\"", ")", ")", "self", ".", "premium_only", "=", "\"premium_type\"", "in", "world_info", "month", ",", "year", "=", "world_info", ".", "pop", "(", "\"creation_date\"", ")", ".", "split", "(", "\"/\"", ")", "month", "=", "int", "(", "month", ")", "year", "=", "int", "(", "year", ")", "if", "year", ">", "90", ":", "year", "+=", "1900", "else", ":", "year", "+=", "2000", "self", ".", "creation_date", "=", "\"%d-%02d\"", "%", "(", "year", ",", "month", ")", "for", "k", ",", "v", "in", "world_info", ".", "items", "(", ")", ":", "try", ":", "setattr", "(", "self", ",", "k", ",", "v", ")", "except", "AttributeError", ":", "pass" ]
Parses the World Information table from Tibia.com and adds the found
values to the object.

Parameters
----------
world_info_table: :class:`list`[:class:`bs4.Tag`]
[ "Parses", "the", "World", "Information", "table", "from", "Tibia", ".", "com", "and", "adds", "the", "found", "values", "to", "the", "object", "." ]
python
train
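The two-digit-year pivot near the end is easy to miss; isolated, it looks like this (the dates are illustrative):

def expand_creation_date(raw):
    # Tibia.com reports MM/YY; years above 90 are 19xx, the rest 20xx.
    month, year = (int(part) for part in raw.split('/'))
    year += 1900 if year > 90 else 2000
    return '%d-%02d' % (year, month)

print(expand_creation_date('04/97'))  # -> '1997-04'
print(expand_creation_date('07/18'))  # -> '2018-07'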
deeshugupta/tes
es_commands/tes.py
https://github.com/deeshugupta/tes/blob/217db49aa211ebca2d9258380765a0c31abfca91/es_commands/tes.py#L32-L58
def configure(name, host, port, auth, current):
    '''
    Configure is used to add the various ES ports you are working on.
    The user can add as many ES ports as they want, but only one remains
    active at any one time.
    '''
    Config = ConfigParser.ConfigParser()
    if not os.path.exists(os.path.dirname(filename)):
        try:
            os.makedirs(os.path.dirname(filename))
        except Exception as e:
            click.echo(e)
            return
    section_name = None
    if (current.lower() == 'y'):
        section_name = 'Current'
        change_current()
    else:
        section_name = name.capitalize()
    cfgfile = open(filename, 'a')
    Config.add_section(section_name)
    Config.set(section_name, 'host', host)
    Config.set(section_name, 'port', port)
    Config.set(section_name, 'auth', auth)
    Config.set(section_name, 'name', name.capitalize())
    Config.write(cfgfile)
    cfgfile.close()
[ "def", "configure", "(", "name", ",", "host", ",", "port", ",", "auth", ",", "current", ")", ":", "Config", "=", "ConfigParser", ".", "ConfigParser", "(", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", ":", "try", ":", "os", ".", "makedirs", "(", "os", ".", "path", ".", "dirname", "(", "filename", ")", ")", "except", "Exception", "as", "e", ":", "click", ".", "echo", "(", "e", ")", "return", "section_name", "=", "None", "if", "(", "current", ".", "lower", "(", ")", "==", "'y'", ")", ":", "section_name", "=", "'Current'", "change_current", "(", ")", "else", ":", "section_name", "=", "name", ".", "capitalize", "(", ")", "cfgfile", "=", "open", "(", "filename", ",", "'a'", ")", "Config", ".", "add_section", "(", "section_name", ")", "Config", ".", "set", "(", "section_name", ",", "'host'", ",", "host", ")", "Config", ".", "set", "(", "section_name", ",", "'port'", ",", "port", ")", "Config", ".", "set", "(", "section_name", ",", "'auth'", ",", "auth", ")", "Config", ".", "set", "(", "section_name", ",", "'name'", ",", "name", ".", "capitalize", "(", ")", ")", "Config", ".", "write", "(", "cfgfile", ")", "cfgfile", ".", "close", "(", ")" ]
Configure is used to add the various ES ports you are working on. The user
can add as many ES ports as they want, but only one remains active at any
one time.
[ "Configure", "is", "used", "to", "add", "various", "ES", "ports", "you", "are", "working", "on", ".", "The", "user", "can", "add", "as", "many", "es", "ports", "as", "the", "one", "wants", "but", "one", "will", "remain", "active", "at", "one", "point", "." ]
python
train
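One caveat with the code above: the file is opened in append mode, so configuring the same name twice appends a duplicate section, which a strict parser may reject on the next read. A Python 3 sketch that rewrites the file whole instead (the filename and values are illustrative):

import configparser

config = configparser.ConfigParser()
config['Current'] = {'host': 'localhost', 'port': '9200',
                     'auth': 'none', 'name': 'Local'}

with open('tes.cfg', 'w') as cfgfile:  # hypothetical config path
    config.write(cfgfile)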
Pytwitcher/pytwitcherapi
src/pytwitcherapi/session.py
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/src/pytwitcherapi/session.py#L491-L510
def get_playlist(self, channel):
    """Return the playlist for the given channel

    :param channel: the channel
    :type channel: :class:`models.Channel` | :class:`str`
    :returns: the playlist
    :rtype: :class:`m3u8.M3U8`
    :raises: :class:`requests.HTTPError` if channel is offline.
    """
    if isinstance(channel, models.Channel):
        channel = channel.name
    token, sig = self.get_channel_access_token(channel)
    params = {'token': token, 'sig': sig,
              'allow_audio_only': True, 'allow_source': True}
    r = self.usher_request(
        'GET', 'channel/hls/%s.m3u8' % channel, params=params)
    playlist = m3u8.loads(r.text)
    return playlist
[ "def", "get_playlist", "(", "self", ",", "channel", ")", ":", "if", "isinstance", "(", "channel", ",", "models", ".", "Channel", ")", ":", "channel", "=", "channel", ".", "name", "token", ",", "sig", "=", "self", ".", "get_channel_access_token", "(", "channel", ")", "params", "=", "{", "'token'", ":", "token", ",", "'sig'", ":", "sig", ",", "'allow_audio_only'", ":", "True", ",", "'allow_source'", ":", "True", "}", "r", "=", "self", ".", "usher_request", "(", "'GET'", ",", "'channel/hls/%s.m3u8'", "%", "channel", ",", "params", "=", "params", ")", "playlist", "=", "m3u8", ".", "loads", "(", "r", ".", "text", ")", "return", "playlist" ]
Return the playlist for the given channel

:param channel: the channel
:type channel: :class:`models.Channel` | :class:`str`
:returns: the playlist
:rtype: :class:`m3u8.M3U8`
:raises: :class:`requests.HTTPError` if channel is offline.
[ "Return", "the", "playlist", "for", "the", "given", "channel" ]
python
train
ska-sa/katcp-python
katcp/client.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/client.py#L1112-L1127
def _pop_async_request(self, msg_id, msg_name):
    """Pop the set of callbacks for a request.

    Return tuple of Nones if callbacks already popped (or don't exist).

    """
    assert get_thread_ident() == self.ioloop_thread_id
    if msg_id is None:
        msg_id = self._msg_id_for_name(msg_name)
    if msg_id in self._async_queue:
        callback_tuple = self._async_queue[msg_id]
        del self._async_queue[msg_id]
        self._async_id_stack[callback_tuple[0].name].remove(msg_id)
        return callback_tuple
    else:
        return None, None, None, None, None
[ "def", "_pop_async_request", "(", "self", ",", "msg_id", ",", "msg_name", ")", ":", "assert", "get_thread_ident", "(", ")", "==", "self", ".", "ioloop_thread_id", "if", "msg_id", "is", "None", ":", "msg_id", "=", "self", ".", "_msg_id_for_name", "(", "msg_name", ")", "if", "msg_id", "in", "self", ".", "_async_queue", ":", "callback_tuple", "=", "self", ".", "_async_queue", "[", "msg_id", "]", "del", "self", ".", "_async_queue", "[", "msg_id", "]", "self", ".", "_async_id_stack", "[", "callback_tuple", "[", "0", "]", ".", "name", "]", ".", "remove", "(", "msg_id", ")", "return", "callback_tuple", "else", ":", "return", "None", ",", "None", ",", "None", ",", "None", ",", "None" ]
Pop the set of callbacks for a request. Return tuple of Nones if callbacks already popped (or don't exist).
[ "Pop", "the", "set", "of", "callbacks", "for", "a", "request", "." ]
python
train
Turbo87/aerofiles
aerofiles/igc/writer.py
https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/igc/writer.py#L769-L806
def write_satellites(self, *args):
    """
    Write a satellite constellation record::

        writer.write_satellites(datetime.time(12, 34, 56), [1, 2, 5, 22])
        # -> F12345601020522

    :param time: UTC time of the satellite constellation record
        (default: :meth:`~datetime.datetime.utcnow`)
    :param satellites: a list of satellite IDs as either two-character
        strings or integers below 100
    """
    num_args = len(args)
    if num_args not in (1, 2):
        raise ValueError('Invalid number of parameters received')
    if num_args == 1:
        satellites = args[0]
        time = None
    else:
        time, satellites = args
    if time is None:
        time = datetime.datetime.utcnow()
    record = self.format_time(time)
    for satellite in satellites:
        if isinstance(satellite, int):
            satellite = '%02d' % satellite
        if len(satellite) != 2:
            raise ValueError('Invalid satellite ID')
        record += satellite
    self.write_record('F', record)
[ "def", "write_satellites", "(", "self", ",", "*", "args", ")", ":", "num_args", "=", "len", "(", "args", ")", "if", "num_args", "not", "in", "(", "1", ",", "2", ")", ":", "raise", "ValueError", "(", "'Invalid number of parameters received'", ")", "if", "num_args", "==", "1", ":", "satellites", "=", "args", "[", "0", "]", "time", "=", "None", "else", ":", "time", ",", "satellites", "=", "args", "if", "time", "is", "None", ":", "time", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "record", "=", "self", ".", "format_time", "(", "time", ")", "for", "satellite", "in", "satellites", ":", "if", "isinstance", "(", "satellite", ",", "int", ")", ":", "satellite", "=", "'%02d'", "%", "satellite", "if", "len", "(", "satellite", ")", "!=", "2", ":", "raise", "ValueError", "(", "'Invalid satellite ID'", ")", "record", "+=", "satellite", "self", ".", "write_record", "(", "'F'", ",", "record", ")" ]
Write a satellite constellation record::

    writer.write_satellites(datetime.time(12, 34, 56), [1, 2, 5, 22])
    # -> F12345601020522

:param time: UTC time of the satellite constellation record
    (default: :meth:`~datetime.datetime.utcnow`)
:param satellites: a list of satellite IDs as either two-character
    strings or integers below 100
[ "Write", "a", "satellite", "constellation", "record", "::" ]
python
train
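A standalone sketch of the record assembly, assuming format_time renders HHMMSS (consistent with the documented F12345601020522 output):

import datetime

def f_record(time, satellites):
    # 'F' + HHMMSS + one two-character ID per satellite.
    record = time.strftime('%H%M%S')
    for sat in satellites:
        if isinstance(sat, int):
            sat = '%02d' % sat
        if len(sat) != 2:
            raise ValueError('Invalid satellite ID')
        record += sat
    return 'F' + record

print(f_record(datetime.time(12, 34, 56), [1, 2, 5, 22]))
# -> 'F12345601020522'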
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/breakpoint.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/breakpoint.py#L4173-L4232
def __set_variable_watch(self, tid, address, size, action):
    """
    Used by L{watch_variable} and L{stalk_variable}.

    @type  tid: int
    @param tid: Thread global ID.

    @type  address: int
    @param address: Memory address of variable to watch.

    @type  size: int
    @param size: Size of variable to watch. The only supported sizes are:
        byte (1), word (2), dword (4) and qword (8).

    @type  action: function
    @param action: (Optional) Action callback function.
        See L{define_hardware_breakpoint} for more details.

    @rtype:  L{HardwareBreakpoint}
    @return: Hardware breakpoint at the requested address.
    """
    # TODO
    # We should merge the breakpoints instead of overwriting them.
    # We'll have the same problem as watch_buffer and we'll need to change
    # the API again.
    if size == 1:
        sizeFlag = self.BP_WATCH_BYTE
    elif size == 2:
        sizeFlag = self.BP_WATCH_WORD
    elif size == 4:
        sizeFlag = self.BP_WATCH_DWORD
    elif size == 8:
        sizeFlag = self.BP_WATCH_QWORD
    else:
        raise ValueError("Bad size for variable watch: %r" % size)
    if self.has_hardware_breakpoint(tid, address):
        warnings.warn(
            "Hardware breakpoint in thread %d at address %s was overwritten!"
            % (tid, HexDump.address(address,
                                    self.system.get_thread(tid).get_bits())),
            BreakpointWarning)
        bp = self.get_hardware_breakpoint(tid, address)
        if bp.get_trigger() != self.BP_BREAK_ON_ACCESS or \
                bp.get_watch() != sizeFlag:
            self.erase_hardware_breakpoint(tid, address)
            self.define_hardware_breakpoint(tid, address,
                                            self.BP_BREAK_ON_ACCESS,
                                            sizeFlag, True, action)
            bp = self.get_hardware_breakpoint(tid, address)
    else:
        self.define_hardware_breakpoint(tid, address,
                                        self.BP_BREAK_ON_ACCESS,
                                        sizeFlag, True, action)
        bp = self.get_hardware_breakpoint(tid, address)
    return bp
[ "def", "__set_variable_watch", "(", "self", ",", "tid", ",", "address", ",", "size", ",", "action", ")", ":", "# TODO", "# We should merge the breakpoints instead of overwriting them.", "# We'll have the same problem as watch_buffer and we'll need to change", "# the API again.", "if", "size", "==", "1", ":", "sizeFlag", "=", "self", ".", "BP_WATCH_BYTE", "elif", "size", "==", "2", ":", "sizeFlag", "=", "self", ".", "BP_WATCH_WORD", "elif", "size", "==", "4", ":", "sizeFlag", "=", "self", ".", "BP_WATCH_DWORD", "elif", "size", "==", "8", ":", "sizeFlag", "=", "self", ".", "BP_WATCH_QWORD", "else", ":", "raise", "ValueError", "(", "\"Bad size for variable watch: %r\"", "%", "size", ")", "if", "self", ".", "has_hardware_breakpoint", "(", "tid", ",", "address", ")", ":", "warnings", ".", "warn", "(", "\"Hardware breakpoint in thread %d at address %s was overwritten!\"", "%", "(", "tid", ",", "HexDump", ".", "address", "(", "address", ",", "self", ".", "system", ".", "get_thread", "(", "tid", ")", ".", "get_bits", "(", ")", ")", ")", ",", "BreakpointWarning", ")", "bp", "=", "self", ".", "get_hardware_breakpoint", "(", "tid", ",", "address", ")", "if", "bp", ".", "get_trigger", "(", ")", "!=", "self", ".", "BP_BREAK_ON_ACCESS", "or", "bp", ".", "get_watch", "(", ")", "!=", "sizeFlag", ":", "self", ".", "erase_hardware_breakpoint", "(", "tid", ",", "address", ")", "self", ".", "define_hardware_breakpoint", "(", "tid", ",", "address", ",", "self", ".", "BP_BREAK_ON_ACCESS", ",", "sizeFlag", ",", "True", ",", "action", ")", "bp", "=", "self", ".", "get_hardware_breakpoint", "(", "tid", ",", "address", ")", "else", ":", "self", ".", "define_hardware_breakpoint", "(", "tid", ",", "address", ",", "self", ".", "BP_BREAK_ON_ACCESS", ",", "sizeFlag", ",", "True", ",", "action", ")", "bp", "=", "self", ".", "get_hardware_breakpoint", "(", "tid", ",", "address", ")", "return", "bp" ]
Used by L{watch_variable} and L{stalk_variable}.

@type  tid: int
@param tid: Thread global ID.

@type  address: int
@param address: Memory address of variable to watch.

@type  size: int
@param size: Size of variable to watch. The only supported sizes are:
    byte (1), word (2), dword (4) and qword (8).

@type  action: function
@param action: (Optional) Action callback function.
    See L{define_hardware_breakpoint} for more details.

@rtype:  L{HardwareBreakpoint}
@return: Hardware breakpoint at the requested address.
[ "Used", "by", "L", "{", "watch_variable", "}", "and", "L", "{", "stalk_variable", "}", "." ]
python
train
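The size-to-flag dispatch could also be table-driven; a sketch of that alternative (the flag names echo the constants above, but the values here are placeholders, not winappdbg's actual constants):

WATCH_FLAGS = {
    1: 'BP_WATCH_BYTE',   # placeholder values
    2: 'BP_WATCH_WORD',
    4: 'BP_WATCH_DWORD',
    8: 'BP_WATCH_QWORD',
}

def size_flag(size):
    try:
        return WATCH_FLAGS[size]
    except KeyError:
        raise ValueError("Bad size for variable watch: %r" % size)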
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L6056-L6061
def Show(self, waitTime: float = OPERATION_WAIT_TIME) -> bool:
    """
    Call native `ShowWindow(SW.Show)`.
    Return bool, True if it succeeds, otherwise False.
    """
    return self.ShowWindow(SW.Show, waitTime)
[ "def", "Show", "(", "self", ",", "waitTime", ":", "float", "=", "OPERATION_WAIT_TIME", ")", "->", "bool", ":", "return", "self", ".", "ShowWindow", "(", "SW", ".", "Show", ",", "waitTime", ")" ]
Call native `ShowWindow(SW.Show)`.
Return bool, True if it succeeds, otherwise False.
[ "Call", "native", "ShowWindow", "(", "SW", ".", "Show", ")", ".", "Return", "bool", "True", "if", "succeed", "otherwise", "False", "." ]
python
valid
abilian/abilian-core
abilian/i18n.py
https://github.com/abilian/abilian-core/blob/0a71275bf108c3d51e13ca9e093c0249235351e3/abilian/i18n.py#L302-L315
def localeselector():
    """Default locale selector used in abilian applications."""
    # if a user is logged in, use the locale from the user settings
    user = getattr(g, "user", None)
    if user is not None:
        locale = getattr(user, "locale", None)
        if locale:
            return locale

    # Otherwise, try to guess the language from the user accept header the
    # browser transmits. By default we support en/fr. The best match wins.
    return request.accept_languages.best_match(
        current_app.config["BABEL_ACCEPT_LANGUAGES"]
    )
[ "def", "localeselector", "(", ")", ":", "# if a user is logged in, use the locale from the user settings", "user", "=", "getattr", "(", "g", ",", "\"user\"", ",", "None", ")", "if", "user", "is", "not", "None", ":", "locale", "=", "getattr", "(", "user", ",", "\"locale\"", ",", "None", ")", "if", "locale", ":", "return", "locale", "# Otherwise, try to guess the language from the user accept header the browser", "# transmits. By default we support en/fr. The best match wins.", "return", "request", ".", "accept_languages", ".", "best_match", "(", "current_app", ".", "config", "[", "\"BABEL_ACCEPT_LANGUAGES\"", "]", ")" ]
Default locale selector used in abilian applications.
[ "Default", "locale", "selector", "used", "in", "abilian", "applications", "." ]
python
train
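The selection order is: explicit user preference first, then content negotiation. A framework-free sketch of the same fallback chain (these names are illustrative, not Flask-Babel's API):

def select_locale(user_locale, accept_languages, supported=('en', 'fr')):
    if user_locale:                    # logged-in user's setting wins
        return user_locale
    for lang in accept_languages:      # assumed sorted best-first
        if lang in supported:
            return lang

print(select_locale(None, ['de', 'fr', 'en']))  # -> 'fr'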
GetmeUK/MongoFrames
mongoframes/queries.py
https://github.com/GetmeUK/MongoFrames/blob/7d2bd792235dfa77a9deecab5366f5f73480823d/mongoframes/queries.py#L135-L144
def ElemMatch(q, *conditions):
    """
    The ElemMatch operator matches documents that contain an array field
    with at least one element that matches all the specified query
    criteria.
    """
    new_condition = {}
    for condition in conditions:
        deep_merge(condition.to_dict(), new_condition)

    return Condition(q._path, new_condition, '$elemMatch')
[ "def", "ElemMatch", "(", "q", ",", "*", "conditions", ")", ":", "new_condition", "=", "{", "}", "for", "condition", "in", "conditions", ":", "deep_merge", "(", "condition", ".", "to_dict", "(", ")", ",", "new_condition", ")", "return", "Condition", "(", "q", ".", "_path", ",", "new_condition", ",", "'$elemMatch'", ")" ]
The ElemMatch operator matches documents that contain an array field with at least one element that matches all the specified query criteria.
[ "The", "ElemMatch", "operator", "matches", "documents", "that", "contain", "an", "array", "field", "with", "at", "least", "one", "element", "that", "matches", "all", "the", "specified", "query", "criteria", "." ]
python
train
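The query document this produces is ordinary MongoDB syntax: one $elemMatch whose body merges all the condition dicts. Built by hand (the field name and values are illustrative):

conditions = [{'product': 'xyz'}, {'score': {'$gte': 8}}]

merged = {}
for cond in conditions:
    merged.update(cond)  # the real deep_merge also merges nested dicts

query = {'results': {'$elemMatch': merged}}
# -> {'results': {'$elemMatch': {'product': 'xyz', 'score': {'$gte': 8}}}}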
mozilla/FoxPuppet
foxpuppet/windows/browser/window.py
https://github.com/mozilla/FoxPuppet/blob/6575eb4c72fd024c986b254e198c8b4e6f68cddd/foxpuppet/windows/browser/window.py#L66-L93
def wait_for_notification(self, notification_class=BaseNotification):
    """Wait for the specified notification to be displayed.

    Args:
        notification_class (:py:class:`BaseNotification`, optional):
            The notification class to wait for. If `None` is specified it
            will wait for any notification to be closed. Defaults to
            `BaseNotification`.

    Returns:
        :py:class:`BaseNotification`: Firefox notification.

    """
    if notification_class:
        if notification_class is BaseNotification:
            message = "No notification was shown."
        else:
            message = "{0} was not shown.".format(notification_class.__name__)
        self.wait.until(
            lambda _: isinstance(self.notification, notification_class),
            message=message,
        )
        return self.notification
    else:
        self.wait.until(
            lambda _: self.notification is None,
            message="Unexpected notification shown.",
        )
[ "def", "wait_for_notification", "(", "self", ",", "notification_class", "=", "BaseNotification", ")", ":", "if", "notification_class", ":", "if", "notification_class", "is", "BaseNotification", ":", "message", "=", "\"No notification was shown.\"", "else", ":", "message", "=", "\"{0} was not shown.\"", ".", "format", "(", "notification_class", ".", "__name__", ")", "self", ".", "wait", ".", "until", "(", "lambda", "_", ":", "isinstance", "(", "self", ".", "notification", ",", "notification_class", ")", ",", "message", "=", "message", ",", ")", "return", "self", ".", "notification", "else", ":", "self", ".", "wait", ".", "until", "(", "lambda", "_", ":", "self", ".", "notification", "is", "None", ",", "message", "=", "\"Unexpected notification shown.\"", ",", ")" ]
Wait for the specified notification to be displayed.

Args:
    notification_class (:py:class:`BaseNotification`, optional):
        The notification class to wait for. If `None` is specified it
        will wait for any notification to be closed. Defaults to
        `BaseNotification`.

Returns:
    :py:class:`BaseNotification`: Firefox notification.
[ "Wait", "for", "the", "specified", "notification", "to", "be", "displayed", "." ]
python
train
r4fek/django-cassandra-engine
django_cassandra_engine/models/__init__.py
https://github.com/r4fek/django-cassandra-engine/blob/b43f8fddd4bba143f03f73f8bbfc09e6b32c699b/django_cassandra_engine/models/__init__.py#L130-L179
def _give_columns_django_field_attributes(self):
    """
    Add Django Field attributes to each cqlengine.Column instance, so that
    the Django Options class may interact with it as if it were a Django
    Field.
    """
    methods_to_add = (
        django_field_methods.value_from_object,
        django_field_methods.value_to_string,
        django_field_methods.get_attname,
        django_field_methods.get_cache_name,
        django_field_methods.pre_save,
        django_field_methods.get_prep_value,
        django_field_methods.get_choices,
        django_field_methods.get_choices_default,
        django_field_methods.save_form_data,
        django_field_methods.formfield,
        django_field_methods.get_db_prep_value,
        django_field_methods.get_db_prep_save,
        django_field_methods.db_type_suffix,
        django_field_methods.select_format,
        django_field_methods.get_internal_type,
        django_field_methods.get_attname_column,
        django_field_methods.check,
        django_field_methods._check_field_name,
        django_field_methods._check_db_index,
        django_field_methods.deconstruct,
        django_field_methods.run_validators,
        django_field_methods.clean,
        django_field_methods.get_db_converters,
        django_field_methods.get_prep_lookup,
        django_field_methods.get_db_prep_lookup,
        django_field_methods.get_filter_kwargs_for_object,
        django_field_methods.set_attributes_from_name,
        django_field_methods.db_parameters,
        django_field_methods.get_pk_value_on_save,
        django_field_methods.get_col,
    )
    for name, cql_column in six.iteritems(self._defined_columns):
        self._set_column_django_attributes(cql_column=cql_column, name=name)
        for method in methods_to_add:
            try:
                method_name = method.func_name
            except AttributeError:
                # python 3
                method_name = method.__name__

            new_method = six.create_bound_method(method, cql_column)
            setattr(cql_column, method_name, new_method)
[ "def", "_give_columns_django_field_attributes", "(", "self", ")", ":", "methods_to_add", "=", "(", "django_field_methods", ".", "value_from_object", ",", "django_field_methods", ".", "value_to_string", ",", "django_field_methods", ".", "get_attname", ",", "django_field_methods", ".", "get_cache_name", ",", "django_field_methods", ".", "pre_save", ",", "django_field_methods", ".", "get_prep_value", ",", "django_field_methods", ".", "get_choices", ",", "django_field_methods", ".", "get_choices_default", ",", "django_field_methods", ".", "save_form_data", ",", "django_field_methods", ".", "formfield", ",", "django_field_methods", ".", "get_db_prep_value", ",", "django_field_methods", ".", "get_db_prep_save", ",", "django_field_methods", ".", "db_type_suffix", ",", "django_field_methods", ".", "select_format", ",", "django_field_methods", ".", "get_internal_type", ",", "django_field_methods", ".", "get_attname_column", ",", "django_field_methods", ".", "check", ",", "django_field_methods", ".", "_check_field_name", ",", "django_field_methods", ".", "_check_db_index", ",", "django_field_methods", ".", "deconstruct", ",", "django_field_methods", ".", "run_validators", ",", "django_field_methods", ".", "clean", ",", "django_field_methods", ".", "get_db_converters", ",", "django_field_methods", ".", "get_prep_lookup", ",", "django_field_methods", ".", "get_db_prep_lookup", ",", "django_field_methods", ".", "get_filter_kwargs_for_object", ",", "django_field_methods", ".", "set_attributes_from_name", ",", "django_field_methods", ".", "db_parameters", ",", "django_field_methods", ".", "get_pk_value_on_save", ",", "django_field_methods", ".", "get_col", ",", ")", "for", "name", ",", "cql_column", "in", "six", ".", "iteritems", "(", "self", ".", "_defined_columns", ")", ":", "self", ".", "_set_column_django_attributes", "(", "cql_column", "=", "cql_column", ",", "name", "=", "name", ")", "for", "method", "in", "methods_to_add", ":", "try", ":", "method_name", "=", "method", ".", "func_name", "except", "AttributeError", ":", "# python 3", "method_name", "=", "method", ".", "__name__", "new_method", "=", "six", ".", "create_bound_method", "(", "method", ",", "cql_column", ")", "setattr", "(", "cql_column", ",", "method_name", ",", "new_method", ")" ]
Add Django Field attributes to each cqlengine.Column instance, so that
the Django Options class may interact with it as if it were a Django
Field.
[ "Add", "Django", "Field", "attributes", "to", "each", "cqlengine", ".", "Column", "instance", "." ]
python
train
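The interesting trick is six.create_bound_method, which grafts a plain function onto an instance; on Python 3 it is equivalent to types.MethodType. A minimal sketch with a toy class:

import types

class Column:
    def __init__(self, name):
        self.name = name

def get_attname(self):
    # Standalone function about to be bound to an instance.
    return self.name

col = Column('email')
col.get_attname = types.MethodType(get_attname, col)  # bind to the instance
print(col.get_attname())  # -> 'email'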
saltstack/salt
salt/sdb/consul.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/sdb/consul.py#L66-L81
def get_conn(profile):
    '''
    Return a client object for accessing consul
    '''
    params = {}
    for key in ('host', 'port', 'token', 'scheme',
                'consistency', 'dc', 'verify'):
        if key in profile:
            params[key] = profile[key]

    if HAS_CONSUL:
        return consul.Consul(**params)
    else:
        raise CommandExecutionError(
            '(unable to import consul, '
            'module most likely not installed. Please install python-consul)'
        )
[ "def", "get_conn", "(", "profile", ")", ":", "params", "=", "{", "}", "for", "key", "in", "(", "'host'", ",", "'port'", ",", "'token'", ",", "'scheme'", ",", "'consistency'", ",", "'dc'", ",", "'verify'", ")", ":", "if", "key", "in", "profile", ":", "params", "[", "key", "]", "=", "profile", "[", "key", "]", "if", "HAS_CONSUL", ":", "return", "consul", ".", "Consul", "(", "*", "*", "params", ")", "else", ":", "raise", "CommandExecutionError", "(", "'(unable to import consul, '", "'module most likely not installed. PLease install python-consul)'", ")" ]
Return a client object for accessing consul
[ "Return", "a", "client", "object", "for", "accessing", "consul" ]
python
train
fr33jc/bang
bang/providers/bases.py
https://github.com/fr33jc/bang/blob/8f000713f88d2a9a8c1193b63ca10a6578560c16/bang/providers/bases.py#L55-L66
def get_consul(self, resource_type):
    """
    Returns an object that a :class:`~bang.deployers.deployer.Deployer`
    uses to control resources of :attr:`resource_type`.

    :param str resource_type: Any of the resources defined in
        :mod:`bang.resources`.
    """
    consul = self.CONSUL_MAP.get(resource_type)
    if consul:
        return consul(self)
[ "def", "get_consul", "(", "self", ",", "resource_type", ")", ":", "consul", "=", "self", ".", "CONSUL_MAP", ".", "get", "(", "resource_type", ")", "if", "consul", ":", "return", "consul", "(", "self", ")" ]
Returns an object that a :class:`~bang.deployers.deployer.Deployer` uses
to control resources of :attr:`resource_type`.

:param str resource_type: Any of the resources defined in
    :mod:`bang.resources`.
[ "Returns", "an", "object", "that", "a", ":", "class", ":", "~bang", ".", "deployers", ".", "deployer", ".", "Deployer", "uses", "to", "control", "resources", "of", ":", "attr", ":", "resource_type", "." ]
python
train
librosa/librosa
librosa/output.py
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/output.py#L187-L238
def write_wav(path, y, sr, norm=False):
    """Output a time series as a .wav file

    Note: only mono or stereo, floating-point data is supported.
        For more advanced and flexible output options, refer to
        `soundfile`.

    Parameters
    ----------
    path : str
        path to save the output wav file

    y : np.ndarray [shape=(n,) or (2,n), dtype=np.float]
        audio time series (mono or stereo).
        Note that only floating-point values are supported.

    sr : int > 0 [scalar]
        sampling rate of `y`

    norm : boolean [scalar]
        enable amplitude normalization.
        For floating point `y`, scale the data to the range [-1, +1].

    Examples
    --------
    Trim a signal to 5 seconds and save it back

    >>> y, sr = librosa.load(librosa.util.example_audio_file(),
    ...                      duration=5.0)
    >>> librosa.output.write_wav('file_trim_5s.wav', y, sr)

    See Also
    --------
    soundfile.write
    """
    # Validate the buffer. Stereo is okay here.
    util.valid_audio(y, mono=False)

    # normalize
    if norm and np.issubdtype(y.dtype, np.floating):
        wav = util.normalize(y, norm=np.inf, axis=None)
    else:
        wav = y

    # Check for stereo
    if wav.ndim > 1 and wav.shape[0] == 2:
        wav = wav.T

    # Save
    scipy.io.wavfile.write(path, sr, wav)
[ "def", "write_wav", "(", "path", ",", "y", ",", "sr", ",", "norm", "=", "False", ")", ":", "# Validate the buffer. Stereo is okay here.", "util", ".", "valid_audio", "(", "y", ",", "mono", "=", "False", ")", "# normalize", "if", "norm", "and", "np", ".", "issubdtype", "(", "y", ".", "dtype", ",", "np", ".", "floating", ")", ":", "wav", "=", "util", ".", "normalize", "(", "y", ",", "norm", "=", "np", ".", "inf", ",", "axis", "=", "None", ")", "else", ":", "wav", "=", "y", "# Check for stereo", "if", "wav", ".", "ndim", ">", "1", "and", "wav", ".", "shape", "[", "0", "]", "==", "2", ":", "wav", "=", "wav", ".", "T", "# Save", "scipy", ".", "io", ".", "wavfile", ".", "write", "(", "path", ",", "sr", ",", "wav", ")" ]
Output a time series as a .wav file

Note: only mono or stereo, floating-point data is supported.
    For more advanced and flexible output options, refer to `soundfile`.

Parameters
----------
path : str
    path to save the output wav file

y : np.ndarray [shape=(n,) or (2,n), dtype=np.float]
    audio time series (mono or stereo).
    Note that only floating-point values are supported.

sr : int > 0 [scalar]
    sampling rate of `y`

norm : boolean [scalar]
    enable amplitude normalization.
    For floating point `y`, scale the data to the range [-1, +1].

Examples
--------
Trim a signal to 5 seconds and save it back

>>> y, sr = librosa.load(librosa.util.example_audio_file(),
...                      duration=5.0)
>>> librosa.output.write_wav('file_trim_5s.wav', y, sr)

See Also
--------
soundfile.write
[ "Output", "a", "time", "series", "as", "a", ".", "wav", "file" ]
python
test
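End-to-end, the norm branch is a peak normalization (librosa's util.normalize with norm=np.inf divides by the maximum absolute value). A sketch with plain numpy and scipy, writing one second of a 440 Hz tone to an illustrative path:

import numpy as np
import scipy.io.wavfile

sr = 22050
t = np.arange(sr) / sr
y = 0.5 * np.sin(2 * np.pi * 440 * t).astype(np.float32)

wav = y / np.max(np.abs(y))  # scale to [-1, +1], like norm=True above
scipy.io.wavfile.write('tone.wav', sr, wav)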
saltstack/salt
salt/modules/dockermod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/dockermod.py#L5376-L5417
def connect_container_to_network(container, net_id, **kwargs):
    '''
    .. versionadded:: 2015.8.3
    .. versionchanged:: 2017.7.0
        Support for ``ipv4_address`` argument added
    .. versionchanged:: 2018.3.0
        All arguments are now passed through to
        `connect_container_to_network()`_, allowing for any new arguments
        added to this function to be supported automagically.

    Connect container to network. See the
    `connect_container_to_network()`_ docs for information on supported
    arguments.

    container
        Container name or ID

    net_id
        Network name or ID

    CLI Examples:

    .. code-block:: bash

        salt myminion docker.connect_container_to_network web-1 mynet
        salt myminion docker.connect_container_to_network web-1 mynet ipv4_address=10.20.0.10
        salt myminion docker.connect_container_to_network web-1 1f9d2454d0872b68dd9e8744c6e7a4c66b86f10abaccc21e14f7f014f729b2bc
    '''
    kwargs = __utils__['args.clean_kwargs'](**kwargs)
    log.debug(
        'Connecting container \'%s\' to network \'%s\' with the following '
        'configuration: %s', container, net_id, kwargs
    )
    response = _client_wrapper('connect_container_to_network',
                               container, net_id, **kwargs)
    log.debug(
        'Successfully connected container \'%s\' to network \'%s\'',
        container, net_id
    )
    _clear_context()
    return True
[ "def", "connect_container_to_network", "(", "container", ",", "net_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "__utils__", "[", "'args.clean_kwargs'", "]", "(", "*", "*", "kwargs", ")", "log", ".", "debug", "(", "'Connecting container \\'%s\\' to network \\'%s\\' with the following '", "'configuration: %s'", ",", "container", ",", "net_id", ",", "kwargs", ")", "response", "=", "_client_wrapper", "(", "'connect_container_to_network'", ",", "container", ",", "net_id", ",", "*", "*", "kwargs", ")", "log", ".", "debug", "(", "'Successfully connected container \\'%s\\' to network \\'%s\\''", ",", "container", ",", "net_id", ")", "_clear_context", "(", ")", "return", "True" ]
.. versionadded:: 2015.8.3
.. versionchanged:: 2017.7.0
    Support for ``ipv4_address`` argument added
.. versionchanged:: 2018.3.0
    All arguments are now passed through to
    `connect_container_to_network()`_, allowing for any new arguments
    added to this function to be supported automagically.

Connect container to network. See the `connect_container_to_network()`_
docs for information on supported arguments.

container
    Container name or ID

net_id
    Network name or ID

CLI Examples:

.. code-block:: bash

    salt myminion docker.connect_container_to_network web-1 mynet
    salt myminion docker.connect_container_to_network web-1 mynet ipv4_address=10.20.0.10
    salt myminion docker.connect_container_to_network web-1 1f9d2454d0872b68dd9e8744c6e7a4c66b86f10abaccc21e14f7f014f729b2bc
[ "..", "versionadded", "::", "2015", ".", "8", ".", "3", "..", "versionchanged", "::", "2017", ".", "7", ".", "0", "Support", "for", "ipv4_address", "argument", "added", "..", "versionchanged", "::", "2018", ".", "3", ".", "0", "All", "arguments", "are", "now", "passed", "through", "to", "connect_container_to_network", "()", "_", "allowing", "for", "any", "new", "arguments", "added", "to", "this", "function", "to", "be", "supported", "automagically", "." ]
python
train
clchiou/startup
startup.py
https://github.com/clchiou/startup/blob/13cbf3ce1deffbc10d33a5f64c396a73129a5929/startup.py#L308-L319
def _notify_reader_writes(writeto):
    """Notify reader closures about these writes and return a sorted list
    of thus-satisfied closures.
    """
    satisfied = []
    for var in writeto:
        if var.readable:
            for reader in var.readers:
                reader.notify_read_ready()
                if reader.satisfied:
                    satisfied.append(reader)
    return Closure.sort(satisfied)
[ "def", "_notify_reader_writes", "(", "writeto", ")", ":", "satisfied", "=", "[", "]", "for", "var", "in", "writeto", ":", "if", "var", ".", "readable", ":", "for", "reader", "in", "var", ".", "readers", ":", "reader", ".", "notify_read_ready", "(", ")", "if", "reader", ".", "satisfied", ":", "satisfied", ".", "append", "(", "reader", ")", "return", "Closure", ".", "sort", "(", "satisfied", ")" ]
Notify reader closures about these writes and return a sorted list of thus-satisfied closures.
[ "Notify", "reader", "closures", "about", "these", "writes", "and", "return", "a", "sorted", "list", "of", "thus", "-", "satisfied", "closures", "." ]
python
train
jaredLunde/vital-tools
vital/tools/lists.py
https://github.com/jaredLunde/vital-tools/blob/ea924c9bbb6ec22aa66f8095f018b1ee0099ac04/vital/tools/lists.py#L59-L78
def flatten(*seqs):
    """ Flattens a sequence e.g. |[(1, 2), (3, (4, 5))] -> [1, 2, 3, 4, 5]|

        @seq: #tuple, #list or :class:UserList

        -> yields an iterator
        ..
            l = [(1, 2), (3, 4)]
            for x in flatten(l):
                print(x)
        ..
    """
    for seq in seqs:
        for item in seq:
            if isinstance(item, (tuple, list, UserList)):
                for subitem in flatten(item):
                    yield subitem
            else:
                yield item
[ "def", "flatten", "(", "*", "seqs", ")", ":", "for", "seq", "in", "seqs", ":", "for", "item", "in", "seq", ":", "if", "isinstance", "(", "item", ",", "(", "tuple", ",", "list", ",", "UserList", ")", ")", ":", "for", "subitem", "in", "flatten", "(", "item", ")", ":", "yield", "subitem", "else", ":", "yield", "item" ]
Flattens a sequence e.g. |[(1, 2), (3, (4, 5))] -> [1, 2, 3, 4, 5]|

@seqs: one or more #tuple, #list or :class:UserList

-> yields an iterator
..
    l = [(1, 2), (3, 4)]
    for x in flatten(l):
        print(x)
..
[ "Flattens", "a", "sequence", "e", ".", "g", ".", "|", "[", "(", "1", "2", ")", "(", "3", "(", "4", "5", "))", "]", "-", ">", "[", "1", "2", "3", "4", "5", "]", "|" ]
python
train
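A minimal sketch of the nested case advertised in the `flatten` docstring above, assuming the helper is importable from vital.tools.lists as the record's path suggests:

    from vital.tools.lists import flatten

    nested = [(1, 2), (3, (4, 5))]
    print(list(flatten(nested)))  # [1, 2, 3, 4, 5]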
jobovy/galpy
galpy/orbit/OrbitTop.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/OrbitTop.py#L236-L254
def vR(self,*args,**kwargs): """ NAME: vR PURPOSE: return radial velocity at time t INPUT: t - (optional) time at which to get the radial velocity vo= (Object-wide default) physical scale for velocities to use to convert use_physical= use to override Object-wide default for using a physical scale for output OUTPUT: vR(t) HISTORY: 2010-09-21 - Written - Bovy (NYU) """ thiso= self(*args,**kwargs) onet= (len(thiso.shape) == 1) if onet: return thiso[1] else: return thiso[1,:]
[ "def", "vR", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "thiso", "=", "self", "(", "*", "args", ",", "*", "*", "kwargs", ")", "onet", "=", "(", "len", "(", "thiso", ".", "shape", ")", "==", "1", ")", "if", "onet", ":", "return", "thiso", "[", "1", "]", "else", ":", "return", "thiso", "[", "1", ",", ":", "]" ]
NAME: vR PURPOSE: return radial velocity at time t INPUT: t - (optional) time at which to get the radial velocity vo= (Object-wide default) physical scale for velocities to use to convert use_physical= use to override Object-wide default for using a physical scale for output OUTPUT: vR(t) HISTORY: 2010-09-21 - Written - Bovy (NYU)
[ "NAME", ":", "vR", "PURPOSE", ":", "return", "radial", "velocity", "at", "time", "t", "INPUT", ":", "t", "-", "(", "optional", ")", "time", "at", "which", "to", "get", "the", "radial", "velocity", "vo", "=", "(", "Object", "-", "wide", "default", ")", "physical", "scale", "for", "velocities", "to", "use", "to", "convert", "use_physical", "=", "use", "to", "override", "Object", "-", "wide", "default", "for", "using", "a", "physical", "scale", "for", "output", "OUTPUT", ":", "vR", "(", "t", ")", "HISTORY", ":", "2010", "-", "09", "-", "21", "-", "Written", "-", "Bovy", "(", "NYU", ")" ]
python
train
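A hedged usage sketch for `vR`, assuming the standard galpy pattern of integrating an Orbit in MWPotential2014 (the initial conditions below are illustrative):

    import numpy
    from galpy.orbit import Orbit
    from galpy.potential import MWPotential2014

    o = Orbit([1., 0.1, 1.1, 0., 0.1, 0.])   # [R, vR, vT, z, vz, phi]
    ts = numpy.linspace(0., 10., 1000)
    o.integrate(ts, MWPotential2014)
    print(o.vR(5.))  # radial velocity at t=5 in natural units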
ssato/python-anyconfig
src/anyconfig/utils.py
https://github.com/ssato/python-anyconfig/blob/f2f4fb8d8e232aadea866c202e1dd7a5967e2877/src/anyconfig/utils.py#L417-L438
def is_list_like(obj): """ >>> is_list_like([]) True >>> is_list_like(()) True >>> is_list_like([x for x in range(10)]) True >>> is_list_like((1, 2, 3)) True >>> g = (x for x in range(10)) >>> is_list_like(g) True >>> is_list_like("abc") False >>> is_list_like(0) False >>> is_list_like({}) False """ return isinstance(obj, _LIST_LIKE_TYPES) and \ not (isinstance(obj, anyconfig.compat.STR_TYPES) or is_dict_like(obj))
[ "def", "is_list_like", "(", "obj", ")", ":", "return", "isinstance", "(", "obj", ",", "_LIST_LIKE_TYPES", ")", "and", "not", "(", "isinstance", "(", "obj", ",", "anyconfig", ".", "compat", ".", "STR_TYPES", ")", "or", "is_dict_like", "(", "obj", ")", ")" ]
>>> is_list_like([]) True >>> is_list_like(()) True >>> is_list_like([x for x in range(10)]) True >>> is_list_like((1, 2, 3)) True >>> g = (x for x in range(10)) >>> is_list_like(g) True >>> is_list_like("abc") False >>> is_list_like(0) False >>> is_list_like({}) False
[ ">>>", "is_list_like", "(", "[]", ")", "True", ">>>", "is_list_like", "((", "))", "True", ">>>", "is_list_like", "(", "[", "x", "for", "x", "in", "range", "(", "10", ")", "]", ")", "True", ">>>", "is_list_like", "((", "1", "2", "3", "))", "True", ">>>", "g", "=", "(", "x", "for", "x", "in", "range", "(", "10", "))", ">>>", "is_list_like", "(", "g", ")", "True", ">>>", "is_list_like", "(", "abc", ")", "False", ">>>", "is_list_like", "(", "0", ")", "False", ">>>", "is_list_like", "(", "{}", ")", "False" ]
python
train
dw/mitogen
ansible_mitogen/connection.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/connection.py#L185-L198
def _connect_jail(spec): """ Return ContextService arguments for a FreeBSD jail connection. """ return { 'method': 'jail', 'kwargs': { 'username': spec.remote_user(), 'container': spec.remote_addr(), 'python_path': spec.python_path(), 'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(), 'remote_name': get_remote_name(spec), } }
[ "def", "_connect_jail", "(", "spec", ")", ":", "return", "{", "'method'", ":", "'jail'", ",", "'kwargs'", ":", "{", "'username'", ":", "spec", ".", "remote_user", "(", ")", ",", "'container'", ":", "spec", ".", "remote_addr", "(", ")", ",", "'python_path'", ":", "spec", ".", "python_path", "(", ")", ",", "'connect_timeout'", ":", "spec", ".", "ansible_ssh_timeout", "(", ")", "or", "spec", ".", "timeout", "(", ")", ",", "'remote_name'", ":", "get_remote_name", "(", "spec", ")", ",", "}", "}" ]
Return ContextService arguments for a FreeBSD jail connection.
[ "Return", "ContextService", "arguments", "for", "a", "FreeBSD", "jail", "connection", "." ]
python
train
slaveofcode/pycrawler
pycrawler/page.py
https://github.com/slaveofcode/pycrawler/blob/6d19b5b378f42f9586e2d3a0d0c013cb03c82f6d/pycrawler/page.py#L419-L430
def html(self, selector): """Return html result that executed by given css selector :param selector: `str` css selector :return: `list` or `None` """ result = self.__bs4.select(selector) return [str(r) for r in result] \ if result.__len__() > 1 else \ str(result[0]) if result.__len__() > 0 else None
[ "def", "html", "(", "self", ",", "selector", ")", ":", "result", "=", "self", ".", "__bs4", ".", "select", "(", "selector", ")", "return", "[", "str", "(", "r", ")", "for", "r", "in", "result", "]", "if", "result", ".", "__len__", "(", ")", ">", "1", "else", "str", "(", "result", "[", "0", "]", ")", "if", "result", ".", "__len__", "(", ")", ">", "0", "else", "None" ]
Return html result that executed by given css selector :param selector: `str` css selector :return: `list` or `None`
[ "Return", "html", "result", "that", "executed", "by", "given", "css", "selector" ]
python
train
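A small sketch of the selector semantics implemented in `html` above, using BeautifulSoup directly (the markup is illustrative); note the method returns a list only when more than one node matches:

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<div><p>a</p><p>b</p></div>', 'html.parser')
    print([str(r) for r in soup.select('p')])  # two matches -> list of strings
    print(str(soup.select('div')[0]))          # single match -> one string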
loli/medpy
medpy/metric/histogram.py
https://github.com/loli/medpy/blob/95216b9e22e7ce301f0edf953ee2a2f1b6c6aee5/medpy/metric/histogram.py#L245-L287
def histogram_intersection(h1, h2): # 6 us @array, 30 us @list \w 100 bins r""" Calculate the common part of two histograms. The histogram intersection between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: .. math:: d_{\cap}(H, H') = \sum_{m=1}^M\min(H_m, H'_m) *Attributes:* - a real metric *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 1` - :math:`d(H, H') = d(H', H)` *Attributes for not-normalized histograms:* - not applicable *Attributes for not-equal histograms:* - not applicable Parameters ---------- h1 : sequence The first histogram, normalized. h2 : sequence The second histogram, normalized, same bins as ``h1``. Returns ------- histogram_intersection : float Intersection between the two histograms. """ h1, h2 = __prepare_histogram(h1, h2) return scipy.sum(scipy.minimum(h1, h2))
[ "def", "histogram_intersection", "(", "h1", ",", "h2", ")", ":", "# 6 us @array, 30 us @list \\w 100 bins", "h1", ",", "h2", "=", "__prepare_histogram", "(", "h1", ",", "h2", ")", "return", "scipy", ".", "sum", "(", "scipy", ".", "minimum", "(", "h1", ",", "h2", ")", ")" ]
r""" Calculate the common part of two histograms. The histogram intersection between two histograms :math:`H` and :math:`H'` of size :math:`m` is defined as: .. math:: d_{\cap}(H, H') = \sum_{m=1}^M\min(H_m, H'_m) *Attributes:* - a real metric *Attributes for normalized histograms:* - :math:`d(H, H')\in[0, 1]` - :math:`d(H, H) = 1` - :math:`d(H, H') = d(H', H)` *Attributes for not-normalized histograms:* - not applicable *Attributes for not-equal histograms:* - not applicable Parameters ---------- h1 : sequence The first histogram, normalized. h2 : sequence The second histogram, normalized, same bins as ``h1``. Returns ------- histogram_intersection : float Intersection between the two histograms.
[ "r", "Calculate", "the", "common", "part", "of", "two", "histograms", ".", "The", "histogram", "intersection", "between", "two", "histograms", ":", "math", ":", "H", "and", ":", "math", ":", "H", "of", "size", ":", "math", ":", "m", "is", "defined", "as", ":", "..", "math", "::", "d_", "{", "\\", "cap", "}", "(", "H", "H", ")", "=", "\\", "sum_", "{", "m", "=", "1", "}", "^M", "\\", "min", "(", "H_m", "H", "_m", ")", "*", "Attributes", ":", "*" ]
python
train
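A quick numeric sketch of the min-sum defined above, for two normalized histograms with illustrative values:

    import numpy as np

    h1 = np.array([0.5, 0.3, 0.2])
    h2 = np.array([0.4, 0.4, 0.2])
    print(np.sum(np.minimum(h1, h2)))  # ~0.9, up to float rounding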
hyperledger/indy-plenum
stp_zmq/util.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/stp_zmq/util.py#L105-L147
def generate_certificates(base_dir, *peer_names, pubKeyDir=None, secKeyDir=None, sigKeyDir=None, verkeyDir=None, clean=True): ''' Generate client and server CURVE certificate files''' pubKeyDir = pubKeyDir or 'public_keys' secKeyDir = secKeyDir or 'private_keys' verkeyDir = verkeyDir or 'verif_keys' sigKeyDir = sigKeyDir or 'sig_keys' # keys_dir = os.path.join(base_dir, 'certificates') e_keys_dir = os.path.join(base_dir, '_enc') s_keys_dir = os.path.join(base_dir, '_sig') public_keys_dir = os.path.join(base_dir, pubKeyDir) secret_keys_dir = os.path.join(base_dir, secKeyDir) ver_keys_dir = os.path.join(base_dir, verkeyDir) sig_keys_dir = os.path.join(base_dir, sigKeyDir) # Create directories for certificates, remove old content if necessary for d in [e_keys_dir, s_keys_dir, public_keys_dir, secret_keys_dir, ver_keys_dir, sig_keys_dir]: if clean and os.path.exists(d): shutil.rmtree(d) os.makedirs(d, exist_ok=True) # create new keys in certificates dir for peer_name in peer_names: createEncAndSigKeys(e_keys_dir, s_keys_dir, peer_name) # move public keys to appropriate directory for keys_dir, pkdir, skdir in [ (e_keys_dir, public_keys_dir, secret_keys_dir), (s_keys_dir, ver_keys_dir, sig_keys_dir) ]: moveKeyFilesToCorrectLocations(keys_dir, pkdir, skdir) shutil.rmtree(e_keys_dir) shutil.rmtree(s_keys_dir) print('Public keys in {}'.format(public_keys_dir)) print('Private keys in {}'.format(secret_keys_dir)) print('Verification keys in {}'.format(ver_keys_dir)) print('Signing keys in {}'.format(sig_keys_dir))
[ "def", "generate_certificates", "(", "base_dir", ",", "*", "peer_names", ",", "pubKeyDir", "=", "None", ",", "secKeyDir", "=", "None", ",", "sigKeyDir", "=", "None", ",", "verkeyDir", "=", "None", ",", "clean", "=", "True", ")", ":", "pubKeyDir", "=", "pubKeyDir", "or", "'public_keys'", "secKeyDir", "=", "secKeyDir", "or", "'private_keys'", "verkeyDir", "=", "verkeyDir", "or", "'verif_keys'", "sigKeyDir", "=", "sigKeyDir", "or", "'sig_keys'", "# keys_dir = os.path.join(base_dir, 'certificates')", "e_keys_dir", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "'_enc'", ")", "s_keys_dir", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "'_sig'", ")", "public_keys_dir", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "pubKeyDir", ")", "secret_keys_dir", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "secKeyDir", ")", "ver_keys_dir", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "verkeyDir", ")", "sig_keys_dir", "=", "os", ".", "path", ".", "join", "(", "base_dir", ",", "sigKeyDir", ")", "# Create directories for certificates, remove old content if necessary", "for", "d", "in", "[", "e_keys_dir", ",", "s_keys_dir", ",", "public_keys_dir", ",", "secret_keys_dir", ",", "ver_keys_dir", ",", "sig_keys_dir", "]", ":", "if", "clean", "and", "os", ".", "path", ".", "exists", "(", "d", ")", ":", "shutil", ".", "rmtree", "(", "d", ")", "os", ".", "makedirs", "(", "d", ",", "exist_ok", "=", "True", ")", "# create new keys in certificates dir", "for", "peer_name", "in", "peer_names", ":", "createEncAndSigKeys", "(", "e_keys_dir", ",", "s_keys_dir", ",", "peer_name", ")", "# move public keys to appropriate directory", "for", "keys_dir", ",", "pkdir", ",", "skdir", "in", "[", "(", "e_keys_dir", ",", "public_keys_dir", ",", "secret_keys_dir", ")", ",", "(", "s_keys_dir", ",", "ver_keys_dir", ",", "sig_keys_dir", ")", "]", ":", "moveKeyFilesToCorrectLocations", "(", "keys_dir", ",", "pkdir", ",", "skdir", ")", "shutil", ".", "rmtree", "(", "e_keys_dir", ")", "shutil", ".", "rmtree", "(", "s_keys_dir", ")", "print", "(", "'Public keys in {}'", ".", "format", "(", "public_keys_dir", ")", ")", "print", "(", "'Private keys in {}'", ".", "format", "(", "secret_keys_dir", ")", ")", "print", "(", "'Verification keys in {}'", ".", "format", "(", "ver_keys_dir", ")", ")", "print", "(", "'Signing keys in {}'", ".", "format", "(", "sig_keys_dir", ")", ")" ]
Generate client and server CURVE certificate files
[ "Generate", "client", "and", "server", "CURVE", "certificate", "files" ]
python
train
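A hedged usage sketch for `generate_certificates`; the base directory and peer names are illustrative:

    generate_certificates('/tmp/zmq_keys', 'Node1', 'Node2', clean=True)
    # creates public_keys/, private_keys/, verif_keys/ and sig_keys/
    # under /tmp/zmq_keys, with one CURVE and one signing keypair per peer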
nion-software/nionswift-io
nionswift_plugin/TIFF_IO/tifffile.py
https://github.com/nion-software/nionswift-io/blob/e9ae37f01faa9332c48b647f93afd5ef2166b155/nionswift_plugin/TIFF_IO/tifffile.py#L9225-L9231
def scanimage_artist_metadata(artist): """Return metadata from ScanImage artist tag as dict.""" try: return json.loads(artist) except ValueError as exc: log.warning('scanimage_artist_metadata: %s: %s', exc.__class__.__name__, exc)
[ "def", "scanimage_artist_metadata", "(", "artist", ")", ":", "try", ":", "return", "json", ".", "loads", "(", "artist", ")", "except", "ValueError", "as", "exc", ":", "log", ".", "warning", "(", "'scanimage_artist_metadata: %s: %s'", ",", "exc", ".", "__class__", ".", "__name__", ",", "exc", ")" ]
Return metadata from ScanImage artist tag as dict.
[ "Return", "metatata", "from", "ScanImage", "artist", "tag", "as", "dict", "." ]
python
train
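A minimal sketch for `scanimage_artist_metadata`; the JSON payload below is a made-up stand-in for a real ScanImage artist tag:

    artist = '{"RoiGroups": {"imagingRoiGroup": {}}}'
    print(scanimage_artist_metadata(artist))
    # {'RoiGroups': {'imagingRoiGroup': {}}}
    scanimage_artist_metadata('not json')  # logs a warning, returns None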
RedHatQE/python-stitches
stitches/expect.py
https://github.com/RedHatQE/python-stitches/blob/957e9895e64ffd3b8157b38b9cce414969509288/stitches/expect.py#L153-L176
def ping_pong(connection, command, strexp, timeout=10): ''' Enter a command and wait for something to happen (enter + expect combined) @param connection: connection to the host @type connection: L{Connection} @param command: command to execute @type command: str @param strexp: string to convert to expression (.*string.*) @type strexp: str @param timeout: timeout for performing expect operation @type timeout: int @return: True if succeeded @rtype: bool @raises ExpectFailed ''' Expect.enter(connection, command) return Expect.expect(connection, strexp, timeout)
[ "def", "ping_pong", "(", "connection", ",", "command", ",", "strexp", ",", "timeout", "=", "10", ")", ":", "Expect", ".", "enter", "(", "connection", ",", "command", ")", "return", "Expect", ".", "expect", "(", "connection", ",", "strexp", ",", "timeout", ")" ]
Enter a command and wait for something to happen (enter + expect combined) @param connection: connection to the host @type connection: L{Connection} @param command: command to execute @type command: str @param strexp: string to convert to expression (.*string.*) @type strexp: str @param timeout: timeout for performing expect operation @type timeout: int @return: True if succeeded @rtype: bool @raises ExpectFailed
[ "Enter", "a", "command", "and", "wait", "for", "something", "to", "happen", "(", "enter", "+", "expect", "combined", ")" ]
python
train
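A hedged usage sketch for `ping_pong`, assuming an already-established stitches connection object `con`:

    Expect.ping_pong(con, 'uname -s', 'Linux', timeout=10)
    # enters the command, then waits for output matching .*Linux.*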
apache/incubator-mxnet
python/mxnet/gluon/parameter.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/parameter.py#L550-L556
def var(self): """Returns a symbol representing this parameter.""" if self._var is None: self._var = symbol.var(self.name, shape=self.shape, dtype=self.dtype, lr_mult=self.lr_mult, wd_mult=self.wd_mult, init=self.init, stype=self._stype) return self._var
[ "def", "var", "(", "self", ")", ":", "if", "self", ".", "_var", "is", "None", ":", "self", ".", "_var", "=", "symbol", ".", "var", "(", "self", ".", "name", ",", "shape", "=", "self", ".", "shape", ",", "dtype", "=", "self", ".", "dtype", ",", "lr_mult", "=", "self", ".", "lr_mult", ",", "wd_mult", "=", "self", ".", "wd_mult", ",", "init", "=", "self", ".", "init", ",", "stype", "=", "self", ".", "_stype", ")", "return", "self", ".", "_var" ]
Returns a symbol representing this parameter.
[ "Returns", "a", "symbol", "representing", "this", "parameter", "." ]
python
train
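A hedged sketch for `var` using Gluon's public Parameter class; the name and shape are illustrative:

    import mxnet as mx

    p = mx.gluon.Parameter('weight', shape=(2, 3))
    sym = p.var()     # mxnet.symbol.Symbol named 'weight'
    print(sym.name)   # weight; repeated calls return the cached symbol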
storax/jinjaapidoc
src/jinjaapidoc/gendoc.py
https://github.com/storax/jinjaapidoc/blob/f1eeb6ab5bd1a96c4130306718c6423f37c76856/src/jinjaapidoc/gendoc.py#L111-L149
def write_file(app, name, text, dest, suffix, dryrun, force): """Write the output file for module/package <name>. :param app: the sphinx app :type app: :class:`sphinx.application.Sphinx` :param name: the file name without file extension :type name: :class:`str` :param text: the content of the file :type text: :class:`str` :param dest: the output directory :type dest: :class:`str` :param suffix: the file extension :type suffix: :class:`str` :param dryrun: If True, do not create any files, just log the potential location. :type dryrun: :class:`bool` :param force: Overwrite existing files :type force: :class:`bool` :returns: None :raises: None """ fname = os.path.join(dest, '%s.%s' % (name, suffix)) if dryrun: logger.info('Would create file %s.' % fname) return if not force and os.path.isfile(fname): logger.info('File %s already exists, skipping.' % fname) else: logger.info('Creating file %s.' % fname) f = open(fname, 'w') try: f.write(text) relpath = os.path.relpath(fname, start=app.env.srcdir) abspath = os.sep + relpath docpath = app.env.relfn2path(abspath)[0] docpath = docpath.rsplit(os.path.extsep, 1)[0] logger.debug('Adding document %s' % docpath) app.env.found_docs.add(docpath) finally: f.close()
[ "def", "write_file", "(", "app", ",", "name", ",", "text", ",", "dest", ",", "suffix", ",", "dryrun", ",", "force", ")", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "dest", ",", "'%s.%s'", "%", "(", "name", ",", "suffix", ")", ")", "if", "dryrun", ":", "logger", ".", "info", "(", "'Would create file %s.'", "%", "fname", ")", "return", "if", "not", "force", "and", "os", ".", "path", ".", "isfile", "(", "fname", ")", ":", "logger", ".", "info", "(", "'File %s already exists, skipping.'", "%", "fname", ")", "else", ":", "logger", ".", "info", "(", "'Creating file %s.'", "%", "fname", ")", "f", "=", "open", "(", "fname", ",", "'w'", ")", "try", ":", "f", ".", "write", "(", "text", ")", "relpath", "=", "os", ".", "path", ".", "relpath", "(", "fname", ",", "start", "=", "app", ".", "env", ".", "srcdir", ")", "abspath", "=", "os", ".", "sep", "+", "relpath", "docpath", "=", "app", ".", "env", ".", "relfn2path", "(", "abspath", ")", "[", "0", "]", "docpath", "=", "docpath", ".", "rsplit", "(", "os", ".", "path", ".", "extsep", ",", "1", ")", "[", "0", "]", "logger", ".", "debug", "(", "'Adding document %s'", "%", "docpath", ")", "app", ".", "env", ".", "found_docs", ".", "add", "(", "docpath", ")", "finally", ":", "f", ".", "close", "(", ")" ]
Write the output file for module/package <name>. :param app: the sphinx app :type app: :class:`sphinx.application.Sphinx` :param name: the file name without file extension :type name: :class:`str` :param text: the content of the file :type text: :class:`str` :param dest: the output directory :type dest: :class:`str` :param suffix: the file extension :type suffix: :class:`str` :param dryrun: If True, do not create any files, just log the potential location. :type dryrun: :class:`bool` :param force: Overwrite existing files :type force: :class:`bool` :returns: None :raises: None
[ "Write", "the", "output", "file", "for", "module", "/", "package", "<name", ">", "." ]
python
train
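A hedged sketch of `write_file`'s dry-run path, which logs the would-be filename and returns before touching the sphinx app (so `app` can be anything here):

    write_file(None, 'mypackage.mymodule', '.. automodule:: mypackage.mymodule',
               '/tmp/apidocs', 'rst', dryrun=True, force=False)
    # logs: Would create file /tmp/apidocs/mypackage.mymodule.rst.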
cloudsmith-io/cloudsmith-cli
cloudsmith_cli/cli/validators.py
https://github.com/cloudsmith-io/cloudsmith-cli/blob/5bc245ca5d0bfa85380be48e7c206b4c86cc6c8e/cloudsmith_cli/cli/validators.py#L64-L87
def validate_slashes(param, value, minimum=2, maximum=None, form=None): """Ensure that parameter has slashes and minimum parts.""" try: value = value.split("/") except ValueError: value = None if value: if len(value) < minimum: value = None elif maximum and len(value) > maximum: value = None if not value: form = form or "/".join("VALUE" for _ in range(minimum)) raise click.BadParameter( "Must be in the form of %(form)s" % {"form": form}, param=param ) value = [v.strip() for v in value] if not all(value): raise click.BadParameter("Individual values cannot be blank", param=param) return value
[ "def", "validate_slashes", "(", "param", ",", "value", ",", "minimum", "=", "2", ",", "maximum", "=", "None", ",", "form", "=", "None", ")", ":", "try", ":", "value", "=", "value", ".", "split", "(", "\"/\"", ")", "except", "ValueError", ":", "value", "=", "None", "if", "value", ":", "if", "len", "(", "value", ")", "<", "minimum", ":", "value", "=", "None", "elif", "maximum", "and", "len", "(", "value", ")", ">", "maximum", ":", "value", "=", "None", "if", "not", "value", ":", "form", "=", "form", "or", "\"/\"", ".", "join", "(", "\"VALUE\"", "for", "_", "in", "range", "(", "minimum", ")", ")", "raise", "click", ".", "BadParameter", "(", "\"Must be in the form of %(form)s\"", "%", "{", "\"form\"", ":", "form", "}", ",", "param", "=", "param", ")", "value", "=", "[", "v", ".", "strip", "(", ")", "for", "v", "in", "value", "]", "if", "not", "all", "(", "value", ")", ":", "raise", "click", ".", "BadParameter", "(", "\"Individual values cannot be blank\"", ",", "param", "=", "param", ")", "return", "value" ]
Ensure that parameter has slashes and minimum parts.
[ "Ensure", "that", "parameter", "has", "slashes", "and", "minimum", "parts", "." ]
python
train
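A short sketch of `validate_slashes` with the default two-part form (`param` is the click parameter being validated; None suffices for illustration):

    print(validate_slashes(None, 'owner/repo'))  # ['owner', 'repo']
    validate_slashes(None, 'owner')              # raises click.BadParameter:
                                                 # Must be in the form of VALUE/VALUE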
WhereSoftwareGoesToDie/pymarquise
marquise/marquise.py
https://github.com/WhereSoftwareGoesToDie/pymarquise/blob/67e52df70c50ed53ad315a64fea430a9567e2b1b/marquise/marquise.py#L100-L140
def send_simple(self, address, timestamp, value): """Queue a simple datapoint (ie. a 64-bit word), return True/False for success. Arguments: address -- uint64_t representing a unique metric. timestamp -- uint64_t representing number of nanoseconds (10^-9) since epoch. value -- uint64_t value being stored. There are no formal restrictions on how `address` is chosen, but it must be unique to the metric you are inserting. If you don't have one, you may generate one by calling `hash_identifier` with a string; the recommended input is the source identifier. If you don't have a `timestamp` you may pass in None to have Pymarquise generate one for you. """ if self.marquise_ctx is None: raise ValueError("Attempted to write to a closed Marquise handle.") self.__debug("Supplied address: %s" % address) if value is None: raise TypeError("Can't store None as a value.") if timestamp is None: timestamp = self.current_timestamp() # Wrap/convert our arguments to C datatypes before dispatching. # FFI will take care of converting them to the right endianness. I think. c_address = FFI.cast("uint64_t", address) c_timestamp = FFI.cast("uint64_t", timestamp) c_value = FFI.cast("uint64_t", value) success = MARQUISE_SEND_SIMPLE(self.marquise_ctx, c_address, c_timestamp, c_value) if success != 0: self.__debug("send_simple returned %d, raising exception" % success) raise RuntimeError("send_simple was unsuccessful, errno is %d" % FFI.errno) self.__debug("send_simple returned %d" % success) return True
[ "def", "send_simple", "(", "self", ",", "address", ",", "timestamp", ",", "value", ")", ":", "if", "self", ".", "marquise_ctx", "is", "None", ":", "raise", "ValueError", "(", "\"Attempted to write to a closed Marquise handle.\"", ")", "self", ".", "__debug", "(", "\"Supplied address: %s\"", "%", "address", ")", "if", "value", "is", "None", ":", "raise", "TypeError", "(", "\"Can't store None as a value.\"", ")", "if", "timestamp", "is", "None", ":", "timestamp", "=", "self", ".", "current_timestamp", "(", ")", "# Wrap/convert our arguments to C datatypes before dispatching.", "# FFI will take care of converting them to the right endianness. I think.", "c_address", "=", "FFI", ".", "cast", "(", "\"uint64_t\"", ",", "address", ")", "c_timestamp", "=", "FFI", ".", "cast", "(", "\"uint64_t\"", ",", "timestamp", ")", "c_value", "=", "FFI", ".", "cast", "(", "\"uint64_t\"", ",", "value", ")", "success", "=", "MARQUISE_SEND_SIMPLE", "(", "self", ".", "marquise_ctx", ",", "c_address", ",", "c_timestamp", ",", "c_value", ")", "if", "success", "!=", "0", ":", "self", ".", "__debug", "(", "\"send_simple returned %d, raising exception\"", "%", "success", ")", "raise", "RuntimeError", "(", "\"send_simple was unsuccessful, errno is %d\"", "%", "FFI", ".", "errno", ")", "self", ".", "__debug", "(", "\"send_simple returned %d\"", "%", "success", ")", "return", "True" ]
Queue a simple datapoint (ie. a 64-bit word), return True/False for success. Arguments: address -- uint64_t representing a unique metric. timestamp -- uint64_t representing number of nanoseconds (10^-9) since epoch. value -- uint64_t value being stored. There are no formal restrictions on how `address` is chosen, but it must be unique to the metric you are inserting. If you don't have one, you may generate one by calling `hash_identifier` with a string; the recommended input is the source identifier. If you don't have a `timestamp` you may pass in None to have Pymarquise generate one for you.
[ "Queue", "a", "simple", "datapoint", "(", "ie", ".", "a", "64", "-", "bit", "word", ")", "return", "True", "/", "False", "for", "success", "." ]
python
train
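A hedged usage sketch for `send_simple`, assuming a Marquise handle constructed from a namespace; per the docstring, the address can come from `hash_identifier` and `timestamp=None` is auto-filled:

    m = Marquise('mynamespace')                      # assumed constructor
    address = m.hash_identifier('hostname:cpu:user') # derive a unique address
    m.send_simple(address, None, 42)                 # returns True on success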
pixelogik/NearPy
nearpy/storage/storage_redis.py
https://github.com/pixelogik/NearPy/blob/1b534b864d320d875508e95cd2b76b6d8c07a90b/nearpy/storage/storage_redis.py#L49-L53
def store_vector(self, hash_name, bucket_key, v, data): """ Stores vector and JSON-serializable data in bucket with specified key. """ self._add_vector(hash_name, bucket_key, v, data, self.redis_object)
[ "def", "store_vector", "(", "self", ",", "hash_name", ",", "bucket_key", ",", "v", ",", "data", ")", ":", "self", ".", "_add_vector", "(", "hash_name", ",", "bucket_key", ",", "v", ",", "data", ",", "self", ".", "redis_object", ")" ]
Stores vector and JSON-serializable data in bucket with specified key.
[ "Stores", "vector", "and", "JSON", "-", "serializable", "data", "in", "bucket", "with", "specified", "key", "." ]
python
train
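A hedged sketch for `store_vector` with NearPy's Redis-backed storage; the hash name, bucket key and payload are illustrative:

    import numpy
    import redis
    from nearpy.storage import RedisStorage

    storage = RedisStorage(redis.StrictRedis())
    v = numpy.random.randn(100)
    storage.store_vector('rbp_hash', 'bucket_1', v, 'vector_1')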
bwohlberg/sporco
sporco/linalg.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/linalg.py#L1269-L1291
def atleast_nd(n, u): """ If the input array has fewer than n dimensions, append singleton dimensions so that it is n dimensional. Note that the interface differs substantially from that of :func:`numpy.atleast_3d` etc. Parameters ---------- n : int Minimum number of required dimensions u : array_like Input array Returns ------- v : ndarray Output array with at least n dimensions """ if u.ndim >= n: return u else: return u.reshape(u.shape + (1,)*(n-u.ndim))
[ "def", "atleast_nd", "(", "n", ",", "u", ")", ":", "if", "u", ".", "ndim", ">=", "n", ":", "return", "u", "else", ":", "return", "u", ".", "reshape", "(", "u", ".", "shape", "+", "(", "1", ",", ")", "*", "(", "n", "-", "u", ".", "ndim", ")", ")" ]
If the input array has fewer than n dimensions, append singleton dimensions so that it is n dimensional. Note that the interface differs substantially from that of :func:`numpy.atleast_3d` etc. Parameters ---------- n : int Minimum number of required dimensions u : array_like Input array Returns ------- v : ndarray Output array with at least n dimensions
[ "If", "the", "input", "array", "has", "fewer", "than", "n", "dimensions", "append", "singleton", "dimensions", "so", "that", "it", "is", "n", "dimensional", ".", "Note", "that", "the", "interface", "differs", "substantially", "from", "that", "of", ":", "func", ":", "numpy", ".", "atleast_3d", "etc", "." ]
python
train
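A quick sketch of `atleast_nd`'s trailing-singleton behavior:

    import numpy as np

    u = np.ones((2, 3))
    print(atleast_nd(4, u).shape)  # (2, 3, 1, 1)
    print(atleast_nd(1, u).shape)  # (2, 3) -- already >= 1-d, returned as-is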
althonos/pronto
pronto/parser/obo.py
https://github.com/althonos/pronto/blob/a768adcba19fb34f26f67cde4a03d317f932c274/pronto/parser/obo.py#L152-L166
def _parse_typedef(line, _rawtypedef): """Parse a typedef line. The typedef is organized as a succession of ``key:value`` pairs that are extracted into the same dictionary until a new header is encountered Arguments: line (str): the line containing a typedef statement """ if "[Typedef]" in line: _rawtypedef.append(collections.defaultdict(list)) else: key, value = line.split(':', 1) _rawtypedef[-1][key.strip()].append(value.strip())
[ "def", "_parse_typedef", "(", "line", ",", "_rawtypedef", ")", ":", "if", "\"[Typedef]\"", "in", "line", ":", "_rawtypedef", ".", "append", "(", "collections", ".", "defaultdict", "(", "list", ")", ")", "else", ":", "key", ",", "value", "=", "line", ".", "split", "(", "':'", ",", "1", ")", "_rawtypedef", "[", "-", "1", "]", "[", "key", ".", "strip", "(", ")", "]", ".", "append", "(", "value", ".", "strip", "(", ")", ")" ]
Parse a typedef line. The typedef is organized as a succession of ``key:value`` pairs that are extracted into the same dictionary until a new header is encountered Arguments: line (str): the line containing a typedef statement
[ "Parse", "a", "typedef", "line", "." ]
python
train
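A minimal sketch of how `_parse_typedef`'s accumulator fills up across successive lines:

    import collections

    _rawtypedef = []
    for line in ('[Typedef]', 'id: part_of', 'name: part of'):
        _parse_typedef(line, _rawtypedef)
    print(dict(_rawtypedef[-1]))
    # {'id': ['part_of'], 'name': ['part of']}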
edx/edx-django-release-util
release_util/management/commands/__init__.py
https://github.com/edx/edx-django-release-util/blob/de0fde41d6a19885ab7dc309472b94fd0fccbc1d/release_util/management/commands/__init__.py#L16-L48
def dump_migration_session_state(raw): """ Serialize a migration session state to yaml using nicer formatting Args: raw: object to serialize Returns: string (of yaml) Specifically, this forces the "output" member of state step dicts (e.g. state[0]['output']) to use block formatting. For example, rather than this: - migration: [app, migration_name] output: "line 1\nline2\nline3" You get this: - migration: [app, migration_name] output: | line 1 line 2 line 3 """ class BlockStyle(str): pass class SessionDumper(yaml.SafeDumper): pass def str_block_formatter(dumper, data): return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|') SessionDumper.add_representer(BlockStyle, str_block_formatter) raw = deepcopy(raw) for step in raw: step['output'] = BlockStyle(step['output']) step['traceback'] = BlockStyle(step['traceback']) return yaml.dump(raw, Dumper=SessionDumper)
[ "def", "dump_migration_session_state", "(", "raw", ")", ":", "class", "BlockStyle", "(", "str", ")", ":", "pass", "class", "SessionDumper", "(", "yaml", ".", "SafeDumper", ")", ":", "pass", "def", "str_block_formatter", "(", "dumper", ",", "data", ")", ":", "return", "dumper", ".", "represent_scalar", "(", "u'tag:yaml.org,2002:str'", ",", "data", ",", "style", "=", "'|'", ")", "SessionDumper", ".", "add_representer", "(", "BlockStyle", ",", "str_block_formatter", ")", "raw", "=", "deepcopy", "(", "raw", ")", "for", "step", "in", "raw", ":", "step", "[", "'output'", "]", "=", "BlockStyle", "(", "step", "[", "'output'", "]", ")", "step", "[", "'traceback'", "]", "=", "BlockStyle", "(", "step", "[", "'traceback'", "]", ")", "return", "yaml", ".", "dump", "(", "raw", ",", "Dumper", "=", "SessionDumper", ")" ]
Serialize a migration session state to yaml using nicer formatting Args: raw: object to serialize Returns: string (of yaml) Specifically, this forces the "output" member of state step dicts (e.g. state[0]['output']) to use block formatting. For example, rather than this: - migration: [app, migration_name] output: "line 1\nline2\nline3" You get this: - migration: [app, migration_name] output: | line 1 line 2 line 3
[ "Serialize", "a", "migration", "session", "state", "to", "yaml", "using", "nicer", "formatting" ]
python
train
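A short sketch for `dump_migration_session_state`, mirroring the docstring's example input:

    raw = [{'migration': ['app', 'migration_name'],
            'output': 'line 1\nline 2\nline 3',
            'traceback': 'Traceback (most recent call last):'}]
    print(dump_migration_session_state(raw))
    # output and traceback are emitted in block (|) style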
maljovec/topopy
topopy/MorseSmaleComplex.py
https://github.com/maljovec/topopy/blob/4be598d51c4e4043b73d4ad44beed6d289e2f088/topopy/MorseSmaleComplex.py#L354-L365
def get_sample_size(self, key=None): """ Returns the number of samples in the input data @ In, key, an optional 2-tuple specifying a min-max id pair used for determining which partition size should be returned. If not specified then the size of the entire data set will be returned. @ Out, an integer specifying the number of samples. """ if key is None: return len(self.Y) else: return len(self.get_partitions(self.persistence)[key])
[ "def", "get_sample_size", "(", "self", ",", "key", "=", "None", ")", ":", "if", "key", "is", "None", ":", "return", "len", "(", "self", ".", "Y", ")", "else", ":", "return", "len", "(", "self", ".", "get_partitions", "(", "self", ".", "persistence", ")", "[", "key", "]", ")" ]
Returns the number of samples in the input data @ In, key, an optional 2-tuple specifying a min-max id pair used for determining which partition size should be returned. If not specified then the size of the entire data set will be returned. @ Out, an integer specifying the number of samples.
[ "Returns", "the", "number", "of", "samples", "in", "the", "input", "data" ]
python
train
amoffat/sh
sh.py
https://github.com/amoffat/sh/blob/858adf0c682af4c40e41f34d6926696b7a5d3b12/sh.py#L2596-L2603
def get_iter_string_reader(stdin): """ return an iterator that returns a chunk of a string every time it is called. notice that even though bufsize_type might be line buffered, we're not doing any line buffering here. that's because our StreamBufferer handles all buffering. we just need to return a reasonable-sized chunk. """ bufsize = 1024 iter_str = (stdin[i:i + bufsize] for i in range(0, len(stdin), bufsize)) return get_iter_chunk_reader(iter_str)
[ "def", "get_iter_string_reader", "(", "stdin", ")", ":", "bufsize", "=", "1024", "iter_str", "=", "(", "stdin", "[", "i", ":", "i", "+", "bufsize", "]", "for", "i", "in", "range", "(", "0", ",", "len", "(", "stdin", ")", ",", "bufsize", ")", ")", "return", "get_iter_chunk_reader", "(", "iter_str", ")" ]
return an iterator that returns a chunk of a string every time it is called. notice that even though bufsize_type might be line buffered, we're not doing any line buffering here. that's because our StreamBufferer handles all buffering. we just need to return a reasonable-sized chunk.
[ "return", "an", "iterator", "that", "returns", "a", "chunk", "of", "a", "string", "every", "time", "it", "is", "called", ".", "notice", "that", "even", "though", "bufsize_type", "might", "be", "line", "buffered", "we", "re", "not", "doing", "any", "line", "buffering", "here", ".", "that", "s", "because", "our", "StreamBufferer", "handles", "all", "buffering", ".", "we", "just", "need", "to", "return", "a", "reasonable", "-", "sized", "chunk", "." ]
python
train
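A quick sketch of the 1024-byte slicing that `get_iter_string_reader` feeds to the chunk reader:

    stdin = 'x' * 2500
    bufsize = 1024
    chunks = [stdin[i:i + bufsize] for i in range(0, len(stdin), bufsize)]
    print([len(c) for c in chunks])  # [1024, 1024, 452]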
jaywink/federation
federation/entities/mixins.py
https://github.com/jaywink/federation/blob/59d31bb37e662891dbea72c1dee05dc53146c78b/federation/entities/mixins.py#L96-L104
def _validate_children(self): """Check that the children we have are allowed here.""" for child in self._children: if child.__class__ not in self._allowed_children: raise ValueError( "Child %s is not allowed as a child for this %s type entity." % ( child, self.__class__ ) )
[ "def", "_validate_children", "(", "self", ")", ":", "for", "child", "in", "self", ".", "_children", ":", "if", "child", ".", "__class__", "not", "in", "self", ".", "_allowed_children", ":", "raise", "ValueError", "(", "\"Child %s is not allowed as a children for this %s type entity.\"", "%", "(", "child", ",", "self", ".", "__class__", ")", ")" ]
Check that the children we have are allowed here.
[ "Check", "that", "the", "children", "we", "have", "are", "allowed", "here", "." ]
python
train
SoCo/SoCo
soco/services.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/services.py#L729-L748
def iter_event_vars(self): """Yield the service's eventable variables. Yields: `tuple`: a tuple of (variable name, data type). """ # pylint: disable=invalid-name ns = '{urn:schemas-upnp-org:service-1-0}' scpd_body = requests.get(self.base_url + self.scpd_url).text tree = XML.fromstring(scpd_body.encode('utf-8')) # parse the state variables to get the relevant variable types statevars = tree.findall('{}stateVariable'.format(ns)) for state in statevars: # We are only interested if 'sendEvents' is 'yes', i.e. this # is an eventable variable if state.attrib['sendEvents'] == "yes": name = state.findtext('{}name'.format(ns)) vartype = state.findtext('{}dataType'.format(ns)) yield (name, vartype)
[ "def", "iter_event_vars", "(", "self", ")", ":", "# pylint: disable=invalid-name", "ns", "=", "'{urn:schemas-upnp-org:service-1-0}'", "scpd_body", "=", "requests", ".", "get", "(", "self", ".", "base_url", "+", "self", ".", "scpd_url", ")", ".", "text", "tree", "=", "XML", ".", "fromstring", "(", "scpd_body", ".", "encode", "(", "'utf-8'", ")", ")", "# parse the state variables to get the relevant variable types", "statevars", "=", "tree", ".", "findall", "(", "'{}stateVariable'", ".", "format", "(", "ns", ")", ")", "for", "state", "in", "statevars", ":", "# We are only interested if 'sendEvents' is 'yes', i.e this", "# is an eventable variable", "if", "state", ".", "attrib", "[", "'sendEvents'", "]", "==", "\"yes\"", ":", "name", "=", "state", ".", "findtext", "(", "'{}name'", ".", "format", "(", "ns", ")", ")", "vartype", "=", "state", ".", "findtext", "(", "'{}dataType'", ".", "format", "(", "ns", ")", ")", "yield", "(", "name", ",", "vartype", ")" ]
Yield the service's eventable variables. Yields: `tuple`: a tuple of (variable name, data type).
[ "Yield", "the", "services", "eventable", "variables", "." ]
python
train
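A hedged, self-contained sketch of the stateVariable parsing above, run on a toy SCPD document (the XML is illustrative, not fetched from a device):

    import xml.etree.ElementTree as XML

    ns = '{urn:schemas-upnp-org:service-1-0}'
    scpd = ('<scpd xmlns="urn:schemas-upnp-org:service-1-0">'
            '<stateVariable sendEvents="yes">'
            '<name>Volume</name><dataType>ui2</dataType>'
            '</stateVariable></scpd>')
    tree = XML.fromstring(scpd)
    for state in tree.findall('{}stateVariable'.format(ns)):
        if state.attrib['sendEvents'] == 'yes':
            print(state.findtext('{}name'.format(ns)),
                  state.findtext('{}dataType'.format(ns)))  # Volume ui2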
thespacedoctor/astrocalc
astrocalc/times/conversions.py
https://github.com/thespacedoctor/astrocalc/blob/dfbebf9b86d7b2d2110c48a6a4f4194bf8885b86/astrocalc/times/conversions.py#L187-L276
def mjd_to_ut_datetime( self, mjd, sqlDate=False, datetimeObject=False): """*mjd to ut datetime* Precision should be respected. **Key Arguments:** - ``mjd`` -- time in MJD. - ``sqlDate`` -- add a 'T' between date and time instead of space - ``datetimeObject`` -- return a datetime object instead of a string. Default *False* .. todo:: - replace getDateFromMJD in all code - replace getSQLDateFromMJD in all code **Return:** - ``utDatetime`` - the UT datetime in string format **Usage:** .. code-block:: python from astrocalc.times import conversions converter = conversions( log=log ) utDate = converter.mjd_to_ut_datetime( mjd=57504.61577585013 ) print utDate # OUT: 2016-04-26 14:46:43.033 utDate = converter.mjd_to_ut_datetime( mjd=57504.61577585013, sqlDate=True ) print utDate # OUT: 2016-04-26T14:46:43.033 """ self.log.info('starting the ``mjd_to_ut_datetime`` method') from datetime import datetime # CONVERT TO UNIXTIME unixtime = (float(mjd) + 2400000.5 - 2440587.5) * 86400.0 theDate = datetime.utcfromtimestamp(unixtime) if datetimeObject == False: # DETERMINE PRECISION strmjd = repr(mjd) if "." not in strmjd: precisionUnit = "day" precision = 0 utDatetime = theDate.strftime("%Y-%m-%d") else: lenDec = len(strmjd.split(".")[-1]) if lenDec < 2: precisionUnit = "day" precision = 0 utDatetime = theDate.strftime("%Y-%m-%d") elif lenDec < 3: precisionUnit = "hour" precision = 0 utDatetime = theDate.strftime("%Y-%m-%d") elif lenDec < 5: precisionUnit = "minute" precision = 0 utDatetime = theDate.strftime("%Y-%m-%d %H:%M") else: precisionUnit = "second" precision = lenDec - 5 if precision > 3: precision = 3 secs = float(theDate.strftime("%S.%f")) secs = "%02.*f" % (precision, secs) utDatetime = theDate.strftime("%Y-%m-%d %H:%M:") + secs if sqlDate: utDatetime = utDatetime.replace(" ", "T") else: utDatetime = theDate self.log.info('completed the ``mjd_to_ut_datetime`` method') return utDatetime
[ "def", "mjd_to_ut_datetime", "(", "self", ",", "mjd", ",", "sqlDate", "=", "False", ",", "datetimeObject", "=", "False", ")", ":", "self", ".", "log", ".", "info", "(", "'starting the ``mjd_to_ut_datetime`` method'", ")", "from", "datetime", "import", "datetime", "# CONVERT TO UNIXTIME", "unixtime", "=", "(", "float", "(", "mjd", ")", "+", "2400000.5", "-", "2440587.5", ")", "*", "86400.0", "theDate", "=", "datetime", ".", "utcfromtimestamp", "(", "unixtime", ")", "if", "datetimeObject", "==", "False", ":", "# DETERMINE PRECISION", "strmjd", "=", "repr", "(", "mjd", ")", "if", "\".\"", "not", "in", "strmjd", ":", "precisionUnit", "=", "\"day\"", "precision", "=", "0", "utDatetime", "=", "theDate", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", "else", ":", "lenDec", "=", "len", "(", "strmjd", ".", "split", "(", "\".\"", ")", "[", "-", "1", "]", ")", "if", "lenDec", "<", "2", ":", "precisionUnit", "=", "\"day\"", "precision", "=", "0", "utDatetime", "=", "theDate", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", "elif", "lenDec", "<", "3", ":", "precisionUnit", "=", "\"hour\"", "precision", "=", "0", "utDatetime", "=", "theDate", ".", "strftime", "(", "\"%Y-%m-%d\"", ")", "elif", "lenDec", "<", "5", ":", "precisionUnit", "=", "\"minute\"", "precision", "=", "0", "utDatetime", "=", "theDate", ".", "strftime", "(", "\"%Y-%m-%d %H:%M\"", ")", "else", ":", "precisionUnit", "=", "\"second\"", "precision", "=", "lenDec", "-", "5", "if", "precision", ">", "3", ":", "precision", "=", "3", "secs", "=", "float", "(", "theDate", ".", "strftime", "(", "\"%S.%f\"", ")", ")", "secs", "=", "\"%02.*f\"", "%", "(", "precision", ",", "secs", ")", "utDatetime", "=", "theDate", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:\"", ")", "+", "secs", "if", "sqlDate", ":", "utDatetime", "=", "utDatetime", ".", "replace", "(", "\" \"", ",", "\"T\"", ")", "else", ":", "utDatetime", "=", "theDate", "self", ".", "log", ".", "info", "(", "'completed the ``mjd_to_ut_datetime`` method'", ")", "return", "utDatetime" ]
*mjd to ut datetime* Precision should be respected. **Key Arguments:** - ``mjd`` -- time in MJD. - ``sqlDate`` -- add a 'T' between date and time instead of space - ``datetimeObject`` -- return a datetime object instead of a string. Default *False* .. todo:: - replace getDateFromMJD in all code - replace getSQLDateFromMJD in all code **Return:** - ``utDatetime`` - the UT datetime in string format **Usage:** .. code-block:: python from astrocalc.times import conversions converter = conversions( log=log ) utDate = converter.mjd_to_ut_datetime( mjd=57504.61577585013 ) print utDate # OUT: 2016-04-26 14:46:43.033 utDate = converter.mjd_to_ut_datetime( mjd=57504.61577585013, sqlDate=True ) print utDate # OUT: 2016-04-26T14:46:43.033
[ "*", "mjd", "to", "ut", "datetime", "*" ]
python
train
tensorflow/datasets
tensorflow_datasets/core/dataset_utils.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/dataset_utils.py#L176-L242
def as_numpy(dataset, graph=None): """Converts a `tf.data.Dataset` to an iterable of NumPy arrays. `as_numpy` converts a possibly nested structure of `tf.data.Dataset`s and `tf.Tensor`s to iterables of NumPy arrays and NumPy arrays, respectively. Args: dataset: a possibly nested structure of `tf.data.Dataset`s and/or `tf.Tensor`s. graph: `tf.Graph`, optional, explicitly set the graph to use. Returns: A structure matching `dataset` where `tf.data.Dataset`s are converted to generators of NumPy arrays and `tf.Tensor`s are converted to NumPy arrays. """ nested_ds = dataset del dataset # Flatten flat_ds = tf.nest.flatten(nested_ds) flat_np = [] # Type check for Tensors and Datasets for ds_el in flat_ds: types = [type(el) for el in flat_ds] types = tf.nest.pack_sequence_as(nested_ds, types) if not (isinstance(ds_el, tf.Tensor) or tf_compat.is_dataset(ds_el)): raise ValueError("Arguments to as_numpy must be tf.Tensors or " "tf.data.Datasets. Got: %s" % types) if tf.executing_eagerly(): # Eager mode for ds_el in flat_ds: if isinstance(ds_el, tf.Tensor): np_el = ds_el.numpy() elif tf_compat.is_dataset(ds_el): np_el = _eager_dataset_iterator(ds_el) else: assert False flat_np.append(np_el) else: # Graph mode # First create iterators for datasets with utils.maybe_with_graph(graph, create_if_none=False): ds_iters = [ tf.compat.v1.data.make_one_shot_iterator(ds_el).get_next() for ds_el in flat_ds if tf_compat.is_dataset(ds_el) ] ds_iters = [_graph_dataset_iterator(ds_iter, graph) for ds_iter in ds_iters] # Then create numpy arrays for tensors with utils.nogpu_session(graph) as sess: # Shared session for tf.Tensor # Calling sess.run once so that randomness is shared. np_arrays = sess.run([tensor for tensor in flat_ds if not tf_compat.is_dataset(tensor)]) # Merge the dataset iterators and np arrays iter_ds = iter(ds_iters) iter_array = iter(np_arrays) flat_np = [ next(iter_ds) if tf_compat.is_dataset(ds_el) else next(iter_array) for ds_el in flat_ds ] # Nest return tf.nest.pack_sequence_as(nested_ds, flat_np)
[ "def", "as_numpy", "(", "dataset", ",", "graph", "=", "None", ")", ":", "nested_ds", "=", "dataset", "del", "dataset", "# Flatten", "flat_ds", "=", "tf", ".", "nest", ".", "flatten", "(", "nested_ds", ")", "flat_np", "=", "[", "]", "# Type check for Tensors and Datasets", "for", "ds_el", "in", "flat_ds", ":", "types", "=", "[", "type", "(", "el", ")", "for", "el", "in", "flat_ds", "]", "types", "=", "tf", ".", "nest", ".", "pack_sequence_as", "(", "nested_ds", ",", "types", ")", "if", "not", "(", "isinstance", "(", "ds_el", ",", "tf", ".", "Tensor", ")", "or", "tf_compat", ".", "is_dataset", "(", "ds_el", ")", ")", ":", "raise", "ValueError", "(", "\"Arguments to as_numpy must be tf.Tensors or \"", "\"tf.data.Datasets. Got: %s\"", "%", "types", ")", "if", "tf", ".", "executing_eagerly", "(", ")", ":", "# Eager mode", "for", "ds_el", "in", "flat_ds", ":", "if", "isinstance", "(", "ds_el", ",", "tf", ".", "Tensor", ")", ":", "np_el", "=", "ds_el", ".", "numpy", "(", ")", "elif", "tf_compat", ".", "is_dataset", "(", "ds_el", ")", ":", "np_el", "=", "_eager_dataset_iterator", "(", "ds_el", ")", "else", ":", "assert", "False", "flat_np", ".", "append", "(", "np_el", ")", "else", ":", "# Graph mode", "# First create iterators for datasets", "with", "utils", ".", "maybe_with_graph", "(", "graph", ",", "create_if_none", "=", "False", ")", ":", "ds_iters", "=", "[", "tf", ".", "compat", ".", "v1", ".", "data", ".", "make_one_shot_iterator", "(", "ds_el", ")", ".", "get_next", "(", ")", "for", "ds_el", "in", "flat_ds", "if", "tf_compat", ".", "is_dataset", "(", "ds_el", ")", "]", "ds_iters", "=", "[", "_graph_dataset_iterator", "(", "ds_iter", ",", "graph", ")", "for", "ds_iter", "in", "ds_iters", "]", "# Then create numpy arrays for tensors", "with", "utils", ".", "nogpu_session", "(", "graph", ")", "as", "sess", ":", "# Shared session for tf.Tensor", "# Calling sess.run once so that randomness is shared.", "np_arrays", "=", "sess", ".", "run", "(", "[", "tensor", "for", "tensor", "in", "flat_ds", "if", "not", "tf_compat", ".", "is_dataset", "(", "tensor", ")", "]", ")", "# Merge the dataset iterators and np arrays", "iter_ds", "=", "iter", "(", "ds_iters", ")", "iter_array", "=", "iter", "(", "np_arrays", ")", "flat_np", "=", "[", "next", "(", "iter_ds", ")", "if", "tf_compat", ".", "is_dataset", "(", "ds_el", ")", "else", "next", "(", "iter_array", ")", "for", "ds_el", "in", "flat_ds", "]", "# Nest", "return", "tf", ".", "nest", ".", "pack_sequence_as", "(", "nested_ds", ",", "flat_np", ")" ]
Converts a `tf.data.Dataset` to an iterable of NumPy arrays. `as_numpy` converts a possibly nested structure of `tf.data.Dataset`s and `tf.Tensor`s to iterables of NumPy arrays and NumPy arrays, respectively. Args: dataset: a possibly nested structure of `tf.data.Dataset`s and/or `tf.Tensor`s. graph: `tf.Graph`, optional, explicitly set the graph to use. Returns: A structure matching `dataset` where `tf.data.Dataset`s are converted to generators of NumPy arrays and `tf.Tensor`s are converted to NumPy arrays.
[ "Converts", "a", "tf", ".", "data", ".", "Dataset", "to", "an", "iterable", "of", "NumPy", "arrays", "." ]
python
train
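A minimal eager-mode sketch via the public tfds wrapper for `as_numpy`:

    import tensorflow as tf
    import tensorflow_datasets as tfds

    ds = tf.data.Dataset.from_tensor_slices([1, 2, 3])
    for x in tfds.as_numpy(ds):
        print(x)  # 1, 2, 3 as numpy scalars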
ktbyers/netmiko
netmiko/extreme/extreme_slx_ssh.py
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/extreme/extreme_slx_ssh.py#L18-L22
def special_login_handler(self, delay_factor=1): """Adding a delay after login.""" delay_factor = self.select_delay_factor(delay_factor) self.write_channel(self.RETURN) time.sleep(1 * delay_factor)
[ "def", "special_login_handler", "(", "self", ",", "delay_factor", "=", "1", ")", ":", "delay_factor", "=", "self", ".", "select_delay_factor", "(", "delay_factor", ")", "self", ".", "write_channel", "(", "self", ".", "RETURN", ")", "time", ".", "sleep", "(", "1", "*", "delay_factor", ")" ]
Adding a delay after login.
[ "Adding", "a", "delay", "after", "login", "." ]
python
train
cloudant/python-cloudant
src/cloudant/document.py
https://github.com/cloudant/python-cloudant/blob/e0ba190f6ba07fe3522a668747128214ad573c7e/src/cloudant/document.py#L155-L167
def fetch(self): """ Retrieves the content of the current document from the remote database and populates the locally cached Document object with that content. A call to fetch will overwrite any dictionary content currently in the locally cached Document object. """ if self.document_url is None: raise CloudantDocumentException(101) resp = self.r_session.get(self.document_url) resp.raise_for_status() self.clear() self.update(response_to_json_dict(resp, cls=self.decoder))
[ "def", "fetch", "(", "self", ")", ":", "if", "self", ".", "document_url", "is", "None", ":", "raise", "CloudantDocumentException", "(", "101", ")", "resp", "=", "self", ".", "r_session", ".", "get", "(", "self", ".", "document_url", ")", "resp", ".", "raise_for_status", "(", ")", "self", ".", "clear", "(", ")", "self", ".", "update", "(", "response_to_json_dict", "(", "resp", ",", "cls", "=", "self", ".", "decoder", ")", ")" ]
Retrieves the content of the current document from the remote database and populates the locally cached Document object with that content. A call to fetch will overwrite any dictionary content currently in the locally cached Document object.
[ "Retrieves", "the", "content", "of", "the", "current", "document", "from", "the", "remote", "database", "and", "populates", "the", "locally", "cached", "Document", "object", "with", "that", "content", ".", "A", "call", "to", "fetch", "will", "overwrite", "any", "dictionary", "content", "currently", "in", "the", "locally", "cached", "Document", "object", "." ]
python
train
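A hedged sketch of `fetch` following python-cloudant's usual pattern, assuming an open `database` handle and an existing document id:

    from cloudant.document import Document

    doc = Document(database, 'julia006')
    doc.fetch()             # pulls the remote content into the local dict
    print(doc.get('name'))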
scanny/python-pptx
pptx/parts/image.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/parts/image.py#L77-L99
def scale(self, scaled_cx, scaled_cy): """ Return scaled image dimensions in EMU based on the combination of parameters supplied. If *scaled_cx* and *scaled_cy* are both |None|, the native image size is returned. If neither *scaled_cx* nor *scaled_cy* is |None|, their values are returned unchanged. If a value is provided for either *scaled_cx* or *scaled_cy* and the other is |None|, the missing value is calculated such that the image's aspect ratio is preserved. """ image_cx, image_cy = self._native_size if scaled_cx is None and scaled_cy is None: scaled_cx = image_cx scaled_cy = image_cy elif scaled_cx is None: scaling_factor = float(scaled_cy) / float(image_cy) scaled_cx = int(round(image_cx * scaling_factor)) elif scaled_cy is None: scaling_factor = float(scaled_cx) / float(image_cx) scaled_cy = int(round(image_cy * scaling_factor)) return scaled_cx, scaled_cy
[ "def", "scale", "(", "self", ",", "scaled_cx", ",", "scaled_cy", ")", ":", "image_cx", ",", "image_cy", "=", "self", ".", "_native_size", "if", "scaled_cx", "is", "None", "and", "scaled_cy", "is", "None", ":", "scaled_cx", "=", "image_cx", "scaled_cy", "=", "image_cy", "elif", "scaled_cx", "is", "None", ":", "scaling_factor", "=", "float", "(", "scaled_cy", ")", "/", "float", "(", "image_cy", ")", "scaled_cx", "=", "int", "(", "round", "(", "image_cx", "*", "scaling_factor", ")", ")", "elif", "scaled_cy", "is", "None", ":", "scaling_factor", "=", "float", "(", "scaled_cx", ")", "/", "float", "(", "image_cx", ")", "scaled_cy", "=", "int", "(", "round", "(", "image_cy", "*", "scaling_factor", ")", ")", "return", "scaled_cx", ",", "scaled_cy" ]
Return scaled image dimensions in EMU based on the combination of parameters supplied. If *scaled_cx* and *scaled_cy* are both |None|, the native image size is returned. If neither *scaled_cx* nor *scaled_cy* is |None|, their values are returned unchanged. If a value is provided for either *scaled_cx* or *scaled_cy* and the other is |None|, the missing value is calculated such that the image's aspect ratio is preserved.
[ "Return", "scaled", "image", "dimensions", "in", "EMU", "based", "on", "the", "combination", "of", "parameters", "supplied", ".", "If", "*", "scaled_cx", "*", "and", "*", "scaled_cy", "*", "are", "both", "|None|", "the", "native", "image", "size", "is", "returned", ".", "If", "neither", "*", "scaled_cx", "*", "nor", "*", "scaled_cy", "*", "is", "|None|", "their", "values", "are", "returned", "unchanged", ".", "If", "a", "value", "is", "provided", "for", "either", "*", "scaled_cx", "*", "or", "*", "scaled_cy", "*", "and", "the", "other", "is", "|None|", "the", "missing", "value", "is", "calculated", "such", "that", "the", "image", "s", "aspect", "ratio", "is", "preserved", "." ]
python
train
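A worked example of `scale`'s aspect-ratio branch, with a hypothetical 400x300 native size and only scaled_cx supplied:

    image_cx, image_cy = 400, 300  # hypothetical native size in EMU
    scaled_cx = 200
    scaling_factor = float(scaled_cx) / float(image_cx)  # 0.5
    scaled_cy = int(round(image_cy * scaling_factor))    # 150, ratio preserved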
gmr/infoblox
infoblox/record.py
https://github.com/gmr/infoblox/blob/163dd9cff5f77c08751936c56aa8428acfd2d208/infoblox/record.py#L290-L301
def add_ipv6addr(self, ipv6addr): """Add an IPv6 address to the host. :param str ipv6addr: The IP address to add. :raises: ValueError """ for addr in self.ipv6addrs: if ((isinstance(addr, dict) and addr['ipv6addr'] == ipv6addr) or (isinstance(addr, HostIPv4) and addr.ipv6addr == ipv6addr)): raise ValueError('Already exists') self.ipv6addrs.append({'ipv6addr': ipv6addr})
[ "def", "add_ipv6addr", "(", "self", ",", "ipv6addr", ")", ":", "for", "addr", "in", "self", ".", "ipv6addrs", ":", "if", "(", "(", "isinstance", "(", "addr", ",", "dict", ")", "and", "addr", "[", "'ipv6addr'", "]", "==", "ipv6addr", ")", "or", "(", "isinstance", "(", "addr", ",", "HostIPv4", ")", "and", "addr", ".", "ipv6addr", "==", "ipv6addr", ")", ")", ":", "raise", "ValueError", "(", "'Already exists'", ")", "self", ".", "ipv6addrs", ".", "append", "(", "{", "'ipv6addr'", ":", "ipv6addr", "}", ")" ]
Add an IPv6 address to the host. :param str ipv6addr: The IP address to add. :raises: ValueError
[ "Add", "an", "IPv6", "address", "to", "the", "host", "." ]
python
train
mabuchilab/QNET
src/qnet/algebra/core/operator_algebra.py
https://github.com/mabuchilab/QNET/blob/cc20d26dad78691d34c67173e5cd67dcac94208a/src/qnet/algebra/core/operator_algebra.py#L888-L916
def get_coeffs(expr, expand=False, epsilon=0.): """Create a dictionary with all Operator terms of the expression (understood as a sum) as keys and their coefficients as values. The returned object is a defaultdict that returns 0. if a term/key doesn't exist. Args: expr: The operator expression to get all coefficients from. expand: Whether to expand the expression distributively. epsilon: If non-zero, drop all Operators with coefficients that have absolute value less than epsilon. Returns: dict: A dictionary ``{op1: coeff1, op2: coeff2, ...}`` """ if expand: expr = expr.expand() ret = defaultdict(int) operands = expr.operands if isinstance(expr, OperatorPlus) else [expr] for e in operands: c, t = _coeff_term(e) try: if abs(complex(c)) < epsilon: continue except TypeError: pass ret[t] += c return ret
[ "def", "get_coeffs", "(", "expr", ",", "expand", "=", "False", ",", "epsilon", "=", "0.", ")", ":", "if", "expand", ":", "expr", "=", "expr", ".", "expand", "(", ")", "ret", "=", "defaultdict", "(", "int", ")", "operands", "=", "expr", ".", "operands", "if", "isinstance", "(", "expr", ",", "OperatorPlus", ")", "else", "[", "expr", "]", "for", "e", "in", "operands", ":", "c", ",", "t", "=", "_coeff_term", "(", "e", ")", "try", ":", "if", "abs", "(", "complex", "(", "c", ")", ")", "<", "epsilon", ":", "continue", "except", "TypeError", ":", "pass", "ret", "[", "t", "]", "+=", "c", "return", "ret" ]
Create a dictionary with all Operator terms of the expression (understood as a sum) as keys and their coefficients as values. The returned object is a defaultdict that returns 0. if a term/key doesn't exist. Args: expr: The operator expression to get all coefficients from. expand: Whether to expand the expression distributively. epsilon: If non-zero, drop all Operators with coefficients that have absolute value less than epsilon. Returns: dict: A dictionary ``{op1: coeff1, op2: coeff2, ...}``
[ "Create", "a", "dictionary", "with", "all", "Operator", "terms", "of", "the", "expression", "(", "understood", "as", "a", "sum", ")", "as", "keys", "and", "their", "coefficients", "as", "values", "." ]
python
train
xtrementl/focus
focus/plugin/modules/im.py
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/im.py#L50-L67
def _dbus_get_object(bus_name, object_name): """ Fetches DBUS proxy object given the specified parameters. `bus_name` Name of the bus interface. `object_name` Object path related to the interface. Returns object or ``None``. """ try: bus = dbus.SessionBus() obj = bus.get_object(bus_name, object_name) return obj except (NameError, dbus.exceptions.DBusException): return None
[ "def", "_dbus_get_object", "(", "bus_name", ",", "object_name", ")", ":", "try", ":", "bus", "=", "dbus", ".", "SessionBus", "(", ")", "obj", "=", "bus", ".", "get_object", "(", "bus_name", ",", "object_name", ")", "return", "obj", "except", "(", "NameError", ",", "dbus", ".", "exceptions", ".", "DBusException", ")", ":", "return", "None" ]
Fetches DBUS proxy object given the specified parameters. `bus_name` Name of the bus interface. `object_name` Object path related to the interface. Returns object or ``None``.
[ "Fetches", "DBUS", "proxy", "object", "given", "the", "specified", "parameters", "." ]
python
train
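Typical usage with dbus-python, assuming a desktop session bus is available; org.freedesktop.Notifications is a common target but may be absent, in which case the helper simply returns None:

import dbus

def dbus_get_object(bus_name, object_name):
    # Same pattern as above: return the proxy, or None on failure.
    try:
        bus = dbus.SessionBus()
        return bus.get_object(bus_name, object_name)
    except (NameError, dbus.exceptions.DBusException):
        return None

obj = dbus_get_object('org.freedesktop.Notifications',
                      '/org/freedesktop/Notifications')
print(obj is not None)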
AnthonyBloomer/daftlistings
daftlistings/listing.py
https://github.com/AnthonyBloomer/daftlistings/blob/f6c1b52425bc740f443b5efe6632a4bf18ee997f/daftlistings/listing.py#L383-L398
def shortcode(self): """ This method returns the shortcode url of the listing. :return: """ try: div = self._ad_page_content.find( 'div', {'class': 'description_extras'}) index = [i for i, s in enumerate( div.contents) if 'Shortcode' in str(s)][0] + 1 return div.contents[index]['href'] except Exception as e: if self._debug: logging.error( "Error getting shortcode. Error message: " + e.args[0]) return 'N/A'
[ "def", "shortcode", "(", "self", ")", ":", "try", ":", "div", "=", "self", ".", "_ad_page_content", ".", "find", "(", "'div'", ",", "{", "'class'", ":", "'description_extras'", "}", ")", "index", "=", "[", "i", "for", "i", ",", "s", "in", "enumerate", "(", "div", ".", "contents", ")", "if", "'Shortcode'", "in", "str", "(", "s", ")", "]", "[", "0", "]", "+", "1", "return", "div", ".", "contents", "[", "index", "]", "[", "'href'", "]", "except", "Exception", "as", "e", ":", "if", "self", ".", "_debug", ":", "logging", ".", "error", "(", "\"Error getting shortcode. Error message: \"", "+", "e", ".", "args", "[", "0", "]", ")", "return", "'N/A'" ]
This method returns the shortcode url of the listing. :return:
[ "This", "method", "returns", "the", "shortcode", "url", "of", "the", "listing", ".", ":", "return", ":" ]
python
train
numenta/htmresearch
htmresearch/frameworks/location/path_integration_union_narrowing.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/location/path_integration_union_narrowing.py#L294-L308
def getLearnableLocationRepresentation(self): """ Get the cells in the location layer that should be associated with the sensory input layer representation. In some models, this is identical to the active cells. In others, it's a subset. """ learnableCells = np.array([], dtype="uint32") totalPrevCells = 0 for module in self.L6aModules: learnableCells = np.append(learnableCells, module.getLearnableCells() + totalPrevCells) totalPrevCells += module.numberOfCells() return learnableCells
[ "def", "getLearnableLocationRepresentation", "(", "self", ")", ":", "learnableCells", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "\"uint32\"", ")", "totalPrevCells", "=", "0", "for", "module", "in", "self", ".", "L6aModules", ":", "learnableCells", "=", "np", ".", "append", "(", "learnableCells", ",", "module", ".", "getLearnableCells", "(", ")", "+", "totalPrevCells", ")", "totalPrevCells", "+=", "module", ".", "numberOfCells", "(", ")", "return", "learnableCells" ]
Get the cells in the location layer that should be associated with the sensory input layer representation. In some models, this is identical to the active cells. In others, it's a subset.
[ "Get", "the", "cells", "in", "the", "location", "layer", "that", "should", "be", "associated", "with", "the", "sensory", "input", "layer", "representation", ".", "In", "some", "models", "this", "is", "identical", "to", "the", "active", "cells", ".", "In", "others", "it", "s", "a", "subset", "." ]
python
train
dnanexus/dx-toolkit
src/python/dxpy/api.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L1341-L1345
def system_shorten_url(input_params={}, always_retry=True, **kwargs): """ Invokes the /system/shortenURL API method. """ return DXHTTPRequest('/system/shortenURL', input_params, always_retry=always_retry, **kwargs)
[ "def", "system_shorten_url", "(", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/system/shortenURL'", ",", "input_params", ",", "always_retry", "=", "always_retry", ",", "*", "*", "kwargs", ")" ]
Invokes the /system/shortenURL API method.
[ "Invokes", "the", "/", "system", "/", "shortenURL", "API", "method", "." ]
python
train
estnltk/estnltk
estnltk/prettyprinter/templates.py
https://github.com/estnltk/estnltk/blob/28ae334a68a0673072febc318635f04da0dcc54a/estnltk/prettyprinter/templates.py#L44-L63
def get_mark_css(aes_name, css_value): """Generate CSS class for <mark> tag. Parameters ---------- aes_name: str The name of the class. css_value: str The value for the CSS property defined by aes_name. Returns ------- list of str The CSS codeblocks """ css_prop = AES_CSS_MAP[aes_name] if isinstance(css_value, list): return get_mark_css_for_rules(aes_name, css_prop, css_value) else: return get_mark_simple_css(aes_name, css_prop, css_value)
[ "def", "get_mark_css", "(", "aes_name", ",", "css_value", ")", ":", "css_prop", "=", "AES_CSS_MAP", "[", "aes_name", "]", "if", "isinstance", "(", "css_value", ",", "list", ")", ":", "return", "get_mark_css_for_rules", "(", "aes_name", ",", "css_prop", ",", "css_value", ")", "else", ":", "return", "get_mark_simple_css", "(", "aes_name", ",", "css_prop", ",", "css_value", ")" ]
Generate CSS class for <mark> tag. Parameters ---------- aes_name: str The name of the class. css_value: str The value for the CSS property defined by aes_name. Returns ------- list of str The CSS codeblocks
[ "Generate", "CSS", "class", "for", "<mark", ">", "tag", "." ]
python
train
airbus-cert/mispy
mispy/misp.py
https://github.com/airbus-cert/mispy/blob/6d523d6f134d2bd38ec8264be74e73b68403da65/mispy/misp.py#L813-L823
def download(self, attr): """ Download an attribute attachment (if type is malware-sample or attachment only) :param attr: attribute (should be MispAttribute instance) :returns: value of the attachment """ if attr.type not in ['malware-sample', 'attachment']: raise ValueError('Only malware-sample and attachment can be downloaded') return self.GET('/attributes/downloadAttachment/download/%i' % attr.id)
[ "def", "download", "(", "self", ",", "attr", ")", ":", "if", "attr", ".", "type", "not", "in", "[", "'malware-sample'", ",", "'attachment'", "]", ":", "raise", "ValueError", "(", "'Only malware-sample and attachment can be downloaded'", ")", "return", "self", ".", "GET", "(", "'/attributes/downloadAttachment/download/%i'", "%", "attr", ".", "id", ")" ]
Download an attribute attachment (if type is malware-sample or attachment only) :param attr: attribute (should be MispAttribute instance) :returns: value of the attachment
[ "Download", "an", "attribute", "attachment", "(", "if", "type", "is", "malware", "-", "sample", "or", "attachment", "only", ")", ":", "param", "attr", ":", "attribute", "(", "should", "be", "MispAttribute", "instance", ")", ":", "returns", ":", "value", "of", "the", "attachment" ]
python
train
openid/JWTConnect-Python-CryptoJWT
src/cryptojwt/jwk/ec.py
https://github.com/openid/JWTConnect-Python-CryptoJWT/blob/8863cfbfe77ca885084870b234a66b55bd52930c/src/cryptojwt/jwk/ec.py#L142-L188
def deserialize(self): """ Starting with information gathered from the on-the-wire representation of an elliptic curve key (a JWK) initiate an cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePublicKey or EllipticCurvePrivateKey instance. So we have to get from having:: { "kty":"EC", "crv":"P-256", "x":"MKBCTNIcKUSDii11ySs3526iDZ8AiTo7Tu6KPAqv7D4", "y":"4Etl6SRW2YiLUrN5vfvVHuhp7x8PxltmWWlbbM4IFyM", "d":"870MB6gfuTJ4HtUnUvYMyJpr5eUZNP4Bk43bVdj3eAE" } to having a key that can be used for signing/verifying and/or encrypting/decrypting. If 'd' has value then we're dealing with a private key otherwise a public key. 'x' and 'y' MUST have values. If self.pub_key or self.priv_key has a value beforehand this will be overwrite. x, y and d (if present) must be strings or bytes. """ if isinstance(self.x, (str, bytes)): _x = deser(self.x) else: raise ValueError('"x" MUST be a string') if isinstance(self.y, (str, bytes)): _y = deser(self.y) else: raise ValueError('"y" MUST be a string') if self.d: try: if isinstance(self.d, (str, bytes)): _d = deser(self.d) self.priv_key = ec_construct_private( {'x': _x, 'y': _y, 'crv': self.crv, 'd': _d}) self.pub_key = self.priv_key.public_key() except ValueError as err: raise DeSerializationNotPossible(str(err)) else: self.pub_key = ec_construct_public( {'x': _x, 'y': _y, 'crv': self.crv})
[ "def", "deserialize", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "x", ",", "(", "str", ",", "bytes", ")", ")", ":", "_x", "=", "deser", "(", "self", ".", "x", ")", "else", ":", "raise", "ValueError", "(", "'\"x\" MUST be a string'", ")", "if", "isinstance", "(", "self", ".", "y", ",", "(", "str", ",", "bytes", ")", ")", ":", "_y", "=", "deser", "(", "self", ".", "y", ")", "else", ":", "raise", "ValueError", "(", "'\"y\" MUST be a string'", ")", "if", "self", ".", "d", ":", "try", ":", "if", "isinstance", "(", "self", ".", "d", ",", "(", "str", ",", "bytes", ")", ")", ":", "_d", "=", "deser", "(", "self", ".", "d", ")", "self", ".", "priv_key", "=", "ec_construct_private", "(", "{", "'x'", ":", "_x", ",", "'y'", ":", "_y", ",", "'crv'", ":", "self", ".", "crv", ",", "'d'", ":", "_d", "}", ")", "self", ".", "pub_key", "=", "self", ".", "priv_key", ".", "public_key", "(", ")", "except", "ValueError", "as", "err", ":", "raise", "DeSerializationNotPossible", "(", "str", "(", "err", ")", ")", "else", ":", "self", ".", "pub_key", "=", "ec_construct_public", "(", "{", "'x'", ":", "_x", ",", "'y'", ":", "_y", ",", "'crv'", ":", "self", ".", "crv", "}", ")" ]
Starting with information gathered from the on-the-wire representation of an elliptic curve key (a JWK) initiate a cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePublicKey or EllipticCurvePrivateKey instance. So we have to get from having:: { "kty":"EC", "crv":"P-256", "x":"MKBCTNIcKUSDii11ySs3526iDZ8AiTo7Tu6KPAqv7D4", "y":"4Etl6SRW2YiLUrN5vfvVHuhp7x8PxltmWWlbbM4IFyM", "d":"870MB6gfuTJ4HtUnUvYMyJpr5eUZNP4Bk43bVdj3eAE" } to having a key that can be used for signing/verifying and/or encrypting/decrypting. If 'd' has a value then we're dealing with a private key, otherwise a public key. 'x' and 'y' MUST have values. If self.pub_key or self.priv_key has a value beforehand, it will be overwritten. x, y and d (if present) must be strings or bytes.
[ "Starting", "with", "information", "gathered", "from", "the", "on", "-", "the", "-", "wire", "representation", "of", "an", "elliptic", "curve", "key", "(", "a", "JWK", ")", "initiate", "an", "cryptography", ".", "hazmat", ".", "primitives", ".", "asymmetric", ".", "ec", ".", "EllipticCurvePublicKey", "or", "EllipticCurvePrivateKey", "instance", ".", "So", "we", "have", "to", "get", "from", "having", "::" ]
python
train
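What deserialize boils down to can be sketched directly against the cryptography package, using the example JWK from the docstring; the b64url_to_int helper below is an assumed stand-in for the module's deser/ec_construct_* functions:

import base64
from cryptography.hazmat.primitives.asymmetric import ec

def b64url_to_int(val):
    # Padding-tolerant base64url decode, big-endian bytes -> int.
    data = base64.urlsafe_b64decode(val + '=' * (-len(val) % 4))
    return int.from_bytes(data, 'big')

jwk = {
    "kty": "EC", "crv": "P-256",
    "x": "MKBCTNIcKUSDii11ySs3526iDZ8AiTo7Tu6KPAqv7D4",
    "y": "4Etl6SRW2YiLUrN5vfvVHuhp7x8PxltmWWlbbM4IFyM",
    "d": "870MB6gfuTJ4HtUnUvYMyJpr5eUZNP4Bk43bVdj3eAE",
}

pub_numbers = ec.EllipticCurvePublicNumbers(
    b64url_to_int(jwk["x"]), b64url_to_int(jwk["y"]), ec.SECP256R1())
# 'd' present -> private key, whose public half matches the x/y pair.
priv_key = ec.EllipticCurvePrivateNumbers(
    b64url_to_int(jwk["d"]), pub_numbers).private_key()
pub_key = priv_key.public_key()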
alonho/pql
pql/__init__.py
https://github.com/alonho/pql/blob/fd8fefcb720b4325d27ab71f15a882fe5f9f77e2/pql/__init__.py#L7-L14
def find(expression, schema=None): ''' Gets an <expression> and optional <schema>. <expression> should be a string of python code. <schema> should be a dictionary mapping field names to types. ''' parser = SchemaFreeParser() if schema is None else SchemaAwareParser(schema) return parser.parse(expression)
[ "def", "find", "(", "expression", ",", "schema", "=", "None", ")", ":", "parser", "=", "SchemaFreeParser", "(", ")", "if", "schema", "is", "None", "else", "SchemaAwareParser", "(", "schema", ")", "return", "parser", ".", "parse", "(", "expression", ")" ]
Gets an <expression> and optional <schema>. <expression> should be a string of python code. <schema> should be a dictionary mapping field names to types.
[ "Gets", "an", "<expression", ">", "and", "optional", "<schema", ">", ".", "<expression", ">", "should", "be", "a", "string", "of", "python", "code", ".", "<schema", ">", "should", "be", "a", "dictionary", "mapping", "field", "names", "to", "types", "." ]
python
train
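Typical schema-free usage: pql parses ordinary Python syntax into a MongoDB query document. The commented output is indicative of the shape pql produces:

import pql

query = pql.find("age > 21 and name == 'John'")
print(query)   # e.g. {'$and': [{'age': {'$gt': 21}}, {'name': 'John'}]}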
spyder-ide/spyder
spyder/plugins/editor/widgets/base.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/widgets/base.py#L1199-L1202
def focusOutEvent(self, event): """Reimplemented to handle focus""" self.focus_changed.emit() QPlainTextEdit.focusOutEvent(self, event)
[ "def", "focusOutEvent", "(", "self", ",", "event", ")", ":", "self", ".", "focus_changed", ".", "emit", "(", ")", "QPlainTextEdit", ".", "focusOutEvent", "(", "self", ",", "event", ")" ]
Reimplemented to handle focus
[ "Reimplemented", "to", "handle", "focus" ]
python
train
sosreport/sos
sos/plugins/__init__.py
https://github.com/sosreport/sos/blob/2ebc04da53dc871c8dd5243567afa4f8592dca29/sos/plugins/__init__.py#L1009-L1024
def get_cmd_output_now(self, exe, suggest_filename=None, root_symlink=False, timeout=300, stderr=True, chroot=True, runat=None, env=None, binary=False, sizelimit=None, pred=None): """Execute a command and save the output to a file for inclusion in the report. """ if not self.test_predicate(cmd=True, pred=pred): self._log_info("skipped cmd output '%s' due to predicate (%s)" % (exe, self.get_predicate(cmd=True, pred=pred))) return None return self._get_cmd_output_now(exe, timeout=timeout, stderr=stderr, chroot=chroot, runat=runat, env=env, binary=binary, sizelimit=sizelimit)
[ "def", "get_cmd_output_now", "(", "self", ",", "exe", ",", "suggest_filename", "=", "None", ",", "root_symlink", "=", "False", ",", "timeout", "=", "300", ",", "stderr", "=", "True", ",", "chroot", "=", "True", ",", "runat", "=", "None", ",", "env", "=", "None", ",", "binary", "=", "False", ",", "sizelimit", "=", "None", ",", "pred", "=", "None", ")", ":", "if", "not", "self", ".", "test_predicate", "(", "cmd", "=", "True", ",", "pred", "=", "pred", ")", ":", "self", ".", "_log_info", "(", "\"skipped cmd output '%s' due to predicate (%s)\"", "%", "(", "exe", ",", "self", ".", "get_predicate", "(", "cmd", "=", "True", ",", "pred", "=", "pred", ")", ")", ")", "return", "None", "return", "self", ".", "_get_cmd_output_now", "(", "exe", ",", "timeout", "=", "timeout", ",", "stderr", "=", "stderr", ",", "chroot", "=", "chroot", ",", "runat", "=", "runat", ",", "env", "=", "env", ",", "binary", "=", "binary", ",", "sizelimit", "=", "sizelimit", ")" ]
Execute a command and save the output to a file for inclusion in the report.
[ "Execute", "a", "command", "and", "save", "the", "output", "to", "a", "file", "for", "inclusion", "in", "the", "report", "." ]
python
train
fabaff/python-opensensemap-api
opensensemap_api/__init__.py
https://github.com/fabaff/python-opensensemap-api/blob/3c4f5473c514185087aae5d766ab4d5736ec1f30/opensensemap_api/__init__.py#L123-L132
def get_value(self, key): """Extract a value for a given key.""" for title in _TITLES.get(key, ()) + (key,): try: value = [entry['lastMeasurement']['value'] for entry in self.data['sensors'] if entry['title'] == title][0] return value except IndexError: pass return None
[ "def", "get_value", "(", "self", ",", "key", ")", ":", "for", "title", "in", "_TITLES", ".", "get", "(", "key", ",", "(", ")", ")", "+", "(", "key", ",", ")", ":", "try", ":", "value", "=", "[", "entry", "[", "'lastMeasurement'", "]", "[", "'value'", "]", "for", "entry", "in", "self", ".", "data", "[", "'sensors'", "]", "if", "entry", "[", "'title'", "]", "==", "title", "]", "[", "0", "]", "return", "value", "except", "IndexError", ":", "pass", "return", "None" ]
Extract a value for a given key.
[ "Extract", "a", "value", "for", "a", "given", "key", "." ]
python
train
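The alias-then-key lookup can be shown standalone against a payload shaped like an openSenseMap sensors response (the _TITLES contents here are assumed for illustration):

_TITLES = {'temperature': ('Temperatur',)}   # illustrative alias table

data = {'sensors': [
    {'title': 'Temperatur', 'lastMeasurement': {'value': '22.3'}},
    {'title': 'rel. Luftfeuchte', 'lastMeasurement': {'value': '54.1'}},
]}

def get_value(key):
    # Try known aliases first, then the key itself; first hit wins.
    for title in _TITLES.get(key, ()) + (key,):
        matches = [s['lastMeasurement']['value']
                   for s in data['sensors'] if s['title'] == title]
        if matches:
            return matches[0]
    return None

print(get_value('temperature'))   # '22.3'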
axialmarket/fsq
fsq/configure.py
https://github.com/axialmarket/fsq/blob/43b84c292cb8a187599d86753b947cf73248f989/fsq/configure.py#L167-L170
def down_host(trg_queue, host, user=None, group=None, mode=None): ''' Down a host queue by creating a down file in the host queue directory ''' down(trg_queue, user=user, group=group, mode=mode, host=host)
[ "def", "down_host", "(", "trg_queue", ",", "host", ",", "user", "=", "None", ",", "group", "=", "None", ",", "mode", "=", "None", ")", ":", "down", "(", "trg_queue", ",", "user", "=", "user", ",", "group", "=", "group", ",", "mode", "=", "mode", ",", "host", "=", "host", ")" ]
Down a host queue by creating a down file in the host queue directory
[ "Down", "a", "host", "queue", "by", "creating", "a", "down", "file", "in", "the", "host", "queue", "directory" ]
python
train
ssalentin/plip
plip/modules/supplemental.py
https://github.com/ssalentin/plip/blob/906c8d36463689779b403f6c2c9ed06174acaf9a/plip/modules/supplemental.py#L480-L483
def write_message(msg, indent=False, mtype='standard', caption=False): """Writes message if verbose mode is set.""" if (mtype == 'debug' and config.DEBUG) or (mtype != 'debug' and config.VERBOSE) or mtype == 'error': message(msg, indent=indent, mtype=mtype, caption=caption)
[ "def", "write_message", "(", "msg", ",", "indent", "=", "False", ",", "mtype", "=", "'standard'", ",", "caption", "=", "False", ")", ":", "if", "(", "mtype", "==", "'debug'", "and", "config", ".", "DEBUG", ")", "or", "(", "mtype", "!=", "'debug'", "and", "config", ".", "VERBOSE", ")", "or", "mtype", "==", "'error'", ":", "message", "(", "msg", ",", "indent", "=", "indent", ",", "mtype", "=", "mtype", ",", "caption", "=", "caption", ")" ]
Writes message if verbose mode is set.
[ "Writes", "message", "if", "verbose", "mode", "is", "set", "." ]
python
train
saltstack/salt
salt/modules/namecheap_domains_dns.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/namecheap_domains_dns.py#L118-L170
def set_hosts(sld, tld, hosts): ''' Sets DNS host records settings for the requested domain. returns True if the host records were set successfully sld SLD of the domain name tld TLD of the domain name hosts Must be passed as a list of Python dictionaries, with each dictionary containing the following keys: - **hostname** - **recordtype** - One of ``A``, ``AAAA``, ``CNAME``, ``MX``, ``MXE``, ``TXT``, ``URL``, ``URL301``, or ``FRAME`` - **address** - URL or IP address - **ttl** - An integer between 60 and 60000 (default: ``1800``) Additonally, the ``mxpref`` key can be present, but must be accompanied by an ``emailtype`` key. CLI Example: .. code-block:: bash salt 'my-minion' namecheap_domains_dns.set_hosts sld tld hosts ''' opts = salt.utils.namecheap.get_opts('namecheap.domains.dns.setHosts') opts['SLD'] = sld opts['TLD'] = tld i = 1 for hostrecord in hosts: str_i = six.text_type(i) opts['HostName' + str_i] = hostrecord['hostname'] opts['RecordType' + str_i] = hostrecord['recordtype'] opts['Address' + str_i] = hostrecord['address'] if 'ttl' in hostrecord: opts['TTL' + str_i] = hostrecord['ttl'] if 'mxpref' in hostrecord: opts['MXPref' + str_i] = hostrecord['mxpref'] opts['EmailType'] = hostrecord['emailtype'] i += 1 response_xml = salt.utils.namecheap.post_request(opts) if response_xml is None: return False dnsresult = response_xml.getElementsByTagName('DomainDNSSetHostsResult')[0] return salt.utils.namecheap.string_to_value(dnsresult.getAttribute('IsSuccess'))
[ "def", "set_hosts", "(", "sld", ",", "tld", ",", "hosts", ")", ":", "opts", "=", "salt", ".", "utils", ".", "namecheap", ".", "get_opts", "(", "'namecheap.domains.dns.setHosts'", ")", "opts", "[", "'SLD'", "]", "=", "sld", "opts", "[", "'TLD'", "]", "=", "tld", "i", "=", "1", "for", "hostrecord", "in", "hosts", ":", "str_i", "=", "six", ".", "text_type", "(", "i", ")", "opts", "[", "'HostName'", "+", "str_i", "]", "=", "hostrecord", "[", "'hostname'", "]", "opts", "[", "'RecordType'", "+", "str_i", "]", "=", "hostrecord", "[", "'recordtype'", "]", "opts", "[", "'Address'", "+", "str_i", "]", "=", "hostrecord", "[", "'address'", "]", "if", "'ttl'", "in", "hostrecord", ":", "opts", "[", "'TTL'", "+", "str_i", "]", "=", "hostrecord", "[", "'ttl'", "]", "if", "'mxpref'", "in", "hostrecord", ":", "opts", "[", "'MXPref'", "+", "str_i", "]", "=", "hostrecord", "[", "'mxpref'", "]", "opts", "[", "'EmailType'", "]", "=", "hostrecord", "[", "'emailtype'", "]", "i", "+=", "1", "response_xml", "=", "salt", ".", "utils", ".", "namecheap", ".", "post_request", "(", "opts", ")", "if", "response_xml", "is", "None", ":", "return", "False", "dnsresult", "=", "response_xml", ".", "getElementsByTagName", "(", "'DomainDNSSetHostsResult'", ")", "[", "0", "]", "return", "salt", ".", "utils", ".", "namecheap", ".", "string_to_value", "(", "dnsresult", ".", "getAttribute", "(", "'IsSuccess'", ")", ")" ]
Sets DNS host records settings for the requested domain. Returns True if the host records were set successfully. sld SLD of the domain name tld TLD of the domain name hosts Must be passed as a list of Python dictionaries, with each dictionary containing the following keys: - **hostname** - **recordtype** - One of ``A``, ``AAAA``, ``CNAME``, ``MX``, ``MXE``, ``TXT``, ``URL``, ``URL301``, or ``FRAME`` - **address** - URL or IP address - **ttl** - An integer between 60 and 60000 (default: ``1800``) Additionally, the ``mxpref`` key can be present, but must be accompanied by an ``emailtype`` key. CLI Example: .. code-block:: bash salt 'my-minion' namecheap_domains_dns.set_hosts sld tld hosts
[ "Sets", "DNS", "host", "records", "settings", "for", "the", "requested", "domain", "." ]
python
train
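A sketch of the hosts payload the function expects, assembled from the documented keys (domain names, addresses, and the emailtype value are placeholders); the commented line shows the in-Salt invocation:

hosts = [
    {'hostname': '@', 'recordtype': 'A', 'address': '192.0.2.10', 'ttl': 1800},
    {'hostname': 'www', 'recordtype': 'CNAME', 'address': 'example.com.'},
    # mxpref must be accompanied by emailtype, per the docstring.
    {'hostname': '@', 'recordtype': 'MX', 'address': 'mail.example.com.',
     'mxpref': 10, 'emailtype': 'MX'},
]
# Inside a Salt execution context this would be called as:
# __salt__['namecheap_domains_dns.set_hosts']('example', 'com', hosts)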
Diviyan-Kalainathan/CausalDiscoveryToolbox
cdt/generators/cyclic_graph_generator.py
https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/generators/cyclic_graph_generator.py#L78-L110
def init_variables(self, verbose=False): """Redefine the causes of the graph.""" # Resetting adjacency matrix for i in range(self.nodes): for j in np.random.choice(range(self.nodes), np.random.randint( 0, self.parents_max + 1), replace=False): if i != j: self.adjacency_matrix[j, i] = 1 try: assert any([sum(self.adjacency_matrix[:, i]) == self.parents_max for i in range(self.nodes)]) self.g = nx.DiGraph(self.adjacency_matrix) assert list(nx.simple_cycles(self.g)) assert any(len(i) == 2 for i in nx.simple_cycles(self.g)) except AssertionError: if verbose: print("Regenerating, graph non valid...") self.init_variables() if verbose: print("Matrix generated ! \ Number of cycles: {}".format(len(list(nx.simple_cycles(self.g))))) for i in range(self.nodes): self.data.iloc[:, i] = scale(self.initial_generator(self.points)) # Mechanisms self.cfunctions = [self.mechanism(int(sum(self.adjacency_matrix[:, i])), self.points, self.noise, noise_coeff=self.noise_coeff) for i in range(self.nodes)]
[ "def", "init_variables", "(", "self", ",", "verbose", "=", "False", ")", ":", "# Resetting adjacency matrix", "for", "i", "in", "range", "(", "self", ".", "nodes", ")", ":", "for", "j", "in", "np", ".", "random", ".", "choice", "(", "range", "(", "self", ".", "nodes", ")", ",", "np", ".", "random", ".", "randint", "(", "0", ",", "self", ".", "parents_max", "+", "1", ")", ",", "replace", "=", "False", ")", ":", "if", "i", "!=", "j", ":", "self", ".", "adjacency_matrix", "[", "j", ",", "i", "]", "=", "1", "try", ":", "assert", "any", "(", "[", "sum", "(", "self", ".", "adjacency_matrix", "[", ":", ",", "i", "]", ")", "==", "self", ".", "parents_max", "for", "i", "in", "range", "(", "self", ".", "nodes", ")", "]", ")", "self", ".", "g", "=", "nx", ".", "DiGraph", "(", "self", ".", "adjacency_matrix", ")", "assert", "list", "(", "nx", ".", "simple_cycles", "(", "self", ".", "g", ")", ")", "assert", "any", "(", "len", "(", "i", ")", "==", "2", "for", "i", "in", "nx", ".", "simple_cycles", "(", "self", ".", "g", ")", ")", "except", "AssertionError", ":", "if", "verbose", ":", "print", "(", "\"Regenerating, graph non valid...\"", ")", "self", ".", "init_variables", "(", ")", "if", "verbose", ":", "print", "(", "\"Matrix generated ! \\\n Number of cycles: {}\"", ".", "format", "(", "len", "(", "list", "(", "nx", ".", "simple_cycles", "(", "self", ".", "g", ")", ")", ")", ")", ")", "for", "i", "in", "range", "(", "self", ".", "nodes", ")", ":", "self", ".", "data", ".", "iloc", "[", ":", ",", "i", "]", "=", "scale", "(", "self", ".", "initial_generator", "(", "self", ".", "points", ")", ")", "# Mechanisms", "self", ".", "cfunctions", "=", "[", "self", ".", "mechanism", "(", "int", "(", "sum", "(", "self", ".", "adjacency_matrix", "[", ":", ",", "i", "]", ")", ")", ",", "self", ".", "points", ",", "self", ".", "noise", ",", "noise_coeff", "=", "self", ".", "noise_coeff", ")", "for", "i", "in", "range", "(", "self", ".", "nodes", ")", "]" ]
Redefine the causes of the graph.
[ "Redefine", "the", "causes", "of", "the", "graph", "." ]
python
valid
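The adjacency sampling and cycle validation can be reproduced standalone with numpy and networkx (the regenerate-until-valid loop and the mechanism setup are omitted):

import numpy as np
import networkx as nx

nodes, parents_max = 5, 2
rng = np.random.default_rng(0)
adj = np.zeros((nodes, nodes))
for i in range(nodes):
    for j in rng.choice(nodes, rng.integers(0, parents_max + 1), replace=False):
        if i != j:
            adj[j, i] = 1   # j is a parent of i

g = nx.DiGraph(adj)
cycles = list(nx.simple_cycles(g))
# The real generator regenerates until at least one 2-cycle exists.
print(len(cycles), any(len(c) == 2 for c in cycles))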
markovmodel/PyEMMA
pyemma/_ext/variational/solvers/direct.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/_ext/variational/solvers/direct.py#L142-L181
def spd_inv_sqrt(W, epsilon=1e-10, method='QR', return_rank=False): """ Computes :math:`W^{-1/2}` of symmetric positive-definite matrix :math:`W`. by first reducing W to a low-rank approximation that is truly spd. Parameters ---------- W : ndarray((m,m), dtype=float) Symmetric positive-definite (spd) matrix. epsilon : float Truncation parameter. Eigenvalues with norms smaller than this cutoff will be removed. method : str Method to perform the decomposition of :math:`W` before inverting. Options are: * 'QR': QR-based robust eigenvalue decomposition of W * 'schur': Schur decomposition of W Returns ------- L : ndarray((n, r)) :math:`W^{-1/2}` after reduction of W to a low-rank spd matrix """ if _np.shape(W)[0] == 1: if W[0,0] < epsilon: raise _ZeroRankError( 'All eigenvalues are smaller than %g, rank reduction would discard all dimensions.' % epsilon) Winv = 1./_np.sqrt(W[0, 0]) sm = _np.ones(1) else: sm, Vm = spd_eig(W, epsilon=epsilon, method=method) Winv = _np.dot(Vm, _np.diag(1.0 / _np.sqrt(sm))).dot(Vm.T) # return split if return_rank: return Winv, sm.shape[0] else: return Winv
[ "def", "spd_inv_sqrt", "(", "W", ",", "epsilon", "=", "1e-10", ",", "method", "=", "'QR'", ",", "return_rank", "=", "False", ")", ":", "if", "_np", ".", "shape", "(", "W", ")", "[", "0", "]", "==", "1", ":", "if", "W", "[", "0", ",", "0", "]", "<", "epsilon", ":", "raise", "_ZeroRankError", "(", "'All eigenvalues are smaller than %g, rank reduction would discard all dimensions.'", "%", "epsilon", ")", "Winv", "=", "1.", "/", "_np", ".", "sqrt", "(", "W", "[", "0", ",", "0", "]", ")", "sm", "=", "_np", ".", "ones", "(", "1", ")", "else", ":", "sm", ",", "Vm", "=", "spd_eig", "(", "W", ",", "epsilon", "=", "epsilon", ",", "method", "=", "method", ")", "Winv", "=", "_np", ".", "dot", "(", "Vm", ",", "_np", ".", "diag", "(", "1.0", "/", "_np", ".", "sqrt", "(", "sm", ")", ")", ")", ".", "dot", "(", "Vm", ".", "T", ")", "# return split", "if", "return_rank", ":", "return", "Winv", ",", "sm", ".", "shape", "[", "0", "]", "else", ":", "return", "Winv" ]
Computes :math:`W^{-1/2}` of symmetric positive-definite matrix :math:`W` by first reducing W to a low-rank approximation that is truly spd. Parameters ---------- W : ndarray((m,m), dtype=float) Symmetric positive-definite (spd) matrix. epsilon : float Truncation parameter. Eigenvalues with norms smaller than this cutoff will be removed. method : str Method to perform the decomposition of :math:`W` before inverting. Options are: * 'QR': QR-based robust eigenvalue decomposition of W * 'schur': Schur decomposition of W Returns ------- L : ndarray((n, r)) :math:`W^{-1/2}` after reduction of W to a low-rank spd matrix
[ "Computes", ":", "math", ":", "W^", "{", "-", "1", "/", "2", "}", "of", "symmetric", "positive", "-", "definite", "matrix", ":", "math", ":", "W", "." ]
python
train
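For a well-conditioned matrix the computation reduces to a plain eigendecomposition; a numpy sketch that also verifies the defining property W^{-1/2} W W^{-1/2} = I:

import numpy as np

# Small spd matrix; same construction spd_inv_sqrt uses after truncation.
W = np.array([[2.0, 0.5], [0.5, 1.0]])
s, V = np.linalg.eigh(W)
Winv_sqrt = V @ np.diag(1.0 / np.sqrt(s)) @ V.T

print(np.allclose(Winv_sqrt @ W @ Winv_sqrt, np.eye(2)))   # True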
etesync/radicale_storage_etesync
radicale_storage_etesync/__init__.py
https://github.com/etesync/radicale_storage_etesync/blob/73d549bad7a37f060ece65c653c18a859a9962f2/radicale_storage_etesync/__init__.py#L185-L232
def discover(cls, path, depth="0"): """Discover a list of collections under the given ``path``. If ``depth`` is "0", only the actual object under ``path`` is returned. If ``depth`` is anything but "0", it is considered as "1" and direct children are included in the result. The ``path`` is relative. The root collection "/" must always exist. """ # Path should already be sanitized attributes = _get_attributes_from_path(path) try: if len(attributes) == 3: # If an item, create a collection for the item. item = attributes.pop() path = "/".join(attributes) collection = cls(path, _is_principal(path)) yield collection.get(item) return collection = cls(path, _is_principal(path)) except api.exceptions.DoesNotExist: return yield collection if depth == "0": return if len(attributes) == 0: yield cls(posixpath.join(path, cls.user), principal=True) elif len(attributes) == 1: for journal in cls.etesync.list(): if journal.collection.TYPE in (api.AddressBook.TYPE, api.Calendar.TYPE, api.TaskList.TYPE): yield cls(posixpath.join(path, journal.uid), principal=False) elif len(attributes) == 2: for item in collection.list(): yield collection.get(item) elif len(attributes) > 2: raise RuntimeError("Found more than one attribute. Shouldn't happen")
[ "def", "discover", "(", "cls", ",", "path", ",", "depth", "=", "\"0\"", ")", ":", "# Path should already be sanitized", "attributes", "=", "_get_attributes_from_path", "(", "path", ")", "try", ":", "if", "len", "(", "attributes", ")", "==", "3", ":", "# If an item, create a collection for the item.", "item", "=", "attributes", ".", "pop", "(", ")", "path", "=", "\"/\"", ".", "join", "(", "attributes", ")", "collection", "=", "cls", "(", "path", ",", "_is_principal", "(", "path", ")", ")", "yield", "collection", ".", "get", "(", "item", ")", "return", "collection", "=", "cls", "(", "path", ",", "_is_principal", "(", "path", ")", ")", "except", "api", ".", "exceptions", ".", "DoesNotExist", ":", "return", "yield", "collection", "if", "depth", "==", "\"0\"", ":", "return", "if", "len", "(", "attributes", ")", "==", "0", ":", "yield", "cls", "(", "posixpath", ".", "join", "(", "path", ",", "cls", ".", "user", ")", ",", "principal", "=", "True", ")", "elif", "len", "(", "attributes", ")", "==", "1", ":", "for", "journal", "in", "cls", ".", "etesync", ".", "list", "(", ")", ":", "if", "journal", ".", "collection", ".", "TYPE", "in", "(", "api", ".", "AddressBook", ".", "TYPE", ",", "api", ".", "Calendar", ".", "TYPE", ",", "api", ".", "TaskList", ".", "TYPE", ")", ":", "yield", "cls", "(", "posixpath", ".", "join", "(", "path", ",", "journal", ".", "uid", ")", ",", "principal", "=", "False", ")", "elif", "len", "(", "attributes", ")", "==", "2", ":", "for", "item", "in", "collection", ".", "list", "(", ")", ":", "yield", "collection", ".", "get", "(", "item", ")", "elif", "len", "(", "attributes", ")", ">", "2", ":", "raise", "RuntimeError", "(", "\"Found more than one attribute. Shouldn't happen\"", ")" ]
Discover a list of collections under the given ``path``. If ``depth`` is "0", only the actual object under ``path`` is returned. If ``depth`` is anything but "0", it is considered as "1" and direct children are included in the result. The ``path`` is relative. The root collection "/" must always exist.
[ "Discover", "a", "list", "of", "collections", "under", "the", "given", "path", "." ]
python
train
nccgroup/Scout2
AWSScout2/services/iam.py
https://github.com/nccgroup/Scout2/blob/5d86d46d7ed91a92000496189e9cfa6b98243937/AWSScout2/services/iam.py#L107-L126
def parse_groups(self, group, params): """ Parse a single IAM group and fetch additional information """ # When resuming upon throttling error, skip if already fetched if group['GroupName'] in self.groups: return api_client = params['api_client'] # Ensure consistent attribute names across resource types group['id'] = group.pop('GroupId') group['name'] = group.pop('GroupName') group['arn'] = group.pop('Arn') # Get group's members group['users'] = self.__fetch_group_users(api_client, group['name']); # Get inline policies policies = self.__get_inline_policies(api_client, 'group', group['id'], group['name']) if len(policies): group['inline_policies'] = policies group['inline_policies_count'] = len(policies) self.groups[group['id']] = group
[ "def", "parse_groups", "(", "self", ",", "group", ",", "params", ")", ":", "# When resuming upon throttling error, skip if already fetched", "if", "group", "[", "'GroupName'", "]", "in", "self", ".", "groups", ":", "return", "api_client", "=", "params", "[", "'api_client'", "]", "# Ensure consistent attribute names across resource types", "group", "[", "'id'", "]", "=", "group", ".", "pop", "(", "'GroupId'", ")", "group", "[", "'name'", "]", "=", "group", ".", "pop", "(", "'GroupName'", ")", "group", "[", "'arn'", "]", "=", "group", ".", "pop", "(", "'Arn'", ")", "# Get group's members", "group", "[", "'users'", "]", "=", "self", ".", "__fetch_group_users", "(", "api_client", ",", "group", "[", "'name'", "]", ")", "# Get inline policies", "policies", "=", "self", ".", "__get_inline_policies", "(", "api_client", ",", "'group'", ",", "group", "[", "'id'", "]", ",", "group", "[", "'name'", "]", ")", "if", "len", "(", "policies", ")", ":", "group", "[", "'inline_policies'", "]", "=", "policies", "group", "[", "'inline_policies_count'", "]", "=", "len", "(", "policies", ")", "self", ".", "groups", "[", "group", "[", "'id'", "]", "]", "=", "group" ]
Parse a single IAM group and fetch additional information
[ "Parse", "a", "single", "IAM", "group", "and", "fetch", "additional", "information" ]
python
train
inasafe/inasafe
safe/definitions/utilities.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/definitions/utilities.py#L119-L129
def hazard_units(hazard): """Helper to get unit of a hazard. :param hazard: Hazard type. :type hazard: str :returns: List of hazard units. :rtype: list """ units = definition(hazard)['continuous_hazard_units'] return sorted(units, key=lambda k: k['key'])
[ "def", "hazard_units", "(", "hazard", ")", ":", "units", "=", "definition", "(", "hazard", ")", "[", "'continuous_hazard_units'", "]", "return", "sorted", "(", "units", ",", "key", "=", "lambda", "k", ":", "k", "[", "'key'", "]", ")" ]
Helper to get unit of a hazard. :param hazard: Hazard type. :type hazard: str :returns: List of hazard units. :rtype: list
[ "Helper", "to", "get", "unit", "of", "a", "hazard", "." ]
python
train
openvax/varcode
varcode/effects/effect_collection.py
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/effects/effect_collection.py#L211-L216
def top_priority_effect_per_variant(self): """Highest priority effect for each unique variant""" return OrderedDict( (variant, top_priority_effect(variant_effects)) for (variant, variant_effects) in self.groupby_variant().items())
[ "def", "top_priority_effect_per_variant", "(", "self", ")", ":", "return", "OrderedDict", "(", "(", "variant", ",", "top_priority_effect", "(", "variant_effects", ")", ")", "for", "(", "variant", ",", "variant_effects", ")", "in", "self", ".", "groupby_variant", "(", ")", ".", "items", "(", ")", ")" ]
Highest priority effect for each unique variant
[ "Highest", "priority", "effect", "for", "each", "unique", "variant" ]
python
train
biolink/ontobio
bin/biogolr-bulk.py
https://github.com/biolink/ontobio/blob/4e512a7831cfe6bc1b32f2c3be2ba41bc5cf7345/bin/biogolr-bulk.py#L31-L85
def main(): """ Wrapper for OGR """ parser = argparse.ArgumentParser( description='Command line interface to python-ontobio.golr library' """ Provides command line interface onto the ontobio.golr python library, a high level abstraction layer over Monarch and GO solr indices. """, formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('-o', '--outfile', type=str, required=False, help='Path to output file') parser.add_argument('-C', '--category', nargs=2, type=str, required=True, help='Category pair. E.g. disease gene') parser.add_argument('-s', '--species', type=str, required=True, help='NCBITaxon ID') parser.add_argument('-S', '--slim', nargs='*', type=str, required=False, help='Slim IDs') parser.add_argument('-L', '--limit', type=int, default=100000, required=False, help='Limit on number of rows') parser.add_argument('-u', '--url', type=str, required=False, help='Solr URL. E.g. http://localhost:8983/solr/golr') parser.add_argument('-v', '--verbosity', default=0, action='count', help='Increase output verbosity') parser.add_argument('ids',nargs='*') args = parser.parse_args() if args.verbosity >= 2: logging.basicConfig(level=logging.DEBUG) elif args.verbosity == 1: logging.basicConfig(level=logging.INFO) else: logging.basicConfig(level=logging.WARNING) logging.info("Welcome!") [subject_category, object_category] = args.category assocs = bulk_fetch(subject_category, object_category, args.species, rows=args.limit, slim=args.slim, url=args.url) for a in assocs: print("{}\t{}\t{}".format(a['subject'], a['relation'], ";".join(a['objects'])))
[ "def", "main", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Command line interface to python-ontobio.golr library'", "\"\"\"\n\n Provides command line interface onto the ontobio.golr python library, a high level\n abstraction layer over Monarch and GO solr indices.\n \"\"\"", ",", "formatter_class", "=", "argparse", ".", "RawTextHelpFormatter", ")", "parser", ".", "add_argument", "(", "'-o'", ",", "'--outfile'", ",", "type", "=", "str", ",", "required", "=", "False", ",", "help", "=", "'Path to output file'", ")", "parser", ".", "add_argument", "(", "'-C'", ",", "'--category'", ",", "nargs", "=", "2", ",", "type", "=", "str", ",", "required", "=", "True", ",", "help", "=", "'Category pair. E.g. disease gene'", ")", "parser", ".", "add_argument", "(", "'-s'", ",", "'--species'", ",", "type", "=", "str", ",", "required", "=", "True", ",", "help", "=", "'NCBITaxon ID'", ")", "parser", ".", "add_argument", "(", "'-S'", ",", "'--slim'", ",", "nargs", "=", "'*'", ",", "type", "=", "str", ",", "required", "=", "False", ",", "help", "=", "'Slim IDs'", ")", "parser", ".", "add_argument", "(", "'-L'", ",", "'--limit'", ",", "type", "=", "int", ",", "default", "=", "100000", ",", "required", "=", "False", ",", "help", "=", "'Limit on number of rows'", ")", "parser", ".", "add_argument", "(", "'-u'", ",", "'--url'", ",", "type", "=", "str", ",", "required", "=", "False", ",", "help", "=", "'Solr URL. E.g. http://localhost:8983/solr/golr'", ")", "parser", ".", "add_argument", "(", "'-v'", ",", "'--verbosity'", ",", "default", "=", "0", ",", "action", "=", "'count'", ",", "help", "=", "'Increase output verbosity'", ")", "parser", ".", "add_argument", "(", "'ids'", ",", "nargs", "=", "'*'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "if", "args", ".", "verbosity", ">=", "2", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "DEBUG", ")", "elif", "args", ".", "verbosity", "==", "1", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ")", "else", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "WARNING", ")", "logging", ".", "info", "(", "\"Welcome!\"", ")", "[", "subject_category", ",", "object_category", "]", "=", "args", ".", "category", "assocs", "=", "bulk_fetch", "(", "subject_category", ",", "object_category", ",", "args", ".", "species", ",", "rows", "=", "args", ".", "limit", ",", "slim", "=", "args", ".", "slim", ",", "url", "=", "args", ".", "url", ")", "for", "a", "in", "assocs", ":", "print", "(", "\"{}\\t{}\\t{}\"", ".", "format", "(", "a", "[", "'subject'", "]", ",", "a", "[", "'relation'", "]", ",", "\";\"", ".", "join", "(", "a", "[", "'objects'", "]", ")", ")", ")" ]
Wrapper for OGR
[ "Wrapper", "for", "OGR" ]
python
train
fastavro/fastavro
fastavro/_write_py.py
https://github.com/fastavro/fastavro/blob/bafe826293e19eb93e77bbb0f6adfa059c7884b2/fastavro/_write_py.py#L302-L341
def write_union(fo, datum, schema): """A union is encoded by first writing a long value indicating the zero-based position within the union of the schema of its value. The value is then encoded per the indicated schema within the union.""" if isinstance(datum, tuple): (name, datum) = datum for index, candidate in enumerate(schema): if extract_record_type(candidate) == 'record': schema_name = candidate['name'] else: schema_name = candidate if name == schema_name: break else: msg = 'provided union type name %s not found in schema %s' \ % (name, schema) raise ValueError(msg) else: pytype = type(datum) best_match_index = -1 most_fields = -1 for index, candidate in enumerate(schema): if validate(datum, candidate, raise_errors=False): if extract_record_type(candidate) == 'record': fields = len(candidate['fields']) if fields > most_fields: best_match_index = index most_fields = fields else: best_match_index = index break if best_match_index < 0: msg = '%r (type %s) do not match %s' % (datum, pytype, schema) raise ValueError(msg) index = best_match_index # write data write_long(fo, index) write_data(fo, datum, schema[index])
[ "def", "write_union", "(", "fo", ",", "datum", ",", "schema", ")", ":", "if", "isinstance", "(", "datum", ",", "tuple", ")", ":", "(", "name", ",", "datum", ")", "=", "datum", "for", "index", ",", "candidate", "in", "enumerate", "(", "schema", ")", ":", "if", "extract_record_type", "(", "candidate", ")", "==", "'record'", ":", "schema_name", "=", "candidate", "[", "'name'", "]", "else", ":", "schema_name", "=", "candidate", "if", "name", "==", "schema_name", ":", "break", "else", ":", "msg", "=", "'provided union type name %s not found in schema %s'", "%", "(", "name", ",", "schema", ")", "raise", "ValueError", "(", "msg", ")", "else", ":", "pytype", "=", "type", "(", "datum", ")", "best_match_index", "=", "-", "1", "most_fields", "=", "-", "1", "for", "index", ",", "candidate", "in", "enumerate", "(", "schema", ")", ":", "if", "validate", "(", "datum", ",", "candidate", ",", "raise_errors", "=", "False", ")", ":", "if", "extract_record_type", "(", "candidate", ")", "==", "'record'", ":", "fields", "=", "len", "(", "candidate", "[", "'fields'", "]", ")", "if", "fields", ">", "most_fields", ":", "best_match_index", "=", "index", "most_fields", "=", "fields", "else", ":", "best_match_index", "=", "index", "break", "if", "best_match_index", "<", "0", ":", "msg", "=", "'%r (type %s) do not match %s'", "%", "(", "datum", ",", "pytype", ",", "schema", ")", "raise", "ValueError", "(", "msg", ")", "index", "=", "best_match_index", "# write data", "write_long", "(", "fo", ",", "index", ")", "write_data", "(", "fo", ",", "datum", ",", "schema", "[", "index", "]", ")" ]
A union is encoded by first writing a long value indicating the zero-based position within the union of the schema of its value. The value is then encoded per the indicated schema within the union.
[ "A", "union", "is", "encoded", "by", "first", "writing", "a", "long", "value", "indicating", "the", "zero", "-", "based", "position", "within", "the", "union", "of", "the", "schema", "of", "its", "value", ".", "The", "value", "is", "then", "encoded", "per", "the", "indicated", "schema", "within", "the", "union", "." ]
python
train
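The index-then-value layout is observable through fastavro's public API: with a ['null', 'string'] union, write_union emits branch index 0 for None and 1 for a string, then the value under that branch's schema:

import io
import fastavro

schema = fastavro.parse_schema({
    'name': 'Rec', 'type': 'record',
    'fields': [{'name': 'note', 'type': ['null', 'string']}],
})

buf = io.BytesIO()
fastavro.writer(buf, schema, [{'note': None}, {'note': 'hello'}])
buf.seek(0)
print([r['note'] for r in fastavro.reader(buf)])   # [None, 'hello']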
hearsaycorp/normalize
normalize/selector.py
https://github.com/hearsaycorp/normalize/blob/8b36522ddca6d41b434580bd848f3bdaa7a999c8/normalize/selector.py#L864-L898
def patch(self, target, source, copy=False): """Copies fields from ``obj`` to ``target``. If a matched field does not exist in ``obj``, it will be deleted from ``target``, otherwise it will be assigned (or copied). args: ``target=``\ *OBJECT* the object to set the fields in ``source=``\ *OBJECT* the object to lift the fields from ``copy=``\ *BOOL*\ \|\ *FUNCTION* deep copy the values set, using copy.deepcopy (or the passed function). False by default. """ # TODO: this could also be a whole lot more efficient! fs_val = [] for fs in self: try: fs_val.append((fs, fs.get(source))) except AttributeError: fs_val.append((fs, _None)) except FieldSelectorException: raise if copy and not callable(copy): copy = deepcopy for fs, val in fs_val: if val is _None: fs.delete(target) else: fs.post(target, val if not copy else copy(val))
[ "def", "patch", "(", "self", ",", "target", ",", "source", ",", "copy", "=", "False", ")", ":", "# TODO: this could also be a whole lot more efficient!", "fs_val", "=", "[", "]", "for", "fs", "in", "self", ":", "try", ":", "fs_val", ".", "append", "(", "(", "fs", ",", "fs", ".", "get", "(", "source", ")", ")", ")", "except", "AttributeError", ":", "fs_val", ".", "append", "(", "(", "fs", ",", "_None", ")", ")", "except", "FieldSelectorException", ":", "raise", "if", "copy", "and", "not", "callable", "(", "copy", ")", ":", "copy", "=", "deepcopy", "for", "fs", ",", "val", "in", "fs_val", ":", "if", "val", "is", "_None", ":", "fs", ".", "delete", "(", "target", ")", "else", ":", "fs", ".", "post", "(", "target", ",", "val", "if", "not", "copy", "else", "copy", "(", "val", ")", ")" ]
Copies fields from ``source`` to ``target``. If a matched field does not exist in ``source``, it will be deleted from ``target``, otherwise it will be assigned (or copied). args: ``target=``\ *OBJECT* the object to set the fields in ``source=``\ *OBJECT* the object to lift the fields from ``copy=``\ *BOOL*\ \|\ *FUNCTION* deep copy the values set, using copy.deepcopy (or the passed function). False by default.
[ "Copies", "fields", "from", "obj", "to", "target", ".", "If", "a", "matched", "field", "does", "not", "exist", "in", "obj", "it", "will", "be", "deleted", "from", "target", "otherwise", "it", "will", "be", "assigned", "(", "or", "copied", ")", "." ]
python
train
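A simplified standalone rendering of the same patch semantics, with a plain list of attribute names in place of normalize's FieldSelector machinery:

from copy import deepcopy

def patch(target, source, fields, copy=False):
    if copy and not callable(copy):
        copy = deepcopy
    for name in fields:
        if hasattr(source, name):
            value = getattr(source, name)
            # Assign, deep-copying when requested.
            setattr(target, name, copy(value) if copy else value)
        elif hasattr(target, name):
            # Field absent on the source: remove it from the target.
            delattr(target, name)

class Obj(object):
    pass

src, dst = Obj(), Obj()
src.a = [1, 2]
dst.b = 'stale'
patch(dst, src, ['a', 'b'], copy=True)
print(dst.a, hasattr(dst, 'b'))   # [1, 2] False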
mdsol/rwslib
rwslib/builders/metadata.py
https://github.com/mdsol/rwslib/blob/1a86bc072d408c009ed1de8bf6e98a1769f54d18/rwslib/builders/metadata.py#L2180-L2185
def check_action_type(self, value): """Set the value for the CheckActionType, validating input""" if value is not None: if not isinstance(value, ActionType): raise AttributeError("Invalid check action %s" % value) self._check_action_type = value
[ "def", "check_action_type", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "if", "not", "isinstance", "(", "value", ",", "ActionType", ")", ":", "raise", "AttributeError", "(", "\"Invalid check action %s\"", "%", "value", ")", "self", ".", "_check_action_type", "=", "value" ]
Set the value for the CheckActionType, validating input
[ "Set", "the", "value", "for", "the", "CheckActionType", "validating", "input" ]
python
train
deepmind/sonnet
sonnet/python/modules/basic_rnn.py
https://github.com/deepmind/sonnet/blob/00612ca3178964d86b556e062694d808ff81fcca/sonnet/python/modules/basic_rnn.py#L294-L309
def _check_cores_output_sizes(self): """Checks the output_sizes of the cores of the DeepRNN module. Raises: ValueError: if the outputs of the cores cannot be concatenated along their first dimension. """ for core_sizes in zip(*tuple(_get_flat_core_sizes(self._cores))): first_core_list = core_sizes[0][1:] for i, core_list in enumerate(core_sizes[1:]): if core_list[1:] != first_core_list: raise ValueError("The outputs of the provided cores are not able " "to be concatenated along the first feature " "dimension. Core 0 has shape %s, whereas Core %d " "has shape %s - these must only differ in the first " "dimension" % (core_sizes[0], i + 1, core_list))
[ "def", "_check_cores_output_sizes", "(", "self", ")", ":", "for", "core_sizes", "in", "zip", "(", "*", "tuple", "(", "_get_flat_core_sizes", "(", "self", ".", "_cores", ")", ")", ")", ":", "first_core_list", "=", "core_sizes", "[", "0", "]", "[", "1", ":", "]", "for", "i", ",", "core_list", "in", "enumerate", "(", "core_sizes", "[", "1", ":", "]", ")", ":", "if", "core_list", "[", "1", ":", "]", "!=", "first_core_list", ":", "raise", "ValueError", "(", "\"The outputs of the provided cores are not able \"", "\"to be concatenated along the first feature \"", "\"dimension. Core 0 has shape %s, whereas Core %d \"", "\"has shape %s - these must only differ in the first \"", "\"dimension\"", "%", "(", "core_sizes", "[", "0", "]", ",", "i", "+", "1", ",", "core_list", ")", ")" ]
Checks the output_sizes of the cores of the DeepRNN module. Raises: ValueError: if the outputs of the cores cannot be concatenated along their first dimension.
[ "Checks", "the", "output_sizes", "of", "the", "cores", "of", "the", "DeepRNN", "module", "." ]
python
train
openstax/cnx-publishing
cnxpublishing/db.py
https://github.com/openstax/cnx-publishing/blob/f55b4a2c45d8618737288f1b74b4139d5ac74154/cnxpublishing/db.py#L349-L357
def _validate_subjects(cursor, model):
    """Given a database cursor and model, check the subjects against
    the subject vocabulary.
    """
    subject_vocab = [term[0] for term in acquire_subject_vocabulary(cursor)]
    subjects = model.metadata.get('subjects', [])
    invalid_subjects = [s for s in subjects if s not in subject_vocab]
    if invalid_subjects:
        raise exceptions.InvalidMetadata('subjects', invalid_subjects)
[ "def", "_validate_subjects", "(", "cursor", ",", "model", ")", ":", "subject_vocab", "=", "[", "term", "[", "0", "]", "for", "term", "in", "acquire_subject_vocabulary", "(", "cursor", ")", "]", "subjects", "=", "model", ".", "metadata", ".", "get", "(", "'subjects'", ",", "[", "]", ")", "invalid_subjects", "=", "[", "s", "for", "s", "in", "subjects", "if", "s", "not", "in", "subject_vocab", "]", "if", "invalid_subjects", ":", "raise", "exceptions", ".", "InvalidMetadata", "(", "'subjects'", ",", "invalid_subjects", ")" ]
Given a database cursor and model, check the subjects against the subject vocabulary.
[ "Give", "a", "database", "cursor", "and", "model", "check", "the", "subjects", "against", "the", "subject", "vocabulary", "." ]
python
valid
PmagPy/PmagPy
programs/thellier_gui.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/thellier_gui.py#L1246-L1296
def select_bounds_in_logger(self, index): """ sets index as the upper or lower bound of a fit based on what the other bound is and selects it in the logger. Requires 2 calls to completely update a interpretation. NOTE: Requires an interpretation to exist before it is called. @param: index - index of the step to select in the logger """ tmin_index, tmax_index = "", "" if str(self.tmin_box.GetValue()) != "": tmin_index = self.tmin_box.GetSelection() if str(self.tmax_box.GetValue()) != "": tmax_index = self.tmax_box.GetSelection() # if there is no prior interpretation, assume first click is # tmin and set highest possible temp as tmax if not tmin_index and not tmax_index: tmin_index = index self.tmin_box.SetSelection(index) # set to the highest step max_step_data = self.Data[self.s]['datablock'][-1] step_key = 'treatment_temp' if MICROWAVE: step_key = 'treatment_mw_power' max_step = max_step_data[step_key] tmax_index = self.tmax_box.GetCount() - 1 self.tmax_box.SetSelection(tmax_index) elif self.list_bound_loc != 0: if self.list_bound_loc == 1: if index < tmin_index: self.tmin_box.SetSelection(index) self.tmax_box.SetSelection(tmin_index) elif index == tmin_index: pass else: self.tmax_box.SetSelection(index) else: if index > tmax_index: self.tmin_box.SetSelection(tmax_index) self.tmax_box.SetSelection(index) elif index == tmax_index: pass else: self.tmin_box.SetSelection(index) self.list_bound_loc = 0 else: if index < tmax_index: self.tmin_box.SetSelection(index) self.list_bound_loc = 1 else: self.tmax_box.SetSelection(index) self.list_bound_loc = 2 self.logger.Select(index, on=0) self.get_new_T_PI_parameters(-1)
[ "def", "select_bounds_in_logger", "(", "self", ",", "index", ")", ":", "tmin_index", ",", "tmax_index", "=", "\"\"", ",", "\"\"", "if", "str", "(", "self", ".", "tmin_box", ".", "GetValue", "(", ")", ")", "!=", "\"\"", ":", "tmin_index", "=", "self", ".", "tmin_box", ".", "GetSelection", "(", ")", "if", "str", "(", "self", ".", "tmax_box", ".", "GetValue", "(", ")", ")", "!=", "\"\"", ":", "tmax_index", "=", "self", ".", "tmax_box", ".", "GetSelection", "(", ")", "# if there is no prior interpretation, assume first click is", "# tmin and set highest possible temp as tmax", "if", "not", "tmin_index", "and", "not", "tmax_index", ":", "tmin_index", "=", "index", "self", ".", "tmin_box", ".", "SetSelection", "(", "index", ")", "# set to the highest step", "max_step_data", "=", "self", ".", "Data", "[", "self", ".", "s", "]", "[", "'datablock'", "]", "[", "-", "1", "]", "step_key", "=", "'treatment_temp'", "if", "MICROWAVE", ":", "step_key", "=", "'treatment_mw_power'", "max_step", "=", "max_step_data", "[", "step_key", "]", "tmax_index", "=", "self", ".", "tmax_box", ".", "GetCount", "(", ")", "-", "1", "self", ".", "tmax_box", ".", "SetSelection", "(", "tmax_index", ")", "elif", "self", ".", "list_bound_loc", "!=", "0", ":", "if", "self", ".", "list_bound_loc", "==", "1", ":", "if", "index", "<", "tmin_index", ":", "self", ".", "tmin_box", ".", "SetSelection", "(", "index", ")", "self", ".", "tmax_box", ".", "SetSelection", "(", "tmin_index", ")", "elif", "index", "==", "tmin_index", ":", "pass", "else", ":", "self", ".", "tmax_box", ".", "SetSelection", "(", "index", ")", "else", ":", "if", "index", ">", "tmax_index", ":", "self", ".", "tmin_box", ".", "SetSelection", "(", "tmax_index", ")", "self", ".", "tmax_box", ".", "SetSelection", "(", "index", ")", "elif", "index", "==", "tmax_index", ":", "pass", "else", ":", "self", ".", "tmin_box", ".", "SetSelection", "(", "index", ")", "self", ".", "list_bound_loc", "=", "0", "else", ":", "if", "index", "<", "tmax_index", ":", "self", ".", "tmin_box", ".", "SetSelection", "(", "index", ")", "self", ".", "list_bound_loc", "=", "1", "else", ":", "self", ".", "tmax_box", ".", "SetSelection", "(", "index", ")", "self", ".", "list_bound_loc", "=", "2", "self", ".", "logger", ".", "Select", "(", "index", ",", "on", "=", "0", ")", "self", ".", "get_new_T_PI_parameters", "(", "-", "1", ")" ]
sets index as the upper or lower bound of a fit based on what the other bound is and selects it in the logger. Requires 2 calls to completely update a interpretation. NOTE: Requires an interpretation to exist before it is called. @param: index - index of the step to select in the logger
[ "sets", "index", "as", "the", "upper", "or", "lower", "bound", "of", "a", "fit", "based", "on", "what", "the", "other", "bound", "is", "and", "selects", "it", "in", "the", "logger", ".", "Requires", "2", "calls", "to", "completely", "update", "a", "interpretation", ".", "NOTE", ":", "Requires", "an", "interpretation", "to", "exist", "before", "it", "is", "called", "." ]
python
train
log2timeline/plaso
plaso/lib/objectfilter.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/lib/objectfilter.py#L645-L657
def Compile(self, filter_implementation): """Compile the binary expression into a filter object.""" operator = self.operator.lower() if operator in ('and', '&&'): method = 'AndFilter' elif operator in ('or', '||'): method = 'OrFilter' else: raise errors.ParseError( 'Invalid binary operator {0:s}.'.format(operator)) args = [x.Compile(filter_implementation) for x in self.args] return filter_implementation.FILTERS[method](arguments=args)
[ "def", "Compile", "(", "self", ",", "filter_implementation", ")", ":", "operator", "=", "self", ".", "operator", ".", "lower", "(", ")", "if", "operator", "in", "(", "'and'", ",", "'&&'", ")", ":", "method", "=", "'AndFilter'", "elif", "operator", "in", "(", "'or'", ",", "'||'", ")", ":", "method", "=", "'OrFilter'", "else", ":", "raise", "errors", ".", "ParseError", "(", "'Invalid binary operator {0:s}.'", ".", "format", "(", "operator", ")", ")", "args", "=", "[", "x", ".", "Compile", "(", "filter_implementation", ")", "for", "x", "in", "self", ".", "args", "]", "return", "filter_implementation", ".", "FILTERS", "[", "method", "]", "(", "arguments", "=", "args", ")" ]
Compile the binary expression into a filter object.
[ "Compile", "the", "binary", "expression", "into", "a", "filter", "object", "." ]
python
train
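A self-contained sketch of the operator dispatch, with stub filter classes standing in for plaso's filter implementation:

class AndFilter(object):
    def __init__(self, arguments):
        self.arguments = arguments

class OrFilter(object):
    def __init__(self, arguments):
        self.arguments = arguments

FILTERS = {'AndFilter': AndFilter, 'OrFilter': OrFilter}

def compile_binary(operator, args):
    # Both spellings of each operator map onto one filter class.
    operator = operator.lower()
    if operator in ('and', '&&'):
        method = 'AndFilter'
    elif operator in ('or', '||'):
        method = 'OrFilter'
    else:
        raise ValueError('Invalid binary operator {0:s}.'.format(operator))
    return FILTERS[method](arguments=args)

print(type(compile_binary('&&', ['lhs', 'rhs'])).__name__)   # AndFilter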
Robin8Put/pmes
balance/handler.py
https://github.com/Robin8Put/pmes/blob/338bec94162098f05b75bad035417317e1252fd2/balance/handler.py#L700-L766
async def get_frozen(self, *args, **kwargs): """ Get frozen users balance Accepts: - uid [integer] (users id) - types [list | string] (array with needed types or "all") Returns: { type [string] (blockchain type): amount } """ super().validate(*args, **kwargs) if kwargs.get("message"): kwargs = json.loads(kwargs.get("message")) # Get daya from request coinids = kwargs.get("coinids") uid = kwargs.get("uid") address = kwargs.get("address") # Check if required fields exists try: coinid = coinid.replace("TEST", "") except: pass try: uid = int(uid) except: return await self.error_400("User id must be integer. ") if not uid and address: uid = await self.get_uid_by_address(address=address, coinid=coinid) if isinstance(uid, dict): return uid if not all([types, uid]): return await self.error_400("Get frozen. Missed required fields.") if isinstance(types, list): actives = {} for coinid in coinids: database = self.client[self.collection] collection = database[coinid] # Get current balance balance = await collection.find_one({"uid":uid}) if not balance: return await self.error_404( "Get frozen. Balance with uid:%s and type:%s not found" % (uid, coinid)) # Collect actives actives[coinid] = int(balance["amount_frozen"]) if isinstance(coinids, str): actives = {} for coinid in self.types: database = self.client[coinid] collection = database[self.collection] # Get current balance balance = await collection.find_one({"uid":uid}) if not balance: return await self.error_404( "Get frozen. Balance with uid:%s and type:%s not found" % (uid, coinid)) # Collect actives actives[coinid] = int(balance["amount_frozen"]) return actives
[ "async", "def", "get_frozen", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "super", "(", ")", ".", "validate", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "kwargs", ".", "get", "(", "\"message\"", ")", ":", "kwargs", "=", "json", ".", "loads", "(", "kwargs", ".", "get", "(", "\"message\"", ")", ")", "# Get daya from request", "coinids", "=", "kwargs", ".", "get", "(", "\"coinids\"", ")", "uid", "=", "kwargs", ".", "get", "(", "\"uid\"", ")", "address", "=", "kwargs", ".", "get", "(", "\"address\"", ")", "# Check if required fields exists", "try", ":", "coinid", "=", "coinid", ".", "replace", "(", "\"TEST\"", ",", "\"\"", ")", "except", ":", "pass", "try", ":", "uid", "=", "int", "(", "uid", ")", "except", ":", "return", "await", "self", ".", "error_400", "(", "\"User id must be integer. \"", ")", "if", "not", "uid", "and", "address", ":", "uid", "=", "await", "self", ".", "get_uid_by_address", "(", "address", "=", "address", ",", "coinid", "=", "coinid", ")", "if", "isinstance", "(", "uid", ",", "dict", ")", ":", "return", "uid", "if", "not", "all", "(", "[", "types", ",", "uid", "]", ")", ":", "return", "await", "self", ".", "error_400", "(", "\"Get frozen. Missed required fields.\"", ")", "if", "isinstance", "(", "types", ",", "list", ")", ":", "actives", "=", "{", "}", "for", "coinid", "in", "coinids", ":", "database", "=", "self", ".", "client", "[", "self", ".", "collection", "]", "collection", "=", "database", "[", "coinid", "]", "# Get current balance", "balance", "=", "await", "collection", ".", "find_one", "(", "{", "\"uid\"", ":", "uid", "}", ")", "if", "not", "balance", ":", "return", "await", "self", ".", "error_404", "(", "\"Get frozen. Balance with uid:%s and type:%s not found\"", "%", "(", "uid", ",", "coinid", ")", ")", "# Collect actives", "actives", "[", "coinid", "]", "=", "int", "(", "balance", "[", "\"amount_frozen\"", "]", ")", "if", "isinstance", "(", "coinids", ",", "str", ")", ":", "actives", "=", "{", "}", "for", "coinid", "in", "self", ".", "types", ":", "database", "=", "self", ".", "client", "[", "coinid", "]", "collection", "=", "database", "[", "self", ".", "collection", "]", "# Get current balance", "balance", "=", "await", "collection", ".", "find_one", "(", "{", "\"uid\"", ":", "uid", "}", ")", "if", "not", "balance", ":", "return", "await", "self", ".", "error_404", "(", "\"Get frozen. Balance with uid:%s and type:%s not found\"", "%", "(", "uid", ",", "coinid", ")", ")", "# Collect actives", "actives", "[", "coinid", "]", "=", "int", "(", "balance", "[", "\"amount_frozen\"", "]", ")", "return", "actives" ]
Get frozen users balance

Accepts:
    - uid [integer] (users id)
    - coinids [list | string] (array with needed types or "all")

Returns:
    {
        type [string] (blockchain type): amount
    }
[ "Get", "frozen", "users", "balance" ]
python
train
ternaris/marv
marv/config.py
https://github.com/ternaris/marv/blob/c221354d912ff869bbdb4f714a86a70be30d823e/marv/config.py#L48-L69
def make_funcs(dataset, setdir, store):
    """Functions available for listing columns and filters."""
    return {
        'cat': lambda *lists: [x for lst in lists for x in lst],
        'comments': lambda: None,
        'detail_route': detail_route,
        'format': lambda fmt, *args: fmt.format(*args),
        'get': partial(getnode, dataset, setdir, store),
        'join': lambda sep, *args: sep.join([x for x in args if x]),
        'len': len,
        'link': (lambda href, title, target=None:
                 {'href': href or "",
                  'title': title or "",
                  'target': '_blank' if target is None else target}),
        'list': lambda *x: filter(None, list(x)),
        'max': max,
        'min': min,
        'status': lambda: ['#STATUS#'],
        'sum': sum,
        'tags': lambda: ['#TAGS#'],
        'trace': print_trace,
    }
[ "def", "make_funcs", "(", "dataset", ",", "setdir", ",", "store", ")", ":", "return", "{", "'cat'", ":", "lambda", "*", "lists", ":", "[", "x", "for", "lst", "in", "lists", "for", "x", "in", "lst", "]", ",", "'comments'", ":", "lambda", ":", "None", ",", "'detail_route'", ":", "detail_route", ",", "'format'", ":", "lambda", "fmt", ",", "*", "args", ":", "fmt", ".", "format", "(", "*", "args", ")", ",", "'get'", ":", "partial", "(", "getnode", ",", "dataset", ",", "setdir", ",", "store", ")", ",", "'join'", ":", "lambda", "sep", ",", "*", "args", ":", "sep", ".", "join", "(", "[", "x", "for", "x", "in", "args", "if", "x", "]", ")", ",", "'len'", ":", "len", ",", "'link'", ":", "(", "lambda", "href", ",", "title", ",", "target", "=", "None", ":", "{", "'href'", ":", "href", "or", "\"\"", ",", "'title'", ":", "title", "or", "\"\"", ",", "'target'", ":", "'_blank'", "if", "target", "is", "None", "else", "target", "}", ")", ",", "'list'", ":", "lambda", "*", "x", ":", "filter", "(", "None", ",", "list", "(", "x", ")", ")", ",", "'max'", ":", "max", ",", "'min'", ":", "min", ",", "'status'", ":", "lambda", ":", "[", "'#STATUS#'", "]", ",", "'sum'", ":", "sum", ",", "'tags'", ":", "lambda", ":", "[", "'#TAGS#'", "]", ",", "'trace'", ":", "print_trace", ",", "}" ]
Functions available for listing columns and filters.
[ "Functions", "available", "for", "listing", "columns", "and", "filters", "." ]
python
train
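Two of the lambdas above, reproduced standalone to show the intended call pattern (the 'get' and 'detail_route' entries need live marv state and are omitted):

funcs = {
    'join': lambda sep, *args: sep.join([x for x in args if x]),
    'cat': lambda *lists: [x for lst in lists for x in lst],
}
assert funcs['join'](', ', 'a', None, 'b') == 'a, b'  # falsy entries are dropped
assert funcs['cat']([1, 2], [3]) == [1, 2, 3]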
ultrabug/py3status
py3status/events.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/events.py#L158-L168
def wm_msg(self, module_name, command):
    """
    Execute the message with i3-msg or swaymsg and log its output.
    """
    wm_msg = self.config["wm"]["msg"]
    pipe = Popen([wm_msg, command], stdout=PIPE)
    self.py3_wrapper.log(
        '{} module="{}" command="{}" stdout={}'.format(
            wm_msg, module_name, command, pipe.stdout.read()
        )
    )
[ "def", "wm_msg", "(", "self", ",", "module_name", ",", "command", ")", ":", "wm_msg", "=", "self", ".", "config", "[", "\"wm\"", "]", "[", "\"msg\"", "]", "pipe", "=", "Popen", "(", "[", "wm_msg", ",", "command", "]", ",", "stdout", "=", "PIPE", ")", "self", ".", "py3_wrapper", ".", "log", "(", "'{} module=\"{}\" command=\"{}\" stdout={}'", ".", "format", "(", "wm_msg", ",", "module_name", ",", "command", ",", "pipe", ".", "stdout", ".", "read", "(", ")", ")", ")" ]
Execute the message with i3-msg or swaymsg and log its output.
[ "Execute", "the", "message", "with", "i3", "-", "msg", "or", "swaymsg", "and", "log", "its", "output", "." ]
python
train
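Since the Popen above is read from but never waited on, subprocess.run is the tidier equivalent; "echo" stands in here for the configured i3-msg/swaymsg binary:

import subprocess

# run() both captures stdout and reaps the child process
result = subprocess.run(["echo", "workspace 2"], stdout=subprocess.PIPE)
print('stdout={}'.format(result.stdout))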
b3j0f/utils
b3j0f/utils/runtime.py
https://github.com/b3j0f/utils/blob/793871b98e90fd1c7ce9ef0dce839cc18fcbc6ff/b3j0f/utils/runtime.py#L145-L276
def _make_constants(func, builtin_only=False, stoplist=None, verbose=None):
    """Generate new function where code is an input function code with all
    LOAD_GLOBAL statements changed to LOAD_CONST statements.

    :param function func: code function to transform.
    :param bool builtin_only: only transform builtin objects.
    :param list stoplist: attribute names to not transform.
    :param function verbose: logger function which takes in parameter a message

    .. warning::
        Be sure global attributes to transform are not resolved dynamically."""

    result = func

    if stoplist is None:
        stoplist = []

    try:
        fcode = func.__code__
    except AttributeError:
        return func  # Jython doesn't have a __code__ attribute.

    newcode = list(fcode.co_code) if PY3 else [ord(co) for co in fcode.co_code]
    newconsts = list(fcode.co_consts)
    names = fcode.co_names
    codelen = len(newcode)

    env = vars(builtins).copy()
    if builtin_only:
        stoplist = dict.fromkeys(stoplist)
        stoplist.update(func.__globals__)
    else:
        env.update(func.__globals__)

    # First pass converts global lookups into constants
    changed = False
    i = 0
    while i < codelen:
        opcode = newcode[i]
        if opcode in (EXTENDED_ARG, STORE_GLOBAL):
            return func  # for simplicity, only optimize common cases
        if opcode == LOAD_GLOBAL:
            oparg = newcode[i + 1] + (newcode[i + 2] << 8)
            name = fcode.co_names[oparg]
            if name in env and name not in stoplist:
                value = env[name]
                for pos, val in enumerate(newconsts):
                    if val is value:
                        break
                else:
                    pos = len(newconsts)
                    newconsts.append(value)
                newcode[i] = LOAD_CONST
                newcode[i + 1] = pos & 0xFF
                newcode[i + 2] = pos >> 8
                changed = True
                if verbose is not None:
                    verbose("{0} --> {1}".format(name, value))
        i += 1
        if opcode >= HAVE_ARGUMENT:
            i += 2

    # Second pass folds tuples of constants and constant attribute lookups
    i = 0
    while i < codelen:
        newtuple = []
        while newcode[i] == LOAD_CONST:
            oparg = newcode[i + 1] + (newcode[i + 2] << 8)
            newtuple.append(newconsts[oparg])
            i += 3
        opcode = newcode[i]
        if not newtuple:
            i += 1
            if opcode >= HAVE_ARGUMENT:
                i += 2
            continue
        if opcode == LOAD_ATTR:
            obj = newtuple[-1]
            oparg = newcode[i + 1] + (newcode[i + 2] << 8)
            name = names[oparg]
            try:
                value = getattr(obj, name)
            except AttributeError:
                continue
            deletions = 1
        elif opcode == BUILD_TUPLE:
            oparg = newcode[i + 1] + (newcode[i + 2] << 8)
            if oparg != len(newtuple):
                continue
            deletions = len(newtuple)
            value = tuple(newtuple)
        else:
            continue

        reljump = deletions * 3
        newcode[i - reljump] = JUMP_FORWARD
        newcode[i - reljump + 1] = (reljump - 3) & 0xFF
        newcode[i - reljump + 2] = (reljump - 3) >> 8

        nclen = len(newconsts)
        newconsts.append(value)
        newcode[i] = LOAD_CONST
        newcode[i + 1] = nclen & 0xFF
        newcode[i + 2] = nclen >> 8
        i += 3
        changed = True
        if verbose is not None:
            verbose("new folded constant:{0}".format(value))

    if changed:
        codeobj = getcodeobj(newconsts, newcode, fcode, fcode)
        result = type(func)(
            codeobj, func.__globals__, func.__name__, func.__defaults__,
            func.__closure__
        )
        # set func attributes to result
        for prop in WRAPPER_ASSIGNMENTS:
            try:
                attr = getattr(func, prop)
            except AttributeError:
                pass
            else:
                setattr(result, prop, attr)

    return result
[ "def", "_make_constants", "(", "func", ",", "builtin_only", "=", "False", ",", "stoplist", "=", "None", ",", "verbose", "=", "None", ")", ":", "result", "=", "func", "if", "stoplist", "is", "None", ":", "stoplist", "=", "[", "]", "try", ":", "fcode", "=", "func", ".", "__code__", "except", "AttributeError", ":", "return", "func", "# Jython doesn't have a __code__ attribute.", "newcode", "=", "list", "(", "fcode", ".", "co_code", ")", "if", "PY3", "else", "[", "ord", "(", "co", ")", "for", "co", "in", "fcode", ".", "co_code", "]", "newconsts", "=", "list", "(", "fcode", ".", "co_consts", ")", "names", "=", "fcode", ".", "co_names", "codelen", "=", "len", "(", "newcode", ")", "env", "=", "vars", "(", "builtins", ")", ".", "copy", "(", ")", "if", "builtin_only", ":", "stoplist", "=", "dict", ".", "fromkeys", "(", "stoplist", ")", "stoplist", ".", "update", "(", "func", ".", "__globals__", ")", "else", ":", "env", ".", "update", "(", "func", ".", "__globals__", ")", "# First pass converts global lookups into constants", "changed", "=", "False", "i", "=", "0", "while", "i", "<", "codelen", ":", "opcode", "=", "newcode", "[", "i", "]", "if", "opcode", "in", "(", "EXTENDED_ARG", ",", "STORE_GLOBAL", ")", ":", "return", "func", "# for simplicity, only optimize common cases", "if", "opcode", "==", "LOAD_GLOBAL", ":", "oparg", "=", "newcode", "[", "i", "+", "1", "]", "+", "(", "newcode", "[", "i", "+", "2", "]", "<<", "8", ")", "name", "=", "fcode", ".", "co_names", "[", "oparg", "]", "if", "name", "in", "env", "and", "name", "not", "in", "stoplist", ":", "value", "=", "env", "[", "name", "]", "for", "pos", ",", "val", "in", "enumerate", "(", "newconsts", ")", ":", "if", "val", "is", "value", ":", "break", "else", ":", "pos", "=", "len", "(", "newconsts", ")", "newconsts", ".", "append", "(", "value", ")", "newcode", "[", "i", "]", "=", "LOAD_CONST", "newcode", "[", "i", "+", "1", "]", "=", "pos", "&", "0xFF", "newcode", "[", "i", "+", "2", "]", "=", "pos", ">>", "8", "changed", "=", "True", "if", "verbose", "is", "not", "None", ":", "verbose", "(", "\"{0} --> {1}\"", ".", "format", "(", "name", ",", "value", ")", ")", "i", "+=", "1", "if", "opcode", ">=", "HAVE_ARGUMENT", ":", "i", "+=", "2", "# Second pass folds tuples of constants and constant attribute lookups", "i", "=", "0", "while", "i", "<", "codelen", ":", "newtuple", "=", "[", "]", "while", "newcode", "[", "i", "]", "==", "LOAD_CONST", ":", "oparg", "=", "newcode", "[", "i", "+", "1", "]", "+", "(", "newcode", "[", "i", "+", "2", "]", "<<", "8", ")", "newtuple", ".", "append", "(", "newconsts", "[", "oparg", "]", ")", "i", "+=", "3", "opcode", "=", "newcode", "[", "i", "]", "if", "not", "newtuple", ":", "i", "+=", "1", "if", "opcode", ">=", "HAVE_ARGUMENT", ":", "i", "+=", "2", "continue", "if", "opcode", "==", "LOAD_ATTR", ":", "obj", "=", "newtuple", "[", "-", "1", "]", "oparg", "=", "newcode", "[", "i", "+", "1", "]", "+", "(", "newcode", "[", "i", "+", "2", "]", "<<", "8", ")", "name", "=", "names", "[", "oparg", "]", "try", ":", "value", "=", "getattr", "(", "obj", ",", "name", ")", "except", "AttributeError", ":", "continue", "deletions", "=", "1", "elif", "opcode", "==", "BUILD_TUPLE", ":", "oparg", "=", "newcode", "[", "i", "+", "1", "]", "+", "(", "newcode", "[", "i", "+", "2", "]", "<<", "8", ")", "if", "oparg", "!=", "len", "(", "newtuple", ")", ":", "continue", "deletions", "=", "len", "(", "newtuple", ")", "value", "=", "tuple", "(", "newtuple", ")", "else", ":", "continue", "reljump", "=", "deletions", "*", "3", "newcode", 
"[", "i", "-", "reljump", "]", "=", "JUMP_FORWARD", "newcode", "[", "i", "-", "reljump", "+", "1", "]", "=", "(", "reljump", "-", "3", ")", "&", "0xFF", "newcode", "[", "i", "-", "reljump", "+", "2", "]", "=", "(", "reljump", "-", "3", ")", ">>", "8", "nclen", "=", "len", "(", "newconsts", ")", "newconsts", ".", "append", "(", "value", ")", "newcode", "[", "i", "]", "=", "LOAD_CONST", "newcode", "[", "i", "+", "1", "]", "=", "nclen", "&", "0xFF", "newcode", "[", "i", "+", "2", "]", "=", "nclen", ">>", "8", "i", "+=", "3", "changed", "=", "True", "if", "verbose", "is", "not", "None", ":", "verbose", "(", "\"new folded constant:{0}\"", ".", "format", "(", "value", ")", ")", "if", "changed", ":", "codeobj", "=", "getcodeobj", "(", "newconsts", ",", "newcode", ",", "fcode", ",", "fcode", ")", "result", "=", "type", "(", "func", ")", "(", "codeobj", ",", "func", ".", "__globals__", ",", "func", ".", "__name__", ",", "func", ".", "__defaults__", ",", "func", ".", "__closure__", ")", "# set func attributes to result", "for", "prop", "in", "WRAPPER_ASSIGNMENTS", ":", "try", ":", "attr", "=", "getattr", "(", "func", ",", "prop", ")", "except", "AttributeError", ":", "pass", "else", ":", "setattr", "(", "result", ",", "prop", ",", "attr", ")", "return", "result" ]
Generate new function where code is an input function code with all
LOAD_GLOBAL statements changed to LOAD_CONST statements.

:param function func: code function to transform.
:param bool builtin_only: only transform builtin objects.
:param list stoplist: attribute names to not transform.
:param function verbose: logger function which takes in parameter a message

.. warning::
    Be sure global attributes to transform are not resolved dynamically.
[ "Generate", "new", "function", "where", "code", "is", "an", "input", "function", "code", "with", "all", "LOAD_GLOBAL", "statements", "changed", "to", "LOAD_CONST", "statements", "." ]
python
train
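The bytecode this pass rewrites can be inspected with dis; on an unoptimized function the global lookup shows up as LOAD_GLOBAL (exact opcodes and output vary by Python version):

import dis
import math

def area(r):
    return math.pi * r * r

dis.dis(area)  # 'math' is fetched with LOAD_GLOBAL on every call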
klen/zeta-library
zetalibrary/main.py
https://github.com/klen/zeta-library/blob/b76f89000f467e10ddcc94aded3f6c6bf4a0e5bd/zetalibrary/main.py#L69-L72
def pack(args):
    " Parse file or dir, import css, js code and save with prefix "
    assert op.exists(args.source), "Does not exists: %s" % args.source
    zeta_pack(args)
[ "def", "pack", "(", "args", ")", ":", "assert", "op", ".", "exists", "(", "args", ".", "source", ")", ",", "\"Does not exists: %s\"", "%", "args", ".", "source", "zeta_pack", "(", "args", ")" ]
Parse file or dir, import css, js code and save with prefix
[ "Parse", "file", "or", "dir", "import", "css", "js", "code", "and", "save", "with", "prefix" ]
python
train
andresriancho/docker-tag-naming
docker_tag_naming/utils.py
https://github.com/andresriancho/docker-tag-naming/blob/8c043d69a63fb2fa9f38eda5bee1c0d7a3fd1b2b/docker_tag_naming/utils.py#L208-L219
def get_all_tags(image_name, branch=None):
    """
    GET /v1/repositories/<namespace>/<repository_name>/tags

    :param image_name: The docker image name
    :param branch: The branch to filter by
    :return: A list of Version instances, latest first
    """
    try:
        return get_all_tags_no_auth(image_name, branch)
    except AuthException:
        return get_all_tags_with_auth(image_name, branch)
[ "def", "get_all_tags", "(", "image_name", ",", "branch", "=", "None", ")", ":", "try", ":", "return", "get_all_tags_no_auth", "(", "image_name", ",", "branch", ")", "except", "AuthException", ":", "return", "get_all_tags_with_auth", "(", "image_name", ",", "branch", ")" ]
GET /v1/repositories/<namespace>/<repository_name>/tags

:param image_name: The docker image name
:param branch: The branch to filter by
:return: A list of Version instances, latest first
[ "GET", "/", "v1", "/", "repositories", "/", "<namespace", ">", "/", "<repository_name", ">", "/", "tags" ]
python
train
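A hypothetical call, using the import path implied by the record's file layout; it needs network access, and the v1 registry endpoint it queries may no longer be live:

from docker_tag_naming.utils import get_all_tags

versions = get_all_tags('library/ubuntu')
print(versions[0])  # Version instances arrive latest-first, per the docstring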
Geotab/mygeotab-python
mygeotab/api.py
https://github.com/Geotab/mygeotab-python/blob/baa678e7df90bdd15f5dc55c1374b5c048791a94/mygeotab/api.py#L146-L158
def add(self, type_name, entity):
    """Adds an entity using the API. Shortcut for using call() with the 'Add' method.

    :param type_name: The type of entity.
    :type type_name: str
    :param entity: The entity to add.
    :type entity: dict
    :raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
    :raise TimeoutException: Raises when the request does not respond after some time.
    :return: The id of the object added.
    :rtype: str
    """
    return self.call('Add', type_name=type_name, entity=entity)
[ "def", "add", "(", "self", ",", "type_name", ",", "entity", ")", ":", "return", "self", ".", "call", "(", "'Add'", ",", "type_name", "=", "type_name", ",", "entity", "=", "entity", ")" ]
Adds an entity using the API. Shortcut for using call() with the 'Add' method.

:param type_name: The type of entity.
:type type_name: str
:param entity: The entity to add.
:type entity: dict
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
:raise TimeoutException: Raises when the request does not respond after some time.
:return: The id of the object added.
:rtype: str
[ "Adds", "an", "entity", "using", "the", "API", ".", "Shortcut", "for", "using", "call", "()", "with", "the", "Add", "method", "." ]
python
train
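Typical usage of the shortcut; the credentials and entity fields are placeholders, not a working account:

import mygeotab

api = mygeotab.API(username='user@example.com', password='...', database='demo')
api.authenticate()
# returns the id of the newly created entity
device_id = api.add('Device', {'name': 'Delivery van', 'serialNumber': '000-000'})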
pudo/dataset
dataset/database.py
https://github.com/pudo/dataset/blob/a008d120c7f3c48ccba98a282c0c67d6e719c0e5/dataset/database.py#L208-L218
def get_table(self, table_name, primary_id=None, primary_type=None):
    """Load or create a table.

    This is now the same as ``create_table``.
    ::

        table = db.get_table('population')
        # you can also use the short-hand syntax:
        table = db['population']
    """
    return self.create_table(table_name, primary_id, primary_type)
[ "def", "get_table", "(", "self", ",", "table_name", ",", "primary_id", "=", "None", ",", "primary_type", "=", "None", ")", ":", "return", "self", ".", "create_table", "(", "table_name", ",", "primary_id", ",", "primary_type", ")" ]
Load or create a table.

This is now the same as ``create_table``.
::

    table = db.get_table('population')
    # you can also use the short-hand syntax:
    table = db['population']
[ "Load", "or", "create", "a", "table", "." ]
python
train
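The docstring's example expanded into a runnable snippet; dataset supports in-memory SQLite, so no server is needed:

import dataset

db = dataset.connect('sqlite:///:memory:')
table = db.get_table('population')  # same as create_table for a new name
same_table = db['population']       # the short-hand syntax from the docstring
table.insert({'city': 'Berlin', 'count': 3600000})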
kodexlab/reliure
reliure/web.py
https://github.com/kodexlab/reliure/blob/0450c7a9254c5c003162738458bbe0c49e777ba5/reliure/web.py#L148-L171
def parse_request(self):
    """ Parse request for :func:`play`
    """
    data = {}
    options = {}
    ### get data
    if request.headers['Content-Type'].startswith('application/json'):
        # data in JSON
        data = request.json
        assert data is not None  #FIXME: better error than assertError ?
        if "options" in data:
            options = data["options"]
            del data["options"]
    else:
        # data in URL/post
        data = dict()
        data.update(request.form)
        data.update(request.args)
        for key, value in six.iteritems(data):
            if isinstance(value, list) and len(value) == 1:
                data[key] = value[0]
        # manage config in url
        options = self._config_from_url()
    return data, options
[ "def", "parse_request", "(", "self", ")", ":", "data", "=", "{", "}", "options", "=", "{", "}", "### get data", "if", "request", ".", "headers", "[", "'Content-Type'", "]", ".", "startswith", "(", "'application/json'", ")", ":", "# data in JSON", "data", "=", "request", ".", "json", "assert", "data", "is", "not", "None", "#FIXME: better error than assertError ?", "if", "\"options\"", "in", "data", ":", "options", "=", "data", "[", "\"options\"", "]", "del", "data", "[", "\"options\"", "]", "else", ":", "# data in URL/post", "data", "=", "dict", "(", ")", "data", ".", "update", "(", "request", ".", "form", ")", "data", ".", "update", "(", "request", ".", "args", ")", "for", "key", ",", "value", "in", "six", ".", "iteritems", "(", "data", ")", ":", "if", "isinstance", "(", "value", ",", "list", ")", "and", "len", "(", "value", ")", "==", "1", ":", "data", "[", "key", "]", "=", "value", "[", "0", "]", "# manage config in url", "options", "=", "self", ".", "_config_from_url", "(", ")", "return", "data", ",", "options" ]
Parse request for :func:`play`
[ "Parse", "request", "for", ":", "func", ":", "play" ]
python
train
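The JSON-versus-form branching above, reduced to a plain function with Flask's request object mocked away (names here are illustrative):

def split_options(content_type, payload):
    """Pull 'options' out of a JSON payload, mimicking parse_request."""
    options = {}
    if content_type.startswith('application/json'):
        data = dict(payload)
        options = data.pop('options', {})
    else:
        # form/query values arrive as lists; unwrap singletons
        data = {k: v[0] if isinstance(v, list) and len(v) == 1 else v
                for k, v in payload.items()}
    return data, options

print(split_options('application/json', {'text': 'hi', 'options': {'lang': 'en'}}))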
worldcompany/djangoembed
oembed/templatetags/oembed_tags.py
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/templatetags/oembed_tags.py#L108-L167
def do_oembed(parser, token):
    """
    A node which parses everything between its two nodes, and replaces any
    links with OEmbed-provided objects, if possible.

    Supports two optional arguments: the maximum width and height,
    specified like so:

        {% oembed 640x480 %}http://www.viddler.com/explore/SYSTM/videos/49/{% endoembed %}

    and/or the name of a sub template directory to render templates from:

        {% oembed 320x240 in "comments" %}http://www.viddler.com/explore/SYSTM/videos/49/{% endoembed %}

    or:

        {% oembed in "comments" %}http://www.viddler.com/explore/SYSTM/videos/49/{% endoembed %}

    either of those will render templates in oembed/comments/oembedtype.html

    Additionally, you can specify a context variable to drop the rendered text in:

        {% oembed 600x400 in "comments" as var_name %}...{% endoembed %}
        {% oembed as var_name %}...{% endoembed %}
    """
    args = token.split_contents()
    template_dir = None
    var_name = None
    if len(args) > 2:
        if len(args) == 3 and args[1] == 'in':
            template_dir = args[2]
        elif len(args) == 3 and args[1] == 'as':
            var_name = args[2]
        elif len(args) == 4 and args[2] == 'in':
            template_dir = args[3]
        elif len(args) == 4 and args[2] == 'as':
            var_name = args[3]
        elif len(args) == 6 and args[4] == 'as':
            template_dir = args[3]
            var_name = args[5]
        else:
            raise template.TemplateSyntaxError(
                "OEmbed either takes a single (optional) argument: "
                "WIDTHxHEIGHT, where WIDTH and HEIGHT are positive integers, "
                "and or an optional 'in \"template_dir\"' argument set.")
        if template_dir:
            if not (template_dir[0] == template_dir[-1] and template_dir[0] in ('"', "'")):
                raise template.TemplateSyntaxError("template_dir must be quoted")
            template_dir = template_dir[1:-1]
    if len(args) >= 2 and 'x' in args[1]:
        width, height = args[1].lower().split('x')
        # both dimensions must be present and non-empty
        if not (width and height):
            raise template.TemplateSyntaxError(
                "OEmbed's optional WIDTHxHEIGHT argument requires WIDTH and "
                "HEIGHT to be positive integers.")
    else:
        width, height = None, None
    nodelist = parser.parse(('endoembed',))
    parser.delete_first_token()
    return OEmbedNode(nodelist, width, height, template_dir, var_name)
[ "def", "do_oembed", "(", "parser", ",", "token", ")", ":", "args", "=", "token", ".", "split_contents", "(", ")", "template_dir", "=", "None", "var_name", "=", "None", "if", "len", "(", "args", ")", ">", "2", ":", "if", "len", "(", "args", ")", "==", "3", "and", "args", "[", "1", "]", "==", "'in'", ":", "template_dir", "=", "args", "[", "2", "]", "elif", "len", "(", "args", ")", "==", "3", "and", "args", "[", "1", "]", "==", "'as'", ":", "var_name", "=", "args", "[", "2", "]", "elif", "len", "(", "args", ")", "==", "4", "and", "args", "[", "2", "]", "==", "'in'", ":", "template_dir", "=", "args", "[", "3", "]", "elif", "len", "(", "args", ")", "==", "4", "and", "args", "[", "2", "]", "==", "'as'", ":", "var_name", "=", "args", "[", "3", "]", "elif", "len", "(", "args", ")", "==", "6", "and", "args", "[", "4", "]", "==", "'as'", ":", "template_dir", "=", "args", "[", "3", "]", "var_name", "=", "args", "[", "5", "]", "else", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"OEmbed either takes a single \"", "\"(optional) argument: WIDTHxHEIGHT, where WIDTH and HEIGHT \"", "\"are positive integers, and or an optional 'in \"", "\" \\\"template_dir\\\"' argument set.\"", ")", "if", "template_dir", ":", "if", "not", "(", "template_dir", "[", "0", "]", "==", "template_dir", "[", "-", "1", "]", "and", "template_dir", "[", "0", "]", "in", "(", "'\"'", ",", "\"'\"", ")", ")", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"template_dir must be quoted\"", ")", "template_dir", "=", "template_dir", "[", "1", ":", "-", "1", "]", "if", "len", "(", "args", ")", ">=", "2", "and", "'x'", "in", "args", "[", "1", "]", ":", "width", ",", "height", "=", "args", "[", "1", "]", ".", "lower", "(", ")", ".", "split", "(", "'x'", ")", "if", "not", "width", "and", "height", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "\"OEmbed's optional WIDTHxHEIGH\"", "\"T argument requires WIDTH and HEIGHT to be positive integers.\"", ")", "else", ":", "width", ",", "height", "=", "None", ",", "None", "nodelist", "=", "parser", ".", "parse", "(", "(", "'endoembed'", ",", ")", ")", "parser", ".", "delete_first_token", "(", ")", "return", "OEmbedNode", "(", "nodelist", ",", "width", ",", "height", ",", "template_dir", ",", "var_name", ")" ]
A node which parses everything between its two nodes, and replaces any links
with OEmbed-provided objects, if possible.

Supports two optional arguments: the maximum width and height,
specified like so:

    {% oembed 640x480 %}http://www.viddler.com/explore/SYSTM/videos/49/{% endoembed %}

and/or the name of a sub template directory to render templates from:

    {% oembed 320x240 in "comments" %}http://www.viddler.com/explore/SYSTM/videos/49/{% endoembed %}

or:

    {% oembed in "comments" %}http://www.viddler.com/explore/SYSTM/videos/49/{% endoembed %}

either of those will render templates in oembed/comments/oembedtype.html

Additionally, you can specify a context variable to drop the rendered text in:

    {% oembed 600x400 in "comments" as var_name %}...{% endoembed %}
    {% oembed as var_name %}...{% endoembed %}
[ "A", "node", "which", "parses", "everything", "between", "its", "two", "nodes", "and", "replaces", "any", "links", "with", "OEmbed", "-", "provided", "objects", "if", "possible", "." ]
python
valid
mfcloud/python-zvm-sdk
smtLayer/powerVM.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/smtLayer/powerVM.py#L515-L567
def reboot(rh):
    """
    Reboot a virtual machine.

    Input:
       Request Handle with the following properties:
          function              - 'POWERVM'
          subfunction           - 'REBOOT'
          userid                - userid of the virtual machine
          parms['desiredState'] - Desired state. Optional,
                                  unless 'maxQueries' is specified.
          parms['maxQueries']   - Maximum number of queries to issue.
                                  Optional.
          parms['maxWait']      - Maximum time to wait in seconds.
                                  Optional, unless 'maxQueries' is specified.
          parms['poll']         - Polling interval in seconds. Optional,
                                  unless 'maxQueries' is specified.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """

    rh.printSysLog("Enter powerVM.reboot, userid: " + rh.userid)

    strCmd = "shutdown -r now"
    results = execCmdThruIUCV(rh, rh.userid, strCmd)
    if results['overallRC'] != 0:
        # Command failed to execute using IUCV.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)

    if rh.results['overallRC'] == 0:
        # Wait for the OS to go down
        results = waitForOSState(rh, rh.userid, "down",
                                 maxQueries=30, sleepSecs=10)
        if results['overallRC'] == 0:
            rh.printLn("N", rh.userid + ": down (interim state)")

    if rh.results['overallRC'] == 0 and 'maxQueries' in rh.parms:
        results = waitForOSState(rh, rh.userid, 'up',
                                 maxQueries=rh.parms['maxQueries'],
                                 sleepSecs=rh.parms['poll'])
        if results['overallRC'] == 0:
            rh.printLn("N", rh.userid + ": up")
        else:
            rh.updateResults(results)

    rh.printSysLog("Exit powerVM.reboot, rc: " + str(rh.results['overallRC']))
    return rh.results['overallRC']
[ "def", "reboot", "(", "rh", ")", ":", "rh", ".", "printSysLog", "(", "\"Enter powerVM.reboot, userid: \"", "+", "rh", ".", "userid", ")", "strCmd", "=", "\"shutdown -r now\"", "results", "=", "execCmdThruIUCV", "(", "rh", ",", "rh", ".", "userid", ",", "strCmd", ")", "if", "results", "[", "'overallRC'", "]", "!=", "0", ":", "# Command failed to execute using IUCV.", "rh", ".", "printLn", "(", "\"ES\"", ",", "results", "[", "'response'", "]", ")", "rh", ".", "updateResults", "(", "results", ")", "if", "rh", ".", "results", "[", "'overallRC'", "]", "==", "0", ":", "# Wait for the OS to go down", "results", "=", "waitForOSState", "(", "rh", ",", "rh", ".", "userid", ",", "\"down\"", ",", "maxQueries", "=", "30", ",", "sleepSecs", "=", "10", ")", "if", "results", "[", "'overallRC'", "]", "==", "0", ":", "rh", ".", "printLn", "(", "\"N\"", ",", "rh", ".", "userid", "+", "\": down (interim state)\"", ")", "if", "rh", ".", "results", "[", "'overallRC'", "]", "==", "0", "and", "'maxQueries'", "in", "rh", ".", "parms", ":", "results", "=", "waitForOSState", "(", "rh", ",", "rh", ".", "userid", ",", "'up'", ",", "maxQueries", "=", "rh", ".", "parms", "[", "'maxQueries'", "]", ",", "sleepSecs", "=", "rh", ".", "parms", "[", "'poll'", "]", ")", "if", "results", "[", "'overallRC'", "]", "==", "0", ":", "rh", ".", "printLn", "(", "\"N\"", ",", "rh", ".", "userid", "+", "\": up\"", ")", "else", ":", "rh", ".", "updateResults", "(", "results", ")", "rh", ".", "printSysLog", "(", "\"Exit powerVM.reboot, rc: \"", "+", "str", "(", "rh", ".", "results", "[", "'overallRC'", "]", ")", ")", "return", "rh", ".", "results", "[", "'overallRC'", "]" ]
Reboot a virtual machine.

Input:
   Request Handle with the following properties:
      function              - 'POWERVM'
      subfunction           - 'REBOOT'
      userid                - userid of the virtual machine
      parms['desiredState'] - Desired state. Optional,
                              unless 'maxQueries' is specified.
      parms['maxQueries']   - Maximum number of queries to issue. Optional.
      parms['maxWait']      - Maximum time to wait in seconds.
                              Optional, unless 'maxQueries' is specified.
      parms['poll']         - Polling interval in seconds. Optional,
                              unless 'maxQueries' is specified.

Output:
   Request Handle updated with the results.
   Return code - 0: ok, non-zero: error
[ "Reboot", "a", "virtual", "machine", "." ]
python
train
blue-yonder/tsfresh
tsfresh/feature_extraction/extraction.py
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/feature_extraction/extraction.py#L43-L189
def extract_features(timeseries_container, default_fc_parameters=None,
                     kind_to_fc_parameters=None,
                     column_id=None, column_sort=None,
                     column_kind=None, column_value=None,
                     chunksize=defaults.CHUNKSIZE,
                     n_jobs=defaults.N_PROCESSES,
                     show_warnings=defaults.SHOW_WARNINGS,
                     disable_progressbar=defaults.DISABLE_PROGRESSBAR,
                     impute_function=defaults.IMPUTE_FUNCTION,
                     profile=defaults.PROFILING,
                     profiling_filename=defaults.PROFILING_FILENAME,
                     profiling_sorting=defaults.PROFILING_SORTING,
                     distributor=None):
    """
    Extract features from

    * a :class:`pandas.DataFrame` containing the different time series or
    * a dictionary of :class:`pandas.DataFrame` each containing one type of time series

    In both cases a :class:`pandas.DataFrame` with the calculated features will be returned.

    For a list of all the calculated time series features, please see the
    :class:`~tsfresh.feature_extraction.settings.ComprehensiveFCParameters` class,
    which is used to control which features with which parameters are calculated.

    For a detailed explanation of the different parameters and data formats please see :ref:`data-formats-label`.

    Examples
    ========

    >>> from tsfresh.examples import load_robot_execution_failures
    >>> from tsfresh import extract_features
    >>> df, _ = load_robot_execution_failures()
    >>> X = extract_features(df, column_id='id', column_sort='time')

    :param timeseries_container: The pandas.DataFrame with the time series to compute the features for, or a
            dictionary of pandas.DataFrames.
    :type timeseries_container: pandas.DataFrame or dict

    :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names
           which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for
           more information.
    :type default_fc_parameters: dict

    :param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for
            default_fc_parameters. If you put a kind as a key here, the fc_parameters object (which is the
            value), will be used instead of the default_fc_parameters. This means that kinds, for which
            kind_to_fc_parameters does not have any entries, will be ignored by the feature selection.
    :type kind_to_fc_parameters: dict

    :param column_id: The name of the id column to group by.
    :type column_id: str

    :param column_sort: The name of the sort column.
    :type column_sort: str

    :param column_kind: The name of the column keeping record on the kind of the value.
    :type column_kind: str

    :param column_value: The name for the column keeping the value itself.
    :type column_value: str

    :param n_jobs: The number of processes to use for parallelization. If zero, no parallelization is used.
    :type n_jobs: int

    :param chunksize: The size of one chunk that is submitted to the worker process for the parallelisation.
            Where one chunk is defined as a singular time series for one id and one kind. If you set the
            chunksize to 10, then it means that one task is to calculate all features for 10 time series.
            If it is set to None, depending on distributor, heuristics are used to find the optimal chunksize.
            If you get out of memory exceptions, you can try it with the dask distributor and a smaller chunksize.
    :type chunksize: None or int

    :param show_warnings: Show warnings during the feature extraction (needed for debugging of calculators).
    :type show_warnings: bool

    :param disable_progressbar: Do not show a progressbar while doing the calculation.
    :type disable_progressbar: bool

    :param impute_function: None, if no imputing should happen or the function to call for imputing.
    :type impute_function: None or callable

    :param profile: Turn on profiling during feature extraction
    :type profile: bool

    :param profiling_sorting: How to sort the profiling results (see the documentation of the profiling package
            for more information)
    :type profiling_sorting: basestring

    :param profiling_filename: Where to save the profiling results.
    :type profiling_filename: basestring

    :param distributor: Advanced parameter: set this to a class name that you want to use as a distributor.
            See the utilities/distribution.py for more information. Leave to None, if you want TSFresh to
            choose the best distributor.
    :type distributor: class

    :return: The (maybe imputed) DataFrame containing extracted features.
    :rtype: pandas.DataFrame
    """
    # Always use the standardized way of storing the data.
    # See the function normalize_input_to_internal_representation for more information.
    df_melt, column_id, column_kind, column_value = \
        dataframe_functions._normalize_input_to_internal_representation(
            timeseries_container=timeseries_container,
            column_id=column_id, column_kind=column_kind,
            column_sort=column_sort,
            column_value=column_value)

    # Use the standard setting if the user did not supply ones himself.
    if default_fc_parameters is None and kind_to_fc_parameters is None:
        default_fc_parameters = ComprehensiveFCParameters()
    elif default_fc_parameters is None and kind_to_fc_parameters is not None:
        default_fc_parameters = {}

    # If requested, do profiling (advanced feature)
    if profile:
        profiler = profiling.start_profiling()

    with warnings.catch_warnings():
        if not show_warnings:
            warnings.simplefilter("ignore")
        else:
            warnings.simplefilter("default")

        result = _do_extraction(df=df_melt,
                                column_id=column_id,
                                column_value=column_value,
                                column_kind=column_kind,
                                n_jobs=n_jobs, chunk_size=chunksize,
                                disable_progressbar=disable_progressbar,
                                default_fc_parameters=default_fc_parameters,
                                kind_to_fc_parameters=kind_to_fc_parameters,
                                distributor=distributor)

        # Impute the result if requested
        if impute_function is not None:
            impute_function(result)

    # Turn off profiling if it was turned on
    if profile:
        profiling.end_profiling(profiler, filename=profiling_filename,
                                sorting=profiling_sorting)

    return result
[ "def", "extract_features", "(", "timeseries_container", ",", "default_fc_parameters", "=", "None", ",", "kind_to_fc_parameters", "=", "None", ",", "column_id", "=", "None", ",", "column_sort", "=", "None", ",", "column_kind", "=", "None", ",", "column_value", "=", "None", ",", "chunksize", "=", "defaults", ".", "CHUNKSIZE", ",", "n_jobs", "=", "defaults", ".", "N_PROCESSES", ",", "show_warnings", "=", "defaults", ".", "SHOW_WARNINGS", ",", "disable_progressbar", "=", "defaults", ".", "DISABLE_PROGRESSBAR", ",", "impute_function", "=", "defaults", ".", "IMPUTE_FUNCTION", ",", "profile", "=", "defaults", ".", "PROFILING", ",", "profiling_filename", "=", "defaults", ".", "PROFILING_FILENAME", ",", "profiling_sorting", "=", "defaults", ".", "PROFILING_SORTING", ",", "distributor", "=", "None", ")", ":", "# Always use the standardized way of storing the data.", "# See the function normalize_input_to_internal_representation for more information.", "df_melt", ",", "column_id", ",", "column_kind", ",", "column_value", "=", "dataframe_functions", ".", "_normalize_input_to_internal_representation", "(", "timeseries_container", "=", "timeseries_container", ",", "column_id", "=", "column_id", ",", "column_kind", "=", "column_kind", ",", "column_sort", "=", "column_sort", ",", "column_value", "=", "column_value", ")", "# Use the standard setting if the user did not supply ones himself.", "if", "default_fc_parameters", "is", "None", "and", "kind_to_fc_parameters", "is", "None", ":", "default_fc_parameters", "=", "ComprehensiveFCParameters", "(", ")", "elif", "default_fc_parameters", "is", "None", "and", "kind_to_fc_parameters", "is", "not", "None", ":", "default_fc_parameters", "=", "{", "}", "# If requested, do profiling (advanced feature)", "if", "profile", ":", "profiler", "=", "profiling", ".", "start_profiling", "(", ")", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "if", "not", "show_warnings", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "else", ":", "warnings", ".", "simplefilter", "(", "\"default\"", ")", "result", "=", "_do_extraction", "(", "df", "=", "df_melt", ",", "column_id", "=", "column_id", ",", "column_value", "=", "column_value", ",", "column_kind", "=", "column_kind", ",", "n_jobs", "=", "n_jobs", ",", "chunk_size", "=", "chunksize", ",", "disable_progressbar", "=", "disable_progressbar", ",", "default_fc_parameters", "=", "default_fc_parameters", ",", "kind_to_fc_parameters", "=", "kind_to_fc_parameters", ",", "distributor", "=", "distributor", ")", "# Impute the result if requested", "if", "impute_function", "is", "not", "None", ":", "impute_function", "(", "result", ")", "# Turn off profiling if it was turned on", "if", "profile", ":", "profiling", ".", "end_profiling", "(", "profiler", ",", "filename", "=", "profiling_filename", ",", "sorting", "=", "profiling_sorting", ")", "return", "result" ]
Extract features from

* a :class:`pandas.DataFrame` containing the different time series or
* a dictionary of :class:`pandas.DataFrame` each containing one type of time series

In both cases a :class:`pandas.DataFrame` with the calculated features will be returned.

For a list of all the calculated time series features, please see the
:class:`~tsfresh.feature_extraction.settings.ComprehensiveFCParameters` class,
which is used to control which features with which parameters are calculated.

For a detailed explanation of the different parameters and data formats please see :ref:`data-formats-label`.

Examples
========

>>> from tsfresh.examples import load_robot_execution_failures
>>> from tsfresh import extract_features
>>> df, _ = load_robot_execution_failures()
>>> X = extract_features(df, column_id='id', column_sort='time')

:param timeseries_container: The pandas.DataFrame with the time series to compute the features for, or a
        dictionary of pandas.DataFrames.
:type timeseries_container: pandas.DataFrame or dict
:param default_fc_parameters: mapping from feature calculator names to parameters. Only those names
       which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for
       more information.
:type default_fc_parameters: dict
:param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for
        default_fc_parameters. If you put a kind as a key here, the fc_parameters object (which is the
        value), will be used instead of the default_fc_parameters. This means that kinds, for which
        kind_to_fc_parameters does not have any entries, will be ignored by the feature selection.
:type kind_to_fc_parameters: dict
:param column_id: The name of the id column to group by.
:type column_id: str
:param column_sort: The name of the sort column.
:type column_sort: str
:param column_kind: The name of the column keeping record on the kind of the value.
:type column_kind: str
:param column_value: The name for the column keeping the value itself.
:type column_value: str
:param n_jobs: The number of processes to use for parallelization. If zero, no parallelization is used.
:type n_jobs: int
:param chunksize: The size of one chunk that is submitted to the worker process for the parallelisation.
        Where one chunk is defined as a singular time series for one id and one kind. If you set the
        chunksize to 10, then it means that one task is to calculate all features for 10 time series.
        If it is set to None, depending on distributor, heuristics are used to find the optimal chunksize.
        If you get out of memory exceptions, you can try it with the dask distributor and a smaller chunksize.
:type chunksize: None or int
:param show_warnings: Show warnings during the feature extraction (needed for debugging of calculators).
:type show_warnings: bool
:param disable_progressbar: Do not show a progressbar while doing the calculation.
:type disable_progressbar: bool
:param impute_function: None, if no imputing should happen or the function to call for imputing.
:type impute_function: None or callable
:param profile: Turn on profiling during feature extraction
:type profile: bool
:param profiling_sorting: How to sort the profiling results (see the documentation of the profiling package
        for more information)
:type profiling_sorting: basestring
:param profiling_filename: Where to save the profiling results.
:type profiling_filename: basestring
:param distributor: Advanced parameter: set this to a class name that you want to use as a distributor.
        See the utilities/distribution.py for more information. Leave to None, if you want TSFresh to
        choose the best distributor.
:type distributor: class
:return: The (maybe imputed) DataFrame containing extracted features.
:rtype: pandas.DataFrame
[ "Extract", "features", "from" ]
python
train
blockcypher/blockcypher-python
blockcypher/api.py
https://github.com/blockcypher/blockcypher-python/blob/7601ea21916957ff279384fd699527ff9c28a56e/blockcypher/api.py#L281-L296
def get_address_overview(address, coin_symbol='btc', api_key=None):
    '''
    Takes an address and coin_symbol and returns the address details
    '''
    assert is_valid_address_for_coinsymbol(b58_address=address,
                                           coin_symbol=coin_symbol)

    url = make_url(coin_symbol, 'addrs', **{address: 'balance'})

    params = {}
    if api_key:
        params['token'] = api_key

    r = requests.get(url, params=params, verify=True,
                     timeout=TIMEOUT_IN_SECONDS)

    return get_valid_json(r)
[ "def", "get_address_overview", "(", "address", ",", "coin_symbol", "=", "'btc'", ",", "api_key", "=", "None", ")", ":", "assert", "is_valid_address_for_coinsymbol", "(", "b58_address", "=", "address", ",", "coin_symbol", "=", "coin_symbol", ")", "url", "=", "make_url", "(", "coin_symbol", ",", "'addrs'", ",", "*", "*", "{", "address", ":", "'balance'", "}", ")", "params", "=", "{", "}", "if", "api_key", ":", "params", "[", "'token'", "]", "=", "api_key", "r", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "params", ",", "verify", "=", "True", ",", "timeout", "=", "TIMEOUT_IN_SECONDS", ")", "return", "get_valid_json", "(", "r", ")" ]
Takes an address and coin_symbol and returns the address details
[ "Takes", "an", "address", "and", "coin_symbol", "and", "return", "the", "address", "details" ]
python
train
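Typical usage against the live BlockCypher API (needs network access; the address is an arbitrary public one):

from blockcypher import get_address_overview

overview = get_address_overview('1M8s2S5bgAzSSzVTeL7zruvMPLvzSkEAuv')
print(overview['balance'])  # balance in satoshis from the address endpoint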
peterwittek/ncpol2sdpa
ncpol2sdpa/sdp_relaxation.py
https://github.com/peterwittek/ncpol2sdpa/blob/bce75d524d0b9d0093f32e3a0a5611f8589351a7/ncpol2sdpa/sdp_relaxation.py#L536-L556
def _get_facvar(self, polynomial):
    """Return dense vector representation of a polynomial. This function is
    nearly identical to __push_facvar_sparse, but instead of pushing
    sparse entries to the constraint matrices, it returns a dense
    vector.
    """
    facvar = [0] * (self.n_vars + 1)
    # Preprocess the polynomial for uniform handling later
    if is_number_type(polynomial):
        facvar[0] = polynomial
        return facvar
    polynomial = polynomial.expand()
    if polynomial.is_Mul:
        elements = [polynomial]
    else:
        elements = polynomial.as_coeff_mul()[1][0].as_coeff_add()[1]
    for element in elements:
        results = self._get_index_of_monomial(element)
        for (k, coeff) in results:
            facvar[k] += coeff
    return facvar
[ "def", "_get_facvar", "(", "self", ",", "polynomial", ")", ":", "facvar", "=", "[", "0", "]", "*", "(", "self", ".", "n_vars", "+", "1", ")", "# Preprocess the polynomial for uniform handling later", "if", "is_number_type", "(", "polynomial", ")", ":", "facvar", "[", "0", "]", "=", "polynomial", "return", "facvar", "polynomial", "=", "polynomial", ".", "expand", "(", ")", "if", "polynomial", ".", "is_Mul", ":", "elements", "=", "[", "polynomial", "]", "else", ":", "elements", "=", "polynomial", ".", "as_coeff_mul", "(", ")", "[", "1", "]", "[", "0", "]", ".", "as_coeff_add", "(", ")", "[", "1", "]", "for", "element", "in", "elements", ":", "results", "=", "self", ".", "_get_index_of_monomial", "(", "element", ")", "for", "(", "k", ",", "coeff", ")", "in", "results", ":", "facvar", "[", "k", "]", "+=", "coeff", "return", "facvar" ]
Return dense vector representation of a polynomial. This function is nearly identical to __push_facvar_sparse, but instead of pushing sparse entries to the constraint matrices, it returns a dense vector.
[ "Return", "dense", "vector", "representation", "of", "a", "polynomial", ".", "This", "function", "is", "nearly", "identical", "to", "__push_facvar_sparse", "but", "instead", "of", "pushing", "sparse", "entries", "to", "the", "constraint", "matrices", "it", "returns", "a", "dense", "vector", "." ]
python
train
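The as_coeff_mul()/as_coeff_add() preprocessing can be seen on a plain sympy polynomial; this sketch shows only the decomposition into summands, not the monomial indexing:

import sympy

x, y = sympy.symbols('x y')
poly = (2*x*y + 3*y).expand()
# split the expanded sum into its individual terms
terms = poly.as_coeff_mul()[1][0].as_coeff_add()[1]
print(terms)  # the summands whose monomial indices _get_facvar accumulates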
rosshamish/hexgrid
hexgrid.py
https://github.com/rosshamish/hexgrid/blob/16abb1822dc2789cb355f54fb06c7774eea1d9f2/hexgrid.py#L413-L422
def legal_node_coords():
    """
    Return all legal node coordinates on the grid
    """
    nodes = set()
    for tile_id in legal_tile_ids():
        for node in nodes_touching_tile(tile_id):
            nodes.add(node)
    logging.debug('Legal node coords({})={}'.format(len(nodes), nodes))
    return nodes
[ "def", "legal_node_coords", "(", ")", ":", "nodes", "=", "set", "(", ")", "for", "tile_id", "in", "legal_tile_ids", "(", ")", ":", "for", "node", "in", "nodes_touching_tile", "(", "tile_id", ")", ":", "nodes", ".", "add", "(", "node", ")", "logging", ".", "debug", "(", "'Legal node coords({})={}'", ".", "format", "(", "len", "(", "nodes", ")", ",", "nodes", ")", ")", "return", "nodes" ]
Return all legal node coordinates on the grid
[ "Return", "all", "legal", "node", "coordinates", "on", "the", "grid" ]
python
train
smira/py-numa
numa.py
https://github.com/smira/py-numa/blob/eb38979c61028eb9422a4ad1eda0387cd93ea390/numa.py#L277-L290
def set_preferred(node):
    """
    Sets the preferred node for the current thread to node.

    The preferred node is the node on which memory is preferably
    allocated before falling back to other nodes. The default is to
    use the node on which the process is currently running (local
    policy).

    @param node: node idx
    @type node: C{int}
    """
    if node < 0 or node > get_max_node():
        raise ValueError(node)

    libnuma.numa_set_preferred(node)
[ "def", "set_preferred", "(", "node", ")", ":", "if", "node", "<", "0", "or", "node", ">", "get_max_node", "(", ")", ":", "raise", "ValueError", "(", "node", ")", "libnuma", ".", "numa_set_preferred", "(", "node", ")" ]
Sets the preferred node for the current thread to node.

The preferred node is the node on which memory is preferably allocated
before falling back to other nodes. The default is to use the node on
which the process is currently running (local policy).

@param node: node idx
@type node: C{int}
[ "Sets", "the", "preferred", "node", "for", "the", "current", "thread", "to", "node", "." ]
python
train
log2timeline/plaso
plaso/analysis/virustotal.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/analysis/virustotal.py#L57-L89
def Analyze(self, hashes):
    """Looks up hashes in VirusTotal using the VirusTotal HTTP API.

    The API is documented here:
      https://www.virustotal.com/en/documentation/public-api/

    Args:
      hashes (list[str]): hashes to look up.

    Returns:
      list[HashAnalysis]: analysis results.

    Raises:
      RuntimeError: If the VirusTotal API key has not been set.
    """
    if not self._api_key:
        raise RuntimeError('No API key specified for VirusTotal lookup.')

    hash_analyses = []
    json_response = self._QueryHashes(hashes) or []

    # VirusTotal returns a dictionary when a single hash is queried
    # and a list when multiple hashes are queried.
    if isinstance(json_response, dict):
        json_response = [json_response]

    for result in json_response:
        resource = result['resource']
        hash_analysis = interface.HashAnalysis(resource, result)
        hash_analyses.append(hash_analysis)

    return hash_analyses
[ "def", "Analyze", "(", "self", ",", "hashes", ")", ":", "if", "not", "self", ".", "_api_key", ":", "raise", "RuntimeError", "(", "'No API key specified for VirusTotal lookup.'", ")", "hash_analyses", "=", "[", "]", "json_response", "=", "self", ".", "_QueryHashes", "(", "hashes", ")", "or", "[", "]", "# VirusTotal returns a dictionary when a single hash is queried", "# and a list when multiple hashes are queried.", "if", "isinstance", "(", "json_response", ",", "dict", ")", ":", "json_response", "=", "[", "json_response", "]", "for", "result", "in", "json_response", ":", "resource", "=", "result", "[", "'resource'", "]", "hash_analysis", "=", "interface", ".", "HashAnalysis", "(", "resource", ",", "result", ")", "hash_analyses", ".", "append", "(", "hash_analysis", ")", "return", "hash_analyses" ]
Looks up hashes in VirusTotal using the VirusTotal HTTP API.

The API is documented here:
  https://www.virustotal.com/en/documentation/public-api/

Args:
  hashes (list[str]): hashes to look up.

Returns:
  list[HashAnalysis]: analysis results.

Raises:
  RuntimeError: If the VirusTotal API key has not been set.
[ "Looks", "up", "hashes", "in", "VirusTotal", "using", "the", "VirusTotal", "HTTP", "API", "." ]
python
train
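The dict-versus-list normalization is the subtle part; isolated here as a plain function with a couple of checks:

def normalize_response(json_response):
    """VirusTotal returns a dict for one hash and a list for several."""
    json_response = json_response or []
    if isinstance(json_response, dict):
        json_response = [json_response]
    return json_response

assert normalize_response({'resource': 'abc'}) == [{'resource': 'abc'}]
assert normalize_response(None) == []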
mcieslik-mctp/papy
src/papy/core.py
https://github.com/mcieslik-mctp/papy/blob/708e50827b5db46bbea081982cb74b9b0e464064/src/papy/core.py#L492-L505
def del_pipes(self, pipes, *args, **kwargs):
    """
    Deletes a sequence of pipes from the ``Dagger`` in the specified order.
    Takes optional arguments for ``Dagger.del_pipe``.

    Arguments:

      - pipes(sequence of valid ``del_pipe`` arguments) Sequence of pipes
        or other valid ``Dagger.del_pipe`` arguments to be removed from
        the ``Dagger`` in the left to right order.
    """
    for pipe in pipes:
        # forward the extra positional/keyword arguments to each deletion
        self.del_pipe(pipe, *args, **kwargs)
[ "def", "del_pipes", "(", "self", ",", "pipes", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "pipe", "in", "pipes", ":", "self", ".", "del_pipe", "(", "pipe", "*", "args", ",", "*", "*", "kwargs", ")" ]
Deletes a sequence of pipes from the ``Dagger`` in the specified order.
Takes optional arguments for ``Dagger.del_pipe``.

Arguments:

  - pipes(sequence of valid ``del_pipe`` arguments) Sequence of pipes or
    other valid ``Dagger.del_pipe`` arguments to be removed from the
    ``Dagger`` in the left to right order.
[ "Deletes", "a", "sequence", "of", "pipes", "from", "the", "Dagger", "in", "the", "specified", "order", ".", "Takes", "optional", "arguments", "for", "Dagger", ".", "del_pipe", ".", "Arguments", ":", "-", "pipes", "(", "sequence", "of", "valid", "del_pipe", "arguments", ")", "Sequence", "of", "pipes", "or", "other", "valid", "Dagger", ".", "del_pipe", "arguments", "to", "be", "removed", "from", "the", "Dagger", "in", "the", "left", "to", "right", "order", "." ]
python
train
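With the arguments forwarded correctly, each pipe receives the same extras; a toy stand-in for the Dagger class (not papy's real one) demonstrates the call shape:

class ToyDagger(object):
    def del_pipe(self, pipe, *args, **kwargs):
        print('removing', pipe, args, kwargs)

    def del_pipes(self, pipes, *args, **kwargs):
        # mirrors the record above: one del_pipe call per pipe
        for pipe in pipes:
            self.del_pipe(pipe, *args, **kwargs)

ToyDagger().del_pipes(['pipe_a', 'pipe_b'], forgive=True)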
oscarlazoarjona/fast
fast/bloch.py
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/bloch.py#L1920-L2053
def fast_lindblad_terms(gamma, unfolding, matrix_form=False, file_name=None,
                        return_code=False):
    r"""Return a fast function that returns the Lindblad terms.

    We test a basic two-level system.

    >>> import numpy as np
    >>> Ne = 2
    >>> gamma21 = 2*np.pi*6e6
    >>> gamma = np.array([[0.0, -gamma21],
    ...                   [gamma21, 0.0]])
    >>> rhos = np.array([[0.6, 3+2j],
    ...                  [3-2j, 0.4]])

    A map to unfold the density matrix.

    >>> unfolding = Unfolding(Ne, True, True, True)

    We obtain a function to calculate Lindblad terms.

    >>> lindblad_terms = fast_lindblad_terms(gamma, unfolding)

    Apply this to a density matrix.

    >>> rhos = np.array([[0.6, 3+2j],
    ...                  [3-2j, 0.4]])
    >>> rhosv = unfolding(rhos)
    >>> rhs_lindblad = lindblad_terms(rhosv)
    >>> print(rhs_lindblad)
    [-15079644.7372 -56548667.7646 37699111.8431]

    """
    Ne = unfolding.Ne
    Nrho = unfolding.Nrho
    Mu = unfolding.Mu

    # We establish the arguments of the output function.
    if True:
        code = ""
        code += "def lindblad_terms("
        if not matrix_form:
            code += "rho, "
        if code[-2:] == ", ":
            code = code[:-2]
        code += "):\n"

    # We initialize the output and auxiliaries.
    if True:
        # We introduce the factor that multiplies all terms.
        if matrix_form:
            code += "    A = np.zeros(("+str(Nrho)+", "+str(Nrho)
            if not unfolding.real:
                code += "), complex)\n\n"
            else:
                code += "))\n\n"
            if unfolding.normalized:
                code += "    b = np.zeros(("+str(Nrho)
                if not unfolding.real:
                    code += "), complex)\n\n"
                else:
                    code += "))\n\n"
        else:
            code += "    rhs = np.zeros(("+str(Nrho)
            if not unfolding.real:
                code += "), complex)\n\n"
            else:
                code += "))\n\n"

    for a in range(Ne):
        for b in range(a):
            # The first term is of the form
            #     gamma_ab * rho_aa |b><b|
            if not (unfolding.normalized and b == 0):
                coef = gamma[a, b]
                if unfolding.real:
                    mu = Mu(1, b, b)
                    nu = Mu(1, a, a)
                else:
                    mu = Mu(0, b, b)
                    nu = Mu(0, a, a)
                code += term_code(mu, nu, coef, matrix_form, False)

            # The second term is of the form
            #     sum_j -gamma_ab/2 rho_aj |a><j|
            # for a lower triangular unfolding, this j runs from 1 to a.
            for j in range(a):
                coef = -gamma[a, b]*0.5
                if unfolding.real:
                    mur = Mu(1, a, j)
                    code += term_code(mur, mur, coef, matrix_form, False)
                    mui = Mu(-1, a, j)
                    code += term_code(mui, mui, coef, matrix_form, False)
                else:
                    mu = Mu(0, a, j)
                    code += term_code(mu, mu, coef, matrix_form, False)

            # The third term is of the form
            #     - sum_i 1/2 rho_ia |i><a|
            # for a lower triangular unfolding, this i runs from a to Ne.
            for i in range(a+1, Ne):
                coef = -gamma[a, b]*0.5
                if unfolding.real:
                    mur = Mu(1, i, a)
                    code += term_code(mur, mur, coef, matrix_form, False)
                    mui = Mu(-1, i, a)
                    code += term_code(mui, mui, coef, matrix_form, False)
                else:
                    mu = Mu(0, i, a)
                    code += term_code(mu, mu, coef, matrix_form, False)

            # We missed one term in each of the previous fors, that together
            # correspond to
            #     -gamma_ab * rho_aa |a><a|
            coef = -gamma[a, b]
            if unfolding.real:
                mu = Mu(1, a, a)
            else:
                mu = Mu(0, a, a)
            code += term_code(mu, mu, coef, matrix_form, False)

    # We finish the code.
    if True:
        if matrix_form:
            if unfolding.normalized:
                code += "    return A, b\n"
            else:
                code += "    return A\n"
        else:
            code += "    return rhs\n"

    # We write the code to file if provided, and execute it.
    if True:
        if file_name is not None:
            f = file(file_name+".py", "w")
            f.write(code)
            f.close()
        lindblad_terms = code
        if not return_code:
            exec lindblad_terms
    return lindblad_terms
[ "def", "fast_lindblad_terms", "(", "gamma", ",", "unfolding", ",", "matrix_form", "=", "False", ",", "file_name", "=", "None", ",", "return_code", "=", "False", ")", ":", "Ne", "=", "unfolding", ".", "Ne", "Nrho", "=", "unfolding", ".", "Nrho", "Mu", "=", "unfolding", ".", "Mu", "# We establish the arguments of the output function.", "if", "True", ":", "code", "=", "\"\"", "code", "+=", "\"def lindblad_terms(\"", "if", "not", "matrix_form", ":", "code", "+=", "\"rho, \"", "if", "code", "[", "-", "2", ":", "]", "==", "\", \"", ":", "code", "=", "code", "[", ":", "-", "2", "]", "code", "+=", "\"):\\n\"", "# We initialize the output and auxiliaries.", "if", "True", ":", "# We introduce the factor that multiplies all terms.", "if", "matrix_form", ":", "code", "+=", "\" A = np.zeros((\"", "+", "str", "(", "Nrho", ")", "+", "\", \"", "+", "str", "(", "Nrho", ")", "if", "not", "unfolding", ".", "real", ":", "code", "+=", "\"), complex)\\n\\n\"", "else", ":", "code", "+=", "\"))\\n\\n\"", "if", "unfolding", ".", "normalized", ":", "code", "+=", "\" b = np.zeros((\"", "+", "str", "(", "Nrho", ")", "if", "not", "unfolding", ".", "real", ":", "code", "+=", "\"), complex)\\n\\n\"", "else", ":", "code", "+=", "\"))\\n\\n\"", "else", ":", "code", "+=", "\" rhs = np.zeros((\"", "+", "str", "(", "Nrho", ")", "if", "not", "unfolding", ".", "real", ":", "code", "+=", "\"), complex)\\n\\n\"", "else", ":", "code", "+=", "\"))\\n\\n\"", "for", "a", "in", "range", "(", "Ne", ")", ":", "for", "b", "in", "range", "(", "a", ")", ":", "# The first term is of the from", "# gamma_ab * rho_aa |b><b|", "if", "not", "(", "unfolding", ".", "normalized", "and", "b", "==", "0", ")", ":", "coef", "=", "gamma", "[", "a", ",", "b", "]", "if", "unfolding", ".", "real", ":", "mu", "=", "Mu", "(", "1", ",", "b", ",", "b", ")", "nu", "=", "Mu", "(", "1", ",", "a", ",", "a", ")", "else", ":", "mu", "=", "Mu", "(", "0", ",", "b", ",", "b", ")", "nu", "=", "Mu", "(", "0", ",", "a", ",", "a", ")", "code", "+=", "term_code", "(", "mu", ",", "nu", ",", "coef", ",", "matrix_form", ",", "False", ")", "# The second term is of the form", "# sum_j -gamma_ab/2 rho_aj |a><j|", "# for a lower triangular unfolding, this j runs from 1 to a.", "for", "j", "in", "range", "(", "a", ")", ":", "coef", "=", "-", "gamma", "[", "a", ",", "b", "]", "*", "0.5", "if", "unfolding", ".", "real", ":", "mur", "=", "Mu", "(", "1", ",", "a", ",", "j", ")", "code", "+=", "term_code", "(", "mur", ",", "mur", ",", "coef", ",", "matrix_form", ",", "False", ")", "mui", "=", "Mu", "(", "-", "1", ",", "a", ",", "j", ")", "code", "+=", "term_code", "(", "mui", ",", "mui", ",", "coef", ",", "matrix_form", ",", "False", ")", "else", ":", "mu", "=", "Mu", "(", "0", ",", "a", ",", "j", ")", "code", "+=", "term_code", "(", "mu", ",", "mu", ",", "coef", ",", "matrix_form", ",", "False", ")", "# The third term is of the form", "# - sum_i 1/2 rho_ia |i><a|", "# for a lower triangular unfolding, this i runs from a to Ne.", "for", "i", "in", "range", "(", "a", "+", "1", ",", "Ne", ")", ":", "coef", "=", "-", "gamma", "[", "a", ",", "b", "]", "*", "0.5", "if", "unfolding", ".", "real", ":", "mur", "=", "Mu", "(", "1", ",", "i", ",", "a", ")", "code", "+=", "term_code", "(", "mur", ",", "mur", ",", "coef", ",", "matrix_form", ",", "False", ")", "mui", "=", "Mu", "(", "-", "1", ",", "i", ",", "a", ")", "code", "+=", "term_code", "(", "mui", ",", "mui", ",", "coef", ",", "matrix_form", ",", "False", ")", "else", ":", "mu", "=", "Mu", "(", "0", ",", "i", ",", "a", ")", "code", "+=", 
"term_code", "(", "mu", ",", "mu", ",", "coef", ",", "matrix_form", ",", "False", ")", "# We missed one term in each of the previous fors, that together", "# correspond to", "# -gamma_ab * rho_aa |a><a|", "coef", "=", "-", "gamma", "[", "a", ",", "b", "]", "if", "unfolding", ".", "real", ":", "mu", "=", "Mu", "(", "1", ",", "a", ",", "a", ")", "else", ":", "mu", "=", "Mu", "(", "0", ",", "a", ",", "a", ")", "code", "+=", "term_code", "(", "mu", ",", "mu", ",", "coef", ",", "matrix_form", ",", "False", ")", "# We finish the code.", "if", "True", ":", "if", "matrix_form", ":", "if", "unfolding", ".", "normalized", ":", "code", "+=", "\" return A, b\\n\"", "else", ":", "code", "+=", "\" return A\\n\"", "else", ":", "code", "+=", "\" return rhs\\n\"", "# We write the code to file if provided, and execute it.", "if", "True", ":", "if", "file_name", "is", "not", "None", ":", "f", "=", "file", "(", "file_name", "+", "\".py\"", ",", "\"w\"", ")", "f", ".", "write", "(", "code", ")", "f", ".", "close", "(", ")", "lindblad_terms", "=", "code", "if", "not", "return_code", ":", "exec", "lindblad_terms", "return", "lindblad_terms" ]
r"""Return a fast function that returns the Lindblad terms. We test a basic two-level system. >>> import numpy as np >>> Ne = 2 >>> gamma21 = 2*np.pi*6e6 >>> gamma = np.array([[0.0, -gamma21], ... [gamma21, 0.0]]) >>> rhos = np.array([[0.6, 3+2j], ... [3-2j, 0.4]]) An map to unfold the density matrix. >>> unfolding = Unfolding(Ne, True, True, True) We obtain a function to calculate Lindblad terms. >>> lindblad_terms = fast_lindblad_terms(gamma, unfolding) Apply this to a density matrix. >>> rhos = np.array([[0.6, 3+2j], ... [3-2j, 0.4]]) >>> rhosv = unfolding(rhos) >>> rhs_lindblad = lindblad_terms(rhosv) >>> print(rhs_lindblad) [-15079644.7372 -56548667.7646 37699111.8431]
[ "r", "Return", "a", "fast", "function", "that", "returns", "the", "Lindblad", "terms", "." ]
python
train
bspaans/python-mingus
mingus/midi/midi_track.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/midi/midi_track.py#L248-L260
def key_signature_event(self, key='C'):
    """Return the bytes for a key signature event."""
    if key.islower():
        val = minor_keys.index(key) - 7
        mode = '\x01'
    else:
        val = major_keys.index(key) - 7
        mode = '\x00'
    if val < 0:
        val = 256 + val
    key = a2b_hex('%02x' % val)
    return '{0}{1}{2}\x02{3}{4}'.format(self.delta_time, META_EVENT,
                                        KEY_SIGNATURE, key, mode)
[ "def", "key_signature_event", "(", "self", ",", "key", "=", "'C'", ")", ":", "if", "key", ".", "islower", "(", ")", ":", "val", "=", "minor_keys", ".", "index", "(", "key", ")", "-", "7", "mode", "=", "'\\x01'", "else", ":", "val", "=", "major_keys", ".", "index", "(", "key", ")", "-", "7", "mode", "=", "'\\x00'", "if", "val", "<", "0", ":", "val", "=", "256", "+", "val", "key", "=", "a2b_hex", "(", "'%02x'", "%", "val", ")", "return", "'{0}{1}{2}\\x02{3}{4}'", ".", "format", "(", "self", ".", "delta_time", ",", "META_EVENT", ",", "KEY_SIGNATURE", ",", "key", ",", "mode", ")" ]
Return the bytes for a key signature event.
[ "Return", "the", "bytes", "for", "a", "key", "signature", "event", "." ]
python
train
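The sharps/flats byte is the key's offset from C on the circle of fifths, wrapped into an unsigned byte; this sketch assumes mingus orders major_keys with 'C' at index 7:

major_keys = ['Cb', 'Gb', 'Db', 'Ab', 'Eb', 'Bb', 'F',
              'C', 'G', 'D', 'A', 'E', 'B', 'F#', 'C#']  # assumed ordering

val = major_keys.index('F') - 7  # -1, i.e. one flat
if val < 0:
    val = 256 + val              # two's-complement byte
print('%02x' % val)              # ff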