Dataset columns (name: type, observed value range):
- repo: string, length 7 to 54
- path: string, length 4 to 192
- url: string, length 87 to 284
- code: string, length 78 to 104k
- code_tokens: list
- docstring: string, length 1 to 46.9k
- docstring_tokens: list
- language: string, 1 distinct value
- partition: string, 3 distinct values
fabioz/PyDev.Debugger
_pydev_imps/_pydev_SimpleXMLRPCServer.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydev_imps/_pydev_SimpleXMLRPCServer.py#L578-L594
def handle_request(self, request_text=None):
    """Handle a single XML-RPC request passed through a CGI post method.

    If no XML data is given then it is read from stdin. The resulting
    XML-RPC response is printed to stdout along with the correct HTTP
    headers.
    """
    if request_text is None and \
            os.environ.get('REQUEST_METHOD', None) == 'GET':
        self.handle_get()
    else:
        # POST data is normally available through stdin
        if request_text is None:
            request_text = sys.stdin.read()
        self.handle_xmlrpc(request_text)
[ "def", "handle_request", "(", "self", ",", "request_text", "=", "None", ")", ":", "if", "request_text", "is", "None", "and", "os", ".", "environ", ".", "get", "(", "'REQUEST_METHOD'", ",", "None", ")", "==", "'GET'", ":", "self", ".", "handle_get", "(", ")", "else", ":", "# POST data is normally available through stdin", "if", "request_text", "is", "None", ":", "request_text", "=", "sys", ".", "stdin", ".", "read", "(", ")", "self", ".", "handle_xmlrpc", "(", "request_text", ")" ]
Handle a single XML-RPC request passed through a CGI post method. If no XML data is given then it is read from stdin. The resulting XML-RPC response is printed to stdout along with the correct HTTP headers.
[ "Handle", "a", "single", "XML", "-", "RPC", "request", "passed", "through", "a", "CGI", "post", "method", "." ]
python
train
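The record above is a vendored copy of the standard library's CGI XML-RPC handler. As an illustration only (not part of the dataset record), here is a minimal sketch of how such a handler is typically driven, using the stock xmlrpc.server module rather than PyDev's vendored copy; the ping function is invented for the example.

    # Illustrative sketch using the standard library's CGIXMLRPCRequestHandler,
    # which exposes the same handle_request() entry point as the code above.
    from xmlrpc.server import CGIXMLRPCRequestHandler

    def ping(value):
        # Trivial function exposed over XML-RPC for the example.
        return value

    handler = CGIXMLRPCRequestHandler()
    handler.register_function(ping)

    # In a real CGI environment this reads the POST body from stdin (or serves
    # a GET request) and writes the XML-RPC response plus HTTP headers to stdout.
    # handler.handle_request()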
rbw/pysnow
pysnow/oauth_client.py
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/oauth_client.py#L68-L84
def set_token(self, token):
    """Validate and set token

    :param token: the token (dict) to set
    """
    if not token:
        self.token = None
        return

    expected_keys = ['token_type', 'refresh_token', 'access_token', 'scope', 'expires_in', 'expires_at']
    if not isinstance(token, dict) or not set(token) >= set(expected_keys):
        raise InvalidUsage("Expected a token dictionary containing the following keys: {0}"
                           .format(expected_keys))

    # Set sanitized token
    self.token = dict((k, v) for k, v in token.items() if k in expected_keys)
[ "def", "set_token", "(", "self", ",", "token", ")", ":", "if", "not", "token", ":", "self", ".", "token", "=", "None", "return", "expected_keys", "=", "[", "'token_type'", ",", "'refresh_token'", ",", "'access_token'", ",", "'scope'", ",", "'expires_in'", ",", "'expires_at'", "]", "if", "not", "isinstance", "(", "token", ",", "dict", ")", "or", "not", "set", "(", "token", ")", ">=", "set", "(", "expected_keys", ")", ":", "raise", "InvalidUsage", "(", "\"Expected a token dictionary containing the following keys: {0}\"", ".", "format", "(", "expected_keys", ")", ")", "# Set sanitized token", "self", ".", "token", "=", "dict", "(", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "token", ".", "items", "(", ")", "if", "k", "in", "expected_keys", ")" ]
Validate and set token :param token: the token (dict) to set
[ "Validate", "and", "set", "token" ]
python
train
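As a self-contained illustration of the validation rule used by set_token() above (a standalone sketch, not pysnow's API; the sanitize_token helper is hypothetical):

    # The token must be a dict containing at least the expected OAuth fields;
    # anything else is stripped out before the token is stored.
    expected_keys = {'token_type', 'refresh_token', 'access_token',
                     'scope', 'expires_in', 'expires_at'}

    def sanitize_token(token):
        if not token:
            return None
        if not isinstance(token, dict) or not set(token) >= expected_keys:
            raise ValueError("Expected a token dictionary containing: %s" % sorted(expected_keys))
        return {k: v for k, v in token.items() if k in expected_keys}

    example = {k: 'x' for k in expected_keys}
    example['extra'] = 'dropped'
    print(sanitize_token(example))  # the 'extra' key is filtered out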
bachiraoun/pysimplelog
SimpleLog.py
https://github.com/bachiraoun/pysimplelog/blob/2681ed5b1b8d7e66c3fff3ec3cca2b14ac571238/SimpleLog.py#L692-L704
def set_log_file_maximum_size(self, logFileMaxSize):
    """
    Set the log file maximum size in megabytes

    :Parameters:
        #. logFileMaxSize (number): The maximum size in Megabytes
           of a logging file. Once exceeded, another logging file as
           logFileBasename_N.logFileExtension will be created.
           Where N is an automatically incremented number.
    """
    assert _is_number(logFileMaxSize), "logFileMaxSize must be a number"
    logFileMaxSize = float(logFileMaxSize)
    assert logFileMaxSize>=1, "logFileMaxSize minimum size is 1 megabytes"
    self.__maxlogFileSize = logFileMaxSize
[ "def", "set_log_file_maximum_size", "(", "self", ",", "logFileMaxSize", ")", ":", "assert", "_is_number", "(", "logFileMaxSize", ")", ",", "\"logFileMaxSize must be a number\"", "logFileMaxSize", "=", "float", "(", "logFileMaxSize", ")", "assert", "logFileMaxSize", ">=", "1", ",", "\"logFileMaxSize minimum size is 1 megabytes\"", "self", ".", "__maxlogFileSize", "=", "logFileMaxSize" ]
Set the log file maximum size in megabytes :Parameters: #. logFileMaxSize (number): The maximum size in Megabytes of a logging file. Once exceeded, another logging file as logFileBasename_N.logFileExtension will be created. Where N is an automatically incremented number.
[ "Set", "the", "log", "file", "maximum", "size", "in", "megabytes" ]
python
train
saltstack/salt
doc/_ext/saltdomain.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/doc/_ext/saltdomain.py#L52-L72
def parse_lit(self, lines):
    '''
    Parse a string line-by-line delineating comments and code

    :returns: An tuple of boolean/list-of-string pairs. True designates a
        comment; False designates code.
    '''
    comment_char = '#'  # TODO: move this into a directive option
    comment = re.compile(r'^\s*{0}[ \n]'.format(comment_char))
    section_test = lambda val: bool(comment.match(val))

    sections = []
    for is_doc, group in itertools.groupby(lines, section_test):
        if is_doc:
            text = [comment.sub('', i).rstrip('\r\n') for i in group]
        else:
            text = [i.rstrip('\r\n') for i in group]

        sections.append((is_doc, text))

    return sections
[ "def", "parse_lit", "(", "self", ",", "lines", ")", ":", "comment_char", "=", "'#'", "# TODO: move this into a directive option", "comment", "=", "re", ".", "compile", "(", "r'^\\s*{0}[ \\n]'", ".", "format", "(", "comment_char", ")", ")", "section_test", "=", "lambda", "val", ":", "bool", "(", "comment", ".", "match", "(", "val", ")", ")", "sections", "=", "[", "]", "for", "is_doc", ",", "group", "in", "itertools", ".", "groupby", "(", "lines", ",", "section_test", ")", ":", "if", "is_doc", ":", "text", "=", "[", "comment", ".", "sub", "(", "''", ",", "i", ")", ".", "rstrip", "(", "'\\r\\n'", ")", "for", "i", "in", "group", "]", "else", ":", "text", "=", "[", "i", ".", "rstrip", "(", "'\\r\\n'", ")", "for", "i", "in", "group", "]", "sections", ".", "append", "(", "(", "is_doc", ",", "text", ")", ")", "return", "sections" ]
Parse a string line-by-line delineating comments and code :returns: An tuple of boolean/list-of-string pairs. True designates a comment; False designates code.
[ "Parse", "a", "string", "line", "-", "by", "-", "line", "delineating", "comments", "and", "code" ]
python
train
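The core technique in parse_lit() is itertools.groupby keyed on a comment regex, which yields alternating runs of comment and code lines. A standalone sketch (example input lines are invented):

    import itertools
    import re

    comment = re.compile(r'^\s*#[ \n]')
    lines = ["# A literate comment\n", "x = 1\n", "y = 2\n", "# Another comment\n"]

    sections = []
    for is_doc, group in itertools.groupby(lines, lambda val: bool(comment.match(val))):
        if is_doc:
            # Strip the leading comment marker from documentation lines.
            text = [comment.sub('', line).rstrip('\r\n') for line in group]
        else:
            text = [line.rstrip('\r\n') for line in group]
        sections.append((is_doc, text))

    print(sections)
    # [(True, ['A literate comment']), (False, ['x = 1', 'y = 2']), (True, ['Another comment'])]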
apple/turicreate
src/unity/python/turicreate/toolkits/recommender/util.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/recommender/util.py#L859-L925
def predict(self, dataset,
            new_observation_data=None, new_user_data=None, new_item_data=None):
    """
    Return a score prediction for the user ids and item ids in the provided
    data set.

    Parameters
    ----------
    dataset : SFrame
        Dataset in the same form used for training.

    new_observation_data : SFrame, optional
        ``new_observation_data`` gives additional observation data
        to the model, which may be used by the models to improve
        score accuracy. Must be in the same format as the
        observation data passed to ``create``. How this data is
        used varies by model.

    new_user_data : SFrame, optional
        ``new_user_data`` may give additional user data to the
        model. If present, scoring is done with reference to this
        new information. If there is any overlap with the side
        information present at training time, then this new side
        data is preferred. Must be in the same format as the user
        data passed to ``create``.

    new_item_data : SFrame, optional
        ``new_item_data`` may give additional item data to the
        model. If present, scoring is done with reference to this
        new information. If there is any overlap with the side
        information present at training time, then this new side
        data is preferred. Must be in the same format as the item
        data passed to ``create``.

    Returns
    -------
    out : SArray
        An SArray with predicted scores for each given observation
        predicted by the model.

    See Also
    --------
    recommend, evaluate
    """
    if new_observation_data is None:
        new_observation_data = _SFrame()
    if new_user_data is None:
        new_user_data = _SFrame()
    if new_item_data is None:
        new_item_data = _SFrame()

    dataset = self.__prepare_dataset_parameter(dataset)

    def check_type(arg, arg_name, required_type, allowed_types):
        if not isinstance(arg, required_type):
            raise TypeError("Parameter " + arg_name + " must be of type(s) "
                            + (", ".join(allowed_types))
                            + "; Type '" + str(type(arg)) + "' not recognized.")

    check_type(new_observation_data, "new_observation_data", _SFrame, ["SFrame"])
    check_type(new_user_data, "new_user_data", _SFrame, ["SFrame"])
    check_type(new_item_data, "new_item_data", _SFrame, ["SFrame"])

    response = self.__proxy__.predict(dataset, new_user_data, new_item_data)
    return response['prediction']
[ "def", "predict", "(", "self", ",", "dataset", ",", "new_observation_data", "=", "None", ",", "new_user_data", "=", "None", ",", "new_item_data", "=", "None", ")", ":", "if", "new_observation_data", "is", "None", ":", "new_observation_data", "=", "_SFrame", "(", ")", "if", "new_user_data", "is", "None", ":", "new_user_data", "=", "_SFrame", "(", ")", "if", "new_item_data", "is", "None", ":", "new_item_data", "=", "_SFrame", "(", ")", "dataset", "=", "self", ".", "__prepare_dataset_parameter", "(", "dataset", ")", "def", "check_type", "(", "arg", ",", "arg_name", ",", "required_type", ",", "allowed_types", ")", ":", "if", "not", "isinstance", "(", "arg", ",", "required_type", ")", ":", "raise", "TypeError", "(", "\"Parameter \"", "+", "arg_name", "+", "\" must be of type(s) \"", "+", "(", "\", \"", ".", "join", "(", "allowed_types", ")", ")", "+", "\"; Type '\"", "+", "str", "(", "type", "(", "arg", ")", ")", "+", "\"' not recognized.\"", ")", "check_type", "(", "new_observation_data", ",", "\"new_observation_data\"", ",", "_SFrame", ",", "[", "\"SFrame\"", "]", ")", "check_type", "(", "new_user_data", ",", "\"new_user_data\"", ",", "_SFrame", ",", "[", "\"SFrame\"", "]", ")", "check_type", "(", "new_item_data", ",", "\"new_item_data\"", ",", "_SFrame", ",", "[", "\"SFrame\"", "]", ")", "response", "=", "self", ".", "__proxy__", ".", "predict", "(", "dataset", ",", "new_user_data", ",", "new_item_data", ")", "return", "response", "[", "'prediction'", "]" ]
Return a score prediction for the user ids and item ids in the provided data set. Parameters ---------- dataset : SFrame Dataset in the same form used for training. new_observation_data : SFrame, optional ``new_observation_data`` gives additional observation data to the model, which may be used by the models to improve score accuracy. Must be in the same format as the observation data passed to ``create``. How this data is used varies by model. new_user_data : SFrame, optional ``new_user_data`` may give additional user data to the model. If present, scoring is done with reference to this new information. If there is any overlap with the side information present at training time, then this new side data is preferred. Must be in the same format as the user data passed to ``create``. new_item_data : SFrame, optional ``new_item_data`` may give additional item data to the model. If present, scoring is done with reference to this new information. If there is any overlap with the side information present at training time, then this new side data is preferred. Must be in the same format as the item data passed to ``create``. Returns ------- out : SArray An SArray with predicted scores for each given observation predicted by the model. See Also -------- recommend, evaluate
[ "Return", "a", "score", "prediction", "for", "the", "user", "ids", "and", "item", "ids", "in", "the", "provided", "data", "set", "." ]
python
train
lago-project/lago
lago/virt.py
https://github.com/lago-project/lago/blob/5b8970f7687e063e4619066d5b8093ca997678c9/lago/virt.py#L446-L476
def get_vms(self, vm_names=None):
    """
    Returns the vm objects associated with vm_names
    if vm_names is None, return all the vms in the prefix

    Args:
        vm_names (list of str): The names of the requested vms

    Returns
        dict: Which contains the requested vm objects indexed by name

    Raises:
        utils.LagoUserException: If a vm name doesn't exist
    """
    if not vm_names:
        return self._vms.copy()

    missing_vms = []
    vms = {}
    for name in vm_names:
        try:
            vms[name] = self._vms[name]
        except KeyError:
            # TODO: add resolver by suffix
            missing_vms.append(name)

    if missing_vms:
        raise utils.LagoUserException(
            'The following vms do not exist: \n{}'.format(
                '\n'.join(missing_vms)
            )
        )

    return vms
[ "def", "get_vms", "(", "self", ",", "vm_names", "=", "None", ")", ":", "if", "not", "vm_names", ":", "return", "self", ".", "_vms", ".", "copy", "(", ")", "missing_vms", "=", "[", "]", "vms", "=", "{", "}", "for", "name", "in", "vm_names", ":", "try", ":", "vms", "[", "name", "]", "=", "self", ".", "_vms", "[", "name", "]", "except", "KeyError", ":", "# TODO: add resolver by suffix", "missing_vms", ".", "append", "(", "name", ")", "if", "missing_vms", ":", "raise", "utils", ".", "LagoUserException", "(", "'The following vms do not exist: \\n{}'", ".", "format", "(", "'\\n'", ".", "join", "(", "missing_vms", ")", ")", ")", "return", "vms" ]
Returns the vm objects associated with vm_names if vm_names is None, return all the vms in the prefix Args: vm_names (list of str): The names of the requested vms Returns dict: Which contains the requested vm objects indexed by name Raises: utils.LagoUserException: If a vm name doesn't exist
[ "Returns", "the", "vm", "objects", "associated", "with", "vm_names", "if", "vm_names", "is", "None", "return", "all", "the", "vms", "in", "the", "prefix", "Args", ":", "vm_names", "(", "list", "of", "str", ")", ":", "The", "names", "of", "the", "requested", "vms", "Returns", "dict", ":", "Which", "contains", "the", "requested", "vm", "objects", "indexed", "by", "name", "Raises", ":", "utils", ".", "LagoUserException", ":", "If", "a", "vm", "name", "doesn", "t", "exist" ]
python
train
echinopsii/net.echinopsii.ariane.community.cli.python3
ariane_clip3/natsd/driver.py
https://github.com/echinopsii/net.echinopsii.ariane.community.cli.python3/blob/0a7feddebf66fee4bef38d64f456d93a7e9fcd68/ariane_clip3/natsd/driver.py#L243-L308
def on_response(self, msg):
    """
    setup response if correlation id is the good one
    """
    LOGGER.debug("natsd.Requester.on_response: " + str(sys.getsizeof(msg)) + " bytes received")
    working_response = json.loads(msg.data.decode())
    working_properties = DriverTools.json2properties(working_response['properties'])
    working_body = b'' + bytes(working_response['body'], 'utf8') if 'body' in working_response else None
    if DriverTools.MSG_CORRELATION_ID in working_properties:
        if self.corr_id == working_properties[DriverTools.MSG_CORRELATION_ID]:
            if DriverTools.MSG_SPLIT_COUNT in working_properties and \
                    int(working_properties[DriverTools.MSG_SPLIT_COUNT]) > 1:
                working_body_decoded = base64.b64decode(working_body) if working_body is not None else None
                if self.split_responses is None:
                    self.split_responses = []
                    self.split_responses_mid = working_properties[DriverTools.MSG_SPLIT_MID]

                if working_properties[DriverTools.MSG_SPLIT_MID] == self.split_responses_mid:
                    response = {
                        'properties': working_properties,
                        'body': working_body_decoded
                    }
                    self.split_responses.insert(int(working_properties[DriverTools.MSG_SPLIT_OID]), response)

                    if self.split_responses.__len__() == int(working_properties[DriverTools.MSG_SPLIT_COUNT]):
                        properties = {}
                        body = b''
                        for num in range(0, self.split_responses.__len__()):
                            properties.update(self.split_responses[num]['properties'])
                            body += self.split_responses[num]['body']
                        self.response = {
                            'properties': properties,
                            'body': body
                        }
                        self.split_responses = None
                        self.split_responses_mid = None
                else:
                    LOGGER.warn("natsd.Requester.on_response - discarded response : (" +
                                str(working_properties[DriverTools.MSG_CORRELATION_ID]) + "," +
                                str(working_properties[DriverTools.MSG_SPLIT_MID]) + ")")
                    LOGGER.debug("natsd.Requester.on_response - discarded response : " + str({
                        'properties': working_properties,
                        'body': working_body_decoded
                    }))
            else:
                working_body_decoded = base64.b64decode(working_body) if working_body is not None else \
                    bytes(json.dumps({}), 'utf8')
                self.response = {
                    'properties': working_properties,
                    'body': working_body_decoded
                }
        else:
            working_body_decoded = base64.b64decode(working_body) if working_body is not None else None
            LOGGER.warn("natsd.Requester.on_response - discarded response : " +
                        str(working_properties[DriverTools.MSG_CORRELATION_ID]))
            LOGGER.debug("natsd.Requester.on_response - discarded response : " + str({
                'properties': working_properties,
                'body': working_body_decoded
            }))
    else:
        working_body_decoded = base64.b64decode(working_body) if working_body is not None else None
        LOGGER.warn("natsd.Requester.on_response - discarded response (no correlation ID)")
        LOGGER.debug("natsd.Requester.on_response - discarded response : " + str({
            'properties': working_properties,
            'body': working_body_decoded
        }))
[ "def", "on_response", "(", "self", ",", "msg", ")", ":", "LOGGER", ".", "debug", "(", "\"natsd.Requester.on_response: \"", "+", "str", "(", "sys", ".", "getsizeof", "(", "msg", ")", ")", "+", "\" bytes received\"", ")", "working_response", "=", "json", ".", "loads", "(", "msg", ".", "data", ".", "decode", "(", ")", ")", "working_properties", "=", "DriverTools", ".", "json2properties", "(", "working_response", "[", "'properties'", "]", ")", "working_body", "=", "b''", "+", "bytes", "(", "working_response", "[", "'body'", "]", ",", "'utf8'", ")", "if", "'body'", "in", "working_response", "else", "None", "if", "DriverTools", ".", "MSG_CORRELATION_ID", "in", "working_properties", ":", "if", "self", ".", "corr_id", "==", "working_properties", "[", "DriverTools", ".", "MSG_CORRELATION_ID", "]", ":", "if", "DriverTools", ".", "MSG_SPLIT_COUNT", "in", "working_properties", "and", "int", "(", "working_properties", "[", "DriverTools", ".", "MSG_SPLIT_COUNT", "]", ")", ">", "1", ":", "working_body_decoded", "=", "base64", ".", "b64decode", "(", "working_body", ")", "if", "working_body", "is", "not", "None", "else", "None", "if", "self", ".", "split_responses", "is", "None", ":", "self", ".", "split_responses", "=", "[", "]", "self", ".", "split_responses_mid", "=", "working_properties", "[", "DriverTools", ".", "MSG_SPLIT_MID", "]", "if", "working_properties", "[", "DriverTools", ".", "MSG_SPLIT_MID", "]", "==", "self", ".", "split_responses_mid", ":", "response", "=", "{", "'properties'", ":", "working_properties", ",", "'body'", ":", "working_body_decoded", "}", "self", ".", "split_responses", ".", "insert", "(", "int", "(", "working_properties", "[", "DriverTools", ".", "MSG_SPLIT_OID", "]", ")", ",", "response", ")", "if", "self", ".", "split_responses", ".", "__len__", "(", ")", "==", "int", "(", "working_properties", "[", "DriverTools", ".", "MSG_SPLIT_COUNT", "]", ")", ":", "properties", "=", "{", "}", "body", "=", "b''", "for", "num", "in", "range", "(", "0", ",", "self", ".", "split_responses", ".", "__len__", "(", ")", ")", ":", "properties", ".", "update", "(", "self", ".", "split_responses", "[", "num", "]", "[", "'properties'", "]", ")", "body", "+=", "self", ".", "split_responses", "[", "num", "]", "[", "'body'", "]", "self", ".", "response", "=", "{", "'properties'", ":", "properties", ",", "'body'", ":", "body", "}", "self", ".", "split_responses", "=", "None", "self", ".", "split_responses_mid", "=", "None", "else", ":", "LOGGER", ".", "warn", "(", "\"natsd.Requester.on_response - discarded response : (\"", "+", "str", "(", "working_properties", "[", "DriverTools", ".", "MSG_CORRELATION_ID", "]", ")", "+", "\",\"", "+", "str", "(", "working_properties", "[", "DriverTools", ".", "MSG_SPLIT_MID", "]", ")", "+", "\")\"", ")", "LOGGER", ".", "debug", "(", "\"natsd.Requester.on_response - discarded response : \"", "+", "str", "(", "{", "'properties'", ":", "working_properties", ",", "'body'", ":", "working_body_decoded", "}", ")", ")", "else", ":", "working_body_decoded", "=", "base64", ".", "b64decode", "(", "working_body", ")", "if", "working_body", "is", "not", "None", "else", "bytes", "(", "json", ".", "dumps", "(", "{", "}", ")", ",", "'utf8'", ")", "self", ".", "response", "=", "{", "'properties'", ":", "working_properties", ",", "'body'", ":", "working_body_decoded", "}", "else", ":", "working_body_decoded", "=", "base64", ".", "b64decode", "(", "working_body", ")", "if", "working_body", "is", "not", "None", "else", "None", "LOGGER", ".", "warn", "(", 
"\"natsd.Requester.on_response - discarded response : \"", "+", "str", "(", "working_properties", "[", "DriverTools", ".", "MSG_CORRELATION_ID", "]", ")", ")", "LOGGER", ".", "debug", "(", "\"natsd.Requester.on_response - discarded response : \"", "+", "str", "(", "{", "'properties'", ":", "working_properties", ",", "'body'", ":", "working_body_decoded", "}", ")", ")", "else", ":", "working_body_decoded", "=", "base64", ".", "b64decode", "(", "working_body", ")", "if", "working_body", "is", "not", "None", "else", "None", "LOGGER", ".", "warn", "(", "\"natsd.Requester.on_response - discarded response (no correlation ID)\"", ")", "LOGGER", ".", "debug", "(", "\"natsd.Requester.on_response - discarded response : \"", "+", "str", "(", "{", "'properties'", ":", "working_properties", ",", "'body'", ":", "working_body_decoded", "}", ")", ")" ]
setup response if correlation id is the good one
[ "setup", "response", "if", "correlation", "id", "is", "the", "good", "one" ]
python
train
docker/docker-py
docker/models/containers.py
https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/models/containers.py#L33-L40
def image(self):
    """
    The image of the container.
    """
    image_id = self.attrs.get('ImageID', self.attrs['Image'])
    if image_id is None:
        return None
    return self.client.images.get(image_id.split(':')[1])
[ "def", "image", "(", "self", ")", ":", "image_id", "=", "self", ".", "attrs", ".", "get", "(", "'ImageID'", ",", "self", ".", "attrs", "[", "'Image'", "]", ")", "if", "image_id", "is", "None", ":", "return", "None", "return", "self", ".", "client", ".", "images", ".", "get", "(", "image_id", ".", "split", "(", "':'", ")", "[", "1", "]", ")" ]
The image of the container.
[ "The", "image", "of", "the", "container", "." ]
python
train
MDAnalysis/GridDataFormats
gridData/CCP4.py
https://github.com/MDAnalysis/GridDataFormats/blob/3eeb0432f8cf856912436e4f3e7aba99d3c916be/gridData/CCP4.py#L267-L314
def _read_header(self, ccp4file):
    """Read header bytes"""

    bsaflag = self._detect_byteorder(ccp4file)

    # Parse the top of the header (4-byte words, 1 to 25).
    nheader = struct.calcsize(self._headerfmt)
    names = [r.key for r in self._header_struct]
    bintopheader = ccp4file.read(25 * 4)

    def decode_header(header, bsaflag='@'):
        h = dict(zip(names, struct.unpack(bsaflag + self._headerfmt, header)))
        h['bsaflag'] = bsaflag
        return h
    header = decode_header(bintopheader, bsaflag)

    for rec in self._header_struct:
        if not rec.is_legal_dict(header):
            warnings.warn(
                "Key %s: Illegal value %r" % (rec.key, header[rec.key]))

    # Parse the latter half of the header (4-byte words, 26 to 256).
    if (header['lskflg']):
        skewmatrix = np.fromfile(ccp4file, dtype=np.float32, count=9)
        header['skwmat'] = skewmatrix.reshape((3, 3))
        header['skwtrn'] = np.fromfile(ccp4file, dtype=np.float32, count=3)
    else:
        header['skwmat'] = header['skwtrn'] = None
        ccp4file.seek(12 * 4, 1)
    ccp4file.seek(15 * 4, 1)  # Skip future use section.
    ccp4file.seek(4, 1)  # Skip map text, already used above to verify format.
    # TODO: Compare file specified endianness to one obtained above.
    endiancode = struct.unpack(bsaflag + '4b', ccp4file.read(4))
    header['endianness'] = 'little' if endiancode == (0x44, 0x41, 0, 0) else 'big'
    header['arms'] = struct.unpack(bsaflag + 'f', ccp4file.read(4))[0]
    header['nlabl'] = struct.unpack(bsaflag + 'I', ccp4file.read(4))[0]
    if header['nlabl']:
        binlabel = ccp4file.read(80 * header['nlabl'])
        flag = bsaflag + str(80 * header['nlabl']) + 's'
        label = struct.unpack(flag, binlabel)[0]
        header['label'] = label.decode('utf-8').rstrip('\x00')
    else:
        header['label'] = None

    ccp4file.seek(256 * 4)
    # TODO: Parse symmetry records, if any.
    return header
[ "def", "_read_header", "(", "self", ",", "ccp4file", ")", ":", "bsaflag", "=", "self", ".", "_detect_byteorder", "(", "ccp4file", ")", "# Parse the top of the header (4-byte words, 1 to 25).", "nheader", "=", "struct", ".", "calcsize", "(", "self", ".", "_headerfmt", ")", "names", "=", "[", "r", ".", "key", "for", "r", "in", "self", ".", "_header_struct", "]", "bintopheader", "=", "ccp4file", ".", "read", "(", "25", "*", "4", ")", "def", "decode_header", "(", "header", ",", "bsaflag", "=", "'@'", ")", ":", "h", "=", "dict", "(", "zip", "(", "names", ",", "struct", ".", "unpack", "(", "bsaflag", "+", "self", ".", "_headerfmt", ",", "header", ")", ")", ")", "h", "[", "'bsaflag'", "]", "=", "bsaflag", "return", "h", "header", "=", "decode_header", "(", "bintopheader", ",", "bsaflag", ")", "for", "rec", "in", "self", ".", "_header_struct", ":", "if", "not", "rec", ".", "is_legal_dict", "(", "header", ")", ":", "warnings", ".", "warn", "(", "\"Key %s: Illegal value %r\"", "%", "(", "rec", ".", "key", ",", "header", "[", "rec", ".", "key", "]", ")", ")", "# Parse the latter half of the header (4-byte words, 26 to 256).", "if", "(", "header", "[", "'lskflg'", "]", ")", ":", "skewmatrix", "=", "np", ".", "fromfile", "(", "ccp4file", ",", "dtype", "=", "np", ".", "float32", ",", "count", "=", "9", ")", "header", "[", "'skwmat'", "]", "=", "skewmatrix", ".", "reshape", "(", "(", "3", ",", "3", ")", ")", "header", "[", "'skwtrn'", "]", "=", "np", ".", "fromfile", "(", "ccp4file", ",", "dtype", "=", "np", ".", "float32", ",", "count", "=", "3", ")", "else", ":", "header", "[", "'skwmat'", "]", "=", "header", "[", "'skwtrn'", "]", "=", "None", "ccp4file", ".", "seek", "(", "12", "*", "4", ",", "1", ")", "ccp4file", ".", "seek", "(", "15", "*", "4", ",", "1", ")", "# Skip future use section.", "ccp4file", ".", "seek", "(", "4", ",", "1", ")", "# Skip map text, already used above to verify format.", "# TODO: Compare file specified endianness to one obtained above.", "endiancode", "=", "struct", ".", "unpack", "(", "bsaflag", "+", "'4b'", ",", "ccp4file", ".", "read", "(", "4", ")", ")", "header", "[", "'endianness'", "]", "=", "'little'", "if", "endiancode", "==", "(", "0x44", ",", "0x41", ",", "0", ",", "0", ")", "else", "'big'", "header", "[", "'arms'", "]", "=", "struct", ".", "unpack", "(", "bsaflag", "+", "'f'", ",", "ccp4file", ".", "read", "(", "4", ")", ")", "[", "0", "]", "header", "[", "'nlabl'", "]", "=", "struct", ".", "unpack", "(", "bsaflag", "+", "'I'", ",", "ccp4file", ".", "read", "(", "4", ")", ")", "[", "0", "]", "if", "header", "[", "'nlabl'", "]", ":", "binlabel", "=", "ccp4file", ".", "read", "(", "80", "*", "header", "[", "'nlabl'", "]", ")", "flag", "=", "bsaflag", "+", "str", "(", "80", "*", "header", "[", "'nlabl'", "]", ")", "+", "'s'", "label", "=", "struct", ".", "unpack", "(", "flag", ",", "binlabel", ")", "[", "0", "]", "header", "[", "'label'", "]", "=", "label", ".", "decode", "(", "'utf-8'", ")", ".", "rstrip", "(", "'\\x00'", ")", "else", ":", "header", "[", "'label'", "]", "=", "None", "ccp4file", ".", "seek", "(", "256", "*", "4", ")", "# TODO: Parse symmetry records, if any.", "return", "header" ]
Read header bytes
[ "Read", "header", "bytes" ]
python
valid
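The header-decoding idiom above (unpack fixed-width binary words with an explicit byte-order flag, then zip them with field names) is easy to demonstrate in isolation. A minimal sketch with made-up field names, not the actual CCP4 header layout:

    import struct

    names = ['nc', 'nr', 'ns', 'mode']
    headerfmt = '4i'
    bsaflag = '<'                      # little-endian, analogous to the detected flag

    # Pack four 4-byte words as a stand-in for a file header, then decode them.
    raw = struct.pack(bsaflag + headerfmt, 100, 120, 90, 2)
    header = dict(zip(names, struct.unpack(bsaflag + headerfmt, raw)))
    header['bsaflag'] = bsaflag
    print(header)  # {'nc': 100, 'nr': 120, 'ns': 90, 'mode': 2, 'bsaflag': '<'}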
minhhoit/yacms
yacms/blog/management/base.py
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/blog/management/base.py#L86-L104
def add_page(self, title=None, content=None, old_url=None, tags=None,
             old_id=None, old_parent_id=None):
    """
    Adds a page to the list of pages to be imported - used by the
    Wordpress importer.
    """
    if not title:
        text = decode_entities(strip_tags(content)).replace("\n", " ")
        title = text.split(". ")[0]
    if tags is None:
        tags = []
    self.pages.append({
        "title": title,
        "content": content,
        "tags": tags,
        "old_url": old_url,
        "old_id": old_id,
        "old_parent_id": old_parent_id,
    })
[ "def", "add_page", "(", "self", ",", "title", "=", "None", ",", "content", "=", "None", ",", "old_url", "=", "None", ",", "tags", "=", "None", ",", "old_id", "=", "None", ",", "old_parent_id", "=", "None", ")", ":", "if", "not", "title", ":", "text", "=", "decode_entities", "(", "strip_tags", "(", "content", ")", ")", ".", "replace", "(", "\"\\n\"", ",", "\" \"", ")", "title", "=", "text", ".", "split", "(", "\". \"", ")", "[", "0", "]", "if", "tags", "is", "None", ":", "tags", "=", "[", "]", "self", ".", "pages", ".", "append", "(", "{", "\"title\"", ":", "title", ",", "\"content\"", ":", "content", ",", "\"tags\"", ":", "tags", ",", "\"old_url\"", ":", "old_url", ",", "\"old_id\"", ":", "old_id", ",", "\"old_parent_id\"", ":", "old_parent_id", ",", "}", ")" ]
Adds a page to the list of pages to be imported - used by the Wordpress importer.
[ "Adds", "a", "page", "to", "the", "list", "of", "pages", "to", "be", "imported", "-", "used", "by", "the", "Wordpress", "importer", "." ]
python
train
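The interesting step in add_page() is deriving a title when none is supplied: strip markup, collapse newlines, and take the first sentence. A standalone sketch; the naive_strip_tags helper below is a hypothetical stand-in for yacms' strip_tags/decode_entities, not its real implementation:

    import re
    from html import unescape

    def naive_strip_tags(html):
        # Hypothetical stand-in: drop anything inside <...>.
        return re.sub(r'<[^>]+>', '', html)

    content = "<p>Hello world. This is the body\nof an imported page.</p>"
    text = unescape(naive_strip_tags(content)).replace("\n", " ")
    title = text.split(". ")[0]
    print(title)  # Hello world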
awslabs/sockeye
sockeye/lexical_constraints.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/lexical_constraints.py#L181-L192
def reorder(self, indices: mx.nd.NDArray) -> None:
    """
    Reorders the avoid list according to the selected row indices.
    This can produce duplicates, but this is fixed if state changes occur in consume().

    :param indices: An mx.nd.NDArray containing indices of hypotheses to select.
    """
    if self.global_avoid_states:
        self.global_avoid_states = [self.global_avoid_states[x] for x in indices.asnumpy()]

    if self.local_avoid_states:
        self.local_avoid_states = [self.local_avoid_states[x] for x in indices.asnumpy()]
[ "def", "reorder", "(", "self", ",", "indices", ":", "mx", ".", "nd", ".", "NDArray", ")", "->", "None", ":", "if", "self", ".", "global_avoid_states", ":", "self", ".", "global_avoid_states", "=", "[", "self", ".", "global_avoid_states", "[", "x", "]", "for", "x", "in", "indices", ".", "asnumpy", "(", ")", "]", "if", "self", ".", "local_avoid_states", ":", "self", ".", "local_avoid_states", "=", "[", "self", ".", "local_avoid_states", "[", "x", "]", "for", "x", "in", "indices", ".", "asnumpy", "(", ")", "]" ]
Reorders the avoid list according to the selected row indices. This can produce duplicates, but this is fixed if state changes occur in consume(). :param indices: An mx.nd.NDArray containing indices of hypotheses to select.
[ "Reorders", "the", "avoid", "list", "according", "to", "the", "selected", "row", "indices", ".", "This", "can", "produce", "duplicates", "but", "this", "is", "fixed", "if", "state", "changes", "occur", "in", "consume", "()", "." ]
python
train
allianceauth/allianceauth
allianceauth/authentication/admin.py
https://github.com/allianceauth/allianceauth/blob/6585b07e96571a99a4d6dc03cc03f9b8c8f690ca/allianceauth/authentication/admin.py#L15-L27
def make_service_hooks_update_groups_action(service):
    """
    Make a admin action for the given service

    :param service: services.hooks.ServicesHook
    :return: fn to update services groups for the selected users
    """
    def update_service_groups(modeladmin, request, queryset):
        for user in queryset:  # queryset filtering doesn't work here?
            service.update_groups(user)

    update_service_groups.__name__ = str('update_{}_groups'.format(slugify(service.name)))
    update_service_groups.short_description = "Sync groups for selected {} accounts".format(service.title)
    return update_service_groups
[ "def", "make_service_hooks_update_groups_action", "(", "service", ")", ":", "def", "update_service_groups", "(", "modeladmin", ",", "request", ",", "queryset", ")", ":", "for", "user", "in", "queryset", ":", "# queryset filtering doesn't work here?", "service", ".", "update_groups", "(", "user", ")", "update_service_groups", ".", "__name__", "=", "str", "(", "'update_{}_groups'", ".", "format", "(", "slugify", "(", "service", ".", "name", ")", ")", ")", "update_service_groups", ".", "short_description", "=", "\"Sync groups for selected {} accounts\"", ".", "format", "(", "service", ".", "title", ")", "return", "update_service_groups" ]
Make a admin action for the given service :param service: services.hooks.ServicesHook :return: fn to update services groups for the selected users
[ "Make", "a", "admin", "action", "for", "the", "given", "service", ":", "param", "service", ":", "services", ".", "hooks", ".", "ServicesHook", ":", "return", ":", "fn", "to", "update", "services", "groups", "for", "the", "selected", "users" ]
python
train
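The pattern above is a factory that builds one closure per service and then renames it, since Django admin distinguishes actions by function name. A standalone sketch of the same pattern without Django; the fake service name and callback are invented for the example:

    def make_update_groups_action(service_name, update_groups):
        def update_service_groups(items):
            for item in items:
                update_groups(item)
        # Give each generated action a distinct identity and label.
        update_service_groups.__name__ = 'update_{}_groups'.format(service_name)
        update_service_groups.short_description = "Sync groups for selected {} accounts".format(service_name)
        return update_service_groups

    action = make_update_groups_action('discord', lambda user: print('syncing', user))
    print(action.__name__)      # update_discord_groups
    action(['alice', 'bob'])    # syncing alice / syncing bob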
fr33jc/bang
bang/stack.py
https://github.com/fr33jc/bang/blob/8f000713f88d2a9a8c1193b63ca10a6578560c16/bang/stack.py#L89-L111
def get_namespace(self, key):
    """
    Returns a :class:`~bang.util.SharedNamespace` for the given
    :attr:`key`.  These are used by
    :class:`~bang.deployers.deployer.Deployer` objects of the same
    ``deployer_class`` to coordinate control over multiple deployed
    instances of like resources.  E.g. With 5 clones of an application
    server, 5 :class:`~bang.deployers.deployer.Deployer` objects in
    separate, concurrent processes will use the same shared namespace
    to ensure that each object/process controls a distinct server.

    :param str key:  Unique ID for the namespace.
        :class:`~bang.deployers.deployer.Deployer` objects that call
        :meth:`get_namespace` with the same :attr:`key` will receive
        the same :class:`~bang.util.SharedNamespace` object.
    """
    namespace = self.shared_namespaces.get(key)
    if namespace:
        return namespace
    ns = SharedNamespace(self.manager)
    self.shared_namespaces[key] = ns
    return ns
[ "def", "get_namespace", "(", "self", ",", "key", ")", ":", "namespace", "=", "self", ".", "shared_namespaces", ".", "get", "(", "key", ")", "if", "namespace", ":", "return", "namespace", "ns", "=", "SharedNamespace", "(", "self", ".", "manager", ")", "self", ".", "shared_namespaces", "[", "key", "]", "=", "ns", "return", "ns" ]
Returns a :class:`~bang.util.SharedNamespace` for the given :attr:`key`. These are used by :class:`~bang.deployers.deployer.Deployer` objects of the same ``deployer_class`` to coordinate control over multiple deployed instances of like resources. E.g. With 5 clones of an application server, 5 :class:`~bang.deployers.deployer.Deployer` objects in separate, concurrent processes will use the same shared namespace to ensure that each object/process controls a distinct server. :param str key: Unique ID for the namespace. :class:`~bang.deployers.deployer.Deployer` objects that call :meth:`get_namespace` with the same :attr:`key` will receive the same :class:`~bang.util.SharedNamespace` object.
[ "Returns", "a", ":", "class", ":", "~bang", ".", "util", ".", "SharedNamespace", "for", "the", "given", ":", "attr", ":", "key", ".", "These", "are", "used", "by", ":", "class", ":", "~bang", ".", "deployers", ".", "deployer", ".", "Deployer", "objects", "of", "the", "same", "deployer_class", "to", "coordinate", "control", "over", "multiple", "deployed", "instances", "of", "like", "resources", ".", "E", ".", "g", ".", "With", "5", "clones", "of", "an", "application", "server", "5", ":", "class", ":", "~bang", ".", "deployers", ".", "deployer", ".", "Deployer", "objects", "in", "separate", "concurrent", "processes", "will", "use", "the", "same", "shared", "namespace", "to", "ensure", "that", "each", "object", "/", "process", "controls", "a", "distinct", "server", "." ]
python
train
a1ezzz/wasp-general
wasp_general/os/linux/mounts.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/os/linux/mounts.py#L123-L132
def mounts(cls):
    """ Return tuple of current mount points

    :return: tuple of WMountPoint
    """
    result = []
    with open(cls.__mounts_file__) as f:
        for mount_record in f:
            result.append(WMountPoint(mount_record))
    return tuple(result)
[ "def", "mounts", "(", "cls", ")", ":", "result", "=", "[", "]", "with", "open", "(", "cls", ".", "__mounts_file__", ")", "as", "f", ":", "for", "mount_record", "in", "f", ":", "result", ".", "append", "(", "WMountPoint", "(", "mount_record", ")", ")", "return", "tuple", "(", "result", ")" ]
Return tuple of current mount points :return: tuple of WMountPoint
[ "Return", "tuple", "of", "current", "mount", "points" ]
python
train
pymc-devs/pymc
pymc/diagnostics.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/diagnostics.py#L236-L315
def geweke(x, first=.1, last=.5, intervals=20, maxlag=20):
    """Return z-scores for convergence diagnostics.

    Compare the mean of the first % of series with the mean of the last %
    of series. x is divided into a number of segments for which this
    difference is computed. If the series is converged, this score should
    oscillate between -1 and 1.

    Parameters
    ----------
    x : array-like
      The trace of some stochastic parameter.
    first : float
      The fraction of series at the beginning of the trace.
    last : float
      The fraction of series at the end to be compared with the section
      at the beginning.
    intervals : int
      The number of segments.
    maxlag : int
      Maximum autocorrelation lag for estimation of spectral variance

    Returns
    -------
    scores : list [[]]
      Return a list of [i, score], where i is the starting index for each
      interval and score the Geweke score on the interval.

    Notes
    -----
    The Geweke score on some series x is computed by:

      .. math:: \frac{E[x_s] - E[x_e]}{\sqrt{V[x_s] + V[x_e]}}

    where :math:`E` stands for the mean, :math:`V` the variance,
    :math:`x_s` a section at the start of the series and
    :math:`x_e` a section at the end of the series.

    References
    ----------
    Geweke (1992)
    """
    if not has_sm:
        print("statsmodels not available. Geweke diagnostic cannot be calculated.")
        return

    if np.ndim(x) > 1:
        return [geweke(y, first, last, intervals) for y in np.transpose(x)]

    # Filter out invalid intervals
    if first + last >= 1:
        raise ValueError(
            "Invalid intervals for Geweke convergence analysis",
            (first, last))

    # Initialize list of z-scores
    zscores = [None] * intervals

    # Starting points for calculations
    starts = np.linspace(0, int(len(x)*(1.-last)), intervals).astype(int)

    # Loop over start indices
    for i, s in enumerate(starts):

        # Size of remaining array
        x_trunc = x[s:]
        n = len(x_trunc)

        # Calculate slices
        first_slice = x_trunc[:int(first * n)]
        last_slice = x_trunc[int(last * n):]

        z = (first_slice.mean() - last_slice.mean())
        z /= np.sqrt(spec(first_slice)/len(first_slice) +
                     spec(last_slice)/len(last_slice))

        zscores[i] = len(x) - n, z

    return zscores
[ "def", "geweke", "(", "x", ",", "first", "=", ".1", ",", "last", "=", ".5", ",", "intervals", "=", "20", ",", "maxlag", "=", "20", ")", ":", "if", "not", "has_sm", ":", "print", "(", "\"statsmodels not available. Geweke diagnostic cannot be calculated.\"", ")", "return", "if", "np", ".", "ndim", "(", "x", ")", ">", "1", ":", "return", "[", "geweke", "(", "y", ",", "first", ",", "last", ",", "intervals", ")", "for", "y", "in", "np", ".", "transpose", "(", "x", ")", "]", "# Filter out invalid intervals", "if", "first", "+", "last", ">=", "1", ":", "raise", "ValueError", "(", "\"Invalid intervals for Geweke convergence analysis\"", ",", "(", "first", ",", "last", ")", ")", "# Initialize list of z-scores", "zscores", "=", "[", "None", "]", "*", "intervals", "# Starting points for calculations", "starts", "=", "np", ".", "linspace", "(", "0", ",", "int", "(", "len", "(", "x", ")", "*", "(", "1.", "-", "last", ")", ")", ",", "intervals", ")", ".", "astype", "(", "int", ")", "# Loop over start indices", "for", "i", ",", "s", "in", "enumerate", "(", "starts", ")", ":", "# Size of remaining array", "x_trunc", "=", "x", "[", "s", ":", "]", "n", "=", "len", "(", "x_trunc", ")", "# Calculate slices", "first_slice", "=", "x_trunc", "[", ":", "int", "(", "first", "*", "n", ")", "]", "last_slice", "=", "x_trunc", "[", "int", "(", "last", "*", "n", ")", ":", "]", "z", "=", "(", "first_slice", ".", "mean", "(", ")", "-", "last_slice", ".", "mean", "(", ")", ")", "z", "/=", "np", ".", "sqrt", "(", "spec", "(", "first_slice", ")", "/", "len", "(", "first_slice", ")", "+", "spec", "(", "last_slice", ")", "/", "len", "(", "last_slice", ")", ")", "zscores", "[", "i", "]", "=", "len", "(", "x", ")", "-", "n", ",", "z", "return", "zscores" ]
Return z-scores for convergence diagnostics. Compare the mean of the first % of series with the mean of the last % of series. x is divided into a number of segments for which this difference is computed. If the series is converged, this score should oscillate between -1 and 1. Parameters ---------- x : array-like The trace of some stochastic parameter. first : float The fraction of series at the beginning of the trace. last : float The fraction of series at the end to be compared with the section at the beginning. intervals : int The number of segments. maxlag : int Maximum autocorrelation lag for estimation of spectral variance Returns ------- scores : list [[]] Return a list of [i, score], where i is the starting index for each interval and score the Geweke score on the interval. Notes ----- The Geweke score on some series x is computed by: .. math:: \frac{E[x_s] - E[x_e]}{\sqrt{V[x_s] + V[x_e]}} where :math:`E` stands for the mean, :math:`V` the variance, :math:`x_s` a section at the start of the series and :math:`x_e` a section at the end of the series. References ---------- Geweke (1992)
[ "Return", "z", "-", "scores", "for", "convergence", "diagnostics", "." ]
python
train
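The formula in the docstring above is easy to check numerically. A minimal sketch that computes a single Geweke-style z-score with plain sample variances in place of pymc's spectral density estimate (that substitution is an assumption made to keep the example dependency-free):

    import numpy as np

    rng = np.random.default_rng(0)
    trace = rng.normal(size=2000)          # a well-mixed chain for the example

    first_slice = trace[:int(0.1 * len(trace))]
    last_slice = trace[int(0.5 * len(trace)):]

    # z = (E[x_s] - E[x_e]) / sqrt(V[x_s] + V[x_e]), variances scaled by segment length
    z = first_slice.mean() - last_slice.mean()
    z /= np.sqrt(first_slice.var() / len(first_slice) + last_slice.var() / len(last_slice))
    print(z)  # stays small for a converged chain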
csparpa/pyowm
pyowm/weatherapi25/forecast.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/forecast.py#L206-L223
def _to_DOM(self):
    """
    Dumps object data to a fully traversable DOM representation of the
    object.

    :returns: a ``xml.etree.Element`` object
    """
    root_node = ET.Element("forecast")
    interval_node = ET.SubElement(root_node, "interval")
    interval_node.text = self._interval
    reception_time_node = ET.SubElement(root_node, "reception_time")
    reception_time_node.text = str(self._reception_time)
    root_node.append(self._location._to_DOM())
    weathers_node = ET.SubElement(root_node, "weathers")
    for weather in self:
        weathers_node.append(weather._to_DOM())
    return root_node
[ "def", "_to_DOM", "(", "self", ")", ":", "root_node", "=", "ET", ".", "Element", "(", "\"forecast\"", ")", "interval_node", "=", "ET", ".", "SubElement", "(", "root_node", ",", "\"interval\"", ")", "interval_node", ".", "text", "=", "self", ".", "_interval", "reception_time_node", "=", "ET", ".", "SubElement", "(", "root_node", ",", "\"reception_time\"", ")", "reception_time_node", ".", "text", "=", "str", "(", "self", ".", "_reception_time", ")", "root_node", ".", "append", "(", "self", ".", "_location", ".", "_to_DOM", "(", ")", ")", "weathers_node", "=", "ET", ".", "SubElement", "(", "root_node", ",", "\"weathers\"", ")", "for", "weather", "in", "self", ":", "weathers_node", ".", "append", "(", "weather", ".", "_to_DOM", "(", ")", ")", "return", "root_node" ]
Dumps object data to a fully traversable DOM representation of the object. :returns: a ``xml.etree.Element`` object
[ "Dumps", "object", "data", "to", "a", "fully", "traversable", "DOM", "representation", "of", "the", "object", "." ]
python
train
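The method above is a straightforward use of xml.etree.ElementTree: build a root element, attach subelements, and return the tree. A minimal sketch of the same pattern; the tag names follow the method, the values are invented:

    import xml.etree.ElementTree as ET

    root_node = ET.Element("forecast")
    interval_node = ET.SubElement(root_node, "interval")
    interval_node.text = "3h"
    reception_time_node = ET.SubElement(root_node, "reception_time")
    reception_time_node.text = "1234567890"

    print(ET.tostring(root_node, encoding="unicode"))
    # <forecast><interval>3h</interval><reception_time>1234567890</reception_time></forecast>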
expfactory/expfactory
expfactory/database/filesystem.py
https://github.com/expfactory/expfactory/blob/27ce6cc93e17231df8a8024f18e631336afd3501/expfactory/database/filesystem.py#L105-L123
def generate_user(self, subid=None):
    '''generate a new user on the filesystem, still session based so we
       create a new identifier. This function is called from the users new
       entrypoint, and it assumes we want a user generated with a token.
       since we don't have a database proper, we write the folder name to
       the filesystem
    '''
    # Only generate token if subid being created
    if subid is None:
        token = str(uuid.uuid4())
        subid = self.generate_subid(token=token)

    if os.path.exists(self.data_base):  # /scif/data
        data_base = "%s/%s" % (self.data_base, subid)  # expfactory/00001
        if not os.path.exists(data_base):
            mkdir_p(data_base)

    return data_base
[ "def", "generate_user", "(", "self", ",", "subid", "=", "None", ")", ":", "# Only generate token if subid being created", "if", "subid", "is", "None", ":", "token", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "subid", "=", "self", ".", "generate_subid", "(", "token", "=", "token", ")", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "data_base", ")", ":", "# /scif/data", "data_base", "=", "\"%s/%s\"", "%", "(", "self", ".", "data_base", ",", "subid", ")", "# expfactory/00001", "if", "not", "os", ".", "path", ".", "exists", "(", "data_base", ")", ":", "mkdir_p", "(", "data_base", ")", "return", "data_base" ]
generate a new user on the filesystem, still session based so we create a new identifier. This function is called from the users new entrypoint, and it assumes we want a user generated with a token. since we don't have a database proper, we write the folder name to the filesystem
[ "generate", "a", "new", "user", "on", "the", "filesystem", "still", "session", "based", "so", "we", "create", "a", "new", "identifier", ".", "This", "function", "is", "called", "from", "the", "users", "new", "entrypoint", "and", "it", "assumes", "we", "want", "a", "user", "generated", "with", "a", "token", ".", "since", "we", "don", "t", "have", "a", "database", "proper", "we", "write", "the", "folder", "name", "to", "the", "filesystem" ]
python
train
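The essence of generate_user() is deriving an identifier from a UUID token and creating a per-user data folder. A standalone sketch using only the standard library; the base directory and the id derivation are assumptions for the example, not expfactory's generate_subid logic:

    import os
    import uuid
    import tempfile

    data_base = tempfile.mkdtemp()          # stands in for something like /scif/data
    token = str(uuid.uuid4())
    subid = token.split('-')[0]             # hypothetical id derivation

    user_dir = os.path.join(data_base, subid)
    os.makedirs(user_dir, exist_ok=True)    # stdlib equivalent of mkdir_p
    print(user_dir)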
jxtech/wechatpy
wechatpy/client/api/scan.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/scan.py#L111-L131
def list_product(self, offset=0, limit=10, status=None, key=None):
    """
    批量查询商品信息
    详情请参考
    http://mp.weixin.qq.com/wiki/15/7fa787701295b884410b5163e13313af.html

    :param offset: 可选,批量查询的起始位置,从 0 开始,包含该起始位置
    :param limit: 可选,批量查询的数量,默认为 10
    :param status: 可选,支持按状态拉取。on为发布状态,off为未发布状态,
                   check为审核中状态,reject为审核未通过状态,all为所有状态
    :param key: 支持按部分编码内容拉取。填写该参数后,可将编码内容中包含所传参数的商品信息拉出
    :return: 返回的 JSON 数据包
    """
    data = optionaldict(
        offset=offset,
        limit=limit,
        status=status,
        keystr=key,
    )
    return self._post('product/getlist', data=data)
[ "def", "list_product", "(", "self", ",", "offset", "=", "0", ",", "limit", "=", "10", ",", "status", "=", "None", ",", "key", "=", "None", ")", ":", "data", "=", "optionaldict", "(", "offset", "=", "offset", ",", "limit", "=", "limit", ",", "status", "=", "status", ",", "keystr", "=", "key", ",", ")", "return", "self", ".", "_post", "(", "'product/getlist'", ",", "data", "=", "data", ")" ]
批量查询商品信息 详情请参考 http://mp.weixin.qq.com/wiki/15/7fa787701295b884410b5163e13313af.html :param offset: 可选,批量查询的起始位置,从 0 开始,包含该起始位置 :param limit: 可选,批量查询的数量,默认为 10 :param status: 可选,支持按状态拉取。on为发布状态,off为未发布状态, check为审核中状态,reject为审核未通过状态,all为所有状态 :param key: 支持按部分编码内容拉取。填写该参数后,可将编码内容中包含所传参数的商品信息拉出 :return: 返回的 JSON 数据包
[ "批量查询商品信息" ]
python
train
djordon/queueing-tool
queueing_tool/network/queue_network.py
https://github.com/djordon/queueing-tool/blob/ccd418cf647ac03a54f78ba5e3725903f541b808/queueing_tool/network/queue_network.py#L1381-L1410
def start_collecting_data(self, queues=None, edge=None, edge_type=None):
    """Tells the queues to collect data on agents' arrival, service
    start, and departure times.

    If none of the parameters are given then every
    :class:`.QueueServer` will start collecting data.

    Parameters
    ----------
    queues : :any:`int`, *array_like* (optional)
        The edge index (or an iterable of edge indices) identifying
        the :class:`QueueServer(s)<.QueueServer>` that will start
        collecting data.
    edge : 2-tuple of int or *array_like* (optional)
        Explicitly specify which queues will collect data. Must be
        either:

        * A 2-tuple of the edge's source and target vertex
          indices, or
        * An iterable of 2-tuples of the edge's source and
          target vertex indices.
    edge_type : int or an iterable of int (optional)
        A integer, or a collection of integers identifying which
        edge types will be set active.
    """
    queues = _get_queues(self.g, queues, edge, edge_type)

    for k in queues:
        self.edge2queue[k].collect_data = True
[ "def", "start_collecting_data", "(", "self", ",", "queues", "=", "None", ",", "edge", "=", "None", ",", "edge_type", "=", "None", ")", ":", "queues", "=", "_get_queues", "(", "self", ".", "g", ",", "queues", ",", "edge", ",", "edge_type", ")", "for", "k", "in", "queues", ":", "self", ".", "edge2queue", "[", "k", "]", ".", "collect_data", "=", "True" ]
Tells the queues to collect data on agents' arrival, service start, and departure times. If none of the parameters are given then every :class:`.QueueServer` will start collecting data. Parameters ---------- queues : :any:`int`, *array_like* (optional) The edge index (or an iterable of edge indices) identifying the :class:`QueueServer(s)<.QueueServer>` that will start collecting data. edge : 2-tuple of int or *array_like* (optional) Explicitly specify which queues will collect data. Must be either: * A 2-tuple of the edge's source and target vertex indices, or * An iterable of 2-tuples of the edge's source and target vertex indices. edge_type : int or an iterable of int (optional) A integer, or a collection of integers identifying which edge types will be set active.
[ "Tells", "the", "queues", "to", "collect", "data", "on", "agents", "arrival", "service", "start", "and", "departure", "times", "." ]
python
valid
openstack/networking-arista
networking_arista/ml2/mechanism_arista.py
https://github.com/openstack/networking-arista/blob/07ce6b1fc62ff74308a6eabfc4cc0ee09fb7b0fe/networking_arista/ml2/mechanism_arista.py#L78-L82
def create_tenant(self, tenant_id):
    """Enqueue tenant create"""
    t_res = MechResource(tenant_id, a_const.TENANT_RESOURCE, a_const.CREATE)
    self.provision_queue.put(t_res)
[ "def", "create_tenant", "(", "self", ",", "tenant_id", ")", ":", "t_res", "=", "MechResource", "(", "tenant_id", ",", "a_const", ".", "TENANT_RESOURCE", ",", "a_const", ".", "CREATE", ")", "self", ".", "provision_queue", ".", "put", "(", "t_res", ")" ]
Enqueue tenant create
[ "Enqueue", "tenant", "create" ]
python
train
materialsproject/pymatgen
pymatgen/io/abinit/flows.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/flows.py#L278-L329
def pickle_load(cls, filepath, spectator_mode=True, remove_lock=False):
    """
    Loads the object from a pickle file and performs initial setup.

    Args:
        filepath: Filename or directory name. It filepath is a directory, we
            scan the directory tree starting from filepath and we
            read the first pickle database. Raise RuntimeError if multiple
            databases are found.
        spectator_mode: If True, the nodes of the flow are not connected by signals.
            This option is usually used when we want to read a flow
            in read-only mode and we want to avoid callbacks that can change the flow.
        remove_lock:
            True to remove the file lock if any (use it carefully).
    """
    if os.path.isdir(filepath):
        # Walk through each directory inside path and find the pickle database.
        for dirpath, dirnames, filenames in os.walk(filepath):
            fnames = [f for f in filenames if f == cls.PICKLE_FNAME]
            if fnames:
                if len(fnames) == 1:
                    filepath = os.path.join(dirpath, fnames[0])
                    break  # Exit os.walk
                else:
                    err_msg = "Found multiple databases:\n %s" % str(fnames)
                    raise RuntimeError(err_msg)
        else:
            err_msg = "Cannot find %s inside directory %s" % (cls.PICKLE_FNAME, filepath)
            raise ValueError(err_msg)

    if remove_lock and os.path.exists(filepath + ".lock"):
        try:
            os.remove(filepath + ".lock")
        except:
            pass

    with FileLock(filepath):
        with open(filepath, "rb") as fh:
            flow = pmg_pickle_load(fh)

    # Check if versions match.
    if flow.VERSION != cls.VERSION:
        msg = ("File flow version %s != latest version %s\n."
               "Regenerate the flow to solve the problem " % (flow.VERSION, cls.VERSION))
        warnings.warn(msg)

    flow.set_spectator_mode(spectator_mode)

    # Recompute the status of each task since tasks that
    # have been submitted previously might be completed.
    flow.check_status()

    return flow
[ "def", "pickle_load", "(", "cls", ",", "filepath", ",", "spectator_mode", "=", "True", ",", "remove_lock", "=", "False", ")", ":", "if", "os", ".", "path", ".", "isdir", "(", "filepath", ")", ":", "# Walk through each directory inside path and find the pickle database.", "for", "dirpath", ",", "dirnames", ",", "filenames", "in", "os", ".", "walk", "(", "filepath", ")", ":", "fnames", "=", "[", "f", "for", "f", "in", "filenames", "if", "f", "==", "cls", ".", "PICKLE_FNAME", "]", "if", "fnames", ":", "if", "len", "(", "fnames", ")", "==", "1", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "dirpath", ",", "fnames", "[", "0", "]", ")", "break", "# Exit os.walk", "else", ":", "err_msg", "=", "\"Found multiple databases:\\n %s\"", "%", "str", "(", "fnames", ")", "raise", "RuntimeError", "(", "err_msg", ")", "else", ":", "err_msg", "=", "\"Cannot find %s inside directory %s\"", "%", "(", "cls", ".", "PICKLE_FNAME", ",", "filepath", ")", "raise", "ValueError", "(", "err_msg", ")", "if", "remove_lock", "and", "os", ".", "path", ".", "exists", "(", "filepath", "+", "\".lock\"", ")", ":", "try", ":", "os", ".", "remove", "(", "filepath", "+", "\".lock\"", ")", "except", ":", "pass", "with", "FileLock", "(", "filepath", ")", ":", "with", "open", "(", "filepath", ",", "\"rb\"", ")", "as", "fh", ":", "flow", "=", "pmg_pickle_load", "(", "fh", ")", "# Check if versions match.", "if", "flow", ".", "VERSION", "!=", "cls", ".", "VERSION", ":", "msg", "=", "(", "\"File flow version %s != latest version %s\\n.\"", "\"Regenerate the flow to solve the problem \"", "%", "(", "flow", ".", "VERSION", ",", "cls", ".", "VERSION", ")", ")", "warnings", ".", "warn", "(", "msg", ")", "flow", ".", "set_spectator_mode", "(", "spectator_mode", ")", "# Recompute the status of each task since tasks that", "# have been submitted previously might be completed.", "flow", ".", "check_status", "(", ")", "return", "flow" ]
Loads the object from a pickle file and performs initial setup. Args: filepath: Filename or directory name. It filepath is a directory, we scan the directory tree starting from filepath and we read the first pickle database. Raise RuntimeError if multiple databases are found. spectator_mode: If True, the nodes of the flow are not connected by signals. This option is usually used when we want to read a flow in read-only mode and we want to avoid callbacks that can change the flow. remove_lock: True to remove the file lock if any (use it carefully).
[ "Loads", "the", "object", "from", "a", "pickle", "file", "and", "performs", "initial", "setup", "." ]
python
train
foutaise/texttable
texttable.py
https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L280-L292
def set_cols_align(self, array):
    """Set the desired columns alignment

    - the elements of the array should be either "l", "c" or "r":

        * "l": column flushed left
        * "c": column centered
        * "r": column flushed right
    """
    self._check_row_size(array)
    self._align = array
    return self
[ "def", "set_cols_align", "(", "self", ",", "array", ")", ":", "self", ".", "_check_row_size", "(", "array", ")", "self", ".", "_align", "=", "array", "return", "self" ]
Set the desired columns alignment - the elements of the array should be either "l", "c" or "r": * "l": column flushed left * "c": column centered * "r": column flushed right
[ "Set", "the", "desired", "columns", "alignment" ]
python
train
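Assuming the texttable package is installed, a typical call chain might look like the sketch below; the column contents are invented for the example, and the chained style works because set_cols_align() returns self, as shown above:

    from texttable import Texttable

    table = Texttable()
    table.set_cols_align(["l", "c", "r"])   # left, centered, right
    table.add_rows([["name", "grade", "age"],
                    ["Alice", "A", 30],
                    ["Bob", "B", 25]])
    print(table.draw())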
pyviz/imagen
imagen/patterngenerator.py
https://github.com/pyviz/imagen/blob/53c5685c880f54b42795964d8db50b02e8590e88/imagen/patterngenerator.py#L516-L522
def state_push(self):
    """
    Push the state of all generators
    """
    super(Composite, self).state_push()
    for gen in self.generators:
        gen.state_push()
[ "def", "state_push", "(", "self", ")", ":", "super", "(", "Composite", ",", "self", ")", ".", "state_push", "(", ")", "for", "gen", "in", "self", ".", "generators", ":", "gen", ".", "state_push", "(", ")" ]
Push the state of all generators
[ "Push", "the", "state", "of", "all", "generators" ]
python
train
callowayproject/Calloway
calloway/apps/django_ext/templatetags/listutil.py
https://github.com/callowayproject/Calloway/blob/d22e98d41fbd298ab6393ba7bd84a75528be9f81/calloway/apps/django_ext/templatetags/listutil.py#L77-L103
def partition_horizontal_twice(thelist, numbers):
    """
    numbers is split on a comma to n and n2.
    Break a list into peices each peice alternating between n and n2 items long

    ``partition_horizontal_twice(range(14), "3,4")`` gives::

        [[0, 1, 2],
         [3, 4, 5, 6],
         [7, 8, 9],
         [10, 11, 12, 13]]

    Clear as mud?
    """
    n, n2 = numbers.split(',')
    try:
        n = int(n)
        n2 = int(n2)
        thelist = list(thelist)
    except (ValueError, TypeError):
        return [thelist]
    newlists = []
    while thelist:
        newlists.append(thelist[:n])
        thelist = thelist[n:]
        newlists.append(thelist[:n2])
        thelist = thelist[n2:]
    return newlists
[ "def", "partition_horizontal_twice", "(", "thelist", ",", "numbers", ")", ":", "n", ",", "n2", "=", "numbers", ".", "split", "(", "','", ")", "try", ":", "n", "=", "int", "(", "n", ")", "n2", "=", "int", "(", "n2", ")", "thelist", "=", "list", "(", "thelist", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "return", "[", "thelist", "]", "newlists", "=", "[", "]", "while", "thelist", ":", "newlists", ".", "append", "(", "thelist", "[", ":", "n", "]", ")", "thelist", "=", "thelist", "[", "n", ":", "]", "newlists", ".", "append", "(", "thelist", "[", ":", "n2", "]", ")", "thelist", "=", "thelist", "[", "n2", ":", "]", "return", "newlists" ]
numbers is split on a comma to n and n2. Break a list into pieces, each piece alternating between n and n2 items long. ``partition_horizontal_twice(range(14), "3,4")`` gives:: [[0, 1, 2], [3, 4, 5, 6], [7, 8, 9], [10, 11, 12, 13]] Clear as mud?
[ "numbers", "is", "split", "on", "a", "comma", "to", "n", "and", "n2", ".", "Break", "a", "list", "into", "peices", "each", "peice", "alternating", "between", "n", "and", "n2", "items", "long", "partition_horizontal_twice", "(", "range", "(", "14", ")", "3", "4", ")", "gives", "::", "[[", "0", "1", "2", "]", "[", "3", "4", "5", "6", "]", "[", "7", "8", "9", "]", "[", "10", "11", "12", "13", "]]", "Clear", "as", "mud?" ]
python
train
squaresLab/BugZoo
bugzoo/mgr/build.py
https://github.com/squaresLab/BugZoo/blob/68664f1977e85b37a78604f7c570382ffae1fa3b/bugzoo/mgr/build.py#L192-L216
def upload(self, name: str) -> bool: """ Attempts to upload a given Docker image from this server to DockerHub. Parameters: name: the name of the Docker image. Returns: `True` if successfully uploaded, otherwise `False`. """ try: out = self.__docker.images.push(name, stream=True) for line in out: line = line.strip().decode('utf-8') jsn = json.loads(line) if 'progress' in jsn: line = "{}. {}.".format(jsn['status'], jsn['progress']) print(line, end='\r') elif 'status' in jsn: print(jsn['status']) print('uploaded image to DockerHub: {}'.format(name)) return True except docker.errors.NotFound: print("Failed to push image ({}): not installed.".format(name)) return False
[ "def", "upload", "(", "self", ",", "name", ":", "str", ")", "->", "bool", ":", "try", ":", "out", "=", "self", ".", "__docker", ".", "images", ".", "push", "(", "name", ",", "stream", "=", "True", ")", "for", "line", "in", "out", ":", "line", "=", "line", ".", "strip", "(", ")", ".", "decode", "(", "'utf-8'", ")", "jsn", "=", "json", ".", "loads", "(", "line", ")", "if", "'progress'", "in", "jsn", ":", "line", "=", "\"{}. {}.\"", ".", "format", "(", "jsn", "[", "'status'", "]", ",", "jsn", "[", "'progress'", "]", ")", "print", "(", "line", ",", "end", "=", "'\\r'", ")", "elif", "'status'", "in", "jsn", ":", "print", "(", "jsn", "[", "'status'", "]", ")", "print", "(", "'uploaded image to DockerHub: {}'", ".", "format", "(", "name", ")", ")", "return", "True", "except", "docker", ".", "errors", ".", "NotFound", ":", "print", "(", "\"Failed to push image ({}): not installed.\"", ".", "format", "(", "name", ")", ")", "return", "False" ]
Attempts to upload a given Docker image from this server to DockerHub. Parameters: name: the name of the Docker image. Returns: `True` if successfully uploaded, otherwise `False`.
[ "Attempts", "to", "upload", "a", "given", "Docker", "image", "from", "this", "server", "to", "DockerHub", "." ]
python
train
materialsproject/pymatgen
pymatgen/io/abinit/abitimer.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/abitimer.py#L696-L706
def get_values(self, keys): """ Return a list of values associated to a particular list of keys. """ if is_string(keys): return [s.__dict__[keys] for s in self.sections] else: values = [] for k in keys: values.append([s.__dict__[k] for s in self.sections]) return values
[ "def", "get_values", "(", "self", ",", "keys", ")", ":", "if", "is_string", "(", "keys", ")", ":", "return", "[", "s", ".", "__dict__", "[", "keys", "]", "for", "s", "in", "self", ".", "sections", "]", "else", ":", "values", "=", "[", "]", "for", "k", "in", "keys", ":", "values", ".", "append", "(", "[", "s", ".", "__dict__", "[", "k", "]", "for", "s", "in", "self", ".", "sections", "]", ")", "return", "values" ]
Return a list of values associated to a particular list of keys.
[ "Return", "a", "list", "of", "values", "associated", "to", "a", "particular", "list", "of", "keys", "." ]
python
train
pydsigner/pygu
pygu/pyramid.py
https://github.com/pydsigner/pygu/blob/09fe71534900933908ab83db12f5659b7827e31c/pygu/pyramid.py#L170-L176
def load_image(self, loc, title, group): ''' Used internally when loading images. You should probably use load_objects(). ''' self.images.setdefault(group, {}) self.images[group][title] = pygame.image.load(loc).convert_alpha()
[ "def", "load_image", "(", "self", ",", "loc", ",", "title", ",", "group", ")", ":", "self", ".", "images", ".", "setdefault", "(", "group", ",", "{", "}", ")", "self", ".", "images", "[", "group", "]", "[", "title", "]", "=", "pygame", ".", "image", ".", "load", "(", "loc", ")", ".", "convert_alpha", "(", ")" ]
Used internally when loading images. You should probably use load_objects().
[ "Used", "internally", "when", "loading", "images", ".", "You", "should", "probably", "use", "load_objects", "()", "." ]
python
train
OpenHumans/open-humans-api
ohapi/api.py
https://github.com/OpenHumans/open-humans-api/blob/ca2a28cf5d55cfdae13dd222ba58c25565bdb86e/ohapi/api.py#L166-L203
def delete_file(access_token, project_member_id=None, base_url=OH_BASE_URL, file_basename=None, file_id=None, all_files=False): """ Delete project member files by file_basename, file_id, or all_files. To learn more about Open Humans OAuth2 projects, go to: https://www.openhumans.org/direct-sharing/oauth2-features/. :param access_token: This field is user specific access_token. :param project_member_id: This field is the project member id of user. It's default value is None. :param base_url: It is this URL `https://www.openhumans.org`. :param file_basename: This field is the name of the file to delete for the particular user for the particular project. :param file_id: This field is the id of the file to delete for the particular user for the particular project. :param all_files: This is a boolean field to delete all files for the particular user for the particular project. """ url = urlparse.urljoin( base_url, '/api/direct-sharing/project/files/delete/?{}'.format( urlparse.urlencode({'access_token': access_token}))) if not(project_member_id): response = exchange_oauth2_member(access_token, base_url=base_url) project_member_id = response['project_member_id'] data = {'project_member_id': project_member_id} if file_basename and not (file_id or all_files): data['file_basename'] = file_basename elif file_id and not (file_basename or all_files): data['file_id'] = file_id elif all_files and not (file_id or file_basename): data['all_files'] = True else: raise ValueError( "One (and only one) of the following must be specified: " "file_basename, file_id, or all_files is set to True.") response = requests.post(url, data=data) handle_error(response, 200) return response
[ "def", "delete_file", "(", "access_token", ",", "project_member_id", "=", "None", ",", "base_url", "=", "OH_BASE_URL", ",", "file_basename", "=", "None", ",", "file_id", "=", "None", ",", "all_files", "=", "False", ")", ":", "url", "=", "urlparse", ".", "urljoin", "(", "base_url", ",", "'/api/direct-sharing/project/files/delete/?{}'", ".", "format", "(", "urlparse", ".", "urlencode", "(", "{", "'access_token'", ":", "access_token", "}", ")", ")", ")", "if", "not", "(", "project_member_id", ")", ":", "response", "=", "exchange_oauth2_member", "(", "access_token", ",", "base_url", "=", "base_url", ")", "project_member_id", "=", "response", "[", "'project_member_id'", "]", "data", "=", "{", "'project_member_id'", ":", "project_member_id", "}", "if", "file_basename", "and", "not", "(", "file_id", "or", "all_files", ")", ":", "data", "[", "'file_basename'", "]", "=", "file_basename", "elif", "file_id", "and", "not", "(", "file_basename", "or", "all_files", ")", ":", "data", "[", "'file_id'", "]", "=", "file_id", "elif", "all_files", "and", "not", "(", "file_id", "or", "file_basename", ")", ":", "data", "[", "'all_files'", "]", "=", "True", "else", ":", "raise", "ValueError", "(", "\"One (and only one) of the following must be specified: \"", "\"file_basename, file_id, or all_files is set to True.\"", ")", "response", "=", "requests", ".", "post", "(", "url", ",", "data", "=", "data", ")", "handle_error", "(", "response", ",", "200", ")", "return", "response" ]
Delete project member files by file_basename, file_id, or all_files. To learn more about Open Humans OAuth2 projects, go to: https://www.openhumans.org/direct-sharing/oauth2-features/. :param access_token: This field is user specific access_token. :param project_member_id: This field is the project member id of user. It's default value is None. :param base_url: It is this URL `https://www.openhumans.org`. :param file_basename: This field is the name of the file to delete for the particular user for the particular project. :param file_id: This field is the id of the file to delete for the particular user for the particular project. :param all_files: This is a boolean field to delete all files for the particular user for the particular project.
[ "Delete", "project", "member", "files", "by", "file_basename", "file_id", "or", "all_files", ".", "To", "learn", "more", "about", "Open", "Humans", "OAuth2", "projects", "go", "to", ":", "https", ":", "//", "www", ".", "openhumans", ".", "org", "/", "direct", "-", "sharing", "/", "oauth2", "-", "features", "/", "." ]
python
train
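A hypothetical call for the entry above; the token, project member id and filename are placeholders, not real values.

from ohapi.api import delete_file

response = delete_file(
    access_token="ACCESS_TOKEN",        # placeholder OAuth2 token
    project_member_id="12345678",       # optional; resolved from the token when omitted
    file_basename="genome-data.json",   # exactly one of file_basename / file_id / all_files
)
print(response.status_code)             # 200 on success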
Loudr/pale
pale/doc.py
https://github.com/Loudr/pale/blob/dc002ee6032c856551143af222ff8f71ed9853fe/pale/doc.py#L107-L169
def generate_raml_docs(module, fields, shared_types, user=None, title="My API", version="v1", api_root="api", base_uri="http://mysite.com/{version}"): """Return a RAML file of a Pale module's documentation as a string. The user argument is optional. If included, it expects the user to be an object with an "is_admin" boolean attribute. Any endpoint protected with a "@requires_permission" decorator will require user.is_admin == True to display documentation on that endpoint. The arguments for 'title', 'version', and 'base_uri' are added to the RAML header info. """ output = StringIO() # Add the RAML header info output.write('#%RAML 1.0 \n') output.write('title: ' + title + ' \n') output.write('baseUri: ' + base_uri + ' \n') output.write('version: ' + version + '\n') output.write('mediaType: application/json\n\n') output.write('documentation:\n') output.write(' - title: Welcome\n') output.write(' content: |\n') output.write("""\ Welcome to the Loudr API Docs.\n You'll find comprehensive documentation on our endpoints and resources here. """) output.write("\n###############\n# Resource Types:\n###############\n\n") output.write('types:\n') basic_fields = [] for field_module in inspect.getmembers(fields, inspect.ismodule): for field_class in inspect.getmembers(field_module[1], inspect.isclass): basic_fields.append(field_class[1]) pale_basic_types = generate_basic_type_docs(basic_fields, {}) output.write("\n# Pale Basic Types:\n\n") output.write(pale_basic_types[0]) shared_fields = [] for shared_type in shared_types: for field_class in inspect.getmembers(shared_type, inspect.isclass): shared_fields.append(field_class[1]) pale_shared_types = generate_basic_type_docs(shared_fields, pale_basic_types[1]) output.write("\n# Pale Shared Types:\n\n") output.write(pale_shared_types[0]) raml_resource_types = generate_raml_resource_types(module) output.write("\n# API Resource Types:\n\n") output.write(raml_resource_types) raml_resources = generate_raml_resources(module, api_root, user) output.write("\n\n###############\n# API Endpoints:\n###############\n\n") output.write(raml_resources) raml_docs = output.getvalue() output.close() return raml_docs
[ "def", "generate_raml_docs", "(", "module", ",", "fields", ",", "shared_types", ",", "user", "=", "None", ",", "title", "=", "\"My API\"", ",", "version", "=", "\"v1\"", ",", "api_root", "=", "\"api\"", ",", "base_uri", "=", "\"http://mysite.com/{version}\"", ")", ":", "output", "=", "StringIO", "(", ")", "# Add the RAML header info", "output", ".", "write", "(", "'#%RAML 1.0 \\n'", ")", "output", ".", "write", "(", "'title: '", "+", "title", "+", "' \\n'", ")", "output", ".", "write", "(", "'baseUri: '", "+", "base_uri", "+", "' \\n'", ")", "output", ".", "write", "(", "'version: '", "+", "version", "+", "'\\n'", ")", "output", ".", "write", "(", "'mediaType: application/json\\n\\n'", ")", "output", ".", "write", "(", "'documentation:\\n'", ")", "output", ".", "write", "(", "' - title: Welcome\\n'", ")", "output", ".", "write", "(", "' content: |\\n'", ")", "output", ".", "write", "(", "\"\"\"\\\n Welcome to the Loudr API Docs.\\n\n You'll find comprehensive documentation on our endpoints and resources here.\n \"\"\"", ")", "output", ".", "write", "(", "\"\\n###############\\n# Resource Types:\\n###############\\n\\n\"", ")", "output", ".", "write", "(", "'types:\\n'", ")", "basic_fields", "=", "[", "]", "for", "field_module", "in", "inspect", ".", "getmembers", "(", "fields", ",", "inspect", ".", "ismodule", ")", ":", "for", "field_class", "in", "inspect", ".", "getmembers", "(", "field_module", "[", "1", "]", ",", "inspect", ".", "isclass", ")", ":", "basic_fields", ".", "append", "(", "field_class", "[", "1", "]", ")", "pale_basic_types", "=", "generate_basic_type_docs", "(", "basic_fields", ",", "{", "}", ")", "output", ".", "write", "(", "\"\\n# Pale Basic Types:\\n\\n\"", ")", "output", ".", "write", "(", "pale_basic_types", "[", "0", "]", ")", "shared_fields", "=", "[", "]", "for", "shared_type", "in", "shared_types", ":", "for", "field_class", "in", "inspect", ".", "getmembers", "(", "shared_type", ",", "inspect", ".", "isclass", ")", ":", "shared_fields", ".", "append", "(", "field_class", "[", "1", "]", ")", "pale_shared_types", "=", "generate_basic_type_docs", "(", "shared_fields", ",", "pale_basic_types", "[", "1", "]", ")", "output", ".", "write", "(", "\"\\n# Pale Shared Types:\\n\\n\"", ")", "output", ".", "write", "(", "pale_shared_types", "[", "0", "]", ")", "raml_resource_types", "=", "generate_raml_resource_types", "(", "module", ")", "output", ".", "write", "(", "\"\\n# API Resource Types:\\n\\n\"", ")", "output", ".", "write", "(", "raml_resource_types", ")", "raml_resources", "=", "generate_raml_resources", "(", "module", ",", "api_root", ",", "user", ")", "output", ".", "write", "(", "\"\\n\\n###############\\n# API Endpoints:\\n###############\\n\\n\"", ")", "output", ".", "write", "(", "raml_resources", ")", "raml_docs", "=", "output", ".", "getvalue", "(", ")", "output", ".", "close", "(", ")", "return", "raml_docs" ]
Return a RAML file of a Pale module's documentation as a string. The user argument is optional. If included, it expects the user to be an object with an "is_admin" boolean attribute. Any endpoint protected with a "@requires_permission" decorator will require user.is_admin == True to display documentation on that endpoint. The arguments for 'title', 'version', and 'base_uri' are added to the RAML header info.
[ "Return", "a", "RAML", "file", "of", "a", "Pale", "module", "s", "documentation", "as", "a", "string", "." ]
python
train
wandb/client
wandb/vendor/prompt_toolkit/key_binding/bindings/named_commands.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/vendor/prompt_toolkit/key_binding/bindings/named_commands.py#L214-L229
def transpose_chars(event): """ Emulate Emacs transpose-char behavior: at the beginning of the buffer, do nothing. At the end of a line or buffer, swap the characters before the cursor. Otherwise, move the cursor right, and then swap the characters before the cursor. """ b = event.current_buffer p = b.cursor_position if p == 0: return elif p == len(b.text) or b.text[p] == '\n': b.swap_characters_before_cursor() else: b.cursor_position += b.document.get_cursor_right_position() b.swap_characters_before_cursor()
[ "def", "transpose_chars", "(", "event", ")", ":", "b", "=", "event", ".", "current_buffer", "p", "=", "b", ".", "cursor_position", "if", "p", "==", "0", ":", "return", "elif", "p", "==", "len", "(", "b", ".", "text", ")", "or", "b", ".", "text", "[", "p", "]", "==", "'\\n'", ":", "b", ".", "swap_characters_before_cursor", "(", ")", "else", ":", "b", ".", "cursor_position", "+=", "b", ".", "document", ".", "get_cursor_right_position", "(", ")", "b", ".", "swap_characters_before_cursor", "(", ")" ]
Emulate Emacs transpose-char behavior: at the beginning of the buffer, do nothing. At the end of a line or buffer, swap the characters before the cursor. Otherwise, move the cursor right, and then swap the characters before the cursor.
[ "Emulate", "Emacs", "transpose", "-", "char", "behavior", ":", "at", "the", "beginning", "of", "the", "buffer", "do", "nothing", ".", "At", "the", "end", "of", "a", "line", "or", "buffer", "swap", "the", "characters", "before", "the", "cursor", ".", "Otherwise", "move", "the", "cursor", "right", "and", "then", "swap", "the", "characters", "before", "the", "cursor", "." ]
python
train
LasLabs/python-helpscout
helpscout/apis/conversations.py
https://github.com/LasLabs/python-helpscout/blob/84bf669417d72ca19641a02c9a660e1ae4271de4/helpscout/apis/conversations.py#L228-L240
def list(cls, session, mailbox): """Return conversations in a mailbox. Args: session (requests.sessions.Session): Authenticated session. mailbox (helpscout.models.Mailbox): Mailbox to list. Returns: RequestPaginator(output_type=helpscout.models.Conversation): Conversations iterator. """ endpoint = '/mailboxes/%d/conversations.json' % mailbox.id return super(Conversations, cls).list(session, endpoint)
[ "def", "list", "(", "cls", ",", "session", ",", "mailbox", ")", ":", "endpoint", "=", "'/mailboxes/%d/conversations.json'", "%", "mailbox", ".", "id", "return", "super", "(", "Conversations", ",", "cls", ")", ".", "list", "(", "session", ",", "endpoint", ")" ]
Return conversations in a mailbox. Args: session (requests.sessions.Session): Authenticated session. mailbox (helpscout.models.Mailbox): Mailbox to list. Returns: RequestPaginator(output_type=helpscout.models.Conversation): Conversations iterator.
[ "Return", "conversations", "in", "a", "mailbox", "." ]
python
train
Qiskit/qiskit-terra
qiskit/compiler/disassembler.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/compiler/disassembler.py#L81-L96
def disassemble(qobj): """Dissasemble a qobj and return the circuits, run_config, and user header Args: qobj (Qobj): The input qobj object to dissasemble Returns: circuits (list): A list of quantum circuits run_config (dict): The dist of the run config user_qobj_header (dict): The dict of any user headers in the qobj """ run_config = qobj.config.to_dict() user_qobj_header = qobj.header.to_dict() circuits = _experiments_to_circuits(qobj) return circuits, run_config, user_qobj_header
[ "def", "disassemble", "(", "qobj", ")", ":", "run_config", "=", "qobj", ".", "config", ".", "to_dict", "(", ")", "user_qobj_header", "=", "qobj", ".", "header", ".", "to_dict", "(", ")", "circuits", "=", "_experiments_to_circuits", "(", "qobj", ")", "return", "circuits", ",", "run_config", ",", "user_qobj_header" ]
Disassemble a qobj and return the circuits, run_config, and user header Args: qobj (Qobj): The input qobj object to disassemble Returns: circuits (list): A list of quantum circuits run_config (dict): The dict of the run config user_qobj_header (dict): The dict of any user headers in the qobj
[ "Dissasemble", "a", "qobj", "and", "return", "the", "circuits", "run_config", "and", "user", "header" ]
python
test
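A round-trip sketch for the entry above, assuming `disassemble` is exported from qiskit.compiler alongside `assemble` in the Terra version that ships this module.

from qiskit import QuantumCircuit
from qiskit.compiler import assemble, disassemble

qc = QuantumCircuit(2, 2)
qc.h(0)
qc.cx(0, 1)
qc.measure([0, 1], [0, 1])

qobj = assemble(qc, shots=1024)                    # circuit -> Qobj
circuits, run_config, headers = disassemble(qobj)  # Qobj -> circuits, run config, user header
print(run_config["shots"])                         # 1024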
fumitoh/modelx
modelx/core/space.py
https://github.com/fumitoh/modelx/blob/0180da34d052c44fb94dab9e115e218bbebfc9c3/modelx/core/space.py#L1576-L1580
def evalrepr(self): """Evaluable repr""" args = [repr(arg) for arg in get_interfaces(self.argvalues)] param = ", ".join(args) return "%s(%s)" % (self.parent.evalrepr, param)
[ "def", "evalrepr", "(", "self", ")", ":", "args", "=", "[", "repr", "(", "arg", ")", "for", "arg", "in", "get_interfaces", "(", "self", ".", "argvalues", ")", "]", "param", "=", "\", \"", ".", "join", "(", "args", ")", "return", "\"%s(%s)\"", "%", "(", "self", ".", "parent", ".", "evalrepr", ",", "param", ")" ]
Evaluable repr
[ "Evaluable", "repr" ]
python
valid
F5Networks/f5-common-python
f5/multi_device/trust_domain.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/multi_device/trust_domain.py#L114-L131
def _populate_domain(self): '''Populate TrustDomain's domain attribute. This entails an inspection of each device's certificate-authority devices in its trust domain and recording them. After which, we get a dictionary of who trusts who in the domain. ''' self.domain = {} for device in self.devices: device_name = get_device_info(device).name ca_devices = \ device.tm.cm.trust_domains.trust_domain.load( name='Root' ).caDevices self.domain[device_name] = [ d.replace('/%s/' % self.partition, '') for d in ca_devices ]
[ "def", "_populate_domain", "(", "self", ")", ":", "self", ".", "domain", "=", "{", "}", "for", "device", "in", "self", ".", "devices", ":", "device_name", "=", "get_device_info", "(", "device", ")", ".", "name", "ca_devices", "=", "device", ".", "tm", ".", "cm", ".", "trust_domains", ".", "trust_domain", ".", "load", "(", "name", "=", "'Root'", ")", ".", "caDevices", "self", ".", "domain", "[", "device_name", "]", "=", "[", "d", ".", "replace", "(", "'/%s/'", "%", "self", ".", "partition", ",", "''", ")", "for", "d", "in", "ca_devices", "]" ]
Populate TrustDomain's domain attribute. This entails inspecting each device's certificate-authority devices in its trust domain and recording them. After that, we have a dictionary of who trusts whom in the domain.
[ "Populate", "TrustDomain", "s", "domain", "attribute", "." ]
python
train
DEIB-GECO/PyGMQL
gmql/dataset/GMQLDataset.py
https://github.com/DEIB-GECO/PyGMQL/blob/e58b2f9402a86056dcda484a32e3de0bb06ed991/gmql/dataset/GMQLDataset.py#L554-L650
def cover(self, minAcc, maxAcc, groupBy=None, new_reg_fields=None, cover_type="normal"): """ *Wrapper of* ``COVER`` COVER is a GMQL operator that takes as input a dataset (of usually, but not necessarily, multiple samples) and returns another dataset (with a single sample, if no groupby option is specified) by “collapsing” the input samples and their regions according to certain rules specified by the COVER parameters. The attributes of the output regions are only the region coordinates, plus in case, when aggregate functions are specified, new attributes with aggregate values over attribute values of the contributing input regions; output metadata are the union of the input ones, plus the metadata attributes JaccardIntersect and JaccardResult, representing global Jaccard Indexes for the considered dataset, computed as the correspondent region Jaccard Indexes but on the whole sample regions. :param cover_type: the kind of cover variant you want ['normal', 'flat', 'summit', 'histogram'] :param minAcc: minimum accumulation value, i.e. the minimum number of overlapping regions to be considered during COVER execution. It can be any positive number or the strings {'ALL', 'ANY'}. :param maxAcc: maximum accumulation value, i.e. the maximum number of overlapping regions to be considered during COVER execution. It can be any positive number or the strings {'ALL', 'ANY'}. :param groupBy: optional list of metadata attributes :param new_reg_fields: dictionary of the type {'new_region_attribute' : AGGREGATE_FUNCTION('field'), ...} :return: a new GMQLDataset An example of usage:: cell_tf = narrow_peak.cover("normal", minAcc=1, maxAcc="Any", groupBy=['cell', 'antibody_target']) """ if isinstance(cover_type, str): coverFlag = self.opmng.getCoverTypes(cover_type) else: raise TypeError("type must be a string. " "{} was provided".format(type(cover_type))) if isinstance(minAcc, str): minAccParam = self.opmng.getCoverParam(minAcc.lower()) elif isinstance(minAcc, int): minAccParam = self.opmng.getCoverParam(str(minAcc).lower()) else: raise TypeError("minAcc must be a string or an integer. " "{} was provided".format(type(minAcc))) if isinstance(maxAcc, str): maxAccParam = self.opmng.getCoverParam(maxAcc.lower()) elif isinstance(maxAcc, int): maxAccParam = self.opmng.getCoverParam(str(maxAcc).lower()) else: raise TypeError("maxAcc must be a string or an integer. " "{} was provided".format(type(minAcc))) if isinstance(groupBy, list) and \ all([isinstance(x, str) for x in groupBy]): groupBy_result = Some(groupBy) elif groupBy is None: groupBy_result = none() else: raise TypeError("groupBy must be a list of string. " "{} was provided".format(type(groupBy))) aggregates = [] if isinstance(new_reg_fields, dict): expBuild = self.pmg.getNewExpressionBuilder(self.__index) for k in new_reg_fields.keys(): if isinstance(k, str): item = new_reg_fields[k] if isinstance(item, (SUM, MIN, MAX, AVG, BAG, BAGD, MEDIAN, COUNT)): op_name = item.get_aggregate_name() op_argument = item.get_argument() if op_argument is None: op_argument = none() else: op_argument = Some(op_argument) regsToReg = expBuild.getRegionsToRegion(op_name, k, op_argument) aggregates.append(regsToReg) else: raise TypeError("The items in new_reg_fields must be Aggregates (SUM, MIN, MAX, AVG, BAG, " "BAGD, MEDIAN, COUNT)" " {} was provided".format(type(item))) else: raise TypeError("The key of new_reg_fields must be a string. " "{} was provided".format(type(k))) elif new_reg_fields is None: pass else: raise TypeError("new_reg_fields must be a list of dictionary. 
" "{} was provided".format(type(new_reg_fields))) new_index = self.opmng.cover(self.__index, coverFlag, minAccParam, maxAccParam, groupBy_result, aggregates) return GMQLDataset(index=new_index, location=self.location, local_sources=self._local_sources, remote_sources=self._remote_sources, meta_profile=self.meta_profile)
[ "def", "cover", "(", "self", ",", "minAcc", ",", "maxAcc", ",", "groupBy", "=", "None", ",", "new_reg_fields", "=", "None", ",", "cover_type", "=", "\"normal\"", ")", ":", "if", "isinstance", "(", "cover_type", ",", "str", ")", ":", "coverFlag", "=", "self", ".", "opmng", ".", "getCoverTypes", "(", "cover_type", ")", "else", ":", "raise", "TypeError", "(", "\"type must be a string. \"", "\"{} was provided\"", ".", "format", "(", "type", "(", "cover_type", ")", ")", ")", "if", "isinstance", "(", "minAcc", ",", "str", ")", ":", "minAccParam", "=", "self", ".", "opmng", ".", "getCoverParam", "(", "minAcc", ".", "lower", "(", ")", ")", "elif", "isinstance", "(", "minAcc", ",", "int", ")", ":", "minAccParam", "=", "self", ".", "opmng", ".", "getCoverParam", "(", "str", "(", "minAcc", ")", ".", "lower", "(", ")", ")", "else", ":", "raise", "TypeError", "(", "\"minAcc must be a string or an integer. \"", "\"{} was provided\"", ".", "format", "(", "type", "(", "minAcc", ")", ")", ")", "if", "isinstance", "(", "maxAcc", ",", "str", ")", ":", "maxAccParam", "=", "self", ".", "opmng", ".", "getCoverParam", "(", "maxAcc", ".", "lower", "(", ")", ")", "elif", "isinstance", "(", "maxAcc", ",", "int", ")", ":", "maxAccParam", "=", "self", ".", "opmng", ".", "getCoverParam", "(", "str", "(", "maxAcc", ")", ".", "lower", "(", ")", ")", "else", ":", "raise", "TypeError", "(", "\"maxAcc must be a string or an integer. \"", "\"{} was provided\"", ".", "format", "(", "type", "(", "minAcc", ")", ")", ")", "if", "isinstance", "(", "groupBy", ",", "list", ")", "and", "all", "(", "[", "isinstance", "(", "x", ",", "str", ")", "for", "x", "in", "groupBy", "]", ")", ":", "groupBy_result", "=", "Some", "(", "groupBy", ")", "elif", "groupBy", "is", "None", ":", "groupBy_result", "=", "none", "(", ")", "else", ":", "raise", "TypeError", "(", "\"groupBy must be a list of string. \"", "\"{} was provided\"", ".", "format", "(", "type", "(", "groupBy", ")", ")", ")", "aggregates", "=", "[", "]", "if", "isinstance", "(", "new_reg_fields", ",", "dict", ")", ":", "expBuild", "=", "self", ".", "pmg", ".", "getNewExpressionBuilder", "(", "self", ".", "__index", ")", "for", "k", "in", "new_reg_fields", ".", "keys", "(", ")", ":", "if", "isinstance", "(", "k", ",", "str", ")", ":", "item", "=", "new_reg_fields", "[", "k", "]", "if", "isinstance", "(", "item", ",", "(", "SUM", ",", "MIN", ",", "MAX", ",", "AVG", ",", "BAG", ",", "BAGD", ",", "MEDIAN", ",", "COUNT", ")", ")", ":", "op_name", "=", "item", ".", "get_aggregate_name", "(", ")", "op_argument", "=", "item", ".", "get_argument", "(", ")", "if", "op_argument", "is", "None", ":", "op_argument", "=", "none", "(", ")", "else", ":", "op_argument", "=", "Some", "(", "op_argument", ")", "regsToReg", "=", "expBuild", ".", "getRegionsToRegion", "(", "op_name", ",", "k", ",", "op_argument", ")", "aggregates", ".", "append", "(", "regsToReg", ")", "else", ":", "raise", "TypeError", "(", "\"The items in new_reg_fields must be Aggregates (SUM, MIN, MAX, AVG, BAG, \"", "\"BAGD, MEDIAN, COUNT)\"", "\" {} was provided\"", ".", "format", "(", "type", "(", "item", ")", ")", ")", "else", ":", "raise", "TypeError", "(", "\"The key of new_reg_fields must be a string. \"", "\"{} was provided\"", ".", "format", "(", "type", "(", "k", ")", ")", ")", "elif", "new_reg_fields", "is", "None", ":", "pass", "else", ":", "raise", "TypeError", "(", "\"new_reg_fields must be a list of dictionary. 
\"", "\"{} was provided\"", ".", "format", "(", "type", "(", "new_reg_fields", ")", ")", ")", "new_index", "=", "self", ".", "opmng", ".", "cover", "(", "self", ".", "__index", ",", "coverFlag", ",", "minAccParam", ",", "maxAccParam", ",", "groupBy_result", ",", "aggregates", ")", "return", "GMQLDataset", "(", "index", "=", "new_index", ",", "location", "=", "self", ".", "location", ",", "local_sources", "=", "self", ".", "_local_sources", ",", "remote_sources", "=", "self", ".", "_remote_sources", ",", "meta_profile", "=", "self", ".", "meta_profile", ")" ]
*Wrapper of* ``COVER`` COVER is a GMQL operator that takes as input a dataset (of usually, but not necessarily, multiple samples) and returns another dataset (with a single sample, if no groupby option is specified) by “collapsing” the input samples and their regions according to certain rules specified by the COVER parameters. The attributes of the output regions are only the region coordinates, plus in case, when aggregate functions are specified, new attributes with aggregate values over attribute values of the contributing input regions; output metadata are the union of the input ones, plus the metadata attributes JaccardIntersect and JaccardResult, representing global Jaccard Indexes for the considered dataset, computed as the correspondent region Jaccard Indexes but on the whole sample regions. :param cover_type: the kind of cover variant you want ['normal', 'flat', 'summit', 'histogram'] :param minAcc: minimum accumulation value, i.e. the minimum number of overlapping regions to be considered during COVER execution. It can be any positive number or the strings {'ALL', 'ANY'}. :param maxAcc: maximum accumulation value, i.e. the maximum number of overlapping regions to be considered during COVER execution. It can be any positive number or the strings {'ALL', 'ANY'}. :param groupBy: optional list of metadata attributes :param new_reg_fields: dictionary of the type {'new_region_attribute' : AGGREGATE_FUNCTION('field'), ...} :return: a new GMQLDataset An example of usage:: cell_tf = narrow_peak.cover("normal", minAcc=1, maxAcc="Any", groupBy=['cell', 'antibody_target'])
[ "*", "Wrapper", "of", "*", "COVER" ]
python
train
night-crawler/django-docker-helpers
django_docker_helpers/config/backends/mpt_consul_parser.py
https://github.com/night-crawler/django-docker-helpers/blob/b64f8009101a8eb61d3841124ba19e3ab881aa2f/django_docker_helpers/config/backends/mpt_consul_parser.py#L76-L119
def get(self, variable_path: str, default: t.Optional[t.Any] = None, coerce_type: t.Optional[t.Type] = None, coercer: t.Optional[t.Callable] = None, **kwargs): """ :param variable_path: a delimiter-separated path to a nested value :param default: default value if there's no object by specified path :param coerce_type: cast a type of a value to a specified one :param coercer: perform a type casting with specified callback :param kwargs: additional arguments inherited parser may need :return: value or default """ if self.path_separator != self.consul_path_separator: variable_path = variable_path.replace(self.path_separator, self.consul_path_separator) if self.scope: _scope = self.consul_path_separator.join(self.scope.split(self.path_separator)) variable_path = '{0}/{1}'.format(_scope, variable_path) index, data = self.client.kv.get(variable_path, **kwargs) if data is None: return default val = data['Value'] if val is None: # None is present and it is a valid value return val if val.startswith(self.object_serialize_prefix): # since complex data types are yaml-serialized there's no need to coerce anything _val = val[len(self.object_serialize_prefix):] bundle = self.object_deserialize(_val) if bundle == '': # check for reinforced empty flag return self.coerce(bundle, coerce_type=coerce_type, coercer=coercer) return bundle if isinstance(val, bytes): val = val.decode() return self.coerce(val, coerce_type=coerce_type, coercer=coercer)
[ "def", "get", "(", "self", ",", "variable_path", ":", "str", ",", "default", ":", "t", ".", "Optional", "[", "t", ".", "Any", "]", "=", "None", ",", "coerce_type", ":", "t", ".", "Optional", "[", "t", ".", "Type", "]", "=", "None", ",", "coercer", ":", "t", ".", "Optional", "[", "t", ".", "Callable", "]", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "path_separator", "!=", "self", ".", "consul_path_separator", ":", "variable_path", "=", "variable_path", ".", "replace", "(", "self", ".", "path_separator", ",", "self", ".", "consul_path_separator", ")", "if", "self", ".", "scope", ":", "_scope", "=", "self", ".", "consul_path_separator", ".", "join", "(", "self", ".", "scope", ".", "split", "(", "self", ".", "path_separator", ")", ")", "variable_path", "=", "'{0}/{1}'", ".", "format", "(", "_scope", ",", "variable_path", ")", "index", ",", "data", "=", "self", ".", "client", ".", "kv", ".", "get", "(", "variable_path", ",", "*", "*", "kwargs", ")", "if", "data", "is", "None", ":", "return", "default", "val", "=", "data", "[", "'Value'", "]", "if", "val", "is", "None", ":", "# None is present and it is a valid value", "return", "val", "if", "val", ".", "startswith", "(", "self", ".", "object_serialize_prefix", ")", ":", "# since complex data types are yaml-serialized there's no need to coerce anything", "_val", "=", "val", "[", "len", "(", "self", ".", "object_serialize_prefix", ")", ":", "]", "bundle", "=", "self", ".", "object_deserialize", "(", "_val", ")", "if", "bundle", "==", "''", ":", "# check for reinforced empty flag", "return", "self", ".", "coerce", "(", "bundle", ",", "coerce_type", "=", "coerce_type", ",", "coercer", "=", "coercer", ")", "return", "bundle", "if", "isinstance", "(", "val", ",", "bytes", ")", ":", "val", "=", "val", ".", "decode", "(", ")", "return", "self", ".", "coerce", "(", "val", ",", "coerce_type", "=", "coerce_type", ",", "coercer", "=", "coercer", ")" ]
:param variable_path: a delimiter-separated path to a nested value :param default: default value if there's no object by specified path :param coerce_type: cast a type of a value to a specified one :param coercer: perform a type casting with specified callback :param kwargs: additional arguments inherited parser may need :return: value or default
[ ":", "param", "variable_path", ":", "a", "delimiter", "-", "separated", "path", "to", "a", "nested", "value", ":", "param", "default", ":", "default", "value", "if", "there", "s", "no", "object", "by", "specified", "path", ":", "param", "coerce_type", ":", "cast", "a", "type", "of", "a", "value", "to", "a", "specified", "one", ":", "param", "coercer", ":", "perform", "a", "type", "casting", "with", "specified", "callback", ":", "param", "kwargs", ":", "additional", "arguments", "inherited", "parser", "may", "need", ":", "return", ":", "value", "or", "default" ]
python
train
IBMStreams/pypi.streamsx
streamsx/topology/schema.py
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/topology/schema.py#L464-L473
def _set(self, schema): """Set a schema from another schema""" if isinstance(schema, CommonSchema): self._spl_type = False self._schema = schema.schema() self._style = self._default_style() else: self._spl_type = schema._spl_type self._schema = schema._schema self._style = schema._style
[ "def", "_set", "(", "self", ",", "schema", ")", ":", "if", "isinstance", "(", "schema", ",", "CommonSchema", ")", ":", "self", ".", "_spl_type", "=", "False", "self", ".", "_schema", "=", "schema", ".", "schema", "(", ")", "self", ".", "_style", "=", "self", ".", "_default_style", "(", ")", "else", ":", "self", ".", "_spl_type", "=", "schema", ".", "_spl_type", "self", ".", "_schema", "=", "schema", ".", "_schema", "self", ".", "_style", "=", "schema", ".", "_style" ]
Set a schema from another schema
[ "Set", "a", "schema", "from", "another", "schema" ]
python
train
eandersson/amqpstorm
amqpstorm/connection.py
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/connection.py#L211-L226
def open(self): """Open Connection. :raises AMQPConnectionError: Raises if the connection encountered an error. """ LOGGER.debug('Connection Opening') self.set_state(self.OPENING) self._exceptions = [] self._channels = {} self._last_channel_id = None self._io.open() self._send_handshake() self._wait_for_connection_state(state=Stateful.OPEN) self.heartbeat.start(self._exceptions) LOGGER.debug('Connection Opened')
[ "def", "open", "(", "self", ")", ":", "LOGGER", ".", "debug", "(", "'Connection Opening'", ")", "self", ".", "set_state", "(", "self", ".", "OPENING", ")", "self", ".", "_exceptions", "=", "[", "]", "self", ".", "_channels", "=", "{", "}", "self", ".", "_last_channel_id", "=", "None", "self", ".", "_io", ".", "open", "(", ")", "self", ".", "_send_handshake", "(", ")", "self", ".", "_wait_for_connection_state", "(", "state", "=", "Stateful", ".", "OPEN", ")", "self", ".", "heartbeat", ".", "start", "(", "self", ".", "_exceptions", ")", "LOGGER", ".", "debug", "(", "'Connection Opened'", ")" ]
Open Connection. :raises AMQPConnectionError: Raises if the connection encountered an error.
[ "Open", "Connection", "." ]
python
train
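A minimal sketch for the entry above; `lazy=True` defers the handshake so `open()` is called explicitly, and the broker address and credentials are placeholders.

import amqpstorm

connection = amqpstorm.Connection('localhost', 'guest', 'guest', lazy=True)
connection.open()                        # handshake and heartbeat start happen here
channel = connection.channel()
channel.queue.declare('hello')
channel.basic.publish(body='ping', routing_key='hello')
connection.close()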
codenerix/django-codenerix
codenerix/authbackend.py
https://github.com/codenerix/django-codenerix/blob/1f5527b352141caaee902b37b2648791a06bd57d/codenerix/authbackend.py#L698-L717
def synchronize(self, user, info): ''' It tries to do a group synchronization if possible This methods should be redeclared by the developer ''' self.debug("Synchronize!") # Remove all groups from this user user.groups.clear() # For all domains found for this user for domain in info['groups']: # For all groups he is for groupname in info['groups'][domain]: # Lookup for that group group = Group.objects.filter(name=groupname).first() if group: # If found, add the user to that group user.groups.add(group)
[ "def", "synchronize", "(", "self", ",", "user", ",", "info", ")", ":", "self", ".", "debug", "(", "\"Synchronize!\"", ")", "# Remove all groups from this user", "user", ".", "groups", ".", "clear", "(", ")", "# For all domains found for this user", "for", "domain", "in", "info", "[", "'groups'", "]", ":", "# For all groups he is", "for", "groupname", "in", "info", "[", "'groups'", "]", "[", "domain", "]", ":", "# Lookup for that group", "group", "=", "Group", ".", "objects", ".", "filter", "(", "name", "=", "groupname", ")", ".", "first", "(", ")", "if", "group", ":", "# If found, add the user to that group", "user", ".", "groups", ".", "add", "(", "group", ")" ]
It tries to do a group synchronization if possible. This method should be redeclared by the developer.
[ "It", "tries", "to", "do", "a", "group", "synchronization", "if", "possible", "This", "methods", "should", "be", "redeclared", "by", "the", "developer" ]
python
train
Clarify/clarify_python
clarify_python/clarify.py
https://github.com/Clarify/clarify_python/blob/1a00a5e39f77af9ad7f2e08480a3ab14e7d72aeb/clarify_python/clarify.py#L563-L578
def delete_track(self, href=None): """Delete a track. 'href' the relative index of the track. May not be none. Returns nothing. If the response status is not 204, throws and APIException.""" # Argument error checking. assert href is not None raw_result = self.delete(href) if raw_result.status != 204: raise APIException(raw_result.status, raw_result.json)
[ "def", "delete_track", "(", "self", ",", "href", "=", "None", ")", ":", "# Argument error checking.", "assert", "href", "is", "not", "None", "raw_result", "=", "self", ".", "delete", "(", "href", ")", "if", "raw_result", ".", "status", "!=", "204", ":", "raise", "APIException", "(", "raw_result", ".", "status", ",", "raw_result", ".", "json", ")" ]
Delete a track. 'href' the relative index of the track. May not be None. Returns nothing. If the response status is not 204, throws an APIException.
[ "Delete", "a", "track", "." ]
python
train
googleapis/google-cloud-python
dns/google/cloud/dns/changes.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/dns/google/cloud/dns/changes.py#L80-L90
def path(self): """URL path for change set APIs. :rtype: str :returns: the path based on project, zone, and change set names. """ return "/projects/%s/managedZones/%s/changes/%s" % ( self.zone.project, self.zone.name, self.name, )
[ "def", "path", "(", "self", ")", ":", "return", "\"/projects/%s/managedZones/%s/changes/%s\"", "%", "(", "self", ".", "zone", ".", "project", ",", "self", ".", "zone", ".", "name", ",", "self", ".", "name", ",", ")" ]
URL path for change set APIs. :rtype: str :returns: the path based on project, zone, and change set names.
[ "URL", "path", "for", "change", "set", "APIs", "." ]
python
train
GoogleCloudPlatform/cloud-debug-python
src/googleclouddebugger/gcp_hub_client.py
https://github.com/GoogleCloudPlatform/cloud-debug-python/blob/89ce3782c98b814838a3ecb5479ed3882368cbee/src/googleclouddebugger/gcp_hub_client.py#L447-L467
def _GetDebuggee(self): """Builds the debuggee structure.""" major_version = 'v' + version.__version__.split('.')[0] python_version = ''.join(platform.python_version().split('.')[:2]) agent_version = ('google.com/python%s-gcp/%s' % (python_version, major_version)) debuggee = { 'project': self._project_number, 'description': self._GetDebuggeeDescription(), 'labels': self._debuggee_labels, 'agentVersion': agent_version, } source_context = self._ReadAppJsonFile('source-context.json') if source_context: debuggee['sourceContexts'] = [source_context] debuggee['uniquifier'] = self._ComputeUniquifier(debuggee) return debuggee
[ "def", "_GetDebuggee", "(", "self", ")", ":", "major_version", "=", "'v'", "+", "version", ".", "__version__", ".", "split", "(", "'.'", ")", "[", "0", "]", "python_version", "=", "''", ".", "join", "(", "platform", ".", "python_version", "(", ")", ".", "split", "(", "'.'", ")", "[", ":", "2", "]", ")", "agent_version", "=", "(", "'google.com/python%s-gcp/%s'", "%", "(", "python_version", ",", "major_version", ")", ")", "debuggee", "=", "{", "'project'", ":", "self", ".", "_project_number", ",", "'description'", ":", "self", ".", "_GetDebuggeeDescription", "(", ")", ",", "'labels'", ":", "self", ".", "_debuggee_labels", ",", "'agentVersion'", ":", "agent_version", ",", "}", "source_context", "=", "self", ".", "_ReadAppJsonFile", "(", "'source-context.json'", ")", "if", "source_context", ":", "debuggee", "[", "'sourceContexts'", "]", "=", "[", "source_context", "]", "debuggee", "[", "'uniquifier'", "]", "=", "self", ".", "_ComputeUniquifier", "(", "debuggee", ")", "return", "debuggee" ]
Builds the debuggee structure.
[ "Builds", "the", "debuggee", "structure", "." ]
python
train
django-salesforce/django-salesforce
salesforce/dbapi/driver.py
https://github.com/django-salesforce/django-salesforce/blob/6fd5643dba69d49c5881de50875cf90204a8f808/salesforce/dbapi/driver.py#L735-L745
def arg_to_soql(arg): """ Perform necessary SOQL quoting on the arg. """ conversion = sql_conversions.get(type(arg)) if conversion: return conversion(arg) for type_ in subclass_conversions: if isinstance(arg, type_): return sql_conversions[type_](arg) return sql_conversions[str](arg)
[ "def", "arg_to_soql", "(", "arg", ")", ":", "conversion", "=", "sql_conversions", ".", "get", "(", "type", "(", "arg", ")", ")", "if", "conversion", ":", "return", "conversion", "(", "arg", ")", "for", "type_", "in", "subclass_conversions", ":", "if", "isinstance", "(", "arg", ",", "type_", ")", ":", "return", "sql_conversions", "[", "type_", "]", "(", "arg", ")", "return", "sql_conversions", "[", "str", "]", "(", "arg", ")" ]
Perform necessary SOQL quoting on the arg.
[ "Perform", "necessary", "SOQL", "quoting", "on", "the", "arg", "." ]
python
train
seung-lab/cloud-volume
cloudvolume/py_compressed_segmentation.py
https://github.com/seung-lab/cloud-volume/blob/d2fd4500333f1bc3cd3e3919a8b649cec5d8e214/cloudvolume/py_compressed_segmentation.py#L25-L33
def pad_block(block, block_size): """Pad a block to block_size with its most frequent value""" unique_vals, unique_counts = np.unique(block, return_counts=True) most_frequent_value = unique_vals[np.argmax(unique_counts)] return np.pad(block, tuple((0, desired_size - actual_size) for desired_size, actual_size in zip(block_size, block.shape)), mode="constant", constant_values=most_frequent_value)
[ "def", "pad_block", "(", "block", ",", "block_size", ")", ":", "unique_vals", ",", "unique_counts", "=", "np", ".", "unique", "(", "block", ",", "return_counts", "=", "True", ")", "most_frequent_value", "=", "unique_vals", "[", "np", ".", "argmax", "(", "unique_counts", ")", "]", "return", "np", ".", "pad", "(", "block", ",", "tuple", "(", "(", "0", ",", "desired_size", "-", "actual_size", ")", "for", "desired_size", ",", "actual_size", "in", "zip", "(", "block_size", ",", "block", ".", "shape", ")", ")", ",", "mode", "=", "\"constant\"", ",", "constant_values", "=", "most_frequent_value", ")" ]
Pad a block to block_size with its most frequent value
[ "Pad", "a", "block", "to", "block_size", "with", "its", "most", "frequent", "value" ]
python
train
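A small self-contained check of the entry above; the import path is taken from the entry's file path and may differ between releases.

import numpy as np
from cloudvolume.py_compressed_segmentation import pad_block

block = np.array([[1, 1],
                  [1, 2]], dtype=np.uint32)
padded = pad_block(block, (4, 4))
print(padded.shape)    # (4, 4)
print(padded[3, 3])    # 1 -- the most frequent value fills the padding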
cenkalti/kuyruk
kuyruk/task.py
https://github.com/cenkalti/kuyruk/blob/c99d66be9d8fb077610f2fa883d5a1d268b42f04/kuyruk/task.py#L69-L130
def send_to_queue( self, args: Tuple=(), kwargs: Dict[str, Any]={}, host: str=None, wait_result: Union[int, float]=None, message_ttl: Union[int, float]=None, ) -> Any: """ Sends a message to the queue. A worker will run the task's function when it receives the message. :param args: Arguments that will be passed to task on execution. :param kwargs: Keyword arguments that will be passed to task on execution. :param host: Send this task to specific host. ``host`` will be appended to the queue name. If ``host`` is "localhost", hostname of the server will be appended to the queue name. :param wait_result: Wait for result from worker for ``wait_result`` seconds. If timeout occurs, :class:`~kuyruk.exceptions.ResultTimeout` is raised. If excecption occurs in worker, :class:`~kuyruk.exceptions.RemoteException` is raised. :param message_ttl: If set, message will be destroyed in queue after ``message_ttl`` seconds. :return: Result from worker if ``wait_result`` is set, else :const:`None`. """ if self.kuyruk.config.EAGER: # Run the task in current process result = self.apply(*args, **kwargs) return result if wait_result else None logger.debug("Task.send_to_queue args=%r, kwargs=%r", args, kwargs) queue = self._queue_for_host(host) description = self._get_description(args, kwargs) self._send_signal(signals.task_presend, args=args, kwargs=kwargs, description=description) body = json.dumps(description) msg = amqp.Message(body=body) if wait_result: # Use direct reply-to feature from RabbitMQ: # https://www.rabbitmq.com/direct-reply-to.html msg.properties['reply_to'] = 'amq.rabbitmq.reply-to' if message_ttl: msg.properties['expiration'] = str(int(message_ttl * 1000)) with self.kuyruk.channel() as ch: if wait_result: result = Result(ch.connection) ch.basic_consume(queue='amq.rabbitmq.reply-to', no_ack=True, callback=result.process_message) ch.queue_declare(queue=queue, durable=True, auto_delete=False) ch.basic_publish(msg, exchange="", routing_key=queue) self._send_signal(signals.task_postsend, args=args, kwargs=kwargs, description=description) if wait_result: return result.wait(wait_result)
[ "def", "send_to_queue", "(", "self", ",", "args", ":", "Tuple", "=", "(", ")", ",", "kwargs", ":", "Dict", "[", "str", ",", "Any", "]", "=", "{", "}", ",", "host", ":", "str", "=", "None", ",", "wait_result", ":", "Union", "[", "int", ",", "float", "]", "=", "None", ",", "message_ttl", ":", "Union", "[", "int", ",", "float", "]", "=", "None", ",", ")", "->", "Any", ":", "if", "self", ".", "kuyruk", ".", "config", ".", "EAGER", ":", "# Run the task in current process", "result", "=", "self", ".", "apply", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "result", "if", "wait_result", "else", "None", "logger", ".", "debug", "(", "\"Task.send_to_queue args=%r, kwargs=%r\"", ",", "args", ",", "kwargs", ")", "queue", "=", "self", ".", "_queue_for_host", "(", "host", ")", "description", "=", "self", ".", "_get_description", "(", "args", ",", "kwargs", ")", "self", ".", "_send_signal", "(", "signals", ".", "task_presend", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ",", "description", "=", "description", ")", "body", "=", "json", ".", "dumps", "(", "description", ")", "msg", "=", "amqp", ".", "Message", "(", "body", "=", "body", ")", "if", "wait_result", ":", "# Use direct reply-to feature from RabbitMQ:", "# https://www.rabbitmq.com/direct-reply-to.html", "msg", ".", "properties", "[", "'reply_to'", "]", "=", "'amq.rabbitmq.reply-to'", "if", "message_ttl", ":", "msg", ".", "properties", "[", "'expiration'", "]", "=", "str", "(", "int", "(", "message_ttl", "*", "1000", ")", ")", "with", "self", ".", "kuyruk", ".", "channel", "(", ")", "as", "ch", ":", "if", "wait_result", ":", "result", "=", "Result", "(", "ch", ".", "connection", ")", "ch", ".", "basic_consume", "(", "queue", "=", "'amq.rabbitmq.reply-to'", ",", "no_ack", "=", "True", ",", "callback", "=", "result", ".", "process_message", ")", "ch", ".", "queue_declare", "(", "queue", "=", "queue", ",", "durable", "=", "True", ",", "auto_delete", "=", "False", ")", "ch", ".", "basic_publish", "(", "msg", ",", "exchange", "=", "\"\"", ",", "routing_key", "=", "queue", ")", "self", ".", "_send_signal", "(", "signals", ".", "task_postsend", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ",", "description", "=", "description", ")", "if", "wait_result", ":", "return", "result", ".", "wait", "(", "wait_result", ")" ]
Sends a message to the queue. A worker will run the task's function when it receives the message. :param args: Arguments that will be passed to task on execution. :param kwargs: Keyword arguments that will be passed to task on execution. :param host: Send this task to specific host. ``host`` will be appended to the queue name. If ``host`` is "localhost", hostname of the server will be appended to the queue name. :param wait_result: Wait for result from worker for ``wait_result`` seconds. If timeout occurs, :class:`~kuyruk.exceptions.ResultTimeout` is raised. If an exception occurs in the worker, :class:`~kuyruk.exceptions.RemoteException` is raised. :param message_ttl: If set, message will be destroyed in queue after ``message_ttl`` seconds. :return: Result from worker if ``wait_result`` is set, else :const:`None`.
[ "Sends", "a", "message", "to", "the", "queue", ".", "A", "worker", "will", "run", "the", "task", "s", "function", "when", "it", "receives", "the", "message", "." ]
python
train
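A minimal enqueue sketch for the entry above; the task body and RabbitMQ defaults are illustrative, and a worker consuming the default 'kuyruk' queue is assumed to be running.

from kuyruk import Kuyruk

kuyruk = Kuyruk()

@kuyruk.task()
def echo(message):
    print(message)

# Publishes a JSON task description to the queue; a worker then executes echo("hello world").
echo.send_to_queue(args=("hello world",))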
floydhub/floyd-cli
floyd/cli/data.py
https://github.com/floydhub/floyd-cli/blob/ea6b9521119cbde2dfc71ce0cc87c0d9c143fc6c/floyd/cli/data.py#L241-L256
def getfile(data_name, path): """ Download a specific file from a dataset. """ data_source = get_data_object(data_name, use_data_config=False) if not data_source: if 'output' in data_name: floyd_logger.info("Note: You cannot clone the output of a running job. You need to wait for it to finish.") sys.exit() url = "{}/api/v1/resources/{}/{}?content=true".format(floyd.floyd_host, data_source.resource_id, path) fname = os.path.basename(path) DataClient().download(url, filename=fname) floyd_logger.info("Download finished")
[ "def", "getfile", "(", "data_name", ",", "path", ")", ":", "data_source", "=", "get_data_object", "(", "data_name", ",", "use_data_config", "=", "False", ")", "if", "not", "data_source", ":", "if", "'output'", "in", "data_name", ":", "floyd_logger", ".", "info", "(", "\"Note: You cannot clone the output of a running job. You need to wait for it to finish.\"", ")", "sys", ".", "exit", "(", ")", "url", "=", "\"{}/api/v1/resources/{}/{}?content=true\"", ".", "format", "(", "floyd", ".", "floyd_host", ",", "data_source", ".", "resource_id", ",", "path", ")", "fname", "=", "os", ".", "path", ".", "basename", "(", "path", ")", "DataClient", "(", ")", ".", "download", "(", "url", ",", "filename", "=", "fname", ")", "floyd_logger", ".", "info", "(", "\"Download finished\"", ")" ]
Download a specific file from a dataset.
[ "Download", "a", "specific", "file", "from", "a", "dataset", "." ]
python
train
jeremyschulman/halutz
halutz/utils.py
https://github.com/jeremyschulman/halutz/blob/6bb398dc99bf723daabd9eda02494a11252ee109/halutz/utils.py#L10-L29
def humanize_api_path(api_path): """ Converts an API path to a humaized string, for example: # >>> In [2]: humanize_api_path('/api/vlan/{id}') # >>> Out[2]: u'ApiVlanId' Parameters ---------- api_path : str An API path string. Returns ------- str - humazined form. """ return reduce(lambda val, func: func(val), [parameterize, underscore, camelize], unicode(api_path))
[ "def", "humanize_api_path", "(", "api_path", ")", ":", "return", "reduce", "(", "lambda", "val", ",", "func", ":", "func", "(", "val", ")", ",", "[", "parameterize", ",", "underscore", ",", "camelize", "]", ",", "unicode", "(", "api_path", ")", ")" ]
Converts an API path to a humanized string, for example: # >>> In [2]: humanize_api_path('/api/vlan/{id}') # >>> Out[2]: u'ApiVlanId' Parameters ---------- api_path : str An API path string. Returns ------- str - humanized form.
[ "Converts", "an", "API", "path", "to", "a", "humaized", "string", "for", "example", ":" ]
python
train
CivicSpleen/ambry
ambry/identity.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/identity.py#L363-L365
def promote(self, name): """Promote to a PartitionName by combining with a bundle Name.""" return PartitionName(**dict(list(name.dict.items()) + list(self.dict.items())))
[ "def", "promote", "(", "self", ",", "name", ")", ":", "return", "PartitionName", "(", "*", "*", "dict", "(", "list", "(", "name", ".", "dict", ".", "items", "(", ")", ")", "+", "list", "(", "self", ".", "dict", ".", "items", "(", ")", ")", ")", ")" ]
Promote to a PartitionName by combining with a bundle Name.
[ "Promote", "to", "a", "PartitionName", "by", "combining", "with", "a", "bundle", "Name", "." ]
python
train
calston/tensor
tensor/service.py
https://github.com/calston/tensor/blob/7c0c99708b5dbff97f3895f705e11996b608549d/tensor/service.py#L168-L176
def setupSources(self, config): """Sets up source objects from the given config""" sources = config.get('sources', []) for source in sources: src = self.createSource(source) self.setupTriggers(source, src) self.sources.append(src)
[ "def", "setupSources", "(", "self", ",", "config", ")", ":", "sources", "=", "config", ".", "get", "(", "'sources'", ",", "[", "]", ")", "for", "source", "in", "sources", ":", "src", "=", "self", ".", "createSource", "(", "source", ")", "self", ".", "setupTriggers", "(", "source", ",", "src", ")", "self", ".", "sources", ".", "append", "(", "src", ")" ]
Sets up source objects from the given config
[ "Sets", "up", "source", "objects", "from", "the", "given", "config" ]
python
test
pydata/xarray
xarray/core/dataset.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/dataset.py#L654-L673
def persist(self: T, **kwargs) -> T: """ Trigger computation, keeping data as dask arrays This operation can be used to trigger computation on underlying dask arrays, similar to ``.compute()``. However this operation keeps the data as dask arrays. This is particularly useful when using the dask.distributed scheduler and you want to load a large amount of data into distributed memory. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.persist``. See Also -------- dask.persist """ new = self.copy(deep=False) return new._persist_inplace(**kwargs)
[ "def", "persist", "(", "self", ":", "T", ",", "*", "*", "kwargs", ")", "->", "T", ":", "new", "=", "self", ".", "copy", "(", "deep", "=", "False", ")", "return", "new", ".", "_persist_inplace", "(", "*", "*", "kwargs", ")" ]
Trigger computation, keeping data as dask arrays This operation can be used to trigger computation on underlying dask arrays, similar to ``.compute()``. However this operation keeps the data as dask arrays. This is particularly useful when using the dask.distributed scheduler and you want to load a large amount of data into distributed memory. Parameters ---------- **kwargs : dict Additional keyword arguments passed on to ``dask.persist``. See Also -------- dask.persist
[ "Trigger", "computation", "keeping", "data", "as", "dask", "arrays" ]
python
train
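A hedged usage sketch for Dataset.persist: it assumes dask is installed, and the variable name and chunk size are made up; the point is that the data remains dask-backed after the call.

import numpy as np
import xarray as xr

ds = xr.Dataset({"temperature": (("x",), np.arange(1000.0))}).chunk({"x": 100})
persisted = ds.persist()  # computation triggered, data kept as dask arrays held in memory
print(type(persisted["temperature"].data))  # a dask array, not a plain numpy array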
benoitkugler/abstractDataLibrary
pyDLib/Core/data_model.py
https://github.com/benoitkugler/abstractDataLibrary/blob/16be28e99837e40287a63803bbfdf67ac1806b7b/pyDLib/Core/data_model.py#L174-L181
def select_by_critere(self, base, criteria): """ :param base: Reference on whole base :param criteria: Callable abstractAcces -> Bool, acting as filter :return: Collection on acces passing the criteria """ Ac = self.ACCES return groups.Collection(Ac(base, i) for i in self if criteria(Ac(base, i)))
[ "def", "select_by_critere", "(", "self", ",", "base", ",", "criteria", ")", ":", "Ac", "=", "self", ".", "ACCES", "return", "groups", ".", "Collection", "(", "Ac", "(", "base", ",", "i", ")", "for", "i", "in", "self", "if", "criteria", "(", "Ac", "(", "base", ",", "i", ")", ")", ")" ]
:param base: Reference on whole base :param criteria: Callable abstractAcces -> Bool, acting as filter :return: Collection on acces passing the criteria
[ ":", "param", "base", ":", "Reference", "on", "whole", "base", ":", "param", "criteria", ":", "Callable", "abstractAcces", "-", ">", "Bool", "acting", "as", "filter", ":", "return", ":", "Collection", "on", "acces", "passing", "the", "criteria" ]
python
train
vaexio/vaex
packages/vaex-core/vaex/functions.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/functions.py#L511-L540
def str_capitalize(x): """Capitalize the first letter of a string sample. :returns: an expression containing the capitalized strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.capitalize() Expression = str_capitalize(text) Length: 5 dtype: str (expression) --------------------------------- 0 Something 1 Very pretty 2 Is coming 3 Our 4 Way. """ sl = _to_string_sequence(x).capitalize() return column.ColumnStringArrow(sl.bytes, sl.indices, sl.length, sl.offset, string_sequence=sl)
[ "def", "str_capitalize", "(", "x", ")", ":", "sl", "=", "_to_string_sequence", "(", "x", ")", ".", "capitalize", "(", ")", "return", "column", ".", "ColumnStringArrow", "(", "sl", ".", "bytes", ",", "sl", ".", "indices", ",", "sl", ".", "length", ",", "sl", ".", "offset", ",", "string_sequence", "=", "sl", ")" ]
Capitalize the first letter of a string sample. :returns: an expression containing the capitalized strings. Example: >>> import vaex >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.'] >>> df = vaex.from_arrays(text=text) >>> df # text 0 Something 1 very pretty 2 is coming 3 our 4 way. >>> df.text.str.capitalize() Expression = str_capitalize(text) Length: 5 dtype: str (expression) --------------------------------- 0 Something 1 Very pretty 2 Is coming 3 Our 4 Way.
[ "Capitalize", "the", "first", "letter", "of", "a", "string", "sample", "." ]
python
test
bspaans/python-mingus
mingus/extra/tunings.py
https://github.com/bspaans/python-mingus/blob/aa5a5d992d45ada61be0f9f86261380731bd7749/mingus/extra/tunings.py#L307-L334
def get_Note(self, string=0, fret=0, maxfret=24): """Return the Note on 'string', 'fret'. Throw a RangeError if either the fret or string is unplayable. Examples: >>> t = tunings.StringTuning('test', 'test', ['A-3', 'A-4']) >>> t.get_Note(0, 0) 'A-3' >>> t.get_Note(0, 1) 'A#-3' >>> t.get_Note(1, 0) 'A-4' """ if 0 <= string < self.count_strings(): if 0 <= fret <= maxfret: s = self.tuning[string] if type(s) == list: s = s[0] n = Note(int(s) + fret) n.string = string n.fret = fret return n else: raise RangeError("Fret '%d' on string '%d' is out of range" % (string, fret)) else: raise RangeError("String '%d' out of range" % string)
[ "def", "get_Note", "(", "self", ",", "string", "=", "0", ",", "fret", "=", "0", ",", "maxfret", "=", "24", ")", ":", "if", "0", "<=", "string", "<", "self", ".", "count_strings", "(", ")", ":", "if", "0", "<=", "fret", "<=", "maxfret", ":", "s", "=", "self", ".", "tuning", "[", "string", "]", "if", "type", "(", "s", ")", "==", "list", ":", "s", "=", "s", "[", "0", "]", "n", "=", "Note", "(", "int", "(", "s", ")", "+", "fret", ")", "n", ".", "string", "=", "string", "n", ".", "fret", "=", "fret", "return", "n", "else", ":", "raise", "RangeError", "(", "\"Fret '%d' on string '%d' is out of range\"", "%", "(", "string", ",", "fret", ")", ")", "else", ":", "raise", "RangeError", "(", "\"String '%d' out of range\"", "%", "string", ")" ]
Return the Note on 'string', 'fret'. Throw a RangeError if either the fret or string is unplayable. Examples: >>> t = tunings.StringTuning('test', 'test', ['A-3', 'A-4']) >>> t.get_Note(0, 0) 'A-3' >>> t.get_Note(0, 1) 'A#-3' >>> t.get_Note(1, 0) 'A-4'
[ "Return", "the", "Note", "on", "string", "fret", "." ]
python
train
twisted/vertex
vertex/sigma.py
https://github.com/twisted/vertex/blob/feb591aa1b9a3b2b8fdcf53e4962dad2a0bc38ca/vertex/sigma.py#L713-L721
def seed(self, path, name): """Create a transload from an existing file that is complete. """ t = self.transloads[name] = Transload(self.addr, self, name, None, path, self.ui.startTransload(name, self.addr), seed=True) return t
[ "def", "seed", "(", "self", ",", "path", ",", "name", ")", ":", "t", "=", "self", ".", "transloads", "[", "name", "]", "=", "Transload", "(", "self", ".", "addr", ",", "self", ",", "name", ",", "None", ",", "path", ",", "self", ".", "ui", ".", "startTransload", "(", "name", ",", "self", ".", "addr", ")", ",", "seed", "=", "True", ")", "return", "t" ]
Create a transload from an existing file that is complete.
[ "Create", "a", "transload", "from", "an", "existing", "file", "that", "is", "complete", "." ]
python
train
cisco-sas/kitty
kitty/data/data_manager.py
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/data/data_manager.py#L349-L367
def insert(self, fields, values): ''' insert new db entry :param fields: list of fields to insert :param values: list of values to insert :return: row id of the new row ''' if fields: _fields = ' (%s) ' % ','.join(fields) else: _fields = '' _values = ','.join('?' * len(values)) query = ''' INSERT INTO %s %s VALUES (%s) ''' % (self._name, _fields, _values) self._cursor.execute(query, tuple(values)) self._connection.commit() return self._cursor.lastrowid
[ "def", "insert", "(", "self", ",", "fields", ",", "values", ")", ":", "if", "fields", ":", "_fields", "=", "' (%s) '", "%", "','", ".", "join", "(", "fields", ")", "else", ":", "_fields", "=", "''", "_values", "=", "','", ".", "join", "(", "'?'", "*", "len", "(", "values", ")", ")", "query", "=", "'''\n INSERT INTO %s %s VALUES (%s)\n '''", "%", "(", "self", ".", "_name", ",", "_fields", ",", "_values", ")", "self", ".", "_cursor", ".", "execute", "(", "query", ",", "tuple", "(", "values", ")", ")", "self", ".", "_connection", ".", "commit", "(", ")", "return", "self", ".", "_cursor", ".", "lastrowid" ]
insert new db entry :param fields: list of fields to insert :param values: list of values to insert :return: row id of the new row
[ "insert", "new", "db", "entry" ]
python
train
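A stand-alone illustration of the parameterized INSERT this method assembles, shown against an in-memory sqlite3 database; the table and field names here are hypothetical and not part of kitty.

import sqlite3

fields = ["name", "status"]
values = ["fuzz_case_1", "passed"]

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE results (name TEXT, status TEXT)")
query = "INSERT INTO results (%s) VALUES (%s)" % (",".join(fields), ",".join("?" * len(values)))
cur = conn.execute(query, tuple(values))  # values bound as parameters, as in Table.insert
conn.commit()
print(cur.lastrowid)  # row id of the new row, mirroring the method's return value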
numberoverzero/bloop
bloop/search.py
https://github.com/numberoverzero/bloop/blob/4c95f5a0ff0802443a1c258bfaccecd1758363e7/bloop/search.py#L374-L379
def scanned(self): """Number of items that DynamoDB evaluated, before any filter was applied.""" if self.request["Select"] == "COUNT": while not self.exhausted: next(self, None) return self._scanned
[ "def", "scanned", "(", "self", ")", ":", "if", "self", ".", "request", "[", "\"Select\"", "]", "==", "\"COUNT\"", ":", "while", "not", "self", ".", "exhausted", ":", "next", "(", "self", ",", "None", ")", "return", "self", ".", "_scanned" ]
Number of items that DynamoDB evaluated, before any filter was applied.
[ "Number", "of", "items", "that", "DynamoDB", "evaluated", "before", "any", "filter", "was", "applied", "." ]
python
train
fermiPy/fermipy
fermipy/plotting.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/plotting.py#L943-L1072
def make_residmap_plots(self, maps, roi=None, **kwargs): """Make plots from the output of `~fermipy.gtanalysis.GTAnalysis.residmap`. Parameters ---------- maps : dict Output dictionary of `~fermipy.gtanalysis.GTAnalysis.residmap`. roi : `~fermipy.roi_model.ROIModel` ROI Model object. Generate markers at the positions of the sources in this ROI. zoom : float Crop the image by this factor. If None then no crop is applied. """ fmt = kwargs.get('format', self.config['format']) figsize = kwargs.get('figsize', self.config['figsize']) workdir = kwargs.pop('workdir', self.config['fileio']['workdir']) use_weights = kwargs.pop('use_weights', False) # FIXME, how to set this: no_contour = False zoom = kwargs.get('zoom', None) kwargs.setdefault('graticule_radii', self.config['graticule_radii']) kwargs.setdefault('label_ts_threshold', self.config['label_ts_threshold']) cmap = kwargs.setdefault('cmap', self.config['cmap']) cmap_resid = kwargs.pop('cmap_resid', self.config['cmap_resid']) kwargs.setdefault('catalogs', self.config['catalogs']) if no_contour: sigma_levels = None else: sigma_levels = [-5, -3, 3, 5, 7] + list(np.logspace(1, 3, 17)) load_bluered_cmap() prefix = maps['name'] mask = maps['mask'] if use_weights: sigma_hist_data = maps['sigma'].data[maps['mask'].data.astype( bool)] maps['sigma'].data *= maps['mask'].data maps['data'].data *= maps['mask'].data maps['model'].data *= maps['mask'].data maps['excess'].data *= maps['mask'].data else: sigma_hist_data = maps['sigma'].data fig = plt.figure(figsize=figsize) p = ROIPlotter(maps['sigma'], roi=roi, **kwargs) p.plot(vmin=-5, vmax=5, levels=sigma_levels, cb_label='Significance [$\sigma$]', interpolation='bicubic', cmap=cmap_resid, zoom=zoom) plt.savefig(utils.format_filename(workdir, 'residmap_sigma', prefix=[prefix], extension=fmt)) plt.close(fig) # make and draw histogram fig, ax = plt.subplots(figsize=figsize) nBins = np.linspace(-6, 6, 121) data = np.nan_to_num(sigma_hist_data) # find best fit parameters mu, sigma = norm.fit(data.flatten()) # make and draw the histogram data[data > 6.0] = 6.0 data[data < -6.0] = -6.0 n, bins, patches = ax.hist(data.flatten(), nBins, density=True, histtype='stepfilled', facecolor='green', alpha=0.75) # make and draw best fit line y = norm.pdf(bins, mu, sigma) ax.plot(bins, y, 'r--', linewidth=2) y = norm.pdf(bins, 0.0, 1.0) ax.plot(bins, y, 'k', linewidth=1) # labels and such ax.set_xlabel(r'Significance ($\sigma$)') ax.set_ylabel('Probability') paramtext = 'Gaussian fit:\n' paramtext += '$\\mu=%.2f$\n' % mu paramtext += '$\\sigma=%.2f$' % sigma ax.text(0.05, 0.95, paramtext, verticalalignment='top', horizontalalignment='left', transform=ax.transAxes) plt.savefig(utils.format_filename(workdir, 'residmap_sigma_hist', prefix=[prefix], extension=fmt)) plt.close(fig) vmax = max(np.max(maps['data'].data), np.max(maps['model'].data)) vmin = min(np.min(maps['data'].data), np.min(maps['model'].data)) fig = plt.figure(figsize=figsize) p = ROIPlotter(maps['data'], roi=roi, **kwargs) p.plot(cb_label='Counts', interpolation='bicubic', cmap=cmap, zscale='sqrt', vmin=vmin, vmax=vmax) plt.savefig(utils.format_filename(workdir, 'residmap_data', prefix=[prefix], extension=fmt)) plt.close(fig) fig = plt.figure(figsize=figsize) p = ROIPlotter(maps['model'], roi=roi, **kwargs) p.plot(cb_label='Counts', interpolation='bicubic', cmap=cmap, zscale='sqrt', vmin=vmin, vmax=vmax) plt.savefig(utils.format_filename(workdir, 'residmap_model', prefix=[prefix], extension=fmt)) plt.close(fig) fig = plt.figure(figsize=figsize) p = 
ROIPlotter(maps['excess'], roi=roi, **kwargs) p.plot(cb_label='Counts', interpolation='bicubic', cmap=cmap_resid) plt.savefig(utils.format_filename(workdir, 'residmap_excess', prefix=[prefix], extension=fmt)) plt.close(fig)
[ "def", "make_residmap_plots", "(", "self", ",", "maps", ",", "roi", "=", "None", ",", "*", "*", "kwargs", ")", ":", "fmt", "=", "kwargs", ".", "get", "(", "'format'", ",", "self", ".", "config", "[", "'format'", "]", ")", "figsize", "=", "kwargs", ".", "get", "(", "'figsize'", ",", "self", ".", "config", "[", "'figsize'", "]", ")", "workdir", "=", "kwargs", ".", "pop", "(", "'workdir'", ",", "self", ".", "config", "[", "'fileio'", "]", "[", "'workdir'", "]", ")", "use_weights", "=", "kwargs", ".", "pop", "(", "'use_weights'", ",", "False", ")", "# FIXME, how to set this:", "no_contour", "=", "False", "zoom", "=", "kwargs", ".", "get", "(", "'zoom'", ",", "None", ")", "kwargs", ".", "setdefault", "(", "'graticule_radii'", ",", "self", ".", "config", "[", "'graticule_radii'", "]", ")", "kwargs", ".", "setdefault", "(", "'label_ts_threshold'", ",", "self", ".", "config", "[", "'label_ts_threshold'", "]", ")", "cmap", "=", "kwargs", ".", "setdefault", "(", "'cmap'", ",", "self", ".", "config", "[", "'cmap'", "]", ")", "cmap_resid", "=", "kwargs", ".", "pop", "(", "'cmap_resid'", ",", "self", ".", "config", "[", "'cmap_resid'", "]", ")", "kwargs", ".", "setdefault", "(", "'catalogs'", ",", "self", ".", "config", "[", "'catalogs'", "]", ")", "if", "no_contour", ":", "sigma_levels", "=", "None", "else", ":", "sigma_levels", "=", "[", "-", "5", ",", "-", "3", ",", "3", ",", "5", ",", "7", "]", "+", "list", "(", "np", ".", "logspace", "(", "1", ",", "3", ",", "17", ")", ")", "load_bluered_cmap", "(", ")", "prefix", "=", "maps", "[", "'name'", "]", "mask", "=", "maps", "[", "'mask'", "]", "if", "use_weights", ":", "sigma_hist_data", "=", "maps", "[", "'sigma'", "]", ".", "data", "[", "maps", "[", "'mask'", "]", ".", "data", ".", "astype", "(", "bool", ")", "]", "maps", "[", "'sigma'", "]", ".", "data", "*=", "maps", "[", "'mask'", "]", ".", "data", "maps", "[", "'data'", "]", ".", "data", "*=", "maps", "[", "'mask'", "]", ".", "data", "maps", "[", "'model'", "]", ".", "data", "*=", "maps", "[", "'mask'", "]", ".", "data", "maps", "[", "'excess'", "]", ".", "data", "*=", "maps", "[", "'mask'", "]", ".", "data", "else", ":", "sigma_hist_data", "=", "maps", "[", "'sigma'", "]", ".", "data", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "p", "=", "ROIPlotter", "(", "maps", "[", "'sigma'", "]", ",", "roi", "=", "roi", ",", "*", "*", "kwargs", ")", "p", ".", "plot", "(", "vmin", "=", "-", "5", ",", "vmax", "=", "5", ",", "levels", "=", "sigma_levels", ",", "cb_label", "=", "'Significance [$\\sigma$]'", ",", "interpolation", "=", "'bicubic'", ",", "cmap", "=", "cmap_resid", ",", "zoom", "=", "zoom", ")", "plt", ".", "savefig", "(", "utils", ".", "format_filename", "(", "workdir", ",", "'residmap_sigma'", ",", "prefix", "=", "[", "prefix", "]", ",", "extension", "=", "fmt", ")", ")", "plt", ".", "close", "(", "fig", ")", "# make and draw histogram", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "figsize", "=", "figsize", ")", "nBins", "=", "np", ".", "linspace", "(", "-", "6", ",", "6", ",", "121", ")", "data", "=", "np", ".", "nan_to_num", "(", "sigma_hist_data", ")", "# find best fit parameters", "mu", ",", "sigma", "=", "norm", ".", "fit", "(", "data", ".", "flatten", "(", ")", ")", "# make and draw the histogram", "data", "[", "data", ">", "6.0", "]", "=", "6.0", "data", "[", "data", "<", "-", "6.0", "]", "=", "-", "6.0", "n", ",", "bins", ",", "patches", "=", "ax", ".", "hist", "(", "data", ".", "flatten", "(", ")", ",", "nBins", ",", "density", "=", "True", ",", 
"histtype", "=", "'stepfilled'", ",", "facecolor", "=", "'green'", ",", "alpha", "=", "0.75", ")", "# make and draw best fit line", "y", "=", "norm", ".", "pdf", "(", "bins", ",", "mu", ",", "sigma", ")", "ax", ".", "plot", "(", "bins", ",", "y", ",", "'r--'", ",", "linewidth", "=", "2", ")", "y", "=", "norm", ".", "pdf", "(", "bins", ",", "0.0", ",", "1.0", ")", "ax", ".", "plot", "(", "bins", ",", "y", ",", "'k'", ",", "linewidth", "=", "1", ")", "# labels and such", "ax", ".", "set_xlabel", "(", "r'Significance ($\\sigma$)'", ")", "ax", ".", "set_ylabel", "(", "'Probability'", ")", "paramtext", "=", "'Gaussian fit:\\n'", "paramtext", "+=", "'$\\\\mu=%.2f$\\n'", "%", "mu", "paramtext", "+=", "'$\\\\sigma=%.2f$'", "%", "sigma", "ax", ".", "text", "(", "0.05", ",", "0.95", ",", "paramtext", ",", "verticalalignment", "=", "'top'", ",", "horizontalalignment", "=", "'left'", ",", "transform", "=", "ax", ".", "transAxes", ")", "plt", ".", "savefig", "(", "utils", ".", "format_filename", "(", "workdir", ",", "'residmap_sigma_hist'", ",", "prefix", "=", "[", "prefix", "]", ",", "extension", "=", "fmt", ")", ")", "plt", ".", "close", "(", "fig", ")", "vmax", "=", "max", "(", "np", ".", "max", "(", "maps", "[", "'data'", "]", ".", "data", ")", ",", "np", ".", "max", "(", "maps", "[", "'model'", "]", ".", "data", ")", ")", "vmin", "=", "min", "(", "np", ".", "min", "(", "maps", "[", "'data'", "]", ".", "data", ")", ",", "np", ".", "min", "(", "maps", "[", "'model'", "]", ".", "data", ")", ")", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "p", "=", "ROIPlotter", "(", "maps", "[", "'data'", "]", ",", "roi", "=", "roi", ",", "*", "*", "kwargs", ")", "p", ".", "plot", "(", "cb_label", "=", "'Counts'", ",", "interpolation", "=", "'bicubic'", ",", "cmap", "=", "cmap", ",", "zscale", "=", "'sqrt'", ",", "vmin", "=", "vmin", ",", "vmax", "=", "vmax", ")", "plt", ".", "savefig", "(", "utils", ".", "format_filename", "(", "workdir", ",", "'residmap_data'", ",", "prefix", "=", "[", "prefix", "]", ",", "extension", "=", "fmt", ")", ")", "plt", ".", "close", "(", "fig", ")", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "p", "=", "ROIPlotter", "(", "maps", "[", "'model'", "]", ",", "roi", "=", "roi", ",", "*", "*", "kwargs", ")", "p", ".", "plot", "(", "cb_label", "=", "'Counts'", ",", "interpolation", "=", "'bicubic'", ",", "cmap", "=", "cmap", ",", "zscale", "=", "'sqrt'", ",", "vmin", "=", "vmin", ",", "vmax", "=", "vmax", ")", "plt", ".", "savefig", "(", "utils", ".", "format_filename", "(", "workdir", ",", "'residmap_model'", ",", "prefix", "=", "[", "prefix", "]", ",", "extension", "=", "fmt", ")", ")", "plt", ".", "close", "(", "fig", ")", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "p", "=", "ROIPlotter", "(", "maps", "[", "'excess'", "]", ",", "roi", "=", "roi", ",", "*", "*", "kwargs", ")", "p", ".", "plot", "(", "cb_label", "=", "'Counts'", ",", "interpolation", "=", "'bicubic'", ",", "cmap", "=", "cmap_resid", ")", "plt", ".", "savefig", "(", "utils", ".", "format_filename", "(", "workdir", ",", "'residmap_excess'", ",", "prefix", "=", "[", "prefix", "]", ",", "extension", "=", "fmt", ")", ")", "plt", ".", "close", "(", "fig", ")" ]
Make plots from the output of `~fermipy.gtanalysis.GTAnalysis.residmap`. Parameters ---------- maps : dict Output dictionary of `~fermipy.gtanalysis.GTAnalysis.residmap`. roi : `~fermipy.roi_model.ROIModel` ROI Model object. Generate markers at the positions of the sources in this ROI. zoom : float Crop the image by this factor. If None then no crop is applied.
[ "Make", "plots", "from", "the", "output", "of", "~fermipy", ".", "gtanalysis", ".", "GTAnalysis", ".", "residmap", "." ]
python
train
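One step of this plotting routine that lends itself to a small worked example is the Gaussian fit over the significance map; the sketch below runs the same scipy call on synthetic data rather than a real residual map.

import numpy as np
from scipy.stats import norm

sigma_map = np.random.default_rng(0).normal(0.0, 1.1, size=10000)  # stand-in for maps['sigma'].data
mu, sigma = norm.fit(sigma_map.flatten())  # the fit drawn over the significance histogram
print("mu=%.2f sigma=%.2f" % (mu, sigma))  # close to 0.00 and 1.10 for this synthetic input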
rwl/godot
godot/edge.py
https://github.com/rwl/godot/blob/013687c9e8983d2aa2ceebb8a76c5c4f1e37c90f/godot/edge.py#L733-L776
def _parse_xdot_directive(self, name, new): """ Handles parsing Xdot drawing directives. """ parser = XdotAttrParser() components = parser.parse_xdot_data(new) # The absolute coordinate of the drawing container wrt graph origin. x1 = min( [c.x for c in components] ) y1 = min( [c.y for c in components] ) print "X1/Y1:", name, x1, y1 # Components are positioned relative to their container. This # function positions the bottom-left corner of the components at # their origin rather than relative to the graph. # move_to_origin( components ) for c in components: if isinstance(c, Ellipse): component.x_origin -= x1 component.y_origin -= y1 # c.position = [ c.x - x1, c.y - y1 ] elif isinstance(c, (Polygon, BSpline)): print "Points:", c.points c.points = [ (t[0] - x1, t[1] - y1) for t in c.points ] print "Points:", c.points elif isinstance(c, Text): # font = str_to_font( str(c.pen.font) ) c.text_x, c.text_y = c.x - x1, c.y - y1 container = Container(auto_size=True, position=[ x1, y1 ], bgcolor="yellow") container.add( *components ) if name == "_draw_": self.drawing = container elif name == "_hdraw_": self.arrowhead_drawing = container else: raise
[ "def", "_parse_xdot_directive", "(", "self", ",", "name", ",", "new", ")", ":", "parser", "=", "XdotAttrParser", "(", ")", "components", "=", "parser", ".", "parse_xdot_data", "(", "new", ")", "# The absolute coordinate of the drawing container wrt graph origin.", "x1", "=", "min", "(", "[", "c", ".", "x", "for", "c", "in", "components", "]", ")", "y1", "=", "min", "(", "[", "c", ".", "y", "for", "c", "in", "components", "]", ")", "print", "\"X1/Y1:\"", ",", "name", ",", "x1", ",", "y1", "# Components are positioned relative to their container. This", "# function positions the bottom-left corner of the components at", "# their origin rather than relative to the graph.", "# move_to_origin( components )", "for", "c", "in", "components", ":", "if", "isinstance", "(", "c", ",", "Ellipse", ")", ":", "component", ".", "x_origin", "-=", "x1", "component", ".", "y_origin", "-=", "y1", "# c.position = [ c.x - x1, c.y - y1 ]", "elif", "isinstance", "(", "c", ",", "(", "Polygon", ",", "BSpline", ")", ")", ":", "print", "\"Points:\"", ",", "c", ".", "points", "c", ".", "points", "=", "[", "(", "t", "[", "0", "]", "-", "x1", ",", "t", "[", "1", "]", "-", "y1", ")", "for", "t", "in", "c", ".", "points", "]", "print", "\"Points:\"", ",", "c", ".", "points", "elif", "isinstance", "(", "c", ",", "Text", ")", ":", "# font = str_to_font( str(c.pen.font) )", "c", ".", "text_x", ",", "c", ".", "text_y", "=", "c", ".", "x", "-", "x1", ",", "c", ".", "y", "-", "y1", "container", "=", "Container", "(", "auto_size", "=", "True", ",", "position", "=", "[", "x1", ",", "y1", "]", ",", "bgcolor", "=", "\"yellow\"", ")", "container", ".", "add", "(", "*", "components", ")", "if", "name", "==", "\"_draw_\"", ":", "self", ".", "drawing", "=", "container", "elif", "name", "==", "\"_hdraw_\"", ":", "self", ".", "arrowhead_drawing", "=", "container", "else", ":", "raise" ]
Handles parsing Xdot drawing directives.
[ "Handles", "parsing", "Xdot", "drawing", "directives", "." ]
python
test
SatelliteQE/nailgun
nailgun/entities.py
https://github.com/SatelliteQE/nailgun/blob/c36d8c20862e87bf6975bd48ac1ca40a9e634eaa/nailgun/entities.py#L5410-L5441
def read(self, entity=None, attrs=None, ignore=None, params=None): """Fetch an attribute missing from the server's response. Also add sync plan to the response if needed, as :meth:`nailgun.entity_mixins.EntityReadMixin.read` can't initialize sync plan. For more information, see `Bugzilla #1237283 <https://bugzilla.redhat.com/show_bug.cgi?id=1237283>`_ and `nailgun#261 <https://github.com/SatelliteQE/nailgun/issues/261>`_. """ if attrs is None: attrs = self.read_json() if _get_version(self._server_config) < Version('6.1'): org = _get_org(self._server_config, attrs['organization']['label']) attrs['organization'] = org.get_values() if ignore is None: ignore = set() ignore.add('sync_plan') result = super(Product, self).read(entity, attrs, ignore, params) if 'sync_plan' in attrs: sync_plan_id = attrs.get('sync_plan_id') if sync_plan_id is None: result.sync_plan = None else: result.sync_plan = SyncPlan( server_config=self._server_config, id=sync_plan_id, organization=result.organization, ) return result
[ "def", "read", "(", "self", ",", "entity", "=", "None", ",", "attrs", "=", "None", ",", "ignore", "=", "None", ",", "params", "=", "None", ")", ":", "if", "attrs", "is", "None", ":", "attrs", "=", "self", ".", "read_json", "(", ")", "if", "_get_version", "(", "self", ".", "_server_config", ")", "<", "Version", "(", "'6.1'", ")", ":", "org", "=", "_get_org", "(", "self", ".", "_server_config", ",", "attrs", "[", "'organization'", "]", "[", "'label'", "]", ")", "attrs", "[", "'organization'", "]", "=", "org", ".", "get_values", "(", ")", "if", "ignore", "is", "None", ":", "ignore", "=", "set", "(", ")", "ignore", ".", "add", "(", "'sync_plan'", ")", "result", "=", "super", "(", "Product", ",", "self", ")", ".", "read", "(", "entity", ",", "attrs", ",", "ignore", ",", "params", ")", "if", "'sync_plan'", "in", "attrs", ":", "sync_plan_id", "=", "attrs", ".", "get", "(", "'sync_plan_id'", ")", "if", "sync_plan_id", "is", "None", ":", "result", ".", "sync_plan", "=", "None", "else", ":", "result", ".", "sync_plan", "=", "SyncPlan", "(", "server_config", "=", "self", ".", "_server_config", ",", "id", "=", "sync_plan_id", ",", "organization", "=", "result", ".", "organization", ",", ")", "return", "result" ]
Fetch an attribute missing from the server's response. Also add sync plan to the response if needed, as :meth:`nailgun.entity_mixins.EntityReadMixin.read` can't initialize sync plan. For more information, see `Bugzilla #1237283 <https://bugzilla.redhat.com/show_bug.cgi?id=1237283>`_ and `nailgun#261 <https://github.com/SatelliteQE/nailgun/issues/261>`_.
[ "Fetch", "an", "attribute", "missing", "from", "the", "server", "s", "response", "." ]
python
train
apache/incubator-heron
heron/instance/src/python/utils/metrics/metrics_helper.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/instance/src/python/utils/metrics/metrics_helper.py#L192-L194
def serialize_data_tuple(self, stream_id, latency_in_ns): """Apply update to serialization metrics""" self.update_count(self.TUPLE_SERIALIZATION_TIME_NS, incr_by=latency_in_ns, key=stream_id)
[ "def", "serialize_data_tuple", "(", "self", ",", "stream_id", ",", "latency_in_ns", ")", ":", "self", ".", "update_count", "(", "self", ".", "TUPLE_SERIALIZATION_TIME_NS", ",", "incr_by", "=", "latency_in_ns", ",", "key", "=", "stream_id", ")" ]
Apply update to serialization metrics
[ "Apply", "update", "to", "serialization", "metrics" ]
python
valid
nerdvegas/rez
src/rez/utils/filesystem.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/utils/filesystem.py#L170-L185
def safe_remove(path): """Safely remove the given file or directory. Works in a multithreaded scenario. """ if not os.path.exists(path): return try: if os.path.isdir(path) and not os.path.islink(path): shutil.rmtree(path) else: os.remove(path) except OSError: if os.path.exists(path): raise
[ "def", "safe_remove", "(", "path", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "return", "try", ":", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", "and", "not", "os", ".", "path", ".", "islink", "(", "path", ")", ":", "shutil", ".", "rmtree", "(", "path", ")", "else", ":", "os", ".", "remove", "(", "path", ")", "except", "OSError", ":", "if", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise" ]
Safely remove the given file or directory. Works in a multithreaded scenario.
[ "Safely", "remove", "the", "given", "file", "or", "directory", "." ]
python
train
senaite/senaite.core
bika/lims/api/__init__.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/api/__init__.py#L928-L939
def get_version(brain_or_object): """Get the version of the current object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: The current version of the object, or None if not available :rtype: int or None """ obj = get_object(brain_or_object) if not is_versionable(obj): return None return getattr(aq_base(obj), "version_id", 0)
[ "def", "get_version", "(", "brain_or_object", ")", ":", "obj", "=", "get_object", "(", "brain_or_object", ")", "if", "not", "is_versionable", "(", "obj", ")", ":", "return", "None", "return", "getattr", "(", "aq_base", "(", "obj", ")", ",", "\"version_id\"", ",", "0", ")" ]
Get the version of the current object :param brain_or_object: A single catalog brain or content object :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: The current version of the object, or None if not available :rtype: int or None
[ "Get", "the", "version", "of", "the", "current", "object" ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/core/core_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/core/core_client.py#L260-L273
def queue_delete_project(self, project_id): """QueueDeleteProject. [Preview API] Queues a project to be deleted. Use the [GetOperation](../../operations/operations/get) to periodically check for delete project status. :param str project_id: The project id of the project to delete. :rtype: :class:`<OperationReference> <azure.devops.v5_1.core.models.OperationReference>` """ route_values = {} if project_id is not None: route_values['projectId'] = self._serialize.url('project_id', project_id, 'str') response = self._send(http_method='DELETE', location_id='603fe2ac-9723-48b9-88ad-09305aa6c6e1', version='5.1-preview.4', route_values=route_values) return self._deserialize('OperationReference', response)
[ "def", "queue_delete_project", "(", "self", ",", "project_id", ")", ":", "route_values", "=", "{", "}", "if", "project_id", "is", "not", "None", ":", "route_values", "[", "'projectId'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'project_id'", ",", "project_id", ",", "'str'", ")", "response", "=", "self", ".", "_send", "(", "http_method", "=", "'DELETE'", ",", "location_id", "=", "'603fe2ac-9723-48b9-88ad-09305aa6c6e1'", ",", "version", "=", "'5.1-preview.4'", ",", "route_values", "=", "route_values", ")", "return", "self", ".", "_deserialize", "(", "'OperationReference'", ",", "response", ")" ]
QueueDeleteProject. [Preview API] Queues a project to be deleted. Use the [GetOperation](../../operations/operations/get) to periodically check for delete project status. :param str project_id: The project id of the project to delete. :rtype: :class:`<OperationReference> <azure.devops.v5_1.core.models.OperationReference>`
[ "QueueDeleteProject", ".", "[", "Preview", "API", "]", "Queues", "a", "project", "to", "be", "deleted", ".", "Use", "the", "[", "GetOperation", "]", "(", "..", "/", "..", "/", "operations", "/", "operations", "/", "get", ")", "to", "periodically", "check", "for", "delete", "project", "status", ".", ":", "param", "str", "project_id", ":", "The", "project", "id", "of", "the", "project", "to", "delete", ".", ":", "rtype", ":", ":", "class", ":", "<OperationReference", ">", "<azure", ".", "devops", ".", "v5_1", ".", "core", ".", "models", ".", "OperationReference", ">" ]
python
train
pypa/pipenv
pipenv/patched/notpip/_internal/pep425tags.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/pep425tags.py#L275-L381
def get_supported( versions=None, # type: Optional[List[str]] noarch=False, # type: bool platform=None, # type: Optional[str] impl=None, # type: Optional[str] abi=None # type: Optional[str] ): # type: (...) -> List[Pep425Tag] """Return a list of supported tags for each version specified in `versions`. :param versions: a list of string versions, of the form ["33", "32"], or None. The first version will be assumed to support our ABI. :param platform: specify the exact platform you want valid tags for, or None. If None, use the local system platform. :param impl: specify the exact implementation you want valid tags for, or None. If None, use the local interpreter impl. :param abi: specify the exact abi you want valid tags for, or None. If None, use the local interpreter abi. """ supported = [] # Versions must be given with respect to the preference if versions is None: version_info = get_impl_version_info() versions = get_all_minor_versions_as_strings(version_info) impl = impl or get_abbr_impl() abis = [] # type: List[str] abi = abi or get_abi_tag() if abi: abis[0:0] = [abi] abi3s = set() for suffix in get_extension_suffixes(): if suffix.startswith('.abi'): abi3s.add(suffix.split('.', 2)[1]) abis.extend(sorted(list(abi3s))) abis.append('none') if not noarch: arch = platform or get_platform() arch_prefix, arch_sep, arch_suffix = arch.partition('_') if arch.startswith('macosx'): # support macosx-10.6-intel on macosx-10.9-x86_64 match = _osx_arch_pat.match(arch) if match: name, major, minor, actual_arch = match.groups() tpl = '{}_{}_%i_%s'.format(name, major) arches = [] for m in reversed(range(int(minor) + 1)): for a in get_darwin_arches(int(major), m, actual_arch): arches.append(tpl % (m, a)) else: # arch pattern didn't match (?!) arches = [arch] elif arch_prefix == 'manylinux2010': # manylinux1 wheels run on most manylinux2010 systems with the # exception of wheels depending on ncurses. PEP 571 states # manylinux1 wheels should be considered manylinux2010 wheels: # https://www.python.org/dev/peps/pep-0571/#backwards-compatibility-with-manylinux1-wheels arches = [arch, 'manylinux1' + arch_sep + arch_suffix] elif platform is None: arches = [] if is_manylinux2010_compatible(): arches.append('manylinux2010' + arch_sep + arch_suffix) if is_manylinux1_compatible(): arches.append('manylinux1' + arch_sep + arch_suffix) arches.append(arch) else: arches = [arch] # Current version, current API (built specifically for our Python): for abi in abis: for arch in arches: supported.append(('%s%s' % (impl, versions[0]), abi, arch)) # abi3 modules compatible with older version of Python for version in versions[1:]: # abi3 was introduced in Python 3.2 if version in {'31', '30'}: break for abi in abi3s: # empty set if not Python 3 for arch in arches: supported.append(("%s%s" % (impl, version), abi, arch)) # Has binaries, does not use the Python API: for arch in arches: supported.append(('py%s' % (versions[0][0]), 'none', arch)) # No abi / arch, but requires our implementation: supported.append(('%s%s' % (impl, versions[0]), 'none', 'any')) # Tagged specifically as being cross-version compatible # (with just the major version specified) supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any')) # No abi / arch, generic Python for i, version in enumerate(versions): supported.append(('py%s' % (version,), 'none', 'any')) if i == 0: supported.append(('py%s' % (version[0]), 'none', 'any')) return supported
[ "def", "get_supported", "(", "versions", "=", "None", ",", "# type: Optional[List[str]]", "noarch", "=", "False", ",", "# type: bool", "platform", "=", "None", ",", "# type: Optional[str]", "impl", "=", "None", ",", "# type: Optional[str]", "abi", "=", "None", "# type: Optional[str]", ")", ":", "# type: (...) -> List[Pep425Tag]", "supported", "=", "[", "]", "# Versions must be given with respect to the preference", "if", "versions", "is", "None", ":", "version_info", "=", "get_impl_version_info", "(", ")", "versions", "=", "get_all_minor_versions_as_strings", "(", "version_info", ")", "impl", "=", "impl", "or", "get_abbr_impl", "(", ")", "abis", "=", "[", "]", "# type: List[str]", "abi", "=", "abi", "or", "get_abi_tag", "(", ")", "if", "abi", ":", "abis", "[", "0", ":", "0", "]", "=", "[", "abi", "]", "abi3s", "=", "set", "(", ")", "for", "suffix", "in", "get_extension_suffixes", "(", ")", ":", "if", "suffix", ".", "startswith", "(", "'.abi'", ")", ":", "abi3s", ".", "add", "(", "suffix", ".", "split", "(", "'.'", ",", "2", ")", "[", "1", "]", ")", "abis", ".", "extend", "(", "sorted", "(", "list", "(", "abi3s", ")", ")", ")", "abis", ".", "append", "(", "'none'", ")", "if", "not", "noarch", ":", "arch", "=", "platform", "or", "get_platform", "(", ")", "arch_prefix", ",", "arch_sep", ",", "arch_suffix", "=", "arch", ".", "partition", "(", "'_'", ")", "if", "arch", ".", "startswith", "(", "'macosx'", ")", ":", "# support macosx-10.6-intel on macosx-10.9-x86_64", "match", "=", "_osx_arch_pat", ".", "match", "(", "arch", ")", "if", "match", ":", "name", ",", "major", ",", "minor", ",", "actual_arch", "=", "match", ".", "groups", "(", ")", "tpl", "=", "'{}_{}_%i_%s'", ".", "format", "(", "name", ",", "major", ")", "arches", "=", "[", "]", "for", "m", "in", "reversed", "(", "range", "(", "int", "(", "minor", ")", "+", "1", ")", ")", ":", "for", "a", "in", "get_darwin_arches", "(", "int", "(", "major", ")", ",", "m", ",", "actual_arch", ")", ":", "arches", ".", "append", "(", "tpl", "%", "(", "m", ",", "a", ")", ")", "else", ":", "# arch pattern didn't match (?!)", "arches", "=", "[", "arch", "]", "elif", "arch_prefix", "==", "'manylinux2010'", ":", "# manylinux1 wheels run on most manylinux2010 systems with the", "# exception of wheels depending on ncurses. 
PEP 571 states", "# manylinux1 wheels should be considered manylinux2010 wheels:", "# https://www.python.org/dev/peps/pep-0571/#backwards-compatibility-with-manylinux1-wheels", "arches", "=", "[", "arch", ",", "'manylinux1'", "+", "arch_sep", "+", "arch_suffix", "]", "elif", "platform", "is", "None", ":", "arches", "=", "[", "]", "if", "is_manylinux2010_compatible", "(", ")", ":", "arches", ".", "append", "(", "'manylinux2010'", "+", "arch_sep", "+", "arch_suffix", ")", "if", "is_manylinux1_compatible", "(", ")", ":", "arches", ".", "append", "(", "'manylinux1'", "+", "arch_sep", "+", "arch_suffix", ")", "arches", ".", "append", "(", "arch", ")", "else", ":", "arches", "=", "[", "arch", "]", "# Current version, current API (built specifically for our Python):", "for", "abi", "in", "abis", ":", "for", "arch", "in", "arches", ":", "supported", ".", "append", "(", "(", "'%s%s'", "%", "(", "impl", ",", "versions", "[", "0", "]", ")", ",", "abi", ",", "arch", ")", ")", "# abi3 modules compatible with older version of Python", "for", "version", "in", "versions", "[", "1", ":", "]", ":", "# abi3 was introduced in Python 3.2", "if", "version", "in", "{", "'31'", ",", "'30'", "}", ":", "break", "for", "abi", "in", "abi3s", ":", "# empty set if not Python 3", "for", "arch", "in", "arches", ":", "supported", ".", "append", "(", "(", "\"%s%s\"", "%", "(", "impl", ",", "version", ")", ",", "abi", ",", "arch", ")", ")", "# Has binaries, does not use the Python API:", "for", "arch", "in", "arches", ":", "supported", ".", "append", "(", "(", "'py%s'", "%", "(", "versions", "[", "0", "]", "[", "0", "]", ")", ",", "'none'", ",", "arch", ")", ")", "# No abi / arch, but requires our implementation:", "supported", ".", "append", "(", "(", "'%s%s'", "%", "(", "impl", ",", "versions", "[", "0", "]", ")", ",", "'none'", ",", "'any'", ")", ")", "# Tagged specifically as being cross-version compatible", "# (with just the major version specified)", "supported", ".", "append", "(", "(", "'%s%s'", "%", "(", "impl", ",", "versions", "[", "0", "]", "[", "0", "]", ")", ",", "'none'", ",", "'any'", ")", ")", "# No abi / arch, generic Python", "for", "i", ",", "version", "in", "enumerate", "(", "versions", ")", ":", "supported", ".", "append", "(", "(", "'py%s'", "%", "(", "version", ",", ")", ",", "'none'", ",", "'any'", ")", ")", "if", "i", "==", "0", ":", "supported", ".", "append", "(", "(", "'py%s'", "%", "(", "version", "[", "0", "]", ")", ",", "'none'", ",", "'any'", ")", ")", "return", "supported" ]
Return a list of supported tags for each version specified in `versions`. :param versions: a list of string versions, of the form ["33", "32"], or None. The first version will be assumed to support our ABI. :param platform: specify the exact platform you want valid tags for, or None. If None, use the local system platform. :param impl: specify the exact implementation you want valid tags for, or None. If None, use the local interpreter impl. :param abi: specify the exact abi you want valid tags for, or None. If None, use the local interpreter abi.
[ "Return", "a", "list", "of", "supported", "tags", "for", "each", "version", "specified", "in", "versions", "." ]
python
train
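pip's internal tag computation is awkward to call in isolation, but the same idea is exposed by the public packaging library; this hedged sketch lists a few of the (interpreter, abi, platform) tags accepted by the running interpreter and is not pip's private API.

from packaging import tags

for tag in list(tags.sys_tags())[:5]:
    print(tag.interpreter, tag.abi, tag.platform)  # e.g. cp38 cp38 manylinux1_x86_64 on a Linux build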
pypa/pipenv
pipenv/patched/notpip/_internal/utils/misc.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/utils/misc.py#L139-L149
def display_path(path): # type: (Union[str, Text]) -> str """Gives the display value for a given path, making it relative to cwd if possible.""" path = os.path.normcase(os.path.abspath(path)) if sys.version_info[0] == 2: path = path.decode(sys.getfilesystemencoding(), 'replace') path = path.encode(sys.getdefaultencoding(), 'replace') if path.startswith(os.getcwd() + os.path.sep): path = '.' + path[len(os.getcwd()):] return path
[ "def", "display_path", "(", "path", ")", ":", "# type: (Union[str, Text]) -> str", "path", "=", "os", ".", "path", ".", "normcase", "(", "os", ".", "path", ".", "abspath", "(", "path", ")", ")", "if", "sys", ".", "version_info", "[", "0", "]", "==", "2", ":", "path", "=", "path", ".", "decode", "(", "sys", ".", "getfilesystemencoding", "(", ")", ",", "'replace'", ")", "path", "=", "path", ".", "encode", "(", "sys", ".", "getdefaultencoding", "(", ")", ",", "'replace'", ")", "if", "path", ".", "startswith", "(", "os", ".", "getcwd", "(", ")", "+", "os", ".", "path", ".", "sep", ")", ":", "path", "=", "'.'", "+", "path", "[", "len", "(", "os", ".", "getcwd", "(", ")", ")", ":", "]", "return", "path" ]
Gives the display value for a given path, making it relative to cwd if possible.
[ "Gives", "the", "display", "value", "for", "a", "given", "path", "making", "it", "relative", "to", "cwd", "if", "possible", "." ]
python
train
cedrus-opensource/pyxid
pyxid/pyxid_impl.py
https://github.com/cedrus-opensource/pyxid/blob/02dba3a825f0d4f4c0bfa044c6a361492e4c25b6/pyxid/pyxid_impl.py#L363-L417
def init_device(self): """ Initializes the device with the proper keymaps and name """ try: product_id = int(self._send_command('_d2', 1)) except ValueError: product_id = self._send_command('_d2', 1) if product_id == 0: self._impl = ResponseDevice( self.con, 'Cedrus Lumina LP-400 Response Pad System', lumina_keymap) elif product_id == 1: self._impl = ResponseDevice( self.con, 'Cedrus SV-1 Voice Key', None, 'Voice Response') elif product_id == 2: model_id = int(self._send_command('_d3', 1)) if model_id == 1: self._impl = ResponseDevice( self.con, 'Cedrus RB-530', rb_530_keymap) elif model_id == 2: self._impl = ResponseDevice( self.con, 'Cedrus RB-730', rb_730_keymap) elif model_id == 3: self._impl = ResponseDevice( self.con, 'Cedrus RB-830', rb_830_keymap) elif model_id == 4: self._impl = ResponseDevice( self.con, 'Cedrus RB-834', rb_834_keymap) else: raise XidError('Unknown RB Device') elif product_id == 4: self._impl = StimTracker( self.con, 'Cedrus C-POD') elif product_id == b'S': self._impl = StimTracker( self.con, 'Cedrus StimTracker') elif product_id == -99: raise XidError('Invalid XID device')
[ "def", "init_device", "(", "self", ")", ":", "try", ":", "product_id", "=", "int", "(", "self", ".", "_send_command", "(", "'_d2'", ",", "1", ")", ")", "except", "ValueError", ":", "product_id", "=", "self", ".", "_send_command", "(", "'_d2'", ",", "1", ")", "if", "product_id", "==", "0", ":", "self", ".", "_impl", "=", "ResponseDevice", "(", "self", ".", "con", ",", "'Cedrus Lumina LP-400 Response Pad System'", ",", "lumina_keymap", ")", "elif", "product_id", "==", "1", ":", "self", ".", "_impl", "=", "ResponseDevice", "(", "self", ".", "con", ",", "'Cedrus SV-1 Voice Key'", ",", "None", ",", "'Voice Response'", ")", "elif", "product_id", "==", "2", ":", "model_id", "=", "int", "(", "self", ".", "_send_command", "(", "'_d3'", ",", "1", ")", ")", "if", "model_id", "==", "1", ":", "self", ".", "_impl", "=", "ResponseDevice", "(", "self", ".", "con", ",", "'Cedrus RB-530'", ",", "rb_530_keymap", ")", "elif", "model_id", "==", "2", ":", "self", ".", "_impl", "=", "ResponseDevice", "(", "self", ".", "con", ",", "'Cedrus RB-730'", ",", "rb_730_keymap", ")", "elif", "model_id", "==", "3", ":", "self", ".", "_impl", "=", "ResponseDevice", "(", "self", ".", "con", ",", "'Cedrus RB-830'", ",", "rb_830_keymap", ")", "elif", "model_id", "==", "4", ":", "self", ".", "_impl", "=", "ResponseDevice", "(", "self", ".", "con", ",", "'Cedrus RB-834'", ",", "rb_834_keymap", ")", "else", ":", "raise", "XidError", "(", "'Unknown RB Device'", ")", "elif", "product_id", "==", "4", ":", "self", ".", "_impl", "=", "StimTracker", "(", "self", ".", "con", ",", "'Cedrus C-POD'", ")", "elif", "product_id", "==", "b'S'", ":", "self", ".", "_impl", "=", "StimTracker", "(", "self", ".", "con", ",", "'Cedrus StimTracker'", ")", "elif", "product_id", "==", "-", "99", ":", "raise", "XidError", "(", "'Invalid XID device'", ")" ]
Initializes the device with the proper keymaps and name
[ "Initializes", "the", "device", "with", "the", "proper", "keymaps", "and", "name" ]
python
train
F-Secure/see
see/context/resources/lxc.py
https://github.com/F-Secure/see/blob/3e053e52a45229f96a12db9e98caf7fb3880e811/see/context/resources/lxc.py#L93-L122
def domain_xml(identifier, xml, mounts, network_name=None): """Fills the XML file with the required fields. @param identifier: (str) UUID of the Environment. @param xml: (str) XML configuration of the domain. @param filesystem: (tuple) ((source, target), (source, target)) * name * uuid * devices * network * filesystem """ domain = etree.fromstring(xml) subelement(domain, './/name', 'name', identifier) subelement(domain, './/uuid', 'uuid', identifier) devices = subelement(domain, './/devices', 'devices', None) for mount in mounts: filesystem = etree.SubElement(devices, 'filesystem', type='mount') etree.SubElement(filesystem, 'source', dir=mount[0]) etree.SubElement(filesystem, 'target', dir=mount[1]) if network_name is not None: network = subelement(devices, './/interface[@type="network"]', 'interface', None, type='network') subelement(network, './/source', 'source', None, network=network_name) return etree.tostring(domain).decode('utf-8')
[ "def", "domain_xml", "(", "identifier", ",", "xml", ",", "mounts", ",", "network_name", "=", "None", ")", ":", "domain", "=", "etree", ".", "fromstring", "(", "xml", ")", "subelement", "(", "domain", ",", "'.//name'", ",", "'name'", ",", "identifier", ")", "subelement", "(", "domain", ",", "'.//uuid'", ",", "'uuid'", ",", "identifier", ")", "devices", "=", "subelement", "(", "domain", ",", "'.//devices'", ",", "'devices'", ",", "None", ")", "for", "mount", "in", "mounts", ":", "filesystem", "=", "etree", ".", "SubElement", "(", "devices", ",", "'filesystem'", ",", "type", "=", "'mount'", ")", "etree", ".", "SubElement", "(", "filesystem", ",", "'source'", ",", "dir", "=", "mount", "[", "0", "]", ")", "etree", ".", "SubElement", "(", "filesystem", ",", "'target'", ",", "dir", "=", "mount", "[", "1", "]", ")", "if", "network_name", "is", "not", "None", ":", "network", "=", "subelement", "(", "devices", ",", "'.//interface[@type=\"network\"]'", ",", "'interface'", ",", "None", ",", "type", "=", "'network'", ")", "subelement", "(", "network", ",", "'.//source'", ",", "'source'", ",", "None", ",", "network", "=", "network_name", ")", "return", "etree", ".", "tostring", "(", "domain", ")", ".", "decode", "(", "'utf-8'", ")" ]
Fills the XML file with the required fields. @param identifier: (str) UUID of the Environment. @param xml: (str) XML configuration of the domain. @param filesystem: (tuple) ((source, target), (source, target)) * name * uuid * devices * network * filesystem
[ "Fills", "the", "XML", "file", "with", "the", "required", "fields", "." ]
python
train
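A self-contained sketch of the filesystem mount elements this function appends under <devices>, built directly with xml.etree so it runs without the module's subelement helper; the source/target paths are hypothetical.

import xml.etree.ElementTree as etree

devices = etree.Element("devices")
for source, target in [("/srv/see/share", "/mnt/share")]:
    filesystem = etree.SubElement(devices, "filesystem", type="mount")
    etree.SubElement(filesystem, "source", dir=source)
    etree.SubElement(filesystem, "target", dir=target)

print(etree.tostring(devices).decode("utf-8"))
# <devices><filesystem type="mount"><source dir="/srv/see/share" /><target dir="/mnt/share" /></filesystem></devices>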
sporteasy/python-poeditor
poeditor/client.py
https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L242-L251
def list_project_languages(self, project_id): """ Returns project languages, percentage of translation done for each and the datetime (UTC - ISO 8601) when the last change was made. """ data = self._run( url_path="languages/list", id=project_id ) return data['result'].get('languages', [])
[ "def", "list_project_languages", "(", "self", ",", "project_id", ")", ":", "data", "=", "self", ".", "_run", "(", "url_path", "=", "\"languages/list\"", ",", "id", "=", "project_id", ")", "return", "data", "[", "'result'", "]", ".", "get", "(", "'languages'", ",", "[", "]", ")" ]
Returns project languages, percentage of translation done for each and the datetime (UTC - ISO 8601) when the last change was made.
[ "Returns", "project", "languages", "percentage", "of", "translation", "done", "for", "each", "and", "the", "datetime", "(", "UTC", "-", "ISO", "8601", ")", "when", "the", "last", "change", "was", "made", "." ]
python
train
inasafe/inasafe
safe/report/impact_report.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/report/impact_report.py#L674-L688
def map_title(self): """Get the map title from the layer keywords if possible. :returns: None on error, otherwise the title. :rtype: None, str """ # noinspection PyBroadException try: title = self._keyword_io.read_keywords( self.impact, 'map_title') return title except KeywordNotFoundError: return None except Exception: # pylint: disable=broad-except return None
[ "def", "map_title", "(", "self", ")", ":", "# noinspection PyBroadException", "try", ":", "title", "=", "self", ".", "_keyword_io", ".", "read_keywords", "(", "self", ".", "impact", ",", "'map_title'", ")", "return", "title", "except", "KeywordNotFoundError", ":", "return", "None", "except", "Exception", ":", "# pylint: disable=broad-except", "return", "None" ]
Get the map title from the layer keywords if possible. :returns: None on error, otherwise the title. :rtype: None, str
[ "Get", "the", "map", "title", "from", "the", "layer", "keywords", "if", "possible", "." ]
python
train
globality-corp/microcosm-flask
microcosm_flask/swagger/definitions.py
https://github.com/globality-corp/microcosm-flask/blob/c2eaf57f03e7d041eea343751a4a90fcc80df418/microcosm_flask/swagger/definitions.py#L163-L173
def query_param(name, field, required=False): """ Build a query parameter definition. """ parameter = build_parameter(field) parameter["name"] = name parameter["in"] = "query" parameter["required"] = False return swagger.QueryParameterSubSchema(**parameter)
[ "def", "query_param", "(", "name", ",", "field", ",", "required", "=", "False", ")", ":", "parameter", "=", "build_parameter", "(", "field", ")", "parameter", "[", "\"name\"", "]", "=", "name", "parameter", "[", "\"in\"", "]", "=", "\"query\"", "parameter", "[", "\"required\"", "]", "=", "False", "return", "swagger", ".", "QueryParameterSubSchema", "(", "*", "*", "parameter", ")" ]
Build a query parameter definition.
[ "Build", "a", "query", "parameter", "definition", "." ]
python
train
dhermes/bezier
src/bezier/_helpers.py
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_helpers.py#L210-L245
def _wiggle_interval(value, wiggle=0.5 ** 44): r"""Check if ``value`` is in :math:`\left[0, 1\right]`. Allows a little bit of wiggle room outside the interval. Any value within ``wiggle`` of ``0.0`` will be converted to ``0.0`` and similar for ``1.0``. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Args: value (float): Value to check in interval. wiggle (Optional[float]): The amount of wiggle room around the endpoints ``0.0`` and ``1.0``. Returns: Tuple[float, bool]: Pair of * The ``value`` if it's in the interval, or ``0`` or ``1`` if the value lies slightly outside. If the ``value`` is too far outside the unit interval, will be NaN. * Boolean indicating if the ``value`` is inside the unit interval. """ if -wiggle < value < wiggle: return 0.0, True elif wiggle <= value <= 1.0 - wiggle: return value, True elif 1.0 - wiggle < value < 1.0 + wiggle: return 1.0, True else: return np.nan, False
[ "def", "_wiggle_interval", "(", "value", ",", "wiggle", "=", "0.5", "**", "44", ")", ":", "if", "-", "wiggle", "<", "value", "<", "wiggle", ":", "return", "0.0", ",", "True", "elif", "wiggle", "<=", "value", "<=", "1.0", "-", "wiggle", ":", "return", "value", ",", "True", "elif", "1.0", "-", "wiggle", "<", "value", "<", "1.0", "+", "wiggle", ":", "return", "1.0", ",", "True", "else", ":", "return", "np", ".", "nan", ",", "False" ]
r"""Check if ``value`` is in :math:`\left[0, 1\right]`. Allows a little bit of wiggle room outside the interval. Any value within ``wiggle`` of ``0.0` will be converted to ``0.0` and similar for ``1.0``. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Args: value (float): Value to check in interval. wiggle (Optional[float]): The amount of wiggle room around the the endpoints ``0.0`` and ``1.0``. Returns: Tuple[float, bool]: Pair of * The ``value`` if it's in the interval, or ``0`` or ``1`` if the value lies slightly outside. If the ``value`` is too far outside the unit interval, will be NaN. * Boolean indicating if the ``value`` is inside the unit interval.
[ "r", "Check", "if", "value", "is", "in", ":", "math", ":", "\\", "left", "[", "0", "1", "\\", "right", "]", "." ]
python
train
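A worked example of the clamping behaviour, with the pure-Python branch re-stated locally so it runs without building the optional Fortran speedup.

import numpy as np

def wiggle_interval(value, wiggle=0.5 ** 44):
    if -wiggle < value < wiggle:
        return 0.0, True
    elif wiggle <= value <= 1.0 - wiggle:
        return value, True
    elif 1.0 - wiggle < value < 1.0 + wiggle:
        return 1.0, True
    return np.nan, False

print(wiggle_interval(-1e-15))       # (0.0, True)  snapped onto the lower endpoint
print(wiggle_interval(0.5))          # (0.5, True)  already inside the interval
print(wiggle_interval(1.0 + 1e-15))  # (1.0, True)  snapped onto the upper endpoint
print(wiggle_interval(1.5))          # (nan, False) too far outside [0, 1]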
staggerpkg/stagger
stagger/tags.py
https://github.com/staggerpkg/stagger/blob/6530db14afc5d7d8a4599b7f3b26158fb367d786/stagger/tags.py#L95-L117
def detect_tag(filename): """Return type and position of ID3v2 tag in filename. Returns (tag_class, offset, length), where tag_class is either Tag22, Tag23, or Tag24, and (offset, length) is the position of the tag in the file. """ with fileutil.opened(filename, "rb") as file: file.seek(0) header = file.read(10) file.seek(0) if len(header) < 10: raise NoTagError("File too short") if header[0:3] != b"ID3": raise NoTagError("ID3v2 tag not found") if header[3] not in _tag_versions or header[4] != 0: raise TagError("Unknown ID3 version: 2.{0}.{1}" .format(*header[3:5])) cls = _tag_versions[header[3]] offset = 0 length = Syncsafe.decode(header[6:10]) + 10 if header[3] == 4 and header[5] & _TAG24_FOOTER: length += 10 return (cls, offset, length)
[ "def", "detect_tag", "(", "filename", ")", ":", "with", "fileutil", ".", "opened", "(", "filename", ",", "\"rb\"", ")", "as", "file", ":", "file", ".", "seek", "(", "0", ")", "header", "=", "file", ".", "read", "(", "10", ")", "file", ".", "seek", "(", "0", ")", "if", "len", "(", "header", ")", "<", "10", ":", "raise", "NoTagError", "(", "\"File too short\"", ")", "if", "header", "[", "0", ":", "3", "]", "!=", "b\"ID3\"", ":", "raise", "NoTagError", "(", "\"ID3v2 tag not found\"", ")", "if", "header", "[", "3", "]", "not", "in", "_tag_versions", "or", "header", "[", "4", "]", "!=", "0", ":", "raise", "TagError", "(", "\"Unknown ID3 version: 2.{0}.{1}\"", ".", "format", "(", "*", "header", "[", "3", ":", "5", "]", ")", ")", "cls", "=", "_tag_versions", "[", "header", "[", "3", "]", "]", "offset", "=", "0", "length", "=", "Syncsafe", ".", "decode", "(", "header", "[", "6", ":", "10", "]", ")", "+", "10", "if", "header", "[", "3", "]", "==", "4", "and", "header", "[", "5", "]", "&", "_TAG24_FOOTER", ":", "length", "+=", "10", "return", "(", "cls", ",", "offset", ",", "length", ")" ]
Return type and position of ID3v2 tag in filename. Returns (tag_class, offset, length), where tag_class is either Tag22, Tag23, or Tag24, and (offset, length) is the position of the tag in the file.
[ "Return", "type", "and", "position", "of", "ID3v2", "tag", "in", "filename", ".", "Returns", "(", "tag_class", "offset", "length", ")", "where", "tag_class", "is", "either", "Tag22", "Tag23", "or", "Tag24", "and", "(", "offset", "length", ")", "is", "the", "position", "of", "the", "tag", "in", "the", "file", "." ]
python
train
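The tag length read by detect_tag is a syncsafe integer (four bytes, seven payload bits each, high bit always clear); the sketch below shows that decoding step in isolation, as an illustration of what Syncsafe.decode computes rather than stagger's own implementation.

def syncsafe_decode(data):
    value = 0
    for byte in data:
        assert byte < 0x80, "syncsafe bytes never set the high bit"
        value = (value << 7) | byte
    return value

print(syncsafe_decode(b"\x00\x00\x02\x01"))  # 257 -> a 257-byte tag body, plus 10 header bytes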
twitterdev/twitter-python-ads-sdk
twitter_ads/audience.py
https://github.com/twitterdev/twitter-python-ads-sdk/blob/b4488333ac2a794b85b7f16ded71e98b60e51c74/twitter_ads/audience.py#L196-L208
def demographics(self): """ Get the demographic breakdown for an input targeting criteria """ body = { "audience_definition": self.audience_definition, "targeting_inputs": self.targeting_inputs } resource = self.RESOURCE_DEMOGRAPHICS.format(account_id=self.account.id) response = Request( self.account.client, self.METHOD, resource, headers=self.HEADERS, body=json.dumps(body)).perform() return response.body['data']
[ "def", "demographics", "(", "self", ")", ":", "body", "=", "{", "\"audience_definition\"", ":", "self", ".", "audience_definition", ",", "\"targeting_inputs\"", ":", "self", ".", "targeting_inputs", "}", "resource", "=", "self", ".", "RESOURCE_DEMOGRAPHICS", ".", "format", "(", "account_id", "=", "self", ".", "account", ".", "id", ")", "response", "=", "Request", "(", "self", ".", "account", ".", "client", ",", "self", ".", "METHOD", ",", "resource", ",", "headers", "=", "self", ".", "HEADERS", ",", "body", "=", "json", ".", "dumps", "(", "body", ")", ")", ".", "perform", "(", ")", "return", "response", ".", "body", "[", "'data'", "]" ]
Get the demographic breakdown for an input targeting criteria
[ "Get", "the", "demographic", "breakdown", "for", "an", "input", "targeting", "criteria" ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/controllers/execution_ticker.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/execution_ticker.py#L90-L95
def disable(self): """ Relieve all state machines that have no active execution and hide the widget """ self.ticker_text_label.hide() if self.current_observed_sm_m: self.stop_sm_m_observation(self.current_observed_sm_m)
[ "def", "disable", "(", "self", ")", ":", "self", ".", "ticker_text_label", ".", "hide", "(", ")", "if", "self", ".", "current_observed_sm_m", ":", "self", ".", "stop_sm_m_observation", "(", "self", ".", "current_observed_sm_m", ")" ]
Relieve all state machines that have no active execution and hide the widget
[ "Relieve", "all", "state", "machines", "that", "have", "no", "active", "execution", "and", "hide", "the", "widget" ]
python
train
facebookresearch/fastText
python/fastText/FastText.py
https://github.com/facebookresearch/fastText/blob/6dd2e11b5fe82854c4529d2a58d699b2cb182b1b/python/fastText/FastText.py#L145-L152
def get_input_matrix(self): """ Get a copy of the full input matrix of a Model. This only works if the model is not quantized. """ if self.f.isQuant(): raise ValueError("Can't get quantized Matrix") return np.array(self.f.getInputMatrix())
[ "def", "get_input_matrix", "(", "self", ")", ":", "if", "self", ".", "f", ".", "isQuant", "(", ")", ":", "raise", "ValueError", "(", "\"Can't get quantized Matrix\"", ")", "return", "np", ".", "array", "(", "self", ".", "f", ".", "getInputMatrix", "(", ")", ")" ]
Get a copy of the full input matrix of a Model. This only works if the model is not quantized.
[ "Get", "a", "copy", "of", "the", "full", "input", "matrix", "of", "a", "Model", ".", "This", "only", "works", "if", "the", "model", "is", "not", "quantized", "." ]
python
train
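A usage sketch for get_input_matrix above; the model path is illustrative, and the call assumes a non-quantized model trained with this version of the fastText Python bindings.

from fastText.FastText import load_model

model = load_model("model.bin")      # hypothetical path to a trained .bin model
matrix = model.get_input_matrix()    # numpy array; raises ValueError for quantized models
print(matrix.shape)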
Azure/azure-multiapi-storage-python
azure/multiapi/storage/v2016_05_31/_deserialization.py
https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2016_05_31/_deserialization.py#L168-L191
def _convert_xml_to_service_stats(response): ''' <?xml version="1.0" encoding="utf-8"?> <StorageServiceStats> <GeoReplication> <Status>live|bootstrap|unavailable</Status> <LastSyncTime>sync-time|<empty></LastSyncTime> </GeoReplication> </StorageServiceStats> ''' if response is None or response.body is None: return None service_stats_element = ETree.fromstring(response.body) geo_replication_element = service_stats_element.find('GeoReplication') geo_replication = GeoReplication() geo_replication.status = geo_replication_element.find('Status').text geo_replication.last_sync_time = parser.parse(geo_replication_element.find('LastSyncTime').text) service_stats = ServiceStats() service_stats.geo_replication = geo_replication return service_stats
[ "def", "_convert_xml_to_service_stats", "(", "response", ")", ":", "if", "response", "is", "None", "or", "response", ".", "body", "is", "None", ":", "return", "None", "service_stats_element", "=", "ETree", ".", "fromstring", "(", "response", ".", "body", ")", "geo_replication_element", "=", "service_stats_element", ".", "find", "(", "'GeoReplication'", ")", "geo_replication", "=", "GeoReplication", "(", ")", "geo_replication", ".", "status", "=", "geo_replication_element", ".", "find", "(", "'Status'", ")", ".", "text", "geo_replication", ".", "last_sync_time", "=", "parser", ".", "parse", "(", "geo_replication_element", ".", "find", "(", "'LastSyncTime'", ")", ".", "text", ")", "service_stats", "=", "ServiceStats", "(", ")", "service_stats", ".", "geo_replication", "=", "geo_replication", "return", "service_stats" ]
<?xml version="1.0" encoding="utf-8"?> <StorageServiceStats> <GeoReplication> <Status>live|bootstrap|unavailable</Status> <LastSyncTime>sync-time|<empty></LastSyncTime> </GeoReplication> </StorageServiceStats>
[ "<?xml", "version", "=", "1", ".", "0", "encoding", "=", "utf", "-", "8", "?", ">", "<StorageServiceStats", ">", "<GeoReplication", ">", "<Status", ">", "live|bootstrap|unavailable<", "/", "Status", ">", "<LastSyncTime", ">", "sync", "-", "time|<empty", ">", "<", "/", "LastSyncTime", ">", "<", "/", "GeoReplication", ">", "<", "/", "StorageServiceStats", ">" ]
python
train
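The docstring above documents the StorageServiceStats payload shape. Below is a small self-contained sketch of pulling out the GeoReplication fields with the standard-library ElementTree, using a made-up sample document rather than a real service response (the SDK additionally parses LastSyncTime into a datetime).

import xml.etree.ElementTree as ETree

sample = (
    '<?xml version="1.0" encoding="utf-8"?>'
    '<StorageServiceStats>'
    '  <GeoReplication>'
    '    <Status>live</Status>'
    '    <LastSyncTime>Wed, 19 Jan 2022 22:28:43 GMT</LastSyncTime>'
    '  </GeoReplication>'
    '</StorageServiceStats>'
)

root = ETree.fromstring(sample)
geo = root.find("GeoReplication")
print(geo.find("Status").text)        # "live"
print(geo.find("LastSyncTime").text)  # raw timestamp string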
saltstack/salt
salt/utils/roster_matcher.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/roster_matcher.py#L108-L117
def ret_range_minions(self): ''' Return minions that are returned by a range query ''' if HAS_RANGE is False: raise RuntimeError("Python lib 'seco.range' is not available") minions = {} range_hosts = _convert_range_to_list(self.tgt, __opts__['range_server']) return self._ret_minions(range_hosts.__contains__)
[ "def", "ret_range_minions", "(", "self", ")", ":", "if", "HAS_RANGE", "is", "False", ":", "raise", "RuntimeError", "(", "\"Python lib 'seco.range' is not available\"", ")", "minions", "=", "{", "}", "range_hosts", "=", "_convert_range_to_list", "(", "self", ".", "tgt", ",", "__opts__", "[", "'range_server'", "]", ")", "return", "self", ".", "_ret_minions", "(", "range_hosts", ".", "__contains__", ")" ]
Return minions that are returned by a range query
[ "Return", "minions", "that", "are", "returned", "by", "a", "range", "query" ]
python
train
GoogleCloudPlatform/appengine-mapreduce
python/demo/main.py
https://github.com/GoogleCloudPlatform/appengine-mapreduce/blob/2045eb3605b6ecb40c83d11dd5442a89fe5c5dd6/python/demo/main.py#L230-L243
def phrases_map(data): """Phrases demo map function.""" (entry, text_fn) = data text = text_fn() filename = entry.filename logging.debug("Got %s", filename) for s in split_into_sentences(text): words = split_into_words(s.lower()) if len(words) < PHRASE_LENGTH: yield (":".join(words), filename) continue for i in range(0, len(words) - PHRASE_LENGTH): yield (":".join(words[i:i+PHRASE_LENGTH]), filename)
[ "def", "phrases_map", "(", "data", ")", ":", "(", "entry", ",", "text_fn", ")", "=", "data", "text", "=", "text_fn", "(", ")", "filename", "=", "entry", ".", "filename", "logging", ".", "debug", "(", "\"Got %s\"", ",", "filename", ")", "for", "s", "in", "split_into_sentences", "(", "text", ")", ":", "words", "=", "split_into_words", "(", "s", ".", "lower", "(", ")", ")", "if", "len", "(", "words", ")", "<", "PHRASE_LENGTH", ":", "yield", "(", "\":\"", ".", "join", "(", "words", ")", ",", "filename", ")", "continue", "for", "i", "in", "range", "(", "0", ",", "len", "(", "words", ")", "-", "PHRASE_LENGTH", ")", ":", "yield", "(", "\":\"", ".", "join", "(", "words", "[", "i", ":", "i", "+", "PHRASE_LENGTH", "]", ")", ",", "filename", ")" ]
Phrases demo map function.
[ "Phrases", "demo", "map", "function", "." ]
python
train
limpyd/redis-limpyd-extensions
limpyd_extensions/related.py
https://github.com/limpyd/redis-limpyd-extensions/blob/13f34e39efd2f802761457da30ab2a4213b63934/limpyd_extensions/related.py#L140-L151
def zadd(self, *args, **kwargs): """ For each score/value given as paramter, do a "zadd" call with score/self.instance as parameter call for each value. Values must be primary keys of the related model. """ if 'values_callback' not in kwargs: kwargs['values_callback'] = self._to_fields pieces = fields.SortedSetField.coerce_zadd_args(*args, **kwargs) for (score, related_field) in zip(*[iter(pieces)] * 2): related_method = getattr(related_field, 'zadd') related_method(score, self.instance._pk, values_callback=None)
[ "def", "zadd", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "'values_callback'", "not", "in", "kwargs", ":", "kwargs", "[", "'values_callback'", "]", "=", "self", ".", "_to_fields", "pieces", "=", "fields", ".", "SortedSetField", ".", "coerce_zadd_args", "(", "*", "args", ",", "*", "*", "kwargs", ")", "for", "(", "score", ",", "related_field", ")", "in", "zip", "(", "*", "[", "iter", "(", "pieces", ")", "]", "*", "2", ")", ":", "related_method", "=", "getattr", "(", "related_field", ",", "'zadd'", ")", "related_method", "(", "score", ",", "self", ".", "instance", ".", "_pk", ",", "values_callback", "=", "None", ")" ]
For each score/value pair given as a parameter, do a "zadd" call with score/self.instance as parameters, once for each value. Values must be primary keys of the related model.
[ "For", "each", "score", "/", "value", "given", "as", "paramter", "do", "a", "zadd", "call", "with", "score", "/", "self", ".", "instance", "as", "parameter", "call", "for", "each", "value", ".", "Values", "must", "be", "primary", "keys", "of", "the", "related", "model", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/controllers/state_editor/io_data_port_list.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_editor/io_data_port_list.py#L258-L269
def _apply_new_data_port_type(self, path, new_data_type_str): """Applies the new data type of the data port defined by path :param str path: The path identifying the edited data port :param str new_data_type_str: New data type as str """ try: data_port_id = self.list_store[path][self.ID_STORAGE_ID] if self.state_data_port_dict[data_port_id].data_type.__name__ != new_data_type_str: self.state_data_port_dict[data_port_id].change_data_type(new_data_type_str) except ValueError as e: logger.exception("Error while changing data type")
[ "def", "_apply_new_data_port_type", "(", "self", ",", "path", ",", "new_data_type_str", ")", ":", "try", ":", "data_port_id", "=", "self", ".", "list_store", "[", "path", "]", "[", "self", ".", "ID_STORAGE_ID", "]", "if", "self", ".", "state_data_port_dict", "[", "data_port_id", "]", ".", "data_type", ".", "__name__", "!=", "new_data_type_str", ":", "self", ".", "state_data_port_dict", "[", "data_port_id", "]", ".", "change_data_type", "(", "new_data_type_str", ")", "except", "ValueError", "as", "e", ":", "logger", ".", "exception", "(", "\"Error while changing data type\"", ")" ]
Applies the new data type of the data port defined by path :param str path: The path identifying the edited data port :param str new_data_type_str: New data type as str
[ "Applies", "the", "new", "data", "type", "of", "the", "data", "port", "defined", "by", "path" ]
python
train
saltstack/salt
salt/utils/hashutils.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/hashutils.py#L127-L137
def hmac_signature(string, shared_secret, challenge_hmac): ''' Verify a challenging hmac signature against a string / shared-secret Returns a boolean if the verification succeeded or failed. ''' msg = salt.utils.stringutils.to_bytes(string) key = salt.utils.stringutils.to_bytes(shared_secret) challenge = salt.utils.stringutils.to_bytes(challenge_hmac) hmac_hash = hmac.new(key, msg, hashlib.sha256) valid_hmac = base64.b64encode(hmac_hash.digest()) return valid_hmac == challenge
[ "def", "hmac_signature", "(", "string", ",", "shared_secret", ",", "challenge_hmac", ")", ":", "msg", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_bytes", "(", "string", ")", "key", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_bytes", "(", "shared_secret", ")", "challenge", "=", "salt", ".", "utils", ".", "stringutils", ".", "to_bytes", "(", "challenge_hmac", ")", "hmac_hash", "=", "hmac", ".", "new", "(", "key", ",", "msg", ",", "hashlib", ".", "sha256", ")", "valid_hmac", "=", "base64", ".", "b64encode", "(", "hmac_hash", ".", "digest", "(", ")", ")", "return", "valid_hmac", "==", "challenge" ]
Verify a challenge HMAC signature against a string / shared secret. Returns a boolean indicating whether the verification succeeded.
[ "Verify", "a", "challenging", "hmac", "signature", "against", "a", "string", "/", "shared", "-", "secret", "Returns", "a", "boolean", "if", "the", "verification", "succeeded", "or", "failed", "." ]
python
train
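A stand-alone sketch of the same HMAC-SHA256 verification pattern using only the standard library; the function and variable names here are illustrative, not Salt's. hmac.compare_digest is used for the final comparison to avoid timing side channels (the record above compares with ==).

import base64
import hashlib
import hmac

def verify_hmac(message: str, shared_secret: str, challenge_b64: str) -> bool:
    # Recompute the HMAC-SHA256 over the message and compare base64 digests.
    digest = hmac.new(shared_secret.encode(), message.encode(), hashlib.sha256).digest()
    expected = base64.b64encode(digest).decode()
    return hmac.compare_digest(expected, challenge_b64)

challenge = base64.b64encode(
    hmac.new(b"s3cret", b"hello", hashlib.sha256).digest()).decode()
assert verify_hmac("hello", "s3cret", challenge)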
scnerd/miniutils
miniutils/progress_bar.py
https://github.com/scnerd/miniutils/blob/fe927e26afc5877416dead28dabdf6604387f42c/miniutils/progress_bar.py#L122-L141
def parallel_progbar(mapper, iterable, nprocs=None, starmap=False, flatmap=False, shuffle=False, verbose=True, verbose_flatmap=None, **kwargs): """Performs a parallel mapping of the given iterable, reporting a progress bar as values get returned :param mapper: The mapping function to apply to elements of the iterable :param iterable: The iterable to map :param nprocs: The number of processes (defaults to the number of cpu's) :param starmap: If true, the iterable is expected to contain tuples and the mapper function gets each element of a tuple as an argument :param flatmap: If true, flatten out the returned values if the mapper function returns a list of objects :param shuffle: If true, randomly sort the elements before processing them. This might help provide more uniform runtimes if processing different objects takes different amounts of time. :param verbose: Whether or not to print the progress bar :param verbose_flatmap: If performing a flatmap, whether or not to report each object as it's returned :param kwargs: Any other keyword arguments to pass to the progress bar (see ``progbar``) :return: A list of the returned objects, in the same order as provided """ results = _parallel_progbar_launch(mapper, iterable, nprocs, starmap, flatmap, shuffle, verbose, verbose_flatmap, **kwargs) return [x for i, x in sorted(results, key=lambda p: p[0])]
[ "def", "parallel_progbar", "(", "mapper", ",", "iterable", ",", "nprocs", "=", "None", ",", "starmap", "=", "False", ",", "flatmap", "=", "False", ",", "shuffle", "=", "False", ",", "verbose", "=", "True", ",", "verbose_flatmap", "=", "None", ",", "*", "*", "kwargs", ")", ":", "results", "=", "_parallel_progbar_launch", "(", "mapper", ",", "iterable", ",", "nprocs", ",", "starmap", ",", "flatmap", ",", "shuffle", ",", "verbose", ",", "verbose_flatmap", ",", "*", "*", "kwargs", ")", "return", "[", "x", "for", "i", ",", "x", "in", "sorted", "(", "results", ",", "key", "=", "lambda", "p", ":", "p", "[", "0", "]", ")", "]" ]
Performs a parallel mapping of the given iterable, reporting a progress bar as values get returned :param mapper: The mapping function to apply to elements of the iterable :param iterable: The iterable to map :param nprocs: The number of processes (defaults to the number of cpu's) :param starmap: If true, the iterable is expected to contain tuples and the mapper function gets each element of a tuple as an argument :param flatmap: If true, flatten out the returned values if the mapper function returns a list of objects :param shuffle: If true, randomly sort the elements before processing them. This might help provide more uniform runtimes if processing different objects takes different amounts of time. :param verbose: Whether or not to print the progress bar :param verbose_flatmap: If performing a flatmap, whether or not to report each object as it's returned :param kwargs: Any other keyword arguments to pass to the progress bar (see ``progbar``) :return: A list of the returned objects, in the same order as provided
[ "Performs", "a", "parallel", "mapping", "of", "the", "given", "iterable", "reporting", "a", "progress", "bar", "as", "values", "get", "returned" ]
python
train
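A usage sketch based only on the signature documented above. It assumes the miniutils package is installed; the mapper must be a picklable, module-level callable because work is fanned out to separate processes.

from miniutils.progress_bar import parallel_progbar

def square(x):
    return x * x

if __name__ == "__main__":
    # Results are returned in the same order as the input iterable.
    results = parallel_progbar(square, range(100), nprocs=4)
    assert results == [x * x for x in range(100)]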
WimpyAnalytics/pynt-of-django
pyntofdjango/utils.py
https://github.com/WimpyAnalytics/pynt-of-django/blob/f862c20742a5c7efbf0e92c1d415e2cb2fbbef76/pyntofdjango/utils.py#L47-L53
def execute(*args, **kwargs): """A wrapper of pyntcontrib's execute that handles kwargs""" if kwargs: # TODO: Remove this when pyntcontrib's execute does this args = list(args) args.extend(_kwargs_to_execute_args(kwargs)) _execute(*args)
[ "def", "execute", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "kwargs", ":", "# TODO: Remove this when pyntcontrib's execute does this", "args", "=", "list", "(", "args", ")", "args", ".", "extend", "(", "_kwargs_to_execute_args", "(", "kwargs", ")", ")", "_execute", "(", "*", "args", ")" ]
A wrapper of pyntcontrib's execute that handles kwargs
[ "A", "wrapper", "of", "pyntcontrib", "s", "execute", "that", "handles", "kwargs" ]
python
train
daviddrysdale/python-phonenumbers
python/phonenumbers/carrier.py
https://github.com/daviddrysdale/python-phonenumbers/blob/9cc5bb4ab5e661e70789b4c64bf7a9383c7bdc20/python/phonenumbers/carrier.py#L111-L133
def safe_display_name(numobj, lang, script=None, region=None): """Gets the name of the carrier for the given PhoneNumber object only when it is 'safe' to display to users. A carrier name is onsidered safe if the number is valid and for a region that doesn't support mobile number portability (http://en.wikipedia.org/wiki/Mobile_number_portability). This function explicitly checks the validity of the number passed in Arguments: numobj -- The PhoneNumber object for which we want to get a carrier name. lang -- A 2-letter lowercase ISO 639-1 language code for the language in which the description should be returned (e.g. "en") script -- A 4-letter titlecase (first letter uppercase, rest lowercase) ISO script code as defined in ISO 15924, separated by an underscore (e.g. "Hant") region -- A 2-letter uppercase ISO 3166-1 country code (e.g. "GB") Returns a carrier name that is safe to display to users, or the empty string. """ if is_mobile_number_portable_region(region_code_for_number(numobj)): return U_EMPTY_STRING return name_for_number(numobj, lang, script, region)
[ "def", "safe_display_name", "(", "numobj", ",", "lang", ",", "script", "=", "None", ",", "region", "=", "None", ")", ":", "if", "is_mobile_number_portable_region", "(", "region_code_for_number", "(", "numobj", ")", ")", ":", "return", "U_EMPTY_STRING", "return", "name_for_number", "(", "numobj", ",", "lang", ",", "script", ",", "region", ")" ]
Gets the name of the carrier for the given PhoneNumber object only when it is 'safe' to display to users. A carrier name is considered safe if the number is valid and for a region that doesn't support mobile number portability (http://en.wikipedia.org/wiki/Mobile_number_portability). This function explicitly checks the validity of the number passed in. Arguments: numobj -- The PhoneNumber object for which we want to get a carrier name. lang -- A 2-letter lowercase ISO 639-1 language code for the language in which the description should be returned (e.g. "en") script -- A 4-letter titlecase (first letter uppercase, rest lowercase) ISO script code as defined in ISO 15924, separated by an underscore (e.g. "Hant") region -- A 2-letter uppercase ISO 3166-1 country code (e.g. "GB") Returns a carrier name that is safe to display to users, or the empty string.
[ "Gets", "the", "name", "of", "the", "carrier", "for", "the", "given", "PhoneNumber", "object", "only", "when", "it", "is", "safe", "to", "display", "to", "users", ".", "A", "carrier", "name", "is", "onsidered", "safe", "if", "the", "number", "is", "valid", "and", "for", "a", "region", "that", "doesn", "t", "support", "mobile", "number", "portability", "(", "http", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Mobile_number_portability", ")", "." ]
python
train
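A usage sketch assuming the phonenumbers package; the number is an arbitrary example, and what comes back depends on the bundled carrier metadata and on whether the region supports mobile number portability.

import phonenumbers
from phonenumbers import carrier

num = phonenumbers.parse("+41798765432", "CH")
print(carrier.name_for_number(num, "en"))    # carrier name, or "" if unknown
print(carrier.safe_display_name(num, "en"))  # "" for regions with number portability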
tensorflow/tensor2tensor
tensor2tensor/rl/player.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/rl/player.py#L157-L191
def get_keys_to_action(self): """Get mapping from keyboard keys to actions. Required by gym.utils.play in environment or top level wrapper. Returns: { Unicode code point for keyboard key: action (formatted for step()), ... } """ # Based on gym AtariEnv.get_keys_to_action() keyword_to_key = { "UP": ord("w"), "DOWN": ord("s"), "LEFT": ord("a"), "RIGHT": ord("d"), "FIRE": ord(" "), } keys_to_action = {} for action_id, action_meaning in enumerate(self.action_meanings): keys_tuple = tuple(sorted([ key for keyword, key in keyword_to_key.items() if keyword in action_meaning])) assert keys_tuple not in keys_to_action keys_to_action[keys_tuple] = action_id # Special actions: keys_to_action[(ord("r"),)] = self.RETURN_DONE_ACTION keys_to_action[(ord("c"),)] = self.TOGGLE_WAIT_ACTION keys_to_action[(ord("n"),)] = self.WAIT_MODE_NOOP_ACTION return keys_to_action
[ "def", "get_keys_to_action", "(", "self", ")", ":", "# Based on gym AtariEnv.get_keys_to_action()", "keyword_to_key", "=", "{", "\"UP\"", ":", "ord", "(", "\"w\"", ")", ",", "\"DOWN\"", ":", "ord", "(", "\"s\"", ")", ",", "\"LEFT\"", ":", "ord", "(", "\"a\"", ")", ",", "\"RIGHT\"", ":", "ord", "(", "\"d\"", ")", ",", "\"FIRE\"", ":", "ord", "(", "\" \"", ")", ",", "}", "keys_to_action", "=", "{", "}", "for", "action_id", ",", "action_meaning", "in", "enumerate", "(", "self", ".", "action_meanings", ")", ":", "keys_tuple", "=", "tuple", "(", "sorted", "(", "[", "key", "for", "keyword", ",", "key", "in", "keyword_to_key", ".", "items", "(", ")", "if", "keyword", "in", "action_meaning", "]", ")", ")", "assert", "keys_tuple", "not", "in", "keys_to_action", "keys_to_action", "[", "keys_tuple", "]", "=", "action_id", "# Special actions:", "keys_to_action", "[", "(", "ord", "(", "\"r\"", ")", ",", ")", "]", "=", "self", ".", "RETURN_DONE_ACTION", "keys_to_action", "[", "(", "ord", "(", "\"c\"", ")", ",", ")", "]", "=", "self", ".", "TOGGLE_WAIT_ACTION", "keys_to_action", "[", "(", "ord", "(", "\"n\"", ")", ",", ")", "]", "=", "self", ".", "WAIT_MODE_NOOP_ACTION", "return", "keys_to_action" ]
Get mapping from keyboard keys to actions. Required by gym.utils.play in environment or top level wrapper. Returns: { Unicode code point for keyboard key: action (formatted for step()), ... }
[ "Get", "mapping", "from", "keyboard", "keys", "to", "actions", "." ]
python
train
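A self-contained illustration of the mapping logic in the record above, using a small hard-coded list of Atari-style action meanings instead of a real gym environment.

keyword_to_key = {"UP": ord("w"), "DOWN": ord("s"), "LEFT": ord("a"),
                  "RIGHT": ord("d"), "FIRE": ord(" ")}
action_meanings = ["NOOP", "FIRE", "UP", "RIGHT", "UPRIGHT"]

keys_to_action = {}
for action_id, meaning in enumerate(action_meanings):
    # Every keyword contained in the action meaning contributes its key.
    keys = tuple(sorted(key for kw, key in keyword_to_key.items() if kw in meaning))
    keys_to_action[keys] = action_id

print(keys_to_action[(ord("d"), ord("w"))])  # 4, i.e. UPRIGHT = hold "w" and "d"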
numenta/nupic
src/nupic/algorithms/backtracking_tm.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/algorithms/backtracking_tm.py#L1056-L1096
def printStates(self, printPrevious = True, printLearnState = True): """ TODO: document :param printPrevious: :param printLearnState: :return: """ def formatRow(var, i): s = '' for c in range(self.numberOfCols): if c > 0 and c % 10 == 0: s += ' ' s += str(var[c, i]) s += ' ' return s print "\nInference Active state" for i in xrange(self.cellsPerColumn): if printPrevious: print formatRow(self.infActiveState['t-1'], i), print formatRow(self.infActiveState['t'], i) print "Inference Predicted state" for i in xrange(self.cellsPerColumn): if printPrevious: print formatRow(self.infPredictedState['t-1'], i), print formatRow(self.infPredictedState['t'], i) if printLearnState: print "\nLearn Active state" for i in xrange(self.cellsPerColumn): if printPrevious: print formatRow(self.lrnActiveState['t-1'], i), print formatRow(self.lrnActiveState['t'], i) print "Learn Predicted state" for i in xrange(self.cellsPerColumn): if printPrevious: print formatRow(self.lrnPredictedState['t-1'], i), print formatRow(self.lrnPredictedState['t'], i)
[ "def", "printStates", "(", "self", ",", "printPrevious", "=", "True", ",", "printLearnState", "=", "True", ")", ":", "def", "formatRow", "(", "var", ",", "i", ")", ":", "s", "=", "''", "for", "c", "in", "range", "(", "self", ".", "numberOfCols", ")", ":", "if", "c", ">", "0", "and", "c", "%", "10", "==", "0", ":", "s", "+=", "' '", "s", "+=", "str", "(", "var", "[", "c", ",", "i", "]", ")", "s", "+=", "' '", "return", "s", "print", "\"\\nInference Active state\"", "for", "i", "in", "xrange", "(", "self", ".", "cellsPerColumn", ")", ":", "if", "printPrevious", ":", "print", "formatRow", "(", "self", ".", "infActiveState", "[", "'t-1'", "]", ",", "i", ")", ",", "print", "formatRow", "(", "self", ".", "infActiveState", "[", "'t'", "]", ",", "i", ")", "print", "\"Inference Predicted state\"", "for", "i", "in", "xrange", "(", "self", ".", "cellsPerColumn", ")", ":", "if", "printPrevious", ":", "print", "formatRow", "(", "self", ".", "infPredictedState", "[", "'t-1'", "]", ",", "i", ")", ",", "print", "formatRow", "(", "self", ".", "infPredictedState", "[", "'t'", "]", ",", "i", ")", "if", "printLearnState", ":", "print", "\"\\nLearn Active state\"", "for", "i", "in", "xrange", "(", "self", ".", "cellsPerColumn", ")", ":", "if", "printPrevious", ":", "print", "formatRow", "(", "self", ".", "lrnActiveState", "[", "'t-1'", "]", ",", "i", ")", ",", "print", "formatRow", "(", "self", ".", "lrnActiveState", "[", "'t'", "]", ",", "i", ")", "print", "\"Learn Predicted state\"", "for", "i", "in", "xrange", "(", "self", ".", "cellsPerColumn", ")", ":", "if", "printPrevious", ":", "print", "formatRow", "(", "self", ".", "lrnPredictedState", "[", "'t-1'", "]", ",", "i", ")", ",", "print", "formatRow", "(", "self", ".", "lrnPredictedState", "[", "'t'", "]", ",", "i", ")" ]
TODO: document :param printPrevious: :param printLearnState: :return:
[ "TODO", ":", "document", ":", "param", "printPrevious", ":", ":", "param", "printLearnState", ":", ":", "return", ":" ]
python
valid
BlueBrain/NeuroM
neurom/view/view.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/view/view.py#L258-L281
def plot_neuron3d(ax, nrn, neurite_type=NeuriteType.all, diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH, color=None, alpha=_ALPHA): ''' Generates a figure of the neuron, that contains a soma and a list of trees. Args: ax(matplotlib axes): on what to plot nrn(neuron): neuron to be plotted neurite_type(NeuriteType): an optional filter on the neurite type diameter_scale(float): Scale factor multiplied with segment diameters before plotting linewidth(float): all segments are plotted with this width, but only if diameter_scale=None color(str or None): Color of plotted values, None corresponds to default choice alpha(float): Transparency of plotted values ''' plot_soma3d(ax, nrn.soma, color=color, alpha=alpha) for neurite in iter_neurites(nrn, filt=tree_type_checker(neurite_type)): plot_tree3d(ax, neurite, diameter_scale=diameter_scale, linewidth=linewidth, color=color, alpha=alpha) ax.set_title(nrn.name)
[ "def", "plot_neuron3d", "(", "ax", ",", "nrn", ",", "neurite_type", "=", "NeuriteType", ".", "all", ",", "diameter_scale", "=", "_DIAMETER_SCALE", ",", "linewidth", "=", "_LINEWIDTH", ",", "color", "=", "None", ",", "alpha", "=", "_ALPHA", ")", ":", "plot_soma3d", "(", "ax", ",", "nrn", ".", "soma", ",", "color", "=", "color", ",", "alpha", "=", "alpha", ")", "for", "neurite", "in", "iter_neurites", "(", "nrn", ",", "filt", "=", "tree_type_checker", "(", "neurite_type", ")", ")", ":", "plot_tree3d", "(", "ax", ",", "neurite", ",", "diameter_scale", "=", "diameter_scale", ",", "linewidth", "=", "linewidth", ",", "color", "=", "color", ",", "alpha", "=", "alpha", ")", "ax", ".", "set_title", "(", "nrn", ".", "name", ")" ]
Generates a figure of the neuron, that contains a soma and a list of trees. Args: ax(matplotlib axes): on what to plot nrn(neuron): neuron to be plotted neurite_type(NeuriteType): an optional filter on the neurite type diameter_scale(float): Scale factor multiplied with segment diameters before plotting linewidth(float): all segments are plotted with this width, but only if diameter_scale=None color(str or None): Color of plotted values, None corresponds to default choice alpha(float): Transparency of plotted values
[ "Generates", "a", "figure", "of", "the", "neuron", "that", "contains", "a", "soma", "and", "a", "list", "of", "trees", "." ]
python
train
keis/base58
base58.py
https://github.com/keis/base58/blob/ae55031bb1206b2165af99face2f421e12748368/base58.py#L70-L78
def b58decode_int(v): '''Decode a Base58 encoded string as an integer''' v = v.rstrip() v = scrub_input(v) decimal = 0 for char in v: decimal = decimal * 58 + alphabet.index(char) return decimal
[ "def", "b58decode_int", "(", "v", ")", ":", "v", "=", "v", ".", "rstrip", "(", ")", "v", "=", "scrub_input", "(", "v", ")", "decimal", "=", "0", "for", "char", "in", "v", ":", "decimal", "=", "decimal", "*", "58", "+", "alphabet", ".", "index", "(", "char", ")", "return", "decimal" ]
Decode a Base58 encoded string as an integer
[ "Decode", "a", "Base58", "encoded", "string", "as", "an", "integer" ]
python
train
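A self-contained sketch of the same positional arithmetic with the Bitcoin base58 alphabet; the base58 package already exposes b58decode_int, so this only illustrates the loop.

ALPHABET = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"

def decode_int(text: str) -> int:
    value = 0
    for char in text:
        value = value * 58 + ALPHABET.index(char)
    return value

assert decode_int("21") == 58   # '2' -> 1, '1' -> 0, so 1 * 58 + 0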
AguaClara/aide_document-DEPRECATED
aide_document/convert.py
https://github.com/AguaClara/aide_document-DEPRECATED/blob/3f3b5c9f321264e0e4d8ed68dfbc080762579815/aide_document/convert.py#L3-L31
def md_to_pdf(input_name, output_name): """ Converts an input MarkDown file to a PDF of the given output name. Parameters ========== input_name : String Relative file location of the input file to where this function is being called. output_name : String Relative file location of the output file to where this function is being called. Note that .pdf can be omitted. Examples ======== Suppose we have a directory as follows: data/ doc.md To convert the document: >>> from aide_document import convert >>> convert.md_to_pdf('data/doc.md', 'data/doc.pdf') .pdf can also be omitted from the second argument. """ if output_name[-4:] == '.pdf': os.system("pandoc " + input_name + " -o " + output_name) else: os.system("pandoc " + input_name + " -o " + output_name + ".pdf" )
[ "def", "md_to_pdf", "(", "input_name", ",", "output_name", ")", ":", "if", "output_name", "[", "-", "4", ":", "]", "==", "'.pdf'", ":", "os", ".", "system", "(", "\"pandoc \"", "+", "input_name", "+", "\" -o \"", "+", "output_name", ")", "else", ":", "os", ".", "system", "(", "\"pandoc \"", "+", "input_name", "+", "\" -o \"", "+", "output_name", "+", "\".pdf\"", ")" ]
Converts an input MarkDown file to a PDF of the given output name. Parameters ========== input_name : String Relative file location of the input file to where this function is being called. output_name : String Relative file location of the output file to where this function is being called. Note that .pdf can be omitted. Examples ======== Suppose we have a directory as follows: data/ doc.md To convert the document: >>> from aide_document import convert >>> convert.md_to_pdf('data/doc.md', 'data/doc.pdf') .pdf can also be omitted from the second argument.
[ "Converts", "an", "input", "MarkDown", "file", "to", "a", "PDF", "of", "the", "given", "output", "name", "." ]
python
train
mrcagney/make_gtfs
make_gtfs/main.py
https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/main.py#L10-L30
def get_duration(timestr1, timestr2, units='s'): """ Return the duration of the time period between the first and second time string in the given units. Allowable units are 's' (seconds), 'min' (minutes), 'h' (hours). Assume ``timestr1 < timestr2``. """ valid_units = ['s', 'min', 'h'] assert units in valid_units,\ "Units must be one of {!s}".format(valid_units) duration = ( gt.timestr_to_seconds(timestr2) - gt.timestr_to_seconds(timestr1) ) if units == 's': return duration elif units == 'min': return duration/60 else: return duration/3600
[ "def", "get_duration", "(", "timestr1", ",", "timestr2", ",", "units", "=", "'s'", ")", ":", "valid_units", "=", "[", "'s'", ",", "'min'", ",", "'h'", "]", "assert", "units", "in", "valid_units", ",", "\"Units must be one of {!s}\"", ".", "format", "(", "valid_units", ")", "duration", "=", "(", "gt", ".", "timestr_to_seconds", "(", "timestr2", ")", "-", "gt", ".", "timestr_to_seconds", "(", "timestr1", ")", ")", "if", "units", "==", "'s'", ":", "return", "duration", "elif", "units", "==", "'min'", ":", "return", "duration", "/", "60", "else", ":", "return", "duration", "/", "3600" ]
Return the duration of the time period between the first and second time string in the given units. Allowable units are 's' (seconds), 'min' (minutes), 'h' (hours). Assume ``timestr1 < timestr2``.
[ "Return", "the", "duration", "of", "the", "time", "period", "between", "the", "first", "and", "second", "time", "string", "in", "the", "given", "units", ".", "Allowable", "units", "are", "s", "(", "seconds", ")", "min", "(", "minutes", ")", "h", "(", "hours", ")", ".", "Assume", "timestr1", "<", "timestr2", "." ]
python
train
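A small sketch of the duration computation with a local stand-in for gt.timestr_to_seconds (GTFS time strings are HH:MM:SS, and the hour field may exceed 24 for trips past midnight); the helper name is illustrative.

def timestr_to_seconds(timestr: str) -> int:
    hours, minutes, seconds = (int(x) for x in timestr.split(":"))
    return hours * 3600 + minutes * 60 + seconds

duration_s = timestr_to_seconds("09:30:00") - timestr_to_seconds("08:15:00")
print(duration_s / 60)  # 75.0 minutes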
gouthambs/Flask-Blogging
flask_blogging/engine.py
https://github.com/gouthambs/Flask-Blogging/blob/6636b8941175e9910f116a329521f96b8b05a9ac/flask_blogging/engine.py#L78-L111
def init_app(self, app, storage=None, cache=None, file_upload=None): """ Initialize the engine. :param app: The app to use :type app: Object :param storage: The blog storage instance that implements the :type storage: Object :param cache: (Optional) A Flask-Cache object to enable caching :type cache: Object ``Storage`` class interface. """ self.app = app self.config = self.app.config self.storage = storage or self.storage self.file_upload = file_upload or self.file_upload self.cache = cache or self.cache self._register_plugins(self.app, self.config) from .views import create_blueprint blog_app = create_blueprint(__name__, self) # external urls blueprint_created.send(self.app, engine=self, blueprint=blog_app) self.app.register_blueprint( blog_app, url_prefix=self.config.get("BLOGGING_URL_PREFIX")) self.app.extensions["FLASK_BLOGGING_ENGINE"] = self # duplicate self.app.extensions["blogging"] = self self.principal = Principal(self.app) engine_initialised.send(self.app, engine=self) if self.config.get("BLOGGING_ALLOW_FILEUPLOAD", True): self.ffu = self.file_upload or FlaskFileUpload(app)
[ "def", "init_app", "(", "self", ",", "app", ",", "storage", "=", "None", ",", "cache", "=", "None", ",", "file_upload", "=", "None", ")", ":", "self", ".", "app", "=", "app", "self", ".", "config", "=", "self", ".", "app", ".", "config", "self", ".", "storage", "=", "storage", "or", "self", ".", "storage", "self", ".", "file_upload", "=", "file_upload", "or", "self", ".", "file_upload", "self", ".", "cache", "=", "cache", "or", "self", ".", "cache", "self", ".", "_register_plugins", "(", "self", ".", "app", ",", "self", ".", "config", ")", "from", ".", "views", "import", "create_blueprint", "blog_app", "=", "create_blueprint", "(", "__name__", ",", "self", ")", "# external urls", "blueprint_created", ".", "send", "(", "self", ".", "app", ",", "engine", "=", "self", ",", "blueprint", "=", "blog_app", ")", "self", ".", "app", ".", "register_blueprint", "(", "blog_app", ",", "url_prefix", "=", "self", ".", "config", ".", "get", "(", "\"BLOGGING_URL_PREFIX\"", ")", ")", "self", ".", "app", ".", "extensions", "[", "\"FLASK_BLOGGING_ENGINE\"", "]", "=", "self", "# duplicate", "self", ".", "app", ".", "extensions", "[", "\"blogging\"", "]", "=", "self", "self", ".", "principal", "=", "Principal", "(", "self", ".", "app", ")", "engine_initialised", ".", "send", "(", "self", ".", "app", ",", "engine", "=", "self", ")", "if", "self", ".", "config", ".", "get", "(", "\"BLOGGING_ALLOW_FILEUPLOAD\"", ",", "True", ")", ":", "self", ".", "ffu", "=", "self", ".", "file_upload", "or", "FlaskFileUpload", "(", "app", ")" ]
Initialize the engine. :param app: The app to use :type app: Object :param storage: The blog storage instance that implements the :type storage: Object :param cache: (Optional) A Flask-Cache object to enable caching :type cache: Object ``Storage`` class interface.
[ "Initialize", "the", "engine", "." ]
python
train
sighingnow/parsec.py
src/parsec/__init__.py
https://github.com/sighingnow/parsec.py/blob/ed50e1e259142757470b925f8d20dfe5ad223af0/src/parsec/__init__.py#L227-L229
def result(self, res): '''Return a value according to the parameter `res` when parse successfully.''' return self >> Parser(lambda _, index: Value.success(index, res))
[ "def", "result", "(", "self", ",", "res", ")", ":", "return", "self", ">>", "Parser", "(", "lambda", "_", ",", "index", ":", "Value", ".", "success", "(", "index", ",", "res", ")", ")" ]
Return a value according to the parameter `res` when parsing succeeds.
[ "Return", "a", "value", "according", "to", "the", "parameter", "res", "when", "parse", "successfully", "." ]
python
train
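A usage sketch assuming parsec's string combinator; .result keeps the consumed input but replaces the parsed value, which is handy for mapping literal keywords to constants.

from parsec import string

true_literal = string("true").result(True)
false_literal = string("false").result(False)
boolean = true_literal | false_literal

assert boolean.parse("true") is True
assert boolean.parse("false") is False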
xiongchiamiov/pyfixit
pyfixit/guide.py
https://github.com/xiongchiamiov/pyfixit/blob/808a0c852a26e4211b2e3a72da972ab34a586dc4/pyfixit/guide.py#L56-L93
def refresh(self): '''Refetch instance data from the API. ''' response = requests.get('%s/guides/%s' % (API_BASE_URL, self.id)) attributes = response.json() self.category = Category(attributes['category']) self.url = attributes['url'] self.title = attributes['title'] if attributes['image']: self.image = Image(attributes['image']['id']) else: self.image = None self.locale = attributes['locale'] self.introduction = WikiText(attributes['introduction_raw'], attributes['introduction_rendered']) self.conclusion = WikiText(attributes['conclusion_raw'], attributes['conclusion_rendered']) #self.tools = attributes['tools'] #self.parts = attributes['parts'] self.subject = attributes['subject'] self.modifiedDate = datetime.utcfromtimestamp(attributes['modified_date']) self.createdDate = datetime.utcfromtimestamp(attributes['created_date']) self.publishedDate = datetime.utcfromtimestamp(attributes['published_date']) #self.documents = attributes['documents'] author = attributes['author'] #self.author = User(author['userid'], name=author['text']) #self.timeRequired = attributes['timeRequired'] self.steps = [Step(step['guideid'], step['stepid'], data=step) for step in attributes['steps']] self.type = attributes['type'] self.public = attributes['public'] self.revision = attributes['revisionid'] self.difficulty = attributes['difficulty'] self.prerequisites = [Guide(guide['guideid']) for guide in attributes['prerequisites']] # attributes['prereq_modified_date'] #self.summary = attributes['summary'] self.flags = [Flag.from_id(flag['flagid']) for flag in attributes['flags']]
[ "def", "refresh", "(", "self", ")", ":", "response", "=", "requests", ".", "get", "(", "'%s/guides/%s'", "%", "(", "API_BASE_URL", ",", "self", ".", "id", ")", ")", "attributes", "=", "response", ".", "json", "(", ")", "self", ".", "category", "=", "Category", "(", "attributes", "[", "'category'", "]", ")", "self", ".", "url", "=", "attributes", "[", "'url'", "]", "self", ".", "title", "=", "attributes", "[", "'title'", "]", "if", "attributes", "[", "'image'", "]", ":", "self", ".", "image", "=", "Image", "(", "attributes", "[", "'image'", "]", "[", "'id'", "]", ")", "else", ":", "self", ".", "image", "=", "None", "self", ".", "locale", "=", "attributes", "[", "'locale'", "]", "self", ".", "introduction", "=", "WikiText", "(", "attributes", "[", "'introduction_raw'", "]", ",", "attributes", "[", "'introduction_rendered'", "]", ")", "self", ".", "conclusion", "=", "WikiText", "(", "attributes", "[", "'conclusion_raw'", "]", ",", "attributes", "[", "'conclusion_rendered'", "]", ")", "#self.tools = attributes['tools']", "#self.parts = attributes['parts']", "self", ".", "subject", "=", "attributes", "[", "'subject'", "]", "self", ".", "modifiedDate", "=", "datetime", ".", "utcfromtimestamp", "(", "attributes", "[", "'modified_date'", "]", ")", "self", ".", "createdDate", "=", "datetime", ".", "utcfromtimestamp", "(", "attributes", "[", "'created_date'", "]", ")", "self", ".", "publishedDate", "=", "datetime", ".", "utcfromtimestamp", "(", "attributes", "[", "'published_date'", "]", ")", "#self.documents = attributes['documents']", "author", "=", "attributes", "[", "'author'", "]", "#self.author = User(author['userid'], name=author['text'])", "#self.timeRequired = attributes['timeRequired']", "self", ".", "steps", "=", "[", "Step", "(", "step", "[", "'guideid'", "]", ",", "step", "[", "'stepid'", "]", ",", "data", "=", "step", ")", "for", "step", "in", "attributes", "[", "'steps'", "]", "]", "self", ".", "type", "=", "attributes", "[", "'type'", "]", "self", ".", "public", "=", "attributes", "[", "'public'", "]", "self", ".", "revision", "=", "attributes", "[", "'revisionid'", "]", "self", ".", "difficulty", "=", "attributes", "[", "'difficulty'", "]", "self", ".", "prerequisites", "=", "[", "Guide", "(", "guide", "[", "'guideid'", "]", ")", "for", "guide", "in", "attributes", "[", "'prerequisites'", "]", "]", "# attributes['prereq_modified_date']", "#self.summary = attributes['summary']", "self", ".", "flags", "=", "[", "Flag", ".", "from_id", "(", "flag", "[", "'flagid'", "]", ")", "for", "flag", "in", "attributes", "[", "'flags'", "]", "]" ]
Refetch instance data from the API.
[ "Refetch", "instance", "data", "from", "the", "API", "." ]
python
train
tanghaibao/jcvi
jcvi/formats/coords.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/formats/coords.py#L316-L333
def fromdelta(args): """ %prog fromdelta deltafile Convert deltafile to coordsfile. """ p = OptionParser(fromdelta.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) deltafile, = args coordsfile = deltafile.rsplit(".", 1)[0] + ".coords" cmd = "show-coords -rclH {0}".format(deltafile) sh(cmd, outfile=coordsfile) return coordsfile
[ "def", "fromdelta", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "fromdelta", ".", "__doc__", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "1", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "deltafile", ",", "=", "args", "coordsfile", "=", "deltafile", ".", "rsplit", "(", "\".\"", ",", "1", ")", "[", "0", "]", "+", "\".coords\"", "cmd", "=", "\"show-coords -rclH {0}\"", ".", "format", "(", "deltafile", ")", "sh", "(", "cmd", ",", "outfile", "=", "coordsfile", ")", "return", "coordsfile" ]
%prog fromdelta deltafile Convert deltafile to coordsfile.
[ "%prog", "fromdelta", "deltafile" ]
python
train
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10186-L10299
def read_criteria_from_file(path, acceptance_criteria, **kwargs): ''' Read accceptance criteria from magic criteria file # old format: multiple lines. pmag_criteria_code defines the type of criteria to deal with old format this function reads all the lines and ignore empty cells. i.e., the program assumes that in each column there is only one value (in one of the lines) special case in the old format: specimen_dang has a value and pmag_criteria_code is IE-specimen. The program assumes that the user means specimen_int_dang # New format for thellier_gui and demag_gui: one long line. pmag_criteria_code=ACCEPT path is the full path to the criteria file the function takes exiting acceptance_criteria and updtate it with criteria from file output: acceptance_criteria={} acceptance_criteria[MagIC Variable Names]={} acceptance_criteria[MagIC Variable Names]['value']: a number for acceptance criteria value -999 for N/A 1/0 for True/False or Good/Bad acceptance_criteria[MagIC Variable Names]['threshold_type']: "low": lower cutoff value i.e. crit>=value pass criteria "high": high cutoff value i.e. crit<=value pass criteria [string1,string2,....]: for flags acceptance_criteria[MagIC Variable Names]['decimal_points']:number of decimal points in rounding (this is used in displaying criteria in the dialog box) ''' warnings = [] acceptance_criteria_list = list(acceptance_criteria.keys()) if 'data_model' in list(kwargs.keys()) and kwargs['data_model'] == 3: crit_data = acceptance_criteria # data already read in else: crit_data, file_type = magic_read(path) if 'criteria' not in file_type: if 'empty' in file_type: print('-W- No criteria found: {} '.format(path)) else: print( '-W- {} could not be read and may be improperly formatted...'.format(path)) for rec in crit_data: # gather metadata metadata_dict = {'pmag_criteria_code': '', 'criteria_definition': '', 'er_citation_names': ''} for metadata in metadata_dict: if metadata in rec: metadata_dict[metadata] = rec[metadata] # check each record for correct name and compatibility for crit in list(rec.keys()): if crit == 'anisotropy_ftest_flag' and crit not in list(rec.keys()): crit = 'specimen_aniso_ftest_flag' # convert legacy criterion to 2.5 rec[crit] = rec[crit].strip('\n') if crit in ['pmag_criteria_code', 'criteria_definition', 'magic_experiment_names', 'er_citation_names']: continue elif rec[crit] == "": continue # this catches all the ones that are being overwritten if crit in acceptance_criteria: if acceptance_criteria[crit]['value'] not in [-999, '-999', -999]: print( "-W- You have multiple different criteria that both use column: {}.\nThe last will be used:\n{}.".format(crit, rec)) warn_string = 'multiple criteria for column: {} (only last will be used)'.format( crit) if warn_string not in warnings: warnings.append(warn_string) if crit == "specimen_dang" and "pmag_criteria_code" in list(rec.keys()) and "IE-SPEC" in rec["pmag_criteria_code"]: crit = "specimen_int_dang" print("-W- Found backward compatibility problem with selection criteria specimen_dang. Cannot be associated with IE-SPEC. Program assumes that the statistic is specimen_int_dang") if 'specimen_int_dang' not in acceptance_criteria: acceptance_criteria["specimen_int_dang"] = {} acceptance_criteria["specimen_int_dang"]['value'] = float( rec["specimen_dang"]) elif crit not in acceptance_criteria_list: print( "-W- WARNING: criteria code %s is not supported by PmagPy GUI. 
please check" % crit) acceptance_criteria[crit] = {} acceptance_criteria[crit]['value'] = rec[crit] acceptance_criteria[crit]['threshold_type'] = "inherited" acceptance_criteria[crit]['decimal_points'] = -999 acceptance_criteria[crit]['category'] = None # boolean flag elif acceptance_criteria[crit]['threshold_type'] == 'bool': if str(rec[crit]) in ['1', 'g', 'True', 'TRUE']: acceptance_criteria[crit]['value'] = True else: acceptance_criteria[crit]['value'] = False # criteria as flags elif type(acceptance_criteria[crit]['threshold_type']) == list: if str(rec[crit]) in acceptance_criteria[crit]['threshold_type']: acceptance_criteria[crit]['value'] = str(rec[crit]) else: print( "-W- WARNING: data %s from criteria code %s and is not supported by PmagPy GUI. please check" % (crit, rec[crit])) elif float(rec[crit]) == -999: pass else: acceptance_criteria[crit]['value'] = float(rec[crit]) # add in metadata to each record acceptance_criteria[crit].update(metadata_dict) if "return_warnings" in kwargs: return (acceptance_criteria, warnings) else: return(acceptance_criteria)
[ "def", "read_criteria_from_file", "(", "path", ",", "acceptance_criteria", ",", "*", "*", "kwargs", ")", ":", "warnings", "=", "[", "]", "acceptance_criteria_list", "=", "list", "(", "acceptance_criteria", ".", "keys", "(", ")", ")", "if", "'data_model'", "in", "list", "(", "kwargs", ".", "keys", "(", ")", ")", "and", "kwargs", "[", "'data_model'", "]", "==", "3", ":", "crit_data", "=", "acceptance_criteria", "# data already read in", "else", ":", "crit_data", ",", "file_type", "=", "magic_read", "(", "path", ")", "if", "'criteria'", "not", "in", "file_type", ":", "if", "'empty'", "in", "file_type", ":", "print", "(", "'-W- No criteria found: {} '", ".", "format", "(", "path", ")", ")", "else", ":", "print", "(", "'-W- {} could not be read and may be improperly formatted...'", ".", "format", "(", "path", ")", ")", "for", "rec", "in", "crit_data", ":", "# gather metadata", "metadata_dict", "=", "{", "'pmag_criteria_code'", ":", "''", ",", "'criteria_definition'", ":", "''", ",", "'er_citation_names'", ":", "''", "}", "for", "metadata", "in", "metadata_dict", ":", "if", "metadata", "in", "rec", ":", "metadata_dict", "[", "metadata", "]", "=", "rec", "[", "metadata", "]", "# check each record for correct name and compatibility", "for", "crit", "in", "list", "(", "rec", ".", "keys", "(", ")", ")", ":", "if", "crit", "==", "'anisotropy_ftest_flag'", "and", "crit", "not", "in", "list", "(", "rec", ".", "keys", "(", ")", ")", ":", "crit", "=", "'specimen_aniso_ftest_flag'", "# convert legacy criterion to 2.5", "rec", "[", "crit", "]", "=", "rec", "[", "crit", "]", ".", "strip", "(", "'\\n'", ")", "if", "crit", "in", "[", "'pmag_criteria_code'", ",", "'criteria_definition'", ",", "'magic_experiment_names'", ",", "'er_citation_names'", "]", ":", "continue", "elif", "rec", "[", "crit", "]", "==", "\"\"", ":", "continue", "# this catches all the ones that are being overwritten", "if", "crit", "in", "acceptance_criteria", ":", "if", "acceptance_criteria", "[", "crit", "]", "[", "'value'", "]", "not", "in", "[", "-", "999", ",", "'-999'", ",", "-", "999", "]", ":", "print", "(", "\"-W- You have multiple different criteria that both use column: {}.\\nThe last will be used:\\n{}.\"", ".", "format", "(", "crit", ",", "rec", ")", ")", "warn_string", "=", "'multiple criteria for column: {} (only last will be used)'", ".", "format", "(", "crit", ")", "if", "warn_string", "not", "in", "warnings", ":", "warnings", ".", "append", "(", "warn_string", ")", "if", "crit", "==", "\"specimen_dang\"", "and", "\"pmag_criteria_code\"", "in", "list", "(", "rec", ".", "keys", "(", ")", ")", "and", "\"IE-SPEC\"", "in", "rec", "[", "\"pmag_criteria_code\"", "]", ":", "crit", "=", "\"specimen_int_dang\"", "print", "(", "\"-W- Found backward compatibility problem with selection criteria specimen_dang. Cannot be associated with IE-SPEC. Program assumes that the statistic is specimen_int_dang\"", ")", "if", "'specimen_int_dang'", "not", "in", "acceptance_criteria", ":", "acceptance_criteria", "[", "\"specimen_int_dang\"", "]", "=", "{", "}", "acceptance_criteria", "[", "\"specimen_int_dang\"", "]", "[", "'value'", "]", "=", "float", "(", "rec", "[", "\"specimen_dang\"", "]", ")", "elif", "crit", "not", "in", "acceptance_criteria_list", ":", "print", "(", "\"-W- WARNING: criteria code %s is not supported by PmagPy GUI. 
please check\"", "%", "crit", ")", "acceptance_criteria", "[", "crit", "]", "=", "{", "}", "acceptance_criteria", "[", "crit", "]", "[", "'value'", "]", "=", "rec", "[", "crit", "]", "acceptance_criteria", "[", "crit", "]", "[", "'threshold_type'", "]", "=", "\"inherited\"", "acceptance_criteria", "[", "crit", "]", "[", "'decimal_points'", "]", "=", "-", "999", "acceptance_criteria", "[", "crit", "]", "[", "'category'", "]", "=", "None", "# boolean flag", "elif", "acceptance_criteria", "[", "crit", "]", "[", "'threshold_type'", "]", "==", "'bool'", ":", "if", "str", "(", "rec", "[", "crit", "]", ")", "in", "[", "'1'", ",", "'g'", ",", "'True'", ",", "'TRUE'", "]", ":", "acceptance_criteria", "[", "crit", "]", "[", "'value'", "]", "=", "True", "else", ":", "acceptance_criteria", "[", "crit", "]", "[", "'value'", "]", "=", "False", "# criteria as flags", "elif", "type", "(", "acceptance_criteria", "[", "crit", "]", "[", "'threshold_type'", "]", ")", "==", "list", ":", "if", "str", "(", "rec", "[", "crit", "]", ")", "in", "acceptance_criteria", "[", "crit", "]", "[", "'threshold_type'", "]", ":", "acceptance_criteria", "[", "crit", "]", "[", "'value'", "]", "=", "str", "(", "rec", "[", "crit", "]", ")", "else", ":", "print", "(", "\"-W- WARNING: data %s from criteria code %s and is not supported by PmagPy GUI. please check\"", "%", "(", "crit", ",", "rec", "[", "crit", "]", ")", ")", "elif", "float", "(", "rec", "[", "crit", "]", ")", "==", "-", "999", ":", "pass", "else", ":", "acceptance_criteria", "[", "crit", "]", "[", "'value'", "]", "=", "float", "(", "rec", "[", "crit", "]", ")", "# add in metadata to each record", "acceptance_criteria", "[", "crit", "]", ".", "update", "(", "metadata_dict", ")", "if", "\"return_warnings\"", "in", "kwargs", ":", "return", "(", "acceptance_criteria", ",", "warnings", ")", "else", ":", "return", "(", "acceptance_criteria", ")" ]
Read acceptance criteria from magic criteria file # old format: multiple lines. pmag_criteria_code defines the type of criteria to deal with the old format this function reads all the lines and ignores empty cells. i.e., the program assumes that in each column there is only one value (in one of the lines) special case in the old format: specimen_dang has a value and pmag_criteria_code is IE-specimen. The program assumes that the user means specimen_int_dang # New format for thellier_gui and demag_gui: one long line. pmag_criteria_code=ACCEPT path is the full path to the criteria file the function takes the existing acceptance_criteria and updates it with criteria from file output: acceptance_criteria={} acceptance_criteria[MagIC Variable Names]={} acceptance_criteria[MagIC Variable Names]['value']: a number for acceptance criteria value -999 for N/A 1/0 for True/False or Good/Bad acceptance_criteria[MagIC Variable Names]['threshold_type']: "low": lower cutoff value i.e. crit>=value pass criteria "high": high cutoff value i.e. crit<=value pass criteria [string1,string2,....]: for flags acceptance_criteria[MagIC Variable Names]['decimal_points']:number of decimal points in rounding (this is used in displaying criteria in the dialog box)
[ "Read", "accceptance", "criteria", "from", "magic", "criteria", "file", "#", "old", "format", ":", "multiple", "lines", ".", "pmag_criteria_code", "defines", "the", "type", "of", "criteria" ]
python
train
ZELLMECHANIK-DRESDEN/dclab
dclab/features/inert_ratio.py
https://github.com/ZELLMECHANIK-DRESDEN/dclab/blob/79002c4356e7020c2ba73ab0a3819c9abd4affec/dclab/features/inert_ratio.py#L295-L344
def get_tilt(cont): """Compute tilt of raw contour relative to channel axis Parameters ---------- cont: ndarray or list of ndarrays of shape (N,2) A 2D array that holds the contour of an event (in pixels) e.g. obtained using `mm.contour` where `mm` is an instance of `RTDCBase`. The first and second columns of `cont` correspond to the x- and y-coordinates of the contour. Returns ------- tilt: float or ndarray of size N Tilt of the contour in the interval [0, PI/2] References ---------- - `<https://en.wikipedia.org/wiki/Image_moment#Examples_2>`__ """ if isinstance(cont, np.ndarray): # If cont is an array, it is not a list of contours, # because contours can have different lengths. cont = [cont] ret_list = False else: ret_list = True length = len(cont) tilt = np.zeros(length, dtype=float) * np.nan for ii in range(length): moments = cont_moments_cv(cont[ii]) if moments is not None: # orientation of the contour oii = 0.5 * np.arctan2(2 * moments['mu11'], moments['mu02'] - moments['mu20']) # +PI/2 because relative to channel axis tilt[ii] = oii + np.pi/2 # restrict to interval [0,PI/2] tilt = np.mod(tilt, np.pi) tilt[tilt > np.pi/2] -= np.pi tilt = np.abs(tilt) if not ret_list: tilt = tilt[0] return tilt
[ "def", "get_tilt", "(", "cont", ")", ":", "if", "isinstance", "(", "cont", ",", "np", ".", "ndarray", ")", ":", "# If cont is an array, it is not a list of contours,", "# because contours can have different lengths.", "cont", "=", "[", "cont", "]", "ret_list", "=", "False", "else", ":", "ret_list", "=", "True", "length", "=", "len", "(", "cont", ")", "tilt", "=", "np", ".", "zeros", "(", "length", ",", "dtype", "=", "float", ")", "*", "np", ".", "nan", "for", "ii", "in", "range", "(", "length", ")", ":", "moments", "=", "cont_moments_cv", "(", "cont", "[", "ii", "]", ")", "if", "moments", "is", "not", "None", ":", "# orientation of the contour", "oii", "=", "0.5", "*", "np", ".", "arctan2", "(", "2", "*", "moments", "[", "'mu11'", "]", ",", "moments", "[", "'mu02'", "]", "-", "moments", "[", "'mu20'", "]", ")", "# +PI/2 because relative to channel axis", "tilt", "[", "ii", "]", "=", "oii", "+", "np", ".", "pi", "/", "2", "# restrict to interval [0,PI/2]", "tilt", "=", "np", ".", "mod", "(", "tilt", ",", "np", ".", "pi", ")", "tilt", "[", "tilt", ">", "np", ".", "pi", "/", "2", "]", "-=", "np", ".", "pi", "tilt", "=", "np", ".", "abs", "(", "tilt", ")", "if", "not", "ret_list", ":", "tilt", "=", "tilt", "[", "0", "]", "return", "tilt" ]
Compute tilt of raw contour relative to channel axis Parameters ---------- cont: ndarray or list of ndarrays of shape (N,2) A 2D array that holds the contour of an event (in pixels) e.g. obtained using `mm.contour` where `mm` is an instance of `RTDCBase`. The first and second columns of `cont` correspond to the x- and y-coordinates of the contour. Returns ------- tilt: float or ndarray of size N Tilt of the contour in the interval [0, PI/2] References ---------- - `<https://en.wikipedia.org/wiki/Image_moment#Examples_2>`__
[ "Compute", "tilt", "of", "raw", "contour", "relative", "to", "channel", "axis" ]
python
train
hydpy-dev/hydpy
hydpy/auxs/smoothtools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/auxs/smoothtools.py#L78-L114
def calc_smoothpar_logistic2(metapar): """Return the smoothing parameter corresponding to the given meta parameter when using |smooth_logistic2|. Calculate the smoothing parameter value corresponding the meta parameter value 2.5: >>> from hydpy.auxs.smoothtools import calc_smoothpar_logistic2 >>> smoothpar = calc_smoothpar_logistic2(2.5) Using this smoothing parameter value, the output of function |smooth_logistic2| differs by 1 % from the related `true` discontinuous step function for the input values -2.5 and 2.5 (which are located at a distance of 2.5 from the position of the discontinuity): >>> from hydpy.cythons import smoothutils >>> from hydpy import round_ >>> round_(smoothutils.smooth_logistic2(-2.5, smoothpar)) 0.01 >>> round_(smoothutils.smooth_logistic2(2.5, smoothpar)) 2.51 For zero or negative meta parameter values, a zero smoothing parameter value is returned: >>> round_(calc_smoothpar_logistic2(0.0)) 0.0 >>> round_(calc_smoothpar_logistic2(-1.0)) 0.0 """ if metapar <= 0.: return 0. return optimize.newton(_error_smoothpar_logistic2, .3 * metapar**.84, _smooth_logistic2_derivative, args=(metapar,))
[ "def", "calc_smoothpar_logistic2", "(", "metapar", ")", ":", "if", "metapar", "<=", "0.", ":", "return", "0.", "return", "optimize", ".", "newton", "(", "_error_smoothpar_logistic2", ",", ".3", "*", "metapar", "**", ".84", ",", "_smooth_logistic2_derivative", ",", "args", "=", "(", "metapar", ",", ")", ")" ]
Return the smoothing parameter corresponding to the given meta
parameter when using |smooth_logistic2|.

Calculate the smoothing parameter value corresponding to the meta
parameter value 2.5:

>>> from hydpy.auxs.smoothtools import calc_smoothpar_logistic2
>>> smoothpar = calc_smoothpar_logistic2(2.5)

Using this smoothing parameter value, the output of function
|smooth_logistic2| differs by 1 % from the related `true` discontinuous
step function for the input values -2.5 and 2.5 (which are located at
a distance of 2.5 from the position of the discontinuity):

>>> from hydpy.cythons import smoothutils
>>> from hydpy import round_
>>> round_(smoothutils.smooth_logistic2(-2.5, smoothpar))
0.01
>>> round_(smoothutils.smooth_logistic2(2.5, smoothpar))
2.51

For zero or negative meta parameter values, a zero smoothing parameter
value is returned:

>>> round_(calc_smoothpar_logistic2(0.0))
0.0
>>> round_(calc_smoothpar_logistic2(-1.0))
0.0
[ "Return", "the", "smoothing", "parameter", "corresponding", "to", "the", "given", "meta", "parameter", "when", "using", "|smooth_logistic2|", "." ]
python
train
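The two helpers referenced in this record (_error_smoothpar_logistic2 and _smooth_logistic2_derivative) are module-private and not shown. Judging from the doctest values, |smooth_logistic2| appears to be a softplus-style regularization of max(x, 0); under that assumption, a minimal self-contained sketch of the same Newton calibration looks as follows (the closed form of smooth_logistic2 is an assumption, not hydpy's actual Cython implementation):

import numpy as np
from scipy import optimize

def smooth_logistic2(value, parameter):
    # Assumed softplus shape: parameter * ln(1 + exp(value / parameter)).
    return parameter * np.log1p(np.exp(value / parameter))

def error_smoothpar(par, metapar):
    # At a distance `metapar` from the discontinuity, the smoothed function
    # should deviate from the discontinuous one by exactly 0.01.
    return smooth_logistic2(-metapar, par) - 0.01

metapar = 2.5
# Same initial guess as in the record; without an explicit derivative,
# scipy falls back to the secant method.
smoothpar = optimize.newton(error_smoothpar, 0.3 * metapar**0.84,
                            args=(metapar,))
print(round(smooth_logistic2(-2.5, smoothpar), 6))  # close to 0.01
print(round(smooth_logistic2(2.5, smoothpar), 6))   # close to 2.51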
taskcluster/taskcluster-client.py
taskcluster/auth.py
https://github.com/taskcluster/taskcluster-client.py/blob/bcc95217f8bf80bed2ae5885a19fa0035da7ebc9/taskcluster/auth.py#L297-L316
def updateRole(self, *args, **kwargs):
    """
    Update Role

    Update an existing role.

    The caller's scopes must satisfy all of the new scopes being added, but
    need not satisfy all of the role's existing scopes.

    An update of a role that will generate an infinite expansion will result
    in an error response.

    This method takes input: ``v1/create-role-request.json#``

    This method gives output: ``v1/get-role-response.json#``

    This method is ``stable``
    """

    return self._makeApiCall(self.funcinfo["updateRole"], *args, **kwargs)
[ "def", "updateRole", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_makeApiCall", "(", "self", ".", "funcinfo", "[", "\"updateRole\"", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Update Role

Update an existing role.

The caller's scopes must satisfy all of the new scopes being added, but
need not satisfy all of the role's existing scopes.

An update of a role that will generate an infinite expansion will result
in an error response.

This method takes input: ``v1/create-role-request.json#``

This method gives output: ``v1/get-role-response.json#``

This method is ``stable``
[ "Update", "Role" ]
python
train
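Because the record above is only a thin wrapper around _makeApiCall, a hedged usage sketch may help. The client options dict and the payload fields (description and scopes, per v1/create-role-request.json) are written from memory and should be checked against the Taskcluster documentation; the role id and scope strings are placeholders.

import taskcluster

auth = taskcluster.Auth({
    'rootUrl': 'https://tc.example.com',
    'credentials': {'clientId': '...', 'accessToken': '...'},
})
# Replaces the description/scopes of an existing role; the caller must
# hold every scope that is newly added.
role = auth.updateRole('repo:github.com/example/*', {
    'description': 'CI scopes for the example organisation',
    'scopes': ['queue:create-task:aws-provisioner-v1/example-worker'],
})
print(role['expandedScopes'])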
tcalmant/ipopo
pelix/ldapfilter.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ldapfilter.py#L733-L748
def _skip_spaces(string, idx):
    # type: (str, int) -> int
    """
    Retrieves the next non-space character after idx index in the given string

    :param string: The string to look into
    :param idx: The base search index
    :return: The next non-space character index, -1 if not found
    """
    i = idx
    for char in string[idx:]:
        if not char.isspace():
            return i
        i += 1

    return -1
[ "def", "_skip_spaces", "(", "string", ",", "idx", ")", ":", "# type: (str, int) -> int", "i", "=", "idx", "for", "char", "in", "string", "[", "idx", ":", "]", ":", "if", "not", "char", ".", "isspace", "(", ")", ":", "return", "i", "i", "+=", "1", "return", "-", "1" ]
Retrieves the next non-space character after idx index in the given string

:param string: The string to look into
:param idx: The base search index
:return: The next non-space character index, -1 if not found
[ "Retrieves", "the", "next", "non", "-", "space", "character", "after", "idx", "index", "in", "the", "given", "string" ]
python
train
lark-parser/lark
examples/standalone/json_parser.py
https://github.com/lark-parser/lark/blob/a798dec77907e74520dd7e90c7b6a4acc680633a/examples/standalone/json_parser.py#L453-L459
def visit_children_decor(func):
    "See Interpreter"
    @wraps(func)
    def inner(cls, tree):
        values = cls.visit_children(tree)
        return func(cls, values)
    return inner
[ "def", "visit_children_decor", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "inner", "(", "cls", ",", "tree", ")", ":", "values", "=", "cls", ".", "visit_children", "(", "tree", ")", "return", "func", "(", "cls", ",", "values", ")", "return", "inner" ]
See Interpreter
[ "See", "Interpreter" ]
python
train
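The terse "See Interpreter" docstring hides what the decorator actually does: it lets an Interpreter callback receive the already-visited children instead of the raw tree node. The standalone sketch below repeats the decorator and mimics only the relevant part of lark's Interpreter with a hand-rolled Node class, so it runs without lark; dispatching every Node to a single method is a simplification of lark's name-based dispatch.

from functools import wraps

def visit_children_decor(func):
    "Identical to the decorator in the record above."
    @wraps(func)
    def inner(cls, tree):
        values = cls.visit_children(tree)
        return func(cls, values)
    return inner

class Node:
    def __init__(self, children):
        self.children = children

class SumInterpreter:
    # Minimal stand-in for lark.visitors.Interpreter.
    def visit(self, tree):
        return self.add(tree) if isinstance(tree, Node) else tree

    def visit_children(self, tree):
        return [self.visit(child) for child in tree.children]

    @visit_children_decor
    def add(self, values):
        # Thanks to the decorator, `values` already holds the visited
        # children rather than the raw Node.
        return sum(values)

print(SumInterpreter().visit(Node([1, Node([2, 3])])))  # prints 6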