Dataset schema (one field per line in each record below):

  repo              string, lengths 7 to 55
  path              string, lengths 4 to 223
  url               string, lengths 87 to 315
  code              string, lengths 75 to 104k
  code_tokens       list
  docstring         string, lengths 1 to 46.9k
  docstring_tokens  list
  language          string, 1 distinct value
  partition         string, 3 distinct values
  avg_line_len      float64, 7.91 to 980
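A minimal sketch of consuming rows with this schema, assuming the dump originates from a Hugging Face dataset loadable with the `datasets` library; the dataset path "org/python-code-docstrings" is a hypothetical placeholder, not the actual source of this export:

    # Load one partition and inspect a row; the dataset path is hypothetical.
    from datasets import load_dataset

    ds = load_dataset("org/python-code-docstrings", split="train")
    row = ds[0]
    print(row["repo"], row["path"])   # e.g. "fvdsn/py-xml-escpos", "xmlescpos/escpos.py"
    print(row["docstring"])           # natural-language summary of the function
    print(len(row["code_tokens"]))    # code pre-tokenized into a list of strings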
fvdsn/py-xml-escpos
xmlescpos/escpos.py
https://github.com/fvdsn/py-xml-escpos/blob/7f77e039c960d5773fb919aed02ba392dccbc360/xmlescpos/escpos.py#L424-L430
def image(self, path_img):
    """ Open image file """
    im_open = Image.open(path_img)
    im = im_open.convert("RGB")
    # Convert the RGB image in printable image
    pix_line, img_size = self._convert_image(im)
    self._print_image(pix_line, img_size)
[ "def", "image", "(", "self", ",", "path_img", ")", ":", "im_open", "=", "Image", ".", "open", "(", "path_img", ")", "im", "=", "im_open", ".", "convert", "(", "\"RGB\"", ")", "# Convert the RGB image in printable image", "pix_line", ",", "img_size", "=", "self", ".", "_convert_image", "(", "im", ")", "self", ".", "_print_image", "(", "pix_line", ",", "img_size", ")" ]
Open image file
[ "Open", "image", "file" ]
python
train
39.428571
vertexproject/synapse
synapse/common.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/common.py#L571-L587
def result(retn):
    '''
    Return a value or raise an exception from a retn tuple.
    '''
    ok, valu = retn

    if ok:
        return valu

    name, info = valu

    ctor = getattr(s_exc, name, None)
    if ctor is not None:
        raise ctor(**info)

    info['errx'] = name
    raise s_exc.SynErr(**info)
[ "def", "result", "(", "retn", ")", ":", "ok", ",", "valu", "=", "retn", "if", "ok", ":", "return", "valu", "name", ",", "info", "=", "valu", "ctor", "=", "getattr", "(", "s_exc", ",", "name", ",", "None", ")", "if", "ctor", "is", "not", "None", ":", "raise", "ctor", "(", "*", "*", "info", ")", "info", "[", "'errx'", "]", "=", "name", "raise", "s_exc", ".", "SynErr", "(", "*", "*", "info", ")" ]
Return a value or raise an exception from a retn tuple.
[ "Return", "a", "value", "or", "raise", "an", "exception", "from", "a", "retn", "tuple", "." ]
python
train
17.588235
ampl/amplpy
amplpy/dataframe.py
https://github.com/ampl/amplpy/blob/39df6954049a11a8f666aed26853259b4687099a/amplpy/dataframe.py#L235-L246
def getRowByIndex(self, index):
    """
    Get row by numeric index.

    Args:
        index: Zero-based index of the row to get.

    Returns:
        The corresponding row.
    """
    assert isinstance(index, int)
    return Row(self._impl.getRowByIndex(index))
[ "def", "getRowByIndex", "(", "self", ",", "index", ")", ":", "assert", "isinstance", "(", "index", ",", "int", ")", "return", "Row", "(", "self", ".", "_impl", ".", "getRowByIndex", "(", "index", ")", ")" ]
Get row by numeric index. Args: index: Zero-based index of the row to get. Returns: The corresponding row.
[ "Get", "row", "by", "numeric", "index", "." ]
python
train
24.25
lemieuxl/pyGenClean
pyGenClean/NoCallHetero/clean_noCall_hetero_snps.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/NoCallHetero/clean_noCall_hetero_snps.py#L32-L56
def main(argString=None):
    """The main function of the module.

    :param argString: the options.

    :type argString: list

    These are the steps:

    1. Prints the options.
    2. Reads the ``tfam`` and ``tped`` files and find all heterozygous and
       all failed markers (:py:func:`processTPEDandTFAM`).

    """
    # Getting and checking the options
    args = parseArgs(argString)
    checkArgs(args)

    logger.info("Options used:")
    for key, value in vars(args).iteritems():
        logger.info("  --{} {}".format(key.replace("_", "-"), value))

    # Process the TPED and TFAM file
    logger.info("Processing the TPED and TFAM file")
    processTPEDandTFAM(args.tfile + ".tped", args.tfile + ".tfam", args.out)
[ "def", "main", "(", "argString", "=", "None", ")", ":", "# Getting and checking the options", "args", "=", "parseArgs", "(", "argString", ")", "checkArgs", "(", "args", ")", "logger", ".", "info", "(", "\"Options used:\"", ")", "for", "key", ",", "value", "in", "vars", "(", "args", ")", ".", "iteritems", "(", ")", ":", "logger", ".", "info", "(", "\" --{} {}\"", ".", "format", "(", "key", ".", "replace", "(", "\"_\"", ",", "\"-\"", ")", ",", "value", ")", ")", "# Process the TPED and TFAM file", "logger", ".", "info", "(", "\"Processing the TPED and TFAM file\"", ")", "processTPEDandTFAM", "(", "args", ".", "tfile", "+", "\".tped\"", ",", "args", ".", "tfile", "+", "\".tfam\"", ",", "args", ".", "out", ")" ]
The main function of the module. :param argString: the options. :type argString: list These are the steps: 1. Prints the options. 2. Reads the ``tfam`` and ``tped`` files and find all heterozygous and all failed markers (:py:func:`processTPEDandTFAM`).
[ "The", "main", "function", "of", "the", "module", "." ]
python
train
28.4
OCR-D/core
ocrd/ocrd/cli/workspace.py
https://github.com/OCR-D/core/blob/57e68c578526cb955fd2e368207f5386c459d91d/ocrd/ocrd/cli/workspace.py#L101-L113
def workspace_create(ctx, clobber_mets, directory):
    """
    Create a workspace with an empty METS file in DIRECTORY.

    Use '.' for $PWD"
    """
    workspace = ctx.resolver.workspace_from_nothing(
        directory=os.path.abspath(directory),
        mets_basename=ctx.mets_basename,
        clobber_mets=clobber_mets
    )
    workspace.save_mets()
    print(workspace.directory)
[ "def", "workspace_create", "(", "ctx", ",", "clobber_mets", ",", "directory", ")", ":", "workspace", "=", "ctx", ".", "resolver", ".", "workspace_from_nothing", "(", "directory", "=", "os", ".", "path", ".", "abspath", "(", "directory", ")", ",", "mets_basename", "=", "ctx", ".", "mets_basename", ",", "clobber_mets", "=", "clobber_mets", ")", "workspace", ".", "save_mets", "(", ")", "print", "(", "workspace", ".", "directory", ")" ]
Create a workspace with an empty METS file in DIRECTORY. Use '.' for $PWD"
[ "Create", "a", "workspace", "with", "an", "empty", "METS", "file", "in", "DIRECTORY", "." ]
python
train
28.923077
mozilla/DeepSpeech
native_client/ctcdecode/__init__.py
https://github.com/mozilla/DeepSpeech/blob/f64aa73e7fbe9dde40d4fcf23b42ab304747d152/native_client/ctcdecode/__init__.py#L25-L59
def ctc_beam_search_decoder(probs_seq,
                            alphabet,
                            beam_size,
                            cutoff_prob=1.0,
                            cutoff_top_n=40,
                            scorer=None):
    """Wrapper for the CTC Beam Search Decoder.

    :param probs_seq: 2-D list of probability distributions over each time
                      step, with each element being a list of normalized
                      probabilities over alphabet and blank.
    :type probs_seq: 2-D list
    :param alphabet: alphabet list.
    :alphabet: Alphabet
    :param beam_size: Width for beam search.
    :type beam_size: int
    :param cutoff_prob: Cutoff probability in pruning,
                        default 1.0, no pruning.
    :type cutoff_prob: float
    :param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n
                         characters with highest probs in alphabet will be
                         used in beam search, default 40.
    :type cutoff_top_n: int
    :param scorer: External scorer for partially decoded sentence, e.g. word
                   count or language model.
    :type scorer: Scorer
    :return: List of tuples of log probability and sentence as decoding
             results, in descending order of the probability.
    :rtype: list
    """
    beam_results = swigwrapper.ctc_beam_search_decoder(
        probs_seq, alphabet.config_file(), beam_size, cutoff_prob,
        cutoff_top_n, scorer)
    beam_results = [(res.probability, alphabet.decode(res.tokens))
                    for res in beam_results]
    return beam_results
[ "def", "ctc_beam_search_decoder", "(", "probs_seq", ",", "alphabet", ",", "beam_size", ",", "cutoff_prob", "=", "1.0", ",", "cutoff_top_n", "=", "40", ",", "scorer", "=", "None", ")", ":", "beam_results", "=", "swigwrapper", ".", "ctc_beam_search_decoder", "(", "probs_seq", ",", "alphabet", ".", "config_file", "(", ")", ",", "beam_size", ",", "cutoff_prob", ",", "cutoff_top_n", ",", "scorer", ")", "beam_results", "=", "[", "(", "res", ".", "probability", ",", "alphabet", ".", "decode", "(", "res", ".", "tokens", ")", ")", "for", "res", "in", "beam_results", "]", "return", "beam_results" ]
Wrapper for the CTC Beam Search Decoder. :param probs_seq: 2-D list of probability distributions over each time step, with each element being a list of normalized probabilities over alphabet and blank. :type probs_seq: 2-D list :param alphabet: alphabet list. :alphabet: Alphabet :param beam_size: Width for beam search. :type beam_size: int :param cutoff_prob: Cutoff probability in pruning, default 1.0, no pruning. :type cutoff_prob: float :param cutoff_top_n: Cutoff number in pruning, only top cutoff_top_n characters with highest probs in alphabet will be used in beam search, default 40. :type cutoff_top_n: int :param scorer: External scorer for partially decoded sentence, e.g. word count or language model. :type scorer: Scorer :return: List of tuples of log probability and sentence as decoding results, in descending order of the probability. :rtype: list
[ "Wrapper", "for", "the", "CTC", "Beam", "Search", "Decoder", "." ]
python
train
44.914286
Trebek/pydealer
pydealer/stack.py
https://github.com/Trebek/pydealer/blob/2ac583dd8c55715658c740b614387775f4dda333/pydealer/stack.py#L631-L643
def shuffle(self, times=1):
    """
    Shuffles the Stack.

    .. note::
        Shuffling large numbers of cards (100,000+) may take a while.

    :arg int times:
        The number of times to shuffle.

    """
    for _ in xrange(times):
        random.shuffle(self.cards)
[ "def", "shuffle", "(", "self", ",", "times", "=", "1", ")", ":", "for", "_", "in", "xrange", "(", "times", ")", ":", "random", ".", "shuffle", "(", "self", ".", "cards", ")" ]
Shuffles the Stack. .. note:: Shuffling large numbers of cards (100,000+) may take a while. :arg int times: The number of times to shuffle.
[ "Shuffles", "the", "Stack", "." ]
python
train
23.153846
saltstack/salt
salt/modules/vsphere.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L1534-L1636
def get_service_policy(host, username, password, service_name, protocol=None,
                       port=None, host_names=None):
    '''
    Get the service name's policy for a given host or list of hosts.

    host
        The location of the host.

    username
        The username used to login to the host, such as ``root``.

    password
        The password used to login to the host.

    service_name
        The name of the service for which to retrieve the policy. Supported
        service names are:
          - DCUI
          - TSM
          - SSH
          - lbtd
          - lsassd
          - lwiod
          - netlogond
          - ntpd
          - sfcbd-watchdog
          - snmpd
          - vprobed
          - vpxa
          - xorg

    protocol
        Optionally set to alternate protocol if the host is not using the
        default protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the host is not using the
        default port. Default port is ``443``.

    host_names
        List of ESXi host names. When the host, username, and password
        credentials are provided for a vCenter Server, the host_names
        argument is required to tell vCenter the hosts for which to get
        service policy information.

        If host_names is not provided, the service policy information will
        be retrieved for the ``host`` location instead. This is useful for
        when service instance connection information is used for a single
        ESXi host.

    CLI Example:

    .. code-block:: bash

        # Used for single ESXi host connection information
        salt '*' vsphere.get_service_policy my.esxi.host root bad-password 'ssh'

        # Used for connecting to a vCenter Server
        salt '*' vsphere.get_service_policy my.vcenter.location root bad-password 'ntpd' \
            host_names='[esxi-1.host.com, esxi-2.host.com]'
    '''
    service_instance = salt.utils.vmware.get_service_instance(host=host,
                                                              username=username,
                                                              password=password,
                                                              protocol=protocol,
                                                              port=port)
    valid_services = ['DCUI', 'TSM', 'SSH', 'ssh', 'lbtd', 'lsassd', 'lwiod',
                      'netlogond', 'ntpd', 'sfcbd-watchdog', 'snmpd',
                      'vprobed', 'vpxa', 'xorg']
    host_names = _check_hosts(service_instance, host, host_names)

    ret = {}
    for host_name in host_names:
        # Check if the service_name provided is a valid one.
        # If we don't have a valid service, return. The service will be invalid for all hosts.
        if service_name not in valid_services:
            ret.update({host_name: {'Error': '{0} is not a valid service name.'.format(service_name)}})
            return ret

        host_ref = _get_host_ref(service_instance, host, host_name=host_name)
        services = host_ref.configManager.serviceSystem.serviceInfo.service

        # Don't require users to know that VMware lists the ssh service as TSM-SSH
        if service_name == 'SSH' or service_name == 'ssh':
            temp_service_name = 'TSM-SSH'
        else:
            temp_service_name = service_name

        # Loop through services until we find a matching name
        for service in services:
            if service.key == temp_service_name:
                ret.update({host_name: {service_name: service.policy}})
                # We've found a match - break out of the loop so we don't overwrite the
                # Updated host_name value with an error message.
                break
        else:
            msg = 'Could not find service \'{0}\' for host \'{1}\'.'.format(service_name, host_name)
            ret.update({host_name: {'Error': msg}})

        # If we made it this far, something else has gone wrong.
        if ret.get(host_name) is None:
            msg = '\'vsphere.get_service_policy\' failed for host {0}.'.format(host_name)
            log.debug(msg)
            ret.update({host_name: {'Error': msg}})

    return ret
[ "def", "get_service_policy", "(", "host", ",", "username", ",", "password", ",", "service_name", ",", "protocol", "=", "None", ",", "port", "=", "None", ",", "host_names", "=", "None", ")", ":", "service_instance", "=", "salt", ".", "utils", ".", "vmware", ".", "get_service_instance", "(", "host", "=", "host", ",", "username", "=", "username", ",", "password", "=", "password", ",", "protocol", "=", "protocol", ",", "port", "=", "port", ")", "valid_services", "=", "[", "'DCUI'", ",", "'TSM'", ",", "'SSH'", ",", "'ssh'", ",", "'lbtd'", ",", "'lsassd'", ",", "'lwiod'", ",", "'netlogond'", ",", "'ntpd'", ",", "'sfcbd-watchdog'", ",", "'snmpd'", ",", "'vprobed'", ",", "'vpxa'", ",", "'xorg'", "]", "host_names", "=", "_check_hosts", "(", "service_instance", ",", "host", ",", "host_names", ")", "ret", "=", "{", "}", "for", "host_name", "in", "host_names", ":", "# Check if the service_name provided is a valid one.", "# If we don't have a valid service, return. The service will be invalid for all hosts.", "if", "service_name", "not", "in", "valid_services", ":", "ret", ".", "update", "(", "{", "host_name", ":", "{", "'Error'", ":", "'{0} is not a valid service name.'", ".", "format", "(", "service_name", ")", "}", "}", ")", "return", "ret", "host_ref", "=", "_get_host_ref", "(", "service_instance", ",", "host", ",", "host_name", "=", "host_name", ")", "services", "=", "host_ref", ".", "configManager", ".", "serviceSystem", ".", "serviceInfo", ".", "service", "# Don't require users to know that VMware lists the ssh service as TSM-SSH", "if", "service_name", "==", "'SSH'", "or", "service_name", "==", "'ssh'", ":", "temp_service_name", "=", "'TSM-SSH'", "else", ":", "temp_service_name", "=", "service_name", "# Loop through services until we find a matching name", "for", "service", "in", "services", ":", "if", "service", ".", "key", "==", "temp_service_name", ":", "ret", ".", "update", "(", "{", "host_name", ":", "{", "service_name", ":", "service", ".", "policy", "}", "}", ")", "# We've found a match - break out of the loop so we don't overwrite the", "# Updated host_name value with an error message.", "break", "else", ":", "msg", "=", "'Could not find service \\'{0}\\' for host \\'{1}\\'.'", ".", "format", "(", "service_name", ",", "host_name", ")", "ret", ".", "update", "(", "{", "host_name", ":", "{", "'Error'", ":", "msg", "}", "}", ")", "# If we made it this far, something else has gone wrong.", "if", "ret", ".", "get", "(", "host_name", ")", "is", "None", ":", "msg", "=", "'\\'vsphere.get_service_policy\\' failed for host {0}.'", ".", "format", "(", "host_name", ")", "log", ".", "debug", "(", "msg", ")", "ret", ".", "update", "(", "{", "host_name", ":", "{", "'Error'", ":", "msg", "}", "}", ")", "return", "ret" ]
Get the service name's policy for a given host or list of hosts. host The location of the host. username The username used to login to the host, such as ``root``. password The password used to login to the host. service_name The name of the service for which to retrieve the policy. Supported service names are: - DCUI - TSM - SSH - lbtd - lsassd - lwiod - netlogond - ntpd - sfcbd-watchdog - snmpd - vprobed - vpxa - xorg protocol Optionally set to alternate protocol if the host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the host is not using the default port. Default port is ``443``. host_names List of ESXi host names. When the host, username, and password credentials are provided for a vCenter Server, the host_names argument is required to tell vCenter the hosts for which to get service policy information. If host_names is not provided, the service policy information will be retrieved for the ``host`` location instead. This is useful for when service instance connection information is used for a single ESXi host. CLI Example: .. code-block:: bash # Used for single ESXi host connection information salt '*' vsphere.get_service_policy my.esxi.host root bad-password 'ssh' # Used for connecting to a vCenter Server salt '*' vsphere.get_service_policy my.vcenter.location root bad-password 'ntpd' \ host_names='[esxi-1.host.com, esxi-2.host.com]'
[ "Get", "the", "service", "name", "s", "policy", "for", "a", "given", "host", "or", "list", "of", "hosts", "." ]
python
train
40.300971
pybel/pybel-tools
src/pybel_tools/mutation/collapse.py
https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/mutation/collapse.py#L50-L54
def _collapse_variants_by_function(graph: BELGraph, func: str) -> None:
    """Collapse all of the given functions' variants' edges to their parents, in-place."""
    for parent_node, variant_node, data in graph.edges(data=True):
        if data[RELATION] == HAS_VARIANT and parent_node.function == func:
            collapse_pair(graph, from_node=variant_node, to_node=parent_node)
[ "def", "_collapse_variants_by_function", "(", "graph", ":", "BELGraph", ",", "func", ":", "str", ")", "->", "None", ":", "for", "parent_node", ",", "variant_node", ",", "data", "in", "graph", ".", "edges", "(", "data", "=", "True", ")", ":", "if", "data", "[", "RELATION", "]", "==", "HAS_VARIANT", "and", "parent_node", ".", "function", "==", "func", ":", "collapse_pair", "(", "graph", ",", "from_node", "=", "variant_node", ",", "to_node", "=", "parent_node", ")" ]
Collapse all of the given functions' variants' edges to their parents, in-place.
[ "Collapse", "all", "of", "the", "given", "functions", "variants", "edges", "to", "their", "parents", "in", "-", "place", "." ]
python
valid
75.6
globocom/GloboNetworkAPI-client-python
networkapiclient/OptionVIP.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/OptionVIP.py#L190-L224
def associate(self, id_option_vip, id_environment_vip):
    """Create a relationship of OptionVip with EnvironmentVip.

    :param id_option_vip: Identifier of the Option VIP. Integer value and greater than zero.
    :param id_environment_vip: Identifier of the Environment VIP. Integer value and greater than zero.

    :return: Following dictionary

    ::

        {'opcoesvip_ambiente_xref': {'id': < id_opcoesvip_ambiente_xref >} }

    :raise InvalidParameterError: Option VIP/Environment VIP identifier is null and/or invalid.
    :raise OptionVipNotFoundError: Option VIP not registered.
    :raise EnvironmentVipNotFoundError: Environment VIP not registered.
    :raise OptionVipError: Option vip is already associated with the environment vip.
    :raise UserNotAuthorizedError: User does not have authorization to make this association.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """

    if not is_valid_int_param(id_option_vip):
        raise InvalidParameterError(
            u'The identifier of Option VIP is invalid or was not informed.')

    if not is_valid_int_param(id_environment_vip):
        raise InvalidParameterError(
            u'The identifier of Environment VIP is invalid or was not informed.')

    url = 'optionvip/' + \
        str(id_option_vip) + '/environmentvip/' + str(id_environment_vip) + '/'

    code, xml = self.submit(None, 'PUT', url)

    return self.response(code, xml)
[ "def", "associate", "(", "self", ",", "id_option_vip", ",", "id_environment_vip", ")", ":", "if", "not", "is_valid_int_param", "(", "id_option_vip", ")", ":", "raise", "InvalidParameterError", "(", "u'The identifier of Option VIP is invalid or was not informed.'", ")", "if", "not", "is_valid_int_param", "(", "id_environment_vip", ")", ":", "raise", "InvalidParameterError", "(", "u'The identifier of Environment VIP is invalid or was not informed.'", ")", "url", "=", "'optionvip/'", "+", "str", "(", "id_option_vip", ")", "+", "'/environmentvip/'", "+", "str", "(", "id_environment_vip", ")", "+", "'/'", "code", ",", "xml", "=", "self", ".", "submit", "(", "None", ",", "'PUT'", ",", "url", ")", "return", "self", ".", "response", "(", "code", ",", "xml", ")" ]
Create a relationship of OptionVip with EnvironmentVip. :param id_option_vip: Identifier of the Option VIP. Integer value and greater than zero. :param id_environment_vip: Identifier of the Environment VIP. Integer value and greater than zero. :return: Following dictionary :: {'opcoesvip_ambiente_xref': {'id': < id_opcoesvip_ambiente_xref >} } :raise InvalidParameterError: Option VIP/Environment VIP identifier is null and/or invalid. :raise OptionVipNotFoundError: Option VIP not registered. :raise EnvironmentVipNotFoundError: Environment VIP not registered. :raise OptionVipError: Option vip is already associated with the environment vip. :raise UserNotAuthorizedError: User does not have authorization to make this association. :raise DataBaseError: Networkapi failed to access the database. :raise XMLError: Networkapi failed to generate the XML response.
[ "Create", "a", "relationship", "of", "OptionVip", "with", "EnvironmentVip", "." ]
python
train
45.085714
bitesofcode/projexui
projexui/widgets/xsnapshotwidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xsnapshotwidget.py#L82-L92
def keyPressEvent(self, event):
    """
    Listens for the escape key to cancel out from this snapshot.

    :param      event | <QKeyPressEvent>
    """
    # reject on a cancel
    if event.key() == Qt.Key_Escape:
        self.reject()

    super(XSnapshotWidget, self).keyPressEvent(event)
[ "def", "keyPressEvent", "(", "self", ",", "event", ")", ":", "# reject on a cancel\r", "if", "event", ".", "key", "(", ")", "==", "Qt", ".", "Key_Escape", ":", "self", ".", "reject", "(", ")", "super", "(", "XSnapshotWidget", ",", "self", ")", ".", "keyPressEvent", "(", "event", ")" ]
Listens for the escape key to cancel out from this snapshot. :param event | <QKeyPressEvent>
[ "Listens", "for", "the", "escape", "key", "to", "cancel", "out", "from", "this", "snapshot", ".", ":", "param", "event", "|", "<QKeyPressEvent", ">" ]
python
train
31
tgbugs/pyontutils
ilxutils/ilxutils/simple_rdflib.py
https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/simple_rdflib.py#L165-L191
def add_triple(
        self,
        subj: Union[URIRef, str],
        pred: Union[URIRef, str],
        obj: Union[URIRef, Literal, str]
) -> None:
    """ Adds triple to rdflib Graph

    Triple can be of any subject, predicate, and object of the entity
    without a need for order.

    Args:
        subj: Entity subject
        pred: Entity predicate
        obj: Entity object

    Example:
        In [1]: add_triple(
           ...:     'http://uri.interlex.org/base/ilx_0101431',
           ...:     RDF.type,
           ...:     'http://www.w3.org/2002/07/owl#Class')
           ...: )
    """
    if obj in [None, "", " "]:
        return  # Empty objects are bad practice

    _subj = self.process_subj_or_pred(subj)
    _pred = self.process_subj_or_pred(pred)
    _obj = self.process_obj(obj)
    self.g.add((_subj, _pred, _obj))
[ "def", "add_triple", "(", "self", ",", "subj", ":", "Union", "[", "URIRef", ",", "str", "]", ",", "pred", ":", "Union", "[", "URIRef", ",", "str", "]", ",", "obj", ":", "Union", "[", "URIRef", ",", "Literal", ",", "str", "]", ")", "->", "None", ":", "if", "obj", "in", "[", "None", ",", "\"\"", ",", "\" \"", "]", ":", "return", "# Empty objects are bad practice", "_subj", "=", "self", ".", "process_subj_or_pred", "(", "subj", ")", "_pred", "=", "self", ".", "process_subj_or_pred", "(", "pred", ")", "_obj", "=", "self", ".", "process_obj", "(", "obj", ")", "self", ".", "g", ".", "add", "(", "(", "_subj", ",", "_pred", ",", "_obj", ")", ")" ]
Adds triple to rdflib Graph Triple can be of any subject, predicate, and object of the entity without a need for order. Args: subj: Entity subject pred: Entity predicate obj: Entity object Example: In [1]: add_triple( ...: 'http://uri.interlex.org/base/ilx_0101431', ...: RDF.type, ...: 'http://www.w3.org/2002/07/owl#Class') ...: )
[ "Adds", "triple", "to", "rdflib", "Graph" ]
python
train
33.481481
amol-/tgext.mailer
tgext/mailer/mailer.py
https://github.com/amol-/tgext.mailer/blob/4c452244969b98431e57d5ebba930f365006dfbd/tgext/mailer/mailer.py#L217-L234
def send_immediately(self, message, fail_silently=False):
    """Send a message immediately, outside the transaction manager.

    If there is a connection error to the mail server this will have to
    be handled manually. However if you pass ``fail_silently`` the error
    will be swallowed.

    :versionadded: 0.3

    :param message: a 'Message' instance.

    :param fail_silently: silently handle connection errors.
    """
    try:
        return self.smtp_mailer.send(*self._message_args(message))
    except smtplib.socket.error:
        if not fail_silently:
            raise
[ "def", "send_immediately", "(", "self", ",", "message", ",", "fail_silently", "=", "False", ")", ":", "try", ":", "return", "self", ".", "smtp_mailer", ".", "send", "(", "*", "self", ".", "_message_args", "(", "message", ")", ")", "except", "smtplib", ".", "socket", ".", "error", ":", "if", "not", "fail_silently", ":", "raise" ]
Send a message immediately, outside the transaction manager. If there is a connection error to the mail server this will have to be handled manually. However if you pass ``fail_silently`` the error will be swallowed. :versionadded: 0.3 :param message: a 'Message' instance. :param fail_silently: silently handle connection errors.
[ "Send", "a", "message", "immediately", "outside", "the", "transaction", "manager", "." ]
python
train
34.611111
projectatomic/osbs-client
osbs/build/build_request.py
https://github.com/projectatomic/osbs-client/blob/571fe035dab3a7c02e1dccd5d65ffd75be750458/osbs/build/build_request.py#L515-L544
def adjust_for_registry_api_versions(self):
    """
    Enable/disable plugins depending on supported registry API versions
    """

    versions = self.spec.registry_api_versions.value

    if 'v2' not in versions:
        raise OsbsValidationException('v1-only docker registry API is not supported')

    try:
        push_conf = self.dj.dock_json_get_plugin_conf('postbuild_plugins',
                                                      'tag_and_push')
        tag_and_push_registries = push_conf['args']['registries']
    except (KeyError, IndexError):
        tag_and_push_registries = {}

    if 'v1' not in versions:
        # Remove v1-only plugins
        for phase, name in [('postbuild_plugins', 'pulp_push')]:
            logger.info("removing v1-only plugin: %s", name)
            self.dj.remove_plugin(phase, name)

        # remove extra tag_and_push config
        self.remove_tag_and_push_registries(tag_and_push_registries, 'v1')

    # Remove 'version' from tag_and_push plugin config as it's no
    # longer needed
    for regdict in tag_and_push_registries.values():
        if 'version' in regdict:
            del regdict['version']
[ "def", "adjust_for_registry_api_versions", "(", "self", ")", ":", "versions", "=", "self", ".", "spec", ".", "registry_api_versions", ".", "value", "if", "'v2'", "not", "in", "versions", ":", "raise", "OsbsValidationException", "(", "'v1-only docker registry API is not supported'", ")", "try", ":", "push_conf", "=", "self", ".", "dj", ".", "dock_json_get_plugin_conf", "(", "'postbuild_plugins'", ",", "'tag_and_push'", ")", "tag_and_push_registries", "=", "push_conf", "[", "'args'", "]", "[", "'registries'", "]", "except", "(", "KeyError", ",", "IndexError", ")", ":", "tag_and_push_registries", "=", "{", "}", "if", "'v1'", "not", "in", "versions", ":", "# Remove v1-only plugins", "for", "phase", ",", "name", "in", "[", "(", "'postbuild_plugins'", ",", "'pulp_push'", ")", "]", ":", "logger", ".", "info", "(", "\"removing v1-only plugin: %s\"", ",", "name", ")", "self", ".", "dj", ".", "remove_plugin", "(", "phase", ",", "name", ")", "# remove extra tag_and_push config", "self", ".", "remove_tag_and_push_registries", "(", "tag_and_push_registries", ",", "'v1'", ")", "# Remove 'version' from tag_and_push plugin config as it's no", "# longer needed", "for", "regdict", "in", "tag_and_push_registries", ".", "values", "(", ")", ":", "if", "'version'", "in", "regdict", ":", "del", "regdict", "[", "'version'", "]" ]
Enable/disable plugins depending on supported registry API versions
[ "Enable", "/", "disable", "plugins", "depending", "on", "supported", "registry", "API", "versions" ]
python
train
40.766667
F5Networks/f5-common-python
f5/bigip/tm/sys/ucs.py
https://github.com/F5Networks/f5-common-python/blob/7e67d5acd757a60e3d5f8c88c534bd72208f5494/f5/bigip/tm/sys/ucs.py#L83-L95
def load(self, **kwargs):
    """Method to list the UCS on the system

    Since this is only fixed in 12.1.0 and up
    we implemented version check here
    """
    # Check if we are using 12.1.0 version or above when using this method
    self._is_version_supported_method('12.1.0')
    newinst = self._stamp_out_core()
    newinst._refresh(**kwargs)
    return newinst
[ "def", "load", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Check if we are using 12.1.0 version or above when using this method", "self", ".", "_is_version_supported_method", "(", "'12.1.0'", ")", "newinst", "=", "self", ".", "_stamp_out_core", "(", ")", "newinst", ".", "_refresh", "(", "*", "*", "kwargs", ")", "return", "newinst" ]
Method to list the UCS on the system Since this is only fixed in 12.1.0 and up we implemented version check here
[ "Method", "to", "list", "the", "UCS", "on", "the", "system" ]
python
train
30.615385
titusjan/argos
argos/inspector/pgplugins/imageplot2d.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/inspector/pgplugins/imageplot2d.py#L236-L241
def setHorCrossPlotAutoRangeOn(self, axisNumber):
    """ Sets the horizontal cross-hair plot's auto-range on for the axis with number axisNumber.

    :param axisNumber: 0 (X-axis), 1 (Y-axis), 2, (Both X and Y axes).
    """
    setXYAxesAutoRangeOn(self, self.xAxisRangeCti, self.horCrossPlotRangeCti, axisNumber)
[ "def", "setHorCrossPlotAutoRangeOn", "(", "self", ",", "axisNumber", ")", ":", "setXYAxesAutoRangeOn", "(", "self", ",", "self", ".", "xAxisRangeCti", ",", "self", ".", "horCrossPlotRangeCti", ",", "axisNumber", ")" ]
Sets the horizontal cross-hair plot's auto-range on for the axis with number axisNumber. :param axisNumber: 0 (X-axis), 1 (Y-axis), 2, (Both X and Y axes).
[ "Sets", "the", "horizontal", "cross", "-", "hair", "plot", "s", "auto", "-", "range", "on", "for", "the", "axis", "with", "number", "axisNumber", "." ]
python
train
55.166667
avalente/appmetrics
appmetrics/statistics.py
https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/statistics.py#L442-L455
def percentile(data, n):
    """Return the n-th percentile of the given data

    Assume that the data are already sorted
    """

    size = len(data)
    idx = (n / 100.0) * size - 0.5

    if idx < 0 or idx > size:
        raise StatisticsError("Too few data points ({}) for {}th percentile".format(size, n))

    return data[int(idx)]
[ "def", "percentile", "(", "data", ",", "n", ")", ":", "size", "=", "len", "(", "data", ")", "idx", "=", "(", "n", "/", "100.0", ")", "*", "size", "-", "0.5", "if", "idx", "<", "0", "or", "idx", ">", "size", ":", "raise", "StatisticsError", "(", "\"Too few data points ({}) for {}th percentile\"", ".", "format", "(", "size", ",", "n", ")", ")", "return", "data", "[", "int", "(", "idx", ")", "]" ]
Return the n-th percentile of the given data Assume that the data are already sorted
[ "Return", "the", "n", "-", "th", "percentile", "of", "the", "given", "data" ]
python
train
23.285714
SkyLothar/shcmd
shcmd/tar.py
https://github.com/SkyLothar/shcmd/blob/d8cad6311a4da7ef09f3419c86b58e30388b7ee3/shcmd/tar.py#L44-L49
def files(self):
    """files that will be add to tar file later

    should be tuple, list or generator that returns strings
    """
    ios_names = [info.name for info in self._ios_to_add.keys()]
    return set(self.files_to_add + ios_names)
[ "def", "files", "(", "self", ")", ":", "ios_names", "=", "[", "info", ".", "name", "for", "info", "in", "self", ".", "_ios_to_add", ".", "keys", "(", ")", "]", "return", "set", "(", "self", ".", "files_to_add", "+", "ios_names", ")" ]
files that will be add to tar file later should be tuple, list or generator that returns strings
[ "files", "that", "will", "be", "add", "to", "tar", "file", "later", "should", "be", "tuple", "list", "or", "generator", "that", "returns", "strings" ]
python
train
42.833333
pymc-devs/pymc
pymc/StepMethods.py
https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L669-L687
def propose(self):
    """
    Proposals for positive definite matrix using random walk deviations on
    the Cholesky factor of the current value.
    """

    # Locally store size of matrix
    dims = self.stochastic.value.shape

    # Add normal deviate to value and symmetrize
    dev = rnormal(
        0, self.adaptive_scale_factor * self.proposal_sd, size=dims)
    symmetrize(dev)

    # Replace
    self.stochastic.value = dev + self.stochastic.value
[ "def", "propose", "(", "self", ")", ":", "# Locally store size of matrix", "dims", "=", "self", ".", "stochastic", ".", "value", ".", "shape", "# Add normal deviate to value and symmetrize", "dev", "=", "rnormal", "(", "0", ",", "self", ".", "adaptive_scale_factor", "*", "self", ".", "proposal_sd", ",", "size", "=", "dims", ")", "symmetrize", "(", "dev", ")", "# Replace", "self", ".", "stochastic", ".", "value", "=", "dev", "+", "self", ".", "stochastic", ".", "value" ]
Proposals for positive definite matrix using random walk deviations on the Cholesky factor of the current value.
[ "Proposals", "for", "positive", "definite", "matrix", "using", "random", "walk", "deviations", "on", "the", "Cholesky", "factor", "of", "the", "current", "value", "." ]
python
train
27.631579
singnet/snet-cli
snet_cli/utils_ipfs.py
https://github.com/singnet/snet-cli/blob/1b5ac98cb9a64211c861ead9fcfe6208f2749032/snet_cli/utils_ipfs.py#L79-L96
def safe_extract_proto_from_ipfs(ipfs_client, ipfs_hash, protodir):
    """
    Tar files might be dangerous (see https://bugs.python.org/issue21109,
    and https://docs.python.org/3/library/tarfile.html, TarFile.extractall warning)
    we extract only simple files
    """
    spec_tar = get_from_ipfs_and_checkhash(ipfs_client, ipfs_hash)
    with tarfile.open(fileobj=io.BytesIO(spec_tar)) as f:
        for m in f.getmembers():
            if (os.path.dirname(m.name) != ""):
                raise Exception("tarball has directories. We do not support it.")
            if (not m.isfile()):
                raise Exception("tarball contains %s which is not a files" % m.name)
            fullname = os.path.join(protodir, m.name)
            if (os.path.exists(fullname)):
                raise Exception("%s already exists." % fullname)
        # now it is safe to call extractall
        f.extractall(protodir)
[ "def", "safe_extract_proto_from_ipfs", "(", "ipfs_client", ",", "ipfs_hash", ",", "protodir", ")", ":", "spec_tar", "=", "get_from_ipfs_and_checkhash", "(", "ipfs_client", ",", "ipfs_hash", ")", "with", "tarfile", ".", "open", "(", "fileobj", "=", "io", ".", "BytesIO", "(", "spec_tar", ")", ")", "as", "f", ":", "for", "m", "in", "f", ".", "getmembers", "(", ")", ":", "if", "(", "os", ".", "path", ".", "dirname", "(", "m", ".", "name", ")", "!=", "\"\"", ")", ":", "raise", "Exception", "(", "\"tarball has directories. We do not support it.\"", ")", "if", "(", "not", "m", ".", "isfile", "(", ")", ")", ":", "raise", "Exception", "(", "\"tarball contains %s which is not a files\"", "%", "m", ".", "name", ")", "fullname", "=", "os", ".", "path", ".", "join", "(", "protodir", ",", "m", ".", "name", ")", "if", "(", "os", ".", "path", ".", "exists", "(", "fullname", ")", ")", ":", "raise", "Exception", "(", "\"%s already exists.\"", "%", "fullname", ")", "# now it is safe to call extractall", "f", ".", "extractall", "(", "protodir", ")" ]
Tar files might be dangerous (see https://bugs.python.org/issue21109, and https://docs.python.org/3/library/tarfile.html, TarFile.extractall warning) we extract only simple files
[ "Tar", "files", "might", "be", "dangerous", "(", "see", "https", ":", "//", "bugs", ".", "python", ".", "org", "/", "issue21109", "and", "https", ":", "//", "docs", ".", "python", ".", "org", "/", "3", "/", "library", "/", "tarfile", ".", "html", "TarFile", ".", "extractall", "warning", ")", "we", "extract", "only", "simple", "files" ]
python
train
49.777778
yymao/generic-catalog-reader
GCR/base.py
https://github.com/yymao/generic-catalog-reader/blob/bc6267ac41b9f68106ed6065184469ac13fdc0b6/GCR/base.py#L162-L170
def get_input_kwargs(self, key=None, default=None):
    """
    Deprecated. Use `get_catalog_info` instead.

    Get information from the catalog config file.
    If *key* is `None`, return the full dict.
    """
    warnings.warn("`get_input_kwargs` is deprecated; use `get_catalog_info` instead.", DeprecationWarning)
    return self.get_catalog_info(key, default)
[ "def", "get_input_kwargs", "(", "self", ",", "key", "=", "None", ",", "default", "=", "None", ")", ":", "warnings", ".", "warn", "(", "\"`get_input_kwargs` is deprecated; use `get_catalog_info` instead.\"", ",", "DeprecationWarning", ")", "return", "self", ".", "get_catalog_info", "(", "key", ",", "default", ")" ]
Deprecated. Use `get_catalog_info` instead. Get information from the catalog config file. If *key* is `None`, return the full dict.
[ "Deprecated", ".", "Use", "get_catalog_info", "instead", "." ]
python
train
42.888889
invinst/ResponseBot
responsebot/responsebot_client.py
https://github.com/invinst/ResponseBot/blob/a6b1a431a343007f7ae55a193e432a61af22253f/responsebot/responsebot_client.py#L97-L109
def get_tweet(self, id):
    """
    Get an existing tweet.

    :param id: ID of the tweet in question
    :return: Tweet object. None if not found
    """
    try:
        return Tweet(self._client.get_status(id=id)._json)
    except TweepError as e:
        if e.api_code == TWITTER_TWEET_NOT_FOUND_ERROR:
            return None
        raise
[ "def", "get_tweet", "(", "self", ",", "id", ")", ":", "try", ":", "return", "Tweet", "(", "self", ".", "_client", ".", "get_status", "(", "id", "=", "id", ")", ".", "_json", ")", "except", "TweepError", "as", "e", ":", "if", "e", ".", "api_code", "==", "TWITTER_TWEET_NOT_FOUND_ERROR", ":", "return", "None", "raise" ]
Get an existing tweet. :param id: ID of the tweet in question :return: Tweet object. None if not found
[ "Get", "an", "existing", "tweet", "." ]
python
train
29.076923
COLORFULBOARD/revision
revision/config.py
https://github.com/COLORFULBOARD/revision/blob/2f22e72cce5b60032a80c002ac45c2ecef0ed987/revision/config.py#L68-L106
def read_config(config_path_or_dict=None):
    """
    Read config from given path string or dict object.

    :param config_path_or_dict:
    :type config_path_or_dict: str or dict
    :return: Returns config object or None if not found.
    :rtype: :class:`revision.config.Config`
    """
    config = None

    if isinstance(config_path_or_dict, dict):
        config = Config(config_path_or_dict)

    if isinstance(config_path_or_dict, string_types):
        if os.path.isabs(config_path_or_dict):
            config_path = config_path_or_dict
        else:
            config_path = os.path.join(
                os.getcwd(),
                os.path.normpath(config_path_or_dict)
            )
    else:
        config_path = os.path.join(
            os.getcwd(),
            DEFAULT_CONFIG_PATH
        )

    if os.path.exists(config_path):
        with open(config_path, 'r') as f:
            data = json.load(f)
            config = Config(data)

    if config is None:
        raise ConfigNotFound()
    else:
        config.validate()

    return config
[ "def", "read_config", "(", "config_path_or_dict", "=", "None", ")", ":", "config", "=", "None", "if", "isinstance", "(", "config_path_or_dict", ",", "dict", ")", ":", "config", "=", "Config", "(", "config_path_or_dict", ")", "if", "isinstance", "(", "config_path_or_dict", ",", "string_types", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", "config_path_or_dict", ")", ":", "config_path", "=", "config_path_or_dict", "else", ":", "config_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "os", ".", "path", ".", "normpath", "(", "config_path_or_dict", ")", ")", "else", ":", "config_path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "getcwd", "(", ")", ",", "DEFAULT_CONFIG_PATH", ")", "if", "os", ".", "path", ".", "exists", "(", "config_path", ")", ":", "with", "open", "(", "config_path", ",", "'r'", ")", "as", "f", ":", "data", "=", "json", ".", "load", "(", "f", ")", "config", "=", "Config", "(", "data", ")", "if", "config", "is", "None", ":", "raise", "ConfigNotFound", "(", ")", "else", ":", "config", ".", "validate", "(", ")", "return", "config" ]
Read config from given path string or dict object. :param config_path_or_dict: :type config_path_or_dict: str or dict :return: Returns config object or None if not found. :rtype: :class:`revision.config.Config`
[ "Read", "config", "from", "given", "path", "string", "or", "dict", "object", "." ]
python
train
26.487179
serkanyersen/underscore.py
src/underscore.py
https://github.com/serkanyersen/underscore.py/blob/07c25c3f0f789536e4ad47aa315faccc0da9602f/src/underscore.py#L789-L800
def delay(self, wait, *args):
    """
    Delays a function for the given number of milliseconds, and then calls
    it with the arguments supplied.
    """

    def call_it():
        self.obj(*args)

    t = Timer((float(wait) / float(1000)), call_it)
    t.start()
    return self._wrap(self.obj)
[ "def", "delay", "(", "self", ",", "wait", ",", "*", "args", ")", ":", "def", "call_it", "(", ")", ":", "self", ".", "obj", "(", "*", "args", ")", "t", "=", "Timer", "(", "(", "float", "(", "wait", ")", "/", "float", "(", "1000", ")", ")", ",", "call_it", ")", "t", ".", "start", "(", ")", "return", "self", ".", "_wrap", "(", "self", ".", "obj", ")" ]
Delays a function for the given number of milliseconds, and then calls it with the arguments supplied.
[ "Delays", "a", "function", "for", "the", "given", "number", "of", "milliseconds", "and", "then", "calls", "it", "with", "the", "arguments", "supplied", "." ]
python
train
27
lordmauve/lepton
examples/games/bonk/game.py
https://github.com/lordmauve/lepton/blob/bf03f2c20ea8c51ade632f692d0a21e520fbba7c/examples/games/bonk/game.py#L263-L268
def update(self, td):
    """Update state of ball"""
    self.sprite.last_position = self.sprite.position
    self.sprite.last_velocity = self.sprite.velocity
    if self.particle_group != None:
        self.update_particle_group(td)
[ "def", "update", "(", "self", ",", "td", ")", ":", "self", ".", "sprite", ".", "last_position", "=", "self", ".", "sprite", ".", "position", "self", ".", "sprite", ".", "last_velocity", "=", "self", ".", "sprite", ".", "velocity", "if", "self", ".", "particle_group", "!=", "None", ":", "self", ".", "update_particle_group", "(", "td", ")" ]
Update state of ball
[ "Update", "state", "of", "ball" ]
python
train
42.166667
juju/python-libjuju
juju/client/_client1.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/client/_client1.py#L3071-L3086
async def DestroyMachines(self, force, machine_names):
    '''
    force : bool
    machine_names : typing.Sequence[str]
    Returns -> None
    '''
    # map input types to rpc msg
    _params = dict()
    msg = dict(type='Client',
               request='DestroyMachines',
               version=1,
               params=_params)
    _params['force'] = force
    _params['machine-names'] = machine_names
    reply = await self.rpc(msg)
    return reply
[ "async", "def", "DestroyMachines", "(", "self", ",", "force", ",", "machine_names", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'Client'", ",", "request", "=", "'DestroyMachines'", ",", "version", "=", "1", ",", "params", "=", "_params", ")", "_params", "[", "'force'", "]", "=", "force", "_params", "[", "'machine-names'", "]", "=", "machine_names", "reply", "=", "await", "self", ".", "rpc", "(", "msg", ")", "return", "reply" ]
force : bool machine_names : typing.Sequence[str] Returns -> None
[ "force", ":", "bool", "machine_names", ":", "typing", ".", "Sequence", "[", "str", "]", "Returns", "-", ">", "None" ]
python
train
31.1875
PlaidWeb/Publ
publ/queries.py
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/queries.py#L145-L157
def where_entry_date(query, datespec):
    """ Where clause for entries which match a textual date spec

    datespec -- The date spec to check for, in YYYY[[-]MM[[-]DD]] format
    """
    date, interval, _ = utils.parse_date(datespec)
    start_date, end_date = date.span(interval)

    return orm.select(
        e for e in query
        if e.local_date >= start_date.naive and
        e.local_date <= end_date.naive
    )
[ "def", "where_entry_date", "(", "query", ",", "datespec", ")", ":", "date", ",", "interval", ",", "_", "=", "utils", ".", "parse_date", "(", "datespec", ")", "start_date", ",", "end_date", "=", "date", ".", "span", "(", "interval", ")", "return", "orm", ".", "select", "(", "e", "for", "e", "in", "query", "if", "e", ".", "local_date", ">=", "start_date", ".", "naive", "and", "e", ".", "local_date", "<=", "end_date", ".", "naive", ")" ]
Where clause for entries which match a textual date spec datespec -- The date spec to check for, in YYYY[[-]MM[[-]DD]] format
[ "Where", "clause", "for", "entries", "which", "match", "a", "textual", "date", "spec" ]
python
train
31.769231
picleslivre/plumber
plumber.py
https://github.com/picleslivre/plumber/blob/f5019ef20679b3e9a31c6c84e4f4529d72dc8db9/plumber.py#L203-L229
def run(self, data, rewrap=False, prefetch=0):
    """
    Wires the pipeline and returns a lazy object of the transformed data.

    :param data: must be an iterable, where a full document must be returned
    for each loop

    :param rewrap: (optional) is a bool that indicates the need to rewrap
    data in cases where iterating over it produces undesired results,
    for instance ``dict`` instances.

    :param prefetch: (optional) is an int defining the number of items to
    be prefetched once the pipeline starts yielding data. The default
    prefetching mechanism is based on threads, so be careful with
    CPU-bound processing pipelines.
    """
    if rewrap:
        data = [data]

    for _filter in self._filters:
        _filter.feed(data)
        data = _filter
    else:
        iterable = self._prefetch_callable(data, prefetch) if prefetch else data
        for out_data in iterable:
            yield out_data
[ "def", "run", "(", "self", ",", "data", ",", "rewrap", "=", "False", ",", "prefetch", "=", "0", ")", ":", "if", "rewrap", ":", "data", "=", "[", "data", "]", "for", "_filter", "in", "self", ".", "_filters", ":", "_filter", ".", "feed", "(", "data", ")", "data", "=", "_filter", "else", ":", "iterable", "=", "self", ".", "_prefetch_callable", "(", "data", ",", "prefetch", ")", "if", "prefetch", "else", "data", "for", "out_data", "in", "iterable", ":", "yield", "out_data" ]
Wires the pipeline and returns a lazy object of the transformed data. :param data: must be an iterable, where a full document must be returned for each loop :param rewrap: (optional) is a bool that indicates the need to rewrap data in cases where iterating over it produces undesired results, for instance ``dict`` instances. :param prefetch: (optional) is an int defining the number of items to be prefetched once the pipeline starts yielding data. The default prefetching mechanism is based on threads, so be careful with CPU-bound processing pipelines.
[ "Wires", "the", "pipeline", "and", "returns", "a", "lazy", "object", "of", "the", "transformed", "data", "." ]
python
train
37.074074
wimglenn/advent-of-code-data
aocd/models.py
https://github.com/wimglenn/advent-of-code-data/blob/a3856459d225840f2b6919659fc65aa7a6a74533/aocd/models.py#L283-L312
def _get_answer(self, part):
    """
    Note: Answers are only revealed after a correct submission. If you've
    have not already solved the puzzle, AocdError will be raised.
    """
    answer_fname = getattr(self, "answer_{}_fname".format(part))
    if os.path.isfile(answer_fname):
        with open(answer_fname) as f:
            return f.read().strip()
    # scrape puzzle page for any previously solved answers
    response = requests.get(self.url, cookies=self._cookies, headers=self._headers)
    response.raise_for_status()
    soup = bs4.BeautifulSoup(response.text, "html.parser")
    if not self._title:
        # may as well save this while we're here
        self._save_title(soup=soup)
    hit = "Your puzzle answer was"
    paras = [p for p in soup.find_all("p") if p.text.startswith(hit)]
    if paras:
        parta_correct_answer = paras[0].code.text
        self._save_correct_answer(value=parta_correct_answer, part="a")
        if len(paras) > 1:
            _p1, p2 = paras
            partb_correct_answer = p2.code.text
            self._save_correct_answer(value=partb_correct_answer, part="b")
    if os.path.isfile(answer_fname):
        with open(answer_fname) as f:
            return f.read().strip()
    msg = "Answer {}-{}{} is not available".format(self.year, self.day, part)
    raise PuzzleUnsolvedError(msg)
[ "def", "_get_answer", "(", "self", ",", "part", ")", ":", "answer_fname", "=", "getattr", "(", "self", ",", "\"answer_{}_fname\"", ".", "format", "(", "part", ")", ")", "if", "os", ".", "path", ".", "isfile", "(", "answer_fname", ")", ":", "with", "open", "(", "answer_fname", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", ".", "strip", "(", ")", "# scrape puzzle page for any previously solved answers", "response", "=", "requests", ".", "get", "(", "self", ".", "url", ",", "cookies", "=", "self", ".", "_cookies", ",", "headers", "=", "self", ".", "_headers", ")", "response", ".", "raise_for_status", "(", ")", "soup", "=", "bs4", ".", "BeautifulSoup", "(", "response", ".", "text", ",", "\"html.parser\"", ")", "if", "not", "self", ".", "_title", ":", "# may as well save this while we're here", "self", ".", "_save_title", "(", "soup", "=", "soup", ")", "hit", "=", "\"Your puzzle answer was\"", "paras", "=", "[", "p", "for", "p", "in", "soup", ".", "find_all", "(", "\"p\"", ")", "if", "p", ".", "text", ".", "startswith", "(", "hit", ")", "]", "if", "paras", ":", "parta_correct_answer", "=", "paras", "[", "0", "]", ".", "code", ".", "text", "self", ".", "_save_correct_answer", "(", "value", "=", "parta_correct_answer", ",", "part", "=", "\"a\"", ")", "if", "len", "(", "paras", ")", ">", "1", ":", "_p1", ",", "p2", "=", "paras", "partb_correct_answer", "=", "p2", ".", "code", ".", "text", "self", ".", "_save_correct_answer", "(", "value", "=", "partb_correct_answer", ",", "part", "=", "\"b\"", ")", "if", "os", ".", "path", ".", "isfile", "(", "answer_fname", ")", ":", "with", "open", "(", "answer_fname", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")", ".", "strip", "(", ")", "msg", "=", "\"Answer {}-{}{} is not available\"", ".", "format", "(", "self", ".", "year", ",", "self", ".", "day", ",", "part", ")", "raise", "PuzzleUnsolvedError", "(", "msg", ")" ]
Note: Answers are only revealed after a correct submission. If you've have not already solved the puzzle, AocdError will be raised.
[ "Note", ":", "Answers", "are", "only", "revealed", "after", "a", "correct", "submission", ".", "If", "you", "ve", "have", "not", "already", "solved", "the", "puzzle", "AocdError", "will", "be", "raised", "." ]
python
train
47.8
exa-analytics/exa
exa/core/editor.py
https://github.com/exa-analytics/exa/blob/40fb3c22b531d460dbc51e603de75b856cc28f0d/exa/core/editor.py#L183-L219
def find(self, *strings, **kwargs):
    """
    Search the entire editor for lines that match the string.

    .. code-block:: Python

        string = '''word one
        word two
        three'''
        ed = Editor(string)
        ed.find('word')          # [(0, "word one"), (1, "word two")]
        ed.find('word', 'three') # {'word': [...], 'three': [(2, "three")]}

    Args:
        strings (str): Any number of strings to search for
        keys_only (bool): Only return keys
        start (int): Optional line to start searching on
        stop (int): Optional line to stop searching on

    Returns:
        results: If multiple strings searched a dictionary of string key,
            (line number, line) values (else just values)
    """
    start = kwargs.pop("start", 0)
    stop = kwargs.pop("stop", None)
    keys_only = kwargs.pop("keys_only", False)
    results = {string: [] for string in strings}
    stop = len(self) if stop is None else stop
    for i, line in enumerate(self[start:stop]):
        for string in strings:
            if string in line:
                if keys_only:
                    results[string].append(i)
                else:
                    results[string].append((i, line))
    if len(strings) == 1:
        return results[strings[0]]
    return results
[ "def", "find", "(", "self", ",", "*", "strings", ",", "*", "*", "kwargs", ")", ":", "start", "=", "kwargs", ".", "pop", "(", "\"start\"", ",", "0", ")", "stop", "=", "kwargs", ".", "pop", "(", "\"stop\"", ",", "None", ")", "keys_only", "=", "kwargs", ".", "pop", "(", "\"keys_only\"", ",", "False", ")", "results", "=", "{", "string", ":", "[", "]", "for", "string", "in", "strings", "}", "stop", "=", "len", "(", "self", ")", "if", "stop", "is", "None", "else", "stop", "for", "i", ",", "line", "in", "enumerate", "(", "self", "[", "start", ":", "stop", "]", ")", ":", "for", "string", "in", "strings", ":", "if", "string", "in", "line", ":", "if", "keys_only", ":", "results", "[", "string", "]", ".", "append", "(", "i", ")", "else", ":", "results", "[", "string", "]", ".", "append", "(", "(", "i", ",", "line", ")", ")", "if", "len", "(", "strings", ")", "==", "1", ":", "return", "results", "[", "strings", "[", "0", "]", "]", "return", "results" ]
Search the entire editor for lines that match the string. .. code-block:: Python string = '''word one word two three''' ed = Editor(string) ed.find('word') # [(0, "word one"), (1, "word two")] ed.find('word', 'three') # {'word': [...], 'three': [(2, "three")]} Args: strings (str): Any number of strings to search for keys_only (bool): Only return keys start (int): Optional line to start searching on stop (int): Optional line to stop searching on Returns: results: If multiple strings searched a dictionary of string key, (line number, line) values (else just values)
[ "Search", "the", "entire", "editor", "for", "lines", "that", "match", "the", "string", "." ]
python
train
37.459459
xapple/plumbing
plumbing/common.py
https://github.com/xapple/plumbing/blob/4a7706c7722f5996d0ca366f191aff9ac145880a/plumbing/common.py#L417-L445
def download_from_url(source, destination, progress=False, uncompress=False):
    """Download a file from an URL and place it somewhere. Like wget.
    Uses requests and tqdm to display progress if you want.
    By default it will uncompress files.
    #TODO: handle case where destination is a directory"""
    # Modules #
    from tqdm import tqdm
    import requests
    from autopaths.file_path import FilePath
    # Check destination exists #
    destination = FilePath(destination)
    destination.directory.create_if_not_exists()
    # Over HTTP #
    response = requests.get(source, stream=True)
    total_size = int(response.headers.get('content-length'))
    block_size = int(total_size/1024)
    # Do it #
    with open(destination, "wb") as handle:
        if progress:
            for data in tqdm(response.iter_content(chunk_size=block_size), total=1024):
                handle.write(data)
        else:
            for data in response.iter_content(chunk_size=block_size):
                handle.write(data)
    # Uncompress #
    if uncompress:
        with open(destination) as f: header = f.read(4)
        if header == "PK\x03\x04": unzip(destination, inplace=True)
        # Add other compression formats here
    # Return #
    return destination
[ "def", "download_from_url", "(", "source", ",", "destination", ",", "progress", "=", "False", ",", "uncompress", "=", "False", ")", ":", "# Modules #", "from", "tqdm", "import", "tqdm", "import", "requests", "from", "autopaths", ".", "file_path", "import", "FilePath", "# Check destination exists #", "destination", "=", "FilePath", "(", "destination", ")", "destination", ".", "directory", ".", "create_if_not_exists", "(", ")", "# Over HTTP #", "response", "=", "requests", ".", "get", "(", "source", ",", "stream", "=", "True", ")", "total_size", "=", "int", "(", "response", ".", "headers", ".", "get", "(", "'content-length'", ")", ")", "block_size", "=", "int", "(", "total_size", "/", "1024", ")", "# Do it #", "with", "open", "(", "destination", ",", "\"wb\"", ")", "as", "handle", ":", "if", "progress", ":", "for", "data", "in", "tqdm", "(", "response", ".", "iter_content", "(", "chunk_size", "=", "block_size", ")", ",", "total", "=", "1024", ")", ":", "handle", ".", "write", "(", "data", ")", "else", ":", "for", "data", "in", "response", ".", "iter_content", "(", "chunk_size", "=", "block_size", ")", ":", "handle", ".", "write", "(", "data", ")", "# Uncompress #", "if", "uncompress", ":", "with", "open", "(", "destination", ")", "as", "f", ":", "header", "=", "f", ".", "read", "(", "4", ")", "if", "header", "==", "\"PK\\x03\\x04\"", ":", "unzip", "(", "destination", ",", "inplace", "=", "True", ")", "# Add other compression formats here", "# Return #", "return", "destination" ]
Download a file from an URL and place it somewhere. Like wget. Uses requests and tqdm to display progress if you want. By default it will uncompress files. #TODO: handle case where destination is a directory
[ "Download", "a", "file", "from", "an", "URL", "and", "place", "it", "somewhere", ".", "Like", "wget", ".", "Uses", "requests", "and", "tqdm", "to", "display", "progress", "if", "you", "want", ".", "By", "default", "it", "will", "uncompress", "files", ".", "#TODO", ":", "handle", "case", "where", "destination", "is", "a", "directory" ]
python
train
41.655172
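A minimal usage sketch for the function above (the URL and destination are placeholders, and the `unzip` helper must be available in scope, as the code itself assumes):

from plumbing.common import download_from_url

# Hypothetical inputs, shown only to illustrate the call signature.
path = download_from_url(
    "https://example.com/archive.zip",   # source URL (placeholder)
    "/tmp/downloads/archive.zip",        # destination; parent directory is created
    progress=True,                       # stream with a tqdm progress bar
    uncompress=True,                     # unzip in place if the file is a zip
)
print(path)  # the FilePath pointing at the downloaded file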
Capitains/MyCapytain
MyCapytain/common/reference/_capitains_cts.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/common/reference/_capitains_cts.py#L828-L872
def fill(self, passage=None, xpath=None):
    """ Fill the xpath with given information

    :param passage: CapitainsCtsPassage reference
    :type passage: CtsReference or list or None. Can be list of None and not None
    :param xpath: If set to True, will return the replaced self.xpath value and not the whole self.refsDecl
    :type xpath: Boolean
    :rtype: basestring
    :returns: Xpath to find the passage

    .. code-block:: python

        citation = XmlCtsCitation(name="line", scope="/TEI/text/body/div/div[@n=\"?\"]",xpath="//l[@n=\"?\"]")
        print(citation.fill(["1", None]))
        # /TEI/text/body/div/div[@n='1']//l[@n]
        print(citation.fill(None))
        # /TEI/text/body/div/div[@n]//l[@n]
        print(citation.fill(CtsReference("1.1")))
        # /TEI/text/body/div/div[@n='1']//l[@n='1']
        print(citation.fill("1", xpath=True))
        # //l[@n='1']

    """
    if xpath is True:  # Then passage is a string or None
        xpath = self.xpath

        replacement = r"\1"
        if isinstance(passage, str):
            replacement = r"\1\2'" + passage + "'"

        return REFERENCE_REPLACER.sub(replacement, xpath)
    else:
        if isinstance(passage, CtsReference):
            passage = passage.start.list
        elif passage is None:
            return REFERENCE_REPLACER.sub(
                r"\1",
                self.refsDecl
            )
        passage = iter(passage)

        return REFERENCE_REPLACER.sub(
            lambda m: _ref_replacer(m, passage),
            self.refsDecl
        )
[ "def", "fill", "(", "self", ",", "passage", "=", "None", ",", "xpath", "=", "None", ")", ":", "if", "xpath", "is", "True", ":", "# Then passage is a string or None", "xpath", "=", "self", ".", "xpath", "replacement", "=", "r\"\\1\"", "if", "isinstance", "(", "passage", ",", "str", ")", ":", "replacement", "=", "r\"\\1\\2'\"", "+", "passage", "+", "\"'\"", "return", "REFERENCE_REPLACER", ".", "sub", "(", "replacement", ",", "xpath", ")", "else", ":", "if", "isinstance", "(", "passage", ",", "CtsReference", ")", ":", "passage", "=", "passage", ".", "start", ".", "list", "elif", "passage", "is", "None", ":", "return", "REFERENCE_REPLACER", ".", "sub", "(", "r\"\\1\"", ",", "self", ".", "refsDecl", ")", "passage", "=", "iter", "(", "passage", ")", "return", "REFERENCE_REPLACER", ".", "sub", "(", "lambda", "m", ":", "_ref_replacer", "(", "m", ",", "passage", ")", ",", "self", ".", "refsDecl", ")" ]
Fill the xpath with given information :param passage: CapitainsCtsPassage reference :type passage: CtsReference or list or None. Can be list of None and not None :param xpath: If set to True, will return the replaced self.xpath value and not the whole self.refsDecl :type xpath: Boolean :rtype: basestring :returns: Xpath to find the passage .. code-block:: python citation = XmlCtsCitation(name="line", scope="/TEI/text/body/div/div[@n=\"?\"]",xpath="//l[@n=\"?\"]") print(citation.fill(["1", None])) # /TEI/text/body/div/div[@n='1']//l[@n] print(citation.fill(None)) # /TEI/text/body/div/div[@n]//l[@n] print(citation.fill(CtsReference("1.1"))) # /TEI/text/body/div/div[@n='1']//l[@n='1'] print(citation.fill("1", xpath=True)) # //l[@n='1']
[ "Fill", "the", "xpath", "with", "given", "informations" ]
python
train
36.933333
h2oai/h2o-3
h2o-py/h2o/model/binomial.py
https://github.com/h2oai/h2o-3/blob/dd62aaa1e7f680a8b16ee14bc66b0fb5195c2ad8/h2o-py/h2o/model/binomial.py#L551-L570
def find_idx_by_threshold(self, threshold, train=False, valid=False, xval=False):
    """
    Retrieve the index in this metric's threshold list at which the given threshold is located.

    If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train",
    "valid", and "xval".

    :param float threshold: Threshold value to search for in the threshold list.
    :param bool train: If True, return the find idx by threshold value for the training data.
    :param bool valid: If True, return the find idx by threshold value for the validation data.
    :param bool xval: If True, return the find idx by threshold value for each of the cross-validated splits.

    :returns: The find idx by threshold values for the specified key(s).
    """
    tm = ModelBase._get_metrics(self, train, valid, xval)
    m = {}
    for k, v in viewitems(tm):
        m[k] = None if v is None else v.find_idx_by_threshold(threshold)
    return list(m.values())[0] if len(m) == 1 else m
[ "def", "find_idx_by_threshold", "(", "self", ",", "threshold", ",", "train", "=", "False", ",", "valid", "=", "False", ",", "xval", "=", "False", ")", ":", "tm", "=", "ModelBase", ".", "_get_metrics", "(", "self", ",", "train", ",", "valid", ",", "xval", ")", "m", "=", "{", "}", "for", "k", ",", "v", "in", "viewitems", "(", "tm", ")", ":", "m", "[", "k", "]", "=", "None", "if", "v", "is", "None", "else", "v", ".", "find_idx_by_threshold", "(", "threshold", ")", "return", "list", "(", "m", ".", "values", "(", ")", ")", "[", "0", "]", "if", "len", "(", "m", ")", "==", "1", "else", "m" ]
Retrieve the index in this metric's threshold list at which the given threshold is located. If all are False (default), then return the training metric value. If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid", and "xval". :param float threshold: Threshold value to search for in the threshold list. :param bool train: If True, return the find idx by threshold value for the training data. :param bool valid: If True, return the find idx by threshold value for the validation data. :param bool xval: If True, return the find idx by threshold value for each of the cross-validated splits. :returns: The find idx by threshold values for the specified key(s).
[ "Retrieve", "the", "index", "in", "this", "metric", "s", "threshold", "list", "at", "which", "the", "given", "threshold", "is", "located", "." ]
python
test
56.3
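As a sketch of how the split flags interact, assuming `model` is a fitted H2O binomial model with a validation frame (the index values shown are invented):

# Hypothetical usage against a trained H2OBinomialModel.
idx = model.find_idx_by_threshold(0.5)                          # training metric only -> a single value
idx_by_split = model.find_idx_by_threshold(0.5, train=True, valid=True)
# With more than one flag set, a dict keyed by split comes back,
# e.g. {'train': 143, 'valid': 151} (numbers invented).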
nschloe/colorio
experiments/new-cs.py
https://github.com/nschloe/colorio/blob/357d6001b3cf30f752e23726bf429dc1d1c60b3a/experiments/new-cs.py#L757-L798
def cost_min2(self, alpha): """Residual formulation, Hessian is a low-rank update of the identity. """ n = self.V.dim() ax = alpha[:n] ay = alpha[n:] # ml = pyamg.ruge_stuben_solver(self.L) # # ml = pyamg.smoothed_aggregation_solver(self.L) # print(ml) # print() # print(self.L) # print() # x = ml.solve(ax, tol=1e-10) # print('residual: {}'.format(numpy.linalg.norm(ax - self.L*x))) # print() # print(ax) # print() # print(x) # exit(1) # x = sparse.linalg.spsolve(self.L, ax) # print('residual: {}'.format(numpy.linalg.norm(ax - self.L*x))) # exit(1) q2, r2 = self.get_q2_r2(ax, ay) Lax = self.L * ax Lay = self.L * ay out = [ 0.5 * numpy.dot(Lax, Lax), 0.5 * numpy.dot(Lay, Lay), 0.5 * numpy.dot(q2 - 1, q2 - 1), 0.5 * numpy.dot(r2, r2), ] if self.num_f_eval % 10000 == 0: print("{:7d} {:e} {:e} {:e} {:e}".format(self.num_f_eval, *out)) self.num_f_eval += 1 return numpy.sum(out)
[ "def", "cost_min2", "(", "self", ",", "alpha", ")", ":", "n", "=", "self", ".", "V", ".", "dim", "(", ")", "ax", "=", "alpha", "[", ":", "n", "]", "ay", "=", "alpha", "[", "n", ":", "]", "# ml = pyamg.ruge_stuben_solver(self.L)", "# # ml = pyamg.smoothed_aggregation_solver(self.L)", "# print(ml)", "# print()", "# print(self.L)", "# print()", "# x = ml.solve(ax, tol=1e-10)", "# print('residual: {}'.format(numpy.linalg.norm(ax - self.L*x)))", "# print()", "# print(ax)", "# print()", "# print(x)", "# exit(1)", "# x = sparse.linalg.spsolve(self.L, ax)", "# print('residual: {}'.format(numpy.linalg.norm(ax - self.L*x)))", "# exit(1)", "q2", ",", "r2", "=", "self", ".", "get_q2_r2", "(", "ax", ",", "ay", ")", "Lax", "=", "self", ".", "L", "*", "ax", "Lay", "=", "self", ".", "L", "*", "ay", "out", "=", "[", "0.5", "*", "numpy", ".", "dot", "(", "Lax", ",", "Lax", ")", ",", "0.5", "*", "numpy", ".", "dot", "(", "Lay", ",", "Lay", ")", ",", "0.5", "*", "numpy", ".", "dot", "(", "q2", "-", "1", ",", "q2", "-", "1", ")", ",", "0.5", "*", "numpy", ".", "dot", "(", "r2", ",", "r2", ")", ",", "]", "if", "self", ".", "num_f_eval", "%", "10000", "==", "0", ":", "print", "(", "\"{:7d} {:e} {:e} {:e} {:e}\"", ".", "format", "(", "self", ".", "num_f_eval", ",", "*", "out", ")", ")", "self", ".", "num_f_eval", "+=", "1", "return", "numpy", ".", "sum", "(", "out", ")" ]
Residual formulation, Hessian is a low-rank update of the identity.
[ "Residual", "formulation", "Hessian", "is", "a", "low", "-", "rank", "update", "of", "the", "identity", "." ]
python
train
27.238095
JasonKessler/scattertext
scattertext/representations/EmbeddingsResolver.py
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/representations/EmbeddingsResolver.py#L17-L28
def set_embeddings(self, embeddings): ''' Specifies fixed set of embeddings :param embeddings: array-like, sparse or dense, shape should be (embedding size, # terms) :return: EmbeddingsResolver ''' if self.embeddings_ is not None: raise Exception("You have already set embeddings by running set_embeddings or set_embeddings_model.") assert embeddings.shape[1] == self.corpus_.get_num_terms() self.embeddings_ = embeddings.T self.vocab_ = self.corpus_.get_terms() return self
[ "def", "set_embeddings", "(", "self", ",", "embeddings", ")", ":", "if", "self", ".", "embeddings_", "is", "not", "None", ":", "raise", "Exception", "(", "\"You have already set embeddings by running set_embeddings or set_embeddings_model.\"", ")", "assert", "embeddings", ".", "shape", "[", "1", "]", "==", "self", ".", "corpus_", ".", "get_num_terms", "(", ")", "self", ".", "embeddings_", "=", "embeddings", ".", "T", "self", ".", "vocab_", "=", "self", ".", "corpus_", ".", "get_terms", "(", ")", "return", "self" ]
Specifies fixed set of embeddings :param embeddings: array-like, sparse or dense, shape should be (embedding size, # terms) :return: EmbeddingsResolver
[ "Specifies", "fixed", "set", "of", "embeddings", ":", "param", "embeddings", ":", "array", "-", "like", "sparse", "or", "dense", "shape", "should", "be", "(", "embedding", "size", "#", "terms", ")", ":", "return", ":", "EmbeddingsResolver" ]
python
train
46.25
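A small numpy-only sketch of the shape contract that the assertion enforces (the vocabulary size and embedding dimension here are made up):

import numpy as np

n_terms, emb_size = 1000, 300                    # invented sizes
embeddings = np.random.rand(emb_size, n_terms)   # (embedding size, # terms), as the docstring requires
# set_embeddings(embeddings) asserts embeddings.shape[1] == corpus.get_num_terms(),
# then stores embeddings.T: one row per term, aligned with corpus.get_terms().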
stephenmcd/gnotty
gnotty/client.py
https://github.com/stephenmcd/gnotty/blob/bea3762dc9cbc3cb21a5ae7224091cf027273c40/gnotty/client.py#L176-L185
def on_quit(self, connection, event): """ Someone left the channel - send the nicknames list to the WebSocket. """ nickname = self.get_nickname(event) nickname_color = self.nicknames[nickname] del self.nicknames[nickname] self.namespace.emit("message", nickname, "leaves", nickname_color) self.emit_nicknames()
[ "def", "on_quit", "(", "self", ",", "connection", ",", "event", ")", ":", "nickname", "=", "self", ".", "get_nickname", "(", "event", ")", "nickname_color", "=", "self", ".", "nicknames", "[", "nickname", "]", "del", "self", ".", "nicknames", "[", "nickname", "]", "self", ".", "namespace", ".", "emit", "(", "\"message\"", ",", "nickname", ",", "\"leaves\"", ",", "nickname_color", ")", "self", ".", "emit_nicknames", "(", ")" ]
Someone left the channel - send the nicknames list to the WebSocket.
[ "Someone", "left", "the", "channel", "-", "send", "the", "nicknames", "list", "to", "the", "WebSocket", "." ]
python
train
37.3
barryp/py-amqplib
amqplib/client_0_8/connection.py
https://github.com/barryp/py-amqplib/blob/2b3a47de34b4712c111d0a55d7ff109dffc2a7b2/amqplib/client_0_8/connection.py#L495-L508
def _open_ok(self, args): """ signal that the connection is ready This method signals to the client that the connection is ready for use. PARAMETERS: known_hosts: shortstr """ self.known_hosts = args.read_shortstr() AMQP_LOGGER.debug('Open OK! known_hosts [%s]' % self.known_hosts) return None
[ "def", "_open_ok", "(", "self", ",", "args", ")", ":", "self", ".", "known_hosts", "=", "args", ".", "read_shortstr", "(", ")", "AMQP_LOGGER", ".", "debug", "(", "'Open OK! known_hosts [%s]'", "%", "self", ".", "known_hosts", ")", "return", "None" ]
signal that the connection is ready This method signals to the client that the connection is ready for use. PARAMETERS: known_hosts: shortstr
[ "signal", "that", "the", "connection", "is", "ready" ]
python
train
26.214286
stephen-bunn/file-config
src/file_config/handlers/ini.py
https://github.com/stephen-bunn/file-config/blob/93429360c949985202e1f2b9cd0340731819ba75/src/file_config/handlers/ini.py#L27-L60
def on_configparser_dumps(self, configparser, config, dictionary, **kwargs):
    """ The :mod:`configparser` dumps method.

    :param module configparser: The ``configparser`` module
    :param class config: The instance's config class
    :param dict dictionary: The dictionary instance to serialize
    :param str root: The top-level section of the ini file,
        defaults to ``config.__name__``, optional
    :param str delimiter: The delimiter character used for representing nested
        dictionaries, defaults to ":", optional
    :return: The ini serialization of the given ``dictionary``
    :rtype: str
    """

    root_section = kwargs.pop("root")
    if not isinstance(root_section, str):
        root_section = config.__name__

    delimiter = kwargs.pop("delimiter", ":")
    if delimiter in root_section:
        warnings.warn(
            f"root section {root_section!r} contains delimiter character "
            f"{delimiter!r}, loading from the resulting content will likely fail"
        )

    try:
        return INIParser.from_dict(
            dictionary,
            root_section=root_section,
            delimiter=delimiter,  # reuse the delimiter popped above instead of popping twice
            empty_sections=kwargs.pop("empty_sections", False),
        ).to_ini()
    except ValueError:
        raise ValueError("INI cannot handle this config, try using toml instead")
[ "def", "on_configparser_dumps", "(", "self", ",", "configparser", ",", "config", ",", "dictionary", ",", "*", "*", "kwargs", ")", ":", "root_section", "=", "kwargs", ".", "pop", "(", "\"root\"", ")", "if", "not", "isinstance", "(", "root_section", ",", "str", ")", ":", "root_section", "=", "config", ".", "__name__", "delimiter", "=", "kwargs", ".", "pop", "(", "\"delimiter\"", ",", "\":\"", ")", "if", "delimiter", "in", "root_section", ":", "warnings", ".", "warn", "(", "f\"root section {root_section!r} contains delimiter character \"", "f\"{delimiter!r}, loading from the resulting content will likely fail\"", ")", "try", ":", "return", "INIParser", ".", "from_dict", "(", "dictionary", ",", "root_section", "=", "root_section", ",", "delimiter", "=", "kwargs", ".", "pop", "(", "\"delimiter\"", ",", "\":\"", ")", ",", "empty_sections", "=", "kwargs", ".", "pop", "(", "\"empty_sections\"", ",", "False", ")", ",", ")", ".", "to_ini", "(", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"INI cannot handle this config, try using toml instead\"", ")" ]
The :mod:`configparser` dumps method. :param module configparser: The ``configparser`` module :param class config: The instance's config class :param dict dictionary: The dictionary instance to serialize :param str root: The top-level section of the ini file, defaults to ``config.__name__``, optional :param str delimiter: The delimiter character used for representing nested dictionaries, defaults to ":", optional :return: The ini serialization of the given ``dictionary`` :rtype: str
[ "The", ":", "mod", ":", "configparser", "dumps", "method", "." ]
python
train
42.558824
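To make the root/delimiter interaction concrete, here is a sketch of how a nested dictionary would flatten into delimiter-joined section names (the exact section layout is inferred from the parameters above, not quoted from file-config's documentation):

config_dict = {"db": {"host": "localhost", "port": 5432}}
# With root="app" and delimiter=":", nested keys would become sections like:
#
#   [app:db]
#   host = localhost
#   port = 5432
#
# which is why a root name containing the delimiter would break loading later.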
jazzband/django-discover-jenkins
setup.py
https://github.com/jazzband/django-discover-jenkins/blob/c0c859dfdd571de6e8f63865dfc8ebac6bab1d07/setup.py#L12-L19
def get_author_and_version(package):
    """
    Return package author and version as listed in `__init__.py`.
    """
    init_py = open(os.path.join(package, '__init__.py')).read()
    author = re.search("__author__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
    version = re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
    return author, version
[ "def", "get_author_and_version", "(", "package", ")", ":", "init_py", "=", "open", "(", "os", ".", "path", ".", "join", "(", "package", ",", "'__init__.py'", ")", ")", ".", "read", "(", ")", "author", "=", "re", ".", "search", "(", "\"__author__ = ['\\\"]([^'\\\"]+)['\\\"]\"", ",", "init_py", ")", ".", "group", "(", "1", ")", "version", "=", "re", ".", "search", "(", "\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\"", ",", "init_py", ")", ".", "group", "(", "1", ")", "return", "author", ",", "version" ]
Return package author and version as listed in `__init__.py`.
[ "Return", "package", "author", "and", "version", "as", "listed", "in", "init", ".", "py", "." ]
python
valid
44.25
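The same regex-on-source trick works on any module text; a self-contained check for illustration:

import re

source = '__author__ = "Jane Doe"\n__version__ = "1.2.3"\n'
author = re.search(r"__author__ = ['\"]([^'\"]+)['\"]", source).group(1)
version = re.search(r"__version__ = ['\"]([^'\"]+)['\"]", source).group(1)
assert (author, version) == ("Jane Doe", "1.2.3")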
apache/incubator-heron
heron/tools/common/src/python/access/heron_api.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/common/src/python/access/heron_api.py#L621-L645
def run_instance_jmap(cluster, environ, topology, instance, role=None):
  '''
  :param cluster:
  :param environ:
  :param topology:
  :param instance:
  :param role:
  :return:
  '''
  params = dict(
      cluster=cluster,
      environ=environ,
      topology=topology,
      instance=instance)
  if role is not None:
    params['role'] = role
  request_url = tornado.httputil.url_concat(
      create_url(JMAP_URL_FMT), params)
  # role is already part of params; appending it again would duplicate the query parameter
  raise tornado.gen.Return((yield fetch_url_as_json(request_url)))
[ "def", "run_instance_jmap", "(", "cluster", ",", "environ", ",", "topology", ",", "instance", ",", "role", "=", "None", ")", ":", "params", "=", "dict", "(", "cluster", "=", "cluster", ",", "environ", "=", "environ", ",", "topology", "=", "topology", ",", "instance", "=", "instance", ")", "if", "role", "is", "not", "None", ":", "params", "[", "'role'", "]", "=", "role", "request_url", "=", "tornado", ".", "httputil", ".", "url_concat", "(", "create_url", "(", "JMAP_URL_FMT", ")", ",", "params", ")", "if", "role", "is", "not", "None", ":", "request_url", "=", "tornado", ".", "httputil", ".", "url_concat", "(", "request_url", ",", "dict", "(", "role", "=", "role", ")", ")", "raise", "tornado", ".", "gen", ".", "Return", "(", "(", "yield", "fetch_url_as_json", "(", "request_url", ")", ")", ")" ]
:param cluster: :param environ: :param topology: :param instance: :param role: :return:
[ ":", "param", "cluster", ":", ":", "param", "environ", ":", ":", "param", "topology", ":", ":", "param", "instance", ":", ":", "param", "role", ":", ":", "return", ":" ]
python
valid
23.04
klen/muffin-babel
muffin_babel.py
https://github.com/klen/muffin-babel/blob/f48ebbbf7806c6c727f66d8d0df331b29f6ead08/muffin_babel.py#L263-L266
def pgettext(self, context, string, domain=None, **variables): """Like :meth:`gettext` but with a context.""" t = self.get_translations(domain) return t.upgettext(context, string) % variables
[ "def", "pgettext", "(", "self", ",", "context", ",", "string", ",", "domain", "=", "None", ",", "*", "*", "variables", ")", ":", "t", "=", "self", ".", "get_translations", "(", "domain", ")", "return", "t", ".", "upgettext", "(", "context", ",", "string", ")", "%", "variables" ]
Like :meth:`gettext` but with a context.
[ "Like", ":", "meth", ":", "gettext", "but", "with", "a", "context", "." ]
python
train
53
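A hypothetical call, to show why the context argument exists (the plugin name, context, and string are invented, not taken from muffin-babel's docs):

# "month name" disambiguates "May" the month from "may" the modal verb
# when translators look the string up in the catalog.
label = babel.pgettext("month name", "May")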
junaruga/rpm-py-installer
install.py
https://github.com/junaruga/rpm-py-installer/blob/12f45feb0ba533dec8d0d16ef1e9b7fb8cfbd4ed/install.py#L1770-L1773
def cd(cls, directory): """Change directory. It behaves like "cd directory".""" Log.debug('CMD: cd {0}'.format(directory)) os.chdir(directory)
[ "def", "cd", "(", "cls", ",", "directory", ")", ":", "Log", ".", "debug", "(", "'CMD: cd {0}'", ".", "format", "(", "directory", ")", ")", "os", ".", "chdir", "(", "directory", ")" ]
Change directory. It behaves like "cd directory".
[ "Change", "directory", ".", "It", "behaves", "like", "cd", "directory", "." ]
python
train
40.75
pandas-dev/pandas
pandas/io/pytables.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/io/pytables.py#L2911-L2924
def write(self, obj, **kwargs): """ write it as a collection of individual sparse series """ super().write(obj, **kwargs) for name, ss in obj.items(): key = 'sparse_series_{name}'.format(name=name) if key not in self.group._v_children: node = self._handle.create_group(self.group, key) else: node = getattr(self.group, key) s = SparseSeriesFixed(self.parent, node) s.write(ss) self.attrs.default_fill_value = obj.default_fill_value self.attrs.default_kind = obj.default_kind self.write_index('columns', obj.columns)
[ "def", "write", "(", "self", ",", "obj", ",", "*", "*", "kwargs", ")", ":", "super", "(", ")", ".", "write", "(", "obj", ",", "*", "*", "kwargs", ")", "for", "name", ",", "ss", "in", "obj", ".", "items", "(", ")", ":", "key", "=", "'sparse_series_{name}'", ".", "format", "(", "name", "=", "name", ")", "if", "key", "not", "in", "self", ".", "group", ".", "_v_children", ":", "node", "=", "self", ".", "_handle", ".", "create_group", "(", "self", ".", "group", ",", "key", ")", "else", ":", "node", "=", "getattr", "(", "self", ".", "group", ",", "key", ")", "s", "=", "SparseSeriesFixed", "(", "self", ".", "parent", ",", "node", ")", "s", ".", "write", "(", "ss", ")", "self", ".", "attrs", ".", "default_fill_value", "=", "obj", ".", "default_fill_value", "self", ".", "attrs", ".", "default_kind", "=", "obj", ".", "default_kind", "self", ".", "write_index", "(", "'columns'", ",", "obj", ".", "columns", ")" ]
write it as a collection of individual sparse series
[ "write", "it", "as", "a", "collection", "of", "individual", "sparse", "series" ]
python
train
45.857143
bcbio/bcbio-nextgen
bcbio/variation/validateplot.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/validateplot.py#L304-L315
def _ggplot(df, out_file): """Plot faceted items with ggplot wrapper on top of matplotlib. XXX Not yet functional """ import ggplot as gg df["variant.type"] = [vtype_labels[x] for x in df["variant.type"]] df["category"] = [cat_labels[x] for x in df["category"]] df["caller"] = [caller_labels.get(x, None) for x in df["caller"]] p = (gg.ggplot(df, gg.aes(x="caller", y="value.floor")) + gg.geom_bar() + gg.facet_wrap("variant.type", "category") + gg.theme_seaborn()) gg.ggsave(p, out_file)
[ "def", "_ggplot", "(", "df", ",", "out_file", ")", ":", "import", "ggplot", "as", "gg", "df", "[", "\"variant.type\"", "]", "=", "[", "vtype_labels", "[", "x", "]", "for", "x", "in", "df", "[", "\"variant.type\"", "]", "]", "df", "[", "\"category\"", "]", "=", "[", "cat_labels", "[", "x", "]", "for", "x", "in", "df", "[", "\"category\"", "]", "]", "df", "[", "\"caller\"", "]", "=", "[", "caller_labels", ".", "get", "(", "x", ",", "None", ")", "for", "x", "in", "df", "[", "\"caller\"", "]", "]", "p", "=", "(", "gg", ".", "ggplot", "(", "df", ",", "gg", ".", "aes", "(", "x", "=", "\"caller\"", ",", "y", "=", "\"value.floor\"", ")", ")", "+", "gg", ".", "geom_bar", "(", ")", "+", "gg", ".", "facet_wrap", "(", "\"variant.type\"", ",", "\"category\"", ")", "+", "gg", ".", "theme_seaborn", "(", ")", ")", "gg", ".", "ggsave", "(", "p", ",", "out_file", ")" ]
Plot faceted items with ggplot wrapper on top of matplotlib. XXX Not yet functional
[ "Plot", "faceted", "items", "with", "ggplot", "wrapper", "on", "top", "of", "matplotlib", ".", "XXX", "Not", "yet", "functional" ]
python
train
44.25
angr/angr
angr/state_plugins/javavm_classloader.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/javavm_classloader.py#L68-L106
def init_class(self, class_, step_func=None):
    """
    This method simulates the loading of a class by the JVM,
    during which parts of the class (e.g. static fields) are
    initialized. For this, we run the class initializer method
    <clinit> (if available) and update the state accordingly.

    Note: Initialization is skipped if the class has already been
          initialized (or if it's not loaded in CLE).
    """
    if self.is_class_initialized(class_):
        l.debug("Class %r already initialized.", class_)
        return

    l.debug("Initialize class %r.", class_)
    self.initialized_classes.add(class_)

    if not class_.is_loaded:
        l.warning("Class %r is not loaded in CLE. Skip initialization.", class_)
        return

    clinit_method = resolve_method(self.state, '<clinit>', class_.name,
                                   include_superclasses=False, init_class=False)
    if clinit_method.is_loaded:
        javavm_simos = self.state.project.simos
        clinit_state = javavm_simos.state_call(addr=SootAddressDescriptor(clinit_method, 0, 0),
                                               base_state=self.state,
                                               ret_addr=SootAddressTerminator())
        simgr = self.state.project.factory.simgr(clinit_state)
        l.info(">"*15 + " Run class initializer %r ... " + ">"*15, clinit_method)
        simgr.run(step_func=step_func)
        l.debug("<"*15 + " Run class initializer %r ... done " + "<"*15, clinit_method)
        # The only thing that can be updated during initialization are
        # static or rather global information, which are either stored on
        # the heap or in the vm_static_table
        self.state.memory.vm_static_table = simgr.deadended[-1].memory.vm_static_table.copy()
        self.state.memory.heap = simgr.deadended[-1].memory.heap.copy()
    else:
        l.debug("Class initializer <clinit> is not loaded in CLE. Skip initialization.")
[ "def", "init_class", "(", "self", ",", "class_", ",", "step_func", "=", "None", ")", ":", "if", "self", ".", "is_class_initialized", "(", "class_", ")", ":", "l", ".", "debug", "(", "\"Class %r already initialized.\"", ",", "class_", ")", "return", "l", ".", "debug", "(", "\"Initialize class %r.\"", ",", "class_", ")", "self", ".", "initialized_classes", ".", "add", "(", "class_", ")", "if", "not", "class_", ".", "is_loaded", ":", "l", ".", "warning", "(", "\"Class %r is not loaded in CLE. Skip initializiation.\"", ",", "class_", ")", "return", "clinit_method", "=", "resolve_method", "(", "self", ".", "state", ",", "'<clinit>'", ",", "class_", ".", "name", ",", "include_superclasses", "=", "False", ",", "init_class", "=", "False", ")", "if", "clinit_method", ".", "is_loaded", ":", "javavm_simos", "=", "self", ".", "state", ".", "project", ".", "simos", "clinit_state", "=", "javavm_simos", ".", "state_call", "(", "addr", "=", "SootAddressDescriptor", "(", "clinit_method", ",", "0", ",", "0", ")", ",", "base_state", "=", "self", ".", "state", ",", "ret_addr", "=", "SootAddressTerminator", "(", ")", ")", "simgr", "=", "self", ".", "state", ".", "project", ".", "factory", ".", "simgr", "(", "clinit_state", ")", "l", ".", "info", "(", "\">\"", "*", "15", "+", "\" Run class initializer %r ... \"", "+", "\">\"", "*", "15", ",", "clinit_method", ")", "simgr", ".", "run", "(", "step_func", "=", "step_func", ")", "l", ".", "debug", "(", "\"<\"", "*", "15", "+", "\" Run class initializer %r ... done \"", "+", "\"<\"", "*", "15", ",", "clinit_method", ")", "# The only thing that can be updated during initialization are", "# static or rather global information, which are either stored on", "# the heap or in the vm_static_table", "self", ".", "state", ".", "memory", ".", "vm_static_table", "=", "simgr", ".", "deadended", "[", "-", "1", "]", ".", "memory", ".", "vm_static_table", ".", "copy", "(", ")", "self", ".", "state", ".", "memory", ".", "heap", "=", "simgr", ".", "deadended", "[", "-", "1", "]", ".", "memory", ".", "heap", ".", "copy", "(", ")", "else", ":", "l", ".", "debug", "(", "\"Class initializer <clinit> is not loaded in CLE. Skip initializiation.\"", ")" ]
This method simulates the loading of a class by the JVM, during which parts of the class (e.g. static fields) are initialized. For this, we run the class initializer method <clinit> (if available) and update the state accordingly. Note: Initialization is skipped if the class has already been initialized (or if it's not loaded in CLE).
[ "This", "method", "simulates", "the", "loading", "of", "a", "class", "by", "the", "JVM", "during", "which", "parts", "of", "the", "class", "(", "e", ".", "g", ".", "static", "fields", ")", "are", "initialized", ".", "For", "this", "we", "run", "the", "class", "initializer", "method", "<clinit", ">", "(", "if", "available", ")", "and", "update", "the", "state", "accordingly", "." ]
python
train
52.923077
mjirik/imcut
imcut/image_manipulation.py
https://github.com/mjirik/imcut/blob/1b38e7cd18a7a38fe683c1cabe1222fe5fa03aa3/imcut/image_manipulation.py#L85-L121
def seed_zoom(seeds, zoom):
    """
    Smart zoom for sparse matrix. If the matrix is resized to a bigger
    resolution, thin lines of labels could be lost. This function prefers
    labels larger than zero. If there is only one small voxel in a larger
    volume of zeros, it is selected.
    """
    # import scipy
    # loseeds=seeds
    labels = np.unique(seeds)
    # remove first label - 0
    labels = np.delete(labels, 0)
    # @TODO smart interpolation for seeds in one block
    # loseeds = scipy.ndimage.interpolation.zoom(
    #     seeds, zoom, order=0)
    loshape = np.ceil(np.array(seeds.shape) * 1.0 / zoom).astype(np.int)
    loseeds = np.zeros(loshape, dtype=np.int8)
    loseeds = loseeds.astype(np.int8)
    for label in labels:
        a, b, c = np.where(seeds == label)
        loa = np.round(a // zoom)
        lob = np.round(b // zoom)
        loc = np.round(c // zoom)
        # loseeds = np.zeros(loshape)
        loseeds[loa, lob, loc] += label
        # this is to detect conflict seeds
        loseeds[loseeds > label] = 100

    # remove conflict seeds
    loseeds[loseeds > 99] = 0

    # import py3DSeedEditor
    # ped = py3DSeedEditor.py3DSeedEditor(loseeds)
    # ped.show()

    return loseeds
[ "def", "seed_zoom", "(", "seeds", ",", "zoom", ")", ":", "# import scipy", "# loseeds=seeds", "labels", "=", "np", ".", "unique", "(", "seeds", ")", "# remove first label - 0", "labels", "=", "np", ".", "delete", "(", "labels", ",", "0", ")", "# @TODO smart interpolation for seeds in one block", "# loseeds = scipy.ndimage.interpolation.zoom(", "# seeds, zoom, order=0)", "loshape", "=", "np", ".", "ceil", "(", "np", ".", "array", "(", "seeds", ".", "shape", ")", "*", "1.0", "/", "zoom", ")", ".", "astype", "(", "np", ".", "int", ")", "loseeds", "=", "np", ".", "zeros", "(", "loshape", ",", "dtype", "=", "np", ".", "int8", ")", "loseeds", "=", "loseeds", ".", "astype", "(", "np", ".", "int8", ")", "for", "label", "in", "labels", ":", "a", ",", "b", ",", "c", "=", "np", ".", "where", "(", "seeds", "==", "label", ")", "loa", "=", "np", ".", "round", "(", "a", "//", "zoom", ")", "lob", "=", "np", ".", "round", "(", "b", "//", "zoom", ")", "loc", "=", "np", ".", "round", "(", "c", "//", "zoom", ")", "# loseeds = np.zeros(loshape)", "loseeds", "[", "loa", ",", "lob", ",", "loc", "]", "+=", "label", "# this is to detect conflict seeds", "loseeds", "[", "loseeds", ">", "label", "]", "=", "100", "# remove conflict seeds", "loseeds", "[", "loseeds", ">", "99", "]", "=", "0", "# import py3DSeedEditor", "# ped = py3DSeedEditor.py3DSeedEditor(loseeds)", "# ped.show()", "return", "loseeds" ]
Smart zoom for sparse matrix. If the matrix is resized to a bigger resolution, thin lines of labels could be lost. This function prefers labels larger than zero. If there is only one small voxel in a larger volume of zeros, it is selected.
[ "Smart", "zoom", "for", "sparse", "matrix", ".", "If", "there", "is", "resize", "to", "bigger", "resolution", "thin", "line", "of", "label", "could", "be", "lost", ".", "This", "function", "prefers", "labels", "larger", "then", "zero", ".", "If", "there", "is", "only", "one", "small", "voxel", "in", "larger", "volume", "with", "zeros", "it", "is", "selected", "." ]
python
train
32.405405
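A tiny worked example of the behaviour described, assuming `seed_zoom` is importable from `imcut.image_manipulation`: a lone labeled voxel survives the downsampling that order-0 interpolation could drop.

import numpy as np
from imcut.image_manipulation import seed_zoom

seeds = np.zeros((4, 4, 4), dtype=np.int8)
seeds[3, 3, 3] = 1                 # one thin seed in a sea of zeros
small = seed_zoom(seeds, 2)        # halve each axis
assert small.shape == (2, 2, 2)
assert small[1, 1, 1] == 1         # the seed is kept; conflicting labels would be zeroed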
mwhooker/jsonselect
jsonselect/jsonselect.py
https://github.com/mwhooker/jsonselect/blob/c64aa9ea930de0344797ff87b04c753c8fc096a6/jsonselect/jsonselect.py#L432-L442
def _match_nodes(self, validators, obj): """Apply each validator in validators to each node in obj. Return each node in obj which matches all validators. """ results = [] for node in object_iter(obj): if all([validate(node) for validate in validators]): results.append(node) return results
[ "def", "_match_nodes", "(", "self", ",", "validators", ",", "obj", ")", ":", "results", "=", "[", "]", "for", "node", "in", "object_iter", "(", "obj", ")", ":", "if", "all", "(", "[", "validate", "(", "node", ")", "for", "validate", "in", "validators", "]", ")", ":", "results", ".", "append", "(", "node", ")", "return", "results" ]
Apply each validator in validators to each node in obj. Return each node in obj which matches all validators.
[ "Apply", "each", "validator", "in", "validators", "to", "each", "node", "in", "obj", "." ]
python
test
32.454545
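The validator pattern is just a conjunction of predicates over every node of the object tree. A standalone sketch with a simplified walker (the real jsonselect `object_iter` also tracks parents and siblings):

def object_iter(obj):
    # Simplified walker: yields the object, then every nested value, depth first.
    yield obj
    children = obj.values() if isinstance(obj, dict) else obj if isinstance(obj, list) else []
    for child in children:
        for node in object_iter(child):
            yield node

validators = [lambda n: isinstance(n, int), lambda n: n > 2]
doc = {"a": 1, "b": [2, 3, {"c": 4}]}
assert [n for n in object_iter(doc) if all(v(n) for v in validators)] == [3, 4]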
facelessuser/pyspelling
pyspelling/filters/cpp.py
https://github.com/facelessuser/pyspelling/blob/c25d5292cc2687ad65891a12ead43f7182ca8bb3/pyspelling/filters/cpp.py#L169-L172
def match_string(self, stype): """Match string type.""" return not (stype - self.string_types) or bool(stype & self.wild_string_types)
[ "def", "match_string", "(", "self", ",", "stype", ")", ":", "return", "not", "(", "stype", "-", "self", ".", "string_types", ")", "or", "bool", "(", "stype", "&", "self", ".", "wild_string_types", ")" ]
Match string type.
[ "Match", "string", "type", "." ]
python
train
37
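The return expression is plain set algebra: the difference is empty when every flag in `stype` is an allowed string type, and the intersection catches wildcard types. A standalone sketch with invented flag sets standing in for pyspelling's internals:

string_types = {"raw", "unicode"}        # hypothetical allowed types
wild_string_types = {"format"}           # hypothetical wildcard types

def match_string(stype):
    return not (stype - string_types) or bool(stype & wild_string_types)

assert match_string({"raw"})              # fully contained in string_types
assert match_string({"bytes", "format"})  # overlaps a wildcard type
assert not match_string({"bytes"})        # matches neither condition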
twilio/twilio-python
twilio/rest/video/v1/composition_hook.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/video/v1/composition_hook.py#L104-L139
def page(self, enabled=values.unset, date_created_after=values.unset, date_created_before=values.unset, friendly_name=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of CompositionHookInstance records from the API. Request is executed immediately :param bool enabled: Only show Composition Hooks enabled or disabled. :param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone. :param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone. :param unicode friendly_name: Only show Composition Hooks with friendly name that match this name. :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of CompositionHookInstance :rtype: twilio.rest.video.v1.composition_hook.CompositionHookPage """ params = values.of({ 'Enabled': enabled, 'DateCreatedAfter': serialize.iso8601_datetime(date_created_after), 'DateCreatedBefore': serialize.iso8601_datetime(date_created_before), 'FriendlyName': friendly_name, 'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return CompositionHookPage(self._version, response, self._solution)
[ "def", "page", "(", "self", ",", "enabled", "=", "values", ".", "unset", ",", "date_created_after", "=", "values", ".", "unset", ",", "date_created_before", "=", "values", ".", "unset", ",", "friendly_name", "=", "values", ".", "unset", ",", "page_token", "=", "values", ".", "unset", ",", "page_number", "=", "values", ".", "unset", ",", "page_size", "=", "values", ".", "unset", ")", ":", "params", "=", "values", ".", "of", "(", "{", "'Enabled'", ":", "enabled", ",", "'DateCreatedAfter'", ":", "serialize", ".", "iso8601_datetime", "(", "date_created_after", ")", ",", "'DateCreatedBefore'", ":", "serialize", ".", "iso8601_datetime", "(", "date_created_before", ")", ",", "'FriendlyName'", ":", "friendly_name", ",", "'PageToken'", ":", "page_token", ",", "'Page'", ":", "page_number", ",", "'PageSize'", ":", "page_size", ",", "}", ")", "response", "=", "self", ".", "_version", ".", "page", "(", "'GET'", ",", "self", ".", "_uri", ",", "params", "=", "params", ",", ")", "return", "CompositionHookPage", "(", "self", ".", "_version", ",", "response", ",", "self", ".", "_solution", ")" ]
Retrieve a single page of CompositionHookInstance records from the API. Request is executed immediately :param bool enabled: Only show Composition Hooks enabled or disabled. :param datetime date_created_after: Only show Composition Hooks created on or after this ISO8601 date-time with timezone. :param datetime date_created_before: Only show Composition Hooks created before this ISO8601 date-time with timezone. :param unicode friendly_name: Only show Composition Hooks with friendly name that match this name. :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of CompositionHookInstance :rtype: twilio.rest.video.v1.composition_hook.CompositionHookPage
[ "Retrieve", "a", "single", "page", "of", "CompositionHookInstance", "records", "from", "the", "API", ".", "Request", "is", "executed", "immediately" ]
python
train
47.722222
ff0000/scarlet
scarlet/versioning/models.py
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/versioning/models.py#L678-L727
def publish(self, user=None, when=None):
    """
    Publishes an item and any sub items.
    A new transaction will be started
    if we aren't already in a transaction.

    Should only be run on draft items
    """
    assert self.state == self.DRAFT

    user_published = 'code'
    if user:
        user_published = user.username

    now = timezone.now()

    with xact():
        # If this item hasn't got live yet and no new date was specified
        # delete the old scheduled items and schedule this one on that date
        published = False
        if getattr(self._meta, '_is_view', False):
            published = self.is_published
        else:
            published = self.object.is_published

        if not when and not published and self.last_scheduled:
            klass = self.get_version_class()
            for obj in klass.normal.filter(object_id=self.object_id,
                                           last_scheduled=self.last_scheduled,
                                           state=self.SCHEDULED):
                when = self.date_published
                obj.delete()

        when = when or now

        # Drafts get preserved so save the
        # time we last cloned this
        if self.state == self.DRAFT:
            self.last_scheduled = now
            self.date_published = when
            self.save(last_save=now)
            self._clone()

        self.user_published = user_published
        self.state = self.SCHEDULED
        self.save()

        self.schedule(when=when)
[ "def", "publish", "(", "self", ",", "user", "=", "None", ",", "when", "=", "None", ")", ":", "assert", "self", ".", "state", "==", "self", ".", "DRAFT", "user_published", "=", "'code'", "if", "user", ":", "user_published", "=", "user", ".", "username", "now", "=", "timezone", ".", "now", "(", ")", "with", "xact", "(", ")", ":", "# If this item hasn't got live yet and no new date was specified", "# delete the old scheduled items and schedule this one on that date", "published", "=", "False", "if", "getattr", "(", "self", ".", "_meta", ",", "'_is_view'", ",", "False", ")", ":", "published", "=", "self", ".", "is_published", "else", ":", "published", "=", "self", ".", "object", ".", "is_published", "if", "not", "when", "and", "not", "published", "and", "self", ".", "last_scheduled", ":", "klass", "=", "self", ".", "get_version_class", "(", ")", "for", "obj", "in", "klass", ".", "normal", ".", "filter", "(", "object_id", "=", "self", ".", "object_id", ",", "last_scheduled", "=", "self", ".", "last_scheduled", ",", "state", "=", "self", ".", "SCHEDULED", ")", ":", "when", "=", "self", ".", "date_published", "obj", ".", "delete", "(", ")", "when", "=", "when", "or", "now", "# Drafts get preserved so save the", "# time we last cloned this", "if", "self", ".", "state", "==", "self", ".", "DRAFT", ":", "self", ".", "last_scheduled", "=", "now", "self", ".", "date_published", "=", "when", "self", ".", "save", "(", "last_save", "=", "now", ")", "self", ".", "_clone", "(", ")", "self", ".", "user_published", "=", "user_published", "self", ".", "state", "=", "self", ".", "SCHEDULED", "self", ".", "save", "(", ")", "self", ".", "schedule", "(", "when", "=", "when", ")" ]
Publishes an item and any sub items. A new transaction will be started if we aren't already in a transaction. Should only be run on draft items
[ "Publishes", "a", "item", "and", "any", "sub", "items", ".", "A", "new", "transaction", "will", "be", "started", "if", "we", "aren", "t", "already", "in", "a", "transaction", "." ]
python
train
32.48
sublee/zeronimo
zeronimo/helpers.py
https://github.com/sublee/zeronimo/blob/b216638232932718d2cbc5eabd870c8f5b5e83fb/zeronimo/helpers.py#L111-L113
def eintr_retry_zmq(f, *args, **kwargs): """The specialization of :func:`eintr_retry` by :exc:`zmq.ZMQError`.""" return eintr_retry(zmq.ZMQError, f, *args, **kwargs)
[ "def", "eintr_retry_zmq", "(", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "eintr_retry", "(", "zmq", ".", "ZMQError", ",", "f", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
The specialization of :func:`eintr_retry` by :exc:`zmq.ZMQError`.
[ "The", "specialization", "of", ":", "func", ":", "eintr_retry", "by", ":", "exc", ":", "zmq", ".", "ZMQError", "." ]
python
test
57
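For context, the generic helper it specializes usually has this shape: retry the call only when the error is EINTR, and re-raise anything else (a sketch of the common pattern, not zeronimo's exact implementation):

import errno

def eintr_retry(exc_type, f, *args, **kwargs):
    # Keep calling f until it completes without being interrupted by a signal.
    while True:
        try:
            return f(*args, **kwargs)
        except exc_type as exc:
            if exc.errno != errno.EINTR:
                raise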
skorch-dev/skorch
skorch/cli.py
https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/skorch/cli.py#L102-L135
def _resolve_dotted_name(dotted_name): """Returns objects from strings Deals e.g. with 'torch.nn.Softmax(dim=-1)'. Modified from palladium: https://github.com/ottogroup/palladium/blob/8a066a9a7690557d9b1b6ed54b7d1a1502ba59e3/palladium/util.py with added support for instantiated objects. """ if not isinstance(dotted_name, str): return dotted_name if '.' not in dotted_name: return dotted_name args = None params = None match = P_PARAMS.match(dotted_name) if match: dotted_name = match.group('name') params = match.group('params') module, name = dotted_name.rsplit('.', 1) attr = import_module(module) attr = getattr(attr, name) if params: args, kwargs = _parse_args_kwargs(params[1:-1]) attr = attr(*args, **kwargs) return attr
[ "def", "_resolve_dotted_name", "(", "dotted_name", ")", ":", "if", "not", "isinstance", "(", "dotted_name", ",", "str", ")", ":", "return", "dotted_name", "if", "'.'", "not", "in", "dotted_name", ":", "return", "dotted_name", "args", "=", "None", "params", "=", "None", "match", "=", "P_PARAMS", ".", "match", "(", "dotted_name", ")", "if", "match", ":", "dotted_name", "=", "match", ".", "group", "(", "'name'", ")", "params", "=", "match", ".", "group", "(", "'params'", ")", "module", ",", "name", "=", "dotted_name", ".", "rsplit", "(", "'.'", ",", "1", ")", "attr", "=", "import_module", "(", "module", ")", "attr", "=", "getattr", "(", "attr", ",", "name", ")", "if", "params", ":", "args", ",", "kwargs", "=", "_parse_args_kwargs", "(", "params", "[", "1", ":", "-", "1", "]", ")", "attr", "=", "attr", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "attr" ]
Returns objects from strings Deals e.g. with 'torch.nn.Softmax(dim=-1)'. Modified from palladium: https://github.com/ottogroup/palladium/blob/8a066a9a7690557d9b1b6ed54b7d1a1502ba59e3/palladium/util.py with added support for instantiated objects.
[ "Returns", "objects", "from", "strings" ]
python
train
24.147059
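Putting the pieces together, the resolver turns a CLI string into an imported and optionally instantiated object. Illustrative calls (the torch example comes from the docstring and assumes torch is installed; note `_resolve_dotted_name` is private to skorch.cli):

from skorch.cli import _resolve_dotted_name

sqrt = _resolve_dotted_name('math.sqrt')                     # no parentheses -> the function itself
softmax = _resolve_dotted_name('torch.nn.Softmax(dim=-1)')   # parsed, imported, and instantiated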
prompt-toolkit/pymux
pymux/commands/commands.py
https://github.com/prompt-toolkit/pymux/blob/3f66e62b9de4b2251c7f9afad6c516dc5a30ec67/pymux/commands/commands.py#L595-L599
def display_message(pymux, variables): " Display a message. " message = variables['<message>'] client_state = pymux.get_client_state() client_state.message = message
[ "def", "display_message", "(", "pymux", ",", "variables", ")", ":", "message", "=", "variables", "[", "'<message>'", "]", "client_state", "=", "pymux", ".", "get_client_state", "(", ")", "client_state", ".", "message", "=", "message" ]
Display a message.
[ "Display", "a", "message", "." ]
python
train
35.4
ThreatConnect-Inc/tcex
tcex/tcex_bin_run.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_run.py#L290-L338
def clear_tc(self, owner, data, clear_type): """Delete threat intel from ThreatConnect platform. Args: owner (str): The ThreatConnect owner. data (dict): The data for the threat intel to clear. clear_type (str): The type of clear action. """ batch = self.tcex.batch(owner, action='Delete') tc_type = data.get('type') path = data.get('path') if tc_type in self.tcex.group_types: name = self.tcex.playbook.read(data.get('name')) name = self.path_data(name, path) if name is not None: print( 'Deleting ThreatConnect Group: {}{}{}'.format( c.Style.BRIGHT, c.Fore.MAGENTA, name ) ) self.log.info( '[{}] Deleting ThreatConnect {} with name: {}.'.format( clear_type, tc_type, name ) ) batch.group(tc_type, name) elif tc_type in self.tcex.indicator_types: if data.get('summary') is not None: summary = self.tcex.playbook.read(data.get('summary')) else: resource = self.tcex.resource(tc_type) summary = resource.summary(data) summary = self.path_data(summary, path) if summary is not None: print( 'Deleting ThreatConnect Indicator: {}{}{}'.format( c.Style.BRIGHT, c.Fore.MAGENTA, summary ) ) self.log.info( '[{}] Deleting ThreatConnect {} with value: {}.'.format( clear_type, tc_type, summary ) ) batch.indicator(tc_type, summary) batch_results = batch.submit() self.log.debug('[{}] Batch Results: {}'.format(clear_type, batch_results)) for error in batch_results.get('errors') or []: self.log.error('[{}] Batch Error: {}'.format(clear_type, error))
[ "def", "clear_tc", "(", "self", ",", "owner", ",", "data", ",", "clear_type", ")", ":", "batch", "=", "self", ".", "tcex", ".", "batch", "(", "owner", ",", "action", "=", "'Delete'", ")", "tc_type", "=", "data", ".", "get", "(", "'type'", ")", "path", "=", "data", ".", "get", "(", "'path'", ")", "if", "tc_type", "in", "self", ".", "tcex", ".", "group_types", ":", "name", "=", "self", ".", "tcex", ".", "playbook", ".", "read", "(", "data", ".", "get", "(", "'name'", ")", ")", "name", "=", "self", ".", "path_data", "(", "name", ",", "path", ")", "if", "name", "is", "not", "None", ":", "print", "(", "'Deleting ThreatConnect Group: {}{}{}'", ".", "format", "(", "c", ".", "Style", ".", "BRIGHT", ",", "c", ".", "Fore", ".", "MAGENTA", ",", "name", ")", ")", "self", ".", "log", ".", "info", "(", "'[{}] Deleting ThreatConnect {} with name: {}.'", ".", "format", "(", "clear_type", ",", "tc_type", ",", "name", ")", ")", "batch", ".", "group", "(", "tc_type", ",", "name", ")", "elif", "tc_type", "in", "self", ".", "tcex", ".", "indicator_types", ":", "if", "data", ".", "get", "(", "'summary'", ")", "is", "not", "None", ":", "summary", "=", "self", ".", "tcex", ".", "playbook", ".", "read", "(", "data", ".", "get", "(", "'summary'", ")", ")", "else", ":", "resource", "=", "self", ".", "tcex", ".", "resource", "(", "tc_type", ")", "summary", "=", "resource", ".", "summary", "(", "data", ")", "summary", "=", "self", ".", "path_data", "(", "summary", ",", "path", ")", "if", "summary", "is", "not", "None", ":", "print", "(", "'Deleting ThreatConnect Indicator: {}{}{}'", ".", "format", "(", "c", ".", "Style", ".", "BRIGHT", ",", "c", ".", "Fore", ".", "MAGENTA", ",", "summary", ")", ")", "self", ".", "log", ".", "info", "(", "'[{}] Deleting ThreatConnect {} with value: {}.'", ".", "format", "(", "clear_type", ",", "tc_type", ",", "summary", ")", ")", "batch", ".", "indicator", "(", "tc_type", ",", "summary", ")", "batch_results", "=", "batch", ".", "submit", "(", ")", "self", ".", "log", ".", "debug", "(", "'[{}] Batch Results: {}'", ".", "format", "(", "clear_type", ",", "batch_results", ")", ")", "for", "error", "in", "batch_results", ".", "get", "(", "'errors'", ")", "or", "[", "]", ":", "self", ".", "log", ".", "error", "(", "'[{}] Batch Error: {}'", ".", "format", "(", "clear_type", ",", "error", ")", ")" ]
Delete threat intel from ThreatConnect platform. Args: owner (str): The ThreatConnect owner. data (dict): The data for the threat intel to clear. clear_type (str): The type of clear action.
[ "Delete", "threat", "intel", "from", "ThreatConnect", "platform", "." ]
python
train
42.387755
arviz-devs/arviz
arviz/data/io_tfp.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/data/io_tfp.py#L123-L150
def sample_stats_to_xarray(self): """Extract sample_stats from tfp trace.""" if self.model_fn is None or self.observed is None: return None log_likelihood = [] sample_size = self.posterior[0].shape[0] for i in range(sample_size): variables = {} for var_i, var_name in enumerate(self.var_names): variables[var_name] = self.posterior[var_i][i] with self.ed.interception(self._value_setter(variables)): log_likelihood.append((self.model_fn().distribution.log_prob(self.observed))) data = {} if self.dims is not None: coord_name = self.dims.get("obs") else: coord_name = None dims = {"log_likelihood": coord_name} with self.tf.Session() as sess: data["log_likelihood"] = np.expand_dims( sess.run(log_likelihood, feed_dict=self.feed_dict), axis=0 ) return dict_to_dataset(data, library=self.tfp, coords=self.coords, dims=dims)
[ "def", "sample_stats_to_xarray", "(", "self", ")", ":", "if", "self", ".", "model_fn", "is", "None", "or", "self", ".", "observed", "is", "None", ":", "return", "None", "log_likelihood", "=", "[", "]", "sample_size", "=", "self", ".", "posterior", "[", "0", "]", ".", "shape", "[", "0", "]", "for", "i", "in", "range", "(", "sample_size", ")", ":", "variables", "=", "{", "}", "for", "var_i", ",", "var_name", "in", "enumerate", "(", "self", ".", "var_names", ")", ":", "variables", "[", "var_name", "]", "=", "self", ".", "posterior", "[", "var_i", "]", "[", "i", "]", "with", "self", ".", "ed", ".", "interception", "(", "self", ".", "_value_setter", "(", "variables", ")", ")", ":", "log_likelihood", ".", "append", "(", "(", "self", ".", "model_fn", "(", ")", ".", "distribution", ".", "log_prob", "(", "self", ".", "observed", ")", ")", ")", "data", "=", "{", "}", "if", "self", ".", "dims", "is", "not", "None", ":", "coord_name", "=", "self", ".", "dims", ".", "get", "(", "\"obs\"", ")", "else", ":", "coord_name", "=", "None", "dims", "=", "{", "\"log_likelihood\"", ":", "coord_name", "}", "with", "self", ".", "tf", ".", "Session", "(", ")", "as", "sess", ":", "data", "[", "\"log_likelihood\"", "]", "=", "np", ".", "expand_dims", "(", "sess", ".", "run", "(", "log_likelihood", ",", "feed_dict", "=", "self", ".", "feed_dict", ")", ",", "axis", "=", "0", ")", "return", "dict_to_dataset", "(", "data", ",", "library", "=", "self", ".", "tfp", ",", "coords", "=", "self", ".", "coords", ",", "dims", "=", "dims", ")" ]
Extract sample_stats from tfp trace.
[ "Extract", "sample_stats", "from", "tfp", "trace", "." ]
python
train
37.785714
tchellomello/python-arlo
pyarlo/base_station.py
https://github.com/tchellomello/python-arlo/blob/db70aeb81705309c56ad32bbab1094f6cd146524/pyarlo/base_station.py#L305-L312
def available_modes(self): """Return list of available mode names.""" if not self._available_modes: modes = self.available_modes_with_ids if not modes: return None self._available_modes = list(modes.keys()) return self._available_modes
[ "def", "available_modes", "(", "self", ")", ":", "if", "not", "self", ".", "_available_modes", ":", "modes", "=", "self", ".", "available_modes_with_ids", "if", "not", "modes", ":", "return", "None", "self", ".", "_available_modes", "=", "list", "(", "modes", ".", "keys", "(", ")", ")", "return", "self", ".", "_available_modes" ]
Return list of available mode names.
[ "Return", "list", "of", "available", "mode", "names", "." ]
python
train
38
LonamiWebs/Telethon
telethon/crypto/rsa.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/crypto/rsa.py#L21-L34
def get_byte_array(integer): """Return the variable length bytes corresponding to the given int""" # Operate in big endian (unlike most of Telegram API) since: # > "...pq is a representation of a natural number # (in binary *big endian* format)..." # > "...current value of dh_prime equals # (in *big-endian* byte order)..." # Reference: https://core.telegram.org/mtproto/auth_key return int.to_bytes( integer, (integer.bit_length() + 8 - 1) // 8, # 8 bits per byte, byteorder='big', signed=False )
[ "def", "get_byte_array", "(", "integer", ")", ":", "# Operate in big endian (unlike most of Telegram API) since:", "# > \"...pq is a representation of a natural number", "# (in binary *big endian* format)...\"", "# > \"...current value of dh_prime equals", "# (in *big-endian* byte order)...\"", "# Reference: https://core.telegram.org/mtproto/auth_key", "return", "int", ".", "to_bytes", "(", "integer", ",", "(", "integer", ".", "bit_length", "(", ")", "+", "8", "-", "1", ")", "//", "8", ",", "# 8 bits per byte,", "byteorder", "=", "'big'", ",", "signed", "=", "False", ")" ]
Return the variable length bytes corresponding to the given int
[ "Return", "the", "variable", "length", "bytes", "corresponding", "to", "the", "given", "int" ]
python
train
40
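A quick worked check of the encoding: the byte count is ceil(bit_length / 8), so values just past a byte boundary grow by one byte.

assert get_byte_array(255) == b'\xff'          # 8 bits  -> 1 byte
assert get_byte_array(256) == b'\x01\x00'      # 9 bits  -> 2 bytes
assert get_byte_array(1234) == b'\x04\xd2'     # 1234 = 0x04D2, 11 bits -> 2 bytes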
MAVENSDC/cdflib
cdflib/cdfwrite.py
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L605-L818
def write_var(self, var_spec, var_attrs=None, var_data=None):
    '''
    Writes a variable, along with variable attributes and data.

    Parameters
    ----------
    var_spec : dict
        The specifications of the variable.
        The required/optional keys for creating a variable:
        Required keys:

        - ['Variable']: The name of the variable
        - ['Data_Type']: the CDF data type
        - ['Num_Elements']: The number of elements. Always 1 for
          the numeric type. The char length for string type.
        - ['Rec_Vary']: Record variance

        For zVariables:

        - ['Dim_Sizes']: The dimensional sizes for zVariables only.
          Use [] for 0-dimension. Each and
          every dimension is varying for zVariables.

        For rVariables:

        - ['Dim_Vary']: The dimensional variances for rVariables only.

        Optional keys:

        - ['Var_Type']: Whether the variable is a zVariable or
          rVariable. Valid values: "zVariable" and "rVariable".
          The default is "zVariable".
        - ['Sparse']: Whether the variable has sparse records.
          Valid values are "no_sparse", "pad_sparse", and
          "prev_sparse". The default is 'no_sparse'.
        - ['Compress']: Set the gzip compression level (0 to 9), 0 for
          no compression. The default is to compress with level 6
          (done only if the compressed data is less than the
          uncompressed data).
        - ['Block_Factor']: The blocking factor, the number of
          records in a chunk when the variable is compressed.
        - ['Pad']: The padded value (in bytes, numpy.ndarray or string)

    var_attrs : dict
        {attribute:value} pairs.
        The attribute is the name of a variable attribute.
        The value can have its data type specified for the
        numeric data. If not, based on Python's type, a
        corresponding CDF type is assumed: CDF_INT4 for int,
        CDF_DOUBLE for float, CDF_EPOCH16 for complex
        and CDF_INT8 for long.
        For example, the following defined attributes will
        have the same types in the CDF::

            var_attrs= { 'attr1':  'value1',
                         'attr2':  12.45,
                         'attr3':  [3,4,5],
                         .....
                       }

        With data type (in the list form)::

            var_attrs= { 'attr1':  'value1',
                         'attr2':  [12.45, 'CDF_DOUBLE'],
                         'attr3':  [[3,4,5], 'CDF_INT4'],
                         .....
                       }

    var_data :
        The data for the variable. If the variable is
        a regular variable without sparse records, it must
        be in a single structure of bytes, or numpy.ndarray
        for numeric variable, or str or list of strs for
        string variable.
        If the variable has sparse records, var_data should
        be presented in a list/tuple with two elements,
        the first being a list/tuple that contains the
        physical record number(s), the second being the variable
        data in bytes, numpy.ndarray, or a list of strings.
        Variable data can have just physical records' data (with
        the same number of records as the first element) or have
        data from both physical records and virtual records
        (which are filled with the pad value).
        The var_data has the form::

            [[rec_#1,rec_#2,rec_#3,...],
             [data_#1,data_#2,data_#3,...]]

        See the sample for its setup.
''' if not isinstance(var_spec, dict): raise TypeError('Variable should be in dictionary form.') # Get variable info from var_spec try: dataType = int(var_spec['Data_Type']) numElems = int(var_spec['Num_Elements']) name = var_spec['Variable'] recVary = var_spec['Rec_Vary'] except Exception: raise ValueError('Missing/invalid required spec for creating variable.') # Get whether or not it is a z variable var_type = var_spec.setdefault('Var_Type', 'zvariable') if (var_type.lower() == 'zvariable'): zVar = True else: var_spec['Var_Type'] = 'rVariable' zVar = False if (dataType == CDF.CDF_CHAR or dataType == CDF.CDF_UCHAR): if numElems < 1: raise ValueError('Invalid Num_Elements for string data type variable') else: if numElems != 1: raise ValueError('Invalid Num_Elements for numeric data type variable') # If its a z variable, get the dimension info # Otherwise, use r variable info if zVar: try: dimSizes = var_spec['Dim_Sizes'] numDims = len(dimSizes) dimVary = [] for _ in range(0, numDims): dimVary.append(True) except Exception: raise ValueError('Missing/invalid required spec for creating variable.') else: dimSizes = self.rdim_sizes numDims = self.num_rdim try: dimVary = var_spec['Dim_Vary'] if (len(dimVary) != numDims): raise ValueError('Invalid Dim_Vary size for the rVariable.') except Exception: raise ValueError('Missing/invalid required spec for Dim_Vary for rVariable') # Get Sparseness info sparse = CDF._sparse_token(var_spec.get('Sparse', 'no_sparse')) # Get compression info compression = var_spec.get('Compress', 6) if (isinstance(compression, int)): if not 0 <= compression <= 9: compression = 0 else: compression = 6 if compression else 0 # Get blocking factor blockingfactor = int(var_spec.get('Block_Factor', 1)) # Get pad value pad = var_spec.get('Pad', None) if (isinstance(pad, list) or isinstance(pad, tuple)): pad = pad[0] if (name in self.zvars or name in self.rvars): raise ValueError('{} already exists'.format(name)) with self.path.open('rb+') as f: f.seek(0, 2) # EOF (appending) varNum, offset = self._write_vdr(f, dataType, numElems, numDims, dimSizes, name, dimVary, recVary, sparse, blockingfactor, compression, pad, zVar) # Update the GDR pointers if needed if zVar: if len(self.zvars) == 1: # GDR's zVDRhead self._update_offset_value(f, self.gdr_head+20, 8, offset) else: if len(self.rvars) == 1: # GDR's rVDRhead self._update_offset_value(f, self.gdr_head+12, 8, offset) # Write the variable attributes if var_attrs is not None: self._write_var_attrs(f, varNum, var_attrs, zVar) # Write the actual data to the file if not (var_data is None): if (sparse == 0): varMaxRec = self._write_var_data_nonsparse(f, zVar, varNum, dataType, numElems, recVary, compression, blockingfactor, var_data) else: notsupport = False if not isinstance(var_data, (list, tuple)): notsupport = True if notsupport or len(var_data) != 2: print('Sparse record #s and data are not of list/tuple form:') print(' [ [rec_#1, rec_#2, rec_#3, ],') print(' [data_#1, data_#2, data_#3, ....] ]') return # Format data into: [[recstart1, recend1, data1], # [recstart2,recend2,data2], ...] var_data = self._make_sparse_blocks(var_spec, var_data[0], var_data[1]) for block in var_data: varMaxRec = self._write_var_data_sparse(f, zVar, varNum, dataType, numElems, recVary, block) # Update GDR MaxRec if writing an r variable if not zVar: # GDR's rMaxRec f.seek(self.gdr_head+52) maxRec = int.from_bytes(f.read(4), 'big', signed=True) if (maxRec < varMaxRec): self._update_offset_value(f, self.gdr_head+52, 4, varMaxRec)
[ "def", "write_var", "(", "self", ",", "var_spec", ",", "var_attrs", "=", "None", ",", "var_data", "=", "None", ")", ":", "if", "not", "isinstance", "(", "var_spec", ",", "dict", ")", ":", "raise", "TypeError", "(", "'Variable should be in dictionary form.'", ")", "# Get variable info from var_spec", "try", ":", "dataType", "=", "int", "(", "var_spec", "[", "'Data_Type'", "]", ")", "numElems", "=", "int", "(", "var_spec", "[", "'Num_Elements'", "]", ")", "name", "=", "var_spec", "[", "'Variable'", "]", "recVary", "=", "var_spec", "[", "'Rec_Vary'", "]", "except", "Exception", ":", "raise", "ValueError", "(", "'Missing/invalid required spec for creating variable.'", ")", "# Get whether or not it is a z variable", "var_type", "=", "var_spec", ".", "setdefault", "(", "'Var_Type'", ",", "'zvariable'", ")", "if", "(", "var_type", ".", "lower", "(", ")", "==", "'zvariable'", ")", ":", "zVar", "=", "True", "else", ":", "var_spec", "[", "'Var_Type'", "]", "=", "'rVariable'", "zVar", "=", "False", "if", "(", "dataType", "==", "CDF", ".", "CDF_CHAR", "or", "dataType", "==", "CDF", ".", "CDF_UCHAR", ")", ":", "if", "numElems", "<", "1", ":", "raise", "ValueError", "(", "'Invalid Num_Elements for string data type variable'", ")", "else", ":", "if", "numElems", "!=", "1", ":", "raise", "ValueError", "(", "'Invalid Num_Elements for numeric data type variable'", ")", "# If its a z variable, get the dimension info", "# Otherwise, use r variable info", "if", "zVar", ":", "try", ":", "dimSizes", "=", "var_spec", "[", "'Dim_Sizes'", "]", "numDims", "=", "len", "(", "dimSizes", ")", "dimVary", "=", "[", "]", "for", "_", "in", "range", "(", "0", ",", "numDims", ")", ":", "dimVary", ".", "append", "(", "True", ")", "except", "Exception", ":", "raise", "ValueError", "(", "'Missing/invalid required spec for creating variable.'", ")", "else", ":", "dimSizes", "=", "self", ".", "rdim_sizes", "numDims", "=", "self", ".", "num_rdim", "try", ":", "dimVary", "=", "var_spec", "[", "'Dim_Vary'", "]", "if", "(", "len", "(", "dimVary", ")", "!=", "numDims", ")", ":", "raise", "ValueError", "(", "'Invalid Dim_Vary size for the rVariable.'", ")", "except", "Exception", ":", "raise", "ValueError", "(", "'Missing/invalid required spec for Dim_Vary for rVariable'", ")", "# Get Sparseness info", "sparse", "=", "CDF", ".", "_sparse_token", "(", "var_spec", ".", "get", "(", "'Sparse'", ",", "'no_sparse'", ")", ")", "# Get compression info", "compression", "=", "var_spec", ".", "get", "(", "'Compress'", ",", "6", ")", "if", "(", "isinstance", "(", "compression", ",", "int", ")", ")", ":", "if", "not", "0", "<=", "compression", "<=", "9", ":", "compression", "=", "0", "else", ":", "compression", "=", "6", "if", "compression", "else", "0", "# Get blocking factor", "blockingfactor", "=", "int", "(", "var_spec", ".", "get", "(", "'Block_Factor'", ",", "1", ")", ")", "# Get pad value", "pad", "=", "var_spec", ".", "get", "(", "'Pad'", ",", "None", ")", "if", "(", "isinstance", "(", "pad", ",", "list", ")", "or", "isinstance", "(", "pad", ",", "tuple", ")", ")", ":", "pad", "=", "pad", "[", "0", "]", "if", "(", "name", "in", "self", ".", "zvars", "or", "name", "in", "self", ".", "rvars", ")", ":", "raise", "ValueError", "(", "'{} already exists'", ".", "format", "(", "name", ")", ")", "with", "self", ".", "path", ".", "open", "(", "'rb+'", ")", "as", "f", ":", "f", ".", "seek", "(", "0", ",", "2", ")", "# EOF (appending)", "varNum", ",", "offset", "=", "self", ".", "_write_vdr", "(", "f", ",", "dataType", ",", "numElems", ",", 
"numDims", ",", "dimSizes", ",", "name", ",", "dimVary", ",", "recVary", ",", "sparse", ",", "blockingfactor", ",", "compression", ",", "pad", ",", "zVar", ")", "# Update the GDR pointers if needed", "if", "zVar", ":", "if", "len", "(", "self", ".", "zvars", ")", "==", "1", ":", "# GDR's zVDRhead", "self", ".", "_update_offset_value", "(", "f", ",", "self", ".", "gdr_head", "+", "20", ",", "8", ",", "offset", ")", "else", ":", "if", "len", "(", "self", ".", "rvars", ")", "==", "1", ":", "# GDR's rVDRhead", "self", ".", "_update_offset_value", "(", "f", ",", "self", ".", "gdr_head", "+", "12", ",", "8", ",", "offset", ")", "# Write the variable attributes", "if", "var_attrs", "is", "not", "None", ":", "self", ".", "_write_var_attrs", "(", "f", ",", "varNum", ",", "var_attrs", ",", "zVar", ")", "# Write the actual data to the file", "if", "not", "(", "var_data", "is", "None", ")", ":", "if", "(", "sparse", "==", "0", ")", ":", "varMaxRec", "=", "self", ".", "_write_var_data_nonsparse", "(", "f", ",", "zVar", ",", "varNum", ",", "dataType", ",", "numElems", ",", "recVary", ",", "compression", ",", "blockingfactor", ",", "var_data", ")", "else", ":", "notsupport", "=", "False", "if", "not", "isinstance", "(", "var_data", ",", "(", "list", ",", "tuple", ")", ")", ":", "notsupport", "=", "True", "if", "notsupport", "or", "len", "(", "var_data", ")", "!=", "2", ":", "print", "(", "'Sparse record #s and data are not of list/tuple form:'", ")", "print", "(", "' [ [rec_#1, rec_#2, rec_#3, ],'", ")", "print", "(", "' [data_#1, data_#2, data_#3, ....] ]'", ")", "return", "# Format data into: [[recstart1, recend1, data1],", "# [recstart2,recend2,data2], ...]", "var_data", "=", "self", ".", "_make_sparse_blocks", "(", "var_spec", ",", "var_data", "[", "0", "]", ",", "var_data", "[", "1", "]", ")", "for", "block", "in", "var_data", ":", "varMaxRec", "=", "self", ".", "_write_var_data_sparse", "(", "f", ",", "zVar", ",", "varNum", ",", "dataType", ",", "numElems", ",", "recVary", ",", "block", ")", "# Update GDR MaxRec if writing an r variable", "if", "not", "zVar", ":", "# GDR's rMaxRec", "f", ".", "seek", "(", "self", ".", "gdr_head", "+", "52", ")", "maxRec", "=", "int", ".", "from_bytes", "(", "f", ".", "read", "(", "4", ")", ",", "'big'", ",", "signed", "=", "True", ")", "if", "(", "maxRec", "<", "varMaxRec", ")", ":", "self", ".", "_update_offset_value", "(", "f", ",", "self", ".", "gdr_head", "+", "52", ",", "4", ",", "varMaxRec", ")" ]
Writes a variable, along with variable attributes and data.

Parameters
----------
var_spec : dict
    The specifications of the variable.
    The required/optional keys for creating a variable:
    Required keys:

    - ['Variable']: The name of the variable
    - ['Data_Type']: the CDF data type
    - ['Num_Elements']: The number of elements. Always 1 for
      numeric types. The char length for string types.
    - ['Rec_Vary']: Record variance

    For zVariables:

    - ['Dim_Sizes']: The dimensional sizes for zVariables only.
      Use [] for 0-dimension. Each and every dimension is varying
      for zVariables.

    For rVariables:

    - ['Dim_Vary']: The dimensional variances for rVariables only.

    Optional keys:

    - ['Var_Type']: Whether the variable is a zVariable or rVariable.
      Valid values: "zVariable" and "rVariable". The default is
      "zVariable".
    - ['Sparse']: Whether the variable has sparse records.
      Valid values are "no_sparse", "pad_sparse", and "prev_sparse".
      The default is 'no_sparse'.
    - ['Compress']: Set the gzip compression level (0 to 9), 0 for
      no compression. The default is to compress with level 6
      (done only if the compressed data is less than the
      uncompressed data).
    - ['Block_Factor']: The blocking factor, the number of records
      in a chunk when the variable is compressed.
    - ['Pad']: The padded value (in bytes, numpy.ndarray or string)

var_attrs : dict
    {attribute:value} pairs.
    The attribute is the name of a variable attribute.
    The value can have its data type specified for the numeric data.
    If not, based on Python's type, a corresponding CDF type is
    assumed: CDF_INT4 for int, CDF_DOUBLE for float, CDF_EPOCH16
    for complex and CDF_INT8 for long.
    For example, the following defined attributes will have the same
    types in the CDF::

        var_attrs= { 'attr1':  'value1',
                     'attr2':  12.45,
                     'attr3':  [3,4,5],
                     .....
                   }

    With data type (in the list form)::

        var_attrs= { 'attr1':  'value1',
                     'attr2':  [12.45, 'CDF_DOUBLE'],
                     'attr3':  [[3,4,5], 'CDF_INT4'],
                     .....
                   }

var_data :
    The data for the variable. If the variable is a regular
    variable without sparse records, it must be in a single
    structure of bytes, or numpy.ndarray for numeric variable,
    or str or list of strs for string variable.
    If the variable has sparse records, var_data should be
    presented in a list/tuple with two elements, the first being
    a list/tuple that contains the physical record number(s),
    the second being the variable data in bytes, numpy.ndarray,
    or a list of strings. Variable data can have just physical
    records' data (with the same number of records as the first
    element) or have data from both physical records and virtual
    records (which are filled with pad data). The var_data has
    the form::

        [[rec_#1, rec_#2, rec_#3, ...],
         [data_#1, data_#2, data_#3, ...]]

    See the sample for its setup.
[ "Writes", "a", "variable", "along", "with", "variable", "attributes", "and", "data", "." ]
python
train
42.560748
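The var_spec machinery above is easiest to see with a concrete call. A minimal usage sketch, assuming `cdf` is an open instance of this writer class; the variable name and values are hypothetical, and 45 is assumed to be the numeric code for CDF_DOUBLE:

import numpy as np

var_spec = {
    'Variable': 'Flux',   # hypothetical variable name
    'Data_Type': 45,      # assumed numeric code for CDF_DOUBLE
    'Num_Elements': 1,    # always 1 for numeric types
    'Rec_Vary': True,
    'Dim_Sizes': [],      # 0-dimensional zVariable
}
var_attrs = {'FIELDNAM': 'Flux', 'VALIDMIN': [0.0, 'CDF_DOUBLE']}
var_data = np.linspace(0.0, 1.0, 10)  # one value per record

cdf.write_var(var_spec, var_attrs=var_attrs, var_data=var_data)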
secdev/scapy
scapy/contrib/diameter.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/contrib/diameter.py#L4819-L4824
def DiamAns(cmd, **fields): """Craft Diameter answer commands""" upfields, name = getCmdParams(cmd, False, **fields) p = DiamG(**upfields) p.name = name return p
[ "def", "DiamAns", "(", "cmd", ",", "*", "*", "fields", ")", ":", "upfields", ",", "name", "=", "getCmdParams", "(", "cmd", ",", "False", ",", "*", "*", "fields", ")", "p", "=", "DiamG", "(", "*", "*", "upfields", ")", "p", ".", "name", "=", "name", "return", "p" ]
Craft Diameter answer commands
[ "Craft", "Diameter", "answer", "commands" ]
python
train
29.333333
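A hedged sketch of crafting an answer with DiamAns; 'Capabilities-Exchange' is assumed to be a valid name in the contrib module's command table, and the AVP fields (passed as keyword arguments) are omitted here:

from scapy.contrib.diameter import DiamAns

cea = DiamAns('Capabilities-Exchange')  # Capabilities-Exchange-Answer
cea.show()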
broadinstitute/fiss
firecloud/fiss.py
https://github.com/broadinstitute/fiss/blob/dddf91547479506dbbafb69ec84d44dcc4a94ab4/firecloud/fiss.py#L915-L1000
def attr_delete(args): ''' Delete key=value attributes: if entity name & type are specified then attributes will be deleted from that entity, otherwise the attribute will be removed from the workspace''' if args.entity_type and args.entities: # Since there is no attribute deletion endpoint, we must perform 2 steps # here: first we retrieve the entity_ids, and any foreign keys (e.g. # participant_id for sample_id); and then construct a loadfile which # specifies which entities are to have what attributes removed. Note # that FireCloud uses the magic keyword __DELETE__ to indicate that # an attribute should be deleted from an entity. # Step 1: see what entities are present, and filter to those requested entities = _entity_paginator(args.project, args.workspace, args.entity_type, page_size=1000, filter_terms=None, sort_direction="asc") if args.entities: entities = [e for e in entities if e['name'] in args.entities] # Step 2: construct a loadfile to delete these attributes attrs = sorted(args.attributes) etype = args.entity_type entity_data = [] for entity_dict in entities: name = entity_dict['name'] line = name # TODO: Fix other types? if etype == "sample": line += "\t" + entity_dict['attributes']['participant']['entityName'] for attr in attrs: line += "\t__DELETE__" # Improve performance by only updating records that have changed entity_data.append(line) entity_header = ["entity:" + etype + "_id"] if etype == "sample": entity_header.append("participant_id") entity_header = '\t'.join(entity_header + list(attrs)) # Remove attributes from an entity message = "WARNING: this will delete these attributes:\n\n" + \ ','.join(args.attributes) + "\n\n" if args.entities: message += 'on these {0}s:\n\n'.format(args.entity_type) + \ ', '.join(args.entities) else: message += 'on all {0}s'.format(args.entity_type) message += "\n\nin workspace {0}/{1}\n".format(args.project, args.workspace) if not args.yes and not _confirm_prompt(message): return 0 # TODO: reconcile with other batch updates # Chunk the entities into batches of 500, and upload to FC if args.verbose: print("Batching " + str(len(entity_data)) + " updates to Firecloud...") chunk_len = 500 total = int(len(entity_data) / chunk_len) + 1 batch = 0 for i in range(0, len(entity_data), chunk_len): batch += 1 if args.verbose: print("Updating samples {0}-{1}, batch {2}/{3}".format( i+1, min(i+chunk_len, len(entity_data)), batch, total )) this_data = entity_header + '\n' + '\n'.join(entity_data[i:i+chunk_len]) # Now push the entity data back to firecloud r = fapi.upload_entities(args.project, args.workspace, this_data) fapi._check_response_code(r, 200) else: message = "WARNING: this will delete the following attributes in " + \ "{0}/{1}\n\t".format(args.project, args.workspace) + \ "\n\t".join(args.attributes) if not (args.yes or _confirm_prompt(message)): return 0 updates = [fapi._attr_rem(a) for a in args.attributes] r = fapi.update_workspace_attributes(args.project, args.workspace, updates) fapi._check_response_code(r, 200) return 0
[ "def", "attr_delete", "(", "args", ")", ":", "if", "args", ".", "entity_type", "and", "args", ".", "entities", ":", "# Since there is no attribute deletion endpoint, we must perform 2 steps", "# here: first we retrieve the entity_ids, and any foreign keys (e.g.", "# participant_id for sample_id); and then construct a loadfile which", "# specifies which entities are to have what attributes removed. Note", "# that FireCloud uses the magic keyword __DELETE__ to indicate that", "# an attribute should be deleted from an entity.", "# Step 1: see what entities are present, and filter to those requested", "entities", "=", "_entity_paginator", "(", "args", ".", "project", ",", "args", ".", "workspace", ",", "args", ".", "entity_type", ",", "page_size", "=", "1000", ",", "filter_terms", "=", "None", ",", "sort_direction", "=", "\"asc\"", ")", "if", "args", ".", "entities", ":", "entities", "=", "[", "e", "for", "e", "in", "entities", "if", "e", "[", "'name'", "]", "in", "args", ".", "entities", "]", "# Step 2: construct a loadfile to delete these attributes", "attrs", "=", "sorted", "(", "args", ".", "attributes", ")", "etype", "=", "args", ".", "entity_type", "entity_data", "=", "[", "]", "for", "entity_dict", "in", "entities", ":", "name", "=", "entity_dict", "[", "'name'", "]", "line", "=", "name", "# TODO: Fix other types?", "if", "etype", "==", "\"sample\"", ":", "line", "+=", "\"\\t\"", "+", "entity_dict", "[", "'attributes'", "]", "[", "'participant'", "]", "[", "'entityName'", "]", "for", "attr", "in", "attrs", ":", "line", "+=", "\"\\t__DELETE__\"", "# Improve performance by only updating records that have changed", "entity_data", ".", "append", "(", "line", ")", "entity_header", "=", "[", "\"entity:\"", "+", "etype", "+", "\"_id\"", "]", "if", "etype", "==", "\"sample\"", ":", "entity_header", ".", "append", "(", "\"participant_id\"", ")", "entity_header", "=", "'\\t'", ".", "join", "(", "entity_header", "+", "list", "(", "attrs", ")", ")", "# Remove attributes from an entity", "message", "=", "\"WARNING: this will delete these attributes:\\n\\n\"", "+", "','", ".", "join", "(", "args", ".", "attributes", ")", "+", "\"\\n\\n\"", "if", "args", ".", "entities", ":", "message", "+=", "'on these {0}s:\\n\\n'", ".", "format", "(", "args", ".", "entity_type", ")", "+", "', '", ".", "join", "(", "args", ".", "entities", ")", "else", ":", "message", "+=", "'on all {0}s'", ".", "format", "(", "args", ".", "entity_type", ")", "message", "+=", "\"\\n\\nin workspace {0}/{1}\\n\"", ".", "format", "(", "args", ".", "project", ",", "args", ".", "workspace", ")", "if", "not", "args", ".", "yes", "and", "not", "_confirm_prompt", "(", "message", ")", ":", "return", "0", "# TODO: reconcile with other batch updates", "# Chunk the entities into batches of 500, and upload to FC", "if", "args", ".", "verbose", ":", "print", "(", "\"Batching \"", "+", "str", "(", "len", "(", "entity_data", ")", ")", "+", "\" updates to Firecloud...\"", ")", "chunk_len", "=", "500", "total", "=", "int", "(", "len", "(", "entity_data", ")", "/", "chunk_len", ")", "+", "1", "batch", "=", "0", "for", "i", "in", "range", "(", "0", ",", "len", "(", "entity_data", ")", ",", "chunk_len", ")", ":", "batch", "+=", "1", "if", "args", ".", "verbose", ":", "print", "(", "\"Updating samples {0}-{1}, batch {2}/{3}\"", ".", "format", "(", "i", "+", "1", ",", "min", "(", "i", "+", "chunk_len", ",", "len", "(", "entity_data", ")", ")", ",", "batch", ",", "total", ")", ")", "this_data", "=", "entity_header", "+", "'\\n'", "+", "'\\n'", ".", 
"join", "(", "entity_data", "[", "i", ":", "i", "+", "chunk_len", "]", ")", "# Now push the entity data back to firecloud", "r", "=", "fapi", ".", "upload_entities", "(", "args", ".", "project", ",", "args", ".", "workspace", ",", "this_data", ")", "fapi", ".", "_check_response_code", "(", "r", ",", "200", ")", "else", ":", "message", "=", "\"WARNING: this will delete the following attributes in \"", "+", "\"{0}/{1}\\n\\t\"", ".", "format", "(", "args", ".", "project", ",", "args", ".", "workspace", ")", "+", "\"\\n\\t\"", ".", "join", "(", "args", ".", "attributes", ")", "if", "not", "(", "args", ".", "yes", "or", "_confirm_prompt", "(", "message", ")", ")", ":", "return", "0", "updates", "=", "[", "fapi", ".", "_attr_rem", "(", "a", ")", "for", "a", "in", "args", ".", "attributes", "]", "r", "=", "fapi", ".", "update_workspace_attributes", "(", "args", ".", "project", ",", "args", ".", "workspace", ",", "updates", ")", "fapi", ".", "_check_response_code", "(", "r", ",", "200", ")", "return", "0" ]
Delete key=value attributes: if entity name & type are specified then attributes will be deleted from that entity, otherwise the attribute will be removed from the workspace
[ "Delete", "key", "=", "value", "attributes", ":", "if", "entity", "name", "&", "type", "are", "specified", "then", "attributes", "will", "be", "deleted", "from", "that", "entity", "otherwise", "the", "attribute", "will", "be", "removed", "from", "the", "workspace" ]
python
train
44.337209
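Because attr_delete consumes an argparse-style namespace, a hedged sketch of a workspace-level deletion looks like this (the project and workspace names are placeholders, and a live FireCloud session is required):

from argparse import Namespace

args = Namespace(project='my-billing-project', workspace='my-workspace',
                 entity_type=None, entities=None,
                 attributes=['obsolete_attr'], yes=True, verbose=False)
attr_delete(args)  # removes 'obsolete_attr' from the workspace attributes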
ejeschke/ginga
ginga/rv/plugins/FBrowser.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/FBrowser.py#L269-L296
def open_files(self, path): """Load file(s) -- image*.fits, image*.fits[ext]. Returns success code (True or False). """ paths = [] input_list = _patt.findall(path) if not input_list: input_list = [path] for path in input_list: # Strips trailing wildcard if path.endswith('*'): path = path[:-1] if os.path.isdir(path): continue self.logger.debug('Opening files matched by {0}'.format(path)) info = iohelper.get_fileinfo(path) ext = iohelper.get_hdu_suffix(info.numhdu) files = glob.glob(info.filepath) # Expand wildcard paths.extend(['{0}{1}'.format(f, ext) for f in files]) if len(paths) > 0: self.load_paths(paths) return True return False
[ "def", "open_files", "(", "self", ",", "path", ")", ":", "paths", "=", "[", "]", "input_list", "=", "_patt", ".", "findall", "(", "path", ")", "if", "not", "input_list", ":", "input_list", "=", "[", "path", "]", "for", "path", "in", "input_list", ":", "# Strips trailing wildcard", "if", "path", ".", "endswith", "(", "'*'", ")", ":", "path", "=", "path", "[", ":", "-", "1", "]", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "continue", "self", ".", "logger", ".", "debug", "(", "'Opening files matched by {0}'", ".", "format", "(", "path", ")", ")", "info", "=", "iohelper", ".", "get_fileinfo", "(", "path", ")", "ext", "=", "iohelper", ".", "get_hdu_suffix", "(", "info", ".", "numhdu", ")", "files", "=", "glob", ".", "glob", "(", "info", ".", "filepath", ")", "# Expand wildcard", "paths", ".", "extend", "(", "[", "'{0}{1}'", ".", "format", "(", "f", ",", "ext", ")", "for", "f", "in", "files", "]", ")", "if", "len", "(", "paths", ")", ">", "0", ":", "self", ".", "load_paths", "(", "paths", ")", "return", "True", "return", "False" ]
Load file(s) -- image*.fits, image*.fits[ext]. Returns success code (True or False).
[ "Load", "file", "(", "s", ")", "--", "image", "*", ".", "fits", "image", "*", ".", "fits", "[", "ext", "]", ".", "Returns", "success", "code", "(", "True", "or", "False", ")", "." ]
python
train
30.535714
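A short usage sketch, where `fbrowser` stands for an active instance of this plugin and the path pattern is hypothetical; wildcards expand to multiple files, and an optional [ext] suffix selects an HDU:

ok = fbrowser.open_files('/data/night1/image*.fits[1]')
print('loaded' if ok else 'nothing to load')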
jluttine/d3py
d3py/core.py
https://github.com/jluttine/d3py/blob/2856eb3aa23c4ae17897fcd6a25b68203eb1e9e0/d3py/core.py#L79-L95
def create_graph_html(js_template, css_template, html_template=None): """ Create HTML code block given the graph Javascript and CSS. """ if html_template is None: html_template = read_lib('html', 'graph') # Create div ID for the graph and give it to the JS and CSS templates so # they can reference the graph. graph_id = 'graph-{0}'.format(_get_random_id()) js = populate_template(js_template, graph_id=graph_id) css = populate_template(css_template, graph_id=graph_id) return populate_template( html_template, graph_id=graph_id, css=css, js=js )
[ "def", "create_graph_html", "(", "js_template", ",", "css_template", ",", "html_template", "=", "None", ")", ":", "if", "html_template", "is", "None", ":", "html_template", "=", "read_lib", "(", "'html'", ",", "'graph'", ")", "# Create div ID for the graph and give it to the JS and CSS templates so", "# they can reference the graph.", "graph_id", "=", "'graph-{0}'", ".", "format", "(", "_get_random_id", "(", ")", ")", "js", "=", "populate_template", "(", "js_template", ",", "graph_id", "=", "graph_id", ")", "css", "=", "populate_template", "(", "css_template", ",", "graph_id", "=", "graph_id", ")", "return", "populate_template", "(", "html_template", ",", "graph_id", "=", "graph_id", ",", "css", "=", "css", ",", "js", "=", "js", ")" ]
Create HTML code block given the graph Javascript and CSS.
[ "Create", "HTML", "code", "block", "given", "the", "graph", "Javascript", "and", "CSS", "." ]
python
train
35.764706
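A minimal sketch of the call; the template strings are illustrative placeholders, and the '{graph_id}' substitution syntax is an assumption about what populate_template expects:

js_template = "var svg = d3.select('#{graph_id}').append('svg');"
css_template = '/* styles for #{graph_id} */'

html = create_graph_html(js_template, css_template)  # default HTML template
print(html)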
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L326-L372
def configuration_check(config):
    """Perform a sanity check on configuration.

    First it performs a sanity check against settings for daemon
    and then against settings for each service check.

    Arguments:
        config (obj): A configparser object which holds our configuration.

    Returns:
        None if all checks are successfully passed, otherwise raises a
        ValueError exception.

    """
    log_level = config.get('daemon', 'loglevel')
    num_level = getattr(logging, log_level.upper(), None)
    pidfile = config.get('daemon', 'pidfile')

    # Catch the case where the directory, under which we store the pid file, is
    # missing.
    if not os.path.isdir(os.path.dirname(pidfile)):
        raise ValueError("{d} doesn't exist".format(d=os.path.dirname(pidfile)))

    if not isinstance(num_level, int):
        raise ValueError('Invalid log level: {}'.format(log_level))

    for _file in 'log_file', 'stderr_file':
        if config.has_option('daemon', _file):
            try:
                touch(config.get('daemon', _file))
            except OSError as exc:
                raise ValueError(exc)

    for option, getter in DAEMON_OPTIONS_TYPE.items():
        try:
            getattr(config, getter)('daemon', option)
        except configparser.NoOptionError as error:
            if option not in DAEMON_OPTIONAL_OPTIONS:
                raise ValueError(error)
        except configparser.Error as error:
            raise ValueError(error)
        except ValueError as exc:
            msg = ("invalid data for '{opt}' option in daemon section: {err}"
                   .format(opt=option, err=exc))
            raise ValueError(msg)

    service_configuration_check(config)
[ "def", "configuration_check", "(", "config", ")", ":", "log_level", "=", "config", ".", "get", "(", "'daemon'", ",", "'loglevel'", ")", "num_level", "=", "getattr", "(", "logging", ",", "log_level", ".", "upper", "(", ")", ",", "None", ")", "pidfile", "=", "config", ".", "get", "(", "'daemon'", ",", "'pidfile'", ")", "# Catch the case where the directory, under which we store the pid file, is", "# missing.", "if", "not", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", ".", "dirname", "(", "pidfile", ")", ")", ":", "raise", "ValueError", "(", "\"{d} doesn't exit\"", ".", "format", "(", "d", "=", "os", ".", "path", ".", "dirname", "(", "pidfile", ")", ")", ")", "if", "not", "isinstance", "(", "num_level", ",", "int", ")", ":", "raise", "ValueError", "(", "'Invalid log level: {}'", ".", "format", "(", "log_level", ")", ")", "for", "_file", "in", "'log_file'", ",", "'stderr_file'", ":", "if", "config", ".", "has_option", "(", "'daemon'", ",", "_file", ")", ":", "try", ":", "touch", "(", "config", ".", "get", "(", "'daemon'", ",", "_file", ")", ")", "except", "OSError", "as", "exc", ":", "raise", "ValueError", "(", "exc", ")", "for", "option", ",", "getter", "in", "DAEMON_OPTIONS_TYPE", ".", "items", "(", ")", ":", "try", ":", "getattr", "(", "config", ",", "getter", ")", "(", "'daemon'", ",", "option", ")", "except", "configparser", ".", "NoOptionError", "as", "error", ":", "if", "option", "not", "in", "DAEMON_OPTIONAL_OPTIONS", ":", "raise", "ValueError", "(", "error", ")", "except", "configparser", ".", "Error", "as", "error", ":", "raise", "ValueError", "(", "error", ")", "except", "ValueError", "as", "exc", ":", "msg", "=", "(", "\"invalid data for '{opt}' option in daemon section: {err}\"", ".", "format", "(", "opt", "=", "option", ",", "err", "=", "exc", ")", ")", "raise", "ValueError", "(", "msg", ")", "service_configuration_check", "(", "config", ")" ]
Perform a sanity check on configuration.

First it performs a sanity check against settings for daemon
and then against settings for each service check.

Arguments:
    config (obj): A configparser object which holds our configuration.

Returns:
    None if all checks are successfully passed, otherwise raises a
    ValueError exception.
[ "Perform", "a", "sanity", "check", "on", "configuration", "." ]
python
train
35.638298
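A hedged sketch of a config that exercises the daemon checks; the paths are placeholders that must exist and be writable on the host, and further options (those listed in DAEMON_OPTIONS_TYPE, plus the per-service sections) may also be required:

import configparser

config = configparser.ConfigParser()
config.read_dict({'daemon': {
    'loglevel': 'info',
    'pidfile': '/var/run/anycast-healthchecker/healthchecker.pid',
    'log_file': '/var/log/anycast-healthchecker/healthchecker.log',
}})
configuration_check(config)  # raises ValueError on invalid settings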
pdkit/pdkit
pdkit/tremor_time_series.py
https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/tremor_time_series.py#L31-L55
def load(self, filename, format_file='cloudupdrs'):
    """
    This is a general load data method where the format of data to load can be passed as a parameter.

    :param str filename: The path to load data from
    :param str format_file: format of the file. Default is CloudUPDRS. Set to mpower for mpower data.
    :return dataframe: data_frame.x, data_frame.y, data_frame.z: x, y, z components of the acceleration \
        data_frame.index is the datetime-like index
    """
    try:
        ts = load_data(filename, format_file)
        validator = CloudUPDRSDataFrameValidator()

        if validator.is_valid(ts):
            return ts
        else:
            logging.error('Error loading data, wrong format.')
            return None
    except IOError as e:
        ierr = "({}): {}".format(e.errno, e.strerror)
        logging.error("load data, file not found, I/O error %s", ierr)
    except ValueError as verr:
        logging.error("load data ValueError ->%s", verr.message)
    except:
        logging.error("Unexpected error on load data method: %s", sys.exc_info()[0])
[ "def", "load", "(", "self", ",", "filename", ",", "format_file", "=", "'cloudupdrs'", ")", ":", "try", ":", "ts", "=", "load_data", "(", "filename", ",", "format_file", ")", "validator", "=", "CloudUPDRSDataFrameValidator", "(", ")", "if", "validator", ".", "is_valid", "(", "ts", ")", ":", "return", "ts", "else", ":", "logging", ".", "error", "(", "'Error loading data, wrong format.'", ")", "return", "None", "except", "IOError", "as", "e", ":", "ierr", "=", "\"({}): {}\"", ".", "format", "(", "e", ".", "errno", ",", "e", ".", "strerror", ")", "logging", ".", "error", "(", "\"load data, file not found, I/O error %s\"", ",", "ierr", ")", "except", "ValueError", "as", "verr", ":", "logging", ".", "error", "(", "\"load data ValueError ->%s\"", ",", "verr", ".", "message", ")", "except", ":", "logging", ".", "error", "(", "\"Unexpected error on load data method: %s\"", ",", "sys", ".", "exc_info", "(", ")", "[", "0", "]", ")" ]
This is a general load data method where the format of data to load can be passed as a parameter.

:param str filename: The path to load data from
:param str format_file: format of the file. Default is CloudUPDRS. Set to mpower for mpower data.
:return dataframe: data_frame.x, data_frame.y, data_frame.z: x, y, z components of the acceleration \
    data_frame.index is the datetime-like index
[ "This", "is", "a", "general", "load", "data", "method", "where", "the", "format", "of", "data", "to", "load", "can", "be", "passed", "as", "a", "parameter" ]
python
train
46.8
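A short usage sketch, assuming this method belongs to pdkit's TremorTimeSeries class; the file path is a placeholder:

import pdkit

df = pdkit.TremorTimeSeries().load('./tremor_sample.csv')  # cloudupdrs format
if df is not None:
    print(df.x.head())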
shexSpec/grammar
parsers/python/pyshexc/parser_impl/shex_doc_parser.py
https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/shex_doc_parser.py#L55-L60
def visitPrefixDecl(self, ctx: ShExDocParser.PrefixDeclContext): """ prefixDecl: KW_PREFIX PNAME_NS IRIREF """ iri = self.context.iriref_to_shexj_iriref(ctx.IRIREF()) prefix = ctx.PNAME_NS().getText() if iri not in self.context.ld_prefixes: self.context.prefixes.setdefault(prefix, iri.val)
[ "def", "visitPrefixDecl", "(", "self", ",", "ctx", ":", "ShExDocParser", ".", "PrefixDeclContext", ")", ":", "iri", "=", "self", ".", "context", ".", "iriref_to_shexj_iriref", "(", "ctx", ".", "IRIREF", "(", ")", ")", "prefix", "=", "ctx", ".", "PNAME_NS", "(", ")", ".", "getText", "(", ")", "if", "iri", "not", "in", "self", ".", "context", ".", "ld_prefixes", ":", "self", ".", "context", ".", "prefixes", ".", "setdefault", "(", "prefix", ",", "iri", ".", "val", ")" ]
prefixDecl: KW_PREFIX PNAME_NS IRIREF
[ "prefixDecl", ":", "KW_PREFIX", "PNAME_NS", "IRIREF" ]
python
train
54.833333
HiPERCAM/hcam_widgets
hcam_widgets/widgets.py
https://github.com/HiPERCAM/hcam_widgets/blob/7219f0d96dd3a8ebe3139c7f542a72c02d02fce8/hcam_widgets/widgets.py#L757-L762
def add(self, num): """ Adds num to the current value """ self.index = max(0, min(len(self.allowed)-1, self.index+num)) self.set(self.allowed[self.index])
[ "def", "add", "(", "self", ",", "num", ")", ":", "self", ".", "index", "=", "max", "(", "0", ",", "min", "(", "len", "(", "self", ".", "allowed", ")", "-", "1", ",", "self", ".", "index", "+", "num", ")", ")", "self", ".", "set", "(", "self", ".", "allowed", "[", "self", ".", "index", "]", ")" ]
Adds num to the current value
[ "Adds", "num", "to", "the", "current", "value" ]
python
train
31.5
saltstack/salt
salt/modules/consul.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/consul.py#L50-L113
def _query(function,
           consul_url,
           token=None,
           method='GET',
           api_version='v1',
           data=None,
           query_params=None):
    '''
    Consul object method function to construct and execute on the API URL.

    :param consul_url: The Consul api url.
    :param api_version: The Consul api version.
    :param function: The Consul api function to perform.
    :param method: The HTTP method, e.g. GET or POST.
    :param data: The data to be sent for POST method. This param is ignored for GET requests.
    :return: The json response from the API call or False.
    '''
    if not query_params:
        query_params = {}

    ret = {'data': '', 'res': True}

    if not token:
        token = _get_token()

    headers = {"X-Consul-Token": token, "Content-Type": "application/json"}
    base_url = urllib.parse.urljoin(consul_url, '{0}/'.format(api_version))
    url = urllib.parse.urljoin(base_url, function, False)

    if method == 'GET':
        data = None
    else:
        if data is None:
            data = {}
        data = salt.utils.json.dumps(data)

    result = salt.utils.http.query(
        url,
        method=method,
        params=query_params,
        data=data,
        decode=True,
        status=True,
        header_dict=headers,
        opts=__opts__,
    )

    if result.get('status', None) == http_client.OK:
        ret['data'] = result.get('dict', result)
        ret['res'] = True
    elif result.get('status', None) == http_client.NO_CONTENT:
        ret['res'] = False
    elif result.get('status', None) == http_client.NOT_FOUND:
        ret['data'] = 'Key not found.'
        ret['res'] = False
    else:
        if result:
            ret['data'] = result
            ret['res'] = True
        else:
            ret['res'] = False
    return ret
[ "def", "_query", "(", "function", ",", "consul_url", ",", "token", "=", "None", ",", "method", "=", "'GET'", ",", "api_version", "=", "'v1'", ",", "data", "=", "None", ",", "query_params", "=", "None", ")", ":", "if", "not", "query_params", ":", "query_params", "=", "{", "}", "ret", "=", "{", "'data'", ":", "''", ",", "'res'", ":", "True", "}", "if", "not", "token", ":", "token", "=", "_get_token", "(", ")", "headers", "=", "{", "\"X-Consul-Token\"", ":", "token", ",", "\"Content-Type\"", ":", "\"application/json\"", "}", "base_url", "=", "urllib", ".", "parse", ".", "urljoin", "(", "consul_url", ",", "'{0}/'", ".", "format", "(", "api_version", ")", ")", "url", "=", "urllib", ".", "parse", ".", "urljoin", "(", "base_url", ",", "function", ",", "False", ")", "if", "method", "==", "'GET'", ":", "data", "=", "None", "else", ":", "if", "data", "is", "None", ":", "data", "=", "{", "}", "data", "=", "salt", ".", "utils", ".", "json", ".", "dumps", "(", "data", ")", "result", "=", "salt", ".", "utils", ".", "http", ".", "query", "(", "url", ",", "method", "=", "method", ",", "params", "=", "query_params", ",", "data", "=", "data", ",", "decode", "=", "True", ",", "status", "=", "True", ",", "header_dict", "=", "headers", ",", "opts", "=", "__opts__", ",", ")", "if", "result", ".", "get", "(", "'status'", ",", "None", ")", "==", "http_client", ".", "OK", ":", "ret", "[", "'data'", "]", "=", "result", ".", "get", "(", "'dict'", ",", "result", ")", "ret", "[", "'res'", "]", "=", "True", "elif", "result", ".", "get", "(", "'status'", ",", "None", ")", "==", "http_client", ".", "NO_CONTENT", ":", "ret", "[", "'res'", "]", "=", "False", "elif", "result", ".", "get", "(", "'status'", ",", "None", ")", "==", "http_client", ".", "NOT_FOUND", ":", "ret", "[", "'data'", "]", "=", "'Key not found.'", "ret", "[", "'res'", "]", "=", "False", "else", ":", "if", "result", ":", "ret", "[", "'data'", "]", "=", "result", "ret", "[", "'res'", "]", "=", "True", "else", ":", "ret", "[", "'res'", "]", "=", "False", "return", "ret" ]
Consul object method function to construct and execute on the API URL.

:param consul_url: The Consul api url.
:param api_version: The Consul api version.
:param function: The Consul api function to perform.
:param method: The HTTP method, e.g. GET or POST.
:param data: The data to be sent for POST method. This param is ignored for GET requests.
:return: The json response from the API call or False.
[ "Consul", "object", "method", "function", "to", "construct", "and", "execute", "on", "the", "API", "URL", "." ]
python
train
28.234375
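Although _query is internal, a hedged sketch shows how the module's wrappers would call it; the URL and token are placeholders, and the __opts__ dunder means this only runs inside a Salt execution context:

ret = _query('kv/web/key1', 'http://127.0.0.1:8500', token='my-acl-token')
if ret['res']:
    print(ret['data'])
else:
    print('lookup failed:', ret['data'])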
arviz-devs/arviz
arviz/plots/hpdplot.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/plots/hpdplot.py#L10-L86
def plot_hpd(
    x,
    y,
    credible_interval=0.94,
    color="C1",
    circular=False,
    smooth=True,
    smooth_kwargs=None,
    fill_kwargs=None,
    plot_kwargs=None,
    ax=None,
):
    """
    Plot hpd intervals for regression data.

    Parameters
    ----------
    x : array-like
        Values to plot
    y : array-like
        values from which to compute the hpd
    credible_interval : float, optional
        Credible interval to plot. Defaults to 0.94.
    color : str
        Color used for the limits of the HPD interval and fill. Should be a valid matplotlib color
    circular : bool, optional
        Whether to compute the hpd taking into account `x` is a circular variable
        (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
    smooth : boolean
        If True the result will be smoothed by first computing a linear interpolation of the data
        over a regular grid and then applying the Savitzky-Golay filter to the interpolated data.
        Defaults to True.
    smooth_kwargs : dict, optional
        Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for details
    fill_kwargs : dict
        Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).
    plot_kwargs : dict
        Keywords passed to HPD limits
    ax : matplotlib axes

    Returns
    -------
    ax : matplotlib axes
    """
    if plot_kwargs is None:
        plot_kwargs = {}
    plot_kwargs.setdefault("color", color)
    plot_kwargs.setdefault("alpha", 0)

    if fill_kwargs is None:
        fill_kwargs = {}
    fill_kwargs.setdefault("color", color)
    fill_kwargs.setdefault("alpha", 0.5)

    if ax is None:
        ax = gca()

    hpd_ = hpd(y, credible_interval=credible_interval, circular=circular)

    if smooth:
        if smooth_kwargs is None:
            smooth_kwargs = {}
        smooth_kwargs.setdefault("window_length", 55)
        smooth_kwargs.setdefault("polyorder", 2)
        x_data = np.linspace(x.min(), x.max(), 200)
        hpd_interp = griddata(x, hpd_, x_data)
        y_data = savgol_filter(hpd_interp, axis=0, **smooth_kwargs)
    else:
        idx = np.argsort(x)
        x_data = x[idx]
        y_data = hpd_[idx]

    ax.plot(x_data, y_data, **plot_kwargs)
    ax.fill_between(x_data, y_data[:, 0], y_data[:, 1], **fill_kwargs)

    return ax
[ "def", "plot_hpd", "(", "x", ",", "y", ",", "credible_interval", "=", "0.94", ",", "color", "=", "\"C1\"", ",", "circular", "=", "False", ",", "smooth", "=", "True", ",", "smooth_kwargs", "=", "None", ",", "fill_kwargs", "=", "None", ",", "plot_kwargs", "=", "None", ",", "ax", "=", "None", ",", ")", ":", "if", "plot_kwargs", "is", "None", ":", "plot_kwargs", "=", "{", "}", "plot_kwargs", ".", "setdefault", "(", "\"color\"", ",", "color", ")", "plot_kwargs", ".", "setdefault", "(", "\"alpha\"", ",", "0", ")", "if", "fill_kwargs", "is", "None", ":", "fill_kwargs", "=", "{", "}", "fill_kwargs", ".", "setdefault", "(", "\"color\"", ",", "color", ")", "fill_kwargs", ".", "setdefault", "(", "\"alpha\"", ",", "0.5", ")", "if", "ax", "is", "None", ":", "ax", "=", "gca", "(", ")", "hpd_", "=", "hpd", "(", "y", ",", "credible_interval", "=", "credible_interval", ",", "circular", "=", "circular", ")", "if", "smooth", ":", "if", "smooth_kwargs", "is", "None", ":", "smooth_kwargs", "=", "{", "}", "smooth_kwargs", ".", "setdefault", "(", "\"window_length\"", ",", "55", ")", "smooth_kwargs", ".", "setdefault", "(", "\"polyorder\"", ",", "2", ")", "x_data", "=", "np", ".", "linspace", "(", "x", ".", "min", "(", ")", ",", "x", ".", "max", "(", ")", ",", "200", ")", "hpd_interp", "=", "griddata", "(", "x", ",", "hpd_", ",", "x_data", ")", "y_data", "=", "savgol_filter", "(", "hpd_interp", ",", "axis", "=", "0", ",", "*", "*", "smooth_kwargs", ")", "else", ":", "idx", "=", "np", ".", "argsort", "(", "x", ")", "x_data", "=", "x", "[", "idx", "]", "y_data", "=", "hpd_", "[", "idx", "]", "ax", ".", "plot", "(", "x_data", ",", "y_data", ",", "*", "*", "plot_kwargs", ")", "ax", ".", "fill_between", "(", "x_data", ",", "y_data", "[", ":", ",", "0", "]", ",", "y_data", "[", ":", ",", "1", "]", ",", "*", "*", "fill_kwargs", ")", "return", "ax" ]
Plot hpd intervals for regression data.

Parameters
----------
x : array-like
    Values to plot
y : array-like
    values from which to compute the hpd
credible_interval : float, optional
    Credible interval to plot. Defaults to 0.94.
color : str
    Color used for the limits of the HPD interval and fill. Should be a valid matplotlib color
circular : bool, optional
    Whether to compute the hpd taking into account `x` is a circular variable
    (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).
smooth : boolean
    If True the result will be smoothed by first computing a linear interpolation of the data
    over a regular grid and then applying the Savitzky-Golay filter to the interpolated data.
    Defaults to True.
smooth_kwargs : dict, optional
    Additional keywords modifying the Savitzky-Golay filter. See Scipy's documentation for details
fill_kwargs : dict
    Keywords passed to `fill_between` (use fill_kwargs={'alpha': 0} to disable fill).
plot_kwargs : dict
    Keywords passed to HPD limits
ax : matplotlib axes

Returns
-------
ax : matplotlib axes
[ "Plot", "hpd", "intervals", "for", "regression", "data", "." ]
python
train
30.194805
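A self-contained sketch with synthetic posterior draws (500 draws over 100 x-values), matching the shape `hpd` expects for `y`:

import numpy as np

x = np.random.normal(0, 1, 100)
y_true = 2 + x * 0.5
y_rep = np.random.normal(y_true, 0.5, size=(500, 100))  # draws per x-value

ax = plot_hpd(x, y_rep, credible_interval=0.9, color='C2')
ax.plot(x, y_true, 'o', color='C0', ms=3)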
metachris/RPIO
source/RPIO/__init__.py
https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/source/RPIO/__init__.py#L203-L220
def add_interrupt_callback(gpio_id, callback, edge='both', \ pull_up_down=PUD_OFF, threaded_callback=False, \ debounce_timeout_ms=None): """ Add a callback to be executed when the value on 'gpio_id' changes to the edge specified via the 'edge' parameter (default='both'). `pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and `RPIO.PUD_OFF`. If `threaded_callback` is True, the callback will be started inside a Thread. If debounce_timeout_ms is set, new interrupts will not be forwarded until after the specified amount of milliseconds. """ _rpio.add_interrupt_callback(gpio_id, callback, edge, pull_up_down, \ threaded_callback, debounce_timeout_ms)
[ "def", "add_interrupt_callback", "(", "gpio_id", ",", "callback", ",", "edge", "=", "'both'", ",", "pull_up_down", "=", "PUD_OFF", ",", "threaded_callback", "=", "False", ",", "debounce_timeout_ms", "=", "None", ")", ":", "_rpio", ".", "add_interrupt_callback", "(", "gpio_id", ",", "callback", ",", "edge", ",", "pull_up_down", ",", "threaded_callback", ",", "debounce_timeout_ms", ")" ]
Add a callback to be executed when the value on 'gpio_id' changes to the edge specified via the 'edge' parameter (default='both'). `pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and `RPIO.PUD_OFF`. If `threaded_callback` is True, the callback will be started inside a Thread. If debounce_timeout_ms is set, new interrupts will not be forwarded until after the specified amount of milliseconds.
[ "Add", "a", "callback", "to", "be", "executed", "when", "the", "value", "on", "gpio_id", "changes", "to", "the", "edge", "specified", "via", "the", "edge", "parameter", "(", "default", "=", "both", ")", "." ]
python
train
40
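A typical usage sketch on a Raspberry Pi; GPIO 17 and the debounce value are illustrative:

import RPIO

def gpio_callback(gpio_id, value):
    print("gpio %s: %s" % (gpio_id, value))

RPIO.add_interrupt_callback(17, gpio_callback, edge='rising',
                            pull_up_down=RPIO.PUD_UP,
                            debounce_timeout_ms=100)
RPIO.wait_for_interrupts()  # blocks and dispatches the callbacks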
spdx/tools-python
spdx/parsers/tagvalue.py
https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/parsers/tagvalue.py#L1341-L1350
def p_created_1(self, p): """created : CREATED DATE""" try: if six.PY2: value = p[2].decode(encoding='utf-8') else: value = p[2] self.builder.set_created_date(self.document, value) except CardinalityError: self.more_than_one_error('Created', p.lineno(1))
[ "def", "p_created_1", "(", "self", ",", "p", ")", ":", "try", ":", "if", "six", ".", "PY2", ":", "value", "=", "p", "[", "2", "]", ".", "decode", "(", "encoding", "=", "'utf-8'", ")", "else", ":", "value", "=", "p", "[", "2", "]", "self", ".", "builder", ".", "set_created_date", "(", "self", ".", "document", ",", "value", ")", "except", "CardinalityError", ":", "self", ".", "more_than_one_error", "(", "'Created'", ",", "p", ".", "lineno", "(", "1", ")", ")" ]
created : CREATED DATE
[ "created", ":", "CREATED", "DATE" ]
python
valid
34.9
gabrielfalcao/dominic
dominic/xpath/expr.py
https://github.com/gabrielfalcao/dominic/blob/a42f418fc288f3b70cb95847b405eaf7b83bb3a0/dominic/xpath/expr.py#L17-L33
def string_value(node): """Compute the string-value of a node.""" if (node.nodeType == node.DOCUMENT_NODE or node.nodeType == node.ELEMENT_NODE): s = u'' for n in axes['descendant'](node): if n.nodeType == n.TEXT_NODE: s += n.data return s elif node.nodeType == node.ATTRIBUTE_NODE: return node.value elif (node.nodeType == node.PROCESSING_INSTRUCTION_NODE or node.nodeType == node.COMMENT_NODE or node.nodeType == node.TEXT_NODE): return node.data
[ "def", "string_value", "(", "node", ")", ":", "if", "(", "node", ".", "nodeType", "==", "node", ".", "DOCUMENT_NODE", "or", "node", ".", "nodeType", "==", "node", ".", "ELEMENT_NODE", ")", ":", "s", "=", "u''", "for", "n", "in", "axes", "[", "'descendant'", "]", "(", "node", ")", ":", "if", "n", ".", "nodeType", "==", "n", ".", "TEXT_NODE", ":", "s", "+=", "n", ".", "data", "return", "s", "elif", "node", ".", "nodeType", "==", "node", ".", "ATTRIBUTE_NODE", ":", "return", "node", ".", "value", "elif", "(", "node", ".", "nodeType", "==", "node", ".", "PROCESSING_INSTRUCTION_NODE", "or", "node", ".", "nodeType", "==", "node", ".", "COMMENT_NODE", "or", "node", ".", "nodeType", "==", "node", ".", "TEXT_NODE", ")", ":", "return", "node", ".", "data" ]
Compute the string-value of a node.
[ "Compute", "the", "string", "-", "value", "of", "a", "node", "." ]
python
train
32.117647
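A small sketch of the XPath string-value semantics implemented above, assuming string_value is importable from dominic.xpath.expr:

from xml.dom import minidom

doc = minidom.parseString('<a>Hello <b>wide</b> world</a>')
print(string_value(doc.documentElement))  # 'Hello wide world'
print(string_value(doc))                  # same text, via the descendant axis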
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/extensions_v1beta1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/extensions_v1beta1_api.py#L3759-L3787
def list_replica_set_for_all_namespaces(self, **kwargs): # noqa: E501 """list_replica_set_for_all_namespaces # noqa: E501 list or watch objects of kind ReplicaSet # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_replica_set_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. 
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1beta1ReplicaSetList If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.list_replica_set_for_all_namespaces_with_http_info(**kwargs) # noqa: E501 else: (data) = self.list_replica_set_for_all_namespaces_with_http_info(**kwargs) # noqa: E501 return data
[ "def", "list_replica_set_for_all_namespaces", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "list_replica_set_for_all_namespaces_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "list_replica_set_for_all_namespaces_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
list_replica_set_for_all_namespaces # noqa: E501 list or watch objects of kind ReplicaSet # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.list_replica_set_for_all_namespaces(async_req=True) >>> result = thread.get() :param async_req bool :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str pretty: If 'true', then the output is pretty printed. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. 
Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1beta1ReplicaSetList If the method is called asynchronously, returns the request thread.
[ "list_replica_set_for_all_namespaces", "#", "noqa", ":", "E501" ]
python
train
165.034483
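A hedged asynchronous usage sketch; credential loading is environment-specific and the limit value is illustrative:

import asyncio
from kubernetes_asyncio import client, config

async def main():
    await config.load_kube_config()  # environment-specific auth
    api = client.ExtensionsV1beta1Api()
    rs_list = await api.list_replica_set_for_all_namespaces(limit=50)
    for rs in rs_list.items:
        print(rs.metadata.namespace, rs.metadata.name)

asyncio.get_event_loop().run_until_complete(main())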
ECRL/ecabc
ecabc/abc.py
https://github.com/ECRL/ecabc/blob/4e73125ff90bfeeae359a5ab1badba8894d70eaa/ecabc/abc.py#L452-L478
def _merge_bee(self, bee):
    '''Shifts a random value for a supplied bee in accordance with
    another random bee's value

    Args:
        bee (EmployerBee): supplied bee to merge

    Returns:
        tuple: (score of new position, values of new position, fitness
            function return value of new position)
    '''
    random_dimension = randint(0, len(self._value_ranges) - 1)
    second_bee = randint(0, self._num_employers - 1)
    while (bee.id == self._employers[second_bee].id):
        second_bee = randint(0, self._num_employers - 1)
    new_bee = deepcopy(bee)
    new_bee.values[random_dimension] = self.__onlooker.calculate_positions(
        new_bee.values[random_dimension],
        self._employers[second_bee].values[random_dimension],
        self._value_ranges[random_dimension]
    )
    fitness_score = new_bee.get_score(self._fitness_fxn(
        new_bee.values,
        **self._args
    ))
    return (fitness_score, new_bee.values, new_bee.error)
[ "def", "_merge_bee", "(", "self", ",", "bee", ")", ":", "random_dimension", "=", "randint", "(", "0", ",", "len", "(", "self", ".", "_value_ranges", ")", "-", "1", ")", "second_bee", "=", "randint", "(", "0", ",", "self", ".", "_num_employers", "-", "1", ")", "while", "(", "bee", ".", "id", "==", "self", ".", "_employers", "[", "second_bee", "]", ".", "id", ")", ":", "second_bee", "=", "randint", "(", "0", ",", "self", ".", "_num_employers", "-", "1", ")", "new_bee", "=", "deepcopy", "(", "bee", ")", "new_bee", ".", "values", "[", "random_dimension", "]", "=", "self", ".", "__onlooker", ".", "calculate_positions", "(", "new_bee", ".", "values", "[", "random_dimension", "]", ",", "self", ".", "_employers", "[", "second_bee", "]", ".", "values", "[", "random_dimension", "]", ",", "self", ".", "_value_ranges", "[", "random_dimension", "]", ")", "fitness_score", "=", "new_bee", ".", "get_score", "(", "self", ".", "_fitness_fxn", "(", "new_bee", ".", "values", ",", "*", "*", "self", ".", "_args", ")", ")", "return", "(", "fitness_score", ",", "new_bee", ".", "values", ",", "new_bee", ".", "error", ")" ]
Shifts a random value for a supplied bee in accordance with another random bee's value

Args:
    bee (EmployerBee): supplied bee to merge

Returns:
    tuple: (score of new position, values of new position, fitness
        function return value of new position)
[ "Shifts", "a", "random", "value", "for", "a", "supplied", "bee", "with", "in", "accordance", "with", "another", "random", "bee", "s", "value" ]
python
train
39
IdentityPython/oidcendpoint
src/oidcendpoint/sso_db.py
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/sso_db.py#L62-L70
def map_sid2sub(self, sid, sub): """ Store the connection between a Session ID and a subject ID. :param sid: Session ID :param sub: subject ID """ self.set('sid2sub', sid, sub) self.set('sub2sid', sub, sid)
[ "def", "map_sid2sub", "(", "self", ",", "sid", ",", "sub", ")", ":", "self", ".", "set", "(", "'sid2sub'", ",", "sid", ",", "sub", ")", "self", ".", "set", "(", "'sub2sid'", ",", "sub", ",", "sid", ")" ]
Store the connection between a Session ID and a subject ID. :param sid: Session ID :param sub: subject ID
[ "Store", "the", "connection", "between", "a", "Session", "ID", "and", "a", "subject", "ID", "." ]
python
train
28.333333
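A one-line usage sketch; `sso_db` stands for an instance of the class above, and both identifiers are placeholders:

sso_db.map_sid2sub('sid-1f3a9c', 'subject-42')  # links the session to the subject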
manns/pyspread
pyspread/src/lib/_grid_cairo_renderer.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/_grid_cairo_renderer.py#L865-L911
def draw(self):
    """Draws cell content to context"""

    # Content is only rendered within rect
    self.context.save()
    self.context.rectangle(*self.rect)
    self.context.clip()

    content = self.get_cell_content()

    pos_x, pos_y = self.rect[:2]
    self.context.translate(pos_x + 2, pos_y + 2)

    cell_attributes = self.code_array.cell_attributes

    # Do not draw cell content if cell is too small
    # This allows blending out small cells by reducing height to 0

    if self.rect[2] < cell_attributes[self.key]["borderwidth_right"] or \
       self.rect[3] < cell_attributes[self.key]["borderwidth_bottom"]:
        self.context.restore()
        return

    if self.code_array.cell_attributes[self.key]["button_cell"]:
        # Render a button instead of the cell
        label = self.code_array.cell_attributes[self.key]["button_cell"]
        self.draw_button(1, 1, self.rect[2]-5, self.rect[3]-5, label)

    elif isinstance(content, wx._gdi.Bitmap):
        # A bitmap is returned --> Draw it!
        self.draw_bitmap(content)

    elif pyplot is not None and isinstance(content, pyplot.Figure):
        # A matplotlib figure is returned --> Draw it!
        self.draw_matplotlib_figure(content)

    elif isinstance(content, basestring) and is_svg(content):
        # The content is a valid SVG xml string
        self.draw_svg(content)

    elif content is not None:
        self.draw_text(content)

    self.context.translate(-pos_x - 2, -pos_y - 2)

    # Remove clipping to rect
    self.context.restore()
[ "def", "draw", "(", "self", ")", ":", "# Content is only rendered within rect", "self", ".", "context", ".", "save", "(", ")", "self", ".", "context", ".", "rectangle", "(", "*", "self", ".", "rect", ")", "self", ".", "context", ".", "clip", "(", ")", "content", "=", "self", ".", "get_cell_content", "(", ")", "pos_x", ",", "pos_y", "=", "self", ".", "rect", "[", ":", "2", "]", "self", ".", "context", ".", "translate", "(", "pos_x", "+", "2", ",", "pos_y", "+", "2", ")", "cell_attributes", "=", "self", ".", "code_array", ".", "cell_attributes", "# Do not draw cell content if cell is too small", "# This allows blending out small cells by reducing height to 0", "if", "self", ".", "rect", "[", "2", "]", "<", "cell_attributes", "[", "self", ".", "key", "]", "[", "\"borderwidth_right\"", "]", "or", "self", ".", "rect", "[", "3", "]", "<", "cell_attributes", "[", "self", ".", "key", "]", "[", "\"borderwidth_bottom\"", "]", ":", "self", ".", "context", ".", "restore", "(", ")", "return", "if", "self", ".", "code_array", ".", "cell_attributes", "[", "self", ".", "key", "]", "[", "\"button_cell\"", "]", ":", "# Render a button instead of the cell", "label", "=", "self", ".", "code_array", ".", "cell_attributes", "[", "self", ".", "key", "]", "[", "\"button_cell\"", "]", "self", ".", "draw_button", "(", "1", ",", "1", ",", "self", ".", "rect", "[", "2", "]", "-", "5", ",", "self", ".", "rect", "[", "3", "]", "-", "5", ",", "label", ")", "elif", "isinstance", "(", "content", ",", "wx", ".", "_gdi", ".", "Bitmap", ")", ":", "# A bitmap is returned --> Draw it!", "self", ".", "draw_bitmap", "(", "content", ")", "elif", "pyplot", "is", "not", "None", "and", "isinstance", "(", "content", ",", "pyplot", ".", "Figure", ")", ":", "# A matplotlib figure is returned --> Draw it!", "self", ".", "draw_matplotlib_figure", "(", "content", ")", "elif", "isinstance", "(", "content", ",", "basestring", ")", "and", "is_svg", "(", "content", ")", ":", "# The content is a vaid SVG xml string", "self", ".", "draw_svg", "(", "content", ")", "elif", "content", "is", "not", "None", ":", "self", ".", "draw_text", "(", "content", ")", "self", ".", "context", ".", "translate", "(", "-", "pos_x", "-", "2", ",", "-", "pos_y", "-", "2", ")", "# Remove clipping to rect", "self", ".", "context", ".", "restore", "(", ")" ]
Draws cell content to context
[ "Draws", "cell", "content", "to", "context" ]
python
train
34.553191
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/mavproxy.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/mavproxy.py#L108-L112
def write(self): '''write status to status.txt''' f = open('status.txt', mode='w') self.show(f) f.close()
[ "def", "write", "(", "self", ")", ":", "f", "=", "open", "(", "'status.txt'", ",", "mode", "=", "'w'", ")", "self", ".", "show", "(", "f", ")", "f", ".", "close", "(", ")" ]
write status to status.txt
[ "write", "status", "to", "status", ".", "txt" ]
python
train
26.6
sdss/sdss_access
python/sdss_access/misc/docupaths.py
https://github.com/sdss/sdss_access/blob/76375bbf37d39d2e4ccbed90bdfa9a4298784470/python/sdss_access/misc/docupaths.py#L34-L64
def _format_templates(name, command, templates): ''' Creates a list-table directive for a set of defined path templates Parameters: name (str): The name of the config section command (object): The sdss_access path instance templates (dict): A dictionary of the path templates Yields: A string rst-formatted list-table directive ''' yield '.. list-table:: {0}'.format(name) yield _indent(':widths: 20 50 70') yield _indent(':header-rows: 1') yield '' yield _indent('* - Name') yield _indent(' - Template') yield _indent(' - Kwargs') for key, var in templates.items(): kwargs = command.lookup_keys(key) yield _indent('* - {0}'.format(key)) yield _indent(' - {0}'.format(var)) yield _indent(' - {0}'.format(', '.join(kwargs))) yield ''
[ "def", "_format_templates", "(", "name", ",", "command", ",", "templates", ")", ":", "yield", "'.. list-table:: {0}'", ".", "format", "(", "name", ")", "yield", "_indent", "(", "':widths: 20 50 70'", ")", "yield", "_indent", "(", "':header-rows: 1'", ")", "yield", "''", "yield", "_indent", "(", "'* - Name'", ")", "yield", "_indent", "(", "' - Template'", ")", "yield", "_indent", "(", "' - Kwargs'", ")", "for", "key", ",", "var", "in", "templates", ".", "items", "(", ")", ":", "kwargs", "=", "command", ".", "lookup_keys", "(", "key", ")", "yield", "_indent", "(", "'* - {0}'", ".", "format", "(", "key", ")", ")", "yield", "_indent", "(", "' - {0}'", ".", "format", "(", "var", ")", ")", "yield", "_indent", "(", "' - {0}'", ".", "format", "(", "', '", ".", "join", "(", "kwargs", ")", ")", ")", "yield", "''" ]
Creates a list-table directive for a set of defined path templates Parameters: name (str): The name of the config section command (object): The sdss_access path instance templates (dict): A dictionary of the path templates Yields: A string rst-formatted list-table directive
[ "Creates", "a", "list", "-", "table", "directive" ]
python
train
28.16129
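Because `_format_templates` yields rst lines one at a time, a caller would typically join them; a sketch using a stub in place of the sdss_access path instance (the stub class and template value are illustrative):

    class StubPath:  # stands in for an sdss_access path instance
        def lookup_keys(self, key):
            return ["mjd", "camera"]

    templates = {"spec": "$SAS_BASE/{mjd}/{camera}.fits"}
    rst = "\n".join(_format_templates("general", StubPath(), templates))
    print(rst)  # a ".. list-table:: general" directive with one row per template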
aldur/cryptonator
cryptonator.py
https://github.com/aldur/cryptonator/blob/4e98bc7ddb19f9da2388e241ea90c89798718dd6/cryptonator.py#L43-L67
def get_exchange_rate(self, base, target, raise_errors=True): """Return the ::base:: to ::target:: exchange rate.""" assert base and target base, target = base.lower(), target.lower() r = self.session.get(API_SIMPLE_TICKER.format(base, target)) if r.status_code != requests.codes.ok: if not raise_errors: return None raise CryptonatorException( ("An error occurred while getting requested exchange rate " "({} from Cryptonator).").format(r.status_code) ) j = r.json() if not j['success'] or j['error']: if not raise_errors: return None raise CryptonatorException( ("An error occurred while getting requested exchange rate ({}, {})" "('{}').").format(base, target, j['error']) ) return float(j['ticker']['price'])
[ "def", "get_exchange_rate", "(", "self", ",", "base", ",", "target", ",", "raise_errors", "=", "True", ")", ":", "assert", "base", "and", "target", "base", ",", "target", "=", "base", ".", "lower", "(", ")", ",", "target", ".", "lower", "(", ")", "r", "=", "self", ".", "session", ".", "get", "(", "API_SIMPLE_TICKER", ".", "format", "(", "base", ",", "target", ")", ")", "if", "r", ".", "status_code", "!=", "requests", ".", "codes", ".", "ok", ":", "if", "not", "raise_errors", ":", "return", "None", "raise", "CryptonatorException", "(", "(", "\"An error occurred while getting requested exchange rate \"", "\"({} from Cryptonator).\"", ")", ".", "format", "(", "r", ".", "status_code", ")", ")", "j", "=", "r", ".", "json", "(", ")", "if", "not", "j", "[", "'success'", "]", "or", "j", "[", "'error'", "]", ":", "if", "not", "raise_errors", ":", "return", "None", "raise", "CryptonatorException", "(", "(", "\"An error occurred while getting requested exchange rate ({}, {})\"", "\"('{}').\"", ")", ".", "format", "(", "base", ",", "target", ",", "j", "[", "'error'", "]", ")", ")", "return", "float", "(", "j", "[", "'ticker'", "]", "[", "'price'", "]", ")" ]
Return the ::base:: to ::target:: exchange rate.
[ "Return", "the", "::", "base", "::", "to", "::", "target", "::", "exchange", "rate", "." ]
python
train
37.04
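A usage sketch for the record above; the `Cryptonator` wrapper class and the exception import are assumed to be the package's public entry points:

    from cryptonator import Cryptonator, CryptonatorException  # names assumed

    api = Cryptonator()
    try:
        rate = api.get_exchange_rate("BTC", "USD")
        print("1 BTC = {} USD".format(rate))
    except CryptonatorException as exc:
        print("lookup failed: {}".format(exc))
    # Alternatively, suppress errors and receive None on failure:
    rate = api.get_exchange_rate("BTC", "EUR", raise_errors=False)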
DataBiosphere/toil
src/toil/leader.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/leader.py#L689-L696
def killJobs(self, jobsToKill): """ Kills the given set of jobs and then sends them for processing """ if len(jobsToKill) > 0: self.batchSystem.killBatchJobs(jobsToKill) for jobBatchSystemID in jobsToKill: self.processFinishedJob(jobBatchSystemID, 1)
[ "def", "killJobs", "(", "self", ",", "jobsToKill", ")", ":", "if", "len", "(", "jobsToKill", ")", ">", "0", ":", "self", ".", "batchSystem", ".", "killBatchJobs", "(", "jobsToKill", ")", "for", "jobBatchSystemID", "in", "jobsToKill", ":", "self", ".", "processFinishedJob", "(", "jobBatchSystemID", ",", "1", ")" ]
Kills the given set of jobs and then sends them for processing
[ "Kills", "the", "given", "set", "of", "jobs", "and", "then", "sends", "them", "for", "processing" ]
python
train
39.375
welbornprod/colr
colr/colr.py
https://github.com/welbornprod/colr/blob/417117fdbddbc53142096685ac2af006b2bd0220/colr/colr.py#L1065-L1108
def _iter_text_wave( self, text, numbers, step=1, fore=None, back=None, style=None, rgb_mode=False): """ Yield colorized characters from `text`, using a wave of `numbers`. Arguments: text : String to be colorized. numbers : A list/tuple of numbers (256 colors). step : Number of characters to colorize per color. fore : Fore color to use (name or number). (Back will be gradient) back : Background color to use (name or number). (Fore will be gradient) style : Style name to use. rgb_mode : Use number for rgb value. This should never be used when the numbers are rgb values themselves. """ if fore and back: raise ValueError('Both fore and back colors cannot be specified.') pos = 0 end = len(text) numbergen = self._iter_wave(numbers) def make_color(n): try: r, g, b = n except TypeError: if rgb_mode: return n, n, n return n return r, g, b for value in numbergen: lastchar = pos + step yield self.color( text[pos:lastchar], fore=make_color(value) if fore is None else fore, back=make_color(value) if fore is not None else back, style=style ) if lastchar >= end: numbergen.send(True) pos = lastchar
[ "def", "_iter_text_wave", "(", "self", ",", "text", ",", "numbers", ",", "step", "=", "1", ",", "fore", "=", "None", ",", "back", "=", "None", ",", "style", "=", "None", ",", "rgb_mode", "=", "False", ")", ":", "if", "fore", "and", "back", ":", "raise", "ValueError", "(", "'Both fore and back colors cannot be specified.'", ")", "pos", "=", "0", "end", "=", "len", "(", "text", ")", "numbergen", "=", "self", ".", "_iter_wave", "(", "numbers", ")", "def", "make_color", "(", "n", ")", ":", "try", ":", "r", ",", "g", ",", "b", "=", "n", "except", "TypeError", ":", "if", "rgb_mode", ":", "return", "n", ",", "n", ",", "n", "return", "n", "return", "r", ",", "g", ",", "b", "for", "value", "in", "numbergen", ":", "lastchar", "=", "pos", "+", "step", "yield", "self", ".", "color", "(", "text", "[", "pos", ":", "lastchar", "]", ",", "fore", "=", "make_color", "(", "value", ")", "if", "fore", "is", "None", "else", "fore", ",", "back", "=", "make_color", "(", "value", ")", "if", "fore", "is", "not", "None", "else", "back", ",", "style", "=", "style", ")", "if", "lastchar", ">=", "end", ":", "numbergen", ".", "send", "(", "True", ")", "pos", "=", "lastchar" ]
Yield colorized characters from `text`, using a wave of `numbers`. Arguments: text : String to be colorized. numbers : A list/tuple of numbers (256 colors). step : Number of characters to colorize per color. fore : Fore color to use (name or number). (Back will be gradient) back : Background color to use (name or number). (Fore will be gradient) style : Style name to use. rgb_mode : Use number for rgb value. This should never be used when the numbers are rgb values themselves.
[ "Yield", "colorized", "characters", "from", "text", "using", "a", "wave", "of", "numbers", ".", "Arguments", ":", "text", ":", "String", "to", "be", "colorized", ".", "numbers", ":", "A", "list", "/", "tuple", "of", "numbers", "(", "256", "colors", ")", ".", "step", ":", "Number", "of", "characters", "to", "colorize", "per", "color", ".", "fore", ":", "Fore", "color", "to", "use", "(", "name", "or", "number", ")", ".", "(", "Back", "will", "be", "gradient", ")", "back", ":", "Background", "color", "to", "use", "(", "name", "or", "number", ")", ".", "(", "Fore", "will", "be", "gradient", ")", "style", ":", "Style", "name", "to", "use", ".", "rgb_mode", ":", "Use", "number", "for", "rgb", "value", ".", "This", "should", "never", "be", "used", "when", "the", "numbers", "are", "rgb", "values", "themselves", "." ]
python
train
37.886364
twilio/twilio-python
twilio/rest/taskrouter/v1/workspace/event.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/event.py#L137-L189
def page(self, end_date=values.unset, event_type=values.unset, minutes=values.unset, reservation_sid=values.unset, start_date=values.unset, task_queue_sid=values.unset, task_sid=values.unset, worker_sid=values.unset, workflow_sid=values.unset, task_channel=values.unset, sid=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of EventInstance records from the API. Request is executed immediately :param datetime end_date: Filter events by an end date. :param unicode event_type: Filter events by those of a certain event type :param unicode minutes: Filter events by up to 'x' minutes in the past. :param unicode reservation_sid: Filter events by those pertaining to a particular reservation :param datetime start_date: Filter events by a start date. :param unicode task_queue_sid: Filter events by those pertaining to a particular queue :param unicode task_sid: Filter events by those pertaining to a particular task :param unicode worker_sid: Filter events by those pertaining to a particular worker :param unicode workflow_sid: Filter events by those pertaining to a particular workflow :param unicode task_channel: Filter events by those pertaining to a particular task channel :param unicode sid: Filter events by those pertaining to a particular event :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of EventInstance :rtype: twilio.rest.taskrouter.v1.workspace.event.EventPage """ params = values.of({ 'EndDate': serialize.iso8601_datetime(end_date), 'EventType': event_type, 'Minutes': minutes, 'ReservationSid': reservation_sid, 'StartDate': serialize.iso8601_datetime(start_date), 'TaskQueueSid': task_queue_sid, 'TaskSid': task_sid, 'WorkerSid': worker_sid, 'WorkflowSid': workflow_sid, 'TaskChannel': task_channel, 'Sid': sid, 'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page( 'GET', self._uri, params=params, ) return EventPage(self._version, response, self._solution)
[ "def", "page", "(", "self", ",", "end_date", "=", "values", ".", "unset", ",", "event_type", "=", "values", ".", "unset", ",", "minutes", "=", "values", ".", "unset", ",", "reservation_sid", "=", "values", ".", "unset", ",", "start_date", "=", "values", ".", "unset", ",", "task_queue_sid", "=", "values", ".", "unset", ",", "task_sid", "=", "values", ".", "unset", ",", "worker_sid", "=", "values", ".", "unset", ",", "workflow_sid", "=", "values", ".", "unset", ",", "task_channel", "=", "values", ".", "unset", ",", "sid", "=", "values", ".", "unset", ",", "page_token", "=", "values", ".", "unset", ",", "page_number", "=", "values", ".", "unset", ",", "page_size", "=", "values", ".", "unset", ")", ":", "params", "=", "values", ".", "of", "(", "{", "'EndDate'", ":", "serialize", ".", "iso8601_datetime", "(", "end_date", ")", ",", "'EventType'", ":", "event_type", ",", "'Minutes'", ":", "minutes", ",", "'ReservationSid'", ":", "reservation_sid", ",", "'StartDate'", ":", "serialize", ".", "iso8601_datetime", "(", "start_date", ")", ",", "'TaskQueueSid'", ":", "task_queue_sid", ",", "'TaskSid'", ":", "task_sid", ",", "'WorkerSid'", ":", "worker_sid", ",", "'WorkflowSid'", ":", "workflow_sid", ",", "'TaskChannel'", ":", "task_channel", ",", "'Sid'", ":", "sid", ",", "'PageToken'", ":", "page_token", ",", "'Page'", ":", "page_number", ",", "'PageSize'", ":", "page_size", ",", "}", ")", "response", "=", "self", ".", "_version", ".", "page", "(", "'GET'", ",", "self", ".", "_uri", ",", "params", "=", "params", ",", ")", "return", "EventPage", "(", "self", ".", "_version", ",", "response", ",", "self", ".", "_solution", ")" ]
Retrieve a single page of EventInstance records from the API. Request is executed immediately :param datetime end_date: Filter events by an end date. :param unicode event_type: Filter events by those of a certain event type :param unicode minutes: Filter events by up to 'x' minutes in the past. :param unicode reservation_sid: Filter events by those pertaining to a particular reservation :param datetime start_date: Filter events by a start date. :param unicode task_queue_sid: Filter events by those pertaining to a particular queue :param unicode task_sid: Filter events by those pertaining to a particular task :param unicode worker_sid: Filter events by those pertaining to a particular worker :param unicode workflow_sid: Filter events by those pertaining to a particular workflow :param unicode task_channel: Filter events by those pertaining to a particular task channel :param unicode sid: Filter events by those pertaining to a particular event :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of EventInstance :rtype: twilio.rest.taskrouter.v1.workspace.event.EventPage
[ "Retrieve", "a", "single", "page", "of", "EventInstance", "records", "from", "the", "API", ".", "Request", "is", "executed", "immediately" ]
python
train
48.962264
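In practice this `page()` is reached through the Twilio REST client rather than called directly; a sketch with placeholder credentials and SIDs (filter values are illustrative, and iteration details may differ by library version):

    from datetime import datetime, timedelta
    from twilio.rest import Client

    client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")
    workspace = client.taskrouter.workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
    page = workspace.events.page(
        event_type="task.created",
        start_date=datetime.utcnow() - timedelta(hours=1),
        page_size=20,
    )
    for event in page:
        print(event.sid, event.event_type)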
inspirehep/refextract
refextract/references/engine.py
https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/engine.py#L639-L716
def parse_reference_line(ref_line, kbs, bad_titles_count={}, linker_callback=None): """Parse one reference line @input a string representing a single reference bullet @output parsed references (a list of elements objects) """ # Strip the 'marker' (e.g. [1]) from this reference line: line_marker, ref_line = remove_reference_line_marker(ref_line) # Find DOI sections in citation ref_line, identified_dois = identify_and_tag_DOI(ref_line) # Identify and replace URLs in the line: ref_line, identified_urls = identify_and_tag_URLs(ref_line) # Tag <cds.JOURNAL>, etc. tagged_line, bad_titles_count = tag_reference_line(ref_line, kbs, bad_titles_count) # Debug print tagging (authors, titles, volumes, etc.) LOGGER.debug("tags %r", tagged_line) # Using the recorded information, create a MARC XML representation # of the rebuilt line: # At the same time, get stats of citations found in the reference line # (titles, urls, etc): citation_elements, line_marker, counts = \ parse_tagged_reference_line(line_marker, tagged_line, identified_dois, identified_urls) # Transformations on elements split_volume_from_journal(citation_elements) format_volume(citation_elements) handle_special_journals(citation_elements, kbs) format_report_number(citation_elements) format_author_ed(citation_elements) look_for_books(citation_elements, kbs) format_hep(citation_elements) remove_b_for_nucl_phys(citation_elements) mangle_volume(citation_elements) arxiv_urls_to_report_numbers(citation_elements) look_for_hdl(citation_elements) look_for_hdl_urls(citation_elements) # Link references if desired if linker_callback: associate_recids(citation_elements, linker_callback) # Split the reference in multiple ones if needed splitted_citations = split_citations(citation_elements) # Look for implied ibids look_for_implied_ibids(splitted_citations) # Find year add_year_elements(splitted_citations) # Look for books in misc field look_for_undetected_books(splitted_citations, kbs) if linker_callback: # Link references with the newly added ibids/books information for citations in splitted_citations: associate_recids(citations, linker_callback) # FIXME: Needed? # Remove references with only misc text # splitted_citations = remove_invalid_references(splitted_citations) # Merge references with only misc text # splitted_citations = merge_invalid_references(splitted_citations) remove_duplicated_authors(splitted_citations) remove_duplicated_dois(splitted_citations) remove_duplicated_collaborations(splitted_citations) add_recid_elements(splitted_citations) # For debugging purposes print_citations(splitted_citations, line_marker) return splitted_citations, line_marker, counts, bad_titles_count
[ "def", "parse_reference_line", "(", "ref_line", ",", "kbs", ",", "bad_titles_count", "=", "{", "}", ",", "linker_callback", "=", "None", ")", ":", "# Strip the 'marker' (e.g. [1]) from this reference line:", "line_marker", ",", "ref_line", "=", "remove_reference_line_marker", "(", "ref_line", ")", "# Find DOI sections in citation", "ref_line", ",", "identified_dois", "=", "identify_and_tag_DOI", "(", "ref_line", ")", "# Identify and replace URLs in the line:", "ref_line", ",", "identified_urls", "=", "identify_and_tag_URLs", "(", "ref_line", ")", "# Tag <cds.JOURNAL>, etc.", "tagged_line", ",", "bad_titles_count", "=", "tag_reference_line", "(", "ref_line", ",", "kbs", ",", "bad_titles_count", ")", "# Debug print tagging (authors, titles, volumes, etc.)", "LOGGER", ".", "debug", "(", "\"tags %r\"", ",", "tagged_line", ")", "# Using the recorded information, create a MARC XML representation", "# of the rebuilt line:", "# At the same time, get stats of citations found in the reference line", "# (titles, urls, etc):", "citation_elements", ",", "line_marker", ",", "counts", "=", "parse_tagged_reference_line", "(", "line_marker", ",", "tagged_line", ",", "identified_dois", ",", "identified_urls", ")", "# Transformations on elements", "split_volume_from_journal", "(", "citation_elements", ")", "format_volume", "(", "citation_elements", ")", "handle_special_journals", "(", "citation_elements", ",", "kbs", ")", "format_report_number", "(", "citation_elements", ")", "format_author_ed", "(", "citation_elements", ")", "look_for_books", "(", "citation_elements", ",", "kbs", ")", "format_hep", "(", "citation_elements", ")", "remove_b_for_nucl_phys", "(", "citation_elements", ")", "mangle_volume", "(", "citation_elements", ")", "arxiv_urls_to_report_numbers", "(", "citation_elements", ")", "look_for_hdl", "(", "citation_elements", ")", "look_for_hdl_urls", "(", "citation_elements", ")", "# Link references if desired", "if", "linker_callback", ":", "associate_recids", "(", "citation_elements", ",", "linker_callback", ")", "# Split the reference in multiple ones if needed", "splitted_citations", "=", "split_citations", "(", "citation_elements", ")", "# Look for implied ibids", "look_for_implied_ibids", "(", "splitted_citations", ")", "# Find year", "add_year_elements", "(", "splitted_citations", ")", "# Look for books in misc field", "look_for_undetected_books", "(", "splitted_citations", ",", "kbs", ")", "if", "linker_callback", ":", "# Link references with the newly added ibids/books information", "for", "citations", "in", "splitted_citations", ":", "associate_recids", "(", "citations", ",", "linker_callback", ")", "# FIXME: Needed?", "# Remove references with only misc text", "# splitted_citations = remove_invalid_references(splitted_citations)", "# Merge references with only misc text", "# splitted_citations = merge_invalid_references(splitted_citations)", "remove_duplicated_authors", "(", "splitted_citations", ")", "remove_duplicated_dois", "(", "splitted_citations", ")", "remove_duplicated_collaborations", "(", "splitted_citations", ")", "add_recid_elements", "(", "splitted_citations", ")", "# For debugging purposes", "print_citations", "(", "splitted_citations", ",", "line_marker", ")", "return", "splitted_citations", ",", "line_marker", ",", "counts", ",", "bad_titles_count" ]
Parse one reference line @input a string representing a single reference bullet @output parsed references (a list of elements objects)
[ "Parse", "one", "reference", "line" ]
python
train
39.615385
saltstack/salt
salt/modules/azurearm_network.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/azurearm_network.py#L447-L480
def security_rule_get(security_rule, security_group, resource_group, **kwargs): ''' .. versionadded:: 2019.2.0 Get a security rule within a specified network security group. :param security_rule: The name of the security rule to query. :param security_group: The network security group containing the security rule. :param resource_group: The resource group name assigned to the network security group. CLI Example: .. code-block:: bash salt-call azurearm_network.security_rule_get testrule1 testnsg testgroup ''' netconn = __utils__['azurearm.get_client']('network', **kwargs) try: secrule = netconn.security_rules.get( network_security_group_name=security_group, resource_group_name=resource_group, security_rule_name=security_rule ) result = secrule.as_dict() except CloudError as exc: __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs) result = {'error': str(exc)} return result
[ "def", "security_rule_get", "(", "security_rule", ",", "security_group", ",", "resource_group", ",", "*", "*", "kwargs", ")", ":", "netconn", "=", "__utils__", "[", "'azurearm.get_client'", "]", "(", "'network'", ",", "*", "*", "kwargs", ")", "try", ":", "secrule", "=", "netconn", ".", "security_rules", ".", "get", "(", "network_security_group_name", "=", "security_group", ",", "resource_group_name", "=", "resource_group", ",", "security_rule_name", "=", "security_rule", ")", "result", "=", "secrule", ".", "as_dict", "(", ")", "except", "CloudError", "as", "exc", ":", "__utils__", "[", "'azurearm.log_cloud_error'", "]", "(", "'network'", ",", "str", "(", "exc", ")", ",", "*", "*", "kwargs", ")", "result", "=", "{", "'error'", ":", "str", "(", "exc", ")", "}", "return", "result" ]
.. versionadded:: 2019.2.0 Get a security rule within a specified network security group. :param security_rule: The name of the security rule to query. :param security_group: The network security group containing the security rule. :param resource_group: The resource group name assigned to the network security group. CLI Example: .. code-block:: bash salt-call azurearm_network.security_rule_get testrule1 testnsg testgroup
[ "..", "versionadded", "::", "2019", ".", "2", ".", "0" ]
python
train
29.970588
hagenw/sphinxcontrib-katex
sphinxcontrib/katex.py
https://github.com/hagenw/sphinxcontrib-katex/blob/52e235b93a2471df9a7477e04b697e4274399623/sphinxcontrib/katex.py#L159-L177
def katex_rendering_delimiters(app): """Delimiters for rendering KaTeX math. If no delimiters are specified in katex_options, add the katex_inline and katex_display delimiters. See also https://khan.github.io/KaTeX/docs/autorender.html """ # Return if we have user defined rendering delimiters if 'delimiters' in app.config.katex_options: return '' katex_inline = [d.replace('\\', '\\\\') for d in app.config.katex_inline] katex_display = [d.replace('\\', '\\\\') for d in app.config.katex_display] katex_delimiters = {'inline': katex_inline, 'display': katex_display} # Set chosen delimiters for the auto-rendering options of KaTeX delimiters = r'''delimiters: [ {{ left: "{inline[0]}", right: "{inline[1]}", display: false }}, {{ left: "{display[0]}", right: "{display[1]}", display: true }} ]'''.format(**katex_delimiters) return delimiters
[ "def", "katex_rendering_delimiters", "(", "app", ")", ":", "# Return if we have user defined rendering delimiters", "if", "'delimiters'", "in", "app", ".", "config", ".", "katex_options", ":", "return", "''", "katex_inline", "=", "[", "d", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", "for", "d", "in", "app", ".", "config", ".", "katex_inline", "]", "katex_display", "=", "[", "d", ".", "replace", "(", "'\\\\'", ",", "'\\\\\\\\'", ")", "for", "d", "in", "app", ".", "config", ".", "katex_display", "]", "katex_delimiters", "=", "{", "'inline'", ":", "katex_inline", ",", "'display'", ":", "katex_display", "}", "# Set chosen delimiters for the auto-rendering options of KaTeX", "delimiters", "=", "r'''delimiters: [\n {{ left: \"{inline[0]}\", right: \"{inline[1]}\", display: false }},\n {{ left: \"{display[0]}\", right: \"{display[1]}\", display: true }}\n ]'''", ".", "format", "(", "*", "*", "katex_delimiters", ")", "return", "delimiters" ]
Delimiters for rendering KaTeX math. If no delimiters are specified in katex_options, add the katex_inline and katex_display delimiters. See also https://khan.github.io/KaTeX/docs/autorender.html
[ "Delimiters", "for", "rendering", "KaTeX", "math", "." ]
python
train
47.894737
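The helper above feeds KaTeX's auto-render options, which are controlled from a project's conf.py; a sketch of the user-facing settings (option names per the record; values illustrative):

    # conf.py (sketch)
    extensions = ["sphinxcontrib.katex"]

    # Defaults that katex_rendering_delimiters() escapes for JavaScript:
    katex_inline = [r"\(", r"\)"]
    katex_display = [r"\[", r"\]"]

    # If 'delimiters' is put into katex_options instead, the helper returns ''.
    katex_options = r"""delimiters: [
        {left: "$$", right: "$$", display: true}
    ]"""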
log2timeline/plaso
plaso/storage/sqlite/sqlite_file.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/storage/sqlite/sqlite_file.py#L1074-L1090
def ReadPreprocessingInformation(self, knowledge_base): """Reads preprocessing information. The preprocessing information contains the system configuration which contains information about various system specific configuration data, for example the user accounts. Args: knowledge_base (KnowledgeBase): is used to store the preprocessing information. """ generator = self._GetAttributeContainers( self._CONTAINER_TYPE_SYSTEM_CONFIGURATION) for stream_number, system_configuration in enumerate(generator): # TODO: replace stream_number by session_identifier. knowledge_base.ReadSystemConfigurationArtifact( system_configuration, session_identifier=stream_number)
[ "def", "ReadPreprocessingInformation", "(", "self", ",", "knowledge_base", ")", ":", "generator", "=", "self", ".", "_GetAttributeContainers", "(", "self", ".", "_CONTAINER_TYPE_SYSTEM_CONFIGURATION", ")", "for", "stream_number", ",", "system_configuration", "in", "enumerate", "(", "generator", ")", ":", "# TODO: replace stream_number by session_identifier.", "knowledge_base", ".", "ReadSystemConfigurationArtifact", "(", "system_configuration", ",", "session_identifier", "=", "stream_number", ")" ]
Reads preprocessing information. The preprocessing information contains the system configuration which contains information about various system specific configuration data, for example the user accounts. Args: knowledge_base (KnowledgeBase): is used to store the preprocessing information.
[ "Reads", "preprocessing", "information", "." ]
python
train
42.588235
joshspeagle/dynesty
dynesty/bounding.py
https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/bounding.py#L431-L484
def sample(self, rstate=None, return_q=False): """ Sample a point uniformly distributed within the *union* of ellipsoids. Returns ------- x : `~numpy.ndarray` with shape (ndim,) A coordinate within the set of ellipsoids. idx : int The index of the ellipsoid `x` was sampled from. q : int, optional The number of ellipsoids `x` falls within. """ if rstate is None: rstate = np.random # If there is only one ellipsoid, sample from it. if self.nells == 1: x = self.ells[0].sample(rstate=rstate) idx = 0 q = 1 if return_q: return x, idx, q else: return x, idx # Select an ellipsoid at random proportional to its volume. idx = rstate.choice(self.nells, p=self.vols/self.vol_tot) # Select a point from the chosen ellipsoid. x = self.ells[idx].sample(rstate=rstate) # Check how many ellipsoids the point lies within, passing over # the `idx`-th ellipsoid `x` was sampled from. q = self.overlap(x, j=idx) + 1 if return_q: # If `q` is being returned, assume the user wants to # explicitly apply the `1. / q` acceptance criterion to # properly sample from the union of ellipsoids. return x, idx, q else: # If `q` is not being returned, assume the user wants this # done internally. while rstate.rand() > (1. / q): idx = rstate.choice(self.nells, p=self.vols/self.vol_tot) x = self.ells[idx].sample(rstate=rstate) q = self.overlap(x, j=idx) + 1 return x, idx
[ "def", "sample", "(", "self", ",", "rstate", "=", "None", ",", "return_q", "=", "False", ")", ":", "if", "rstate", "is", "None", ":", "rstate", "=", "np", ".", "random", "# If there is only one ellipsoid, sample from it.", "if", "self", ".", "nells", "==", "1", ":", "x", "=", "self", ".", "ells", "[", "0", "]", ".", "sample", "(", "rstate", "=", "rstate", ")", "idx", "=", "0", "q", "=", "1", "if", "return_q", ":", "return", "x", ",", "idx", ",", "q", "else", ":", "return", "x", ",", "idx", "# Select an ellipsoid at random proportional to its volume.", "idx", "=", "rstate", ".", "choice", "(", "self", ".", "nells", ",", "p", "=", "self", ".", "vols", "/", "self", ".", "vol_tot", ")", "# Select a point from the chosen ellipsoid.", "x", "=", "self", ".", "ells", "[", "idx", "]", ".", "sample", "(", "rstate", "=", "rstate", ")", "# Check how many ellipsoids the point lies within, passing over", "# the `idx`-th ellipsoid `x` was sampled from.", "q", "=", "self", ".", "overlap", "(", "x", ",", "j", "=", "idx", ")", "+", "1", "if", "return_q", ":", "# If `q` is being returned, assume the user wants to", "# explicitly apply the `1. / q` acceptance criterion to", "# properly sample from the union of ellipsoids.", "return", "x", ",", "idx", ",", "q", "else", ":", "# If `q` is not being returned, assume the user wants this", "# done internally.", "while", "rstate", ".", "rand", "(", ")", ">", "(", "1.", "/", "q", ")", ":", "idx", "=", "rstate", ".", "choice", "(", "self", ".", "nells", ",", "p", "=", "self", ".", "vols", "/", "self", ".", "vol_tot", ")", "x", "=", "self", ".", "ells", "[", "idx", "]", ".", "sample", "(", "rstate", "=", "rstate", ")", "q", "=", "self", ".", "overlap", "(", "x", ",", "j", "=", "idx", ")", "+", "1", "return", "x", ",", "idx" ]
Sample a point uniformly distributed within the *union* of ellipsoids. Returns ------- x : `~numpy.ndarray` with shape (ndim,) A coordinate within the set of ellipsoids. idx : int The index of the ellipsoid `x` was sampled from. q : int, optional The number of ellipsoids `x` falls within.
[ "Sample", "a", "point", "uniformly", "distributed", "within", "the", "*", "union", "*", "of", "ellipsoids", "." ]
python
train
32.388889
agile4you/bottle-neck
bottle_neck/routing.py
https://github.com/agile4you/bottle-neck/blob/ebc670a4b178255473d68e9b4122ba04e38f4810/bottle_neck/routing.py#L130-L142
def mount(self, app=None): """Mounts all registered routes to a bottle.py application instance. Args: app (instance): A `bottle.Bottle()` application instance. Returns: The Router instance (for chaining purposes). """ for endpoint in self._routes: endpoint.register_app(app) return self
[ "def", "mount", "(", "self", ",", "app", "=", "None", ")", ":", "for", "endpoint", "in", "self", ".", "_routes", ":", "endpoint", ".", "register_app", "(", "app", ")", "return", "self" ]
Mounts all registered routes to a bottle.py application instance. Args: app (instance): A `bottle.Bottle()` application instance. Returns: The Router instance (for chaining purposes).
[ "Mounts", "all", "registered", "routes", "to", "a", "bottle", ".", "py", "application", "instance", "." ]
python
train
27.769231
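A sketch of the chaining the docstring promises; how endpoints get into `_routes` is not shown by the record, so the registration step is only a placeholder comment:

    import bottle
    from bottle_neck.routing import Router  # import path taken from the record

    app = bottle.Bottle()
    router = Router()
    # ... endpoint registration calls would populate router._routes (API assumed) ...
    router.mount(app)  # registers every endpoint on `app` and returns the router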
hactar-is/frink
frink/orm.py
https://github.com/hactar-is/frink/blob/0d2c11daca8ef6d4365e98914bdc0bc65478ae72/frink/orm.py#L33-L90
def save(self): """ Save the current instance to the DB """ with rconnect() as conn: try: self.validate() except ValidationError as e: log.warn(e.messages) raise except ModelValidationError as e: log.warn(e.messages) raise except ModelConversionError as e: log.warn(e.messages) raise except ValueError as e: log.warn(e) raise except FrinkError as e: log.warn(e.messages) raise except Exception as e: log.warn(e) raise else: # If this is a new unsaved object, it'll likely have an # id of None, which RethinkDB won't like. So if it's None, # generate a UUID for it. If the save fails, we should re-set # it to None. if self.id is None: self.id = str(uuid.uuid4()) log.debug(self.id) try: query = r.db(self._db).table(self._table).insert( self.to_primitive(), conflict="replace" ) log.debug(query) rv = query.run(conn) # Returns something like this: # { # u'errors': 0, # u'deleted': 0, # u'generated_keys': [u'dd8ad1bc-8609-4484-b6c4-ed96c72c03f2'], # u'unchanged': 0, # u'skipped': 0, # u'replaced': 0, # u'inserted': 1 # } log.debug(rv) except Exception as e: log.warn(e) self.id = None raise else: return self
[ "def", "save", "(", "self", ")", ":", "with", "rconnect", "(", ")", "as", "conn", ":", "try", ":", "self", ".", "validate", "(", ")", "except", "ValidationError", "as", "e", ":", "log", ".", "warn", "(", "e", ".", "messages", ")", "raise", "except", "ModelValidationError", "as", "e", ":", "log", ".", "warn", "(", "e", ".", "messages", ")", "raise", "except", "ModelConversionError", "as", "e", ":", "log", ".", "warn", "(", "e", ".", "messages", ")", "raise", "except", "ValueError", "as", "e", ":", "log", ".", "warn", "(", "e", ")", "raise", "except", "FrinkError", "as", "e", ":", "log", ".", "warn", "(", "e", ".", "messages", ")", "raise", "except", "Exception", "as", "e", ":", "log", ".", "warn", "(", "e", ")", "raise", "else", ":", "# If this is a new unsaved object, it'll likely have an", "# id of None, which RethinkDB won't like. So if it's None,", "# generate a UUID for it. If the save fails, we should re-set", "# it to None.", "if", "self", ".", "id", "is", "None", ":", "self", ".", "id", "=", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "log", ".", "debug", "(", "self", ".", "id", ")", "try", ":", "query", "=", "r", ".", "db", "(", "self", ".", "_db", ")", ".", "table", "(", "self", ".", "_table", ")", ".", "insert", "(", "self", ".", "to_primitive", "(", ")", ",", "conflict", "=", "\"replace\"", ")", "log", ".", "debug", "(", "query", ")", "rv", "=", "query", ".", "run", "(", "conn", ")", "# Returns something like this:", "# {", "# u'errors': 0,", "# u'deleted': 0,", "# u'generated_keys': [u'dd8ad1bc-8609-4484-b6c4-ed96c72c03f2'],", "# u'unchanged': 0,", "# u'skipped': 0,", "# u'replaced': 0,", "# u'inserted': 1", "# }", "log", ".", "debug", "(", "rv", ")", "except", "Exception", "as", "e", ":", "log", ".", "warn", "(", "e", ")", "self", ".", "id", "=", "None", "raise", "else", ":", "return", "self" ]
Save the current instance to the DB
[ "Save", "the", "current", "instance", "to", "the", "DB" ]
python
train
34.568966
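frink models appear to be schematics models persisted to RethinkDB, so a save might look like the following; the base class name and the field are hypothetical, and only the save semantics come from the record:

    from schematics.types import StringType
    from frink.orm import InstanceLayerMixin  # hypothetical base exposing save()

    class User(InstanceLayerMixin):  # real base class name may differ
        _db = "app"        # database/table attributes assumed from the method body
        _table = "users"
        name = StringType(required=True)

    user = User({"name": "Ada"})
    saved = user.save()  # validates, assigns a UUID id if unset, upserts the doc
    print(saved.id)      # populated even for a brand-new object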
rmed/pyemtmad
pyemtmad/api/geo.py
https://github.com/rmed/pyemtmad/blob/c21c42d0c7b50035dfed29540d7e64ab67833728/pyemtmad/api/geo.py#L39-L65
def get_arrive_stop(self, **kwargs): """Obtain bus arrival info in target stop. Args: stop_number (int): Stop number to query. lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[Arrival]), or message string in case of error. """ # Endpoint parameters params = { 'idStop': kwargs.get('stop_number'), 'cultureInfo': util.language_code(kwargs.get('lang')) } # Request result = self.make_request('geo', 'get_arrive_stop', **params) # Funny endpoint, no status code if not util.check_result(result, 'arrives'): return False, 'UNKNOWN ERROR' # Parse values = util.response_list(result, 'arrives') return True, [emtype.Arrival(**a) for a in values]
[ "def", "get_arrive_stop", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# Endpoint parameters", "params", "=", "{", "'idStop'", ":", "kwargs", ".", "get", "(", "'stop_number'", ")", ",", "'cultureInfo'", ":", "util", ".", "language_code", "(", "kwargs", ".", "get", "(", "'lang'", ")", ")", "}", "# Request", "result", "=", "self", ".", "make_request", "(", "'geo'", ",", "'get_arrive_stop'", ",", "*", "*", "params", ")", "# Funny endpoint, no status code", "if", "not", "util", ".", "check_result", "(", "result", ",", "'arrives'", ")", ":", "return", "False", ",", "'UNKNOWN ERROR'", "# Parse", "values", "=", "util", ".", "response_list", "(", "result", ",", "'arrives'", ")", "return", "True", ",", "[", "emtype", ".", "Arrival", "(", "*", "*", "a", ")", "for", "a", "in", "values", "]" ]
Obtain bus arrival info in target stop. Args: stop_number (int): Stop number to query. lang (str): Language code (*es* or *en*). Returns: Status boolean and parsed response (list[Arrival]), or message string in case of error.
[ "Obtain", "bus", "arrival", "info", "in", "target", "stop", "." ]
python
train
31.740741
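Reaching this endpoint through the package's wrapper might look like this; the credentials are placeholders, and the `Wrapper` entry point and printed attributes are assumptions:

    from pyemtmad import Wrapper  # entry point assumed

    emt = Wrapper("EMT_CLIENT_ID", "EMT_PASSKEY")
    ok, result = emt.geo.get_arrive_stop(stop_number=2242, lang="en")
    if ok:
        for arrival in result:      # result is a list of Arrival objects
            print(vars(arrival))    # attribute names vary per endpoint
    else:
        print("error:", result)     # result is a message string on failure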
Kronuz/pyScss
scss/compiler.py
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/compiler.py#L913-L952
def _at_for(self, calculator, rule, scope, block): """ Implements @for """ var, _, name = block.argument.partition(' from ') frm, _, through = name.partition(' through ') if through: inclusive = True else: inclusive = False frm, _, through = frm.partition(' to ') frm = calculator.calculate(frm) through = calculator.calculate(through) try: frm = int(float(frm)) through = int(float(through)) except ValueError: return if frm > through: # DEVIATION: allow reversed '@for .. from .. through' (same as enumerate() and range()) frm, through = through, frm rev = reversed else: rev = lambda x: x var = var.strip() var = calculator.do_glob_math(var) var = normalize_var(var) inner_rule = rule.copy() inner_rule.unparsed_contents = block.unparsed_contents if not self.should_scope_loop_in_rule(inner_rule): # DEVIATION: Allow not creating a new namespace inner_rule.namespace = rule.namespace if inclusive: through += 1 for i in rev(range(frm, through)): inner_rule.namespace.set_variable(var, Number(i)) self.manage_children(inner_rule, scope)
[ "def", "_at_for", "(", "self", ",", "calculator", ",", "rule", ",", "scope", ",", "block", ")", ":", "var", ",", "_", ",", "name", "=", "block", ".", "argument", ".", "partition", "(", "' from '", ")", "frm", ",", "_", ",", "through", "=", "name", ".", "partition", "(", "' through '", ")", "if", "through", ":", "inclusive", "=", "True", "else", ":", "inclusive", "=", "False", "frm", ",", "_", ",", "through", "=", "frm", ".", "partition", "(", "' to '", ")", "frm", "=", "calculator", ".", "calculate", "(", "frm", ")", "through", "=", "calculator", ".", "calculate", "(", "through", ")", "try", ":", "frm", "=", "int", "(", "float", "(", "frm", ")", ")", "through", "=", "int", "(", "float", "(", "through", ")", ")", "except", "ValueError", ":", "return", "if", "frm", ">", "through", ":", "# DEVIATION: allow reversed '@for .. from .. through' (same as enumerate() and range())", "frm", ",", "through", "=", "through", ",", "frm", "rev", "=", "reversed", "else", ":", "rev", "=", "lambda", "x", ":", "x", "var", "=", "var", ".", "strip", "(", ")", "var", "=", "calculator", ".", "do_glob_math", "(", "var", ")", "var", "=", "normalize_var", "(", "var", ")", "inner_rule", "=", "rule", ".", "copy", "(", ")", "inner_rule", ".", "unparsed_contents", "=", "block", ".", "unparsed_contents", "if", "not", "self", ".", "should_scope_loop_in_rule", "(", "inner_rule", ")", ":", "# DEVIATION: Allow not creating a new namespace", "inner_rule", ".", "namespace", "=", "rule", ".", "namespace", "if", "inclusive", ":", "through", "+=", "1", "for", "i", "in", "rev", "(", "range", "(", "frm", ",", "through", ")", ")", ":", "inner_rule", ".", "namespace", ".", "set_variable", "(", "var", ",", "Number", "(", "i", ")", ")", "self", ".", "manage_children", "(", "inner_rule", ",", "scope", ")" ]
Implements @for
[ "Implements" ]
python
train
33.75
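The handler above parses standard Sass syntax, so its effect is easiest to see through the compiler's public API; a sketch (the commented output is the expected shape, not captured output):

    import scss

    source = """
    @for $i from 1 through 3 {
        .item-#{$i} { width: 10px * $i; }
    }
    """
    print(scss.Compiler().compile_string(source))
    # expected shape: .item-1 { width: 10px; } ... .item-3 { width: 30px; }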
bids-standard/pybids
bids/analysis/hrf.py
https://github.com/bids-standard/pybids/blob/30d924ce770622bda0e390d613a8da42a2a20c32/bids/analysis/hrf.py#L144-L166
def glover_time_derivative(tr, oversampling=50, time_length=32., onset=0.): """Implementation of the Glover time derivative hrf (dhrf) model Parameters ---------- tr: float scan repeat time, in seconds oversampling: int, temporal oversampling factor, optional time_length: float, hrf kernel length, in seconds onset: float, onset of the response Returns ------- dhrf: array of shape(length / tr), dtype=float dhrf sampling on the provided grid """ do = .1 dhrf = 1. / do * (glover_hrf(tr, oversampling, time_length, onset) - glover_hrf(tr, oversampling, time_length, onset + do)) return dhrf
[ "def", "glover_time_derivative", "(", "tr", ",", "oversampling", "=", "50", ",", "time_length", "=", "32.", ",", "onset", "=", "0.", ")", ":", "do", "=", ".1", "dhrf", "=", "1.", "/", "do", "*", "(", "glover_hrf", "(", "tr", ",", "oversampling", ",", "time_length", ",", "onset", ")", "-", "glover_hrf", "(", "tr", ",", "oversampling", ",", "time_length", ",", "onset", "+", "do", ")", ")", "return", "dhrf" ]
Implementation of the Glover time derivative hrf (dhrf) model Parameters ---------- tr: float scan repeat time, in seconds oversampling: int, temporal oversampling factor, optional time_length: float, hrf kernel length, in seconds onset: float, onset of the response Returns ------- dhrf: array of shape(length / tr), dtype=float dhrf sampling on the provided grid
[ "Implementation", "of", "the", "Glover", "time", "derivative", "hrf", "(", "dhrf", ")", "model" ]
python
train
30.043478
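The body is a finite-difference derivative with delta = 0.1 s, i.e. dhrf(t) is approximated by (h(t; onset) - h(t; onset + delta)) / delta. A sampling sketch (import path taken from the record's file):

    import numpy as np
    from bids.analysis.hrf import glover_time_derivative  # path per the record

    tr = 2.0
    dhrf = glover_time_derivative(tr, oversampling=50, time_length=32.0)
    print(dhrf.shape)        # one value per oversampled time point
    print(np.argmax(dhrf))   # index of the derivative's peak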
pyscaffold/configupdater
src/configupdater/configupdater.py
https://github.com/pyscaffold/configupdater/blob/6ebac0b1fa7b8222baacdd4991d18cfc61659f84/src/configupdater/configupdater.py#L176-L190
def space(self, newlines=1): """Creates a vertical space of newlines Args: newlines (int): number of empty lines Returns: self for chaining """ space = Space() for line in range(newlines): space.add_line('\n') self._container.structure.insert(self._idx, space) self._idx += 1 return self
[ "def", "space", "(", "self", ",", "newlines", "=", "1", ")", ":", "space", "=", "Space", "(", ")", "for", "line", "in", "range", "(", "newlines", ")", ":", "space", ".", "add_line", "(", "'\\n'", ")", "self", ".", "_container", ".", "structure", ".", "insert", "(", "self", ".", "_idx", ",", "space", ")", "self", ".", "_idx", "+=", "1", "return", "self" ]
Creates a vertical space of newlines Args: newlines (int): number of empty lines Returns: self for chaining
[ "Creates", "a", "vertical", "space", "of", "newlines" ]
python
train
25.533333
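This builder method is normally reached via ConfigUpdater's add_before/add_after handles, and chaining works because each call returns the builder; a sketch (section and option names illustrative):

    from configupdater import ConfigUpdater

    updater = ConfigUpdater()
    updater.read_string("[metadata]\nname = demo\n")
    # insert two blank lines, then a new section, after [metadata]
    updater["metadata"].add_after.space(2).section("options")
    print(str(updater))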
jwhitlock/drf-cached-instances
drf_cached_instances/cache.py
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/cache.py#L86-L157
def get_instances(self, object_specs, version=None): """Get the cached native representation for one or more objects. Keyword arguments: object_specs - A sequence of triples (model name, pk, obj): - model name - the name of the model - pk - the primary key of the instance - obj - the instance, or None to load it version - The cache version to use, or None for default To get the 'new object' representation, set pk and obj to None Return is a dictionary: key - (model name, pk) value - (native representation, pk, object or None) """ ret = dict() spec_keys = set() cache_keys = [] version = version or self.default_version # Construct all the cache keys to fetch for model_name, obj_pk, obj in object_specs: assert model_name assert obj_pk # Get cache keys to fetch obj_key = self.key_for(version, model_name, obj_pk) spec_keys.add((model_name, obj_pk, obj, obj_key)) cache_keys.append(obj_key) # Fetch the cache keys if cache_keys and self.cache: cache_vals = self.cache.get_many(cache_keys) else: cache_vals = {} # Use cached representations, or recreate cache_to_set = {} for model_name, obj_pk, obj, obj_key in spec_keys: # Load cached objects obj_val = cache_vals.get(obj_key) obj_native = json.loads(obj_val) if obj_val else None # Invalid or not set - load from database if not obj_native: if not obj: loader = self.model_function(model_name, version, 'loader') obj = loader(obj_pk) serializer = self.model_function( model_name, version, 'serializer') obj_native = serializer(obj) or {} if obj_native: cache_to_set[obj_key] = json.dumps(obj_native) # Get fields to convert keys = [key for key in obj_native.keys() if ':' in key] for key in keys: json_value = obj_native.pop(key) name, value = self.field_from_json(key, json_value) assert name not in obj_native obj_native[name] = value if obj_native: ret[(model_name, obj_pk)] = (obj_native, obj_key, obj) # Save any new cached representations if cache_to_set and self.cache: self.cache.set_many(cache_to_set) return ret
[ "def", "get_instances", "(", "self", ",", "object_specs", ",", "version", "=", "None", ")", ":", "ret", "=", "dict", "(", ")", "spec_keys", "=", "set", "(", ")", "cache_keys", "=", "[", "]", "version", "=", "version", "or", "self", ".", "default_version", "# Construct all the cache keys to fetch", "for", "model_name", ",", "obj_pk", ",", "obj", "in", "object_specs", ":", "assert", "model_name", "assert", "obj_pk", "# Get cache keys to fetch", "obj_key", "=", "self", ".", "key_for", "(", "version", ",", "model_name", ",", "obj_pk", ")", "spec_keys", ".", "add", "(", "(", "model_name", ",", "obj_pk", ",", "obj", ",", "obj_key", ")", ")", "cache_keys", ".", "append", "(", "obj_key", ")", "# Fetch the cache keys", "if", "cache_keys", "and", "self", ".", "cache", ":", "cache_vals", "=", "self", ".", "cache", ".", "get_many", "(", "cache_keys", ")", "else", ":", "cache_vals", "=", "{", "}", "# Use cached representations, or recreate", "cache_to_set", "=", "{", "}", "for", "model_name", ",", "obj_pk", ",", "obj", ",", "obj_key", "in", "spec_keys", ":", "# Load cached objects", "obj_val", "=", "cache_vals", ".", "get", "(", "obj_key", ")", "obj_native", "=", "json", ".", "loads", "(", "obj_val", ")", "if", "obj_val", "else", "None", "# Invalid or not set - load from database", "if", "not", "obj_native", ":", "if", "not", "obj", ":", "loader", "=", "self", ".", "model_function", "(", "model_name", ",", "version", ",", "'loader'", ")", "obj", "=", "loader", "(", "obj_pk", ")", "serializer", "=", "self", ".", "model_function", "(", "model_name", ",", "version", ",", "'serializer'", ")", "obj_native", "=", "serializer", "(", "obj", ")", "or", "{", "}", "if", "obj_native", ":", "cache_to_set", "[", "obj_key", "]", "=", "json", ".", "dumps", "(", "obj_native", ")", "# Get fields to convert", "keys", "=", "[", "key", "for", "key", "in", "obj_native", ".", "keys", "(", ")", "if", "':'", "in", "key", "]", "for", "key", "in", "keys", ":", "json_value", "=", "obj_native", ".", "pop", "(", "key", ")", "name", ",", "value", "=", "self", ".", "field_from_json", "(", "key", ",", "json_value", ")", "assert", "name", "not", "in", "obj_native", "obj_native", "[", "name", "]", "=", "value", "if", "obj_native", ":", "ret", "[", "(", "model_name", ",", "obj_pk", ")", "]", "=", "(", "obj_native", ",", "obj_key", ",", "obj", ")", "# Save any new cached representations", "if", "cache_to_set", "and", "self", ".", "cache", ":", "self", ".", "cache", ".", "set_many", "(", "cache_to_set", ")", "return", "ret" ]
Get the cached native representation for one or more objects. Keyword arguments: object_specs - A sequence of triples (model name, pk, obj): - model name - the name of the model - pk - the primary key of the instance - obj - the instance, or None to load it version - The cache version to use, or None for default To get the 'new object' representation, set pk and obj to None Return is a dictionary: key - (model name, pk) value - (native representation, pk, object or None)
[ "Get", "the", "cached", "native", "representation", "for", "one", "or", "more", "objects", "." ]
python
train
35.791667
angr/angr
angr/analyses/cfg/cfg_base.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfg_base.py#L1185-L1320
def make_functions(self): """ Revisit the entire control flow graph, create Function instances accordingly, and correctly put blocks into each function. Although Function objects are created during the CFG recovery, they are neither sound nor accurate. With a pre-constructed CFG, this method rebuilds all functions bearing the following rules: - A block may only belong to one function. - Small functions lying inside the startpoint and the endpoint of another function will be merged with the other function - Tail call optimizations are detected. - PLT stubs are aligned by 16. :return: None """ # TODO: Is it required that PLT stubs are always aligned by 16? If so, on what architectures and platforms is it # TODO: enforced? tmp_functions = self.kb.functions.copy() for function in tmp_functions.values(): function.mark_nonreturning_calls_endpoints() # Clear old functions dict self.kb.functions.clear() blockaddr_to_function = { } traversed_cfg_nodes = set() function_nodes = set() # Find nodes for beginnings of all functions for _, dst, data in self.graph.edges(data=True): jumpkind = data.get('jumpkind', "") if jumpkind == 'Ijk_Call' or jumpkind.startswith('Ijk_Sys'): function_nodes.add(dst) entry_node = self.model.get_any_node(self._binary.entry) if entry_node is not None: function_nodes.add(entry_node) # aggressively remove and merge functions # For any function, if there is a call to it, it won't be removed called_function_addrs = { n.addr for n in function_nodes } removed_functions_a = self._process_irrational_functions(tmp_functions, called_function_addrs, blockaddr_to_function ) removed_functions_b, adjusted_cfgnodes = self._process_irrational_function_starts(tmp_functions, called_function_addrs, blockaddr_to_function ) removed_functions = removed_functions_a | removed_functions_b # Remove all nodes that are adjusted function_nodes.difference_update(adjusted_cfgnodes) for n in self.graph.nodes(): if n.addr in tmp_functions or n.addr in removed_functions: function_nodes.add(n) # traverse the graph starting from each node, not following call edges # it's important that we traverse all functions in order so that we have a greater chance to come across # rational functions before its irrational counterparts (e.g. due to failed jump table resolution) min_stage_2_progress = 50.0 max_stage_2_progress = 90.0 nodes_count = len(function_nodes) for i, fn in enumerate(sorted(function_nodes, key=lambda n: n.addr)): if self._low_priority: self._release_gil(i, 20) if self._show_progressbar or self._progress_callback: progress = min_stage_2_progress + (max_stage_2_progress - min_stage_2_progress) * (i * 1.0 / nodes_count) self._update_progress(progress) self._graph_bfs_custom(self.graph, [ fn ], self._graph_traversal_handler, blockaddr_to_function, tmp_functions, traversed_cfg_nodes ) # Don't forget those small function chunks that are not called by anything. # There might be references to them from data, or simply references that we cannot find via static analysis secondary_function_nodes = set() # add all function chunks ("functions" that are not called from anywhere) for func_addr in tmp_functions: node = self.model.get_any_node(func_addr) if node is None: continue if node.addr not in blockaddr_to_function: secondary_function_nodes.add(node) missing_cfg_nodes = set(self.graph.nodes()) - traversed_cfg_nodes missing_cfg_nodes = { node for node in missing_cfg_nodes if node.function_address is not None } if missing_cfg_nodes: l.debug('%d CFGNodes are missing in the first traversal.', len(missing_cfg_nodes)) secondary_function_nodes |= missing_cfg_nodes min_stage_3_progress = 90.0 max_stage_3_progress = 99.9 nodes_count = len(secondary_function_nodes) for i, fn in enumerate(sorted(secondary_function_nodes, key=lambda n: n.addr)): if self._show_progressbar or self._progress_callback: progress = min_stage_3_progress + (max_stage_3_progress - min_stage_3_progress) * (i * 1.0 / nodes_count) self._update_progress(progress) self._graph_bfs_custom(self.graph, [fn], self._graph_traversal_handler, blockaddr_to_function, tmp_functions ) to_remove = set() # Remove all stubs after PLT entries if not is_arm_arch(self.project.arch): for fn in self.kb.functions.values(): addr = fn.addr - (fn.addr % 16) if addr != fn.addr and addr in self.kb.functions and self.kb.functions[addr].is_plt: to_remove.add(fn.addr) # remove empty functions for func in self.kb.functions.values(): if func.startpoint is None: to_remove.add(func.addr) for addr in to_remove: del self.kb.functions[addr] # Update CFGNode.function_address for node in self._nodes.values(): if node.addr in blockaddr_to_function: node.function_address = blockaddr_to_function[node.addr].addr
[ "def", "make_functions", "(", "self", ")", ":", "# TODO: Is it required that PLT stubs are always aligned by 16? If so, on what architectures and platforms is it", "# TODO: enforced?", "tmp_functions", "=", "self", ".", "kb", ".", "functions", ".", "copy", "(", ")", "for", "function", "in", "tmp_functions", ".", "values", "(", ")", ":", "function", ".", "mark_nonreturning_calls_endpoints", "(", ")", "# Clear old functions dict", "self", ".", "kb", ".", "functions", ".", "clear", "(", ")", "blockaddr_to_function", "=", "{", "}", "traversed_cfg_nodes", "=", "set", "(", ")", "function_nodes", "=", "set", "(", ")", "# Find nodes for beginnings of all functions", "for", "_", ",", "dst", ",", "data", "in", "self", ".", "graph", ".", "edges", "(", "data", "=", "True", ")", ":", "jumpkind", "=", "data", ".", "get", "(", "'jumpkind'", ",", "\"\"", ")", "if", "jumpkind", "==", "'Ijk_Call'", "or", "jumpkind", ".", "startswith", "(", "'Ijk_Sys'", ")", ":", "function_nodes", ".", "add", "(", "dst", ")", "entry_node", "=", "self", ".", "model", ".", "get_any_node", "(", "self", ".", "_binary", ".", "entry", ")", "if", "entry_node", "is", "not", "None", ":", "function_nodes", ".", "add", "(", "entry_node", ")", "# aggressively remove and merge functions", "# For any function, if there is a call to it, it won't be removed", "called_function_addrs", "=", "{", "n", ".", "addr", "for", "n", "in", "function_nodes", "}", "removed_functions_a", "=", "self", ".", "_process_irrational_functions", "(", "tmp_functions", ",", "called_function_addrs", ",", "blockaddr_to_function", ")", "removed_functions_b", ",", "adjusted_cfgnodes", "=", "self", ".", "_process_irrational_function_starts", "(", "tmp_functions", ",", "called_function_addrs", ",", "blockaddr_to_function", ")", "removed_functions", "=", "removed_functions_a", "|", "removed_functions_b", "# Remove all nodes that are adjusted", "function_nodes", ".", "difference_update", "(", "adjusted_cfgnodes", ")", "for", "n", "in", "self", ".", "graph", ".", "nodes", "(", ")", ":", "if", "n", ".", "addr", "in", "tmp_functions", "or", "n", ".", "addr", "in", "removed_functions", ":", "function_nodes", ".", "add", "(", "n", ")", "# traverse the graph starting from each node, not following call edges", "# it's important that we traverse all functions in order so that we have a greater chance to come across", "# rational functions before its irrational counterparts (e.g. 
due to failed jump table resolution)", "min_stage_2_progress", "=", "50.0", "max_stage_2_progress", "=", "90.0", "nodes_count", "=", "len", "(", "function_nodes", ")", "for", "i", ",", "fn", "in", "enumerate", "(", "sorted", "(", "function_nodes", ",", "key", "=", "lambda", "n", ":", "n", ".", "addr", ")", ")", ":", "if", "self", ".", "_low_priority", ":", "self", ".", "_release_gil", "(", "i", ",", "20", ")", "if", "self", ".", "_show_progressbar", "or", "self", ".", "_progress_callback", ":", "progress", "=", "min_stage_2_progress", "+", "(", "max_stage_2_progress", "-", "min_stage_2_progress", ")", "*", "(", "i", "*", "1.0", "/", "nodes_count", ")", "self", ".", "_update_progress", "(", "progress", ")", "self", ".", "_graph_bfs_custom", "(", "self", ".", "graph", ",", "[", "fn", "]", ",", "self", ".", "_graph_traversal_handler", ",", "blockaddr_to_function", ",", "tmp_functions", ",", "traversed_cfg_nodes", ")", "# Don't forget those small function chunks that are not called by anything.", "# There might be references to them from data, or simply references that we cannot find via static analysis", "secondary_function_nodes", "=", "set", "(", ")", "# add all function chunks (\"functions\" that are not called from anywhere)", "for", "func_addr", "in", "tmp_functions", ":", "node", "=", "self", ".", "model", ".", "get_any_node", "(", "func_addr", ")", "if", "node", "is", "None", ":", "continue", "if", "node", ".", "addr", "not", "in", "blockaddr_to_function", ":", "secondary_function_nodes", ".", "add", "(", "node", ")", "missing_cfg_nodes", "=", "set", "(", "self", ".", "graph", ".", "nodes", "(", ")", ")", "-", "traversed_cfg_nodes", "missing_cfg_nodes", "=", "{", "node", "for", "node", "in", "missing_cfg_nodes", "if", "node", ".", "function_address", "is", "not", "None", "}", "if", "missing_cfg_nodes", ":", "l", ".", "debug", "(", "'%d CFGNodes are missing in the first traversal.'", ",", "len", "(", "missing_cfg_nodes", ")", ")", "secondary_function_nodes", "|=", "missing_cfg_nodes", "min_stage_3_progress", "=", "90.0", "max_stage_3_progress", "=", "99.9", "nodes_count", "=", "len", "(", "secondary_function_nodes", ")", "for", "i", ",", "fn", "in", "enumerate", "(", "sorted", "(", "secondary_function_nodes", ",", "key", "=", "lambda", "n", ":", "n", ".", "addr", ")", ")", ":", "if", "self", ".", "_show_progressbar", "or", "self", ".", "_progress_callback", ":", "progress", "=", "min_stage_3_progress", "+", "(", "max_stage_3_progress", "-", "min_stage_3_progress", ")", "*", "(", "i", "*", "1.0", "/", "nodes_count", ")", "self", ".", "_update_progress", "(", "progress", ")", "self", ".", "_graph_bfs_custom", "(", "self", ".", "graph", ",", "[", "fn", "]", ",", "self", ".", "_graph_traversal_handler", ",", "blockaddr_to_function", ",", "tmp_functions", ")", "to_remove", "=", "set", "(", ")", "# Remove all stubs after PLT entries", "if", "not", "is_arm_arch", "(", "self", ".", "project", ".", "arch", ")", ":", "for", "fn", "in", "self", ".", "kb", ".", "functions", ".", "values", "(", ")", ":", "addr", "=", "fn", ".", "addr", "-", "(", "fn", ".", "addr", "%", "16", ")", "if", "addr", "!=", "fn", ".", "addr", "and", "addr", "in", "self", ".", "kb", ".", "functions", "and", "self", ".", "kb", ".", "functions", "[", "addr", "]", ".", "is_plt", ":", "to_remove", ".", "add", "(", "fn", ".", "addr", ")", "# remove empty functions", "for", "func", "in", "self", ".", "kb", ".", "functions", ".", "values", "(", ")", ":", "if", "func", ".", "startpoint", "is", "None", ":", "to_remove", ".", "add", 
"(", "func", ".", "addr", ")", "for", "addr", "in", "to_remove", ":", "del", "self", ".", "kb", ".", "functions", "[", "addr", "]", "# Update CFGNode.function_address", "for", "node", "in", "self", ".", "_nodes", ".", "values", "(", ")", ":", "if", "node", ".", "addr", "in", "blockaddr_to_function", ":", "node", ".", "function_address", "=", "blockaddr_to_function", "[", "node", ".", "addr", "]", ".", "addr" ]
Revisit the entire control flow graph, create Function instances accordingly, and correctly put blocks into each function. Although Function objects are created during CFG recovery, they are neither sound nor accurate. With a pre-constructed CFG, this method rebuilds all functions according to the following rules: - A block may only belong to one function. - Small functions lying inside the startpoint and the endpoint of another function will be merged with the other function. - Tail call optimizations are detected. - PLT stubs are aligned by 16. :return: None
[ "Revisit", "the", "entire", "control", "flow", "graph", "create", "Function", "instances", "accordingly", "and", "correctly", "put", "blocks", "into", "each", "function", "." ]
python
train
45.294118
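Usage sketch (not part of the record above): make_functions() runs as the final stage of angr's CFG recovery, so user code normally reaches its output through the knowledge base rather than calling it directly. A minimal sketch assuming a stock angr install; the binary path is a placeholder.

# Driving the analysis from user code; "/bin/true" is a placeholder binary.
import angr

proj = angr.Project("/bin/true", auto_load_libs=False)
cfg = proj.analyses.CFGFast()  # invokes make_functions() internally

# Each recovered Function owns a disjoint set of blocks, per the rules above.
for func in proj.kb.functions.values():
    print(hex(func.addr), func.name, func.is_plt)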
Opentrons/opentrons
api/src/opentrons/config/__init__.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/config/__init__.py#L176-L190
def load_and_migrate() -> Dict[str, Path]: """ Ensure the settings directory tree is properly configured. This function does most of its work on the actual robot. It will move all settings files from wherever they happen to be to the proper place. On non-robots, this mostly just loads. In addition, it writes a default config and makes sure all directories required exist (though the files in them may not). """ if IS_ROBOT: _migrate_robot() base = infer_config_base_dir() base.mkdir(parents=True, exist_ok=True) index = _load_with_overrides(base) return _ensure_paths_and_types(index)
[ "def", "load_and_migrate", "(", ")", "->", "Dict", "[", "str", ",", "Path", "]", ":", "if", "IS_ROBOT", ":", "_migrate_robot", "(", ")", "base", "=", "infer_config_base_dir", "(", ")", "base", ".", "mkdir", "(", "parents", "=", "True", ",", "exist_ok", "=", "True", ")", "index", "=", "_load_with_overrides", "(", "base", ")", "return", "_ensure_paths_and_types", "(", "index", ")" ]
Ensure the settings directory tree is properly configured. This function does most of its work on the actual robot. It will move all settings files from wherever they happen to be to the proper place. On non-robots, this mostly just loads. In addition, it writes a default config and makes sure all directories required exist (though the files in them may not).
[ "Ensure", "the", "settings", "directory", "tree", "is", "properly", "configured", "." ]
python
train
41.933333
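Usage sketch (not part of the record above): a minimal sketch assuming the public import path matches the file path in the record; on a non-robot machine this mostly loads defaults, with directory creation as a side effect.

from opentrons.config import load_and_migrate

index = load_and_migrate()  # returns Dict[str, Path]
for name, path in index.items():
    print(name, "->", path)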
jobovy/galpy
galpy/orbit/Orbit.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/Orbit.py#L2287-L2319
def ra(self,*args,**kwargs): """ NAME: ra PURPOSE: return the right ascension INPUT: t - (optional) time at which to get ra (can be Quantity) obs=[X,Y,Z] - (optional) position of observer (in kpc; entries can be Quantity) (default=[8.0,0.,0.]) OR Orbit object that corresponds to the orbit of the observer (default=Object-wide default; can be Quantity) Y is ignored and always assumed to be zero ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity) OUTPUT: ra(t) in deg HISTORY: 2011-02-23 - Written - Bovy (NYU) """ out= self._orb.ra(*args,**kwargs) if len(out) == 1: return out[0] else: return out
[ "def", "ra", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "out", "=", "self", ".", "_orb", ".", "ra", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "len", "(", "out", ")", "==", "1", ":", "return", "out", "[", "0", "]", "else", ":", "return", "out" ]
NAME: ra PURPOSE: return the right ascension INPUT: t - (optional) time at which to get ra (can be Quantity) obs=[X,Y,Z] - (optional) position of observer (in kpc; entries can be Quantity) (default=[8.0,0.,0.]) OR Orbit object that corresponds to the orbit of the observer (default=Object-wide default; can be Quantity) Y is ignored and always assumed to be zero ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity) OUTPUT: ra(t) in deg HISTORY: 2011-02-23 - Written - Bovy (NYU)
[ "NAME", ":" ]
python
train
24.151515
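Usage sketch (not part of the record above): querying ra() along an integrated orbit, following galpy's documented Orbit API; the initial conditions below are illustrative only.

import numpy
from galpy.orbit import Orbit
from galpy.potential import MWPotential2014

# [R, vR, vT, z, vz, phi] in natural units, scaled by ro/vo.
o = Orbit([1.0, 0.1, 1.1, 0.0, 0.1, 0.0], ro=8.0, vo=220.0)
ts = numpy.linspace(0.0, 10.0, 1001)
o.integrate(ts, MWPotential2014)
print(o.ra())        # right ascension at t=0, in deg
print(o.ra(ts[-1]))  # right ascension at the final time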
peri-source/peri
peri/opt/optimize.py
https://github.com/peri-source/peri/blob/61beed5deaaf978ab31ed716e8470d86ba639867/peri/opt/optimize.py#L1469-L1472
def update_function(self, param_vals): """Updates the opt_obj, returns new error.""" self.opt_obj.update_function(param_vals) return self.opt_obj.get_error()
[ "def", "update_function", "(", "self", ",", "param_vals", ")", ":", "self", ".", "opt_obj", ".", "update_function", "(", "param_vals", ")", "return", "self", ".", "opt_obj", ".", "get_error", "(", ")" ]
Updates the opt_obj, returns new error.
[ "Updates", "the", "opt_obj", "returns", "new", "error", "." ]
python
valid
44.5
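Illustration (not part of the record above): the method is a thin adapter, so any object exposing update_function(params) and get_error() can be driven the same way. The OptObj class below is hypothetical, standing in for self.opt_obj's interface.

class OptObj:
    """Hypothetical stand-in for the wrapped optimization object."""
    def __init__(self, target):
        self.target = target
        self.value = 0.0

    def update_function(self, param_vals):
        self.value = sum(param_vals)

    def get_error(self):
        return (self.value - self.target) ** 2

def update_function(opt_obj, param_vals):
    opt_obj.update_function(param_vals)  # apply the new parameters
    return opt_obj.get_error()           # report the resulting error

print(update_function(OptObj(3.0), [1.0, 1.5]))  # 0.25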
angr/angr
angr/analyses/cfg/cfg_emulated.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/cfg/cfg_emulated.py#L975-L1016
def _get_one_pending_job(self): """ Retrieve a pending job. :return: A CFGJob instance or None """ pending_job_key, pending_job = self._pending_jobs.popitem() pending_job_state = pending_job.state pending_job_call_stack = pending_job.call_stack pending_job_src_block_id = pending_job.src_block_id pending_job_src_exit_stmt_idx = pending_job.src_exit_stmt_idx self._deregister_analysis_job(pending_job.caller_func_addr, pending_job) # Let's check whether this address has been traced before. if pending_job_key in self._nodes: node = self._nodes[pending_job_key] if node in self.graph: pending_exit_addr = self._block_id_addr(pending_job_key) # That block has been traced before. Let's forget about it l.debug("Target 0x%08x has been traced before. Trying the next one...", pending_exit_addr) # However, we should still create the FakeRet edge self._graph_add_edge(pending_job_src_block_id, pending_job_key, jumpkind="Ijk_FakeRet", stmt_idx=pending_job_src_exit_stmt_idx, ins_addr=pending_job.src_exit_ins_addr) return None pending_job_state.history.jumpkind = 'Ijk_FakeRet' job = CFGJob(pending_job_state.addr, pending_job_state, self._context_sensitivity_level, src_block_id=pending_job_src_block_id, src_exit_stmt_idx=pending_job_src_exit_stmt_idx, src_ins_addr=pending_job.src_exit_ins_addr, call_stack=pending_job_call_stack, ) l.debug("Tracing a missing return exit %s", self._block_id_repr(pending_job_key)) return job
[ "def", "_get_one_pending_job", "(", "self", ")", ":", "pending_job_key", ",", "pending_job", "=", "self", ".", "_pending_jobs", ".", "popitem", "(", ")", "pending_job_state", "=", "pending_job", ".", "state", "pending_job_call_stack", "=", "pending_job", ".", "call_stack", "pending_job_src_block_id", "=", "pending_job", ".", "src_block_id", "pending_job_src_exit_stmt_idx", "=", "pending_job", ".", "src_exit_stmt_idx", "self", ".", "_deregister_analysis_job", "(", "pending_job", ".", "caller_func_addr", ",", "pending_job", ")", "# Let's check whether this address has been traced before.", "if", "pending_job_key", "in", "self", ".", "_nodes", ":", "node", "=", "self", ".", "_nodes", "[", "pending_job_key", "]", "if", "node", "in", "self", ".", "graph", ":", "pending_exit_addr", "=", "self", ".", "_block_id_addr", "(", "pending_job_key", ")", "# That block has been traced before. Let's forget about it", "l", ".", "debug", "(", "\"Target 0x%08x has been traced before. Trying the next one...\"", ",", "pending_exit_addr", ")", "# However, we should still create the FakeRet edge", "self", ".", "_graph_add_edge", "(", "pending_job_src_block_id", ",", "pending_job_key", ",", "jumpkind", "=", "\"Ijk_FakeRet\"", ",", "stmt_idx", "=", "pending_job_src_exit_stmt_idx", ",", "ins_addr", "=", "pending_job", ".", "src_exit_ins_addr", ")", "return", "None", "pending_job_state", ".", "history", ".", "jumpkind", "=", "'Ijk_FakeRet'", "job", "=", "CFGJob", "(", "pending_job_state", ".", "addr", ",", "pending_job_state", ",", "self", ".", "_context_sensitivity_level", ",", "src_block_id", "=", "pending_job_src_block_id", ",", "src_exit_stmt_idx", "=", "pending_job_src_exit_stmt_idx", ",", "src_ins_addr", "=", "pending_job", ".", "src_exit_ins_addr", ",", "call_stack", "=", "pending_job_call_stack", ",", ")", "l", ".", "debug", "(", "\"Tracing a missing return exit %s\"", ",", "self", ".", "_block_id_repr", "(", "pending_job_key", ")", ")", "return", "job" ]
Retrieve a pending job. :return: A CFGJob instance or None
[ "Retrieve", "a", "pending", "job", "." ]
python
train
43.119048
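Usage sketch (not part of the record above): pending jobs and the Ijk_FakeRet bookkeeping are internal to CFGEmulated; user code only selects the analysis and its context sensitivity. The binary path is a placeholder.

import angr

proj = angr.Project("/bin/true", auto_load_libs=False)
cfg = proj.analyses.CFGEmulated(context_sensitivity_level=1, keep_state=True)
print(len(cfg.graph.nodes()), "CFG nodes recovered")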
Workiva/furious
furious/async.py
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/async.py#L497-L512
def _increment_recursion_level(self): """Increment current_depth based on either defaults or the enclosing Async. """ # Update the recursion info. This is done so that if an async created # outside an executing context, or one previously created is later # loaded from storage, that the "current" setting is correctly set. self._initialize_recursion_depth() recursion_options = self._options.get('_recursion', {}) current_depth = recursion_options.get('current', 0) + 1 max_depth = recursion_options.get('max', MAX_DEPTH) # Increment and store self.update_options(_recursion={'current': current_depth, 'max': max_depth})
[ "def", "_increment_recursion_level", "(", "self", ")", ":", "# Update the recursion info. This is done so that if an async created", "# outside an executing context, or one previously created is later", "# loaded from storage, that the \"current\" setting is correctly set.", "self", ".", "_initialize_recursion_depth", "(", ")", "recursion_options", "=", "self", ".", "_options", ".", "get", "(", "'_recursion'", ",", "{", "}", ")", "current_depth", "=", "recursion_options", ".", "get", "(", "'current'", ",", "0", ")", "+", "1", "max_depth", "=", "recursion_options", ".", "get", "(", "'max'", ",", "MAX_DEPTH", ")", "# Increment and store", "self", ".", "update_options", "(", "_recursion", "=", "{", "'current'", ":", "current_depth", ",", "'max'", ":", "max_depth", "}", ")" ]
Increment current_depth based on either defaults or the enclosing Async.
[ "Increment", "current_depth", "based", "on", "either", "defaults", "or", "the", "enclosing", "Async", "." ]
python
train
46.5
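Illustration (not part of the record above): the same read-increment-store pattern on a nested options dict, in isolation; names mirror the method rather than the full Async API, and MAX_DEPTH stands in for the module-level constant referenced above.

MAX_DEPTH = 100  # stand-in for furious's module-level default

def increment_recursion_level(options):
    recursion = options.get('_recursion', {})
    current = recursion.get('current', 0) + 1   # bump the depth counter
    maximum = recursion.get('max', MAX_DEPTH)   # keep any existing cap
    options['_recursion'] = {'current': current, 'max': maximum}

opts = {}
increment_recursion_level(opts)
increment_recursion_level(opts)
print(opts)  # {'_recursion': {'current': 2, 'max': 100}}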
pneff/wsgiservice
wsgiservice/status.py
https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/status.py#L49-L60
def raise_204(instance): """Abort the current request with a 204 (No Content) response code. Clears out the body of the response. :param instance: Resource instance (used to access the response) :type instance: :class:`webob.resource.Resource` :raises: :class:`webob.exceptions.ResponseException` of status 204 """ instance.response.status = 204 instance.response.body = '' instance.response.body_raw = None raise ResponseException(instance.response)
[ "def", "raise_204", "(", "instance", ")", ":", "instance", ".", "response", ".", "status", "=", "204", "instance", ".", "response", ".", "body", "=", "''", "instance", ".", "response", ".", "body_raw", "=", "None", "raise", "ResponseException", "(", "instance", ".", "response", ")" ]
Abort the current request with a 204 (No Content) response code. Clears out the body of the response. :param instance: Resource instance (used to access the response) :type instance: :class:`webob.resource.Resource` :raises: :class:`webob.exceptions.ResponseException` of status 204
[ "Abort", "the", "current", "request", "with", "a", "204", "(", "No", "Content", ")", "response", "code", ".", "Clears", "out", "the", "body", "of", "the", "response", "." ]
python
train
40
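Usage sketch (not part of the record above): calling raise_204 from a DELETE handler, following wsgiservice's documented Resource/mount style; the route and the items dict are illustrative only.

from wsgiservice import Resource, mount
from wsgiservice.status import raise_204

items = {}

@mount('/{id}')
class Item(Resource):
    def DELETE(self, id):
        items.pop(id, None)
        raise_204(self)  # responds 204 No Content with an empty body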