Dataset columns:
  repo              string   (length 7 – 55)
  path              string   (length 4 – 223)
  url               string   (length 87 – 315)
  code              string   (length 75 – 104k)
  code_tokens       list
  docstring         string   (length 1 – 46.9k)
  docstring_tokens  list
  language          string   (1 distinct value)
  partition         string   (3 distinct values)
  avg_line_len      float64  (7.91 – 980)
ungarj/s2reader
s2reader/s2reader.py
https://github.com/ungarj/s2reader/blob/376fd7ee1d15cce0849709c149d694663a7bc0ef/s2reader/s2reader.py#L291-L307
def tci_path(self):
    """Return the path to the granules TrueColorImage."""
    tci_paths = [
        path for path in self.dataset._product_metadata.xpath(
            ".//Granule[@granuleIdentifier='%s']/IMAGE_FILE/text()" % self.granule_identifier
        )
        if path.endswith('TCI')
    ]
    try:
        tci_path = tci_paths[0]
    except IndexError:
        return None

    return os.path.join(
        self.dataset._zip_root if self.dataset.is_zip else self.dataset.path,
        tci_path
    ) + '.jp2'
[ "def", "tci_path", "(", "self", ")", ":", "tci_paths", "=", "[", "path", "for", "path", "in", "self", ".", "dataset", ".", "_product_metadata", ".", "xpath", "(", "\".//Granule[@granuleIdentifier='%s']/IMAGE_FILE/text()\"", "%", "self", ".", "granule_identifier", ")", "if", "path", ".", "endswith", "(", "'TCI'", ")", "]", "try", ":", "tci_path", "=", "tci_paths", "[", "0", "]", "except", "IndexError", ":", "return", "None", "return", "os", ".", "path", ".", "join", "(", "self", ".", "dataset", ".", "_zip_root", "if", "self", ".", "dataset", ".", "is_zip", "else", "self", ".", "dataset", ".", "path", ",", "tci_path", ")", "+", "'.jp2'" ]
Return the path to the granules TrueColorImage.
[ "Return", "the", "path", "to", "the", "granules", "TrueColorImage", "." ]
python
train
33.411765
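The tci_path property above is easiest to reach through s2reader's top-level open() helper. A minimal usage sketch follows, assuming s2reader is installed and that "S2A_example.SAFE" is a placeholder product path, not a real file.

import s2reader

# Placeholder path to a Sentinel-2 SAFE product (zip or directory).
with s2reader.open("S2A_example.SAFE") as dataset:
    for granule in dataset.granules:
        # tci_path returns the granule's TCI JP2 path, or None if absent.
        print(granule.granule_identifier, granule.tci_path)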
carljm/django-adminfiles
adminfiles/flickr.py
https://github.com/carljm/django-adminfiles/blob/b01dc7be266305d575c11d5ff9a37ccac04a78c2/adminfiles/flickr.py#L741-L750
def contacts_getPublicList(user_id):
    """Gets the contacts (Users) for the user_id"""
    method = 'flickr.contacts.getPublicList'
    data = _doget(method, auth=False, user_id=user_id)
    if isinstance(data.rsp.contacts.contact, list):
        return [User(user.nsid, username=user.username) \
                for user in data.rsp.contacts.contact]
    else:
        user = data.rsp.contacts.contact
        return [User(user.nsid, username=user.username)]
[ "def", "contacts_getPublicList", "(", "user_id", ")", ":", "method", "=", "'flickr.contacts.getPublicList'", "data", "=", "_doget", "(", "method", ",", "auth", "=", "False", ",", "user_id", "=", "user_id", ")", "if", "isinstance", "(", "data", ".", "rsp", ".", "contacts", ".", "contact", ",", "list", ")", ":", "return", "[", "User", "(", "user", ".", "nsid", ",", "username", "=", "user", ".", "username", ")", "for", "user", "in", "data", ".", "rsp", ".", "contacts", ".", "contact", "]", "else", ":", "user", "=", "data", ".", "rsp", ".", "contacts", ".", "contact", "return", "[", "User", "(", "user", ".", "nsid", ",", "username", "=", "user", ".", "username", ")", "]" ]
Gets the contacts (Users) for the user_id
[ "Gets", "the", "contacts", "(", "Users", ")", "for", "the", "user_id" ]
python
train
45.2
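The helper above normalizes Flickr's single-contact response into a one-element list. A hedged usage sketch, assuming django-adminfiles' bundled flickr module is configured with a valid API key; the NSID shown is a placeholder, not a real account.

from adminfiles import flickr

# '12345678@N00' is a placeholder Flickr NSID.
contacts = flickr.contacts_getPublicList('12345678@N00')
for user in contacts:
    print(user.nsid, user.username)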
LISE-B26/pylabcontrol
build/lib/pylabcontrol/src/core/scripts.py
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/core/scripts.py#L1233-L1250
def get_script_module(script_information, package='pylabcontrol', verbose=False):
    """
    wrapper to get the module for a script

    Args:
        script_information: information of the script. This can be
            - a dictionary
            - a Script instance
            - name of Script class
        package (optional): name of the package to which the script belongs, i.e. pylabcontrol or b26toolkit
            only used when script_information is a string

    Returns:
        module
    """

    module, _, _, _, _, _, _ = Script.get_script_information(script_information=script_information,
                                                             package=package, verbose=verbose)

    return module
[ "def", "get_script_module", "(", "script_information", ",", "package", "=", "'pylabcontrol'", ",", "verbose", "=", "False", ")", ":", "module", ",", "_", ",", "_", ",", "_", ",", "_", ",", "_", ",", "_", "=", "Script", ".", "get_script_information", "(", "script_information", "=", "script_information", ",", "package", "=", "package", ",", "verbose", "=", "verbose", ")", "return", "module" ]
wrapper to get the module for a script Args: script_information: information of the script. This can be - a dictionary - a Script instance - name of Script class package (optional): name of the package to which the script belongs, i.e. pylabcontrol or b26toolkit only used when script_information is a string Returns: module
[ "wrapper", "to", "get", "the", "module", "for", "a", "script" ]
python
train
38.055556
raiden-network/raiden
raiden/connection_manager.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/connection_manager.py#L375-L435
def _open_channels(self) -> bool:
    """ Open channels until there are `self.initial_channel_target`
    channels open. Do nothing if there are enough channels open already.

    Note:
        - This method must be called with the lock held.

    Return:
        - False if no channels could be opened
    """

    open_channels = views.get_channelstate_open(
        chain_state=views.state_from_raiden(self.raiden),
        payment_network_id=self.registry_address,
        token_address=self.token_address,
    )
    open_channels = [
        channel_state
        for channel_state in open_channels
        if channel_state.partner_state.address != self.BOOTSTRAP_ADDR
    ]
    funded_channels = [
        channel_state for channel_state in open_channels
        if channel_state.our_state.contract_balance >= self._initial_funding_per_partner
    ]
    nonfunded_channels = [
        channel_state for channel_state in open_channels
        if channel_state not in funded_channels
    ]
    possible_new_partners = self._find_new_partners()
    if possible_new_partners == 0:
        return False

    # if we already met our target, break
    if len(funded_channels) >= self.initial_channel_target:
        return False
    # if we didn't, but there's no nonfunded channels and no available partners
    # it means the network is smaller than our target, so we should also break
    if not nonfunded_channels and possible_new_partners == 0:
        return False

    n_to_join = self.initial_channel_target - len(funded_channels)
    nonfunded_partners = [
        channel_state.partner_state.address
        for channel_state in nonfunded_channels
    ]
    # first, fund nonfunded channels, then open and fund with possible_new_partners,
    # until initial_channel_target of funded channels is met
    join_partners = (nonfunded_partners + possible_new_partners)[:n_to_join]

    log.debug(
        'Spawning greenlets to join partners',
        node=pex(self.raiden.address),
        num_greenlets=len(join_partners),
    )
    greenlets = set(
        gevent.spawn(self._join_partner, partner)
        for partner in join_partners
    )
    gevent.joinall(greenlets, raise_error=True)
    return True
[ "def", "_open_channels", "(", "self", ")", "->", "bool", ":", "open_channels", "=", "views", ".", "get_channelstate_open", "(", "chain_state", "=", "views", ".", "state_from_raiden", "(", "self", ".", "raiden", ")", ",", "payment_network_id", "=", "self", ".", "registry_address", ",", "token_address", "=", "self", ".", "token_address", ",", ")", "open_channels", "=", "[", "channel_state", "for", "channel_state", "in", "open_channels", "if", "channel_state", ".", "partner_state", ".", "address", "!=", "self", ".", "BOOTSTRAP_ADDR", "]", "funded_channels", "=", "[", "channel_state", "for", "channel_state", "in", "open_channels", "if", "channel_state", ".", "our_state", ".", "contract_balance", ">=", "self", ".", "_initial_funding_per_partner", "]", "nonfunded_channels", "=", "[", "channel_state", "for", "channel_state", "in", "open_channels", "if", "channel_state", "not", "in", "funded_channels", "]", "possible_new_partners", "=", "self", ".", "_find_new_partners", "(", ")", "if", "possible_new_partners", "==", "0", ":", "return", "False", "# if we already met our target, break", "if", "len", "(", "funded_channels", ")", ">=", "self", ".", "initial_channel_target", ":", "return", "False", "# if we didn't, but there's no nonfunded channels and no available partners", "# it means the network is smaller than our target, so we should also break", "if", "not", "nonfunded_channels", "and", "possible_new_partners", "==", "0", ":", "return", "False", "n_to_join", "=", "self", ".", "initial_channel_target", "-", "len", "(", "funded_channels", ")", "nonfunded_partners", "=", "[", "channel_state", ".", "partner_state", ".", "address", "for", "channel_state", "in", "nonfunded_channels", "]", "# first, fund nonfunded channels, then open and fund with possible_new_partners,", "# until initial_channel_target of funded channels is met", "join_partners", "=", "(", "nonfunded_partners", "+", "possible_new_partners", ")", "[", ":", "n_to_join", "]", "log", ".", "debug", "(", "'Spawning greenlets to join partners'", ",", "node", "=", "pex", "(", "self", ".", "raiden", ".", "address", ")", ",", "num_greenlets", "=", "len", "(", "join_partners", ")", ",", ")", "greenlets", "=", "set", "(", "gevent", ".", "spawn", "(", "self", ".", "_join_partner", ",", "partner", ")", "for", "partner", "in", "join_partners", ")", "gevent", ".", "joinall", "(", "greenlets", ",", "raise_error", "=", "True", ")", "return", "True" ]
Open channels until there are `self.initial_channel_target` channels open. Do nothing if there are enough channels open already. Note: - This method must be called with the lock held. Return: - False if no channels could be opened
[ "Open", "channels", "until", "there", "are", "self", ".", "initial_channel_target", "channels", "open", ".", "Do", "nothing", "if", "there", "are", "enough", "channels", "open", "already", "." ]
python
train
38.836066
google/grr
grr/server/grr_response_server/gui/wsgiapp.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/gui/wsgiapp.py#L212-L224
def _BuildToken(self, request, execution_time):
    """Build an ACLToken from the request."""
    token = access_control.ACLToken(
        username=request.user,
        reason=request.args.get("reason", ""),
        process="GRRAdminUI",
        expiry=rdfvalue.RDFDatetime.Now() + execution_time)

    for field in ["Remote_Addr", "X-Forwarded-For"]:
        remote_addr = request.headers.get(field, "")
        if remote_addr:
            token.source_ips.append(remote_addr)
    return token
[ "def", "_BuildToken", "(", "self", ",", "request", ",", "execution_time", ")", ":", "token", "=", "access_control", ".", "ACLToken", "(", "username", "=", "request", ".", "user", ",", "reason", "=", "request", ".", "args", ".", "get", "(", "\"reason\"", ",", "\"\"", ")", ",", "process", "=", "\"GRRAdminUI\"", ",", "expiry", "=", "rdfvalue", ".", "RDFDatetime", ".", "Now", "(", ")", "+", "execution_time", ")", "for", "field", "in", "[", "\"Remote_Addr\"", ",", "\"X-Forwarded-For\"", "]", ":", "remote_addr", "=", "request", ".", "headers", ".", "get", "(", "field", ",", "\"\"", ")", "if", "remote_addr", ":", "token", ".", "source_ips", ".", "append", "(", "remote_addr", ")", "return", "token" ]
Build an ACLToken from the request.
[ "Build", "an", "ACLToken", "from", "the", "request", "." ]
python
train
36.538462
inveniosoftware/invenio-userprofiles
invenio_userprofiles/views.py
https://github.com/inveniosoftware/invenio-userprofiles/blob/4c682e7d67a4cab8dc38472a31fa1c34cbba03dd/invenio_userprofiles/views.py#L44-L51
def init_common(app):
    """Post initialization."""
    if app.config['USERPROFILES_EXTEND_SECURITY_FORMS']:
        security_ext = app.extensions['security']
        security_ext.confirm_register_form = confirm_register_form_factory(
            security_ext.confirm_register_form)
        security_ext.register_form = register_form_factory(
            security_ext.register_form)
[ "def", "init_common", "(", "app", ")", ":", "if", "app", ".", "config", "[", "'USERPROFILES_EXTEND_SECURITY_FORMS'", "]", ":", "security_ext", "=", "app", ".", "extensions", "[", "'security'", "]", "security_ext", ".", "confirm_register_form", "=", "confirm_register_form_factory", "(", "security_ext", ".", "confirm_register_form", ")", "security_ext", ".", "register_form", "=", "register_form_factory", "(", "security_ext", ".", "register_form", ")" ]
Post initialization.
[ "Post", "initialization", "." ]
python
train
47
Telefonica/toolium
toolium/utils.py
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/utils.py#L339-L356
def wait_until_first_element_is_found(self, elements, timeout=None):
    """Search list of elements and wait until one of them is found

    :param elements: list of PageElements or element locators as a tuple (locator_type, locator_value)
                     to be found sequentially
    :param timeout: max time to wait
    :returns: first element found
    :rtype: toolium.pageelements.PageElement or tuple
    :raises TimeoutException: If no element in the list is found after the timeout
    """
    try:
        return self._wait_until(self._expected_condition_find_first_element, elements, timeout)
    except TimeoutException as exception:
        msg = 'None of the page elements has been found after %s seconds'
        timeout = timeout if timeout else self.get_explicitly_wait()
        self.logger.error(msg, timeout)
        exception.msg += "\n {}".format(msg % timeout)
        raise exception
[ "def", "wait_until_first_element_is_found", "(", "self", ",", "elements", ",", "timeout", "=", "None", ")", ":", "try", ":", "return", "self", ".", "_wait_until", "(", "self", ".", "_expected_condition_find_first_element", ",", "elements", ",", "timeout", ")", "except", "TimeoutException", "as", "exception", ":", "msg", "=", "'None of the page elements has been found after %s seconds'", "timeout", "=", "timeout", "if", "timeout", "else", "self", ".", "get_explicitly_wait", "(", ")", "self", ".", "logger", ".", "error", "(", "msg", ",", "timeout", ")", "exception", ".", "msg", "+=", "\"\\n {}\"", ".", "format", "(", "msg", "%", "timeout", ")", "raise", "exception" ]
Search list of elements and wait until one of them is found :param elements: list of PageElements or element locators as a tuple (locator_type, locator_value) to be found sequentially :param timeout: max time to wait :returns: first element found :rtype: toolium.pageelements.PageElement or tuple :raises TimeoutException: If no element in the list is found after the timeout
[ "Search", "list", "of", "elements", "and", "wait", "until", "one", "of", "them", "is", "found" ]
python
train
53.222222
docker/docker-py
docker/api/image.py
https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/api/image.py#L421-L480
def push(self, repository, tag=None, stream=False, auth_config=None,
         decode=False):
    """
    Push an image or a repository to the registry. Similar to the ``docker
    push`` command.

    Args:
        repository (str): The repository to push to
        tag (str): An optional tag to push
        stream (bool): Stream the output as a blocking generator
        auth_config (dict): Override the credentials that are found in the
            config for this request.  ``auth_config`` should contain the
            ``username`` and ``password`` keys to be valid.
        decode (bool): Decode the JSON data from the server into dicts.
            Only applies with ``stream=True``

    Returns:
        (generator or str): The output from the server.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.

    Example:
        >>> for line in cli.push('yourname/app', stream=True, decode=True):
        ...   print(line)
        {'status': 'Pushing repository yourname/app (1 tags)'}
        {'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'}
        {'status': 'Image already pushed, skipping', 'progressDetail':{},
         'id': '511136ea3c5a'}
        ...
    """
    if not tag:
        repository, tag = utils.parse_repository_tag(repository)
    registry, repo_name = auth.resolve_repository_name(repository)
    u = self._url("/images/{0}/push", repository)
    params = {
        'tag': tag
    }
    headers = {}

    if auth_config is None:
        header = auth.get_config_header(self, registry)
        if header:
            headers['X-Registry-Auth'] = header
    else:
        log.debug('Sending supplied auth config')
        headers['X-Registry-Auth'] = auth.encode_header(auth_config)

    response = self._post_json(
        u, None, headers=headers, stream=stream, params=params
    )

    self._raise_for_status(response)

    if stream:
        return self._stream_helper(response, decode=decode)

    return self._result(response)
[ "def", "push", "(", "self", ",", "repository", ",", "tag", "=", "None", ",", "stream", "=", "False", ",", "auth_config", "=", "None", ",", "decode", "=", "False", ")", ":", "if", "not", "tag", ":", "repository", ",", "tag", "=", "utils", ".", "parse_repository_tag", "(", "repository", ")", "registry", ",", "repo_name", "=", "auth", ".", "resolve_repository_name", "(", "repository", ")", "u", "=", "self", ".", "_url", "(", "\"/images/{0}/push\"", ",", "repository", ")", "params", "=", "{", "'tag'", ":", "tag", "}", "headers", "=", "{", "}", "if", "auth_config", "is", "None", ":", "header", "=", "auth", ".", "get_config_header", "(", "self", ",", "registry", ")", "if", "header", ":", "headers", "[", "'X-Registry-Auth'", "]", "=", "header", "else", ":", "log", ".", "debug", "(", "'Sending supplied auth config'", ")", "headers", "[", "'X-Registry-Auth'", "]", "=", "auth", ".", "encode_header", "(", "auth_config", ")", "response", "=", "self", ".", "_post_json", "(", "u", ",", "None", ",", "headers", "=", "headers", ",", "stream", "=", "stream", ",", "params", "=", "params", ")", "self", ".", "_raise_for_status", "(", "response", ")", "if", "stream", ":", "return", "self", ".", "_stream_helper", "(", "response", ",", "decode", "=", "decode", ")", "return", "self", ".", "_result", "(", "response", ")" ]
Push an image or a repository to the registry. Similar to the ``docker push`` command. Args: repository (str): The repository to push to tag (str): An optional tag to push stream (bool): Stream the output as a blocking generator auth_config (dict): Override the credentials that are found in the config for this request. ``auth_config`` should contain the ``username`` and ``password`` keys to be valid. decode (bool): Decode the JSON data from the server into dicts. Only applies with ``stream=True`` Returns: (generator or str): The output from the server. Raises: :py:class:`docker.errors.APIError` If the server returns an error. Example: >>> for line in cli.push('yourname/app', stream=True, decode=True): ... print(line) {'status': 'Pushing repository yourname/app (1 tags)'} {'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'} {'status': 'Image already pushed, skipping', 'progressDetail':{}, 'id': '511136ea3c5a'} ...
[ "Push", "an", "image", "or", "a", "repository", "to", "the", "registry", ".", "Similar", "to", "the", "docker", "push", "command", "." ]
python
train
36.066667
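A minimal sketch of calling push() through docker-py's low-level APIClient; the repository name is a placeholder, registry credentials are assumed to be in the local Docker config, and the daemon socket path may differ on your system.

import docker

# Low-level API client; the socket URL is the common Linux default.
client = docker.APIClient(base_url='unix://var/run/docker.sock')

# Placeholder repository/tag; with stream=True and decode=True each
# progress line arrives as an already-parsed dict.
for line in client.push('yourname/app', tag='latest', stream=True, decode=True):
    print(line.get('status', line))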
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/bulk.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/bulk.py#L241-L252
def add_replace(self, selector, replacement, upsert=False,
                collation=None):
    """Create a replace document and add it to the list of ops.
    """
    validate_ok_for_replace(replacement)
    cmd = SON([('q', selector), ('u', replacement),
               ('multi', False), ('upsert', upsert)])
    collation = validate_collation_or_none(collation)
    if collation is not None:
        self.uses_collation = True
        cmd['collation'] = collation
    self.ops.append((_UPDATE, cmd))
[ "def", "add_replace", "(", "self", ",", "selector", ",", "replacement", ",", "upsert", "=", "False", ",", "collation", "=", "None", ")", ":", "validate_ok_for_replace", "(", "replacement", ")", "cmd", "=", "SON", "(", "[", "(", "'q'", ",", "selector", ")", ",", "(", "'u'", ",", "replacement", ")", ",", "(", "'multi'", ",", "False", ")", ",", "(", "'upsert'", ",", "upsert", ")", "]", ")", "collation", "=", "validate_collation_or_none", "(", "collation", ")", "if", "collation", "is", "not", "None", ":", "self", ".", "uses_collation", "=", "True", "cmd", "[", "'collation'", "]", "=", "collation", "self", ".", "ops", ".", "append", "(", "(", "_UPDATE", ",", "cmd", ")", ")" ]
Create a replace document and add it to the list of ops.
[ "Create", "a", "replace", "document", "and", "add", "it", "to", "the", "list", "of", "ops", "." ]
python
train
44.583333
orbingol/NURBS-Python
geomdl/helpers.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/helpers.py#L173-L190
def basis_functions(degree, knot_vector, spans, knots):
    """ Computes the non-vanishing basis functions for a list of parameters.

    :param degree: degree, :math:`p`
    :type degree: int
    :param knot_vector: knot vector, :math:`U`
    :type knot_vector: list, tuple
    :param spans: list of knot spans
    :type spans: list, tuple
    :param knots: list of knots or parameters
    :type knots: list, tuple
    :return: basis functions
    :rtype: list
    """
    basis = []
    for span, knot in zip(spans, knots):
        basis.append(basis_function(degree, knot_vector, span, knot))
    return basis
[ "def", "basis_functions", "(", "degree", ",", "knot_vector", ",", "spans", ",", "knots", ")", ":", "basis", "=", "[", "]", "for", "span", ",", "knot", "in", "zip", "(", "spans", ",", "knots", ")", ":", "basis", ".", "append", "(", "basis_function", "(", "degree", ",", "knot_vector", ",", "span", ",", "knot", ")", ")", "return", "basis" ]
Computes the non-vanishing basis functions for a list of parameters. :param degree: degree, :math:`p` :type degree: int :param knot_vector: knot vector, :math:`U` :type knot_vector: list, tuple :param spans: list of knot spans :type spans: list, tuple :param knots: list of knots or parameters :type knots: list, tuple :return: basis functions :rtype: list
[ "Computes", "the", "non", "-", "vanishing", "basis", "functions", "for", "a", "list", "of", "parameters", "." ]
python
train
33.111111
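A small worked sketch of basis_functions() for a clamped quadratic knot vector; the span indices are chosen by hand here (span 2 covers the whole [0, 1) interval for this knot vector), which is an assumption in place of geomdl's span-finding helpers.

from geomdl import helpers

degree = 2
knot_vector = [0, 0, 0, 1, 1, 1]   # clamped quadratic, single non-zero span
params = [0.25, 0.5]
spans = [2, 2]                     # hand-picked span index for both parameters

# Each inner list holds the degree+1 non-vanishing basis function values,
# e.g. [0.25, 0.5, 0.25] at u=0.5 (the quadratic Bernstein polynomials).
print(helpers.basis_functions(degree, knot_vector, spans, params))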
mwickert/scikit-dsp-comm
sk_dsp_comm/fec_conv.py
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/fec_conv.py#L621-L708
def depuncture(self, soft_bits, puncture_pattern=('110', '101'),
               erase_value=3.5):
    """
    Apply de-puncturing to the soft bits coming from the channel. Erasure bits
    are inserted to return the soft bit values back to a form that can be
    Viterbi decoded.

    :param soft_bits:
    :param puncture_pattern:
    :param erase_value:
    :return:

    Examples
    --------
    This example uses the following puncture matrix:

    .. math::

       \\begin{align*}
           \\mathbf{A} = \\begin{bmatrix}
            1 & 1 & 0 \\\\
            1 & 0 & 1
            \\end{bmatrix}
       \\end{align*}

    The upper row operates on the outputs for the :math:`G_{1}` polynomial and
    the lower row operates on the outputs of the :math:`G_{2}` polynomial.

    >>> import numpy as np
    >>> from sk_dsp_comm.fec_conv import fec_conv
    >>> cc = fec_conv(('101','111'))
    >>> x = np.array([0, 0, 1, 1, 1, 0, 0, 0, 0, 0])
    >>> state = '00'
    >>> y, state = cc.conv_encoder(x, state)
    >>> yp = cc.puncture(y, ('110','101'))
    >>> cc.depuncture(yp, ('110', '101'), 1)
    array([ 0., 0., 0., 1., 1., 1., 1., 0., 0., 1., 1., 0., 1., 1., 0., 1., 1., 0.]
    """
    # Check to see that the length of soft_bits is consistent with a rate
    # 1/2 code.
    L_pp = len(puncture_pattern[0])
    L_pp1 = len([g1 for g1 in puncture_pattern[0] if g1 == '1'])
    L_pp0 = len([g1 for g1 in puncture_pattern[0] if g1 == '0'])
    #L_pp0 = len([g1 for g1 in pp1 if g1 == '0'])
    N_softwords = int(np.floor(len(soft_bits)/float(2)))
    if 2*N_softwords != len(soft_bits):
        warnings.warn('Number of soft bits must be even!')
        warnings.warn('Truncating bits to be compatible.')
        soft_bits = soft_bits[:2*N_softwords]
    # Extract the G1p and G2p encoded bits from the serial stream.
    # Assume the stream is of the form [G1p G2p G1p G2p ... ],
    # which for QPSK may be of the form [Ip Qp Ip Qp Ip Qp ... ]
    x_G1 = soft_bits.reshape(N_softwords, 2).take([0], axis=1).reshape(1, N_softwords).flatten()
    x_G2 = soft_bits.reshape(N_softwords, 2).take([1], axis=1).reshape(1, N_softwords).flatten()
    # Check to see that the length of x_G1 and x_G2 is consistent with the
    # puncture length period of the soft bits
    N_punct_periods = int(np.floor(N_softwords/float(L_pp1)))
    if L_pp1*N_punct_periods != N_softwords:
        warnings.warn('Number of soft bits per puncture period is %d' % L_pp1)
        warnings.warn('The number of soft bits is not a multiple')
        warnings.warn('Truncating soft bits to be compatible.')
        x_G1 = x_G1[:L_pp1*N_punct_periods]
        x_G2 = x_G2[:L_pp1*N_punct_periods]
    x_G1 = x_G1.reshape(N_punct_periods, L_pp1)
    x_G2 = x_G2.reshape(N_punct_periods, L_pp1)
    #Depuncture x_G1 and x_G1
    g1_pp1 = [k for k, g1 in enumerate(puncture_pattern[0]) if g1 == '1']
    g1_pp0 = [k for k, g1 in enumerate(puncture_pattern[0]) if g1 == '0']
    g2_pp1 = [k for k, g2 in enumerate(puncture_pattern[1]) if g2 == '1']
    g2_pp0 = [k for k, g2 in enumerate(puncture_pattern[1]) if g2 == '0']
    x_E = erase_value*np.ones((N_punct_periods, L_pp0))
    y_G1 = np.hstack((x_G1, x_E))
    y_G2 = np.hstack((x_G2, x_E))
    [g1_pp1.append(val) for idx, val in enumerate(g1_pp0)]
    g1_comp = list(zip(g1_pp1, list(range(L_pp))))
    g1_comp.sort()
    G1_col_permute = [g1_comp[idx][1] for idx in range(L_pp)]
    [g2_pp1.append(val) for idx, val in enumerate(g2_pp0)]
    g2_comp = list(zip(g2_pp1, list(range(L_pp))))
    g2_comp.sort()
    G2_col_permute = [g2_comp[idx][1] for idx in range(L_pp)]
    #permute columns to place erasure bits in the correct position
    y = np.hstack((y_G1[:, G1_col_permute].reshape(L_pp*N_punct_periods, 1),
                   y_G2[:, G2_col_permute].reshape(L_pp*N_punct_periods,
                   1))).reshape(1, 2*L_pp*N_punct_periods).flatten()
    return y
[ "def", "depuncture", "(", "self", ",", "soft_bits", ",", "puncture_pattern", "=", "(", "'110'", ",", "'101'", ")", ",", "erase_value", "=", "3.5", ")", ":", "# Check to see that the length of soft_bits is consistent with a rate\r", "# 1/2 code.\r", "L_pp", "=", "len", "(", "puncture_pattern", "[", "0", "]", ")", "L_pp1", "=", "len", "(", "[", "g1", "for", "g1", "in", "puncture_pattern", "[", "0", "]", "if", "g1", "==", "'1'", "]", ")", "L_pp0", "=", "len", "(", "[", "g1", "for", "g1", "in", "puncture_pattern", "[", "0", "]", "if", "g1", "==", "'0'", "]", ")", "#L_pp0 = len([g1 for g1 in pp1 if g1 == '0'])\r", "N_softwords", "=", "int", "(", "np", ".", "floor", "(", "len", "(", "soft_bits", ")", "/", "float", "(", "2", ")", ")", ")", "if", "2", "*", "N_softwords", "!=", "len", "(", "soft_bits", ")", ":", "warnings", ".", "warn", "(", "'Number of soft bits must be even!'", ")", "warnings", ".", "warn", "(", "'Truncating bits to be compatible.'", ")", "soft_bits", "=", "soft_bits", "[", ":", "2", "*", "N_softwords", "]", "# Extract the G1p and G2p encoded bits from the serial stream.\r", "# Assume the stream is of the form [G1p G2p G1p G2p ... ],\r", "# which for QPSK may be of the form [Ip Qp Ip Qp Ip Qp ... ]\r", "x_G1", "=", "soft_bits", ".", "reshape", "(", "N_softwords", ",", "2", ")", ".", "take", "(", "[", "0", "]", ",", "axis", "=", "1", ")", ".", "reshape", "(", "1", ",", "N_softwords", ")", ".", "flatten", "(", ")", "x_G2", "=", "soft_bits", ".", "reshape", "(", "N_softwords", ",", "2", ")", ".", "take", "(", "[", "1", "]", ",", "axis", "=", "1", ")", ".", "reshape", "(", "1", ",", "N_softwords", ")", ".", "flatten", "(", ")", "# Check to see that the length of x_G1 and x_G2 is consistent with the\r", "# puncture length period of the soft bits\r", "N_punct_periods", "=", "int", "(", "np", ".", "floor", "(", "N_softwords", "/", "float", "(", "L_pp1", ")", ")", ")", "if", "L_pp1", "*", "N_punct_periods", "!=", "N_softwords", ":", "warnings", ".", "warn", "(", "'Number of soft bits per puncture period is %d'", "%", "L_pp1", ")", "warnings", ".", "warn", "(", "'The number of soft bits is not a multiple'", ")", "warnings", ".", "warn", "(", "'Truncating soft bits to be compatible.'", ")", "x_G1", "=", "x_G1", "[", ":", "L_pp1", "*", "N_punct_periods", "]", "x_G2", "=", "x_G2", "[", ":", "L_pp1", "*", "N_punct_periods", "]", "x_G1", "=", "x_G1", ".", "reshape", "(", "N_punct_periods", ",", "L_pp1", ")", "x_G2", "=", "x_G2", ".", "reshape", "(", "N_punct_periods", ",", "L_pp1", ")", "#Depuncture x_G1 and x_G1\r", "g1_pp1", "=", "[", "k", "for", "k", ",", "g1", "in", "enumerate", "(", "puncture_pattern", "[", "0", "]", ")", "if", "g1", "==", "'1'", "]", "g1_pp0", "=", "[", "k", "for", "k", ",", "g1", "in", "enumerate", "(", "puncture_pattern", "[", "0", "]", ")", "if", "g1", "==", "'0'", "]", "g2_pp1", "=", "[", "k", "for", "k", ",", "g2", "in", "enumerate", "(", "puncture_pattern", "[", "1", "]", ")", "if", "g2", "==", "'1'", "]", "g2_pp0", "=", "[", "k", "for", "k", ",", "g2", "in", "enumerate", "(", "puncture_pattern", "[", "1", "]", ")", "if", "g2", "==", "'0'", "]", "x_E", "=", "erase_value", "*", "np", ".", "ones", "(", "(", "N_punct_periods", ",", "L_pp0", ")", ")", "y_G1", "=", "np", ".", "hstack", "(", "(", "x_G1", ",", "x_E", ")", ")", "y_G2", "=", "np", ".", "hstack", "(", "(", "x_G2", ",", "x_E", ")", ")", "[", "g1_pp1", ".", "append", "(", "val", ")", "for", "idx", ",", "val", "in", "enumerate", "(", "g1_pp0", ")", "]", "g1_comp", "=", "list", "(", "zip", "(", "g1_pp1", ",", 
"list", "(", "range", "(", "L_pp", ")", ")", ")", ")", "g1_comp", ".", "sort", "(", ")", "G1_col_permute", "=", "[", "g1_comp", "[", "idx", "]", "[", "1", "]", "for", "idx", "in", "range", "(", "L_pp", ")", "]", "[", "g2_pp1", ".", "append", "(", "val", ")", "for", "idx", ",", "val", "in", "enumerate", "(", "g2_pp0", ")", "]", "g2_comp", "=", "list", "(", "zip", "(", "g2_pp1", ",", "list", "(", "range", "(", "L_pp", ")", ")", ")", ")", "g2_comp", ".", "sort", "(", ")", "G2_col_permute", "=", "[", "g2_comp", "[", "idx", "]", "[", "1", "]", "for", "idx", "in", "range", "(", "L_pp", ")", "]", "#permute columns to place erasure bits in the correct position\r", "y", "=", "np", ".", "hstack", "(", "(", "y_G1", "[", ":", ",", "G1_col_permute", "]", ".", "reshape", "(", "L_pp", "*", "N_punct_periods", ",", "1", ")", ",", "y_G2", "[", ":", ",", "G2_col_permute", "]", ".", "reshape", "(", "L_pp", "*", "N_punct_periods", ",", "1", ")", ")", ")", ".", "reshape", "(", "1", ",", "2", "*", "L_pp", "*", "N_punct_periods", ")", ".", "flatten", "(", ")", "return", "y" ]
Apply de-puncturing to the soft bits coming from the channel. Erasure bits are inserted to return the soft bit values back to a form that can be Viterbi decoded. :param soft_bits: :param puncture_pattern: :param erase_value: :return: Examples -------- This example uses the following puncture matrix: .. math:: \\begin{align*} \\mathbf{A} = \\begin{bmatrix} 1 & 1 & 0 \\\\ 1 & 0 & 1 \\end{bmatrix} \\end{align*} The upper row operates on the outputs for the :math:`G_{1}` polynomial and the lower row operates on the outputs of the :math:`G_{2}` polynomial. >>> import numpy as np >>> from sk_dsp_comm.fec_conv import fec_conv >>> cc = fec_conv(('101','111')) >>> x = np.array([0, 0, 1, 1, 1, 0, 0, 0, 0, 0]) >>> state = '00' >>> y, state = cc.conv_encoder(x, state) >>> yp = cc.puncture(y, ('110','101')) >>> cc.depuncture(yp, ('110', '101'), 1) array([ 0., 0., 0., 1., 1., 1., 1., 0., 0., 1., 1., 0., 1., 1., 0., 1., 1., 0.]
[ "Apply", "de", "-", "puncturing", "to", "the", "soft", "bits", "coming", "from", "the", "channel", ".", "Erasure", "bits", "are", "inserted", "to", "return", "the", "soft", "bit", "values", "back", "to", "a", "form", "that", "can", "be", "Viterbi", "decoded", ".", ":", "param", "soft_bits", ":", ":", "param", "puncture_pattern", ":", ":", "param", "erase_value", ":", ":", "return", ":", "Examples", "--------", "This", "example", "uses", "the", "following", "puncture", "matrix", ":", "..", "math", "::", "\\\\", "begin", "{", "align", "*", "}", "\\\\", "mathbf", "{", "A", "}", "=", "\\\\", "begin", "{", "bmatrix", "}", "1", "&", "1", "&", "0", "\\\\\\\\", "1", "&", "0", "&", "1", "\\\\", "end", "{", "bmatrix", "}", "\\\\", "end", "{", "align", "*", "}", "The", "upper", "row", "operates", "on", "the", "outputs", "for", "the", ":", "math", ":", "G_", "{", "1", "}", "polynomial", "and", "the", "lower", "row", "operates", "on", "the", "outputs", "of", "the", ":", "math", ":", "G_", "{", "2", "}", "polynomial", ".", ">>>", "import", "numpy", "as", "np", ">>>", "from", "sk_dsp_comm", ".", "fec_conv", "import", "fec_conv", ">>>", "cc", "=", "fec_conv", "((", "101", "111", "))", ">>>", "x", "=", "np", ".", "array", "(", "[", "0", "0", "1", "1", "1", "0", "0", "0", "0", "0", "]", ")", ">>>", "state", "=", "00", ">>>", "y", "state", "=", "cc", ".", "conv_encoder", "(", "x", "state", ")", ">>>", "yp", "=", "cc", ".", "puncture", "(", "y", "(", "110", "101", "))", ">>>", "cc", ".", "depuncture", "(", "yp", "(", "110", "101", ")", "1", ")", "array", "(", "[", "0", ".", "0", ".", "0", ".", "1", ".", "1", ".", "1", ".", "1", ".", "0", ".", "0", ".", "1", ".", "1", ".", "0", ".", "1", ".", "1", ".", "0", ".", "1", ".", "1", ".", "0", ".", "]" ]
python
valid
48.556818
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L3202-L3209
def tintWith(self, red, green, blue):
    """tintWith(self, red, green, blue)"""

    if not self.colorspace or self.colorspace.n > 3:
        print("warning: colorspace invalid for function")
        return

    return _fitz.Pixmap_tintWith(self, red, green, blue)
[ "def", "tintWith", "(", "self", ",", "red", ",", "green", ",", "blue", ")", ":", "if", "not", "self", ".", "colorspace", "or", "self", ".", "colorspace", ".", "n", ">", "3", ":", "print", "(", "\"warning: colorspace invalid for function\"", ")", "return", "return", "_fitz", ".", "Pixmap_tintWith", "(", "self", ",", "red", ",", "green", ",", "blue", ")" ]
tintWith(self, red, green, blue)
[ "tintWith", "(", "self", "red", "green", "blue", ")" ]
python
train
34.75
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L4002-L4006
def user_organization_memberships(self, user_id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/organization_memberships#list-memberships"
    api_path = "/api/v2/users/{user_id}/organization_memberships.json"
    api_path = api_path.format(user_id=user_id)
    return self.call(api_path, **kwargs)
[ "def", "user_organization_memberships", "(", "self", ",", "user_id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/users/{user_id}/organization_memberships.json\"", "api_path", "=", "api_path", ".", "format", "(", "user_id", "=", "user_id", ")", "return", "self", ".", "call", "(", "api_path", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/core/organization_memberships#list-memberships
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "organization_memberships#list", "-", "memberships" ]
python
train
65.6
NetworkAutomation/jaide
jaide/core.py
https://github.com/NetworkAutomation/jaide/blob/8571b987a8c24c246dc09f1bcc11cb0f045ec33f/jaide/core.py#L780-L832
def op_cmd(self, command, req_format='text', xpath_expr=""):
    """ Execute an operational mode command.

    Purpose: Used to send an operational mode command to the connected
           | device. This requires and uses a paramiko.SSHClient() as
           | the handler so that we can easily pass and allow all pipe
           | commands to be used.
           |
           | We indiscriminately attach ' | no-more' on the end of
           | every command so the device doesn't hold output. The
           | req_format parameter can be set to 'xml' to force raw
           | xml output in the reply.

    @param command: The single command that to retrieve output from the
                  | device. Any pipes will be taken into account.
    @type command: str
    @param req_format: The desired format of the response, defaults to
                     | 'text', but also accepts 'xml'. **NOTE**: 'xml'
                     | will still return a string, not a libxml ElementTree
    @type req_format: str

    @returns: The reply from the device.
    @rtype: str
    """
    if not command:
        raise InvalidCommandError("Parameter 'command' cannot be empty")
    if req_format.lower() == 'xml' or xpath_expr:
        command = command.strip() + ' | display xml'
    command = command.strip() + ' | no-more\n'
    out = ''
    # when logging in as root, we use _shell to get the response.
    if self.username == 'root':
        self._shell.send(command)
        time.sleep(3)
        while self._shell.recv_ready():
            out += self._shell.recv(999999)
            time.sleep(.75)
        # take off the command being sent and the prompt at the end.
        out = '\n'.join(out.split('\n')[1:-2])
    # not logging in as root, and can grab the output as normal.
    else:
        stdin, stdout, stderr = self._session.exec_command(command=command,
                                                           timeout=float(self.session_timeout))
        stdin.close()
        # read normal output
        while not stdout.channel.exit_status_ready():
            out += stdout.read()
        stdout.close()
        # read errors
        while not stderr.channel.exit_status_ready():
            out += stderr.read()
        stderr.close()
    return out if not xpath_expr else xpath(out, xpath_expr)
[ "def", "op_cmd", "(", "self", ",", "command", ",", "req_format", "=", "'text'", ",", "xpath_expr", "=", "\"\"", ")", ":", "if", "not", "command", ":", "raise", "InvalidCommandError", "(", "\"Parameter 'command' cannot be empty\"", ")", "if", "req_format", ".", "lower", "(", ")", "==", "'xml'", "or", "xpath_expr", ":", "command", "=", "command", ".", "strip", "(", ")", "+", "' | display xml'", "command", "=", "command", ".", "strip", "(", ")", "+", "' | no-more\\n'", "out", "=", "''", "# when logging in as root, we use _shell to get the response.", "if", "self", ".", "username", "==", "'root'", ":", "self", ".", "_shell", ".", "send", "(", "command", ")", "time", ".", "sleep", "(", "3", ")", "while", "self", ".", "_shell", ".", "recv_ready", "(", ")", ":", "out", "+=", "self", ".", "_shell", ".", "recv", "(", "999999", ")", "time", ".", "sleep", "(", ".75", ")", "# take off the command being sent and the prompt at the end.", "out", "=", "'\\n'", ".", "join", "(", "out", ".", "split", "(", "'\\n'", ")", "[", "1", ":", "-", "2", "]", ")", "# not logging in as root, and can grab the output as normal.", "else", ":", "stdin", ",", "stdout", ",", "stderr", "=", "self", ".", "_session", ".", "exec_command", "(", "command", "=", "command", ",", "timeout", "=", "float", "(", "self", ".", "session_timeout", ")", ")", "stdin", ".", "close", "(", ")", "# read normal output", "while", "not", "stdout", ".", "channel", ".", "exit_status_ready", "(", ")", ":", "out", "+=", "stdout", ".", "read", "(", ")", "stdout", ".", "close", "(", ")", "# read errors", "while", "not", "stderr", ".", "channel", ".", "exit_status_ready", "(", ")", ":", "out", "+=", "stderr", ".", "read", "(", ")", "stderr", ".", "close", "(", ")", "return", "out", "if", "not", "xpath_expr", "else", "xpath", "(", "out", ",", "xpath_expr", ")" ]
Execute an operational mode command. Purpose: Used to send an operational mode command to the connected | device. This requires and uses a paramiko.SSHClient() as | the handler so that we can easily pass and allow all pipe | commands to be used. | | We indiscriminately attach ' | no-more' on the end of | every command so the device doesn't hold output. The | req_format parameter can be set to 'xml' to force raw | xml output in the reply. @param command: The single command that to retrieve output from the | device. Any pipes will be taken into account. @type command: str @param req_format: The desired format of the response, defaults to | 'text', but also accepts 'xml'. **NOTE**: 'xml' | will still return a string, not a libxml ElementTree @type req_format: str @returns: The reply from the device. @rtype: str
[ "Execute", "an", "operational", "mode", "command", "." ]
python
train
46
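A hedged sketch of op_cmd() on a Jaide session; the Jaide constructor arguments and its top-level import path are assumptions based on the repository layout, and the host/credentials are placeholders.

from jaide import Jaide  # assumed top-level export of jaide.core.Jaide

# Placeholder device address and credentials.
session = Jaide('192.0.2.10', 'admin', 'password123')

# Plain text output; ' | no-more' is appended automatically.
print(session.op_cmd('show interfaces terse'))

# Request XML and pull out a subtree with an XPath expression.
print(session.op_cmd('show route', req_format='xml', xpath_expr='//rt'))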
IntegralDefense/critsapi
critsapi/critsapi.py
https://github.com/IntegralDefense/critsapi/blob/e770bd81e124eaaeb5f1134ba95f4a35ff345c5a/critsapi/critsapi.py#L682-L721
def status_update(self, crits_id, crits_type, status):
    """
    Update the status of the TLO. By default, the options are:
    - New
    - In Progress
    - Analyzed
    - Deprecated

    Args:
        crits_id: The object id of the TLO
        crits_type: The type of TLO. This must be 'Indicator', ''
        status: The status to change.
    Returns:
        True if the status was updated. False otherwise.
    Raises:
        CRITsInvalidTypeError
    """
    obj_type = self._type_translation(crits_type)
    patch_url = "{0}/{1}/{2}/".format(self.url, obj_type, crits_id)
    params = {
        'api_key': self.api_key,
        'username': self.username,
    }
    data = {
        'action': 'status_update',
        'value': status,
    }

    r = requests.patch(patch_url, params=params, data=data,
                       verify=self.verify, proxies=self.proxies)
    if r.status_code == 200:
        log.debug('Object {} set to {}'.format(crits_id, status))
        return True
    else:
        log.error('Attempted to set object id {} to '
                  'Informational, but did not receive a '
                  '200'.format(crits_id))
        log.error('Error message was: {}'.format(r.text))
        return False
[ "def", "status_update", "(", "self", ",", "crits_id", ",", "crits_type", ",", "status", ")", ":", "obj_type", "=", "self", ".", "_type_translation", "(", "crits_type", ")", "patch_url", "=", "\"{0}/{1}/{2}/\"", ".", "format", "(", "self", ".", "url", ",", "obj_type", ",", "crits_id", ")", "params", "=", "{", "'api_key'", ":", "self", ".", "api_key", ",", "'username'", ":", "self", ".", "username", ",", "}", "data", "=", "{", "'action'", ":", "'status_update'", ",", "'value'", ":", "status", ",", "}", "r", "=", "requests", ".", "patch", "(", "patch_url", ",", "params", "=", "params", ",", "data", "=", "data", ",", "verify", "=", "self", ".", "verify", ",", "proxies", "=", "self", ".", "proxies", ")", "if", "r", ".", "status_code", "==", "200", ":", "log", ".", "debug", "(", "'Object {} set to {}'", ".", "format", "(", "crits_id", ",", "status", ")", ")", "return", "True", "else", ":", "log", ".", "error", "(", "'Attempted to set object id {} to '", "'Informational, but did not receive a '", "'200'", ".", "format", "(", "crits_id", ")", ")", "log", ".", "error", "(", "'Error message was: {}'", ".", "format", "(", "r", ".", "text", ")", ")", "return", "False" ]
Update the status of the TLO. By default, the options are: - New - In Progress - Analyzed - Deprecated Args: crits_id: The object id of the TLO crits_type: The type of TLO. This must be 'Indicator', '' status: The status to change. Returns: True if the status was updated. False otherwise. Raises: CRITsInvalidTypeError
[ "Update", "the", "status", "of", "the", "TLO", ".", "By", "default", "the", "options", "are", ":", "-", "New", "-", "In", "Progress", "-", "Analyzed", "-", "Deprecated" ]
python
train
33.375
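A hedged sketch of driving status_update() from a CRITsAPI client; the constructor keyword names, URL, object id, and credentials below are assumptions for illustration and should be checked against your critsapi installation.

from critsapi.critsapi import CRITsAPI

# All values below are placeholders; the kwarg names are assumed.
crits = CRITsAPI(api_url='https://crits.example.com/api/v1',
                 api_key='YOUR_API_KEY',
                 username='analyst',
                 verify=False)

# Mark an indicator as reviewed; returns True on a 200 response.
ok = crits.status_update(crits_id='5d41402abc4b2a76b9719d91',
                         crits_type='Indicator',
                         status='Analyzed')
print(ok)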
django-parler/django-parler
parler/admin.py
https://github.com/django-parler/django-parler/blob/11ae4af5e8faddb74c69c848870122df4006a54e/parler/admin.py#L295-L303
def get_form(self, request, obj=None, **kwargs):
    """
    Pass the current language to the form.
    """
    form_class = super(TranslatableAdmin, self).get_form(request, obj, **kwargs)
    if self._has_translatable_model():
        form_class.language_code = self.get_form_language(request, obj)
    return form_class
[ "def", "get_form", "(", "self", ",", "request", ",", "obj", "=", "None", ",", "*", "*", "kwargs", ")", ":", "form_class", "=", "super", "(", "TranslatableAdmin", ",", "self", ")", ".", "get_form", "(", "request", ",", "obj", ",", "*", "*", "kwargs", ")", "if", "self", ".", "_has_translatable_model", "(", ")", ":", "form_class", ".", "language_code", "=", "self", ".", "get_form_language", "(", "request", ",", "obj", ")", "return", "form_class" ]
Pass the current language to the form.
[ "Pass", "the", "current", "language", "to", "the", "form", "." ]
python
train
38
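get_form() above is normally exercised by subclassing TranslatableAdmin rather than called directly; a minimal sketch, assuming a hypothetical translatable MyModel registered in a Django project's admin.

from django.contrib import admin
from parler.admin import TranslatableAdmin

from .models import MyModel  # hypothetical translatable model


@admin.register(MyModel)
class MyModelAdmin(TranslatableAdmin):
    # The admin form returned by get_form() is tagged with the active
    # language code, so translated fields are edited per language tab.
    list_display = ('id',)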
Cymmetria/honeycomb
honeycomb/integrationmanager/registration.py
https://github.com/Cymmetria/honeycomb/blob/33ea91b5cf675000e4e85dd02efe580ea6e95c86/honeycomb/integrationmanager/registration.py#L25-L45
def get_integration_module(integration_path):
    """Add custom paths to sys and import integration module.

    :param integration_path: Path to integration folder
    """
    # add custom paths so imports would work
    paths = [
        os.path.join(__file__, "..", ".."),  # to import integrationmanager
        os.path.join(integration_path, ".."),  # to import integration itself
        os.path.join(integration_path, DEPS_DIR),  # to import integration deps
    ]

    for path in paths:
        path = os.path.realpath(path)
        logger.debug("adding %s to path", path)
        sys.path.insert(0, path)

    # get our integration class instance
    integration_name = os.path.basename(integration_path)
    logger.debug("importing %s", ".".join([integration_name, INTEGRATION]))
    return importlib.import_module(".".join([integration_name, INTEGRATION]))
[ "def", "get_integration_module", "(", "integration_path", ")", ":", "# add custom paths so imports would work", "paths", "=", "[", "os", ".", "path", ".", "join", "(", "__file__", ",", "\"..\"", ",", "\"..\"", ")", ",", "# to import integrationmanager", "os", ".", "path", ".", "join", "(", "integration_path", ",", "\"..\"", ")", ",", "# to import integration itself", "os", ".", "path", ".", "join", "(", "integration_path", ",", "DEPS_DIR", ")", ",", "# to import integration deps", "]", "for", "path", "in", "paths", ":", "path", "=", "os", ".", "path", ".", "realpath", "(", "path", ")", "logger", ".", "debug", "(", "\"adding %s to path\"", ",", "path", ")", "sys", ".", "path", ".", "insert", "(", "0", ",", "path", ")", "# get our integration class instance", "integration_name", "=", "os", ".", "path", ".", "basename", "(", "integration_path", ")", "logger", ".", "debug", "(", "\"importing %s\"", ",", "\".\"", ".", "join", "(", "[", "integration_name", ",", "INTEGRATION", "]", ")", ")", "return", "importlib", ".", "import_module", "(", "\".\"", ".", "join", "(", "[", "integration_name", ",", "INTEGRATION", "]", ")", ")" ]
Add custom paths to sys and import integration module. :param integration_path: Path to integration folder
[ "Add", "custom", "paths", "to", "sys", "and", "import", "integration", "module", "." ]
python
train
40.380952
rocky/python-xdis
xdis/bytecode.py
https://github.com/rocky/python-xdis/blob/46a2902ae8f5d8eee495eed67ac0690fd545453d/xdis/bytecode.py#L172-L188
def _get_const_info(const_index, const_list):
    """Helper to get optional details about const references

       Returns the dereferenced constant and its repr if the constant
       list is defined.
       Otherwise returns the constant index and its repr().
    """
    argval = const_index
    if const_list is not None:
        argval = const_list[const_index]

    # float values nan and inf are not directly representable in Python at least
    # before 3.5 and even there it is via a library constant.
    # So we will canonicalize their representation as float('nan') and float('inf')
    if isinstance(argval, float) and str(argval) in frozenset(['nan', '-nan', 'inf', '-inf']):
        return argval, "float('%s')" % argval
    return argval, repr(argval)
[ "def", "_get_const_info", "(", "const_index", ",", "const_list", ")", ":", "argval", "=", "const_index", "if", "const_list", "is", "not", "None", ":", "argval", "=", "const_list", "[", "const_index", "]", "# float values nan and inf are not directly representable in Python at least", "# before 3.5 and even there it is via a library constant.", "# So we will canonicalize their representation as float('nan') and float('inf')", "if", "isinstance", "(", "argval", ",", "float", ")", "and", "str", "(", "argval", ")", "in", "frozenset", "(", "[", "'nan'", ",", "'-nan'", ",", "'inf'", ",", "'-inf'", "]", ")", ":", "return", "argval", ",", "\"float('%s')\"", "%", "argval", "return", "argval", ",", "repr", "(", "argval", ")" ]
Helper to get optional details about const references Returns the dereferenced constant and its repr if the constant list is defined. Otherwise returns the constant index and its repr().
[ "Helper", "to", "get", "optional", "details", "about", "const", "references" ]
python
train
44.176471
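The helper above is private to xdis, but its nan/inf canonicalization is easy to see by calling it directly; a minimal sketch, assuming the module-level import path matches the file shown in this row.

from xdis.bytecode import _get_const_info

consts = (float('nan'), float('inf'), 1.5)

# Non-finite floats are rendered as float('nan') / float('inf') so the
# disassembly round-trips; ordinary constants fall back to repr().
print(_get_const_info(0, consts))   # (nan, "float('nan')")
print(_get_const_info(1, consts))   # (inf, "float('inf')")
print(_get_const_info(2, consts))   # (1.5, '1.5')
print(_get_const_info(3, None))     # (3, '3')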
danielhrisca/asammdf
asammdf/blocks/mdf_v3.py
https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v3.py#L2453-L2902
def get( self, name=None, group=None, index=None, raster=None, samples_only=False, data=None, raw=False, ignore_invalidation_bits=False, source=None, record_offset=0, record_count=None, copy_master=True, ): """Gets channel samples. Channel can be specified in two ways: * using the first positional argument *name* * if *source* is given this will be first used to validate the channel selection * if there are multiple occurances for this channel then the *group* and *index* arguments can be used to select a specific group. * if there are multiple occurances for this channel and either the *group* or *index* arguments is None then a warning is issued * using the group number (keyword argument *group*) and the channel number (keyword argument *index*). Use *info* method for group and channel numbers If the *raster* keyword argument is not *None* the output is interpolated accordingly. Parameters ---------- name : string name of channel group : int 0-based group index index : int 0-based channel index raster : float time raster in seconds samples_only : bool if *True* return only the channel samples as numpy array; if *False* return a *Signal* object data : bytes prevent redundant data read by providing the raw data group samples raw : bool return channel samples without appling the conversion rule; default `False` ignore_invalidation_bits : bool only defined to have the same API with the MDF v4 source : str source name used to select the channel record_offset : int if *data=None* use this to select the record offset from which the group data should be loaded copy_master : bool make a copy of the timebase for this channel Returns ------- res : (numpy.array, None) | Signal returns *Signal* if *samples_only*=*False* (default option), otherwise returns a (numpy.array, None) tuple (for compatibility with MDF v4 class. The *Signal* samples are * numpy recarray for channels that have CDBLOCK or BYTEARRAY type channels * numpy array for all the rest Raises ------ MdfException : * if the channel name is not found * if the group index is out of range * if the channel index is out of range Examples -------- >>> from asammdf import MDF, Signal >>> import numpy as np >>> t = np.arange(5) >>> s = np.ones(5) >>> mdf = MDF(version='3.30') >>> for i in range(4): ... sigs = [Signal(s*(i*10+j), t, name='Sig') for j in range(1, 4)] ... mdf.append(sigs) ... >>> # first group and channel index of the specified channel name ... >>> mdf.get('Sig') UserWarning: Multiple occurances for channel "Sig". Using first occurance from data group 4. Provide both "group" and "index" arguments to select another data group <Signal Sig: samples=[ 1. 1. 1. 1. 1.] timestamps=[0 1 2 3 4] unit="" info=None comment=""> >>> # first channel index in the specified group ... >>> mdf.get('Sig', 1) <Signal Sig: samples=[ 11. 11. 11. 11. 11.] timestamps=[0 1 2 3 4] unit="" info=None comment=""> >>> # channel named Sig from group 1 channel index 2 ... >>> mdf.get('Sig', 1, 2) <Signal Sig: samples=[ 12. 12. 12. 12. 12.] timestamps=[0 1 2 3 4] unit="" info=None comment=""> >>> # channel index 1 or group 2 ... >>> mdf.get(None, 2, 1) <Signal Sig: samples=[ 21. 21. 21. 21. 21.] timestamps=[0 1 2 3 4] unit="" info=None comment=""> >>> mdf.get(group=2, index=1) <Signal Sig: samples=[ 21. 21. 21. 21. 21.] timestamps=[0 1 2 3 4] unit="" info=None comment=""> >>> mdf.get('Sig', source='VN7060') <Signal Sig: samples=[ 12. 12. 12. 12. 12.] 
timestamps=[0 1 2 3 4] unit="" info=None comment=""> """ gp_nr, ch_nr = self._validate_channel_selection( name, group, index, source=source ) original_data = data grp = self.groups[gp_nr] if grp.data_location == v23c.LOCATION_ORIGINAL_FILE: stream = self._file else: stream = self._tempfile channel = grp.channels[ch_nr] conversion = channel.conversion name = channel.name display_name = channel.display_name bit_count = channel.bit_count or 64 dep = grp.channel_dependencies[ch_nr] cycles_nr = grp.channel_group.cycles_nr encoding = None # get data group record if data is None: data = self._load_data( grp, record_offset=record_offset, record_count=record_count ) else: data = (data,) # check if this is a channel array if dep: if dep.dependency_type == v23c.DEPENDENCY_TYPE_VECTOR: shape = [dep.sd_nr] elif dep.dependency_type >= v23c.DEPENDENCY_TYPE_NDIM: shape = [] i = 0 while True: try: dim = dep[f"dim_{i}"] shape.append(dim) i += 1 except KeyError: break shape = shape[::-1] record_shape = tuple(shape) arrays = [ self.get( group=dg_nr, index=ch_nr, samples_only=True, raw=raw, data=original_data, record_offset=record_offset, record_count=record_count, )[0] for dg_nr, ch_nr in dep.referenced_channels ] shape.insert(0, cycles_nr) vals = column_stack(arrays).flatten().reshape(tuple(shape)) arrays = [vals] types = [(channel.name, vals.dtype, record_shape)] types = dtype(types) vals = fromarrays(arrays, dtype=types) if not samples_only or raster: timestamps = self.get_master( gp_nr, original_data, record_offset=record_offset, record_count=record_count, copy_master=copy_master, ) if raster and len(timestamps) > 1: num = float(float32((timestamps[-1] - timestamps[0]) / raster)) if num.is_integer(): t = linspace(timestamps[0], timestamps[-1], int(num)) else: t = arange(timestamps[0], timestamps[-1], raster) vals = ( Signal(vals, timestamps, name="_") .interp(t, interpolation_mode=self._integer_interpolation) .samples ) timestamps = t else: # get channel values channel_values = [] timestamps = [] count = 0 for fragment in data: data_bytes, _offset, _count = fragment parents, dtypes = self._prepare_record(grp) try: parent, bit_offset = parents[ch_nr] except KeyError: parent, bit_offset = None, None bits = channel.bit_count if parent is not None: if grp.record is None: if dtypes.itemsize: record = fromstring(data_bytes, dtype=dtypes) else: record = None else: record = grp.record record.setflags(write=False) vals = record[parent] data_type = channel.data_type size = vals.dtype.itemsize if data_type == v23c.DATA_TYPE_BYTEARRAY: size *= vals.shape[1] vals_dtype = vals.dtype.kind if vals_dtype not in "ui" and (bit_offset or not bits == size * 8): vals = self._get_not_byte_aligned_data(data_bytes, grp, ch_nr) else: dtype_ = vals.dtype kind_ = dtype_.kind if data_type in v23c.INT_TYPES: if kind_ == 'f': if bits != size * 8: vals = self._get_not_byte_aligned_data( data_bytes, grp, ch_nr ) else: dtype_fmt = get_fmt_v3(data_type, bits) channel_dtype = dtype(dtype_fmt.split(')')[-1]) vals = vals.view(channel_dtype) else: if dtype_.byteorder == ">": if bit_offset or bits != size << 3: vals = self._get_not_byte_aligned_data( data_bytes, grp, ch_nr ) else: if bit_offset: if dtype_.kind == "i": vals = vals.astype( dtype(f"{dtype_.byteorder}u{size}") ) vals >>= bit_offset else: vals = vals >> bit_offset if bits != size << 3: if data_type in v23c.SIGNED_INT: vals = as_non_byte_sized_signed_int(vals, bits) else: mask = (1 << bits) - 1 if vals.flags.writeable: vals &= mask else: vals = vals & mask else: if bits != size * 8: 
vals = self._get_not_byte_aligned_data( data_bytes, grp, ch_nr ) else: if kind_ in "ui": dtype_fmt = get_fmt_v3(data_type, bits) channel_dtype = dtype(dtype_fmt.split(')')[-1]) vals = vals.view(channel_dtype) else: vals = self._get_not_byte_aligned_data(data_bytes, grp, ch_nr) if not samples_only or raster: timestamps.append( self.get_master(gp_nr, fragment, copy_master=copy_master) ) if bits == 1 and self._single_bit_uint_as_bool: vals = array(vals, dtype=bool) else: data_type = channel.data_type channel_dtype = array([], dtype=get_fmt_v3(data_type, bits)) if vals.dtype != channel_dtype.dtype: vals = vals.astype(channel_dtype.dtype) channel_values.append(vals.copy()) count += 1 if count > 1: vals = concatenate(channel_values) elif count == 1: vals = channel_values[0] else: vals = [] if not samples_only or raster: if count > 1: timestamps = concatenate(timestamps) else: timestamps = timestamps[0] if raster and len(timestamps) > 1: num = float(float32((timestamps[-1] - timestamps[0]) / raster)) if num.is_integer(): t = linspace(timestamps[0], timestamps[-1], int(num)) else: t = arange(timestamps[0], timestamps[-1], raster) vals = ( Signal(vals, timestamps, name="_") .interp(t, interpolation_mode=self._integer_interpolation) .samples ) timestamps = t if conversion is None: conversion_type = v23c.CONVERSION_TYPE_NONE else: conversion_type = conversion.conversion_type if conversion_type == v23c.CONVERSION_TYPE_NONE: if vals.dtype.kind == "S": encoding = "latin-1" if not raw: if conversion: vals = conversion.convert(vals) conversion = None if samples_only: res = vals, None else: if conversion: unit = conversion.unit else: unit = "" comment = channel.comment description = channel.description.decode("latin-1").strip(" \t\n\0") if comment: comment = f"{comment}\n{description}" else: comment = description source = channel.source if source: if source["type"] == v23c.SOURCE_ECU: source = SignalSource( source.name, source.path, source.comment, 0, # source type other 0, # bus type none ) else: source = SignalSource( source.name, source.path, source.comment, 2, # source type bus 2, # bus type CAN ) master_metadata = self._master_channel_metadata.get(gp_nr, None) res = Signal( samples=vals, timestamps=timestamps, unit=unit, name=channel.name, comment=comment, conversion=conversion, raw=raw, master_metadata=master_metadata, display_name=display_name, source=source, bit_count=bit_count, encoding=encoding, ) return res
[ "def", "get", "(", "self", ",", "name", "=", "None", ",", "group", "=", "None", ",", "index", "=", "None", ",", "raster", "=", "None", ",", "samples_only", "=", "False", ",", "data", "=", "None", ",", "raw", "=", "False", ",", "ignore_invalidation_bits", "=", "False", ",", "source", "=", "None", ",", "record_offset", "=", "0", ",", "record_count", "=", "None", ",", "copy_master", "=", "True", ",", ")", ":", "gp_nr", ",", "ch_nr", "=", "self", ".", "_validate_channel_selection", "(", "name", ",", "group", ",", "index", ",", "source", "=", "source", ")", "original_data", "=", "data", "grp", "=", "self", ".", "groups", "[", "gp_nr", "]", "if", "grp", ".", "data_location", "==", "v23c", ".", "LOCATION_ORIGINAL_FILE", ":", "stream", "=", "self", ".", "_file", "else", ":", "stream", "=", "self", ".", "_tempfile", "channel", "=", "grp", ".", "channels", "[", "ch_nr", "]", "conversion", "=", "channel", ".", "conversion", "name", "=", "channel", ".", "name", "display_name", "=", "channel", ".", "display_name", "bit_count", "=", "channel", ".", "bit_count", "or", "64", "dep", "=", "grp", ".", "channel_dependencies", "[", "ch_nr", "]", "cycles_nr", "=", "grp", ".", "channel_group", ".", "cycles_nr", "encoding", "=", "None", "# get data group record", "if", "data", "is", "None", ":", "data", "=", "self", ".", "_load_data", "(", "grp", ",", "record_offset", "=", "record_offset", ",", "record_count", "=", "record_count", ")", "else", ":", "data", "=", "(", "data", ",", ")", "# check if this is a channel array", "if", "dep", ":", "if", "dep", ".", "dependency_type", "==", "v23c", ".", "DEPENDENCY_TYPE_VECTOR", ":", "shape", "=", "[", "dep", ".", "sd_nr", "]", "elif", "dep", ".", "dependency_type", ">=", "v23c", ".", "DEPENDENCY_TYPE_NDIM", ":", "shape", "=", "[", "]", "i", "=", "0", "while", "True", ":", "try", ":", "dim", "=", "dep", "[", "f\"dim_{i}\"", "]", "shape", ".", "append", "(", "dim", ")", "i", "+=", "1", "except", "KeyError", ":", "break", "shape", "=", "shape", "[", ":", ":", "-", "1", "]", "record_shape", "=", "tuple", "(", "shape", ")", "arrays", "=", "[", "self", ".", "get", "(", "group", "=", "dg_nr", ",", "index", "=", "ch_nr", ",", "samples_only", "=", "True", ",", "raw", "=", "raw", ",", "data", "=", "original_data", ",", "record_offset", "=", "record_offset", ",", "record_count", "=", "record_count", ",", ")", "[", "0", "]", "for", "dg_nr", ",", "ch_nr", "in", "dep", ".", "referenced_channels", "]", "shape", ".", "insert", "(", "0", ",", "cycles_nr", ")", "vals", "=", "column_stack", "(", "arrays", ")", ".", "flatten", "(", ")", ".", "reshape", "(", "tuple", "(", "shape", ")", ")", "arrays", "=", "[", "vals", "]", "types", "=", "[", "(", "channel", ".", "name", ",", "vals", ".", "dtype", ",", "record_shape", ")", "]", "types", "=", "dtype", "(", "types", ")", "vals", "=", "fromarrays", "(", "arrays", ",", "dtype", "=", "types", ")", "if", "not", "samples_only", "or", "raster", ":", "timestamps", "=", "self", ".", "get_master", "(", "gp_nr", ",", "original_data", ",", "record_offset", "=", "record_offset", ",", "record_count", "=", "record_count", ",", "copy_master", "=", "copy_master", ",", ")", "if", "raster", "and", "len", "(", "timestamps", ")", ">", "1", ":", "num", "=", "float", "(", "float32", "(", "(", "timestamps", "[", "-", "1", "]", "-", "timestamps", "[", "0", "]", ")", "/", "raster", ")", ")", "if", "num", ".", "is_integer", "(", ")", ":", "t", "=", "linspace", "(", "timestamps", "[", "0", "]", ",", "timestamps", "[", "-", "1", "]", ",", "int", "(", "num", ")", ")", 
"else", ":", "t", "=", "arange", "(", "timestamps", "[", "0", "]", ",", "timestamps", "[", "-", "1", "]", ",", "raster", ")", "vals", "=", "(", "Signal", "(", "vals", ",", "timestamps", ",", "name", "=", "\"_\"", ")", ".", "interp", "(", "t", ",", "interpolation_mode", "=", "self", ".", "_integer_interpolation", ")", ".", "samples", ")", "timestamps", "=", "t", "else", ":", "# get channel values", "channel_values", "=", "[", "]", "timestamps", "=", "[", "]", "count", "=", "0", "for", "fragment", "in", "data", ":", "data_bytes", ",", "_offset", ",", "_count", "=", "fragment", "parents", ",", "dtypes", "=", "self", ".", "_prepare_record", "(", "grp", ")", "try", ":", "parent", ",", "bit_offset", "=", "parents", "[", "ch_nr", "]", "except", "KeyError", ":", "parent", ",", "bit_offset", "=", "None", ",", "None", "bits", "=", "channel", ".", "bit_count", "if", "parent", "is", "not", "None", ":", "if", "grp", ".", "record", "is", "None", ":", "if", "dtypes", ".", "itemsize", ":", "record", "=", "fromstring", "(", "data_bytes", ",", "dtype", "=", "dtypes", ")", "else", ":", "record", "=", "None", "else", ":", "record", "=", "grp", ".", "record", "record", ".", "setflags", "(", "write", "=", "False", ")", "vals", "=", "record", "[", "parent", "]", "data_type", "=", "channel", ".", "data_type", "size", "=", "vals", ".", "dtype", ".", "itemsize", "if", "data_type", "==", "v23c", ".", "DATA_TYPE_BYTEARRAY", ":", "size", "*=", "vals", ".", "shape", "[", "1", "]", "vals_dtype", "=", "vals", ".", "dtype", ".", "kind", "if", "vals_dtype", "not", "in", "\"ui\"", "and", "(", "bit_offset", "or", "not", "bits", "==", "size", "*", "8", ")", ":", "vals", "=", "self", ".", "_get_not_byte_aligned_data", "(", "data_bytes", ",", "grp", ",", "ch_nr", ")", "else", ":", "dtype_", "=", "vals", ".", "dtype", "kind_", "=", "dtype_", ".", "kind", "if", "data_type", "in", "v23c", ".", "INT_TYPES", ":", "if", "kind_", "==", "'f'", ":", "if", "bits", "!=", "size", "*", "8", ":", "vals", "=", "self", ".", "_get_not_byte_aligned_data", "(", "data_bytes", ",", "grp", ",", "ch_nr", ")", "else", ":", "dtype_fmt", "=", "get_fmt_v3", "(", "data_type", ",", "bits", ")", "channel_dtype", "=", "dtype", "(", "dtype_fmt", ".", "split", "(", "')'", ")", "[", "-", "1", "]", ")", "vals", "=", "vals", ".", "view", "(", "channel_dtype", ")", "else", ":", "if", "dtype_", ".", "byteorder", "==", "\">\"", ":", "if", "bit_offset", "or", "bits", "!=", "size", "<<", "3", ":", "vals", "=", "self", ".", "_get_not_byte_aligned_data", "(", "data_bytes", ",", "grp", ",", "ch_nr", ")", "else", ":", "if", "bit_offset", ":", "if", "dtype_", ".", "kind", "==", "\"i\"", ":", "vals", "=", "vals", ".", "astype", "(", "dtype", "(", "f\"{dtype_.byteorder}u{size}\"", ")", ")", "vals", ">>=", "bit_offset", "else", ":", "vals", "=", "vals", ">>", "bit_offset", "if", "bits", "!=", "size", "<<", "3", ":", "if", "data_type", "in", "v23c", ".", "SIGNED_INT", ":", "vals", "=", "as_non_byte_sized_signed_int", "(", "vals", ",", "bits", ")", "else", ":", "mask", "=", "(", "1", "<<", "bits", ")", "-", "1", "if", "vals", ".", "flags", ".", "writeable", ":", "vals", "&=", "mask", "else", ":", "vals", "=", "vals", "&", "mask", "else", ":", "if", "bits", "!=", "size", "*", "8", ":", "vals", "=", "self", ".", "_get_not_byte_aligned_data", "(", "data_bytes", ",", "grp", ",", "ch_nr", ")", "else", ":", "if", "kind_", "in", "\"ui\"", ":", "dtype_fmt", "=", "get_fmt_v3", "(", "data_type", ",", "bits", ")", "channel_dtype", "=", "dtype", "(", "dtype_fmt", ".", "split", "(", "')'", 
")", "[", "-", "1", "]", ")", "vals", "=", "vals", ".", "view", "(", "channel_dtype", ")", "else", ":", "vals", "=", "self", ".", "_get_not_byte_aligned_data", "(", "data_bytes", ",", "grp", ",", "ch_nr", ")", "if", "not", "samples_only", "or", "raster", ":", "timestamps", ".", "append", "(", "self", ".", "get_master", "(", "gp_nr", ",", "fragment", ",", "copy_master", "=", "copy_master", ")", ")", "if", "bits", "==", "1", "and", "self", ".", "_single_bit_uint_as_bool", ":", "vals", "=", "array", "(", "vals", ",", "dtype", "=", "bool", ")", "else", ":", "data_type", "=", "channel", ".", "data_type", "channel_dtype", "=", "array", "(", "[", "]", ",", "dtype", "=", "get_fmt_v3", "(", "data_type", ",", "bits", ")", ")", "if", "vals", ".", "dtype", "!=", "channel_dtype", ".", "dtype", ":", "vals", "=", "vals", ".", "astype", "(", "channel_dtype", ".", "dtype", ")", "channel_values", ".", "append", "(", "vals", ".", "copy", "(", ")", ")", "count", "+=", "1", "if", "count", ">", "1", ":", "vals", "=", "concatenate", "(", "channel_values", ")", "elif", "count", "==", "1", ":", "vals", "=", "channel_values", "[", "0", "]", "else", ":", "vals", "=", "[", "]", "if", "not", "samples_only", "or", "raster", ":", "if", "count", ">", "1", ":", "timestamps", "=", "concatenate", "(", "timestamps", ")", "else", ":", "timestamps", "=", "timestamps", "[", "0", "]", "if", "raster", "and", "len", "(", "timestamps", ")", ">", "1", ":", "num", "=", "float", "(", "float32", "(", "(", "timestamps", "[", "-", "1", "]", "-", "timestamps", "[", "0", "]", ")", "/", "raster", ")", ")", "if", "num", ".", "is_integer", "(", ")", ":", "t", "=", "linspace", "(", "timestamps", "[", "0", "]", ",", "timestamps", "[", "-", "1", "]", ",", "int", "(", "num", ")", ")", "else", ":", "t", "=", "arange", "(", "timestamps", "[", "0", "]", ",", "timestamps", "[", "-", "1", "]", ",", "raster", ")", "vals", "=", "(", "Signal", "(", "vals", ",", "timestamps", ",", "name", "=", "\"_\"", ")", ".", "interp", "(", "t", ",", "interpolation_mode", "=", "self", ".", "_integer_interpolation", ")", ".", "samples", ")", "timestamps", "=", "t", "if", "conversion", "is", "None", ":", "conversion_type", "=", "v23c", ".", "CONVERSION_TYPE_NONE", "else", ":", "conversion_type", "=", "conversion", ".", "conversion_type", "if", "conversion_type", "==", "v23c", ".", "CONVERSION_TYPE_NONE", ":", "if", "vals", ".", "dtype", ".", "kind", "==", "\"S\"", ":", "encoding", "=", "\"latin-1\"", "if", "not", "raw", ":", "if", "conversion", ":", "vals", "=", "conversion", ".", "convert", "(", "vals", ")", "conversion", "=", "None", "if", "samples_only", ":", "res", "=", "vals", ",", "None", "else", ":", "if", "conversion", ":", "unit", "=", "conversion", ".", "unit", "else", ":", "unit", "=", "\"\"", "comment", "=", "channel", ".", "comment", "description", "=", "channel", ".", "description", ".", "decode", "(", "\"latin-1\"", ")", ".", "strip", "(", "\" \\t\\n\\0\"", ")", "if", "comment", ":", "comment", "=", "f\"{comment}\\n{description}\"", "else", ":", "comment", "=", "description", "source", "=", "channel", ".", "source", "if", "source", ":", "if", "source", "[", "\"type\"", "]", "==", "v23c", ".", "SOURCE_ECU", ":", "source", "=", "SignalSource", "(", "source", ".", "name", ",", "source", ".", "path", ",", "source", ".", "comment", ",", "0", ",", "# source type other", "0", ",", "# bus type none", ")", "else", ":", "source", "=", "SignalSource", "(", "source", ".", "name", ",", "source", ".", "path", ",", "source", ".", "comment", ",", "2", ",", "# source type 
bus", "2", ",", "# bus type CAN", ")", "master_metadata", "=", "self", ".", "_master_channel_metadata", ".", "get", "(", "gp_nr", ",", "None", ")", "res", "=", "Signal", "(", "samples", "=", "vals", ",", "timestamps", "=", "timestamps", ",", "unit", "=", "unit", ",", "name", "=", "channel", ".", "name", ",", "comment", "=", "comment", ",", "conversion", "=", "conversion", ",", "raw", "=", "raw", ",", "master_metadata", "=", "master_metadata", ",", "display_name", "=", "display_name", ",", "source", "=", "source", ",", "bit_count", "=", "bit_count", ",", "encoding", "=", "encoding", ",", ")", "return", "res" ]
Gets channel samples. Channel can be specified in two ways: * using the first positional argument *name* * if *source* is given this will be first used to validate the channel selection * if there are multiple occurances for this channel then the *group* and *index* arguments can be used to select a specific group. * if there are multiple occurances for this channel and either the *group* or *index* arguments is None then a warning is issued * using the group number (keyword argument *group*) and the channel number (keyword argument *index*). Use *info* method for group and channel numbers If the *raster* keyword argument is not *None* the output is interpolated accordingly. Parameters ---------- name : string name of channel group : int 0-based group index index : int 0-based channel index raster : float time raster in seconds samples_only : bool if *True* return only the channel samples as numpy array; if *False* return a *Signal* object data : bytes prevent redundant data read by providing the raw data group samples raw : bool return channel samples without appling the conversion rule; default `False` ignore_invalidation_bits : bool only defined to have the same API with the MDF v4 source : str source name used to select the channel record_offset : int if *data=None* use this to select the record offset from which the group data should be loaded copy_master : bool make a copy of the timebase for this channel Returns ------- res : (numpy.array, None) | Signal returns *Signal* if *samples_only*=*False* (default option), otherwise returns a (numpy.array, None) tuple (for compatibility with MDF v4 class. The *Signal* samples are * numpy recarray for channels that have CDBLOCK or BYTEARRAY type channels * numpy array for all the rest Raises ------ MdfException : * if the channel name is not found * if the group index is out of range * if the channel index is out of range Examples -------- >>> from asammdf import MDF, Signal >>> import numpy as np >>> t = np.arange(5) >>> s = np.ones(5) >>> mdf = MDF(version='3.30') >>> for i in range(4): ... sigs = [Signal(s*(i*10+j), t, name='Sig') for j in range(1, 4)] ... mdf.append(sigs) ... >>> # first group and channel index of the specified channel name ... >>> mdf.get('Sig') UserWarning: Multiple occurances for channel "Sig". Using first occurance from data group 4. Provide both "group" and "index" arguments to select another data group <Signal Sig: samples=[ 1. 1. 1. 1. 1.] timestamps=[0 1 2 3 4] unit="" info=None comment=""> >>> # first channel index in the specified group ... >>> mdf.get('Sig', 1) <Signal Sig: samples=[ 11. 11. 11. 11. 11.] timestamps=[0 1 2 3 4] unit="" info=None comment=""> >>> # channel named Sig from group 1 channel index 2 ... >>> mdf.get('Sig', 1, 2) <Signal Sig: samples=[ 12. 12. 12. 12. 12.] timestamps=[0 1 2 3 4] unit="" info=None comment=""> >>> # channel index 1 or group 2 ... >>> mdf.get(None, 2, 1) <Signal Sig: samples=[ 21. 21. 21. 21. 21.] timestamps=[0 1 2 3 4] unit="" info=None comment=""> >>> mdf.get(group=2, index=1) <Signal Sig: samples=[ 21. 21. 21. 21. 21.] timestamps=[0 1 2 3 4] unit="" info=None comment=""> >>> mdf.get('Sig', source='VN7060') <Signal Sig: samples=[ 12. 12. 12. 12. 12.] timestamps=[0 1 2 3 4] unit="" info=None comment="">
[ "Gets", "channel", "samples", ".", "Channel", "can", "be", "specified", "in", "two", "ways", ":" ]
python
train
34.744444
libtcod/python-tcod
tcod/bsp.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/bsp.py#L228-L242
def inverted_level_order(self) -> Iterator["BSP"]: """Iterate over this BSP's hierarchy in inverse level order. .. versionadded:: 8.3 """ levels = [] # type: List[List['BSP']] next = [self] # type: List['BSP'] while next: levels.append(next) level = next # type: List['BSP'] next = [] for node in level: next.extend(node.children) while levels: yield from levels.pop()
[ "def", "inverted_level_order", "(", "self", ")", "->", "Iterator", "[", "\"BSP\"", "]", ":", "levels", "=", "[", "]", "# type: List[List['BSP']]", "next", "=", "[", "self", "]", "# type: List['BSP']", "while", "next", ":", "levels", ".", "append", "(", "next", ")", "level", "=", "next", "# type: List['BSP']", "next", "=", "[", "]", "for", "node", "in", "level", ":", "next", ".", "extend", "(", "node", ".", "children", ")", "while", "levels", ":", "yield", "from", "levels", ".", "pop", "(", ")" ]
Iterate over this BSP's hierarchy in inverse level order. .. versionadded:: 8.3
[ "Iterate", "over", "this", "BSP", "s", "hierarchy", "in", "inverse", "level", "order", "." ]
python
train
32.666667
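A minimal sketch of what inverted_level_order yields, using a tiny stand-in node class instead of tcod itself so it runs without libtcod installed; only the children attribute is assumed to match the real BSP nodes.

from typing import Iterator, List, Sequence

class Node:
    """Stand-in for a BSP node: only the children attribute matters here."""

    def __init__(self, name: str, children: Sequence["Node"] = ()) -> None:
        self.name = name
        self.children = list(children)

    def inverted_level_order(self) -> Iterator["Node"]:
        levels: List[List["Node"]] = []
        frontier: List["Node"] = [self]
        while frontier:
            levels.append(frontier)
            frontier = [child for node in frontier for child in node.children]
        while levels:                  # deepest level first, the root comes last
            yield from levels.pop()

root = Node("root", [Node("left", [Node("left.a"), Node("left.b")]), Node("right")])
print([n.name for n in root.inverted_level_order()])
# ['left.a', 'left.b', 'left', 'right', 'root']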
avalente/appmetrics
appmetrics/statistics.py
https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/statistics.py#L369-L379
def pstdev(data, mu=None): """Return the square root of the population variance. See ``pvariance`` for arguments and other details. """ var = pvariance(data, mu) try: return var.sqrt() except AttributeError: return math.sqrt(var)
[ "def", "pstdev", "(", "data", ",", "mu", "=", "None", ")", ":", "var", "=", "pvariance", "(", "data", ",", "mu", ")", "try", ":", "return", "var", ".", "sqrt", "(", ")", "except", "AttributeError", ":", "return", "math", ".", "sqrt", "(", "var", ")" ]
Return the square root of the population variance. See ``pvariance`` for arguments and other details.
[ "Return", "the", "square", "root", "of", "the", "population", "variance", "." ]
python
train
23.727273
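A quick numeric check of the quantity pstdev reports (the population standard deviation); plain standard-library arithmetic, no appmetrics import required.

import math

data = [2, 4, 4, 4, 5, 5, 7, 9]
mu = sum(data) / len(data)                             # 5.0
pvar = sum((x - mu) ** 2 for x in data) / len(data)    # population variance -> 4.0
print(math.sqrt(pvar))                                 # 2.0, the value pstdev(data) should report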
driftx/Telephus
telephus/pool.py
https://github.com/driftx/Telephus/blob/860a03a0fafe71605e1a4316dfdd8d0c29094703/telephus/pool.py#L1002-L1038
def fill_pool(self): """ Add connections as necessary to meet the target pool size. If there are no nodes to connect to (because we maxed out connections-per-node on all active connections and any unconnected nodes have pending reconnect timers), call the on_insufficient_nodes callback. """ time_since_last_called = self.fill_pool_throttle if self.fill_pool_last_called is not None: time_since_last_called = time() - self.fill_pool_last_called need = self.target_pool_size - self.num_connectors() if need <= 0 or (self.throttle_timer is not None and self.throttle_timer.active()): return elif time_since_last_called < self.fill_pool_throttle: self.log("Filling pool too quickly, calling again in %.1f seconds" % self.fill_pool_throttle) self._set_fill_pool_timer() return else: try: for num, node in izip(xrange(need), self.choose_nodes_to_connect()): self.make_conn(node) self.fill_pool_last_called = time() except NoNodesAvailable, e: waittime = e.args[0] pending_requests = len(self.request_queue.pending) if self.on_insufficient_nodes: self.on_insufficient_nodes(self.num_active_conns(), self.target_pool_size, pending_requests, waittime if waittime != float('Inf') else None) self.schedule_future_fill_pool(e.args[0]) if self.num_connectors() == 0 and pending_requests > 0: if self.on_insufficient_conns: self.on_insufficient_conns(self.num_connectors(), pending_requests)
[ "def", "fill_pool", "(", "self", ")", ":", "time_since_last_called", "=", "self", ".", "fill_pool_throttle", "if", "self", ".", "fill_pool_last_called", "is", "not", "None", ":", "time_since_last_called", "=", "time", "(", ")", "-", "self", ".", "fill_pool_last_called", "need", "=", "self", ".", "target_pool_size", "-", "self", ".", "num_connectors", "(", ")", "if", "need", "<=", "0", "or", "(", "self", ".", "throttle_timer", "is", "not", "None", "and", "self", ".", "throttle_timer", ".", "active", "(", ")", ")", ":", "return", "elif", "time_since_last_called", "<", "self", ".", "fill_pool_throttle", ":", "self", ".", "log", "(", "\"Filling pool too quickly, calling again in %.1f seconds\"", "%", "self", ".", "fill_pool_throttle", ")", "self", ".", "_set_fill_pool_timer", "(", ")", "return", "else", ":", "try", ":", "for", "num", ",", "node", "in", "izip", "(", "xrange", "(", "need", ")", ",", "self", ".", "choose_nodes_to_connect", "(", ")", ")", ":", "self", ".", "make_conn", "(", "node", ")", "self", ".", "fill_pool_last_called", "=", "time", "(", ")", "except", "NoNodesAvailable", ",", "e", ":", "waittime", "=", "e", ".", "args", "[", "0", "]", "pending_requests", "=", "len", "(", "self", ".", "request_queue", ".", "pending", ")", "if", "self", ".", "on_insufficient_nodes", ":", "self", ".", "on_insufficient_nodes", "(", "self", ".", "num_active_conns", "(", ")", ",", "self", ".", "target_pool_size", ",", "pending_requests", ",", "waittime", "if", "waittime", "!=", "float", "(", "'Inf'", ")", "else", "None", ")", "self", ".", "schedule_future_fill_pool", "(", "e", ".", "args", "[", "0", "]", ")", "if", "self", ".", "num_connectors", "(", ")", "==", "0", "and", "pending_requests", ">", "0", ":", "if", "self", ".", "on_insufficient_conns", ":", "self", ".", "on_insufficient_conns", "(", "self", ".", "num_connectors", "(", ")", ",", "pending_requests", ")" ]
Add connections as necessary to meet the target pool size. If there are no nodes to connect to (because we maxed out connections-per-node on all active connections and any unconnected nodes have pending reconnect timers), call the on_insufficient_nodes callback.
[ "Add", "connections", "as", "necessary", "to", "meet", "the", "target", "pool", "size", ".", "If", "there", "are", "no", "nodes", "to", "connect", "to", "(", "because", "we", "maxed", "out", "connections", "-", "per", "-", "node", "on", "all", "active", "connections", "and", "any", "unconnected", "nodes", "have", "pending", "reconnect", "timers", ")", "call", "the", "on_insufficient_nodes", "callback", "." ]
python
train
51.675676
pycontribs/pyrax
pyrax/autoscale.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/autoscale.py#L172-L176
def execute_policy(self, policy): """ Executes the specified policy for this scaling group. """ return self.manager.execute_policy(scaling_group=self, policy=policy)
[ "def", "execute_policy", "(", "self", ",", "policy", ")", ":", "return", "self", ".", "manager", ".", "execute_policy", "(", "scaling_group", "=", "self", ",", "policy", "=", "policy", ")" ]
Executes the specified policy for this scaling group.
[ "Executes", "the", "specified", "policy", "for", "this", "scaling", "group", "." ]
python
train
38.6
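A hypothetical call sketch for the delegation above; the credential file path and the way the scaling group and policy are looked up (list() / list_policies()) are illustrative assumptions, not taken from the snippet itself.

import pyrax

pyrax.set_setting("identity_type", "rackspace")
pyrax.set_credential_file("~/.rackspace_credentials")   # placeholder path
group = pyrax.autoscale.list()[0]                        # an existing scaling group
policy = group.list_policies()[0]                        # assumes at least one policy is attached
group.execute_policy(policy)                             # forwards to the manager, as shown above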
AlexandreDecan/python-intervals
intervals.py
https://github.com/AlexandreDecan/python-intervals/blob/eda4da7dd39afabab2c1689e0b5158abae08c831/intervals.py#L355-L364
def is_empty(self): """ Test interval emptiness. :return: True if interval is empty, False otherwise. """ return ( self._lower > self._upper or (self._lower == self._upper and (self._left == OPEN or self._right == OPEN)) )
[ "def", "is_empty", "(", "self", ")", ":", "return", "(", "self", ".", "_lower", ">", "self", ".", "_upper", "or", "(", "self", ".", "_lower", "==", "self", ".", "_upper", "and", "(", "self", ".", "_left", "==", "OPEN", "or", "self", ".", "_right", "==", "OPEN", ")", ")", ")" ]
Test interval emptiness. :return: True if interval is empty, False otherwise.
[ "Test", "interval", "emptiness", "." ]
python
train
28.6
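The emptiness rule above restated as a tiny standalone check; the OPEN/CLOSED stand-ins mirror the module-level constants, and this is a sketch of the logic rather than the library's public API.

OPEN, CLOSED = 0, 1

def is_empty(lower, upper, left, right):
    # empty when the bounds are reversed, or equal with at least one open side
    return lower > upper or (lower == upper and (left == OPEN or right == OPEN))

print(is_empty(1, 0, CLOSED, CLOSED))   # True:  reversed bounds
print(is_empty(2, 2, OPEN, CLOSED))     # True:  degenerate interval with an open side
print(is_empty(2, 2, CLOSED, CLOSED))   # False: the singleton [2, 2]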
rsgalloway/grit
grit/server/cherrypy/__init__.py
https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/server/cherrypy/__init__.py#L1327-L1333
def format_exc(limit=None): """Like print_exc() but return a string. Backport for Python 2.3.""" try: etype, value, tb = sys.exc_info() return ''.join(traceback.format_exception(etype, value, tb, limit)) finally: etype = value = tb = None
[ "def", "format_exc", "(", "limit", "=", "None", ")", ":", "try", ":", "etype", ",", "value", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "return", "''", ".", "join", "(", "traceback", ".", "format_exception", "(", "etype", ",", "value", ",", "tb", ",", "limit", ")", ")", "finally", ":", "etype", "=", "value", "=", "tb", "=", "None" ]
Like print_exc() but return a string. Backport for Python 2.3.
[ "Like", "print_exc", "()", "but", "return", "a", "string", ".", "Backport", "for", "Python", "2", ".", "3", "." ]
python
train
38.285714
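Usage sketch for the backport above, with the helper re-declared so the snippet runs on its own (on Python 3 the standard library's traceback.format_exc already does the same job).

import sys
import traceback

def format_exc(limit=None):
    """Like print_exc() but return a string (same body as the backport above)."""
    try:
        etype, value, tb = sys.exc_info()
        return ''.join(traceback.format_exception(etype, value, tb, limit))
    finally:
        etype = value = tb = None

try:
    1 / 0
except ZeroDivisionError:
    text = format_exc()

print(text.splitlines()[-1])   # ZeroDivisionError: division by zero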
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L9618-L9634
def squish(incs, f): """ returns 'flattened' inclination, assuming factor, f and King (1955) formula: tan (I_o) = f tan (I_f) Parameters __________ incs : array of inclination (I_f) data to flatten f : flattening factor Returns _______ I_o : inclinations after flattening """ incs = np.radians(incs) I_o = f * np.tan(incs) # multiply tangent by flattening factor return np.degrees(np.arctan(I_o))
[ "def", "squish", "(", "incs", ",", "f", ")", ":", "incs", "=", "np", ".", "radians", "(", "incs", ")", "I_o", "=", "f", "*", "np", ".", "tan", "(", "incs", ")", "# multiply tangent by flattening factor", "return", "np", ".", "degrees", "(", "np", ".", "arctan", "(", "I_o", ")", ")" ]
returns 'flattened' inclination, assuming factor, f and King (1955) formula: tan (I_o) = f tan (I_f) Parameters __________ incs : array of inclination (I_f) data to flatten f : flattening factor Returns _______ I_o : inclinations after flattening
[ "returns", "flattened", "inclination", "assuming", "factor", "f", "and", "King", "(", "1955", ")", "formula", ":", "tan", "(", "I_o", ")", "=", "f", "tan", "(", "I_f", ")" ]
python
train
25.941176
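A worked example of the King (1955) relation tan(I_o) = f tan(I_f) used above, written with plain numpy so it runs without PmagPy installed; the function body is copied from the snippet.

import numpy as np

def squish(incs, f):
    incs = np.radians(incs)
    return np.degrees(np.arctan(f * np.tan(incs)))

print(squish([30.0, 60.0], 0.5))   # roughly [16.1, 40.9] degrees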
a-tal/kezmenu3
kezmenu3/kezmenu.py
https://github.com/a-tal/kezmenu3/blob/3b06f9cb67fdc98a73928f877eea86692f832fa4/kezmenu3/kezmenu.py#L154-L166
def _checkMousePositionForFocus(self): """Check the mouse position to know if move focus on a option""" i = 0 cur_pos = pygame.mouse.get_pos() ml, mt = self.position for o in self.options: rect = o.get('label_rect') if rect: if rect.collidepoint(cur_pos) and self.mouse_pos != cur_pos: self.option = i self.mouse_pos = cur_pos break i += 1
[ "def", "_checkMousePositionForFocus", "(", "self", ")", ":", "i", "=", "0", "cur_pos", "=", "pygame", ".", "mouse", ".", "get_pos", "(", ")", "ml", ",", "mt", "=", "self", ".", "position", "for", "o", "in", "self", ".", "options", ":", "rect", "=", "o", ".", "get", "(", "'label_rect'", ")", "if", "rect", ":", "if", "rect", ".", "collidepoint", "(", "cur_pos", ")", "and", "self", ".", "mouse_pos", "!=", "cur_pos", ":", "self", ".", "option", "=", "i", "self", ".", "mouse_pos", "=", "cur_pos", "break", "i", "+=", "1" ]
Check the mouse position to know if move focus on a option
[ "Check", "the", "mouse", "position", "to", "know", "if", "move", "focus", "on", "a", "option" ]
python
train
36.846154
arviz-devs/arviz
arviz/stats/stats.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/stats/stats.py#L909-L954
def _mc_error(x, batches=5, circular=False): """Calculate the simulation standard error, accounting for non-independent samples. The trace is divided into batches, and the standard deviation of the batch means is calculated. Parameters ---------- x : Numpy array An array containing MCMC samples batches : integer Number of batches circular : bool Whether to compute the error taking into account `x` is a circular variable (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables). Returns ------- mc_error : float Simulation standard error """ if x.ndim > 1: dims = np.shape(x) trace = np.transpose([t.ravel() for t in x]) return np.reshape([_mc_error(t, batches) for t in trace], dims[1:]) else: if batches == 1: if circular: std = st.circstd(x, high=np.pi, low=-np.pi) else: std = np.std(x) return std / np.sqrt(len(x)) batched_traces = np.resize(x, (batches, int(len(x) / batches))) if circular: means = st.circmean(batched_traces, high=np.pi, low=-np.pi, axis=1) std = st.circstd(means, high=np.pi, low=-np.pi) else: means = np.mean(batched_traces, 1) std = np.std(means) return std / np.sqrt(batches)
[ "def", "_mc_error", "(", "x", ",", "batches", "=", "5", ",", "circular", "=", "False", ")", ":", "if", "x", ".", "ndim", ">", "1", ":", "dims", "=", "np", ".", "shape", "(", "x", ")", "trace", "=", "np", ".", "transpose", "(", "[", "t", ".", "ravel", "(", ")", "for", "t", "in", "x", "]", ")", "return", "np", ".", "reshape", "(", "[", "_mc_error", "(", "t", ",", "batches", ")", "for", "t", "in", "trace", "]", ",", "dims", "[", "1", ":", "]", ")", "else", ":", "if", "batches", "==", "1", ":", "if", "circular", ":", "std", "=", "st", ".", "circstd", "(", "x", ",", "high", "=", "np", ".", "pi", ",", "low", "=", "-", "np", ".", "pi", ")", "else", ":", "std", "=", "np", ".", "std", "(", "x", ")", "return", "std", "/", "np", ".", "sqrt", "(", "len", "(", "x", ")", ")", "batched_traces", "=", "np", ".", "resize", "(", "x", ",", "(", "batches", ",", "int", "(", "len", "(", "x", ")", "/", "batches", ")", ")", ")", "if", "circular", ":", "means", "=", "st", ".", "circmean", "(", "batched_traces", ",", "high", "=", "np", ".", "pi", ",", "low", "=", "-", "np", ".", "pi", ",", "axis", "=", "1", ")", "std", "=", "st", ".", "circstd", "(", "means", ",", "high", "=", "np", ".", "pi", ",", "low", "=", "-", "np", ".", "pi", ")", "else", ":", "means", "=", "np", ".", "mean", "(", "batched_traces", ",", "1", ")", "std", "=", "np", ".", "std", "(", "means", ")", "return", "std", "/", "np", ".", "sqrt", "(", "batches", ")" ]
Calculate the simulation standard error, accounting for non-independent samples. The trace is divided into batches, and the standard deviation of the batch means is calculated. Parameters ---------- x : Numpy array An array containing MCMC samples batches : integer Number of batches circular : bool Whether to compute the error taking into account `x` is a circular variable (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables). Returns ------- mc_error : float Simulation standard error
[ "Calculate", "the", "simulation", "standard", "error", "accounting", "for", "non", "-", "independent", "samples", "." ]
python
train
29.891304
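A standalone sketch of the batch-means estimate _mc_error computes in the non-circular case; only numpy is used and the synthetic normal draws stand in for real MCMC samples.

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=5000)                       # pretend MCMC draws
batches = 5

batched = np.resize(x, (batches, len(x) // batches))
batch_means = batched.mean(axis=1)              # one mean per batch
mc_error = batch_means.std() / np.sqrt(batches)
print(mc_error)                                  # simulation standard error estimate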
dropbox/stone
stone/ir/data_types.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/ir/data_types.py#L1370-L1384
def set_attributes(self, doc, fields, # pylint: disable=arguments-differ parent_type=None, catch_all_field=None): """ :param UnionField catch_all_field: The field designated as the catch-all. This field should be a member of the list of fields. See :meth:`Composite.set_attributes` for parameter definitions. """ if parent_type: assert isinstance(parent_type, Union) super(Union, self).set_attributes(doc, fields, parent_type) self.catch_all_field = catch_all_field self.parent_type = parent_type
[ "def", "set_attributes", "(", "self", ",", "doc", ",", "fields", ",", "# pylint: disable=arguments-differ", "parent_type", "=", "None", ",", "catch_all_field", "=", "None", ")", ":", "if", "parent_type", ":", "assert", "isinstance", "(", "parent_type", ",", "Union", ")", "super", "(", "Union", ",", "self", ")", ".", "set_attributes", "(", "doc", ",", "fields", ",", "parent_type", ")", "self", ".", "catch_all_field", "=", "catch_all_field", "self", ".", "parent_type", "=", "parent_type" ]
:param UnionField catch_all_field: The field designated as the catch-all. This field should be a member of the list of fields. See :meth:`Composite.set_attributes` for parameter definitions.
[ ":", "param", "UnionField", "catch_all_field", ":", "The", "field", "designated", "as", "the", "catch", "-", "all", ".", "This", "field", "should", "be", "a", "member", "of", "the", "list", "of", "fields", "." ]
python
train
39.066667
WebarchivCZ/WA-KAT
src/wa_kat/templates/static/js/Lib/site-packages/components/keyword_handler.py
https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/templates/static/js/Lib/site-packages/components/keyword_handler.py#L144-L157
def set_kw_typeahead_input(cls): """ Map the typeahead input to remote dataset. """ # get reference to parent element parent_id = cls.intput_el.parent.id if "typeahead" not in parent_id.lower(): parent_id = cls.intput_el.parent.parent.id window.make_keyword_typeahead_tag( "#" + parent_id, join(settings.API_PATH, "kw_list.json"), cls.on_select_callback, )
[ "def", "set_kw_typeahead_input", "(", "cls", ")", ":", "# get reference to parent element", "parent_id", "=", "cls", ".", "intput_el", ".", "parent", ".", "id", "if", "\"typeahead\"", "not", "in", "parent_id", ".", "lower", "(", ")", ":", "parent_id", "=", "cls", ".", "intput_el", ".", "parent", ".", "parent", ".", "id", "window", ".", "make_keyword_typeahead_tag", "(", "\"#\"", "+", "parent_id", ",", "join", "(", "settings", ".", "API_PATH", ",", "\"kw_list.json\"", ")", ",", "cls", ".", "on_select_callback", ",", ")" ]
Map the typeahead input to remote dataset.
[ "Map", "the", "typeahead", "input", "to", "remote", "dataset", "." ]
python
train
32.571429
BD2KGenomics/protect
src/protect/mutation_calling/somaticsniper.py
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/somaticsniper.py#L50-L64
def run_somaticsniper_with_merge(job, tumor_bam, normal_bam, univ_options, somaticsniper_options): """ A wrapper for the the entire SomaticSniper sub-graph. :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq :param dict normal_bam: Dict of bam and bai for normal DNA-Seq :param dict univ_options: Dict of universal options used by almost all tools :param dict somaticsniper_options: Options specific to SomaticSniper :return: fsID to the merged SomaticSniper calls :rtype: toil.fileStore.FileID """ spawn = job.wrapJobFn(run_somaticsniper, tumor_bam, normal_bam, univ_options, somaticsniper_options, split=False).encapsulate() job.addChild(spawn) return spawn.rv()
[ "def", "run_somaticsniper_with_merge", "(", "job", ",", "tumor_bam", ",", "normal_bam", ",", "univ_options", ",", "somaticsniper_options", ")", ":", "spawn", "=", "job", ".", "wrapJobFn", "(", "run_somaticsniper", ",", "tumor_bam", ",", "normal_bam", ",", "univ_options", ",", "somaticsniper_options", ",", "split", "=", "False", ")", ".", "encapsulate", "(", ")", "job", ".", "addChild", "(", "spawn", ")", "return", "spawn", ".", "rv", "(", ")" ]
A wrapper for the the entire SomaticSniper sub-graph. :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq :param dict normal_bam: Dict of bam and bai for normal DNA-Seq :param dict univ_options: Dict of universal options used by almost all tools :param dict somaticsniper_options: Options specific to SomaticSniper :return: fsID to the merged SomaticSniper calls :rtype: toil.fileStore.FileID
[ "A", "wrapper", "for", "the", "the", "entire", "SomaticSniper", "sub", "-", "graph", "." ]
python
train
49
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L4349-L4353
def view_tickets(self, id, **kwargs): "https://developer.zendesk.com/rest_api/docs/core/views#list-tickets-from-a-view" api_path = "/api/v2/views/{id}/tickets.json" api_path = api_path.format(id=id) return self.call(api_path, **kwargs)
[ "def", "view_tickets", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/views/{id}/tickets.json\"", "api_path", "=", "api_path", ".", "format", "(", "id", "=", "id", ")", "return", "self", ".", "call", "(", "api_path", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/core/views#list-tickets-from-a-view
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "core", "/", "views#list", "-", "tickets", "-", "from", "-", "a", "-", "view" ]
python
train
52.6
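A hypothetical usage sketch; the subdomain, e-mail address, token and view id are placeholders, and get_all_pages is zdesk's documented pass-through option for fetching every page of a list endpoint.

from zdesk import Zendesk

zd = Zendesk("https://example.zendesk.com", "agent@example.com",
             "api_token_here", True)             # True: the secret is an API token
resp = zd.view_tickets(id=360001234567, get_all_pages=True)
print(len(resp.get("tickets", [])))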
cons3rt/pycons3rt
pycons3rt/bash.py
https://github.com/cons3rt/pycons3rt/blob/f004ab3a35c5bff2f698131fef3b2a8ed5a7596d/pycons3rt/bash.py#L1226-L1262
def run_remote_command(host, command, timeout_sec=5.0): """Retrieves the value of an environment variable of a remote host over SSH :param host: (str) host to query :param command: (str) command :param timeout_sec (float) seconds to wait before killing the command. :return: (str) command output :raises: TypeError, CommandError """ log = logging.getLogger(mod_logger + '.run_remote_command') if not isinstance(host, basestring): msg = 'host argument must be a string' raise TypeError(msg) if not isinstance(command, basestring): msg = 'command argument must be a string' raise TypeError(msg) log.debug('Running remote command on host: {h}: {c}...'.format(h=host, c=command)) command = ['ssh', '{h}'.format(h=host), '{c}'.format(c=command)] try: result = run_command(command, timeout_sec=timeout_sec) code = result['code'] except CommandError: raise if code != 0: msg = 'There was a problem running command [{m}] on host {h} over SSH, return code: {c}, and ' \ 'produced output:\n{o}'.format(h=host, c=code, m=' '.join(command), o=result['output']) raise CommandError(msg) else: output_text = result['output'].strip() log.debug('Running command [{m}] host {h} over SSH produced output: {o}'.format( m=command, h=host, o=output_text)) output = { 'output': output_text, 'code': code } return output
[ "def", "run_remote_command", "(", "host", ",", "command", ",", "timeout_sec", "=", "5.0", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "mod_logger", "+", "'.run_remote_command'", ")", "if", "not", "isinstance", "(", "host", ",", "basestring", ")", ":", "msg", "=", "'host argument must be a string'", "raise", "TypeError", "(", "msg", ")", "if", "not", "isinstance", "(", "command", ",", "basestring", ")", ":", "msg", "=", "'command argument must be a string'", "raise", "TypeError", "(", "msg", ")", "log", ".", "debug", "(", "'Running remote command on host: {h}: {c}...'", ".", "format", "(", "h", "=", "host", ",", "c", "=", "command", ")", ")", "command", "=", "[", "'ssh'", ",", "'{h}'", ".", "format", "(", "h", "=", "host", ")", ",", "'{c}'", ".", "format", "(", "c", "=", "command", ")", "]", "try", ":", "result", "=", "run_command", "(", "command", ",", "timeout_sec", "=", "timeout_sec", ")", "code", "=", "result", "[", "'code'", "]", "except", "CommandError", ":", "raise", "if", "code", "!=", "0", ":", "msg", "=", "'There was a problem running command [{m}] on host {h} over SSH, return code: {c}, and '", "'produced output:\\n{o}'", ".", "format", "(", "h", "=", "host", ",", "c", "=", "code", ",", "m", "=", "' '", ".", "join", "(", "command", ")", ",", "o", "=", "result", "[", "'output'", "]", ")", "raise", "CommandError", "(", "msg", ")", "else", ":", "output_text", "=", "result", "[", "'output'", "]", ".", "strip", "(", ")", "log", ".", "debug", "(", "'Running command [{m}] host {h} over SSH produced output: {o}'", ".", "format", "(", "m", "=", "command", ",", "h", "=", "host", ",", "o", "=", "output_text", ")", ")", "output", "=", "{", "'output'", ":", "output_text", ",", "'code'", ":", "code", "}", "return", "output" ]
Retrieves the value of an environment variable of a remote host over SSH :param host: (str) host to query :param command: (str) command :param timeout_sec (float) seconds to wait before killing the command. :return: (str) command output :raises: TypeError, CommandError
[ "Retrieves", "the", "value", "of", "an", "environment", "variable", "of", "a", "remote", "host", "over", "SSH" ]
python
train
40.189189
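Call sketch for the helper above, assuming passwordless SSH to the target host is already configured; the hostname and command are placeholders, and the returned dict keys ('output', 'code') are taken from the function body.

from pycons3rt.bash import run_remote_command

result = run_remote_command("build-server-01", "uname -r", timeout_sec=10.0)
print(result["code"])     # 0 on success
print(result["output"])   # e.g. the remote kernel release string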
lemieuxl/pyGenClean
pyGenClean/SampleMissingness/sample_missingness.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/SampleMissingness/sample_missingness.py#L56-L83
def runPlink(options): """Run Plink with the ``mind`` option. :param options: the options. :type options: argparse.Namespace """ # The plink command plinkCommand = [ "plink", "--noweb", "--bfile" if options.is_bfile else "--tfile", options.ifile, "--mind", str(options.mind), "--make-bed", "--out", options.out, ] output = None try: output = subprocess.check_output(plinkCommand, stderr=subprocess.STDOUT, shell=False) except subprocess.CalledProcessError: msg = "plink: couldn't run plink" raise ProgramError(msg)
[ "def", "runPlink", "(", "options", ")", ":", "# The plink command", "plinkCommand", "=", "[", "\"plink\"", ",", "\"--noweb\"", ",", "\"--bfile\"", "if", "options", ".", "is_bfile", "else", "\"--tfile\"", ",", "options", ".", "ifile", ",", "\"--mind\"", ",", "str", "(", "options", ".", "mind", ")", ",", "\"--make-bed\"", ",", "\"--out\"", ",", "options", ".", "out", ",", "]", "output", "=", "None", "try", ":", "output", "=", "subprocess", ".", "check_output", "(", "plinkCommand", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "shell", "=", "False", ")", "except", "subprocess", ".", "CalledProcessError", ":", "msg", "=", "\"plink: couldn't run plink\"", "raise", "ProgramError", "(", "msg", ")" ]
Run Plink with the ``mind`` option. :param options: the options. :type options: argparse.Namespace
[ "Run", "Plink", "with", "the", "mind", "option", "." ]
python
train
23.857143
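Sketch of the options object runPlink expects, with attribute names read off the function body; the file prefixes are placeholders and plink must be available on $PATH.

from argparse import Namespace
from pyGenClean.SampleMissingness.sample_missingness import runPlink

options = Namespace(is_bfile=True, ifile="cohort_input", mind=0.1, out="cohort_clean")
runPlink(options)
# equivalent shell command:
#   plink --noweb --bfile cohort_input --mind 0.1 --make-bed --out cohort_clean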
brocade/pynos
pynos/versions/base/interface.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/interface.py#L67-L95
def add_vlan_int(self, vlan_id): """ Add VLAN Interface. VLAN interfaces are required for VLANs even when not wanting to use the interface for any L3 features. Args: vlan_id: ID for the VLAN interface being created. Value of 2-4096. Returns: True if command completes successfully or False if not. Raises: None """ config = ET.Element('config') vlinterface = ET.SubElement(config, 'interface-vlan', xmlns=("urn:brocade.com:mgmt:" "brocade-interface")) interface = ET.SubElement(vlinterface, 'interface') vlan = ET.SubElement(interface, 'vlan') name = ET.SubElement(vlan, 'name') name.text = vlan_id try: self._callback(config) return True # TODO add logging and narrow exception window. except Exception as error: logging.error(error) return False
[ "def", "add_vlan_int", "(", "self", ",", "vlan_id", ")", ":", "config", "=", "ET", ".", "Element", "(", "'config'", ")", "vlinterface", "=", "ET", ".", "SubElement", "(", "config", ",", "'interface-vlan'", ",", "xmlns", "=", "(", "\"urn:brocade.com:mgmt:\"", "\"brocade-interface\"", ")", ")", "interface", "=", "ET", ".", "SubElement", "(", "vlinterface", ",", "'interface'", ")", "vlan", "=", "ET", ".", "SubElement", "(", "interface", ",", "'vlan'", ")", "name", "=", "ET", ".", "SubElement", "(", "vlan", ",", "'name'", ")", "name", ".", "text", "=", "vlan_id", "try", ":", "self", ".", "_callback", "(", "config", ")", "return", "True", "# TODO add logging and narrow exception window.", "except", "Exception", "as", "error", ":", "logging", ".", "error", "(", "error", ")", "return", "False" ]
Add VLAN Interface. VLAN interfaces are required for VLANs even when not wanting to use the interface for any L3 features. Args: vlan_id: ID for the VLAN interface being created. Value of 2-4096. Returns: True if command completes successfully or False if not. Raises: None
[ "Add", "VLAN", "Interface", ".", "VLAN", "interfaces", "are", "required", "for", "VLANs", "even", "when", "not", "wanting", "to", "use", "the", "interface", "for", "any", "L3", "features", "." ]
python
train
34.931034
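A hypothetical end-to-end sketch of calling this method through pynos's Device wrapper; the switch address and credentials are placeholders, and the surrounding Device/context-manager usage is an assumption based on pynos's documented examples rather than part of the snippet above.

import pynos.device

conn = ("10.0.0.10", "22")           # placeholder switch address
auth = ("admin", "password")         # placeholder credentials
with pynos.device.Device(conn=conn, auth=auth) as dev:
    ok = dev.interface.add_vlan_int("736")
    print(ok)                        # True if the NETCONF edit succeeded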
infothrill/python-dyndnsc
dyndnsc/updater/afraid.py
https://github.com/infothrill/python-dyndnsc/blob/2196d48aa6098da9835a7611fbdb0b5f0fbf51e4/dyndnsc/updater/afraid.py#L60-L74
def compute_auth_key(userid, password): """ Compute the authentication key for freedns.afraid.org. This is the SHA1 hash of the string b'userid|password'. :param userid: ascii username :param password: ascii password :return: ascii authentication key (SHA1 at this point) """ import sys if sys.version_info >= (3, 0): return hashlib.sha1(b"|".join((userid.encode("ascii"), # noqa: S303 password.encode("ascii")))).hexdigest() return hashlib.sha1("|".join((userid, password))).hexdigest()
[ "def", "compute_auth_key", "(", "userid", ",", "password", ")", ":", "import", "sys", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "0", ")", ":", "return", "hashlib", ".", "sha1", "(", "b\"|\"", ".", "join", "(", "(", "userid", ".", "encode", "(", "\"ascii\"", ")", ",", "# noqa: S303", "password", ".", "encode", "(", "\"ascii\"", ")", ")", ")", ")", ".", "hexdigest", "(", ")", "return", "hashlib", ".", "sha1", "(", "\"|\"", ".", "join", "(", "(", "userid", ",", "password", ")", ")", ")", ".", "hexdigest", "(", ")" ]
Compute the authentication key for freedns.afraid.org. This is the SHA1 hash of the string b'userid|password'. :param userid: ascii username :param password: ascii password :return: ascii authentication key (SHA1 at this point)
[ "Compute", "the", "authentication", "key", "for", "freedns", ".", "afraid", ".", "org", "." ]
python
train
37.533333
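A quick check of the derivation above, restated for the Python 3 branch only so it runs standalone: the key is simply SHA1 over the ASCII bytes of "userid|password".

import hashlib

def compute_auth_key(userid, password):
    return hashlib.sha1(b"|".join((userid.encode("ascii"),
                                   password.encode("ascii")))).hexdigest()

assert compute_auth_key("alice", "s3cret") == hashlib.sha1(b"alice|s3cret").hexdigest()
print(compute_auth_key("alice", "s3cret"))   # 40-character hex digest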
Gandi/gandi.cli
gandi/cli/commands/docker.py
https://github.com/Gandi/gandi.cli/blob/6ee5b8fc8ec44b0a6c232043ca610606ad8f693d/gandi/cli/commands/docker.py#L15-L46
def docker(gandi, vm, args): """ Manage docker instance """ if not [basedir for basedir in os.getenv('PATH', '.:/usr/bin').split(':') if os.path.exists('%s/docker' % basedir)]: gandi.echo("""'docker' not found in $PATH, required for this command \ to work See https://docs.docker.com/installation/#installation to install, or use: # curl https://get.docker.io/ | sh""") return if vm: gandi.configure(True, 'dockervm', vm) else: vm = gandi.get('dockervm') if not vm: gandi.echo(""" No docker vm specified. You can create one: $ gandi vm create --hostname docker --image "Ubuntu 14.04 64 bits LTS (HVM)" \\ --run 'wget -O - https://get.docker.io/ | sh' Then configure it using: $ gandi docker --vm docker ps Or to both change target vm and spawn a process (note the -- separator): $ gandi docker --vm myvm -- run -i -t debian bash """) # noqa return return gandi.docker.handle(vm, args)
[ "def", "docker", "(", "gandi", ",", "vm", ",", "args", ")", ":", "if", "not", "[", "basedir", "for", "basedir", "in", "os", ".", "getenv", "(", "'PATH'", ",", "'.:/usr/bin'", ")", ".", "split", "(", "':'", ")", "if", "os", ".", "path", ".", "exists", "(", "'%s/docker'", "%", "basedir", ")", "]", ":", "gandi", ".", "echo", "(", "\"\"\"'docker' not found in $PATH, required for this command \\\nto work\nSee https://docs.docker.com/installation/#installation to install, or use:\n # curl https://get.docker.io/ | sh\"\"\"", ")", "return", "if", "vm", ":", "gandi", ".", "configure", "(", "True", ",", "'dockervm'", ",", "vm", ")", "else", ":", "vm", "=", "gandi", ".", "get", "(", "'dockervm'", ")", "if", "not", "vm", ":", "gandi", ".", "echo", "(", "\"\"\"\nNo docker vm specified. You can create one:\n $ gandi vm create --hostname docker --image \"Ubuntu 14.04 64 bits LTS (HVM)\" \\\\\n --run 'wget -O - https://get.docker.io/ | sh'\n\nThen configure it using:\n $ gandi docker --vm docker ps\n\nOr to both change target vm and spawn a process (note the -- separator):\n $ gandi docker --vm myvm -- run -i -t debian bash\n\"\"\"", ")", "# noqa", "return", "return", "gandi", ".", "docker", ".", "handle", "(", "vm", ",", "args", ")" ]
Manage docker instance
[ "Manage", "docker", "instance" ]
python
train
30.46875
brechtm/rinohtype
src/rinoh/paragraph.py
https://github.com/brechtm/rinohtype/blob/40a63c4e5ad7550f62b6860f1812cb67cafb9dc7/src/rinoh/paragraph.py#L608-L696
def render(self, container, descender, state, space_below=0, first_line_only=False): """Typeset the paragraph The paragraph is typeset in the given container starting below the current cursor position of the container. When the end of the container is reached, the rendering state is preserved to continue setting the rest of the paragraph when this method is called with a new container. Args: container (Container): the container to render to descender (float or None): descender height of the preceeding line state (ParagraphState): the state where rendering will continue first_line_only (bool): typeset only the first line """ indent_first = (float(self.get_style('indent_first', container)) if state.initial else 0) line_width = float(container.width) line_spacing = self.get_style('line_spacing', container) text_align = self.get_style('text_align', container) tab_stops = self.get_style('tab_stops', container) if not tab_stops: tab_width = 2 * self.get_style('font_size', container) tab_stops = DefaultTabStops(tab_width) # `saved_state` is updated after successfully rendering each line, so # that when `container` overflows on rendering a line, the words in that # line are yielded again on the next typeset() call. saved_state = copy(state) prev_state = copy(state) max_line_width = 0 def typeset_line(line, last_line=False): """Typeset `line` and, if no exception is raised, update the paragraph's internal rendering state.""" nonlocal state, saved_state, max_line_width, descender, space_below max_line_width = max(max_line_width, line.cursor) advance = (line.ascender(container) if descender is None else line_spacing.advance(line, descender, container)) descender = line.descender(container) # descender <= 0 line.advance = advance total_advance = advance + (space_below if last_line else 0) - descender if container.remaining_height < total_advance: raise EndOfContainer(saved_state) assert container.advance2(advance) line.typeset(container, text_align, last_line) assert container.advance2(- descender) state.initial = False saved_state = copy(state) return Line(tab_stops, line_width, container, significant_whitespace=self.significant_whitespace) first_line = line = Line(tab_stops, line_width, container, indent_first, self.significant_whitespace) while True: try: word = state.next_word() except StopIteration: break try: if not line.append_word(word): for first, second in word.hyphenate(container): if line.append_word(first): state.prepend_word(second) # prepend second part break else: state = prev_state line = typeset_line(line) if first_line_only: break continue except NewLineException: line.append(word.glyphs_span) line = typeset_line(line, last_line=True) if first_line_only: break prev_state = copy(state) if line: typeset_line(line, last_line=True) # Correct the horizontal text placement for auto-width paragraphs if self._width(container) == FlowableWidth.AUTO: if text_align == TextAlign.CENTER: container.left -= float(container.width - max_line_width) / 2 if text_align == TextAlign.RIGHT: container.left -= float(container.width - max_line_width) return max_line_width, first_line.advance, descender
[ "def", "render", "(", "self", ",", "container", ",", "descender", ",", "state", ",", "space_below", "=", "0", ",", "first_line_only", "=", "False", ")", ":", "indent_first", "=", "(", "float", "(", "self", ".", "get_style", "(", "'indent_first'", ",", "container", ")", ")", "if", "state", ".", "initial", "else", "0", ")", "line_width", "=", "float", "(", "container", ".", "width", ")", "line_spacing", "=", "self", ".", "get_style", "(", "'line_spacing'", ",", "container", ")", "text_align", "=", "self", ".", "get_style", "(", "'text_align'", ",", "container", ")", "tab_stops", "=", "self", ".", "get_style", "(", "'tab_stops'", ",", "container", ")", "if", "not", "tab_stops", ":", "tab_width", "=", "2", "*", "self", ".", "get_style", "(", "'font_size'", ",", "container", ")", "tab_stops", "=", "DefaultTabStops", "(", "tab_width", ")", "# `saved_state` is updated after successfully rendering each line, so", "# that when `container` overflows on rendering a line, the words in that", "# line are yielded again on the next typeset() call.", "saved_state", "=", "copy", "(", "state", ")", "prev_state", "=", "copy", "(", "state", ")", "max_line_width", "=", "0", "def", "typeset_line", "(", "line", ",", "last_line", "=", "False", ")", ":", "\"\"\"Typeset `line` and, if no exception is raised, update the\n paragraph's internal rendering state.\"\"\"", "nonlocal", "state", ",", "saved_state", ",", "max_line_width", ",", "descender", ",", "space_below", "max_line_width", "=", "max", "(", "max_line_width", ",", "line", ".", "cursor", ")", "advance", "=", "(", "line", ".", "ascender", "(", "container", ")", "if", "descender", "is", "None", "else", "line_spacing", ".", "advance", "(", "line", ",", "descender", ",", "container", ")", ")", "descender", "=", "line", ".", "descender", "(", "container", ")", "# descender <= 0", "line", ".", "advance", "=", "advance", "total_advance", "=", "advance", "+", "(", "space_below", "if", "last_line", "else", "0", ")", "-", "descender", "if", "container", ".", "remaining_height", "<", "total_advance", ":", "raise", "EndOfContainer", "(", "saved_state", ")", "assert", "container", ".", "advance2", "(", "advance", ")", "line", ".", "typeset", "(", "container", ",", "text_align", ",", "last_line", ")", "assert", "container", ".", "advance2", "(", "-", "descender", ")", "state", ".", "initial", "=", "False", "saved_state", "=", "copy", "(", "state", ")", "return", "Line", "(", "tab_stops", ",", "line_width", ",", "container", ",", "significant_whitespace", "=", "self", ".", "significant_whitespace", ")", "first_line", "=", "line", "=", "Line", "(", "tab_stops", ",", "line_width", ",", "container", ",", "indent_first", ",", "self", ".", "significant_whitespace", ")", "while", "True", ":", "try", ":", "word", "=", "state", ".", "next_word", "(", ")", "except", "StopIteration", ":", "break", "try", ":", "if", "not", "line", ".", "append_word", "(", "word", ")", ":", "for", "first", ",", "second", "in", "word", ".", "hyphenate", "(", "container", ")", ":", "if", "line", ".", "append_word", "(", "first", ")", ":", "state", ".", "prepend_word", "(", "second", ")", "# prepend second part", "break", "else", ":", "state", "=", "prev_state", "line", "=", "typeset_line", "(", "line", ")", "if", "first_line_only", ":", "break", "continue", "except", "NewLineException", ":", "line", ".", "append", "(", "word", ".", "glyphs_span", ")", "line", "=", "typeset_line", "(", "line", ",", "last_line", "=", "True", ")", "if", "first_line_only", ":", "break", "prev_state", "=", "copy", "(", 
"state", ")", "if", "line", ":", "typeset_line", "(", "line", ",", "last_line", "=", "True", ")", "# Correct the horizontal text placement for auto-width paragraphs", "if", "self", ".", "_width", "(", "container", ")", "==", "FlowableWidth", ".", "AUTO", ":", "if", "text_align", "==", "TextAlign", ".", "CENTER", ":", "container", ".", "left", "-=", "float", "(", "container", ".", "width", "-", "max_line_width", ")", "/", "2", "if", "text_align", "==", "TextAlign", ".", "RIGHT", ":", "container", ".", "left", "-=", "float", "(", "container", ".", "width", "-", "max_line_width", ")", "return", "max_line_width", ",", "first_line", ".", "advance", ",", "descender" ]
Typeset the paragraph The paragraph is typeset in the given container starting below the current cursor position of the container. When the end of the container is reached, the rendering state is preserved to continue setting the rest of the paragraph when this method is called with a new container. Args: container (Container): the container to render to descender (float or None): descender height of the preceeding line state (ParagraphState): the state where rendering will continue first_line_only (bool): typeset only the first line
[ "Typeset", "the", "paragraph" ]
python
train
46.41573
zhmcclient/python-zhmcclient
zhmcclient_mock/_urihandler.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient_mock/_urihandler.py#L847-L862
def get(method, hmc, uri, uri_parms, logon_required): """Operation: List Password Rules.""" query_str = uri_parms[0] try: console = hmc.consoles.lookup_by_oid(None) except KeyError: raise InvalidResourceError(method, uri) result_password_rules = [] filter_args = parse_query_parms(method, uri, query_str) for password_rule in console.password_rules.list(filter_args): result_password_rule = {} for prop in password_rule.properties: if prop in ('element-uri', 'name', 'type'): result_password_rule[prop] = password_rule.properties[prop] result_password_rules.append(result_password_rule) return {'password-rules': result_password_rules}
[ "def", "get", "(", "method", ",", "hmc", ",", "uri", ",", "uri_parms", ",", "logon_required", ")", ":", "query_str", "=", "uri_parms", "[", "0", "]", "try", ":", "console", "=", "hmc", ".", "consoles", ".", "lookup_by_oid", "(", "None", ")", "except", "KeyError", ":", "raise", "InvalidResourceError", "(", "method", ",", "uri", ")", "result_password_rules", "=", "[", "]", "filter_args", "=", "parse_query_parms", "(", "method", ",", "uri", ",", "query_str", ")", "for", "password_rule", "in", "console", ".", "password_rules", ".", "list", "(", "filter_args", ")", ":", "result_password_rule", "=", "{", "}", "for", "prop", "in", "password_rule", ".", "properties", ":", "if", "prop", "in", "(", "'element-uri'", ",", "'name'", ",", "'type'", ")", ":", "result_password_rule", "[", "prop", "]", "=", "password_rule", ".", "properties", "[", "prop", "]", "result_password_rules", ".", "append", "(", "result_password_rule", ")", "return", "{", "'password-rules'", ":", "result_password_rules", "}" ]
Operation: List Password Rules.
[ "Operation", ":", "List", "Password", "Rules", "." ]
python
train
48.75
kislyuk/aegea
aegea/packages/github3/session.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/session.py#L90-L97
def retrieve_client_credentials(self): """Return the client credentials. :returns: tuple(client_id, client_secret) """ client_id = self.params.get('client_id') client_secret = self.params.get('client_secret') return (client_id, client_secret)
[ "def", "retrieve_client_credentials", "(", "self", ")", ":", "client_id", "=", "self", ".", "params", ".", "get", "(", "'client_id'", ")", "client_secret", "=", "self", ".", "params", ".", "get", "(", "'client_secret'", ")", "return", "(", "client_id", ",", "client_secret", ")" ]
Return the client credentials. :returns: tuple(client_id, client_secret)
[ "Return", "the", "client", "credentials", "." ]
python
train
35.5
ZELLMECHANIK-DRESDEN/fcswrite
fcswrite/fcswrite.py
https://github.com/ZELLMECHANIK-DRESDEN/fcswrite/blob/5584983aa1eb927660183252039e73285c0724b3/fcswrite/fcswrite.py#L13-L201
def write_fcs(filename, chn_names, data, endianness="big", compat_chn_names=True, compat_copy=True, compat_negative=True, compat_percent=True, compat_max_int16=10000): """Write numpy data to an .fcs file (FCS3.0 file format) Parameters ---------- filename: str or pathlib.Path Path to the output .fcs file ch_names: list of str, length C Names of the output channels data: 2d ndarray of shape (N,C) The numpy array data to store as .fcs file format. endianness: str Set to "little" or "big" to define the byte order used. compat_chn_names: bool Compatibility mode for 3rd party flow analysis software: The characters " ", "?", and "_" are removed in the output channel names. compat_copy: bool Do not override the input array `data` when modified in compatibility mode. compat_negative: bool Compatibliity mode for 3rd party flow analysis software: Flip the sign of `data` if its mean is smaller than zero. compat_percent: bool Compatibliity mode for 3rd party flow analysis software: If a column in `data` contains values only between 0 and 1, they are multiplied by 100. compat_max_int16: int Compatibliity mode for 3rd party flow analysis software: If a column in `data` has a maximum above this value, then the display-maximum is set to 2**15. Notes ----- - These commonly used unicode characters are replaced: "µ", "²" - If the input data contain NaN values, the corresponding rows are excluded due to incompatibility with the FCS file format. """ filename = pathlib.Path(filename) if not isinstance(data, np.ndarray): data = np.array(data, dtype=float) # remove rows with nan values nanrows = np.isnan(data).any(axis=1) if np.sum(nanrows): msg = "Rows containing NaNs are not written to {}!".format(filename) warnings.warn(msg) data = data[~nanrows] if endianness not in ["little", "big"]: raise ValueError("`endianness` must be 'little' or 'big'!") msg = "length of `chn_names` must match length of 2nd axis of `data`" assert len(chn_names) == data.shape[1], msg rpl = [["µ", "u"], ["²", "2"], ] if compat_chn_names: # Compatibility mode: Clean up headers. rpl += [[" ", ""], ["?", ""], ["_", ""], ] for ii in range(len(chn_names)): for (a, b) in rpl: chn_names[ii] = chn_names[ii].replace(a, b) # Data with values between 0 and 1 pcnt_cands = [] for ch in range(data.shape[1]): if data[:, ch].min() >= 0 and data[:, ch].max() <= 1: pcnt_cands.append(ch) if compat_percent and pcnt_cands: # Compatibility mode: Scale values b/w 0 and 1 to percent if compat_copy: # copy if requested data = data.copy() for ch in pcnt_cands: data[:, ch] *= 100 if compat_negative: toflip = [] for ch in range(data.shape[1]): if np.mean(data[:, ch]) < 0: toflip.append(ch) if len(toflip): if compat_copy: # copy if requested data = data.copy() for ch in toflip: data[:, ch] *= -1 # DATA segment data1 = data.flatten().tolist() DATA = struct.pack('>%sf' % len(data1), *data1) # TEXT segment header_size = 256 if endianness == "little": # use little endian byteord = '1,2,3,4' else: # use big endian byteord = '4,3,2,1' TEXT = '/$BEGINANALYSIS/0/$ENDANALYSIS/0' TEXT += '/$BEGINSTEXT/0/$ENDSTEXT/0' # Add placeholders for $BEGINDATA and $ENDDATA, because we don't # know yet how long TEXT is. 
TEXT += '/$BEGINDATA/{data_start_byte}/$ENDDATA/{data_end_byte}' TEXT += '/$BYTEORD/{0}/$DATATYPE/F'.format(byteord) TEXT += '/$MODE/L/$NEXTDATA/0/$TOT/{0}'.format(data.shape[0]) TEXT += '/$PAR/{0}'.format(data.shape[1]) # Check for content of data columns and set range for jj in range(data.shape[1]): # Set data maximum to that of int16 if (compat_max_int16 and np.max(data[:, jj]) > compat_max_int16 and np.max(data[:, jj]) < 2**15): pnrange = int(2**15) # Set range for data with values between 0 and 1 elif jj in pcnt_cands: if compat_percent: # scaled to 100% pnrange = 100 else: # not scaled pnrange = 1 # default: set range to maxium value found in column else: pnrange = int(abs(np.max(data[:, jj]))) # TODO: # - Set log/lin fmt_str = '/$P{0}B/32/$P{0}E/0,0/$P{0}N/{1}/$P{0}R/{2}/$P{0}D/Linear' TEXT += fmt_str.format(jj+1, chn_names[jj], pnrange) TEXT += '/' # SET $BEGINDATA and $ENDDATA using the current size of TEXT plus padding. text_padding = 47 # for visual separation and safety data_start_byte = header_size + len(TEXT) + text_padding data_end_byte = data_start_byte + len(DATA) - 1 TEXT = TEXT.format(data_start_byte=data_start_byte, data_end_byte=data_end_byte) lentxt = len(TEXT) # Pad TEXT segment with spaces until data_start_byte TEXT = TEXT.ljust(data_start_byte - header_size, " ") # HEADER segment ver = 'FCS3.0' textfirst = '{0: >8}'.format(header_size) textlast = '{0: >8}'.format(lentxt + header_size - 1) # Starting with FCS 3.0, data segment can end beyond byte 99,999,999, # in which case a zero is written in each of the two header fields (the # values are given in the text segment keywords $BEGINDATA and $ENDDATA) if data_end_byte <= 99999999: datafirst = '{0: >8}'.format(data_start_byte) datalast = '{0: >8}'.format(data_end_byte) else: datafirst = '{0: >8}'.format(0) datalast = '{0: >8}'.format(0) anafirst = '{0: >8}'.format(0) analast = '{0: >8}'.format(0) HEADER = '{0: <256}'.format(ver + ' ' + textfirst + textlast + datafirst + datalast + anafirst + analast) # Write data with filename.open("wb") as fd: fd.write(HEADER.encode("ascii", "replace")) fd.write(TEXT.encode("ascii", "replace")) fd.write(DATA) fd.write(b'00000000')
[ "def", "write_fcs", "(", "filename", ",", "chn_names", ",", "data", ",", "endianness", "=", "\"big\"", ",", "compat_chn_names", "=", "True", ",", "compat_copy", "=", "True", ",", "compat_negative", "=", "True", ",", "compat_percent", "=", "True", ",", "compat_max_int16", "=", "10000", ")", ":", "filename", "=", "pathlib", ".", "Path", "(", "filename", ")", "if", "not", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "data", "=", "np", ".", "array", "(", "data", ",", "dtype", "=", "float", ")", "# remove rows with nan values", "nanrows", "=", "np", ".", "isnan", "(", "data", ")", ".", "any", "(", "axis", "=", "1", ")", "if", "np", ".", "sum", "(", "nanrows", ")", ":", "msg", "=", "\"Rows containing NaNs are not written to {}!\"", ".", "format", "(", "filename", ")", "warnings", ".", "warn", "(", "msg", ")", "data", "=", "data", "[", "~", "nanrows", "]", "if", "endianness", "not", "in", "[", "\"little\"", ",", "\"big\"", "]", ":", "raise", "ValueError", "(", "\"`endianness` must be 'little' or 'big'!\"", ")", "msg", "=", "\"length of `chn_names` must match length of 2nd axis of `data`\"", "assert", "len", "(", "chn_names", ")", "==", "data", ".", "shape", "[", "1", "]", ",", "msg", "rpl", "=", "[", "[", "\"µ\",", " ", "u\"]", ",", "", "[", "\"²\",", " ", "2\"]", ",", "", "]", "if", "compat_chn_names", ":", "# Compatibility mode: Clean up headers.", "rpl", "+=", "[", "[", "\" \"", ",", "\"\"", "]", ",", "[", "\"?\"", ",", "\"\"", "]", ",", "[", "\"_\"", ",", "\"\"", "]", ",", "]", "for", "ii", "in", "range", "(", "len", "(", "chn_names", ")", ")", ":", "for", "(", "a", ",", "b", ")", "in", "rpl", ":", "chn_names", "[", "ii", "]", "=", "chn_names", "[", "ii", "]", ".", "replace", "(", "a", ",", "b", ")", "# Data with values between 0 and 1", "pcnt_cands", "=", "[", "]", "for", "ch", "in", "range", "(", "data", ".", "shape", "[", "1", "]", ")", ":", "if", "data", "[", ":", ",", "ch", "]", ".", "min", "(", ")", ">=", "0", "and", "data", "[", ":", ",", "ch", "]", ".", "max", "(", ")", "<=", "1", ":", "pcnt_cands", ".", "append", "(", "ch", ")", "if", "compat_percent", "and", "pcnt_cands", ":", "# Compatibility mode: Scale values b/w 0 and 1 to percent", "if", "compat_copy", ":", "# copy if requested", "data", "=", "data", ".", "copy", "(", ")", "for", "ch", "in", "pcnt_cands", ":", "data", "[", ":", ",", "ch", "]", "*=", "100", "if", "compat_negative", ":", "toflip", "=", "[", "]", "for", "ch", "in", "range", "(", "data", ".", "shape", "[", "1", "]", ")", ":", "if", "np", ".", "mean", "(", "data", "[", ":", ",", "ch", "]", ")", "<", "0", ":", "toflip", ".", "append", "(", "ch", ")", "if", "len", "(", "toflip", ")", ":", "if", "compat_copy", ":", "# copy if requested", "data", "=", "data", ".", "copy", "(", ")", "for", "ch", "in", "toflip", ":", "data", "[", ":", ",", "ch", "]", "*=", "-", "1", "# DATA segment", "data1", "=", "data", ".", "flatten", "(", ")", ".", "tolist", "(", ")", "DATA", "=", "struct", ".", "pack", "(", "'>%sf'", "%", "len", "(", "data1", ")", ",", "*", "data1", ")", "# TEXT segment", "header_size", "=", "256", "if", "endianness", "==", "\"little\"", ":", "# use little endian", "byteord", "=", "'1,2,3,4'", "else", ":", "# use big endian", "byteord", "=", "'4,3,2,1'", "TEXT", "=", "'/$BEGINANALYSIS/0/$ENDANALYSIS/0'", "TEXT", "+=", "'/$BEGINSTEXT/0/$ENDSTEXT/0'", "# Add placeholders for $BEGINDATA and $ENDDATA, because we don't", "# know yet how long TEXT is.", "TEXT", "+=", "'/$BEGINDATA/{data_start_byte}/$ENDDATA/{data_end_byte}'", "TEXT", "+=", 
"'/$BYTEORD/{0}/$DATATYPE/F'", ".", "format", "(", "byteord", ")", "TEXT", "+=", "'/$MODE/L/$NEXTDATA/0/$TOT/{0}'", ".", "format", "(", "data", ".", "shape", "[", "0", "]", ")", "TEXT", "+=", "'/$PAR/{0}'", ".", "format", "(", "data", ".", "shape", "[", "1", "]", ")", "# Check for content of data columns and set range", "for", "jj", "in", "range", "(", "data", ".", "shape", "[", "1", "]", ")", ":", "# Set data maximum to that of int16", "if", "(", "compat_max_int16", "and", "np", ".", "max", "(", "data", "[", ":", ",", "jj", "]", ")", ">", "compat_max_int16", "and", "np", ".", "max", "(", "data", "[", ":", ",", "jj", "]", ")", "<", "2", "**", "15", ")", ":", "pnrange", "=", "int", "(", "2", "**", "15", ")", "# Set range for data with values between 0 and 1", "elif", "jj", "in", "pcnt_cands", ":", "if", "compat_percent", ":", "# scaled to 100%", "pnrange", "=", "100", "else", ":", "# not scaled", "pnrange", "=", "1", "# default: set range to maxium value found in column", "else", ":", "pnrange", "=", "int", "(", "abs", "(", "np", ".", "max", "(", "data", "[", ":", ",", "jj", "]", ")", ")", ")", "# TODO:", "# - Set log/lin", "fmt_str", "=", "'/$P{0}B/32/$P{0}E/0,0/$P{0}N/{1}/$P{0}R/{2}/$P{0}D/Linear'", "TEXT", "+=", "fmt_str", ".", "format", "(", "jj", "+", "1", ",", "chn_names", "[", "jj", "]", ",", "pnrange", ")", "TEXT", "+=", "'/'", "# SET $BEGINDATA and $ENDDATA using the current size of TEXT plus padding.", "text_padding", "=", "47", "# for visual separation and safety", "data_start_byte", "=", "header_size", "+", "len", "(", "TEXT", ")", "+", "text_padding", "data_end_byte", "=", "data_start_byte", "+", "len", "(", "DATA", ")", "-", "1", "TEXT", "=", "TEXT", ".", "format", "(", "data_start_byte", "=", "data_start_byte", ",", "data_end_byte", "=", "data_end_byte", ")", "lentxt", "=", "len", "(", "TEXT", ")", "# Pad TEXT segment with spaces until data_start_byte", "TEXT", "=", "TEXT", ".", "ljust", "(", "data_start_byte", "-", "header_size", ",", "\" \"", ")", "# HEADER segment", "ver", "=", "'FCS3.0'", "textfirst", "=", "'{0: >8}'", ".", "format", "(", "header_size", ")", "textlast", "=", "'{0: >8}'", ".", "format", "(", "lentxt", "+", "header_size", "-", "1", ")", "# Starting with FCS 3.0, data segment can end beyond byte 99,999,999,", "# in which case a zero is written in each of the two header fields (the", "# values are given in the text segment keywords $BEGINDATA and $ENDDATA)", "if", "data_end_byte", "<=", "99999999", ":", "datafirst", "=", "'{0: >8}'", ".", "format", "(", "data_start_byte", ")", "datalast", "=", "'{0: >8}'", ".", "format", "(", "data_end_byte", ")", "else", ":", "datafirst", "=", "'{0: >8}'", ".", "format", "(", "0", ")", "datalast", "=", "'{0: >8}'", ".", "format", "(", "0", ")", "anafirst", "=", "'{0: >8}'", ".", "format", "(", "0", ")", "analast", "=", "'{0: >8}'", ".", "format", "(", "0", ")", "HEADER", "=", "'{0: <256}'", ".", "format", "(", "ver", "+", "' '", "+", "textfirst", "+", "textlast", "+", "datafirst", "+", "datalast", "+", "anafirst", "+", "analast", ")", "# Write data", "with", "filename", ".", "open", "(", "\"wb\"", ")", "as", "fd", ":", "fd", ".", "write", "(", "HEADER", ".", "encode", "(", "\"ascii\"", ",", "\"replace\"", ")", ")", "fd", ".", "write", "(", "TEXT", ".", "encode", "(", "\"ascii\"", ",", "\"replace\"", ")", ")", "fd", ".", "write", "(", "DATA", ")", "fd", ".", "write", "(", "b'00000000'", ")" ]
Write numpy data to an .fcs file (FCS3.0 file format)

Parameters
----------
filename: str or pathlib.Path
    Path to the output .fcs file
chn_names: list of str, length C
    Names of the output channels
data: 2d ndarray of shape (N,C)
    The numpy array data to store as .fcs file format.
endianness: str
    Set to "little" or "big" to define the byte order used.
compat_chn_names: bool
    Compatibility mode for 3rd party flow analysis software:
    The characters " ", "?", and "_" are removed in the output
    channel names.
compat_copy: bool
    Do not override the input array `data` when modified in
    compatibility mode.
compat_negative: bool
    Compatibility mode for 3rd party flow analysis software:
    Flip the sign of `data` if its mean is smaller than zero.
compat_percent: bool
    Compatibility mode for 3rd party flow analysis software:
    If a column in `data` contains values only between 0 and 1,
    they are multiplied by 100.
compat_max_int16: int
    Compatibility mode for 3rd party flow analysis software:
    If a column in `data` has a maximum above this value,
    then the display-maximum is set to 2**15.

Notes
-----
- These commonly used unicode characters are replaced: "µ", "²"
- If the input data contain NaN values, the corresponding rows
  are excluded due to incompatibility with the FCS file format.
[ "Write", "numpy", "data", "to", "an", ".", "fcs", "file", "(", "FCS3", ".", "0", "file", "format", ")" ]
python
test
34.708995
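A minimal usage sketch for the write_fcs record above. It assumes the function (and its module-level imports such as numpy, struct, pathlib and warnings) is available in the current scope; the file name and channel names are illustrative.

import numpy as np

# 50 events, 2 channels; column values are arbitrary example data
data = np.vstack([np.linspace(0.0, 1.0, 50), np.linspace(10.0, 500.0, 50)]).T
chn_names = ["FL1-A", "FSC-A"]

# Writes an FCS3.0 file to disk with the default compatibility options.
write_fcs("example.fcs", chn_names, data)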
iotile/coretools
iotilecore/iotile/core/hw/transport/adapter/legacy.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/transport/adapter/legacy.py#L189-L212
def disconnect_sync(self, conn_id): """Synchronously disconnect from a connected device Args: conn_id (int): A unique identifier that will refer to this connection Returns: dict: A dictionary with two elements 'success': a bool with the result of the connection attempt 'failure_reason': a string with the reason for the failure if we failed """ done = threading.Event() result = {} def disconnect_done(conn_id, adapter_id, status, reason): result['success'] = status result['failure_reason'] = reason done.set() self.disconnect_async(conn_id, disconnect_done) done.wait() return result
[ "def", "disconnect_sync", "(", "self", ",", "conn_id", ")", ":", "done", "=", "threading", ".", "Event", "(", ")", "result", "=", "{", "}", "def", "disconnect_done", "(", "conn_id", ",", "adapter_id", ",", "status", ",", "reason", ")", ":", "result", "[", "'success'", "]", "=", "status", "result", "[", "'failure_reason'", "]", "=", "reason", "done", ".", "set", "(", ")", "self", ".", "disconnect_async", "(", "conn_id", ",", "disconnect_done", ")", "done", ".", "wait", "(", ")", "return", "result" ]
Synchronously disconnect from a connected device Args: conn_id (int): A unique identifier that will refer to this connection Returns: dict: A dictionary with two elements 'success': a bool with the result of the connection attempt 'failure_reason': a string with the reason for the failure if we failed
[ "Synchronously", "disconnect", "from", "a", "connected", "device" ]
python
train
30.875
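The disconnect_sync record above wraps a callback-based API into a blocking call with threading.Event. A stand-alone sketch of the same pattern, with purely illustrative names (async_op stands in for disconnect_async):

import threading

def async_op(callback):
    # pretend the operation completes later on another thread
    threading.Timer(0.1, callback, args=(True, None)).start()

def sync_op():
    done = threading.Event()
    result = {}

    def on_done(status, reason):
        result['success'] = status
        result['failure_reason'] = reason
        done.set()

    async_op(on_done)
    done.wait()          # block until the callback fires
    return result

print(sync_op())         # {'success': True, 'failure_reason': None}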
AgeOfLearning/coeus-unity-python-framework
coeus_unity/transform.py
https://github.com/AgeOfLearning/coeus-unity-python-framework/blob/cf8ca6800ace1425d917ea2628dbd05ed959fdd7/coeus_unity/transform.py#L53-L67
def get_rendered_transform_path_relative(self, relative_transform_ref): """ Generates a rendered transform path relative to parent. :param relative_transform_ref: :return: """ path = self.transform_path parent = self.parent while parent is not None and parent is not relative_transform_ref: path = "{0}/{1}".format(parent.transform_path, path) parent = parent.parent return path
[ "def", "get_rendered_transform_path_relative", "(", "self", ",", "relative_transform_ref", ")", ":", "path", "=", "self", ".", "transform_path", "parent", "=", "self", ".", "parent", "while", "parent", "is", "not", "None", "and", "parent", "is", "not", "relative_transform_ref", ":", "path", "=", "\"{0}/{1}\"", ".", "format", "(", "parent", ".", "transform_path", ",", "path", ")", "parent", "=", "parent", ".", "parent", "return", "path" ]
Generates a rendered transform path relative to parent. :param relative_transform_ref: :return:
[ "Generates", "a", "rendered", "transform", "path", "relative", "to", "parent", ".", ":", "param", "relative_transform_ref", ":", ":", "return", ":" ]
python
train
31.333333
annoviko/pyclustering
pyclustering/cluster/kmedoids.py
https://github.com/annoviko/pyclustering/blob/98aa0dd89fd36f701668fb1eb29c8fb5662bf7d0/pyclustering/cluster/kmedoids.py#L212-L229
def __create_distance_calculator(self): """! @brief Creates distance calculator in line with algorithms parameters. @return (callable) Distance calculator. """ if self.__data_type == 'points': return lambda index1, index2: self.__metric(self.__pointer_data[index1], self.__pointer_data[index2]) elif self.__data_type == 'distance_matrix': if isinstance(self.__pointer_data, numpy.matrix): return lambda index1, index2: self.__pointer_data.item((index1, index2)) return lambda index1, index2: self.__pointer_data[index1][index2] else: raise TypeError("Unknown type of data is specified '%s'" % self.__data_type)
[ "def", "__create_distance_calculator", "(", "self", ")", ":", "if", "self", ".", "__data_type", "==", "'points'", ":", "return", "lambda", "index1", ",", "index2", ":", "self", ".", "__metric", "(", "self", ".", "__pointer_data", "[", "index1", "]", ",", "self", ".", "__pointer_data", "[", "index2", "]", ")", "elif", "self", ".", "__data_type", "==", "'distance_matrix'", ":", "if", "isinstance", "(", "self", ".", "__pointer_data", ",", "numpy", ".", "matrix", ")", ":", "return", "lambda", "index1", ",", "index2", ":", "self", ".", "__pointer_data", ".", "item", "(", "(", "index1", ",", "index2", ")", ")", "return", "lambda", "index1", ",", "index2", ":", "self", ".", "__pointer_data", "[", "index1", "]", "[", "index2", "]", "else", ":", "raise", "TypeError", "(", "\"Unknown type of data is specified '%s'\"", "%", "self", ".", "__data_type", ")" ]
!
@brief Creates distance calculator in line with the algorithm's parameters.

@return (callable) Distance calculator.
[ "!" ]
python
valid
40.833333
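The __create_distance_calculator record above returns a two-argument callable whose body depends on the configured data type. A small illustrative sketch of the two shapes it can take (plain Python, not the pyclustering API itself):

import numpy

points = [[0.0, 0.0], [3.0, 4.0]]
euclidean = lambda a, b: sum((x - y) ** 2 for x, y in zip(a, b)) ** 0.5

# 'points' mode: indices are resolved against the raw coordinates
calc_points = lambda i, j: euclidean(points[i], points[j])

# 'distance_matrix' mode: indices are looked up in a precomputed matrix
dist_matrix = numpy.matrix([[0.0, 5.0], [5.0, 0.0]])
calc_matrix = lambda i, j: dist_matrix.item((i, j))

print(calc_points(0, 1), calc_matrix(0, 1))   # 5.0 5.0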
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/hooks.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/hooks.py#L213-L230
def clipboard_get(self): """ Get text from the clipboard. """ from IPython.lib.clipboard import ( osx_clipboard_get, tkinter_clipboard_get, win32_clipboard_get ) if sys.platform == 'win32': chain = [win32_clipboard_get, tkinter_clipboard_get] elif sys.platform == 'darwin': chain = [osx_clipboard_get, tkinter_clipboard_get] else: chain = [tkinter_clipboard_get] dispatcher = CommandChainDispatcher() for func in chain: dispatcher.add(func) text = dispatcher() return text
[ "def", "clipboard_get", "(", "self", ")", ":", "from", "IPython", ".", "lib", ".", "clipboard", "import", "(", "osx_clipboard_get", ",", "tkinter_clipboard_get", ",", "win32_clipboard_get", ")", "if", "sys", ".", "platform", "==", "'win32'", ":", "chain", "=", "[", "win32_clipboard_get", ",", "tkinter_clipboard_get", "]", "elif", "sys", ".", "platform", "==", "'darwin'", ":", "chain", "=", "[", "osx_clipboard_get", ",", "tkinter_clipboard_get", "]", "else", ":", "chain", "=", "[", "tkinter_clipboard_get", "]", "dispatcher", "=", "CommandChainDispatcher", "(", ")", "for", "func", "in", "chain", ":", "dispatcher", ".", "add", "(", "func", ")", "text", "=", "dispatcher", "(", ")", "return", "text" ]
Get text from the clipboard.
[ "Get", "text", "from", "the", "clipboard", "." ]
python
test
30.388889
SBRG/ssbio
ssbio/protein/sequence/seqprop.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/protein/sequence/seqprop.py#L573-L604
def get_subsequence(self, resnums, new_id=None, copy_letter_annotations=True): """Get a subsequence as a new SeqProp object given a list of residue numbers""" # XTODO: documentation biop_compound_list = [] for resnum in resnums: # XTODO can be sped up by separating into ranges based on continuous resnums feat = FeatureLocation(resnum - 1, resnum) biop_compound_list.append(feat) if len(biop_compound_list) == 0: log.debug('Zero length subsequence') return elif len(biop_compound_list) == 1: log.debug('Subsequence only one residue long') sub_feature_location = biop_compound_list[0] else: sub_feature_location = CompoundLocation(biop_compound_list) try: sub_feature = sub_feature_location.extract(self) except TypeError: log.critical('SeqProp {}: unknown error when trying to get subsequence - please investigate! ' 'Try using a feature to extract a subsequence from the SeqProp'.format(self.id)) return if not new_id: new_id = '{}_subseq'.format(self.id) new_sp = SeqProp(id=new_id, seq=sub_feature.seq) if copy_letter_annotations: new_sp.letter_annotations = sub_feature.letter_annotations return new_sp
[ "def", "get_subsequence", "(", "self", ",", "resnums", ",", "new_id", "=", "None", ",", "copy_letter_annotations", "=", "True", ")", ":", "# XTODO: documentation", "biop_compound_list", "=", "[", "]", "for", "resnum", "in", "resnums", ":", "# XTODO can be sped up by separating into ranges based on continuous resnums", "feat", "=", "FeatureLocation", "(", "resnum", "-", "1", ",", "resnum", ")", "biop_compound_list", ".", "append", "(", "feat", ")", "if", "len", "(", "biop_compound_list", ")", "==", "0", ":", "log", ".", "debug", "(", "'Zero length subsequence'", ")", "return", "elif", "len", "(", "biop_compound_list", ")", "==", "1", ":", "log", ".", "debug", "(", "'Subsequence only one residue long'", ")", "sub_feature_location", "=", "biop_compound_list", "[", "0", "]", "else", ":", "sub_feature_location", "=", "CompoundLocation", "(", "biop_compound_list", ")", "try", ":", "sub_feature", "=", "sub_feature_location", ".", "extract", "(", "self", ")", "except", "TypeError", ":", "log", ".", "critical", "(", "'SeqProp {}: unknown error when trying to get subsequence - please investigate! '", "'Try using a feature to extract a subsequence from the SeqProp'", ".", "format", "(", "self", ".", "id", ")", ")", "return", "if", "not", "new_id", ":", "new_id", "=", "'{}_subseq'", ".", "format", "(", "self", ".", "id", ")", "new_sp", "=", "SeqProp", "(", "id", "=", "new_id", ",", "seq", "=", "sub_feature", ".", "seq", ")", "if", "copy_letter_annotations", ":", "new_sp", ".", "letter_annotations", "=", "sub_feature", ".", "letter_annotations", "return", "new_sp" ]
Get a subsequence as a new SeqProp object given a list of residue numbers
[ "Get", "a", "subsequence", "as", "a", "new", "SeqProp", "object", "given", "a", "list", "of", "residue", "numbers" ]
python
train
42.6875
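The get_subsequence record above builds a CompoundLocation from 1-based residue numbers and extracts a new SeqProp. A plain-Python sketch of the same indexing idea, without the Biopython feature machinery:

# 1-based residue numbers -> subsequence string
seq = "MKTAYIAKQR"
resnums = [1, 2, 5, 6]
subseq = "".join(seq[r - 1] for r in resnums)
print(subseq)   # MKYI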
titusjan/argos
argos/config/qtctis.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/config/qtctis.py#L269-L279
def data(self, data): """ Sets the font data of this item. Does type conversion to ensure data is always of the correct type. Also updates the children (which is the reason for this property to be overloaded. """ self._data = self._enforceDataType(data) # Enforce self._data to be a QFont self.familyCti.data = fontFamilyIndex(self.data, list(self.familyCti.iterConfigValues)) self.pointSizeCti.data = self.data.pointSize() self.weightCti.data = fontWeightIndex(self.data, list(self.weightCti.iterConfigValues)) self.italicCti.data = self.data.italic()
[ "def", "data", "(", "self", ",", "data", ")", ":", "self", ".", "_data", "=", "self", ".", "_enforceDataType", "(", "data", ")", "# Enforce self._data to be a QFont", "self", ".", "familyCti", ".", "data", "=", "fontFamilyIndex", "(", "self", ".", "data", ",", "list", "(", "self", ".", "familyCti", ".", "iterConfigValues", ")", ")", "self", ".", "pointSizeCti", ".", "data", "=", "self", ".", "data", ".", "pointSize", "(", ")", "self", ".", "weightCti", ".", "data", "=", "fontWeightIndex", "(", "self", ".", "data", ",", "list", "(", "self", ".", "weightCti", ".", "iterConfigValues", ")", ")", "self", ".", "italicCti", ".", "data", "=", "self", ".", "data", ".", "italic", "(", ")" ]
Sets the font data of this item. Does type conversion to ensure data is
always of the correct type.

Also updates the children (which is the reason for this property to be
overloaded).
[ "Sets", "the", "font", "data", "of", "this", "item", ".", "Does", "type", "conversion", "to", "ensure", "data", "is", "always", "of", "the", "correct", "type", "." ]
python
train
56.636364
andreikop/qutepart
qutepart/rectangularselection.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/rectangularselection.py#L154-L169
def selections(self): """Build list of extra selections for rectangular selection""" selections = [] cursors = self.cursors() if cursors: background = self._qpart.palette().color(QPalette.Highlight) foreground = self._qpart.palette().color(QPalette.HighlightedText) for cursor in cursors: selection = QTextEdit.ExtraSelection() selection.format.setBackground(background) selection.format.setForeground(foreground) selection.cursor = cursor selections.append(selection) return selections
[ "def", "selections", "(", "self", ")", ":", "selections", "=", "[", "]", "cursors", "=", "self", ".", "cursors", "(", ")", "if", "cursors", ":", "background", "=", "self", ".", "_qpart", ".", "palette", "(", ")", ".", "color", "(", "QPalette", ".", "Highlight", ")", "foreground", "=", "self", ".", "_qpart", ".", "palette", "(", ")", ".", "color", "(", "QPalette", ".", "HighlightedText", ")", "for", "cursor", "in", "cursors", ":", "selection", "=", "QTextEdit", ".", "ExtraSelection", "(", ")", "selection", ".", "format", ".", "setBackground", "(", "background", ")", "selection", ".", "format", ".", "setForeground", "(", "foreground", ")", "selection", ".", "cursor", "=", "cursor", "selections", ".", "append", "(", "selection", ")", "return", "selections" ]
Build list of extra selections for rectangular selection
[ "Build", "list", "of", "extra", "selections", "for", "rectangular", "selection" ]
python
train
39.3125
apache/incubator-mxnet
python/mxnet/gluon/utils.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/python/mxnet/gluon/utils.py#L176-L199
def check_sha1(filename, sha1_hash): """Check whether the sha1 hash of the file content matches the expected hash. Parameters ---------- filename : str Path to the file. sha1_hash : str Expected sha1 hash in hexadecimal digits. Returns ------- bool Whether the file content matches the expected hash. """ sha1 = hashlib.sha1() with open(filename, 'rb') as f: while True: data = f.read(1048576) if not data: break sha1.update(data) return sha1.hexdigest() == sha1_hash
[ "def", "check_sha1", "(", "filename", ",", "sha1_hash", ")", ":", "sha1", "=", "hashlib", ".", "sha1", "(", ")", "with", "open", "(", "filename", ",", "'rb'", ")", "as", "f", ":", "while", "True", ":", "data", "=", "f", ".", "read", "(", "1048576", ")", "if", "not", "data", ":", "break", "sha1", ".", "update", "(", "data", ")", "return", "sha1", ".", "hexdigest", "(", ")", "==", "sha1_hash" ]
Check whether the sha1 hash of the file content matches the expected hash. Parameters ---------- filename : str Path to the file. sha1_hash : str Expected sha1 hash in hexadecimal digits. Returns ------- bool Whether the file content matches the expected hash.
[ "Check", "whether", "the", "sha1", "hash", "of", "the", "file", "content", "matches", "the", "expected", "hash", "." ]
python
train
24.125
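A quick exercise of the check_sha1 record above (assumes the function is in scope; the temporary file and its contents are illustrative):

import hashlib
import os
import tempfile

tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.write(b"hello world")
tmp.close()

expected = hashlib.sha1(b"hello world").hexdigest()
print(check_sha1(tmp.name, expected))    # True
print(check_sha1(tmp.name, "0" * 40))    # False
os.remove(tmp.name)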
mathandy/svgpathtools
svgpathtools/path.py
https://github.com/mathandy/svgpathtools/blob/fd7348a1dfd88b65ea61da02325c6605aedf8c4f/svgpathtools/path.py#L1031-L1044
def is_smooth_from(self, previous, warning_on=True): """[Warning: The name of this method is somewhat misleading (yet kept for compatibility with scripts created using svg.path 2.0). This method is meant only for d string creation and should not be used to check for kinks. To check a segment for differentiability, use the joins_smoothly_with() method instead.]""" if warning_on: warn(_is_smooth_from_warning) if isinstance(previous, CubicBezier): return (self.start == previous.end and (self.control1 - self.start) == ( previous.end - previous.control2)) else: return self.control1 == self.start
[ "def", "is_smooth_from", "(", "self", ",", "previous", ",", "warning_on", "=", "True", ")", ":", "if", "warning_on", ":", "warn", "(", "_is_smooth_from_warning", ")", "if", "isinstance", "(", "previous", ",", "CubicBezier", ")", ":", "return", "(", "self", ".", "start", "==", "previous", ".", "end", "and", "(", "self", ".", "control1", "-", "self", ".", "start", ")", "==", "(", "previous", ".", "end", "-", "previous", ".", "control2", ")", ")", "else", ":", "return", "self", ".", "control1", "==", "self", ".", "start" ]
[Warning: The name of this method is somewhat misleading (yet kept for compatibility with scripts created using svg.path 2.0). This method is meant only for d string creation and should not be used to check for kinks. To check a segment for differentiability, use the joins_smoothly_with() method instead.]
[ "[", "Warning", ":", "The", "name", "of", "this", "method", "is", "somewhat", "misleading", "(", "yet", "kept", "for", "compatibility", "with", "scripts", "created", "using", "svg", ".", "path", "2", ".", "0", ")", ".", "This", "method", "is", "meant", "only", "for", "d", "string", "creation", "and", "should", "not", "be", "used", "to", "check", "for", "kinks", ".", "To", "check", "a", "segment", "for", "differentiability", "use", "the", "joins_smoothly_with", "()", "method", "instead", ".", "]" ]
python
train
52.142857
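A short sketch of the is_smooth_from record above using svgpathtools-style CubicBezier segments (the top-level import path is an assumption; control points are complex numbers):

from svgpathtools import CubicBezier

a = CubicBezier(0 + 0j, 1 + 1j, 2 + 1j, 3 + 0j)
b = CubicBezier(3 + 0j, 4 - 1j, 5 + 0j, 6 + 0j)

# True here: b starts where a ends and b.control1 - b.start == a.end - a.control2
print(b.is_smooth_from(a, warning_on=False))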
reanahub/reana-db
reana_db/utils.py
https://github.com/reanahub/reana-db/blob/4efcb46d23af035689964d8c25a804c5a8f1dfc3/reana_db/utils.py#L152-L170
def _get_workflow_by_uuid(workflow_uuid): """Get Workflow with UUIDv4. :param workflow_uuid: UUIDv4 of a Workflow. :type workflow_uuid: String representing a valid UUIDv4. :rtype: reana-db.models.Workflow """ from reana_db.models import Workflow workflow = Workflow.query.filter(Workflow.id_ == workflow_uuid).first() if not workflow: raise ValueError( 'REANA_WORKON is set to {0}, but ' 'that workflow does not exist. ' 'Please set your REANA_WORKON environment ' 'variable appropriately.'. format(workflow_uuid)) return workflow
[ "def", "_get_workflow_by_uuid", "(", "workflow_uuid", ")", ":", "from", "reana_db", ".", "models", "import", "Workflow", "workflow", "=", "Workflow", ".", "query", ".", "filter", "(", "Workflow", ".", "id_", "==", "workflow_uuid", ")", ".", "first", "(", ")", "if", "not", "workflow", ":", "raise", "ValueError", "(", "'REANA_WORKON is set to {0}, but '", "'that workflow does not exist. '", "'Please set your REANA_WORKON environment '", "'variable appropriately.'", ".", "format", "(", "workflow_uuid", ")", ")", "return", "workflow" ]
Get Workflow with UUIDv4. :param workflow_uuid: UUIDv4 of a Workflow. :type workflow_uuid: String representing a valid UUIDv4. :rtype: reana-db.models.Workflow
[ "Get", "Workflow", "with", "UUIDv4", "." ]
python
train
34.473684
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_ntp.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_ntp.py#L85-L99
def ntp_server_key(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") ntp = ET.SubElement(config, "ntp", xmlns="urn:brocade.com:mgmt:brocade-ntp") server = ET.SubElement(ntp, "server") ip_key = ET.SubElement(server, "ip") ip_key.text = kwargs.pop('ip') use_vrf_key = ET.SubElement(server, "use-vrf") use_vrf_key.text = kwargs.pop('use_vrf') key = ET.SubElement(server, "key") key.text = kwargs.pop('key') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "ntp_server_key", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "ntp", "=", "ET", ".", "SubElement", "(", "config", ",", "\"ntp\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-ntp\"", ")", "server", "=", "ET", ".", "SubElement", "(", "ntp", ",", "\"server\"", ")", "ip_key", "=", "ET", ".", "SubElement", "(", "server", ",", "\"ip\"", ")", "ip_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'ip'", ")", "use_vrf_key", "=", "ET", ".", "SubElement", "(", "server", ",", "\"use-vrf\"", ")", "use_vrf_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'use_vrf'", ")", "key", "=", "ET", ".", "SubElement", "(", "server", ",", "\"key\"", ")", "key", ".", "text", "=", "kwargs", ".", "pop", "(", "'key'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
39.466667
newville/wxmplot
wxmplot/plotpanel.py
https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/wxmplot/plotpanel.py#L682-L698
def __onPickEvent(self, event=None): """pick events""" legline = event.artist trace = self.conf.legend_map.get(legline, None) visible = True if trace is not None and self.conf.hidewith_legend: line, legline, legtext = trace visible = not line.get_visible() line.set_visible(visible) if visible: legline.set_zorder(10.00) legline.set_alpha(1.00) legtext.set_zorder(10.00) legtext.set_alpha(1.00) else: legline.set_alpha(0.50) legtext.set_alpha(0.50)
[ "def", "__onPickEvent", "(", "self", ",", "event", "=", "None", ")", ":", "legline", "=", "event", ".", "artist", "trace", "=", "self", ".", "conf", ".", "legend_map", ".", "get", "(", "legline", ",", "None", ")", "visible", "=", "True", "if", "trace", "is", "not", "None", "and", "self", ".", "conf", ".", "hidewith_legend", ":", "line", ",", "legline", ",", "legtext", "=", "trace", "visible", "=", "not", "line", ".", "get_visible", "(", ")", "line", ".", "set_visible", "(", "visible", ")", "if", "visible", ":", "legline", ".", "set_zorder", "(", "10.00", ")", "legline", ".", "set_alpha", "(", "1.00", ")", "legtext", ".", "set_zorder", "(", "10.00", ")", "legtext", ".", "set_alpha", "(", "1.00", ")", "else", ":", "legline", ".", "set_alpha", "(", "0.50", ")", "legtext", ".", "set_alpha", "(", "0.50", ")" ]
pick events
[ "pick", "events" ]
python
train
36.941176
collectiveacuity/labPack
labpack/platforms/localhost.py
https://github.com/collectiveacuity/labPack/blob/52949ece35e72e3cc308f54d9ffa6bfbd96805b8/labpack/platforms/localhost.py#L392-L464
def list(self, filter_function=None, list_root='', max_results=1, reverse_order=False, previous_file=''): ''' a method to list files on localhost from walk of directories :param filter_function: (keyword arguments) function used to filter results :param list_root: string with localhost path from which to root list of files :param max_results: integer with maximum number of results to return :param reverse_order: boolean to determine alphabetical direction of walk :param previous_file: string with absolute path of file to begin search after :return: list of file absolute path strings NOTE: the filter_function must be able to accept keyword arguments and return a value that can evaluate to true or false. while walking the local file structure, the metadata for each file will be fed to the filter function. if the function evaluates this input and returns a true value the file will be included in the list results. fields produced by the metadata function are listed in the self.file_model.schema ''' __name__ = '%s.list(...)' % self.__class__.__name__ # validate input input_kwargs = [list_root, max_results, previous_file] input_names = ['.list_root', '.max_results', '.previous_file'] for i in range(len(input_kwargs)): if input_kwargs[i]: self.fields.validate(input_kwargs[i], input_names[i]) # validate filter function if filter_function: try: filter_function(**self.file_model.schema) except: err_msg = __name__.replace('...', 'filter_function=%s' % filter_function.__class__.__name__) raise TypeError('%s must accept key word arguments.' % err_msg) # validate that previous file exists file_exists = False if previous_file: if os.path.exists(previous_file): if os.path.isfile(previous_file): file_exists = True if not file_exists: err_msg = __name__.replace('...', 'previous_file="%s"' % previous_file) raise ValueError('%s must be a valid file.' % err_msg) # construct empty results object results_list = [] # determine root for walk if list_root: if not os.path.isdir(list_root): return results_list else: list_root = './' # walk directory structure to find files for file_path in self.walk(list_root, reverse_order, previous_file): if filter_function: file_metadata = self.metadata(file_path) if filter_function(**file_metadata): results_list.append(file_path) else: results_list.append(file_path) # return results list if len(results_list) == max_results: return results_list return results_list
[ "def", "list", "(", "self", ",", "filter_function", "=", "None", ",", "list_root", "=", "''", ",", "max_results", "=", "1", ",", "reverse_order", "=", "False", ",", "previous_file", "=", "''", ")", ":", "__name__", "=", "'%s.list(...)'", "%", "self", ".", "__class__", ".", "__name__", "# validate input\r", "input_kwargs", "=", "[", "list_root", ",", "max_results", ",", "previous_file", "]", "input_names", "=", "[", "'.list_root'", ",", "'.max_results'", ",", "'.previous_file'", "]", "for", "i", "in", "range", "(", "len", "(", "input_kwargs", ")", ")", ":", "if", "input_kwargs", "[", "i", "]", ":", "self", ".", "fields", ".", "validate", "(", "input_kwargs", "[", "i", "]", ",", "input_names", "[", "i", "]", ")", "# validate filter function\r", "if", "filter_function", ":", "try", ":", "filter_function", "(", "*", "*", "self", ".", "file_model", ".", "schema", ")", "except", ":", "err_msg", "=", "__name__", ".", "replace", "(", "'...'", ",", "'filter_function=%s'", "%", "filter_function", ".", "__class__", ".", "__name__", ")", "raise", "TypeError", "(", "'%s must accept key word arguments.'", "%", "err_msg", ")", "# validate that previous file exists\r", "file_exists", "=", "False", "if", "previous_file", ":", "if", "os", ".", "path", ".", "exists", "(", "previous_file", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "previous_file", ")", ":", "file_exists", "=", "True", "if", "not", "file_exists", ":", "err_msg", "=", "__name__", ".", "replace", "(", "'...'", ",", "'previous_file=\"%s\"'", "%", "previous_file", ")", "raise", "ValueError", "(", "'%s must be a valid file.'", "%", "err_msg", ")", "# construct empty results object\r", "results_list", "=", "[", "]", "# determine root for walk\r", "if", "list_root", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "list_root", ")", ":", "return", "results_list", "else", ":", "list_root", "=", "'./'", "# walk directory structure to find files\r", "for", "file_path", "in", "self", ".", "walk", "(", "list_root", ",", "reverse_order", ",", "previous_file", ")", ":", "if", "filter_function", ":", "file_metadata", "=", "self", ".", "metadata", "(", "file_path", ")", "if", "filter_function", "(", "*", "*", "file_metadata", ")", ":", "results_list", ".", "append", "(", "file_path", ")", "else", ":", "results_list", ".", "append", "(", "file_path", ")", "# return results list\r", "if", "len", "(", "results_list", ")", "==", "max_results", ":", "return", "results_list", "return", "results_list" ]
a method to list files on localhost from walk of directories :param filter_function: (keyword arguments) function used to filter results :param list_root: string with localhost path from which to root list of files :param max_results: integer with maximum number of results to return :param reverse_order: boolean to determine alphabetical direction of walk :param previous_file: string with absolute path of file to begin search after :return: list of file absolute path strings NOTE: the filter_function must be able to accept keyword arguments and return a value that can evaluate to true or false. while walking the local file structure, the metadata for each file will be fed to the filter function. if the function evaluates this input and returns a true value the file will be included in the list results. fields produced by the metadata function are listed in the self.file_model.schema
[ "a", "method", "to", "list", "files", "on", "localhost", "from", "walk", "of", "directories", ":", "param", "filter_function", ":", "(", "keyword", "arguments", ")", "function", "used", "to", "filter", "results", ":", "param", "list_root", ":", "string", "with", "localhost", "path", "from", "which", "to", "root", "list", "of", "files", ":", "param", "max_results", ":", "integer", "with", "maximum", "number", "of", "results", "to", "return", ":", "param", "reverse_order", ":", "boolean", "to", "determine", "alphabetical", "direction", "of", "walk", ":", "param", "previous_file", ":", "string", "with", "absolute", "path", "of", "file", "to", "begin", "search", "after", ":", "return", ":", "list", "of", "file", "absolute", "path", "strings", "NOTE", ":", "the", "filter_function", "must", "be", "able", "to", "accept", "keyword", "arguments", "and", "return", "a", "value", "that", "can", "evaluate", "to", "true", "or", "false", ".", "while", "walking", "the", "local", "file", "structure", "the", "metadata", "for", "each", "file", "will", "be", "fed", "to", "the", "filter", "function", ".", "if", "the", "function", "evaluates", "this", "input", "and", "returns", "a", "true", "value", "the", "file", "will", "be", "included", "in", "the", "list", "results", ".", "fields", "produced", "by", "the", "metadata", "function", "are", "listed", "in", "the", "self", ".", "file_model", ".", "schema" ]
python
train
42.767123
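The list record above expects filter_function to accept the file metadata as keyword arguments and return a truthy value for files to keep. A sketch of such a filter; the metadata field names and the client constructor are illustrative assumptions, not the documented schema:

def only_large_text_files(**metadata):
    # keep .txt files larger than 1 KiB (field names are assumed)
    return metadata.get('extension') == '.txt' and metadata.get('size', 0) > 1024

# client = localhostClient()   # hypothetical constructor name
# files = client.list(filter_function=only_large_text_files,
#                     list_root='/tmp', max_results=10)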
liftoff/pyminifier
pyminifier/obfuscate.py
https://github.com/liftoff/pyminifier/blob/087ea7b0c8c964f1f907c3f350f5ce281798db86/pyminifier/obfuscate.py#L533-L572
def obfuscate_builtins(module, tokens, name_generator, table=None): """ Inserts an assignment, '<obfuscated identifier> = <builtin function>' at the beginning of *tokens* (after the shebang and encoding if present) for every Python built-in function that is used inside *tokens*. Also, replaces all of said builti-in functions in *tokens* with each respective obfuscated identifer. Obfuscated identifier names are pulled out of name_generator via next(). If *table* is provided, replacements will be looked up there before generating a new unique name. """ used_builtins = analyze.enumerate_builtins(tokens) obfuscated_assignments = remap_name(name_generator, used_builtins, table) replacements = [] for assignment in obfuscated_assignments.split('\n'): replacements.append(assignment.split('=')[0]) replacement_dict = dict(zip(used_builtins, replacements)) if table: table[0].update(replacement_dict) iter_replacements = iter(replacements) for builtin in used_builtins: replace_obfuscatables( module, tokens, obfuscate_unique, builtin, iter_replacements) # Check for shebangs and encodings before we do anything else skip_tokens = 0 matched_shebang = False matched_encoding = False for tok in tokens[0:4]: # Will always be in the first four tokens line = tok[4] if analyze.shebang.match(line): # (e.g. '#!/usr/bin/env python') if not matched_shebang: matched_shebang = True skip_tokens += 1 elif analyze.encoding.match(line): # (e.g. '# -*- coding: utf-8 -*-') if not matched_encoding: matched_encoding = True skip_tokens += 1 insert_in_next_line(tokens, skip_tokens, obfuscated_assignments)
[ "def", "obfuscate_builtins", "(", "module", ",", "tokens", ",", "name_generator", ",", "table", "=", "None", ")", ":", "used_builtins", "=", "analyze", ".", "enumerate_builtins", "(", "tokens", ")", "obfuscated_assignments", "=", "remap_name", "(", "name_generator", ",", "used_builtins", ",", "table", ")", "replacements", "=", "[", "]", "for", "assignment", "in", "obfuscated_assignments", ".", "split", "(", "'\\n'", ")", ":", "replacements", ".", "append", "(", "assignment", ".", "split", "(", "'='", ")", "[", "0", "]", ")", "replacement_dict", "=", "dict", "(", "zip", "(", "used_builtins", ",", "replacements", ")", ")", "if", "table", ":", "table", "[", "0", "]", ".", "update", "(", "replacement_dict", ")", "iter_replacements", "=", "iter", "(", "replacements", ")", "for", "builtin", "in", "used_builtins", ":", "replace_obfuscatables", "(", "module", ",", "tokens", ",", "obfuscate_unique", ",", "builtin", ",", "iter_replacements", ")", "# Check for shebangs and encodings before we do anything else", "skip_tokens", "=", "0", "matched_shebang", "=", "False", "matched_encoding", "=", "False", "for", "tok", "in", "tokens", "[", "0", ":", "4", "]", ":", "# Will always be in the first four tokens", "line", "=", "tok", "[", "4", "]", "if", "analyze", ".", "shebang", ".", "match", "(", "line", ")", ":", "# (e.g. '#!/usr/bin/env python')", "if", "not", "matched_shebang", ":", "matched_shebang", "=", "True", "skip_tokens", "+=", "1", "elif", "analyze", ".", "encoding", ".", "match", "(", "line", ")", ":", "# (e.g. '# -*- coding: utf-8 -*-')", "if", "not", "matched_encoding", ":", "matched_encoding", "=", "True", "skip_tokens", "+=", "1", "insert_in_next_line", "(", "tokens", ",", "skip_tokens", ",", "obfuscated_assignments", ")" ]
Inserts an assignment, '<obfuscated identifier> = <builtin function>' at the
beginning of *tokens* (after the shebang and encoding if present) for every
Python built-in function that is used inside *tokens*.  Also, replaces all
of said built-in functions in *tokens* with each respective obfuscated
identifier.

Obfuscated identifier names are pulled out of name_generator via next().

If *table* is provided, replacements will be looked up there before
generating a new unique name.
[ "Inserts", "an", "assignment", "<obfuscated", "identifier", ">", "=", "<builtin", "function", ">", "at", "the", "beginning", "of", "*", "tokens", "*", "(", "after", "the", "shebang", "and", "encoding", "if", "present", ")", "for", "every", "Python", "built", "-", "in", "function", "that", "is", "used", "inside", "*", "tokens", "*", ".", "Also", "replaces", "all", "of", "said", "builti", "-", "in", "functions", "in", "*", "tokens", "*", "with", "each", "respective", "obfuscated", "identifer", "." ]
python
train
45.025
locationlabs/mockredis
mockredis/client.py
https://github.com/locationlabs/mockredis/blob/fd4e3117066ff0c24e86ebca007853a8092e3254/mockredis/client.py#L424-L446
def msetnx(self, *args, **kwargs): """ Sets key/values based on a mapping if none of the keys are already set. Mapping can be supplied as a single dictionary argument or as kwargs. Returns a boolean indicating if the operation was successful. """ if args: if len(args) != 1 or not isinstance(args[0], dict): raise RedisError('MSETNX requires **kwargs or a single dict arg') mapping = args[0] else: mapping = kwargs if len(mapping) == 0: raise ResponseError("wrong number of arguments for 'msetnx' command") for key in mapping.keys(): if self._encode(key) in self.redis: return False for key, value in mapping.items(): self.set(key, value) return True
[ "def", "msetnx", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "args", ":", "if", "len", "(", "args", ")", "!=", "1", "or", "not", "isinstance", "(", "args", "[", "0", "]", ",", "dict", ")", ":", "raise", "RedisError", "(", "'MSETNX requires **kwargs or a single dict arg'", ")", "mapping", "=", "args", "[", "0", "]", "else", ":", "mapping", "=", "kwargs", "if", "len", "(", "mapping", ")", "==", "0", ":", "raise", "ResponseError", "(", "\"wrong number of arguments for 'msetnx' command\"", ")", "for", "key", "in", "mapping", ".", "keys", "(", ")", ":", "if", "self", ".", "_encode", "(", "key", ")", "in", "self", ".", "redis", ":", "return", "False", "for", "key", ",", "value", "in", "mapping", ".", "items", "(", ")", ":", "self", ".", "set", "(", "key", ",", "value", ")", "return", "True" ]
Sets key/values based on a mapping if none of the keys are already set. Mapping can be supplied as a single dictionary argument or as kwargs. Returns a boolean indicating if the operation was successful.
[ "Sets", "key", "/", "values", "based", "on", "a", "mapping", "if", "none", "of", "the", "keys", "are", "already", "set", ".", "Mapping", "can", "be", "supplied", "as", "a", "single", "dictionary", "argument", "or", "as", "kwargs", ".", "Returns", "a", "boolean", "indicating", "if", "the", "operation", "was", "successful", "." ]
python
train
35.782609
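A minimal exercise of the msetnx semantics shown above; the MockRedis import is an assumed public name from the record's package:

from mockredis import MockRedis

r = MockRedis()
print(r.msetnx({'a': 1, 'b': 2}))   # True: neither key existed, both are set
print(r.msetnx({'b': 3, 'c': 4}))   # False: 'b' already exists, nothing is written
print(r.get('c'))                   # None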
saltstack/salt
salt/pillar/s3.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/s3.py#L398-L432
def _get_file_from_s3(creds, metadata, saltenv, bucket, path, cached_file_path): ''' Checks the local cache for the file, if it's old or missing go grab the file from S3 and update the cache ''' # check the local cache... if os.path.isfile(cached_file_path): file_meta = _find_file_meta(metadata, bucket, saltenv, path) file_md5 = "".join(list(filter(str.isalnum, file_meta['ETag']))) \ if file_meta else None cached_md5 = salt.utils.hashutils.get_hash(cached_file_path, 'md5') # hashes match we have a cache hit log.debug('Cached file: path=%s, md5=%s, etag=%s', cached_file_path, cached_md5, file_md5) if cached_md5 == file_md5: return # ... or get the file from S3 __utils__['s3.query']( key=creds.key, keyid=creds.keyid, kms_keyid=creds.kms_keyid, bucket=bucket, service_url=creds.service_url, path=_quote(path), local_file=cached_file_path, verify_ssl=creds.verify_ssl, location=creds.location, path_style=creds.path_style, https_enable=creds.https_enable )
[ "def", "_get_file_from_s3", "(", "creds", ",", "metadata", ",", "saltenv", ",", "bucket", ",", "path", ",", "cached_file_path", ")", ":", "# check the local cache...", "if", "os", ".", "path", ".", "isfile", "(", "cached_file_path", ")", ":", "file_meta", "=", "_find_file_meta", "(", "metadata", ",", "bucket", ",", "saltenv", ",", "path", ")", "file_md5", "=", "\"\"", ".", "join", "(", "list", "(", "filter", "(", "str", ".", "isalnum", ",", "file_meta", "[", "'ETag'", "]", ")", ")", ")", "if", "file_meta", "else", "None", "cached_md5", "=", "salt", ".", "utils", ".", "hashutils", ".", "get_hash", "(", "cached_file_path", ",", "'md5'", ")", "# hashes match we have a cache hit", "log", ".", "debug", "(", "'Cached file: path=%s, md5=%s, etag=%s'", ",", "cached_file_path", ",", "cached_md5", ",", "file_md5", ")", "if", "cached_md5", "==", "file_md5", ":", "return", "# ... or get the file from S3", "__utils__", "[", "'s3.query'", "]", "(", "key", "=", "creds", ".", "key", ",", "keyid", "=", "creds", ".", "keyid", ",", "kms_keyid", "=", "creds", ".", "kms_keyid", ",", "bucket", "=", "bucket", ",", "service_url", "=", "creds", ".", "service_url", ",", "path", "=", "_quote", "(", "path", ")", ",", "local_file", "=", "cached_file_path", ",", "verify_ssl", "=", "creds", ".", "verify_ssl", ",", "location", "=", "creds", ".", "location", ",", "path_style", "=", "creds", ".", "path_style", ",", "https_enable", "=", "creds", ".", "https_enable", ")" ]
Checks the local cache for the file; if it is old or missing, grabs the
file from S3 and updates the cache
[ "Checks", "the", "local", "cache", "for", "the", "file", "if", "it", "s", "old", "or", "missing", "go", "grab", "the", "file", "from", "S3", "and", "update", "the", "cache" ]
python
train
33.371429
saltstack/salt
salt/cloud/clouds/vultrpy.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/vultrpy.py#L265-L474
def create(vm_): ''' Create a single VM from a data dict ''' if 'driver' not in vm_: vm_['driver'] = vm_['provider'] private_networking = config.get_cloud_config_value( 'enable_private_network', vm_, __opts__, search_global=False, default=False, ) startup_script = config.get_cloud_config_value( 'startup_script_id', vm_, __opts__, search_global=False, default=None, ) if startup_script and str(startup_script) not in avail_scripts(): log.error('Your Vultr account does not have a startup script with ID %s', str(startup_script)) return False if private_networking is not None: if not isinstance(private_networking, bool): raise SaltCloudConfigError("'private_networking' should be a boolean value.") if private_networking is True: enable_private_network = 'yes' else: enable_private_network = 'no' __utils__['cloud.fire_event']( 'event', 'starting create', 'salt/cloud/{0}/creating'.format(vm_['name']), args=__utils__['cloud.filter_event']('creating', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) osid = _lookup_vultrid(vm_['image'], 'avail_images', 'OSID') if not osid: log.error('Vultr does not have an image with id or name %s', vm_['image']) return False vpsplanid = _lookup_vultrid(vm_['size'], 'avail_sizes', 'VPSPLANID') if not vpsplanid: log.error('Vultr does not have a size with id or name %s', vm_['size']) return False dcid = _lookup_vultrid(vm_['location'], 'avail_locations', 'DCID') if not dcid: log.error('Vultr does not have a location with id or name %s', vm_['location']) return False kwargs = { 'label': vm_['name'], 'OSID': osid, 'VPSPLANID': vpsplanid, 'DCID': dcid, 'hostname': vm_['name'], 'enable_private_network': enable_private_network, } if startup_script: kwargs['SCRIPTID'] = startup_script log.info('Creating Cloud VM %s', vm_['name']) __utils__['cloud.fire_event']( 'event', 'requesting instance', 'salt/cloud/{0}/requesting'.format(vm_['name']), args={ 'kwargs': __utils__['cloud.filter_event']('requesting', kwargs, list(kwargs)), }, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'], ) try: data = _query('server/create', method='POST', data=_urlencode(kwargs)) if int(data.get('status', '200')) >= 300: log.error( 'Error creating %s on Vultr\n\n' 'Vultr API returned %s\n', vm_['name'], data ) log.error('Status 412 may mean that you are requesting an\n' 'invalid location, image, or size.') __utils__['cloud.fire_event']( 'event', 'instance request failed', 'salt/cloud/{0}/requesting/failed'.format(vm_['name']), args={'kwargs': kwargs}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'], ) return False except Exception as exc: log.error( 'Error creating %s on Vultr\n\n' 'The following exception was thrown when trying to ' 'run the initial deployment:\n%s', vm_['name'], exc, # Show the traceback if the debug logging level is enabled exc_info_on_loglevel=logging.DEBUG ) __utils__['cloud.fire_event']( 'event', 'instance request failed', 'salt/cloud/{0}/requesting/failed'.format(vm_['name']), args={'kwargs': kwargs}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'], ) return False def wait_for_hostname(): ''' Wait for the IP address to become available ''' data = show_instance(vm_['name'], call='action') main_ip = six.text_type(data.get('main_ip', '0')) if main_ip.startswith('0'): time.sleep(3) return False return data['main_ip'] def wait_for_default_password(): ''' Wait for the IP address to become available ''' data = show_instance(vm_['name'], call='action') # print("Waiting for default 
password") # pprint.pprint(data) if six.text_type(data.get('default_password', '')) == '': time.sleep(1) return False return data['default_password'] def wait_for_status(): ''' Wait for the IP address to become available ''' data = show_instance(vm_['name'], call='action') # print("Waiting for status normal") # pprint.pprint(data) if six.text_type(data.get('status', '')) != 'active': time.sleep(1) return False return data['default_password'] def wait_for_server_state(): ''' Wait for the IP address to become available ''' data = show_instance(vm_['name'], call='action') # print("Waiting for server state ok") # pprint.pprint(data) if six.text_type(data.get('server_state', '')) != 'ok': time.sleep(1) return False return data['default_password'] vm_['ssh_host'] = __utils__['cloud.wait_for_fun']( wait_for_hostname, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) vm_['password'] = __utils__['cloud.wait_for_fun']( wait_for_default_password, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) __utils__['cloud.wait_for_fun']( wait_for_status, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) __utils__['cloud.wait_for_fun']( wait_for_server_state, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', vm_, __opts__, default=15 * 60), ) __opts__['hard_timeout'] = config.get_cloud_config_value( 'hard_timeout', get_configured_provider(), __opts__, search_global=False, default=None, ) # Bootstrap ret = __utils__['cloud.bootstrap'](vm_, __opts__) ret.update(show_instance(vm_['name'], call='action')) log.info('Created Cloud VM \'%s\'', vm_['name']) log.debug( '\'%s\' VM creation details:\n%s', vm_['name'], pprint.pformat(data) ) __utils__['cloud.fire_event']( 'event', 'created instance', 'salt/cloud/{0}/created'.format(vm_['name']), args=__utils__['cloud.filter_event']('created', vm_, ['name', 'profile', 'provider', 'driver']), sock_dir=__opts__['sock_dir'], transport=__opts__['transport'] ) return ret
[ "def", "create", "(", "vm_", ")", ":", "if", "'driver'", "not", "in", "vm_", ":", "vm_", "[", "'driver'", "]", "=", "vm_", "[", "'provider'", "]", "private_networking", "=", "config", ".", "get_cloud_config_value", "(", "'enable_private_network'", ",", "vm_", ",", "__opts__", ",", "search_global", "=", "False", ",", "default", "=", "False", ",", ")", "startup_script", "=", "config", ".", "get_cloud_config_value", "(", "'startup_script_id'", ",", "vm_", ",", "__opts__", ",", "search_global", "=", "False", ",", "default", "=", "None", ",", ")", "if", "startup_script", "and", "str", "(", "startup_script", ")", "not", "in", "avail_scripts", "(", ")", ":", "log", ".", "error", "(", "'Your Vultr account does not have a startup script with ID %s'", ",", "str", "(", "startup_script", ")", ")", "return", "False", "if", "private_networking", "is", "not", "None", ":", "if", "not", "isinstance", "(", "private_networking", ",", "bool", ")", ":", "raise", "SaltCloudConfigError", "(", "\"'private_networking' should be a boolean value.\"", ")", "if", "private_networking", "is", "True", ":", "enable_private_network", "=", "'yes'", "else", ":", "enable_private_network", "=", "'no'", "__utils__", "[", "'cloud.fire_event'", "]", "(", "'event'", ",", "'starting create'", ",", "'salt/cloud/{0}/creating'", ".", "format", "(", "vm_", "[", "'name'", "]", ")", ",", "args", "=", "__utils__", "[", "'cloud.filter_event'", "]", "(", "'creating'", ",", "vm_", ",", "[", "'name'", ",", "'profile'", ",", "'provider'", ",", "'driver'", "]", ")", ",", "sock_dir", "=", "__opts__", "[", "'sock_dir'", "]", ",", "transport", "=", "__opts__", "[", "'transport'", "]", ")", "osid", "=", "_lookup_vultrid", "(", "vm_", "[", "'image'", "]", ",", "'avail_images'", ",", "'OSID'", ")", "if", "not", "osid", ":", "log", ".", "error", "(", "'Vultr does not have an image with id or name %s'", ",", "vm_", "[", "'image'", "]", ")", "return", "False", "vpsplanid", "=", "_lookup_vultrid", "(", "vm_", "[", "'size'", "]", ",", "'avail_sizes'", ",", "'VPSPLANID'", ")", "if", "not", "vpsplanid", ":", "log", ".", "error", "(", "'Vultr does not have a size with id or name %s'", ",", "vm_", "[", "'size'", "]", ")", "return", "False", "dcid", "=", "_lookup_vultrid", "(", "vm_", "[", "'location'", "]", ",", "'avail_locations'", ",", "'DCID'", ")", "if", "not", "dcid", ":", "log", ".", "error", "(", "'Vultr does not have a location with id or name %s'", ",", "vm_", "[", "'location'", "]", ")", "return", "False", "kwargs", "=", "{", "'label'", ":", "vm_", "[", "'name'", "]", ",", "'OSID'", ":", "osid", ",", "'VPSPLANID'", ":", "vpsplanid", ",", "'DCID'", ":", "dcid", ",", "'hostname'", ":", "vm_", "[", "'name'", "]", ",", "'enable_private_network'", ":", "enable_private_network", ",", "}", "if", "startup_script", ":", "kwargs", "[", "'SCRIPTID'", "]", "=", "startup_script", "log", ".", "info", "(", "'Creating Cloud VM %s'", ",", "vm_", "[", "'name'", "]", ")", "__utils__", "[", "'cloud.fire_event'", "]", "(", "'event'", ",", "'requesting instance'", ",", "'salt/cloud/{0}/requesting'", ".", "format", "(", "vm_", "[", "'name'", "]", ")", ",", "args", "=", "{", "'kwargs'", ":", "__utils__", "[", "'cloud.filter_event'", "]", "(", "'requesting'", ",", "kwargs", ",", "list", "(", "kwargs", ")", ")", ",", "}", ",", "sock_dir", "=", "__opts__", "[", "'sock_dir'", "]", ",", "transport", "=", "__opts__", "[", "'transport'", "]", ",", ")", "try", ":", "data", "=", "_query", "(", "'server/create'", ",", "method", "=", "'POST'", ",", "data", "=", "_urlencode", "(", 
"kwargs", ")", ")", "if", "int", "(", "data", ".", "get", "(", "'status'", ",", "'200'", ")", ")", ">=", "300", ":", "log", ".", "error", "(", "'Error creating %s on Vultr\\n\\n'", "'Vultr API returned %s\\n'", ",", "vm_", "[", "'name'", "]", ",", "data", ")", "log", ".", "error", "(", "'Status 412 may mean that you are requesting an\\n'", "'invalid location, image, or size.'", ")", "__utils__", "[", "'cloud.fire_event'", "]", "(", "'event'", ",", "'instance request failed'", ",", "'salt/cloud/{0}/requesting/failed'", ".", "format", "(", "vm_", "[", "'name'", "]", ")", ",", "args", "=", "{", "'kwargs'", ":", "kwargs", "}", ",", "sock_dir", "=", "__opts__", "[", "'sock_dir'", "]", ",", "transport", "=", "__opts__", "[", "'transport'", "]", ",", ")", "return", "False", "except", "Exception", "as", "exc", ":", "log", ".", "error", "(", "'Error creating %s on Vultr\\n\\n'", "'The following exception was thrown when trying to '", "'run the initial deployment:\\n%s'", ",", "vm_", "[", "'name'", "]", ",", "exc", ",", "# Show the traceback if the debug logging level is enabled", "exc_info_on_loglevel", "=", "logging", ".", "DEBUG", ")", "__utils__", "[", "'cloud.fire_event'", "]", "(", "'event'", ",", "'instance request failed'", ",", "'salt/cloud/{0}/requesting/failed'", ".", "format", "(", "vm_", "[", "'name'", "]", ")", ",", "args", "=", "{", "'kwargs'", ":", "kwargs", "}", ",", "sock_dir", "=", "__opts__", "[", "'sock_dir'", "]", ",", "transport", "=", "__opts__", "[", "'transport'", "]", ",", ")", "return", "False", "def", "wait_for_hostname", "(", ")", ":", "'''\n Wait for the IP address to become available\n '''", "data", "=", "show_instance", "(", "vm_", "[", "'name'", "]", ",", "call", "=", "'action'", ")", "main_ip", "=", "six", ".", "text_type", "(", "data", ".", "get", "(", "'main_ip'", ",", "'0'", ")", ")", "if", "main_ip", ".", "startswith", "(", "'0'", ")", ":", "time", ".", "sleep", "(", "3", ")", "return", "False", "return", "data", "[", "'main_ip'", "]", "def", "wait_for_default_password", "(", ")", ":", "'''\n Wait for the IP address to become available\n '''", "data", "=", "show_instance", "(", "vm_", "[", "'name'", "]", ",", "call", "=", "'action'", ")", "# print(\"Waiting for default password\")", "# pprint.pprint(data)", "if", "six", ".", "text_type", "(", "data", ".", "get", "(", "'default_password'", ",", "''", ")", ")", "==", "''", ":", "time", ".", "sleep", "(", "1", ")", "return", "False", "return", "data", "[", "'default_password'", "]", "def", "wait_for_status", "(", ")", ":", "'''\n Wait for the IP address to become available\n '''", "data", "=", "show_instance", "(", "vm_", "[", "'name'", "]", ",", "call", "=", "'action'", ")", "# print(\"Waiting for status normal\")", "# pprint.pprint(data)", "if", "six", ".", "text_type", "(", "data", ".", "get", "(", "'status'", ",", "''", ")", ")", "!=", "'active'", ":", "time", ".", "sleep", "(", "1", ")", "return", "False", "return", "data", "[", "'default_password'", "]", "def", "wait_for_server_state", "(", ")", ":", "'''\n Wait for the IP address to become available\n '''", "data", "=", "show_instance", "(", "vm_", "[", "'name'", "]", ",", "call", "=", "'action'", ")", "# print(\"Waiting for server state ok\")", "# pprint.pprint(data)", "if", "six", ".", "text_type", "(", "data", ".", "get", "(", "'server_state'", ",", "''", ")", ")", "!=", "'ok'", ":", "time", ".", "sleep", "(", "1", ")", "return", "False", "return", "data", "[", "'default_password'", "]", "vm_", "[", "'ssh_host'", "]", "=", "__utils__", "[", 
"'cloud.wait_for_fun'", "]", "(", "wait_for_hostname", ",", "timeout", "=", "config", ".", "get_cloud_config_value", "(", "'wait_for_fun_timeout'", ",", "vm_", ",", "__opts__", ",", "default", "=", "15", "*", "60", ")", ",", ")", "vm_", "[", "'password'", "]", "=", "__utils__", "[", "'cloud.wait_for_fun'", "]", "(", "wait_for_default_password", ",", "timeout", "=", "config", ".", "get_cloud_config_value", "(", "'wait_for_fun_timeout'", ",", "vm_", ",", "__opts__", ",", "default", "=", "15", "*", "60", ")", ",", ")", "__utils__", "[", "'cloud.wait_for_fun'", "]", "(", "wait_for_status", ",", "timeout", "=", "config", ".", "get_cloud_config_value", "(", "'wait_for_fun_timeout'", ",", "vm_", ",", "__opts__", ",", "default", "=", "15", "*", "60", ")", ",", ")", "__utils__", "[", "'cloud.wait_for_fun'", "]", "(", "wait_for_server_state", ",", "timeout", "=", "config", ".", "get_cloud_config_value", "(", "'wait_for_fun_timeout'", ",", "vm_", ",", "__opts__", ",", "default", "=", "15", "*", "60", ")", ",", ")", "__opts__", "[", "'hard_timeout'", "]", "=", "config", ".", "get_cloud_config_value", "(", "'hard_timeout'", ",", "get_configured_provider", "(", ")", ",", "__opts__", ",", "search_global", "=", "False", ",", "default", "=", "None", ",", ")", "# Bootstrap", "ret", "=", "__utils__", "[", "'cloud.bootstrap'", "]", "(", "vm_", ",", "__opts__", ")", "ret", ".", "update", "(", "show_instance", "(", "vm_", "[", "'name'", "]", ",", "call", "=", "'action'", ")", ")", "log", ".", "info", "(", "'Created Cloud VM \\'%s\\''", ",", "vm_", "[", "'name'", "]", ")", "log", ".", "debug", "(", "'\\'%s\\' VM creation details:\\n%s'", ",", "vm_", "[", "'name'", "]", ",", "pprint", ".", "pformat", "(", "data", ")", ")", "__utils__", "[", "'cloud.fire_event'", "]", "(", "'event'", ",", "'created instance'", ",", "'salt/cloud/{0}/created'", ".", "format", "(", "vm_", "[", "'name'", "]", ")", ",", "args", "=", "__utils__", "[", "'cloud.filter_event'", "]", "(", "'created'", ",", "vm_", ",", "[", "'name'", ",", "'profile'", ",", "'provider'", ",", "'driver'", "]", ")", ",", "sock_dir", "=", "__opts__", "[", "'sock_dir'", "]", ",", "transport", "=", "__opts__", "[", "'transport'", "]", ")", "return", "ret" ]
Create a single VM from a data dict
[ "Create", "a", "single", "VM", "from", "a", "data", "dict" ]
python
train
32.961905
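Illustrative note for the create() record above: its wait_for_hostname / wait_for_default_password / wait_for_status helpers are driven by Salt's cloud.wait_for_fun utility. The sketch below is a hypothetical, simplified polling loop showing the pattern only; it is not Salt's actual implementation, and the names and defaults are assumptions.

import time

def wait_for_fun(fun, timeout=15 * 60, interval=3):
    # Poll `fun` until it returns a truthy value or the timeout expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = fun()
        if result:
            return result
        time.sleep(interval)
    raise RuntimeError('timed out waiting for {0}'.format(getattr(fun, '__name__', 'callable')))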
getsenic/gatt-python
gatt/gatt_linux.py
https://github.com/getsenic/gatt-python/blob/e1b147d54ff199571b6c0b43bdd3a9e1ce03850c/gatt/gatt_linux.py#L626-L631
def _write_value_failed(self, dbus_error):
        """
        Called when the write request has failed.
        """
        error = _error_from_dbus_error(dbus_error)
        self.service.device.characteristic_write_value_failed(characteristic=self, error=error)
[ "def", "_write_value_failed", "(", "self", ",", "dbus_error", ")", ":", "error", "=", "_error_from_dbus_error", "(", "dbus_error", ")", "self", ".", "service", ".", "device", ".", "characteristic_write_value_failed", "(", "characteristic", "=", "self", ",", "error", "=", "error", ")" ]
Called when the write request has failed.
[ "Called", "when", "the", "write", "request", "has", "failed", "." ]
python
train
43
tipsi/aiozk
aiozk/recipes/allocator.py
https://github.com/tipsi/aiozk/blob/96d2f543de248c6d993b5bfe6621167dd1eb8223/aiozk/recipes/allocator.py#L121-L134
def round_robin(members, items):
    """
    Default allocator with a round robin approach.

    In this algorithm, each member of the group is cycled over and given
    an item until there are no items left. This assumes roughly equal
    capacity for each member and aims for even distribution of item counts.
    """
    allocation = collections.defaultdict(set)

    for member, item in zip(itertools.cycle(members), items):
        allocation[member].add(item)

    return allocation
[ "def", "round_robin", "(", "members", ",", "items", ")", ":", "allocation", "=", "collections", ".", "defaultdict", "(", "set", ")", "for", "member", ",", "item", "in", "zip", "(", "itertools", ".", "cycle", "(", "members", ")", ",", "items", ")", ":", "allocation", "[", "member", "]", ".", "add", "(", "item", ")", "return", "allocation" ]
Default allocator with a round robin approach. In this algorithm, each member of the group is cycled over and given an item until there are no items left. This assumes roughly equal capacity for each member and aims for even distribution of item counts.
[ "Default", "allocator", "with", "a", "round", "robin", "approach", "." ]
python
train
34
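Usage sketch for the round_robin record above; the worker and item names are invented for the example.

import collections
import itertools

def round_robin(members, items):
    # Same allocator as in the record above: cycle members, hand out items.
    allocation = collections.defaultdict(set)
    for member, item in zip(itertools.cycle(members), items):
        allocation[member].add(item)
    return allocation

print(dict(round_robin(['worker-1', 'worker-2', 'worker-3'], range(7))))
# worker-1 gets {0, 3, 6}, worker-2 gets {1, 4}, worker-3 gets {2, 5}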
santoshphilip/eppy
eppy/useful_scripts/loopdiagram.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/useful_scripts/loopdiagram.py#L206-L494
def makeairplantloop(data, commdct): """make the edges for the airloop and the plantloop""" anode = "epnode" endnode = "EndNode" # in plantloop get: # demand inlet, outlet, branchlist # supply inlet, outlet, branchlist plantloops = loops.plantloopfields(data, commdct) # splitters # inlet # outlet1 # outlet2 splitters = loops.splitterfields(data, commdct) # # mixer # outlet # inlet1 # inlet2 mixers = loops.mixerfields(data, commdct) # # supply barnchlist # branch1 -> inlet, outlet # branch2 -> inlet, outlet # branch3 -> inlet, outlet # # CONNET INLET OUTLETS edges = [] # get all branches branchkey = "branch".upper() branches = data.dt[branchkey] branch_i_o = {} for br in branches: br_name = br[1] in_out = loops.branch_inlet_outlet(data, commdct, br_name) branch_i_o[br_name] = dict(list(zip(["inlet", "outlet"], in_out))) # for br_name, in_out in branch_i_o.items(): # edges.append(((in_out["inlet"], anode), br_name)) # edges.append((br_name, (in_out["outlet"], anode))) # instead of doing the branch # do the content of the branch edges = makebranchcomponents(data, commdct) # connect splitter to nodes for splitter in splitters: # splitter_inlet = inletbranch.node splittername = splitter[0] inletbranchname = splitter[1] splitter_inlet = branch_i_o[inletbranchname]["outlet"] # edges = splitter_inlet -> splittername edges.append(((splitter_inlet, anode), splittername)) # splitter_outlets = ouletbranches.nodes outletbranchnames = [br for br in splitter[2:]] splitter_outlets = [branch_i_o[br]["inlet"] for br in outletbranchnames] # edges = [splittername -> outlet for outlet in splitter_outlets] moreedges = [(splittername, (outlet, anode)) for outlet in splitter_outlets] edges = edges + moreedges for mixer in mixers: # mixer_outlet = outletbranch.node mixername = mixer[0] outletbranchname = mixer[1] mixer_outlet = branch_i_o[outletbranchname]["inlet"] # edges = mixername -> mixer_outlet edges.append((mixername, (mixer_outlet, anode))) # mixer_inlets = inletbranches.nodes inletbranchnames = [br for br in mixer[2:]] mixer_inlets = [branch_i_o[br]["outlet"] for br in inletbranchnames] # edges = [mixername -> inlet for inlet in mixer_inlets] moreedges = [((inlet, anode), mixername) for inlet in mixer_inlets] edges = edges + moreedges # connect demand and supply side # for plantloop in plantloops: # supplyinlet = plantloop[1] # supplyoutlet = plantloop[2] # demandinlet = plantloop[4] # demandoutlet = plantloop[5] # # edges = [supplyoutlet -> demandinlet, demandoutlet -> supplyinlet] # moreedges = [((supplyoutlet, endnode), (demandinlet, endnode)), # ((demandoutlet, endnode), (supplyinlet, endnode))] # edges = edges + moreedges # # -----------air loop stuff---------------------- # from s_airloop2.py # Get the demand and supply nodes from 'airloophvac' # in airloophvac get: # get branch, supplyinlet, supplyoutlet, demandinlet, demandoutlet objkey = "airloophvac".upper() fieldlists = [["Branch List Name", "Supply Side Inlet Node Name", "Demand Side Outlet Node Name", "Demand Side Inlet Node Names", "Supply Side Outlet Node Names"]] * loops.objectcount(data, objkey) airloophvacs = loops.extractfields(data, commdct, objkey, fieldlists) # airloophvac = airloophvacs[0] # in AirLoopHVAC:ZoneSplitter: # get Name, inlet, all outlets objkey = "AirLoopHVAC:ZoneSplitter".upper() singlefields = ["Name", "Inlet Node Name"] fld = "Outlet %s Node Name" repeatfields = loops.repeatingfields(data, commdct, objkey, fld) fieldlist = singlefields + repeatfields fieldlists = [fieldlist] * loops.objectcount(data, objkey) 
zonesplitters = loops.extractfields(data, commdct, objkey, fieldlists) # in AirLoopHVAC:SupplyPlenum: # get Name, Zone Name, Zone Node Name, inlet, all outlets objkey = "AirLoopHVAC:SupplyPlenum".upper() singlefields = ["Name", "Zone Name", "Zone Node Name", "Inlet Node Name"] fld = "Outlet %s Node Name" repeatfields = loops.repeatingfields(data, commdct, objkey, fld) fieldlist = singlefields + repeatfields fieldlists = [fieldlist] * loops.objectcount(data, objkey) supplyplenums = loops.extractfields(data, commdct, objkey, fieldlists) # in AirLoopHVAC:ZoneMixer: # get Name, outlet, all inlets objkey = "AirLoopHVAC:ZoneMixer".upper() singlefields = ["Name", "Outlet Node Name"] fld = "Inlet %s Node Name" repeatfields = loops.repeatingfields(data, commdct, objkey, fld) fieldlist = singlefields + repeatfields fieldlists = [fieldlist] * loops.objectcount(data, objkey) zonemixers = loops.extractfields(data, commdct, objkey, fieldlists) # in AirLoopHVAC:ReturnPlenum: # get Name, Zone Name, Zone Node Name, outlet, all inlets objkey = "AirLoopHVAC:ReturnPlenum".upper() singlefields = ["Name", "Zone Name", "Zone Node Name", "Outlet Node Name"] fld = "Inlet %s Node Name" repeatfields = loops.repeatingfields(data, commdct, objkey, fld) fieldlist = singlefields + repeatfields fieldlists = [fieldlist] * loops.objectcount(data, objkey) returnplenums = loops.extractfields(data, commdct, objkey, fieldlists) # connect room to each equip in equiplist # in ZoneHVAC:EquipmentConnections: # get Name, equiplist, zoneairnode, returnnode objkey = "ZoneHVAC:EquipmentConnections".upper() singlefields = ["Zone Name", "Zone Conditioning Equipment List Name", "Zone Air Node Name", "Zone Return Air Node Name"] repeatfields = [] fieldlist = singlefields + repeatfields fieldlists = [fieldlist] * loops.objectcount(data, objkey) equipconnections = loops.extractfields(data, commdct, objkey, fieldlists) # in ZoneHVAC:EquipmentList: # get Name, all equiptype, all equipnames objkey = "ZoneHVAC:EquipmentList".upper() singlefields = ["Name", ] fieldlist = singlefields flds = ["Zone Equipment %s Object Type", "Zone Equipment %s Name"] repeatfields = loops.repeatingfields(data, commdct, objkey, flds) fieldlist = fieldlist + repeatfields fieldlists = [fieldlist] * loops.objectcount(data, objkey) equiplists = loops.extractfields(data, commdct, objkey, fieldlists) equiplistdct = dict([(ep[0], ep[1:]) for ep in equiplists]) for key, equips in list(equiplistdct.items()): enames = [equips[i] for i in range(1, len(equips), 2)] equiplistdct[key] = enames # adistuunit -> room # adistuunit <- VAVreheat # airinlet -> VAVreheat # in ZoneHVAC:AirDistributionUnit: # get Name, equiplist, zoneairnode, returnnode objkey = "ZoneHVAC:AirDistributionUnit".upper() singlefields = ["Name", "Air Terminal Object Type", "Air Terminal Name"] repeatfields = [] fieldlist = singlefields + repeatfields fieldlists = [fieldlist] * loops.objectcount(data, objkey) adistuunits = loops.extractfields(data, commdct, objkey, fieldlists) # code only for AirTerminal:SingleDuct:VAV:Reheat # get airinletnodes for vavreheats # in AirTerminal:SingleDuct:VAV:Reheat: # get Name, airinletnode adistuinlets = loops.makeadistu_inlets(data, commdct) alladistu_comps = [] for key in list(adistuinlets.keys()): objkey = key.upper() singlefields = ["Name"] + adistuinlets[key] repeatfields = [] fieldlist = singlefields + repeatfields fieldlists = [fieldlist] * loops.objectcount(data, objkey) adistu_components = loops.extractfields(data, commdct, objkey, fieldlists) 
alladistu_comps.append(adistu_components) # in AirTerminal:SingleDuct:Uncontrolled: # get Name, airinletnode objkey = "AirTerminal:SingleDuct:Uncontrolled".upper() singlefields = ["Name", "Zone Supply Air Node Name"] repeatfields = [] fieldlist = singlefields + repeatfields fieldlists = [fieldlist] * loops.objectcount(data, objkey) uncontrolleds = loops.extractfields(data, commdct, objkey, fieldlists) anode = "epnode" endnode = "EndNode" # edges = [] # connect demand and supply side # for airloophvac in airloophvacs: # supplyinlet = airloophvac[1] # supplyoutlet = airloophvac[4] # demandinlet = airloophvac[3] # demandoutlet = airloophvac[2] # # edges = [supplyoutlet -> demandinlet, demandoutlet -> supplyinlet] # moreedges = [((supplyoutlet, endnode), (demandinlet, endnode)), # ((demandoutlet, endnode), (supplyinlet, endnode))] # edges = edges + moreedges # connect zonesplitter to nodes for zonesplitter in zonesplitters: name = zonesplitter[0] inlet = zonesplitter[1] outlets = zonesplitter[2:] edges.append(((inlet, anode), name)) for outlet in outlets: edges.append((name, (outlet, anode))) # connect supplyplenum to nodes for supplyplenum in supplyplenums: name = supplyplenum[0] inlet = supplyplenum[3] outlets = supplyplenum[4:] edges.append(((inlet, anode), name)) for outlet in outlets: edges.append((name, (outlet, anode))) # connect zonemixer to nodes for zonemixer in zonemixers: name = zonemixer[0] outlet = zonemixer[1] inlets = zonemixer[2:] edges.append((name, (outlet, anode))) for inlet in inlets: edges.append(((inlet, anode), name)) # connect returnplenums to nodes for returnplenum in returnplenums: name = returnplenum[0] outlet = returnplenum[3] inlets = returnplenum[4:] edges.append((name, (outlet, anode))) for inlet in inlets: edges.append(((inlet, anode), name)) # connect room to return node for equipconnection in equipconnections: zonename = equipconnection[0] returnnode = equipconnection[-1] edges.append((zonename, (returnnode, anode))) # connect equips to room for equipconnection in equipconnections: zonename = equipconnection[0] zequiplistname = equipconnection[1] for zequip in equiplistdct[zequiplistname]: edges.append((zequip, zonename)) # adistuunit <- adistu_component for adistuunit in adistuunits: unitname = adistuunit[0] compname = adistuunit[2] edges.append((compname, unitname)) # airinlet -> adistu_component for adistu_comps in alladistu_comps: for adistu_comp in adistu_comps: name = adistu_comp[0] for airnode in adistu_comp[1:]: edges.append(((airnode, anode), name)) # supplyairnode -> uncontrolled for uncontrolled in uncontrolleds: name = uncontrolled[0] airnode = uncontrolled[1] edges.append(((airnode, anode), name)) # edges = edges + moreedges return edges
[ "def", "makeairplantloop", "(", "data", ",", "commdct", ")", ":", "anode", "=", "\"epnode\"", "endnode", "=", "\"EndNode\"", "# in plantloop get:", "# demand inlet, outlet, branchlist", "# supply inlet, outlet, branchlist", "plantloops", "=", "loops", ".", "plantloopfields", "(", "data", ",", "commdct", ")", "# splitters", "# inlet", "# outlet1", "# outlet2", "splitters", "=", "loops", ".", "splitterfields", "(", "data", ",", "commdct", ")", "# ", "# mixer", "# outlet", "# inlet1", "# inlet2", "mixers", "=", "loops", ".", "mixerfields", "(", "data", ",", "commdct", ")", "# ", "# supply barnchlist", "# branch1 -> inlet, outlet", "# branch2 -> inlet, outlet", "# branch3 -> inlet, outlet", "# ", "# CONNET INLET OUTLETS", "edges", "=", "[", "]", "# get all branches", "branchkey", "=", "\"branch\"", ".", "upper", "(", ")", "branches", "=", "data", ".", "dt", "[", "branchkey", "]", "branch_i_o", "=", "{", "}", "for", "br", "in", "branches", ":", "br_name", "=", "br", "[", "1", "]", "in_out", "=", "loops", ".", "branch_inlet_outlet", "(", "data", ",", "commdct", ",", "br_name", ")", "branch_i_o", "[", "br_name", "]", "=", "dict", "(", "list", "(", "zip", "(", "[", "\"inlet\"", ",", "\"outlet\"", "]", ",", "in_out", ")", ")", ")", "# for br_name, in_out in branch_i_o.items():", "# edges.append(((in_out[\"inlet\"], anode), br_name))", "# edges.append((br_name, (in_out[\"outlet\"], anode)))", "# instead of doing the branch", "# do the content of the branch", "edges", "=", "makebranchcomponents", "(", "data", ",", "commdct", ")", "# connect splitter to nodes", "for", "splitter", "in", "splitters", ":", "# splitter_inlet = inletbranch.node", "splittername", "=", "splitter", "[", "0", "]", "inletbranchname", "=", "splitter", "[", "1", "]", "splitter_inlet", "=", "branch_i_o", "[", "inletbranchname", "]", "[", "\"outlet\"", "]", "# edges = splitter_inlet -> splittername", "edges", ".", "append", "(", "(", "(", "splitter_inlet", ",", "anode", ")", ",", "splittername", ")", ")", "# splitter_outlets = ouletbranches.nodes", "outletbranchnames", "=", "[", "br", "for", "br", "in", "splitter", "[", "2", ":", "]", "]", "splitter_outlets", "=", "[", "branch_i_o", "[", "br", "]", "[", "\"inlet\"", "]", "for", "br", "in", "outletbranchnames", "]", "# edges = [splittername -> outlet for outlet in splitter_outlets]", "moreedges", "=", "[", "(", "splittername", ",", "(", "outlet", ",", "anode", ")", ")", "for", "outlet", "in", "splitter_outlets", "]", "edges", "=", "edges", "+", "moreedges", "for", "mixer", "in", "mixers", ":", "# mixer_outlet = outletbranch.node", "mixername", "=", "mixer", "[", "0", "]", "outletbranchname", "=", "mixer", "[", "1", "]", "mixer_outlet", "=", "branch_i_o", "[", "outletbranchname", "]", "[", "\"inlet\"", "]", "# edges = mixername -> mixer_outlet", "edges", ".", "append", "(", "(", "mixername", ",", "(", "mixer_outlet", ",", "anode", ")", ")", ")", "# mixer_inlets = inletbranches.nodes", "inletbranchnames", "=", "[", "br", "for", "br", "in", "mixer", "[", "2", ":", "]", "]", "mixer_inlets", "=", "[", "branch_i_o", "[", "br", "]", "[", "\"outlet\"", "]", "for", "br", "in", "inletbranchnames", "]", "# edges = [mixername -> inlet for inlet in mixer_inlets]", "moreedges", "=", "[", "(", "(", "inlet", ",", "anode", ")", ",", "mixername", ")", "for", "inlet", "in", "mixer_inlets", "]", "edges", "=", "edges", "+", "moreedges", "# connect demand and supply side", "# for plantloop in plantloops:", "# supplyinlet = plantloop[1]", "# supplyoutlet = plantloop[2]", "# demandinlet = plantloop[4]", 
"# demandoutlet = plantloop[5]", "# # edges = [supplyoutlet -> demandinlet, demandoutlet -> supplyinlet]", "# moreedges = [((supplyoutlet, endnode), (demandinlet, endnode)), ", "# ((demandoutlet, endnode), (supplyinlet, endnode))]", "# edges = edges + moreedges", "# ", "# -----------air loop stuff----------------------", "# from s_airloop2.py", "# Get the demand and supply nodes from 'airloophvac'", "# in airloophvac get:", "# get branch, supplyinlet, supplyoutlet, demandinlet, demandoutlet", "objkey", "=", "\"airloophvac\"", ".", "upper", "(", ")", "fieldlists", "=", "[", "[", "\"Branch List Name\"", ",", "\"Supply Side Inlet Node Name\"", ",", "\"Demand Side Outlet Node Name\"", ",", "\"Demand Side Inlet Node Names\"", ",", "\"Supply Side Outlet Node Names\"", "]", "]", "*", "loops", ".", "objectcount", "(", "data", ",", "objkey", ")", "airloophvacs", "=", "loops", ".", "extractfields", "(", "data", ",", "commdct", ",", "objkey", ",", "fieldlists", ")", "# airloophvac = airloophvacs[0]", "# in AirLoopHVAC:ZoneSplitter:", "# get Name, inlet, all outlets", "objkey", "=", "\"AirLoopHVAC:ZoneSplitter\"", ".", "upper", "(", ")", "singlefields", "=", "[", "\"Name\"", ",", "\"Inlet Node Name\"", "]", "fld", "=", "\"Outlet %s Node Name\"", "repeatfields", "=", "loops", ".", "repeatingfields", "(", "data", ",", "commdct", ",", "objkey", ",", "fld", ")", "fieldlist", "=", "singlefields", "+", "repeatfields", "fieldlists", "=", "[", "fieldlist", "]", "*", "loops", ".", "objectcount", "(", "data", ",", "objkey", ")", "zonesplitters", "=", "loops", ".", "extractfields", "(", "data", ",", "commdct", ",", "objkey", ",", "fieldlists", ")", "# in AirLoopHVAC:SupplyPlenum:", "# get Name, Zone Name, Zone Node Name, inlet, all outlets", "objkey", "=", "\"AirLoopHVAC:SupplyPlenum\"", ".", "upper", "(", ")", "singlefields", "=", "[", "\"Name\"", ",", "\"Zone Name\"", ",", "\"Zone Node Name\"", ",", "\"Inlet Node Name\"", "]", "fld", "=", "\"Outlet %s Node Name\"", "repeatfields", "=", "loops", ".", "repeatingfields", "(", "data", ",", "commdct", ",", "objkey", ",", "fld", ")", "fieldlist", "=", "singlefields", "+", "repeatfields", "fieldlists", "=", "[", "fieldlist", "]", "*", "loops", ".", "objectcount", "(", "data", ",", "objkey", ")", "supplyplenums", "=", "loops", ".", "extractfields", "(", "data", ",", "commdct", ",", "objkey", ",", "fieldlists", ")", "# in AirLoopHVAC:ZoneMixer:", "# get Name, outlet, all inlets", "objkey", "=", "\"AirLoopHVAC:ZoneMixer\"", ".", "upper", "(", ")", "singlefields", "=", "[", "\"Name\"", ",", "\"Outlet Node Name\"", "]", "fld", "=", "\"Inlet %s Node Name\"", "repeatfields", "=", "loops", ".", "repeatingfields", "(", "data", ",", "commdct", ",", "objkey", ",", "fld", ")", "fieldlist", "=", "singlefields", "+", "repeatfields", "fieldlists", "=", "[", "fieldlist", "]", "*", "loops", ".", "objectcount", "(", "data", ",", "objkey", ")", "zonemixers", "=", "loops", ".", "extractfields", "(", "data", ",", "commdct", ",", "objkey", ",", "fieldlists", ")", "# in AirLoopHVAC:ReturnPlenum:", "# get Name, Zone Name, Zone Node Name, outlet, all inlets", "objkey", "=", "\"AirLoopHVAC:ReturnPlenum\"", ".", "upper", "(", ")", "singlefields", "=", "[", "\"Name\"", ",", "\"Zone Name\"", ",", "\"Zone Node Name\"", ",", "\"Outlet Node Name\"", "]", "fld", "=", "\"Inlet %s Node Name\"", "repeatfields", "=", "loops", ".", "repeatingfields", "(", "data", ",", "commdct", ",", "objkey", ",", "fld", ")", "fieldlist", "=", "singlefields", "+", "repeatfields", "fieldlists", "=", "[", "fieldlist", 
"]", "*", "loops", ".", "objectcount", "(", "data", ",", "objkey", ")", "returnplenums", "=", "loops", ".", "extractfields", "(", "data", ",", "commdct", ",", "objkey", ",", "fieldlists", ")", "# connect room to each equip in equiplist", "# in ZoneHVAC:EquipmentConnections:", "# get Name, equiplist, zoneairnode, returnnode", "objkey", "=", "\"ZoneHVAC:EquipmentConnections\"", ".", "upper", "(", ")", "singlefields", "=", "[", "\"Zone Name\"", ",", "\"Zone Conditioning Equipment List Name\"", ",", "\"Zone Air Node Name\"", ",", "\"Zone Return Air Node Name\"", "]", "repeatfields", "=", "[", "]", "fieldlist", "=", "singlefields", "+", "repeatfields", "fieldlists", "=", "[", "fieldlist", "]", "*", "loops", ".", "objectcount", "(", "data", ",", "objkey", ")", "equipconnections", "=", "loops", ".", "extractfields", "(", "data", ",", "commdct", ",", "objkey", ",", "fieldlists", ")", "# in ZoneHVAC:EquipmentList:", "# get Name, all equiptype, all equipnames", "objkey", "=", "\"ZoneHVAC:EquipmentList\"", ".", "upper", "(", ")", "singlefields", "=", "[", "\"Name\"", ",", "]", "fieldlist", "=", "singlefields", "flds", "=", "[", "\"Zone Equipment %s Object Type\"", ",", "\"Zone Equipment %s Name\"", "]", "repeatfields", "=", "loops", ".", "repeatingfields", "(", "data", ",", "commdct", ",", "objkey", ",", "flds", ")", "fieldlist", "=", "fieldlist", "+", "repeatfields", "fieldlists", "=", "[", "fieldlist", "]", "*", "loops", ".", "objectcount", "(", "data", ",", "objkey", ")", "equiplists", "=", "loops", ".", "extractfields", "(", "data", ",", "commdct", ",", "objkey", ",", "fieldlists", ")", "equiplistdct", "=", "dict", "(", "[", "(", "ep", "[", "0", "]", ",", "ep", "[", "1", ":", "]", ")", "for", "ep", "in", "equiplists", "]", ")", "for", "key", ",", "equips", "in", "list", "(", "equiplistdct", ".", "items", "(", ")", ")", ":", "enames", "=", "[", "equips", "[", "i", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "equips", ")", ",", "2", ")", "]", "equiplistdct", "[", "key", "]", "=", "enames", "# adistuunit -> room ", "# adistuunit <- VAVreheat ", "# airinlet -> VAVreheat", "# in ZoneHVAC:AirDistributionUnit:", "# get Name, equiplist, zoneairnode, returnnode", "objkey", "=", "\"ZoneHVAC:AirDistributionUnit\"", ".", "upper", "(", ")", "singlefields", "=", "[", "\"Name\"", ",", "\"Air Terminal Object Type\"", ",", "\"Air Terminal Name\"", "]", "repeatfields", "=", "[", "]", "fieldlist", "=", "singlefields", "+", "repeatfields", "fieldlists", "=", "[", "fieldlist", "]", "*", "loops", ".", "objectcount", "(", "data", ",", "objkey", ")", "adistuunits", "=", "loops", ".", "extractfields", "(", "data", ",", "commdct", ",", "objkey", ",", "fieldlists", ")", "# code only for AirTerminal:SingleDuct:VAV:Reheat", "# get airinletnodes for vavreheats", "# in AirTerminal:SingleDuct:VAV:Reheat:", "# get Name, airinletnode", "adistuinlets", "=", "loops", ".", "makeadistu_inlets", "(", "data", ",", "commdct", ")", "alladistu_comps", "=", "[", "]", "for", "key", "in", "list", "(", "adistuinlets", ".", "keys", "(", ")", ")", ":", "objkey", "=", "key", ".", "upper", "(", ")", "singlefields", "=", "[", "\"Name\"", "]", "+", "adistuinlets", "[", "key", "]", "repeatfields", "=", "[", "]", "fieldlist", "=", "singlefields", "+", "repeatfields", "fieldlists", "=", "[", "fieldlist", "]", "*", "loops", ".", "objectcount", "(", "data", ",", "objkey", ")", "adistu_components", "=", "loops", ".", "extractfields", "(", "data", ",", "commdct", ",", "objkey", ",", "fieldlists", ")", "alladistu_comps", ".", "append", "(", 
"adistu_components", ")", "# in AirTerminal:SingleDuct:Uncontrolled:", "# get Name, airinletnode", "objkey", "=", "\"AirTerminal:SingleDuct:Uncontrolled\"", ".", "upper", "(", ")", "singlefields", "=", "[", "\"Name\"", ",", "\"Zone Supply Air Node Name\"", "]", "repeatfields", "=", "[", "]", "fieldlist", "=", "singlefields", "+", "repeatfields", "fieldlists", "=", "[", "fieldlist", "]", "*", "loops", ".", "objectcount", "(", "data", ",", "objkey", ")", "uncontrolleds", "=", "loops", ".", "extractfields", "(", "data", ",", "commdct", ",", "objkey", ",", "fieldlists", ")", "anode", "=", "\"epnode\"", "endnode", "=", "\"EndNode\"", "# edges = []", "# connect demand and supply side", "# for airloophvac in airloophvacs:", "# supplyinlet = airloophvac[1]", "# supplyoutlet = airloophvac[4]", "# demandinlet = airloophvac[3]", "# demandoutlet = airloophvac[2]", "# # edges = [supplyoutlet -> demandinlet, demandoutlet -> supplyinlet]", "# moreedges = [((supplyoutlet, endnode), (demandinlet, endnode)),", "# ((demandoutlet, endnode), (supplyinlet, endnode))]", "# edges = edges + moreedges", "# connect zonesplitter to nodes", "for", "zonesplitter", "in", "zonesplitters", ":", "name", "=", "zonesplitter", "[", "0", "]", "inlet", "=", "zonesplitter", "[", "1", "]", "outlets", "=", "zonesplitter", "[", "2", ":", "]", "edges", ".", "append", "(", "(", "(", "inlet", ",", "anode", ")", ",", "name", ")", ")", "for", "outlet", "in", "outlets", ":", "edges", ".", "append", "(", "(", "name", ",", "(", "outlet", ",", "anode", ")", ")", ")", "# connect supplyplenum to nodes", "for", "supplyplenum", "in", "supplyplenums", ":", "name", "=", "supplyplenum", "[", "0", "]", "inlet", "=", "supplyplenum", "[", "3", "]", "outlets", "=", "supplyplenum", "[", "4", ":", "]", "edges", ".", "append", "(", "(", "(", "inlet", ",", "anode", ")", ",", "name", ")", ")", "for", "outlet", "in", "outlets", ":", "edges", ".", "append", "(", "(", "name", ",", "(", "outlet", ",", "anode", ")", ")", ")", "# connect zonemixer to nodes", "for", "zonemixer", "in", "zonemixers", ":", "name", "=", "zonemixer", "[", "0", "]", "outlet", "=", "zonemixer", "[", "1", "]", "inlets", "=", "zonemixer", "[", "2", ":", "]", "edges", ".", "append", "(", "(", "name", ",", "(", "outlet", ",", "anode", ")", ")", ")", "for", "inlet", "in", "inlets", ":", "edges", ".", "append", "(", "(", "(", "inlet", ",", "anode", ")", ",", "name", ")", ")", "# connect returnplenums to nodes", "for", "returnplenum", "in", "returnplenums", ":", "name", "=", "returnplenum", "[", "0", "]", "outlet", "=", "returnplenum", "[", "3", "]", "inlets", "=", "returnplenum", "[", "4", ":", "]", "edges", ".", "append", "(", "(", "name", ",", "(", "outlet", ",", "anode", ")", ")", ")", "for", "inlet", "in", "inlets", ":", "edges", ".", "append", "(", "(", "(", "inlet", ",", "anode", ")", ",", "name", ")", ")", "# connect room to return node", "for", "equipconnection", "in", "equipconnections", ":", "zonename", "=", "equipconnection", "[", "0", "]", "returnnode", "=", "equipconnection", "[", "-", "1", "]", "edges", ".", "append", "(", "(", "zonename", ",", "(", "returnnode", ",", "anode", ")", ")", ")", "# connect equips to room", "for", "equipconnection", "in", "equipconnections", ":", "zonename", "=", "equipconnection", "[", "0", "]", "zequiplistname", "=", "equipconnection", "[", "1", "]", "for", "zequip", "in", "equiplistdct", "[", "zequiplistname", "]", ":", "edges", ".", "append", "(", "(", "zequip", ",", "zonename", ")", ")", "# adistuunit <- adistu_component ", "for", "adistuunit", 
"in", "adistuunits", ":", "unitname", "=", "adistuunit", "[", "0", "]", "compname", "=", "adistuunit", "[", "2", "]", "edges", ".", "append", "(", "(", "compname", ",", "unitname", ")", ")", "# airinlet -> adistu_component", "for", "adistu_comps", "in", "alladistu_comps", ":", "for", "adistu_comp", "in", "adistu_comps", ":", "name", "=", "adistu_comp", "[", "0", "]", "for", "airnode", "in", "adistu_comp", "[", "1", ":", "]", ":", "edges", ".", "append", "(", "(", "(", "airnode", ",", "anode", ")", ",", "name", ")", ")", "# supplyairnode -> uncontrolled", "for", "uncontrolled", "in", "uncontrolleds", ":", "name", "=", "uncontrolled", "[", "0", "]", "airnode", "=", "uncontrolled", "[", "1", "]", "edges", ".", "append", "(", "(", "(", "airnode", ",", "anode", ")", ",", "name", ")", ")", "# edges = edges + moreedges ", "return", "edges" ]
make the edges for the airloop and the plantloop
[ "make", "the", "edges", "for", "the", "airloop", "and", "the", "plantloop" ]
python
train
38.705882
phn/lineid_plot
lineid_plot/lineid_plot.py
https://github.com/phn/lineid_plot/blob/7c7a1af53fe439b3a7c5a57f01680575837fb978/lineid_plot/lineid_plot.py#L16-L37
def _convert_to_array(x, size, name):
    """Check length of array or convert scalar to array.

    Check to see if `x` has the given length `size`. If this is true
    then return the Numpy array equivalent of `x`. If not then raise
    ValueError, using `name` as an identification. If len(x) raises
    TypeError, then assume it is a scalar and create a Numpy array of
    length `size`. Each item of this array will have the value `x`.
    """
    try:
        l = len(x)
        if l != size:
            raise ValueError(
                "{0} must be scalar or of length {1}".format(
                    name, size))
    except TypeError:
        # Only one item
        xa = np.array([x] * size)  # Each item is a diff. object.
    else:
        xa = np.array(x)

    return xa
[ "def", "_convert_to_array", "(", "x", ",", "size", ",", "name", ")", ":", "try", ":", "l", "=", "len", "(", "x", ")", "if", "l", "!=", "size", ":", "raise", "ValueError", "(", "\"{0} must be scalar or of length {1}\"", ".", "format", "(", "name", ",", "size", ")", ")", "except", "TypeError", ":", "# Only one item", "xa", "=", "np", ".", "array", "(", "[", "x", "]", "*", "size", ")", "# Each item is a diff. object.", "else", ":", "xa", "=", "np", ".", "array", "(", "x", ")", "return", "xa" ]
Check length of array or convert scalar to array. Check to see if `x` has the given length `size`. If this is true then return the Numpy array equivalent of `x`. If not then raise ValueError, using `name` as an identification. If len(x) raises TypeError, then assume it is a scalar and create a Numpy array of length `size`. Each item of this array will have the value `x`.
[ "Check", "length", "of", "array", "or", "convert", "scalar", "to", "array", "." ]
python
train
34.681818
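Hypothetical usage sketch for the _convert_to_array record above, assuming that function is in scope together with numpy imported as np (the argument values are made up):

import numpy as np

# Scalar input is broadcast to an array of the requested size.
print(_convert_to_array(1.5, 3, 'wave'))        # -> [1.5 1.5 1.5]
# A sequence of the right length passes through as a numpy array.
print(_convert_to_array([1, 2, 3], 3, 'wave'))  # -> [1 2 3]
# A sequence of the wrong length raises ValueError mentioning 'wave'.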
apache/spark
python/pyspark/rdd.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/rdd.py#L2316-L2331
def sumApprox(self, timeout, confidence=0.95):
        """
        .. note:: Experimental

        Approximate operation to return the sum within a timeout
        or meet the confidence.

        >>> rdd = sc.parallelize(range(1000), 10)
        >>> r = sum(range(1000))
        >>> abs(rdd.sumApprox(1000) - r) / r < 0.05
        True
        """
        jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
        jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
        r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
        return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
[ "def", "sumApprox", "(", "self", ",", "timeout", ",", "confidence", "=", "0.95", ")", ":", "jrdd", "=", "self", ".", "mapPartitions", "(", "lambda", "it", ":", "[", "float", "(", "sum", "(", "it", ")", ")", "]", ")", ".", "_to_java_object_rdd", "(", ")", "jdrdd", "=", "self", ".", "ctx", ".", "_jvm", ".", "JavaDoubleRDD", ".", "fromRDD", "(", "jrdd", ".", "rdd", "(", ")", ")", "r", "=", "jdrdd", ".", "sumApprox", "(", "timeout", ",", "confidence", ")", ".", "getFinalValue", "(", ")", "return", "BoundedFloat", "(", "r", ".", "mean", "(", ")", ",", "r", ".", "confidence", "(", ")", ",", "r", ".", "low", "(", ")", ",", "r", ".", "high", "(", ")", ")" ]
.. note:: Experimental Approximate operation to return the sum within a timeout or meet the confidence. >>> rdd = sc.parallelize(range(1000), 10) >>> r = sum(range(1000)) >>> abs(rdd.sumApprox(1000) - r) / r < 0.05 True
[ "..", "note", "::", "Experimental" ]
python
train
38.75
eaton-lab/toytree
toytree/etemini.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/etemini.py#L675-L686
def _iter_descendants_preorder(self, is_leaf_fn=None):
        """ Iterator over all descendant nodes. """
        to_visit = deque()
        node = self
        while node is not None:
            yield node
            if not is_leaf_fn or not is_leaf_fn(node):
                to_visit.extendleft(reversed(node.children))
            try:
                node = to_visit.popleft()
            except:
                node = None
[ "def", "_iter_descendants_preorder", "(", "self", ",", "is_leaf_fn", "=", "None", ")", ":", "to_visit", "=", "deque", "(", ")", "node", "=", "self", "while", "node", "is", "not", "None", ":", "yield", "node", "if", "not", "is_leaf_fn", "or", "not", "is_leaf_fn", "(", "node", ")", ":", "to_visit", ".", "extendleft", "(", "reversed", "(", "node", ".", "children", ")", ")", "try", ":", "node", "=", "to_visit", ".", "popleft", "(", ")", "except", ":", "node", "=", "None" ]
Iterator over all descendant nodes.
[ "Iterator", "over", "all", "descendant", "nodes", "." ]
python
train
35
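Illustrative sketch of the deque-based preorder walk used in the _iter_descendants_preorder record above; the Node class here is a made-up stand-in for the real tree node type.

from collections import deque

class Node(object):
    def __init__(self, name, children=None):
        self.name = name
        self.children = children or []

def preorder(root):
    # Same traversal idea: yield a node, push its children on the left.
    to_visit = deque()
    node = root
    while node is not None:
        yield node
        to_visit.extendleft(reversed(node.children))
        node = to_visit.popleft() if to_visit else None

tree = Node('root', [Node('a', [Node('c')]), Node('b')])
print([n.name for n in preorder(tree)])  # ['root', 'a', 'c', 'b']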
xgvargas/smartside
smartside/signal.py
https://github.com/xgvargas/smartside/blob/c63acb7d628b161f438e877eca12d550647de34d/smartside/signal.py#L98-L109
def print_signals_and_slots(self):
        """
        List all active Slots and Signal.

        Credits to: http://visitusers.org/index.php?title=PySide_Recipes#Debugging
        """
        for i in xrange(self.metaObject().methodCount()):
            m = self.metaObject().method(i)
            if m.methodType() == QMetaMethod.MethodType.Signal:
                print("SIGNAL: sig=", m.signature(), "hooked to nslots=", self.receivers(SIGNAL(m.signature())))
            elif m.methodType() == QMetaMethod.MethodType.Slot:
                print("SLOT: sig=", m.signature())
[ "def", "print_signals_and_slots", "(", "self", ")", ":", "for", "i", "in", "xrange", "(", "self", ".", "metaObject", "(", ")", ".", "methodCount", "(", ")", ")", ":", "m", "=", "self", ".", "metaObject", "(", ")", ".", "method", "(", "i", ")", "if", "m", ".", "methodType", "(", ")", "==", "QMetaMethod", ".", "MethodType", ".", "Signal", ":", "print", "(", "\"SIGNAL: sig=\"", ",", "m", ".", "signature", "(", ")", ",", "\"hooked to nslots=\"", ",", "self", ".", "receivers", "(", "SIGNAL", "(", "m", ".", "signature", "(", ")", ")", ")", ")", "elif", "m", ".", "methodType", "(", ")", "==", "QMetaMethod", ".", "MethodType", ".", "Slot", ":", "print", "(", "\"SLOT: sig=\"", ",", "m", ".", "signature", "(", ")", ")" ]
List all active Slots and Signal. Credits to: http://visitusers.org/index.php?title=PySide_Recipes#Debugging
[ "List", "all", "active", "Slots", "and", "Signal", "." ]
python
train
47.666667
nickmckay/LiPD-utilities
Python/lipd/doi_resolver.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/doi_resolver.py#L214-L236
def find_doi(self, curr_dict):
        """
        Recursively search the file for the DOI id. More taxing, but more flexible when dictionary structuring isn't absolute
        :param dict curr_dict: Current dictionary being searched
        :return dict bool: Recursive - Current dictionary, False flag that DOI was not found
        :return str bool: Final - DOI id, True flag that DOI was found
        """
        try:
            if 'id' in curr_dict:
                return curr_dict['id'], True
            elif isinstance(curr_dict, list):
                for i in curr_dict:
                    return self.find_doi(i)
            elif isinstance(curr_dict, dict):
                for k, v in curr_dict.items():
                    if k == 'identifier':
                        return self.find_doi(v)
                return curr_dict, False
            else:
                return curr_dict, False
        # If the ID key doesn't exist, then return the original dict with a flag
        except TypeError:
            return curr_dict, False
[ "def", "find_doi", "(", "self", ",", "curr_dict", ")", ":", "try", ":", "if", "'id'", "in", "curr_dict", ":", "return", "curr_dict", "[", "'id'", "]", ",", "True", "elif", "isinstance", "(", "curr_dict", ",", "list", ")", ":", "for", "i", "in", "curr_dict", ":", "return", "self", ".", "find_doi", "(", "i", ")", "elif", "isinstance", "(", "curr_dict", ",", "dict", ")", ":", "for", "k", ",", "v", "in", "curr_dict", ".", "items", "(", ")", ":", "if", "k", "==", "'identifier'", ":", "return", "self", ".", "find_doi", "(", "v", ")", "return", "curr_dict", ",", "False", "else", ":", "return", "curr_dict", ",", "False", "# If the ID key doesn't exist, then return the original dict with a flag", "except", "TypeError", ":", "return", "curr_dict", ",", "False" ]
Recursively search the file for the DOI id. More taxing, but more flexible when dictionary structuring isn't absolute :param dict curr_dict: Current dictionary being searched :return dict bool: Recursive - Current dictionary, False flag that DOI was not found :return str bool: Final - DOI id, True flag that DOI was found
[ "Recursively", "search", "the", "file", "for", "the", "DOI", "id", ".", "More", "taxing", "but", "more", "flexible", "when", "dictionary", "structuring", "isn", "t", "absolute", ":", "param", "dict", "curr_dict", ":", "Current", "dictionary", "being", "searched", ":", "return", "dict", "bool", ":", "Recursive", "-", "Current", "dictionary", "False", "flag", "that", "DOI", "was", "not", "found", ":", "return", "str", "bool", ":", "Final", "-", "DOI", "id", "True", "flag", "that", "DOI", "was", "found" ]
python
train
44.73913
gem/oq-engine
openquake/commonlib/writers.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/writers.py#L74-L86
def write(self, descrs):
        """
        Convert descriptions into names
        """
        # example: '(poe-[\d\.]+):float32' -> 'poe-[\d\.]+'
        names = []
        for descr in descrs:
            mo = re.match(self.long_regex, descr)
            if mo:
                names.append(mo.group(mo.lastindex) + descr[mo.end():])
            else:
                names.append(descr)
        return names
[ "def", "write", "(", "self", ",", "descrs", ")", ":", "# example: '(poe-[\\d\\.]+):float32' -> 'poe-[\\d\\.]+'", "names", "=", "[", "]", "for", "descr", "in", "descrs", ":", "mo", "=", "re", ".", "match", "(", "self", ".", "long_regex", ",", "descr", ")", "if", "mo", ":", "names", ".", "append", "(", "mo", ".", "group", "(", "mo", ".", "lastindex", ")", "+", "descr", "[", "mo", ".", "end", "(", ")", ":", "]", ")", "else", ":", "names", ".", "append", "(", "descr", ")", "return", "names" ]
Convert descriptions into names
[ "Convert", "descriptions", "into", "names" ]
python
train
30.769231
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py#L475-L487
def ip_hide_ext_community_list_holder_extcommunity_list_ext_community_expr(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
        hide_ext_community_list_holder = ET.SubElement(ip, "hide-ext-community-list-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
        extcommunity_list = ET.SubElement(hide_ext_community_list_holder, "extcommunity-list")
        extcommunity_list_num_key = ET.SubElement(extcommunity_list, "extcommunity-list-num")
        extcommunity_list_num_key.text = kwargs.pop('extcommunity_list_num')
        ext_community_expr = ET.SubElement(extcommunity_list, "ext-community-expr")

        callback = kwargs.pop('callback', self._callback)
        return callback(config)
[ "def", "ip_hide_ext_community_list_holder_extcommunity_list_ext_community_expr", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "ip", "=", "ET", ".", "SubElement", "(", "config", ",", "\"ip\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-common-def\"", ")", "hide_ext_community_list_holder", "=", "ET", ".", "SubElement", "(", "ip", ",", "\"hide-ext-community-list-holder\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-ip-policy\"", ")", "extcommunity_list", "=", "ET", ".", "SubElement", "(", "hide_ext_community_list_holder", ",", "\"extcommunity-list\"", ")", "extcommunity_list_num_key", "=", "ET", ".", "SubElement", "(", "extcommunity_list", ",", "\"extcommunity-list-num\"", ")", "extcommunity_list_num_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'extcommunity_list_num'", ")", "ext_community_expr", "=", "ET", ".", "SubElement", "(", "extcommunity_list", ",", "\"ext-community-expr\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
64
etingof/pysnmp
pysnmp/smi/mibs/SNMPv2-SMI.py
https://github.com/etingof/pysnmp/blob/cde062dd42f67dfd2d7686286a322d40e9c3a4b7/pysnmp/smi/mibs/SNMPv2-SMI.py#L860-L925
def writeCommit(self, varBind, **context):
        """Commit new value of the Managed Object Instance.

        Implements the second of the multi-step workflow of the SNMP SET
        command processing (:RFC:`1905#section-4.2.5`).

        The goal of the second phase is to actually modify the requested
        Managed Object Instance. When multiple Managed Objects Instances
        are modified at once (likely coming all in one SNMP PDU), each of
        them has to run through the second (*commit*) phase successfully
        for the system to transition to the third (*cleanup*) phase. If
        any single *commit* step fails, the system transitions into the
        *undo* state for each of Managed Objects Instances being processed
        at once.

        The role of this object in the MIB tree is non-terminal. It does
        not access the actual Managed Object Instance, but just traverses
        one level down the MIB tree and hands off the query to the
        underlying objects.

        Parameters
        ----------
        varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object
            representing new Managed Object Instance value to set

        Other Parameters
        ----------------
        \*\*context: Query parameters:

        * `cbFun` (callable) - user-supplied callable that is invoked to
           pass the new value of the Managed Object Instance or an error.

        Notes
        -----
        The callback functions (e.g. `cbFun`) have the same signature as
        this method where `varBind` contains the new Managed Object
        Instance value.

        In case of an error, the `error` key in the `context` dict will
        contain an exception object.
        """
        name, val = varBind

        (debug.logger & debug.FLAG_INS and
         debug.logger('%s: writeCommit(%s, %r)' % (self, name, val)))

        cbFun = context['cbFun']

        instances = context['instances'].setdefault(self.name, {self.ST_CREATE: {}, self.ST_DESTROY: {}})
        idx = context['idx']

        if idx in instances[self.ST_CREATE]:
            self.createCommit(varBind, **context)
            return

        if idx in instances[self.ST_DESTROY]:
            self.destroyCommit(varBind, **context)
            return

        try:
            node = self.getBranch(name, **context)

        except (error.NoSuchInstanceError, error.NoSuchObjectError) as exc:
            cbFun(varBind, **dict(context, error=exc))

        else:
            node.writeCommit(varBind, **context)
[ "def", "writeCommit", "(", "self", ",", "varBind", ",", "*", "*", "context", ")", ":", "name", ",", "val", "=", "varBind", "(", "debug", ".", "logger", "&", "debug", ".", "FLAG_INS", "and", "debug", ".", "logger", "(", "'%s: writeCommit(%s, %r)'", "%", "(", "self", ",", "name", ",", "val", ")", ")", ")", "cbFun", "=", "context", "[", "'cbFun'", "]", "instances", "=", "context", "[", "'instances'", "]", ".", "setdefault", "(", "self", ".", "name", ",", "{", "self", ".", "ST_CREATE", ":", "{", "}", ",", "self", ".", "ST_DESTROY", ":", "{", "}", "}", ")", "idx", "=", "context", "[", "'idx'", "]", "if", "idx", "in", "instances", "[", "self", ".", "ST_CREATE", "]", ":", "self", ".", "createCommit", "(", "varBind", ",", "*", "*", "context", ")", "return", "if", "idx", "in", "instances", "[", "self", ".", "ST_DESTROY", "]", ":", "self", ".", "destroyCommit", "(", "varBind", ",", "*", "*", "context", ")", "return", "try", ":", "node", "=", "self", ".", "getBranch", "(", "name", ",", "*", "*", "context", ")", "except", "(", "error", ".", "NoSuchInstanceError", ",", "error", ".", "NoSuchObjectError", ")", "as", "exc", ":", "cbFun", "(", "varBind", ",", "*", "*", "dict", "(", "context", ",", "error", "=", "exc", ")", ")", "else", ":", "node", ".", "writeCommit", "(", "varBind", ",", "*", "*", "context", ")" ]
Commit new value of the Managed Object Instance. Implements the second of the multi-step workflow of the SNMP SET command processing (:RFC:`1905#section-4.2.5`). The goal of the second phase is to actually modify the requested Managed Object Instance. When multiple Managed Objects Instances are modified at once (likely coming all in one SNMP PDU), each of them has to run through the second (*commit*) phase successfully for the system to transition to the third (*cleanup*) phase. If any single *commit* step fails, the system transitions into the *undo* state for each of Managed Objects Instances being processed at once. The role of this object in the MIB tree is non-terminal. It does not access the actual Managed Object Instance, but just traverses one level down the MIB tree and hands off the query to the underlying objects. Parameters ---------- varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing new Managed Object Instance value to set Other Parameters ---------------- \*\*context: Query parameters: * `cbFun` (callable) - user-supplied callable that is invoked to pass the new value of the Managed Object Instance or an error. Notes ----- The callback functions (e.g. `cbFun`) have the same signature as this method where `varBind` contains the new Managed Object Instance value. In case of an error, the `error` key in the `context` dict will contain an exception object.
[ "Commit", "new", "value", "of", "the", "Managed", "Object", "Instance", "." ]
python
train
37.393939
seleniumbase/SeleniumBase
seleniumbase/plugins/base_plugin.py
https://github.com/seleniumbase/SeleniumBase/blob/62e5b43ee1f90a9ed923841bdd53b1b38358f43a/seleniumbase/plugins/base_plugin.py#L184-L200
def handleError(self, test, err, capt=None):
        """
        If the database plugin is not present, we have to handle capturing
        "errors" that shouldn't be reported as such in base.
        """
        if not hasattr(test.test, "testcase_guid"):
            if err[0] == errors.BlockedTest:
                raise SkipTest(err[1])
                return True
            elif err[0] == errors.DeprecatedTest:
                raise SkipTest(err[1])
                return True
            elif err[0] == errors.SkipTest:
                raise SkipTest(err[1])
                return True
[ "def", "handleError", "(", "self", ",", "test", ",", "err", ",", "capt", "=", "None", ")", ":", "if", "not", "hasattr", "(", "test", ".", "test", ",", "\"testcase_guid\"", ")", ":", "if", "err", "[", "0", "]", "==", "errors", ".", "BlockedTest", ":", "raise", "SkipTest", "(", "err", "[", "1", "]", ")", "return", "True", "elif", "err", "[", "0", "]", "==", "errors", ".", "DeprecatedTest", ":", "raise", "SkipTest", "(", "err", "[", "1", "]", ")", "return", "True", "elif", "err", "[", "0", "]", "==", "errors", ".", "SkipTest", ":", "raise", "SkipTest", "(", "err", "[", "1", "]", ")", "return", "True" ]
If the database plugin is not present, we have to handle capturing "errors" that shouldn't be reported as such in base.
[ "If", "the", "database", "plugin", "is", "not", "present", "we", "have", "to", "handle", "capturing", "errors", "that", "shouldn", "t", "be", "reported", "as", "such", "in", "base", "." ]
python
train
34.235294
ThreatConnect-Inc/tcex
tcex/tcex_session.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_session.py#L70-L74
def request(self, method, url, **kwargs):
        """Override request method disabling verify on token renewal if disabled on session."""
        if not url.startswith('https'):
            url = '{}{}'.format(self.args.tc_api_path, url)
        return super(TcExSession, self).request(method, url, **kwargs)
[ "def", "request", "(", "self", ",", "method", ",", "url", ",", "*", "*", "kwargs", ")", ":", "if", "not", "url", ".", "startswith", "(", "'https'", ")", ":", "url", "=", "'{}{}'", ".", "format", "(", "self", ".", "args", ".", "tc_api_path", ",", "url", ")", "return", "super", "(", "TcExSession", ",", "self", ")", ".", "request", "(", "method", ",", "url", ",", "*", "*", "kwargs", ")" ]
Override request method disabling verify on token renewal if disabled on session.
[ "Override", "request", "method", "disabling", "verify", "on", "token", "renewal", "if", "disabled", "on", "session", "." ]
python
train
60.8
dereneaton/ipyrad
ipyrad/assemble/cluster_within.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/cluster_within.py#L493-L618
def build_clusters(data, sample, maxindels): """ Combines information from .utemp and .htemp files to create .clust files, which contain un-aligned clusters. Hits to seeds are only kept in the cluster if the number of internal indels is less than 'maxindels'. By default, we set maxindels=6 for this step (within-sample clustering). """ ## If reference assembly then here we're clustering the unmapped reads if "reference" in data.paramsdict["assembly_method"]: derepfile = os.path.join(data.dirs.edits, sample.name+"-refmap_derep.fastq") else: derepfile = os.path.join(data.dirs.edits, sample.name+"_derep.fastq") ## i/o vsearch files uhandle = os.path.join(data.dirs.clusts, sample.name+".utemp") usort = os.path.join(data.dirs.clusts, sample.name+".utemp.sort") hhandle = os.path.join(data.dirs.clusts, sample.name+".htemp") ## create an output file to write clusters to sample.files.clusters = os.path.join(data.dirs.clusts, sample.name+".clust.gz") clustsout = gzip.open(sample.files.clusters, 'wb') ## Sort the uhandle file so we can read through matches efficiently cmd = ["sort", "-k", "2", uhandle, "-o", usort] proc = sps.Popen(cmd, close_fds=True) _ = proc.communicate()[0] ## load ALL derep reads into a dictionary (this can be a few GB of RAM) ## and is larger if names are larger. We are grabbing two lines at a time. alldereps = {} with open(derepfile, 'rb') as ioderep: dereps = itertools.izip(*[iter(ioderep)]*2) for namestr, seq in dereps: nnn, sss = [i.strip() for i in namestr, seq] alldereps[nnn[1:]] = sss ## store observed seeds (this could count up to >million in bad data sets) seedsseen = set() ## Iterate through the usort file grabbing matches to build clusters with open(usort, 'rb') as insort: ## iterator, seed null, seqlist null isort = iter(insort) lastseed = 0 fseqs = [] seqlist = [] seqsize = 0 while 1: ## grab the next line try: hit, seed, _, ind, ori, _ = isort.next().strip().split() LOGGER.debug(">{} {} {}".format(hit, seed, ori, seq)) except StopIteration: break ## same seed, append match if seed != lastseed: seedsseen.add(seed) ## store the last cluster (fseq), count it, and clear fseq if fseqs: ## sort fseqs by derep after pulling out the seed fseqs = [fseqs[0]] + sorted(fseqs[1:], key=lambda x: \ int(x.split(";size=")[1].split(";")[0]), reverse=True) seqlist.append("\n".join(fseqs)) seqsize += 1 fseqs = [] ## occasionally write/dump stored clusters to file and clear mem if not seqsize % 10000: if seqlist: clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n") ## reset list and counter seqlist = [] ## store the new seed on top of fseq list fseqs.append(">{}*\n{}".format(seed, alldereps[seed])) lastseed = seed ## add match to the seed ## revcomp if orientation is reversed (comp preserves nnnn) if ori == "-": seq = comp(alldereps[hit])[::-1] else: seq = alldereps[hit] ## only save if not too many indels if int(ind) <= maxindels: fseqs.append(">{}{}\n{}".format(hit, ori, seq)) else: LOGGER.info("filtered by maxindels: %s %s", ind, seq) ## write whatever is left over to the clusts file if fseqs: seqlist.append("\n".join(fseqs)) if seqlist: clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n") ## now write the seeds that had no hits. 
Make dict from htemp with open(hhandle, 'rb') as iotemp: nohits = itertools.izip(*[iter(iotemp)]*2) seqlist = [] seqsize = 0 while 1: try: nnn, _ = [i.strip() for i in nohits.next()] except StopIteration: break ## occasionally write to file if not seqsize % 10000: if seqlist: clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n") ## reset list and counter seqlist = [] ## append to list if new seed if nnn[1:] not in seedsseen: seqlist.append("{}*\n{}".format(nnn, alldereps[nnn[1:]])) seqsize += 1 ## write whatever is left over to the clusts file if seqlist: clustsout.write("\n//\n//\n".join(seqlist))#+"\n//\n//\n") ## close the file handle clustsout.close() del alldereps
[ "def", "build_clusters", "(", "data", ",", "sample", ",", "maxindels", ")", ":", "## If reference assembly then here we're clustering the unmapped reads", "if", "\"reference\"", "in", "data", ".", "paramsdict", "[", "\"assembly_method\"", "]", ":", "derepfile", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "edits", ",", "sample", ".", "name", "+", "\"-refmap_derep.fastq\"", ")", "else", ":", "derepfile", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "edits", ",", "sample", ".", "name", "+", "\"_derep.fastq\"", ")", "## i/o vsearch files", "uhandle", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "clusts", ",", "sample", ".", "name", "+", "\".utemp\"", ")", "usort", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "clusts", ",", "sample", ".", "name", "+", "\".utemp.sort\"", ")", "hhandle", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "clusts", ",", "sample", ".", "name", "+", "\".htemp\"", ")", "## create an output file to write clusters to", "sample", ".", "files", ".", "clusters", "=", "os", ".", "path", ".", "join", "(", "data", ".", "dirs", ".", "clusts", ",", "sample", ".", "name", "+", "\".clust.gz\"", ")", "clustsout", "=", "gzip", ".", "open", "(", "sample", ".", "files", ".", "clusters", ",", "'wb'", ")", "## Sort the uhandle file so we can read through matches efficiently", "cmd", "=", "[", "\"sort\"", ",", "\"-k\"", ",", "\"2\"", ",", "uhandle", ",", "\"-o\"", ",", "usort", "]", "proc", "=", "sps", ".", "Popen", "(", "cmd", ",", "close_fds", "=", "True", ")", "_", "=", "proc", ".", "communicate", "(", ")", "[", "0", "]", "## load ALL derep reads into a dictionary (this can be a few GB of RAM)", "## and is larger if names are larger. 
We are grabbing two lines at a time.", "alldereps", "=", "{", "}", "with", "open", "(", "derepfile", ",", "'rb'", ")", "as", "ioderep", ":", "dereps", "=", "itertools", ".", "izip", "(", "*", "[", "iter", "(", "ioderep", ")", "]", "*", "2", ")", "for", "namestr", ",", "seq", "in", "dereps", ":", "nnn", ",", "sss", "=", "[", "i", ".", "strip", "(", ")", "for", "i", "in", "namestr", ",", "seq", "]", "alldereps", "[", "nnn", "[", "1", ":", "]", "]", "=", "sss", "## store observed seeds (this could count up to >million in bad data sets)", "seedsseen", "=", "set", "(", ")", "## Iterate through the usort file grabbing matches to build clusters", "with", "open", "(", "usort", ",", "'rb'", ")", "as", "insort", ":", "## iterator, seed null, seqlist null", "isort", "=", "iter", "(", "insort", ")", "lastseed", "=", "0", "fseqs", "=", "[", "]", "seqlist", "=", "[", "]", "seqsize", "=", "0", "while", "1", ":", "## grab the next line", "try", ":", "hit", ",", "seed", ",", "_", ",", "ind", ",", "ori", ",", "_", "=", "isort", ".", "next", "(", ")", ".", "strip", "(", ")", ".", "split", "(", ")", "LOGGER", ".", "debug", "(", "\">{} {} {}\"", ".", "format", "(", "hit", ",", "seed", ",", "ori", ",", "seq", ")", ")", "except", "StopIteration", ":", "break", "## same seed, append match", "if", "seed", "!=", "lastseed", ":", "seedsseen", ".", "add", "(", "seed", ")", "## store the last cluster (fseq), count it, and clear fseq", "if", "fseqs", ":", "## sort fseqs by derep after pulling out the seed", "fseqs", "=", "[", "fseqs", "[", "0", "]", "]", "+", "sorted", "(", "fseqs", "[", "1", ":", "]", ",", "key", "=", "lambda", "x", ":", "int", "(", "x", ".", "split", "(", "\";size=\"", ")", "[", "1", "]", ".", "split", "(", "\";\"", ")", "[", "0", "]", ")", ",", "reverse", "=", "True", ")", "seqlist", ".", "append", "(", "\"\\n\"", ".", "join", "(", "fseqs", ")", ")", "seqsize", "+=", "1", "fseqs", "=", "[", "]", "## occasionally write/dump stored clusters to file and clear mem", "if", "not", "seqsize", "%", "10000", ":", "if", "seqlist", ":", "clustsout", ".", "write", "(", "\"\\n//\\n//\\n\"", ".", "join", "(", "seqlist", ")", "+", "\"\\n//\\n//\\n\"", ")", "## reset list and counter", "seqlist", "=", "[", "]", "## store the new seed on top of fseq list", "fseqs", ".", "append", "(", "\">{}*\\n{}\"", ".", "format", "(", "seed", ",", "alldereps", "[", "seed", "]", ")", ")", "lastseed", "=", "seed", "## add match to the seed", "## revcomp if orientation is reversed (comp preserves nnnn)", "if", "ori", "==", "\"-\"", ":", "seq", "=", "comp", "(", "alldereps", "[", "hit", "]", ")", "[", ":", ":", "-", "1", "]", "else", ":", "seq", "=", "alldereps", "[", "hit", "]", "## only save if not too many indels", "if", "int", "(", "ind", ")", "<=", "maxindels", ":", "fseqs", ".", "append", "(", "\">{}{}\\n{}\"", ".", "format", "(", "hit", ",", "ori", ",", "seq", ")", ")", "else", ":", "LOGGER", ".", "info", "(", "\"filtered by maxindels: %s %s\"", ",", "ind", ",", "seq", ")", "## write whatever is left over to the clusts file", "if", "fseqs", ":", "seqlist", ".", "append", "(", "\"\\n\"", ".", "join", "(", "fseqs", ")", ")", "if", "seqlist", ":", "clustsout", ".", "write", "(", "\"\\n//\\n//\\n\"", ".", "join", "(", "seqlist", ")", "+", "\"\\n//\\n//\\n\"", ")", "## now write the seeds that had no hits. 
Make dict from htemp", "with", "open", "(", "hhandle", ",", "'rb'", ")", "as", "iotemp", ":", "nohits", "=", "itertools", ".", "izip", "(", "*", "[", "iter", "(", "iotemp", ")", "]", "*", "2", ")", "seqlist", "=", "[", "]", "seqsize", "=", "0", "while", "1", ":", "try", ":", "nnn", ",", "_", "=", "[", "i", ".", "strip", "(", ")", "for", "i", "in", "nohits", ".", "next", "(", ")", "]", "except", "StopIteration", ":", "break", "## occasionally write to file", "if", "not", "seqsize", "%", "10000", ":", "if", "seqlist", ":", "clustsout", ".", "write", "(", "\"\\n//\\n//\\n\"", ".", "join", "(", "seqlist", ")", "+", "\"\\n//\\n//\\n\"", ")", "## reset list and counter", "seqlist", "=", "[", "]", "## append to list if new seed", "if", "nnn", "[", "1", ":", "]", "not", "in", "seedsseen", ":", "seqlist", ".", "append", "(", "\"{}*\\n{}\"", ".", "format", "(", "nnn", ",", "alldereps", "[", "nnn", "[", "1", ":", "]", "]", ")", ")", "seqsize", "+=", "1", "## write whatever is left over to the clusts file", "if", "seqlist", ":", "clustsout", ".", "write", "(", "\"\\n//\\n//\\n\"", ".", "join", "(", "seqlist", ")", ")", "#+\"\\n//\\n//\\n\")", "## close the file handle", "clustsout", ".", "close", "(", ")", "del", "alldereps" ]
Combines information from .utemp and .htemp files to create .clust files, which contain un-aligned clusters. Hits to seeds are only kept in the cluster if the number of internal indels is less than 'maxindels'. By default, we set maxindels=6 for this step (within-sample clustering).
[ "Combines", "information", "from", ".", "utemp", "and", ".", "htemp", "files", "to", "create", ".", "clust", "files", "which", "contain", "un", "-", "aligned", "clusters", ".", "Hits", "to", "seeds", "are", "only", "kept", "in", "the", "cluster", "if", "the", "number", "of", "internal", "indels", "is", "less", "than", "maxindels", ".", "By", "default", "we", "set", "maxindels", "=", "6", "for", "this", "step", "(", "within", "-", "sample", "clustering", ")", "." ]
python
valid
39.095238
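The clustering step above shells out to the POSIX sort utility to order the vsearch hit table by its seed column before clusters are assembled. A minimal sketch of that one step, with hypothetical file names, assuming a POSIX sort binary is on the PATH:

import subprocess as sps

uhandle = "sample.utemp"        # vsearch hit table (hypothetical path)
usort = "sample.utemp.sort"     # same rows, sorted by the seed column (field 2)
proc = sps.Popen(["sort", "-k", "2", uhandle, "-o", usort], close_fds=True)
proc.communicate()              # block until the external sort finishes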
Stewori/pytypes
pytypes/util.py
https://github.com/Stewori/pytypes/blob/b814d38709e84c0e0825caf8b721c20eb5a8ab3b/pytypes/util.py#L94-L108
def getargspecs(func): """Bridges inspect.getargspec and inspect.getfullargspec. Automatically selects the proper one depending of current Python version. Automatically bypasses wrappers from typechecked- and override-decorators. """ if func is None: raise TypeError('None is not a Python function') if hasattr(func, 'ch_func'): return getargspecs(func.ch_func) elif hasattr(func, 'ov_func'): return getargspecs(func.ov_func) if hasattr(inspect, 'getfullargspec'): return inspect.getfullargspec(func) # Python 3 else: return inspect.getargspec(func)
[ "def", "getargspecs", "(", "func", ")", ":", "if", "func", "is", "None", ":", "raise", "TypeError", "(", "'None is not a Python function'", ")", "if", "hasattr", "(", "func", ",", "'ch_func'", ")", ":", "return", "getargspecs", "(", "func", ".", "ch_func", ")", "elif", "hasattr", "(", "func", ",", "'ov_func'", ")", ":", "return", "getargspecs", "(", "func", ".", "ov_func", ")", "if", "hasattr", "(", "inspect", ",", "'getfullargspec'", ")", ":", "return", "inspect", ".", "getfullargspec", "(", "func", ")", "# Python 3", "else", ":", "return", "inspect", ".", "getargspec", "(", "func", ")" ]
Bridges inspect.getargspec and inspect.getfullargspec. Automatically selects the proper one depending of current Python version. Automatically bypasses wrappers from typechecked- and override-decorators.
[ "Bridges", "inspect", ".", "getargspec", "and", "inspect", ".", "getfullargspec", ".", "Automatically", "selects", "the", "proper", "one", "depending", "of", "current", "Python", "version", ".", "Automatically", "bypasses", "wrappers", "from", "typechecked", "-", "and", "override", "-", "decorators", "." ]
python
train
40.733333
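A self-contained sketch of the same version bridge using only the standard inspect module; the example function is hypothetical and the pytypes wrapper attributes (ch_func, ov_func) are left out:

import inspect

def greet(name, punctuation="!"):
    return name + punctuation

# Prefer getfullargspec on Python 3, fall back to getargspec on Python 2.
if hasattr(inspect, 'getfullargspec'):
    spec = inspect.getfullargspec(greet)
else:
    spec = inspect.getargspec(greet)
print(spec.args)    # ['name', 'punctuation']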
ask/carrot
carrot/messaging.py
https://github.com/ask/carrot/blob/5889a25cd2e274642071c9bba39772f4b3e3d9da/carrot/messaging.py#L436-L449
def wait(self, limit=None): """Go into consume mode. Mostly for testing purposes and simple programs, you probably want :meth:`iterconsume` or :meth:`iterqueue` instead. This runs an infinite loop, processing all incoming messages using :meth:`receive` to apply the message to all registered callbacks. """ it = self.iterconsume(limit) while True: it.next()
[ "def", "wait", "(", "self", ",", "limit", "=", "None", ")", ":", "it", "=", "self", ".", "iterconsume", "(", "limit", ")", "while", "True", ":", "it", ".", "next", "(", ")" ]
Go into consume mode. Mostly for testing purposes and simple programs, you probably want :meth:`iterconsume` or :meth:`iterqueue` instead. This runs an infinite loop, processing all incoming messages using :meth:`receive` to apply the message to all registered callbacks.
[ "Go", "into", "consume", "mode", "." ]
python
train
30.785714
googledatalab/pydatalab
google/datalab/utils/_utils.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/utils/_utils.py#L111-L125
def is_http_running_on(port): """ Check if an http server runs on a given port. Args: The port to check. Returns: True if it is used by an http server. False otherwise. """ try: conn = httplib.HTTPConnection('127.0.0.1:' + str(port)) conn.connect() conn.close() return True except Exception: return False
[ "def", "is_http_running_on", "(", "port", ")", ":", "try", ":", "conn", "=", "httplib", ".", "HTTPConnection", "(", "'127.0.0.1:'", "+", "str", "(", "port", ")", ")", "conn", ".", "connect", "(", ")", "conn", ".", "close", "(", ")", "return", "True", "except", "Exception", ":", "return", "False" ]
Check if an http server runs on a given port. Args: The port to check. Returns: True if it is used by an http server. False otherwise.
[ "Check", "if", "an", "http", "server", "runs", "on", "a", "given", "port", "." ]
python
train
22.066667
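The record above uses the Python 2 httplib module. A Python 3 sketch of the same probe, assuming only the standard library:

import http.client

def is_http_running_on(port):
    # True if something on localhost accepts a TCP connection on this port.
    try:
        conn = http.client.HTTPConnection('127.0.0.1:' + str(port))
        conn.connect()
        conn.close()
        return True
    except Exception:
        return False

print(is_http_running_on(8080))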
ChrisCummins/labm8
lockfile.py
https://github.com/ChrisCummins/labm8/blob/dd10d67a757aefb180cb508f86696f99440c94f5/lockfile.py#L111-L152
def acquire(self, replace_stale=False, force=False): """ Acquire the lock. A lock can be claimed if any of these conditions are true: 1. The lock is unheld by anyone. 2. The lock is held but the 'force' argument is set. 3. The lock is held by the current process. Arguments: replace_stale (bool, optional) If true, lock can be aquired from stale processes. A stale process is one which currently owns the parent lock, but no process with that PID is alive. force (bool, optional): If true, ignore any existing lock. If false, fail if lock already claimed. Returns: LockFile: self. Raises: UnableToAcquireLockError: If the lock is already claimed (not raised if force option is used). """ def _create_lock(): LockFile.write(self.path, os.getpid(), time.time()) if self.islocked: lock_owner_pid = self.pid if self.owned_by_self: pass # don't replace existing lock elif force: _create_lock() elif replace_stale and not system.isprocess(lock_owner_pid): _create_lock() else: raise UnableToAcquireLockError(self) else: # new lock _create_lock() return self
[ "def", "acquire", "(", "self", ",", "replace_stale", "=", "False", ",", "force", "=", "False", ")", ":", "def", "_create_lock", "(", ")", ":", "LockFile", ".", "write", "(", "self", ".", "path", ",", "os", ".", "getpid", "(", ")", ",", "time", ".", "time", "(", ")", ")", "if", "self", ".", "islocked", ":", "lock_owner_pid", "=", "self", ".", "pid", "if", "self", ".", "owned_by_self", ":", "pass", "# don't replace existing lock", "elif", "force", ":", "_create_lock", "(", ")", "elif", "replace_stale", "and", "not", "system", ".", "isprocess", "(", "lock_owner_pid", ")", ":", "_create_lock", "(", ")", "else", ":", "raise", "UnableToAcquireLockError", "(", "self", ")", "else", ":", "# new lock", "_create_lock", "(", ")", "return", "self" ]
Acquire the lock. A lock can be claimed if any of these conditions are true: 1. The lock is unheld by anyone. 2. The lock is held but the 'force' argument is set. 3. The lock is held by the current process. Arguments: replace_stale (bool, optional) If true, lock can be aquired from stale processes. A stale process is one which currently owns the parent lock, but no process with that PID is alive. force (bool, optional): If true, ignore any existing lock. If false, fail if lock already claimed. Returns: LockFile: self. Raises: UnableToAcquireLockError: If the lock is already claimed (not raised if force option is used).
[ "Acquire", "the", "lock", "." ]
python
train
33.333333
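A standalone sketch of the same claim logic with a plain pid file, for illustration only; it is not the labm8 LockFile API, and the liveness check via os.kill(pid, 0) assumes a POSIX system:

import os, time

def acquire_pid_lock(path, replace_stale=False, force=False):
    def write_lock():
        with open(path, 'w') as handle:
            handle.write("{} {}".format(os.getpid(), time.time()))

    def alive(pid):
        try:
            os.kill(pid, 0)       # signal 0 only checks that the pid exists
            return True
        except OSError:
            return False

    if os.path.exists(path):
        owner = int(open(path).read().split()[0])
        if owner == os.getpid():
            pass                  # we already hold it
        elif force or (replace_stale and not alive(owner)):
            write_lock()
        else:
            raise RuntimeError("lock already held by pid %d" % owner)
    else:
        write_lock()
    return path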
sprockets/sprockets.mixins.statsd
sprockets/mixins/statsd/__init__.py
https://github.com/sprockets/sprockets.mixins.statsd/blob/98dcce37d275a3ab96ef618b4756d7c4618a550a/sprockets/mixins/statsd/__init__.py#L89-L130
def on_finish(self): """Invoked once the request has been finished. Increments a counter created in the format: .. code:: <PREFIX>.counters.<host>.package[.module].Class.METHOD.STATUS sprockets.counters.localhost.tornado.web.RequestHandler.GET.200 Adds a value to a timer in the following format: .. code:: <PREFIX>.timers.<host>.package[.module].Class.METHOD.STATUS sprockets.timers.localhost.tornado.web.RequestHandler.GET.200 """ if self.statsd_prefix != statsd.STATSD_PREFIX: statsd.set_prefix(self.statsd_prefix) if hasattr(self, 'request') and self.request: if self.statsd_use_hostname: timer_prefix = 'timers.{0}'.format(socket.gethostname()) counter_prefix = 'counters.{0}'.format(socket.gethostname()) else: timer_prefix = 'timers' counter_prefix = 'counters' statsd.add_timing(timer_prefix, self.__module__, str(self.__class__.__name__), self.request.method, str(self._status_code), value=self.request.request_time() * 1000) statsd.incr(counter_prefix, self.__module__, self.__class__.__name__, self.request.method, str(self._status_code)) super(RequestMetricsMixin, self).on_finish()
[ "def", "on_finish", "(", "self", ")", ":", "if", "self", ".", "statsd_prefix", "!=", "statsd", ".", "STATSD_PREFIX", ":", "statsd", ".", "set_prefix", "(", "self", ".", "statsd_prefix", ")", "if", "hasattr", "(", "self", ",", "'request'", ")", "and", "self", ".", "request", ":", "if", "self", ".", "statsd_use_hostname", ":", "timer_prefix", "=", "'timers.{0}'", ".", "format", "(", "socket", ".", "gethostname", "(", ")", ")", "counter_prefix", "=", "'counters.{0}'", ".", "format", "(", "socket", ".", "gethostname", "(", ")", ")", "else", ":", "timer_prefix", "=", "'timers'", "counter_prefix", "=", "'counters'", "statsd", ".", "add_timing", "(", "timer_prefix", ",", "self", ".", "__module__", ",", "str", "(", "self", ".", "__class__", ".", "__name__", ")", ",", "self", ".", "request", ".", "method", ",", "str", "(", "self", ".", "_status_code", ")", ",", "value", "=", "self", ".", "request", ".", "request_time", "(", ")", "*", "1000", ")", "statsd", ".", "incr", "(", "counter_prefix", ",", "self", ".", "__module__", ",", "self", ".", "__class__", ".", "__name__", ",", "self", ".", "request", ".", "method", ",", "str", "(", "self", ".", "_status_code", ")", ")", "super", "(", "RequestMetricsMixin", ",", "self", ")", ".", "on_finish", "(", ")" ]
Invoked once the request has been finished. Increments a counter created in the format: .. code:: <PREFIX>.counters.<host>.package[.module].Class.METHOD.STATUS sprockets.counters.localhost.tornado.web.RequestHandler.GET.200 Adds a value to a timer in the following format: .. code:: <PREFIX>.timers.<host>.package[.module].Class.METHOD.STATUS sprockets.timers.localhost.tornado.web.RequestHandler.GET.200
[ "Invoked", "once", "the", "request", "has", "been", "finished", ".", "Increments", "a", "counter", "created", "in", "the", "format", ":" ]
python
train
36.97619
majuss/lupupy
lupupy/devices/binary_sensor.py
https://github.com/majuss/lupupy/blob/71af6c397837ffc393c7b8122be175602638d3c6/lupupy/devices/binary_sensor.py#L11-L18
def is_on(self): """ Get sensor state. Assume offline or open (worst case). """ return self.status not in (CONST.STATUS_OFF, CONST.STATUS_OFFLINE, CONST.STATUS_CLOSED, CONST.STATUS_OPEN)
[ "def", "is_on", "(", "self", ")", ":", "return", "self", ".", "status", "not", "in", "(", "CONST", ".", "STATUS_OFF", ",", "CONST", ".", "STATUS_OFFLINE", ",", "CONST", ".", "STATUS_CLOSED", ",", "CONST", ".", "STATUS_OPEN", ")" ]
Get sensor state. Assume offline or open (worst case).
[ "Get", "sensor", "state", "." ]
python
train
31.875
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L2293-L2298
def _getOLRootNumber(self): """_getOLRootNumber(self) -> PyObject *""" if self.isClosed or self.isEncrypted: raise ValueError("operation illegal for closed / encrypted doc") return _fitz.Document__getOLRootNumber(self)
[ "def", "_getOLRootNumber", "(", "self", ")", ":", "if", "self", ".", "isClosed", "or", "self", ".", "isEncrypted", ":", "raise", "ValueError", "(", "\"operation illegal for closed / encrypted doc\"", ")", "return", "_fitz", ".", "Document__getOLRootNumber", "(", "self", ")" ]
_getOLRootNumber(self) -> PyObject *
[ "_getOLRootNumber", "(", "self", ")", "-", ">", "PyObject", "*" ]
python
train
41.666667
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/setuptools/command/easy_install.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/setuptools/command/easy_install.py#L1223-L1257
def install_site_py(self): """Make sure there's a site.py in the target dir, if needed""" if self.sitepy_installed: return # already did it, or don't need to sitepy = os.path.join(self.install_dir, "site.py") source = resource_string("setuptools", "site-patch.py") current = "" if os.path.exists(sitepy): log.debug("Checking existing site.py in %s", self.install_dir) f = open(sitepy, 'rb') current = f.read() # we want str, not bytes if PY3: current = current.decode() f.close() if not current.startswith('def __boot():'): raise DistutilsError( "%s is not a setuptools-generated site.py; please" " remove it." % sitepy ) if current != source: log.info("Creating %s", sitepy) if not self.dry_run: ensure_directory(sitepy) f = open(sitepy, 'wb') f.write(source) f.close() self.byte_compile([sitepy]) self.sitepy_installed = True
[ "def", "install_site_py", "(", "self", ")", ":", "if", "self", ".", "sitepy_installed", ":", "return", "# already did it, or don't need to", "sitepy", "=", "os", ".", "path", ".", "join", "(", "self", ".", "install_dir", ",", "\"site.py\"", ")", "source", "=", "resource_string", "(", "\"setuptools\"", ",", "\"site-patch.py\"", ")", "current", "=", "\"\"", "if", "os", ".", "path", ".", "exists", "(", "sitepy", ")", ":", "log", ".", "debug", "(", "\"Checking existing site.py in %s\"", ",", "self", ".", "install_dir", ")", "f", "=", "open", "(", "sitepy", ",", "'rb'", ")", "current", "=", "f", ".", "read", "(", ")", "# we want str, not bytes", "if", "PY3", ":", "current", "=", "current", ".", "decode", "(", ")", "f", ".", "close", "(", ")", "if", "not", "current", ".", "startswith", "(", "'def __boot():'", ")", ":", "raise", "DistutilsError", "(", "\"%s is not a setuptools-generated site.py; please\"", "\" remove it.\"", "%", "sitepy", ")", "if", "current", "!=", "source", ":", "log", ".", "info", "(", "\"Creating %s\"", ",", "sitepy", ")", "if", "not", "self", ".", "dry_run", ":", "ensure_directory", "(", "sitepy", ")", "f", "=", "open", "(", "sitepy", ",", "'wb'", ")", "f", ".", "write", "(", "source", ")", "f", ".", "close", "(", ")", "self", ".", "byte_compile", "(", "[", "sitepy", "]", ")", "self", ".", "sitepy_installed", "=", "True" ]
Make sure there's a site.py in the target dir, if needed
[ "Make", "sure", "there", "s", "a", "site", ".", "py", "in", "the", "target", "dir", "if", "needed" ]
python
test
32.8
RedHatInsights/insights-core
insights/contrib/ConfigParser.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/contrib/ConfigParser.py#L630-L660
def items(self, section, raw=False, vars=None): """Return a list of tuples with (name, value) for each option in the section. All % interpolations are expanded in the return values, based on the defaults passed into the constructor, unless the optional argument `raw' is true. Additional substitutions may be provided using the `vars' argument, which must be a dictionary whose contents overrides any pre-existing defaults. The section DEFAULT is special. """ d = self._defaults.copy() try: d.update(self._sections[section]) except KeyError: if section != DEFAULTSECT: raise NoSectionError(section) # Update with the entry specific variables if vars: for key, value in vars.items(): d[self.optionxform(key)] = value options = d.keys() if "__name__" in options: options.remove("__name__") if raw: return [(option, d[option]) for option in options] else: return [(option, self._interpolate(section, option, d[option], d)) for option in options]
[ "def", "items", "(", "self", ",", "section", ",", "raw", "=", "False", ",", "vars", "=", "None", ")", ":", "d", "=", "self", ".", "_defaults", ".", "copy", "(", ")", "try", ":", "d", ".", "update", "(", "self", ".", "_sections", "[", "section", "]", ")", "except", "KeyError", ":", "if", "section", "!=", "DEFAULTSECT", ":", "raise", "NoSectionError", "(", "section", ")", "# Update with the entry specific variables", "if", "vars", ":", "for", "key", ",", "value", "in", "vars", ".", "items", "(", ")", ":", "d", "[", "self", ".", "optionxform", "(", "key", ")", "]", "=", "value", "options", "=", "d", ".", "keys", "(", ")", "if", "\"__name__\"", "in", "options", ":", "options", ".", "remove", "(", "\"__name__\"", ")", "if", "raw", ":", "return", "[", "(", "option", ",", "d", "[", "option", "]", ")", "for", "option", "in", "options", "]", "else", ":", "return", "[", "(", "option", ",", "self", ".", "_interpolate", "(", "section", ",", "option", ",", "d", "[", "option", "]", ",", "d", ")", ")", "for", "option", "in", "options", "]" ]
Return a list of tuples with (name, value) for each option in the section. All % interpolations are expanded in the return values, based on the defaults passed into the constructor, unless the optional argument `raw' is true. Additional substitutions may be provided using the `vars' argument, which must be a dictionary whose contents overrides any pre-existing defaults. The section DEFAULT is special.
[ "Return", "a", "list", "of", "tuples", "with", "(", "name", "value", ")", "for", "each", "option", "in", "the", "section", "." ]
python
train
38.806452
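The record above is a vendored Python 2 ConfigParser; the Python 3 standard library configparser exposes the same items(section, raw, vars) behaviour. A small usage sketch:

import configparser

cp = configparser.ConfigParser({'root': '/srv'})          # one DEFAULT value
cp.read_string("[paths]\ndata = %(root)s/data\n")

print(cp.items('paths', raw=True))                # interpolation left untouched
print(cp.items('paths'))                          # %(root)s expanded from DEFAULT
print(cp.items('paths', vars={'root': '/tmp'}))   # vars override the defaults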
tensorlayer/tensorlayer
tensorlayer/visualize.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/visualize.py#L101-L145
def save_images(images, size, image_path='_temp.png'): """Save multiple images into one single image. Parameters ----------- images : numpy array (batch, w, h, c) size : list of 2 ints row and column number. number of images should be equal or less than size[0] * size[1] image_path : str save path Examples --------- >>> import numpy as np >>> import tensorlayer as tl >>> images = np.random.rand(64, 100, 100, 3) >>> tl.visualize.save_images(images, [8, 8], 'temp.png') """ if len(images.shape) == 3: # Greyscale [batch, h, w] --> [batch, h, w, 1] images = images[:, :, :, np.newaxis] def merge(images, size): h, w = images.shape[1], images.shape[2] img = np.zeros((h * size[0], w * size[1], 3), dtype=images.dtype) for idx, image in enumerate(images): i = idx % size[1] j = idx // size[1] img[j * h:j * h + h, i * w:i * w + w, :] = image return img def imsave(images, size, path): if np.max(images) <= 1 and (-1 <= np.min(images) < 0): images = ((images + 1) * 127.5).astype(np.uint8) elif np.max(images) <= 1 and np.min(images) >= 0: images = (images * 255).astype(np.uint8) return imageio.imwrite(path, merge(images, size)) if len(images) > size[0] * size[1]: raise AssertionError("number of images should be equal or less than size[0] * size[1] {}".format(len(images))) return imsave(images, size, image_path)
[ "def", "save_images", "(", "images", ",", "size", ",", "image_path", "=", "'_temp.png'", ")", ":", "if", "len", "(", "images", ".", "shape", ")", "==", "3", ":", "# Greyscale [batch, h, w] --> [batch, h, w, 1]", "images", "=", "images", "[", ":", ",", ":", ",", ":", ",", "np", ".", "newaxis", "]", "def", "merge", "(", "images", ",", "size", ")", ":", "h", ",", "w", "=", "images", ".", "shape", "[", "1", "]", ",", "images", ".", "shape", "[", "2", "]", "img", "=", "np", ".", "zeros", "(", "(", "h", "*", "size", "[", "0", "]", ",", "w", "*", "size", "[", "1", "]", ",", "3", ")", ",", "dtype", "=", "images", ".", "dtype", ")", "for", "idx", ",", "image", "in", "enumerate", "(", "images", ")", ":", "i", "=", "idx", "%", "size", "[", "1", "]", "j", "=", "idx", "//", "size", "[", "1", "]", "img", "[", "j", "*", "h", ":", "j", "*", "h", "+", "h", ",", "i", "*", "w", ":", "i", "*", "w", "+", "w", ",", ":", "]", "=", "image", "return", "img", "def", "imsave", "(", "images", ",", "size", ",", "path", ")", ":", "if", "np", ".", "max", "(", "images", ")", "<=", "1", "and", "(", "-", "1", "<=", "np", ".", "min", "(", "images", ")", "<", "0", ")", ":", "images", "=", "(", "(", "images", "+", "1", ")", "*", "127.5", ")", ".", "astype", "(", "np", ".", "uint8", ")", "elif", "np", ".", "max", "(", "images", ")", "<=", "1", "and", "np", ".", "min", "(", "images", ")", ">=", "0", ":", "images", "=", "(", "images", "*", "255", ")", ".", "astype", "(", "np", ".", "uint8", ")", "return", "imageio", ".", "imwrite", "(", "path", ",", "merge", "(", "images", ",", "size", ")", ")", "if", "len", "(", "images", ")", ">", "size", "[", "0", "]", "*", "size", "[", "1", "]", ":", "raise", "AssertionError", "(", "\"number of images should be equal or less than size[0] * size[1] {}\"", ".", "format", "(", "len", "(", "images", ")", ")", ")", "return", "imsave", "(", "images", ",", "size", ",", "image_path", ")" ]
Save multiple images into one single image. Parameters ----------- images : numpy array (batch, w, h, c) size : list of 2 ints row and column number. number of images should be equal or less than size[0] * size[1] image_path : str save path Examples --------- >>> import numpy as np >>> import tensorlayer as tl >>> images = np.random.rand(64, 100, 100, 3) >>> tl.visualize.save_images(images, [8, 8], 'temp.png')
[ "Save", "multiple", "images", "into", "one", "single", "image", "." ]
python
valid
33.688889
rikrd/inspire
inspirespeech/common.py
https://github.com/rikrd/inspire/blob/e281c0266a9a9633f34ab70f9c3ad58036c19b59/inspirespeech/common.py#L571-L615
def create_flat_start_model(feature_filename, state_stay_probabilities, symbol_list, output_model_directory, output_prototype_filename, htk_trace): """ Creates a flat start model by using HCompV to compute the global mean and variance. Then uses these global mean and variance to create an N-state model for each symbol in the given list. :param feature_filename: The filename containing the audio and feature file pairs :param output_model_directory: The directory where to write the created model :param output_prototype_filename: The prototype model filename :param htk_trace: Trace level for HTK :rtype : None """ # Create a prototype model create_prototype_model(feature_filename, output_prototype_filename, state_stay_probabilities=state_stay_probabilities) # Compute the global mean and variance config.htk_command("HCompV -A -D -T {} -f 0.01 " "-S {} -m -o {} -M {} {}".format(htk_trace, feature_filename, 'proto', output_model_directory, output_prototype_filename)) # Create an hmmdefs using the global mean and variance for all states and symbols # Duplicate the model 'proto' -> symbol_list proto_model_filename = config.path(output_model_directory, 'proto') model = htk.load_model(proto_model_filename) model = htk_model_utils.map_hmms(model, {'proto': symbol_list}) # vFloors -> macros vfloors_filename = config.path(output_model_directory, 'vFloors') variance_model = htk.load_model(vfloors_filename) model['macros'] += variance_model['macros'] macros, hmmdefs = htk_model_utils.split_model(model) htk.save_model(macros, config.path(output_model_directory, 'macros')) htk.save_model(hmmdefs, config.path(output_model_directory, 'hmmdefs'))
[ "def", "create_flat_start_model", "(", "feature_filename", ",", "state_stay_probabilities", ",", "symbol_list", ",", "output_model_directory", ",", "output_prototype_filename", ",", "htk_trace", ")", ":", "# Create a prototype model", "create_prototype_model", "(", "feature_filename", ",", "output_prototype_filename", ",", "state_stay_probabilities", "=", "state_stay_probabilities", ")", "# Compute the global mean and variance", "config", ".", "htk_command", "(", "\"HCompV -A -D -T {} -f 0.01 \"", "\"-S {} -m -o {} -M {} {}\"", ".", "format", "(", "htk_trace", ",", "feature_filename", ",", "'proto'", ",", "output_model_directory", ",", "output_prototype_filename", ")", ")", "# Create an hmmdefs using the global mean and variance for all states and symbols", "# Duplicate the model 'proto' -> symbol_list", "proto_model_filename", "=", "config", ".", "path", "(", "output_model_directory", ",", "'proto'", ")", "model", "=", "htk", ".", "load_model", "(", "proto_model_filename", ")", "model", "=", "htk_model_utils", ".", "map_hmms", "(", "model", ",", "{", "'proto'", ":", "symbol_list", "}", ")", "# vFloors -> macros", "vfloors_filename", "=", "config", ".", "path", "(", "output_model_directory", ",", "'vFloors'", ")", "variance_model", "=", "htk", ".", "load_model", "(", "vfloors_filename", ")", "model", "[", "'macros'", "]", "+=", "variance_model", "[", "'macros'", "]", "macros", ",", "hmmdefs", "=", "htk_model_utils", ".", "split_model", "(", "model", ")", "htk", ".", "save_model", "(", "macros", ",", "config", ".", "path", "(", "output_model_directory", ",", "'macros'", ")", ")", "htk", ".", "save_model", "(", "hmmdefs", ",", "config", ".", "path", "(", "output_model_directory", ",", "'hmmdefs'", ")", ")" ]
Creates a flat start model by using HCompV to compute the global mean and variance. Then uses these global mean and variance to create an N-state model for each symbol in the given list. :param feature_filename: The filename containing the audio and feature file pairs :param output_model_directory: The directory where to write the created model :param output_prototype_filename: The prototype model filename :param htk_trace: Trace level for HTK :rtype : None
[ "Creates", "a", "flat", "start", "model", "by", "using", "HCompV", "to", "compute", "the", "global", "mean", "and", "variance", ".", "Then", "uses", "these", "global", "mean", "and", "variance", "to", "create", "an", "N", "-", "state", "model", "for", "each", "symbol", "in", "the", "given", "list", "." ]
python
train
47.933333
pysathq/pysat
examples/hitman.py
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/examples/hitman.py#L296-L315
def get(self): """ This method computes and returns a hitting set. The hitting set is obtained using the underlying oracle operating the MaxSAT problem formulation. The computed solution is mapped back to objects of the problem domain. :rtype: list(obj) """ model = self.oracle.compute() if model: if self.htype == 'rc2': # extracting a hitting set self.hset = filter(lambda v: v > 0, model) else: self.hset = model return list(map(lambda vid: self.idpool.id2obj[vid], self.hset))
[ "def", "get", "(", "self", ")", ":", "model", "=", "self", ".", "oracle", ".", "compute", "(", ")", "if", "model", ":", "if", "self", ".", "htype", "==", "'rc2'", ":", "# extracting a hitting set", "self", ".", "hset", "=", "filter", "(", "lambda", "v", ":", "v", ">", "0", ",", "model", ")", "else", ":", "self", ".", "hset", "=", "model", "return", "list", "(", "map", "(", "lambda", "vid", ":", "self", ".", "idpool", ".", "id2obj", "[", "vid", "]", ",", "self", ".", "hset", ")", ")" ]
This method computes and returns a hitting set. The hitting set is obtained using the underlying oracle operating the MaxSAT problem formulation. The computed solution is mapped back to objects of the problem domain. :rtype: list(obj)
[ "This", "method", "computes", "and", "returns", "a", "hitting", "set", ".", "The", "hitting", "set", "is", "obtained", "using", "the", "underlying", "oracle", "operating", "the", "MaxSAT", "problem", "formulation", ".", "The", "computed", "solution", "is", "mapped", "back", "to", "objects", "of", "the", "problem", "domain", "." ]
python
train
32.05
pingali/dgit
dgitcore/contrib/repomanagers/gitmanager.py
https://github.com/pingali/dgit/blob/ecde01f40b98f0719dbcfb54452270ed2f86686d/dgitcore/contrib/repomanagers/gitmanager.py#L381-L405
def drop(self, repo, args=[]): """ Cleanup the repo """ # Clean up the rootdir rootdir = repo.rootdir if os.path.exists(rootdir): print("Cleaning repo directory: {}".format(rootdir)) shutil.rmtree(rootdir) # Cleanup the local version of the repo (this could be on # the server etc. server_repodir = self.server_rootdir_from_repo(repo, create=False) if os.path.exists(server_repodir): print("Cleaning data from local git 'server': {}".format(server_repodir)) shutil.rmtree(server_repodir) super(GitRepoManager, self).drop(repo) return { 'status': 'success', 'message': "successful cleanup" }
[ "def", "drop", "(", "self", ",", "repo", ",", "args", "=", "[", "]", ")", ":", "# Clean up the rootdir", "rootdir", "=", "repo", ".", "rootdir", "if", "os", ".", "path", ".", "exists", "(", "rootdir", ")", ":", "print", "(", "\"Cleaning repo directory: {}\"", ".", "format", "(", "rootdir", ")", ")", "shutil", ".", "rmtree", "(", "rootdir", ")", "# Cleanup the local version of the repo (this could be on", "# the server etc.", "server_repodir", "=", "self", ".", "server_rootdir_from_repo", "(", "repo", ",", "create", "=", "False", ")", "if", "os", ".", "path", ".", "exists", "(", "server_repodir", ")", ":", "print", "(", "\"Cleaning data from local git 'server': {}\"", ".", "format", "(", "server_repodir", ")", ")", "shutil", ".", "rmtree", "(", "server_repodir", ")", "super", "(", "GitRepoManager", ",", "self", ")", ".", "drop", "(", "repo", ")", "return", "{", "'status'", ":", "'success'", ",", "'message'", ":", "\"successful cleanup\"", "}" ]
Cleanup the repo
[ "Cleanup", "the", "repo" ]
python
valid
32.04
AguaClara/aguaclara
aguaclara/design/sed_tank.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/design/sed_tank.py#L422-L440
def q_diffuser(sed_inputs=sed_dict): """Return the flow through each diffuser. Parameters ---------- sed_inputs : dict A dictionary of all of the constant inputs needed for sedimentation tank calculations can be found in sed.yaml Returns ------- float Flow through each diffuser in the sedimentation tank Examples -------- >>> from aide_design.play import* >>> """ return (sed_inputs['tank']['vel_up'].to(u.m/u.s) * sed_inputs['tank']['W'].to(u.m) * L_diffuser_outer(sed_inputs)).magnitude
[ "def", "q_diffuser", "(", "sed_inputs", "=", "sed_dict", ")", ":", "return", "(", "sed_inputs", "[", "'tank'", "]", "[", "'vel_up'", "]", ".", "to", "(", "u", ".", "m", "/", "u", ".", "s", ")", "*", "sed_inputs", "[", "'tank'", "]", "[", "'W'", "]", ".", "to", "(", "u", ".", "m", ")", "*", "L_diffuser_outer", "(", "sed_inputs", ")", ")", ".", "magnitude" ]
Return the flow through each diffuser. Parameters ---------- sed_inputs : dict A dictionary of all of the constant inputs needed for sedimentation tank calculations can be found in sed.yaml Returns ------- float Flow through each diffuser in the sedimentation tank Examples -------- >>> from aide_design.play import* >>>
[ "Return", "the", "flow", "through", "each", "diffuser", ".", "Parameters", "----------", "sed_inputs", ":", "dict", "A", "dictionary", "of", "all", "of", "the", "constant", "inputs", "needed", "for", "sedimentation", "tank", "calculations", "can", "be", "found", "in", "sed", ".", "yaml", "Returns", "-------", "float", "Flow", "through", "each", "diffuser", "in", "the", "sedimentation", "tank", "Examples", "--------", ">>>", "from", "aide_design", ".", "play", "import", "*", ">>>" ]
python
train
30.157895
pmacosta/ptrie
ptrie/ptrie.py
https://github.com/pmacosta/ptrie/blob/c176d3ee810b7b5243c7ff2bbf2f1af0b0fff2a8/ptrie/ptrie.py#L1077-L1133
def print_node(self, name): # noqa: D302 r""" Print node information (parent, children and data). :param name: Node name :type name: :ref:`NodeName` :raises: * RuntimeError (Argument \`name\` is not valid) * RuntimeError (Node *[name]* not in tree) Using the same example tree created in :py:meth:`ptrie.Trie.add_nodes`:: >>> from __future__ import print_function >>> import docs.support.ptrie_example >>> tobj = docs.support.ptrie_example.create_tree() >>> print(tobj) root ├branch1 (*) │├leaf1 ││└subleaf1 (*) │└leaf2 (*) │ └subleaf2 └branch2 >>> print(tobj.print_node('root.branch1')) Name: root.branch1 Parent: root Children: leaf1, leaf2 Data: [5, 7] """ if self._validate_node_name(name): raise RuntimeError("Argument `name` is not valid") self._node_in_tree(name) node = self._db[name] children = ( [self._split_node_name(child)[-1] for child in node["children"]] if node["children"] else node["children"] ) data = ( node["data"][0] if node["data"] and (len(node["data"]) == 1) else node["data"] ) return ( "Name: {node_name}\n" "Parent: {parent_name}\n" "Children: {children_list}\n" "Data: {node_data}".format( node_name=name, parent_name=node["parent"] if node["parent"] else None, children_list=", ".join(children) if children else None, node_data=data if data else None, ) )
[ "def", "print_node", "(", "self", ",", "name", ")", ":", "# noqa: D302", "if", "self", ".", "_validate_node_name", "(", "name", ")", ":", "raise", "RuntimeError", "(", "\"Argument `name` is not valid\"", ")", "self", ".", "_node_in_tree", "(", "name", ")", "node", "=", "self", ".", "_db", "[", "name", "]", "children", "=", "(", "[", "self", ".", "_split_node_name", "(", "child", ")", "[", "-", "1", "]", "for", "child", "in", "node", "[", "\"children\"", "]", "]", "if", "node", "[", "\"children\"", "]", "else", "node", "[", "\"children\"", "]", ")", "data", "=", "(", "node", "[", "\"data\"", "]", "[", "0", "]", "if", "node", "[", "\"data\"", "]", "and", "(", "len", "(", "node", "[", "\"data\"", "]", ")", "==", "1", ")", "else", "node", "[", "\"data\"", "]", ")", "return", "(", "\"Name: {node_name}\\n\"", "\"Parent: {parent_name}\\n\"", "\"Children: {children_list}\\n\"", "\"Data: {node_data}\"", ".", "format", "(", "node_name", "=", "name", ",", "parent_name", "=", "node", "[", "\"parent\"", "]", "if", "node", "[", "\"parent\"", "]", "else", "None", ",", "children_list", "=", "\", \"", ".", "join", "(", "children", ")", "if", "children", "else", "None", ",", "node_data", "=", "data", "if", "data", "else", "None", ",", ")", ")" ]
r""" Print node information (parent, children and data). :param name: Node name :type name: :ref:`NodeName` :raises: * RuntimeError (Argument \`name\` is not valid) * RuntimeError (Node *[name]* not in tree) Using the same example tree created in :py:meth:`ptrie.Trie.add_nodes`:: >>> from __future__ import print_function >>> import docs.support.ptrie_example >>> tobj = docs.support.ptrie_example.create_tree() >>> print(tobj) root ├branch1 (*) │├leaf1 ││└subleaf1 (*) │└leaf2 (*) │ └subleaf2 └branch2 >>> print(tobj.print_node('root.branch1')) Name: root.branch1 Parent: root Children: leaf1, leaf2 Data: [5, 7]
[ "r", "Print", "node", "information", "(", "parent", "children", "and", "data", ")", "." ]
python
train
31.421053
saltstack/salt
salt/states/file.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/file.py#L1032-L1047
def _set_symlink_ownership(path, user, group, win_owner): ''' Set the ownership of a symlink and return a boolean indicating success/failure ''' if salt.utils.platform.is_windows(): try: salt.utils.win_dacl.set_owner(path, win_owner) except CommandExecutionError: pass else: try: __salt__['file.lchown'](path, user, group) except OSError: pass return _check_symlink_ownership(path, user, group, win_owner)
[ "def", "_set_symlink_ownership", "(", "path", ",", "user", ",", "group", ",", "win_owner", ")", ":", "if", "salt", ".", "utils", ".", "platform", ".", "is_windows", "(", ")", ":", "try", ":", "salt", ".", "utils", ".", "win_dacl", ".", "set_owner", "(", "path", ",", "win_owner", ")", "except", "CommandExecutionError", ":", "pass", "else", ":", "try", ":", "__salt__", "[", "'file.lchown'", "]", "(", "path", ",", "user", ",", "group", ")", "except", "OSError", ":", "pass", "return", "_check_symlink_ownership", "(", "path", ",", "user", ",", "group", ",", "win_owner", ")" ]
Set the ownership of a symlink and return a boolean indicating success/failure
[ "Set", "the", "ownership", "of", "a", "symlink", "and", "return", "a", "boolean", "indicating", "success", "/", "failure" ]
python
train
31.125
carljm/django-adminfiles
adminfiles/flickr.py
https://github.com/carljm/django-adminfiles/blob/b01dc7be266305d575c11d5ff9a37ccac04a78c2/adminfiles/flickr.py#L236-L254
def getSizes(self): """ Get all the available sizes of the current image, and all available data about them. Returns: A list of dicts with the size data. """ method = 'flickr.photos.getSizes' data = _doget(method, photo_id=self.id) ret = [] # The given props are those that we return and the according types, since # return width and height as string would make "75">"100" be True, which # is just error prone. props = {'url':str,'width':int,'height':int,'label':str,'source':str,'text':str} for psize in data.rsp.sizes.size: d = {} for prop,convert_to_type in props.items(): d[prop] = convert_to_type(getattr(psize, prop)) ret.append(d) return ret
[ "def", "getSizes", "(", "self", ")", ":", "method", "=", "'flickr.photos.getSizes'", "data", "=", "_doget", "(", "method", ",", "photo_id", "=", "self", ".", "id", ")", "ret", "=", "[", "]", "# The given props are those that we return and the according types, since", "# return width and height as string would make \"75\">\"100\" be True, which ", "# is just error prone.", "props", "=", "{", "'url'", ":", "str", ",", "'width'", ":", "int", ",", "'height'", ":", "int", ",", "'label'", ":", "str", ",", "'source'", ":", "str", ",", "'text'", ":", "str", "}", "for", "psize", "in", "data", ".", "rsp", ".", "sizes", ".", "size", ":", "d", "=", "{", "}", "for", "prop", ",", "convert_to_type", "in", "props", ".", "items", "(", ")", ":", "d", "[", "prop", "]", "=", "convert_to_type", "(", "getattr", "(", "psize", ",", "prop", ")", ")", "ret", ".", "append", "(", "d", ")", "return", "ret" ]
Get all the available sizes of the current image, and all available data about them. Returns: A list of dicts with the size data.
[ "Get", "all", "the", "available", "sizes", "of", "the", "current", "image", "and", "all", "available", "data", "about", "them", ".", "Returns", ":", "A", "list", "of", "dicts", "with", "the", "size", "data", "." ]
python
train
41.842105
gpoulter/python-ngram
scripts/csvjoin.py
https://github.com/gpoulter/python-ngram/blob/f8543bdc84a4d24ac60a48b36c4034f881664491/scripts/csvjoin.py#L16-L20
def lowstrip(term): """Convert to lowercase and strip spaces""" term = re.sub('\s+', ' ', term) term = term.lower() return term
[ "def", "lowstrip", "(", "term", ")", ":", "term", "=", "re", ".", "sub", "(", "'\\s+'", ",", "' '", ",", "term", ")", "term", "=", "term", ".", "lower", "(", ")", "return", "term" ]
Convert to lowercase and strip spaces
[ "Convert", "to", "lowercase", "and", "strip", "spaces" ]
python
train
27.8
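A runnable sketch of the helper above; note that despite its name it collapses internal whitespace runs but does not trim the ends of the string:

import re

def lowstrip(term):
    """Collapse whitespace runs to single spaces and lowercase."""
    term = re.sub(r'\s+', ' ', term)
    return term.lower()

print(lowstrip("Guido  van\tRossum"))    # 'guido van rossum'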
SMTG-UCL/sumo
sumo/symmetry/seekpath_kpath.py
https://github.com/SMTG-UCL/sumo/blob/47aec6bbfa033a624435a65bd4edabd18bfb437f/sumo/symmetry/seekpath_kpath.py#L48-L96
def kpath_from_seekpath(cls, seekpath, point_coords): r"""Convert seekpath-formatted kpoints path to sumo-preferred format. If 'GAMMA' is used as a label this will be replaced by '\Gamma'. Args: seekpath (list): A :obj:`list` of 2-tuples containing the labels at each side of each segment of the k-point path:: [(A, B), (B, C), (C, D), ...] where a break in the sequence is indicated by a non-repeating label. E.g.:: [(A, B), (B, C), (D, E), ...] for a break between C and D. point_coords (dict): Dict of coordinates corresponding to k-point labels:: {'GAMMA': [0., 0., 0.], ...} Returns: dict: The path and k-points as:: { 'path', [[l1, l2, l3], [l4, l5], ...], 'kpoints', {l1: [a1, b1, c1], l2: [a2, b2, c2], ...} } """ # convert from seekpath format e.g. [(l1, l2), (l2, l3), (l4, l5)] # to our preferred representation [[l1, l2, l3], [l4, l5]] path = [[seekpath[0][0]]] for (k1, k2) in seekpath: if path[-1] and path[-1][-1] == k1: path[-1].append(k2) else: path.append([k1, k2]) # Rebuild kpoints dictionary skipping any positions not on path # (chain(*list) flattens nested list; set() removes duplicates.) kpoints = {p: point_coords[p] for p in set(chain(*path))} # Every path should include Gamma-point. Change the label to \Gamma assert 'GAMMA' in kpoints kpoints[r'\Gamma'] = kpoints.pop('GAMMA') path = [[label.replace('GAMMA', r'\Gamma') for label in subpath] for subpath in path] return {'kpoints': kpoints, 'path': path}
[ "def", "kpath_from_seekpath", "(", "cls", ",", "seekpath", ",", "point_coords", ")", ":", "# convert from seekpath format e.g. [(l1, l2), (l2, l3), (l4, l5)]", "# to our preferred representation [[l1, l2, l3], [l4, l5]]", "path", "=", "[", "[", "seekpath", "[", "0", "]", "[", "0", "]", "]", "]", "for", "(", "k1", ",", "k2", ")", "in", "seekpath", ":", "if", "path", "[", "-", "1", "]", "and", "path", "[", "-", "1", "]", "[", "-", "1", "]", "==", "k1", ":", "path", "[", "-", "1", "]", ".", "append", "(", "k2", ")", "else", ":", "path", ".", "append", "(", "[", "k1", ",", "k2", "]", ")", "# Rebuild kpoints dictionary skipping any positions not on path", "# (chain(*list) flattens nested list; set() removes duplicates.)", "kpoints", "=", "{", "p", ":", "point_coords", "[", "p", "]", "for", "p", "in", "set", "(", "chain", "(", "*", "path", ")", ")", "}", "# Every path should include Gamma-point. Change the label to \\Gamma", "assert", "'GAMMA'", "in", "kpoints", "kpoints", "[", "r'\\Gamma'", "]", "=", "kpoints", ".", "pop", "(", "'GAMMA'", ")", "path", "=", "[", "[", "label", ".", "replace", "(", "'GAMMA'", ",", "r'\\Gamma'", ")", "for", "label", "in", "subpath", "]", "for", "subpath", "in", "path", "]", "return", "{", "'kpoints'", ":", "kpoints", ",", "'path'", ":", "path", "}" ]
r"""Convert seekpath-formatted kpoints path to sumo-preferred format. If 'GAMMA' is used as a label this will be replaced by '\Gamma'. Args: seekpath (list): A :obj:`list` of 2-tuples containing the labels at each side of each segment of the k-point path:: [(A, B), (B, C), (C, D), ...] where a break in the sequence is indicated by a non-repeating label. E.g.:: [(A, B), (B, C), (D, E), ...] for a break between C and D. point_coords (dict): Dict of coordinates corresponding to k-point labels:: {'GAMMA': [0., 0., 0.], ...} Returns: dict: The path and k-points as:: { 'path', [[l1, l2, l3], [l4, l5], ...], 'kpoints', {l1: [a1, b1, c1], l2: [a2, b2, c2], ...} }
[ "r", "Convert", "seekpath", "-", "formatted", "kpoints", "path", "to", "sumo", "-", "preferred", "format", "." ]
python
train
37.816327
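A minimal sketch of just the segment-merging step above, with hypothetical k-point labels; the coordinate remapping and the GAMMA relabelling are omitted:

seekpath = [('GAMMA', 'X'), ('X', 'M'), ('K', 'GAMMA')]    # hypothetical segments
path = [[seekpath[0][0]]]
for k1, k2 in seekpath:
    if path[-1][-1] == k1:
        path[-1].append(k2)      # contiguous with the previous segment
    else:
        path.append([k1, k2])    # break in the path, start a new branch
print(path)                      # [['GAMMA', 'X', 'M'], ['K', 'GAMMA']]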
Robpol86/colorclass
colorclass/color.py
https://github.com/Robpol86/colorclass/blob/692e2d6f5ad470b6221c8cb9641970dc5563a572/colorclass/color.py#L163-L172
def bgmagenta(cls, string, auto=False): """Color-code entire string. :param str string: String to colorize. :param bool auto: Enable auto-color (dark/light terminal). :return: Class instance for colorized string. :rtype: Color """ return cls.colorize('bgmagenta', string, auto=auto)
[ "def", "bgmagenta", "(", "cls", ",", "string", ",", "auto", "=", "False", ")", ":", "return", "cls", ".", "colorize", "(", "'bgmagenta'", ",", "string", ",", "auto", "=", "auto", ")" ]
Color-code entire string. :param str string: String to colorize. :param bool auto: Enable auto-color (dark/light terminal). :return: Class instance for colorized string. :rtype: Color
[ "Color", "-", "code", "entire", "string", "." ]
python
train
33.1
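A usage sketch based on the classmethod shown in the record, assuming colorclass exposes it on the top-level Color class:

from colorclass import Color

print(Color.bgmagenta('deploy complete'))              # fixed background colour
print(Color.bgmagenta('deploy complete', auto=True))   # adapt to dark/light terminals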
proofit404/service-factory
service_factory/validation.py
https://github.com/proofit404/service-factory/blob/a09d4e097e5599244564a2a7f0611e58efb4156a/service_factory/validation.py#L34-L40
def validate_params(request): """Validate request params.""" if 'params' in request: correct_params = isinstance(request['params'], (list, dict)) error = 'Incorrect parameter values' assert correct_params, error
[ "def", "validate_params", "(", "request", ")", ":", "if", "'params'", "in", "request", ":", "correct_params", "=", "isinstance", "(", "request", "[", "'params'", "]", ",", "(", "list", ",", "dict", ")", ")", "error", "=", "'Incorrect parameter values'", "assert", "correct_params", ",", "error" ]
Validate request params.
[ "Validate", "request", "params", "." ]
python
test
34
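A runnable sketch of the check above against two hypothetical JSON-RPC requests:

def validate_params(request):
    if 'params' in request:
        assert isinstance(request['params'], (list, dict)), 'Incorrect parameter values'

validate_params({'jsonrpc': '2.0', 'method': 'add', 'params': [1, 2], 'id': 1})   # passes
try:
    validate_params({'jsonrpc': '2.0', 'method': 'add', 'params': 'oops', 'id': 2})
except AssertionError as err:
    print(err)    # Incorrect parameter values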
timothyb0912/pylogit
pylogit/bootstrap_sampler.py
https://github.com/timothyb0912/pylogit/blob/f83b0fd6debaa7358d87c3828428f6d4ead71357/pylogit/bootstrap_sampler.py#L301-L360
def create_bootstrap_dataframe(orig_df, obs_id_col, resampled_obs_ids_1d, groupby_dict, boot_id_col="bootstrap_id"): """ Will create the altered dataframe of data needed to estimate a choice model with the particular observations that belong to the current bootstrap sample. Parameters ---------- orig_df : pandas DataFrame. Should be long-format dataframe containing the data used to estimate the desired choice model. obs_id_col : str. Should be a column name within `orig_df`. Should denote the original observation id column. resampled_obs_ids_1d : 1D ndarray of ints. Each value should represent the alternative id of a given bootstrap replicate. groupby_dict : dict. Each key will be a unique value in `orig_df[obs_id_col]` and each value will be the rows of `orig_df` where `orig_df[obs_id_col] == key`. boot_id_col : str, optional. Denotes the new column that will be created to specify the bootstrap observation ids for choice model estimation. Returns ------- bootstrap_df : pandas Dataframe. Will contain all the same columns as `orig_df` as well as the additional `boot_id_col`. For each value in `resampled_obs_ids_1d`, `bootstrap_df` will contain the long format rows from `orig_df` that have the given observation id. """ # Check the validity of the passed arguments. check_column_existence(obs_id_col, orig_df, presence=True) check_column_existence(boot_id_col, orig_df, presence=False) # Alias the observation id column obs_id_values = orig_df[obs_id_col].values # Check the validity of the resampled observation ids. ensure_resampled_obs_ids_in_df(resampled_obs_ids_1d, obs_id_values) # Initialize a list to store the component dataframes that will be # concatenated to form the final bootstrap_df component_dfs = [] # Populate component_dfs for boot_id, obs_id in enumerate(resampled_obs_ids_1d): # Extract the dataframe that we desire. extracted_df = groupby_dict[obs_id].copy() # Add the bootstrap id value. extracted_df[boot_id_col] = boot_id + 1 # Store the component dataframe component_dfs.append(extracted_df) # Create and return the desired dataframe. bootstrap_df = pd.concat(component_dfs, axis=0, ignore_index=True) return bootstrap_df
[ "def", "create_bootstrap_dataframe", "(", "orig_df", ",", "obs_id_col", ",", "resampled_obs_ids_1d", ",", "groupby_dict", ",", "boot_id_col", "=", "\"bootstrap_id\"", ")", ":", "# Check the validity of the passed arguments.", "check_column_existence", "(", "obs_id_col", ",", "orig_df", ",", "presence", "=", "True", ")", "check_column_existence", "(", "boot_id_col", ",", "orig_df", ",", "presence", "=", "False", ")", "# Alias the observation id column", "obs_id_values", "=", "orig_df", "[", "obs_id_col", "]", ".", "values", "# Check the validity of the resampled observation ids.", "ensure_resampled_obs_ids_in_df", "(", "resampled_obs_ids_1d", ",", "obs_id_values", ")", "# Initialize a list to store the component dataframes that will be", "# concatenated to form the final bootstrap_df", "component_dfs", "=", "[", "]", "# Populate component_dfs", "for", "boot_id", ",", "obs_id", "in", "enumerate", "(", "resampled_obs_ids_1d", ")", ":", "# Extract the dataframe that we desire.", "extracted_df", "=", "groupby_dict", "[", "obs_id", "]", ".", "copy", "(", ")", "# Add the bootstrap id value.", "extracted_df", "[", "boot_id_col", "]", "=", "boot_id", "+", "1", "# Store the component dataframe", "component_dfs", ".", "append", "(", "extracted_df", ")", "# Create and return the desired dataframe.", "bootstrap_df", "=", "pd", ".", "concat", "(", "component_dfs", ",", "axis", "=", "0", ",", "ignore_index", "=", "True", ")", "return", "bootstrap_df" ]
Will create the altered dataframe of data needed to estimate a choice model with the particular observations that belong to the current bootstrap sample. Parameters ---------- orig_df : pandas DataFrame. Should be long-format dataframe containing the data used to estimate the desired choice model. obs_id_col : str. Should be a column name within `orig_df`. Should denote the original observation id column. resampled_obs_ids_1d : 1D ndarray of ints. Each value should represent the alternative id of a given bootstrap replicate. groupby_dict : dict. Each key will be a unique value in `orig_df[obs_id_col]` and each value will be the rows of `orig_df` where `orig_df[obs_id_col] == key`. boot_id_col : str, optional. Denotes the new column that will be created to specify the bootstrap observation ids for choice model estimation. Returns ------- bootstrap_df : pandas Dataframe. Will contain all the same columns as `orig_df` as well as the additional `boot_id_col`. For each value in `resampled_obs_ids_1d`, `bootstrap_df` will contain the long format rows from `orig_df` that have the given observation id.
[ "Will", "create", "the", "altered", "dataframe", "of", "data", "needed", "to", "estimate", "a", "choice", "model", "with", "the", "particular", "observations", "that", "belong", "to", "the", "current", "bootstrap", "sample", "." ]
python
train
41.916667
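A small sketch of how the inputs to the function above could be prepared with pandas and numpy; the column names and data are hypothetical, and the final call is shown commented out because the function itself lives in pylogit:

import numpy as np
import pandas as pd

df = pd.DataFrame({'obs_id': [1, 1, 2, 2, 3],
                   'alt_id': [0, 1, 0, 1, 0],
                   'choice': [1, 0, 0, 1, 1]})

# One long-format sub-frame per original observation id.
groupby_dict = {obs_id: rows for obs_id, rows in df.groupby('obs_id')}

# Observation ids drawn with replacement for one bootstrap replicate.
resampled_ids = np.random.choice(df['obs_id'].unique(), size=3, replace=True)

# boot_df = create_bootstrap_dataframe(df, 'obs_id', resampled_ids, groupby_dict)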
CI-WATER/gsshapy
gsshapy/orm/spn.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/spn.py#L138-L179
def _createSlink(self, slinks): """ Create GSSHAPY SuperLink, Pipe, and SuperNode Objects Method """ for slink in slinks: # Create GSSHAPY SuperLink object superLink = SuperLink(slinkNumber=slink['slinkNumber'], numPipes=slink['numPipes']) # Associate SuperLink with StormPipeNetworkFile superLink.stormPipeNetworkFile = self for node in slink['nodes']: # Create GSSHAPY SuperNode objects superNode = SuperNode(nodeNumber=node['nodeNumber'], groundSurfaceElev=node['groundSurfaceElev'], invertElev=node['invertElev'], manholeSA=node['manholeSA'], nodeInletCode=node['inletCode'], cellI=node['cellI'], cellJ=node['cellJ'], weirSideLength=node['weirSideLength'], orificeDiameter=node['orificeDiameter']) # Associate SuperNode with SuperLink superNode.superLink = superLink for p in slink['pipes']: # Create GSSHAPY Pipe objects pipe = Pipe(pipeNumber=p['pipeNumber'], xSecType=p['xSecType'], diameterOrHeight=p['diameterOrHeight'], width=p['width'], slope=p['slope'], roughness=p['roughness'], length=p['length'], conductance=p['conductance'], drainSpacing=p['drainSpacing']) # Associate Pipe with SuperLink pipe.superLink = superLink
[ "def", "_createSlink", "(", "self", ",", "slinks", ")", ":", "for", "slink", "in", "slinks", ":", "# Create GSSHAPY SuperLink object", "superLink", "=", "SuperLink", "(", "slinkNumber", "=", "slink", "[", "'slinkNumber'", "]", ",", "numPipes", "=", "slink", "[", "'numPipes'", "]", ")", "# Associate SuperLink with StormPipeNetworkFile", "superLink", ".", "stormPipeNetworkFile", "=", "self", "for", "node", "in", "slink", "[", "'nodes'", "]", ":", "# Create GSSHAPY SuperNode objects", "superNode", "=", "SuperNode", "(", "nodeNumber", "=", "node", "[", "'nodeNumber'", "]", ",", "groundSurfaceElev", "=", "node", "[", "'groundSurfaceElev'", "]", ",", "invertElev", "=", "node", "[", "'invertElev'", "]", ",", "manholeSA", "=", "node", "[", "'manholeSA'", "]", ",", "nodeInletCode", "=", "node", "[", "'inletCode'", "]", ",", "cellI", "=", "node", "[", "'cellI'", "]", ",", "cellJ", "=", "node", "[", "'cellJ'", "]", ",", "weirSideLength", "=", "node", "[", "'weirSideLength'", "]", ",", "orificeDiameter", "=", "node", "[", "'orificeDiameter'", "]", ")", "# Associate SuperNode with SuperLink", "superNode", ".", "superLink", "=", "superLink", "for", "p", "in", "slink", "[", "'pipes'", "]", ":", "# Create GSSHAPY Pipe objects", "pipe", "=", "Pipe", "(", "pipeNumber", "=", "p", "[", "'pipeNumber'", "]", ",", "xSecType", "=", "p", "[", "'xSecType'", "]", ",", "diameterOrHeight", "=", "p", "[", "'diameterOrHeight'", "]", ",", "width", "=", "p", "[", "'width'", "]", ",", "slope", "=", "p", "[", "'slope'", "]", ",", "roughness", "=", "p", "[", "'roughness'", "]", ",", "length", "=", "p", "[", "'length'", "]", ",", "conductance", "=", "p", "[", "'conductance'", "]", ",", "drainSpacing", "=", "p", "[", "'drainSpacing'", "]", ")", "# Associate Pipe with SuperLink", "pipe", ".", "superLink", "=", "superLink" ]
Create GSSHAPY SuperLink, Pipe, and SuperNode Objects Method
[ "Create", "GSSHAPY", "SuperLink", "Pipe", "and", "SuperNode", "Objects", "Method" ]
python
train
45.02381
wglass/lighthouse
lighthouse/checks/tcp.py
https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/checks/tcp.py#L54-L94
def perform(self): """ Performs a straightforward TCP request and response. Sends the TCP `query` to the proper host and port, and loops over the socket, gathering response chunks until a full line is acquired. If the response line matches the expected value, the check passes. If not, the check fails. The check will also fail if there's an error during any step of the send/receive process. """ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((self.host, self.port)) # if no query/response is defined, a successful connection is a pass if not self.query: sock.close() return True try: sock.sendall(self.query) except Exception: logger.exception("Error sending TCP query message.") sock.close() return False response, extra = sockutils.get_response(sock) logger.debug("response: %s (extra: %s)", response, extra) if response != self.expected_response: logger.warn( "Response does not match expected value: %s (expected %s)", response, self.expected_response ) sock.close() return False sock.close() return True
[ "def", "perform", "(", "self", ")", ":", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_STREAM", ")", "sock", ".", "connect", "(", "(", "self", ".", "host", ",", "self", ".", "port", ")", ")", "# if no query/response is defined, a successful connection is a pass", "if", "not", "self", ".", "query", ":", "sock", ".", "close", "(", ")", "return", "True", "try", ":", "sock", ".", "sendall", "(", "self", ".", "query", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "\"Error sending TCP query message.\"", ")", "sock", ".", "close", "(", ")", "return", "False", "response", ",", "extra", "=", "sockutils", ".", "get_response", "(", "sock", ")", "logger", ".", "debug", "(", "\"response: %s (extra: %s)\"", ",", "response", ",", "extra", ")", "if", "response", "!=", "self", ".", "expected_response", ":", "logger", ".", "warn", "(", "\"Response does not match expected value: %s (expected %s)\"", ",", "response", ",", "self", ".", "expected_response", ")", "sock", ".", "close", "(", ")", "return", "False", "sock", ".", "close", "(", ")", "return", "True" ]
Performs a straightforward TCP request and response. Sends the TCP `query` to the proper host and port, and loops over the socket, gathering response chunks until a full line is acquired. If the response line matches the expected value, the check passes. If not, the check fails. The check will also fail if there's an error during any step of the send/receive process.
[ "Performs", "a", "straightforward", "TCP", "request", "and", "response", "." ]
python
train
31.878049
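A self-contained sketch of the same query/response probe; it uses a single recv instead of the line-assembly helper (sockutils.get_response) referenced in the record, and the Redis ping in the comment is only an example:

import socket

def tcp_check(host, port, query=None, expected=None, timeout=5.0):
    sock = socket.create_connection((host, port), timeout=timeout)
    try:
        if not query:
            return True               # a successful connection is a pass
        sock.sendall(query)
        response = sock.recv(4096)    # simplified: one read, no line assembly
        return response == expected
    except OSError:
        return False
    finally:
        sock.close()

# tcp_check('127.0.0.1', 6379, b'PING\r\n', b'+PONG\r\n')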
pavoni/pyvera
pyvera/subscribe.py
https://github.com/pavoni/pyvera/blob/e05e3d13f76153444787d31948feb5419d77a8c8/pyvera/subscribe.py#L55-L67
def unregister(self, device, callback): """Remove a registered a callback. device: device that has the subscription callback: callback used in original registration """ if not device: logger.error("Received an invalid device: %r", device) return logger.debug("Removing subscription for {}".format(device.name)) self._callbacks[device].remove(callback) self._devices[device.vera_device_id].remove(device)
[ "def", "unregister", "(", "self", ",", "device", ",", "callback", ")", ":", "if", "not", "device", ":", "logger", ".", "error", "(", "\"Received an invalid device: %r\"", ",", "device", ")", "return", "logger", ".", "debug", "(", "\"Removing subscription for {}\"", ".", "format", "(", "device", ".", "name", ")", ")", "self", ".", "_callbacks", "[", "device", "]", ".", "remove", "(", "callback", ")", "self", ".", "_devices", "[", "device", ".", "vera_device_id", "]", ".", "remove", "(", "device", ")" ]
Remove a registered callback. device: device that has the subscription callback: callback used in original registration
[ "Remove", "a", "registered", "a", "callback", "." ]
python
train
37
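For context on the record above, unregister() undoes an earlier register() call on the same subscription registry, and the docstring's note that the callback must be the one "used in original registration" is the key constraint. A minimal usage sketch, assuming a registry object exposing the matching register() method and an already discovered Vera device (the variable names are hypothetical):

def on_change(device):
    # react to a state change pushed by the Vera controller
    print("Device updated:", device.name)

registry.register(device, on_change)    # start receiving updates for this device
# ... later, when updates are no longer wanted ...
registry.unregister(device, on_change)  # must pass the same callback object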
saltstack/salt
salt/log/setup.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/log/setup.py#L562-L739
def setup_logfile_logger(log_path, log_level='error', log_format=None, date_format=None, max_bytes=0, backup_count=0): ''' Setup the logfile logger Since version 0.10.6 we support logging to syslog, some examples: tcp://localhost:514/LOG_USER tcp://localhost/LOG_DAEMON udp://localhost:5145/LOG_KERN udp://localhost file:///dev/log file:///dev/log/LOG_SYSLOG file:///dev/log/LOG_DAEMON The above examples are self explanatory, but: <file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility> If you're thinking on doing remote logging you might also be thinking that you could point salt's logging to the remote syslog. **Please Don't!** An issue has been reported when doing this over TCP when the logged lines get concatenated. See #3061. The preferred way to do remote logging is setup a local syslog, point salt's logging to the local syslog(unix socket is much faster) and then have the local syslog forward the log messages to the remote syslog. ''' if is_logfile_configured(): logging.getLogger(__name__).warning('Logfile logging already configured') return if log_path is None: logging.getLogger(__name__).warning( 'log_path setting is set to `None`. Nothing else to do' ) return # Remove the temporary logging handler __remove_temp_logging_handler() if log_level is None: log_level = 'warning' level = LOG_LEVELS.get(log_level.lower(), logging.ERROR) parsed_log_path = urlparse(log_path) root_logger = logging.getLogger() if parsed_log_path.scheme in ('tcp', 'udp', 'file'): syslog_opts = { 'facility': SysLogHandler.LOG_USER, 'socktype': socket.SOCK_DGRAM } if parsed_log_path.scheme == 'file' and parsed_log_path.path: facility_name = parsed_log_path.path.split(os.sep)[-1].upper() if not facility_name.startswith('LOG_'): # The user is not specifying a syslog facility facility_name = 'LOG_USER' # Syslog default syslog_opts['address'] = parsed_log_path.path else: # The user has set a syslog facility, let's update the path to # the logging socket syslog_opts['address'] = os.sep.join( parsed_log_path.path.split(os.sep)[:-1] ) elif parsed_log_path.path: # In case of udp or tcp with a facility specified facility_name = parsed_log_path.path.lstrip(os.sep).upper() if not facility_name.startswith('LOG_'): # Logging facilities start with LOG_ if this is not the case # fail right now! raise RuntimeError( 'The syslog facility \'{0}\' is not known'.format( facility_name ) ) else: # This is the case of udp or tcp without a facility specified facility_name = 'LOG_USER' # Syslog default facility = getattr( SysLogHandler, facility_name, None ) if facility is None: # This python syslog version does not know about the user provided # facility name raise RuntimeError( 'The syslog facility \'{0}\' is not known'.format( facility_name ) ) syslog_opts['facility'] = facility if parsed_log_path.scheme == 'tcp': # tcp syslog support was only added on python versions >= 2.7 if sys.version_info < (2, 7): raise RuntimeError( 'Python versions lower than 2.7 do not support logging ' 'to syslog using tcp sockets' ) syslog_opts['socktype'] = socket.SOCK_STREAM if parsed_log_path.scheme in ('tcp', 'udp'): syslog_opts['address'] = ( parsed_log_path.hostname, parsed_log_path.port or logging.handlers.SYSLOG_UDP_PORT ) if sys.version_info < (2, 7) or parsed_log_path.scheme == 'file': # There's not socktype support on python versions lower than 2.7 syslog_opts.pop('socktype', None) try: # Et voilá! Finally our syslog handler instance handler = SysLogHandler(**syslog_opts) except socket.error as err: logging.getLogger(__name__).error( 'Failed to setup the Syslog logging handler: %s', err ) shutdown_multiprocessing_logging_listener() sys.exit(2) else: # make sure, the logging directory exists and attempt to create it if necessary log_dir = os.path.dirname(log_path) if not os.path.exists(log_dir): logging.getLogger(__name__).info( 'Log directory not found, trying to create it: %s', log_dir ) try: os.makedirs(log_dir, mode=0o700) except OSError as ose: logging.getLogger(__name__).warning( 'Failed to create directory for log file: %s (%s)', log_dir, ose ) return try: # Logfile logging is UTF-8 on purpose. # Since salt uses YAML and YAML uses either UTF-8 or UTF-16, if a # user is not using plain ASCII, their system should be ready to # handle UTF-8. if max_bytes > 0: handler = RotatingFileHandler(log_path, mode='a', maxBytes=max_bytes, backupCount=backup_count, encoding='utf-8', delay=0) else: handler = WatchedFileHandler(log_path, mode='a', encoding='utf-8', delay=0) except (IOError, OSError): logging.getLogger(__name__).warning( 'Failed to open log file, do you have permission to write to %s?', log_path ) # Do not proceed with any more configuration since it will fail, we # have the console logging already setup and the user should see # the error. return handler.setLevel(level) # Set the default console formatter config if not log_format: log_format = '%(asctime)s [%(name)-15s][%(levelname)-8s] %(message)s' if not date_format: date_format = '%Y-%m-%d %H:%M:%S' formatter = logging.Formatter(log_format, datefmt=date_format) handler.setFormatter(formatter) root_logger.addHandler(handler) global __LOGFILE_CONFIGURED global __LOGGING_LOGFILE_HANDLER __LOGFILE_CONFIGURED = True __LOGGING_LOGFILE_HANDLER = handler
[ "def", "setup_logfile_logger", "(", "log_path", ",", "log_level", "=", "'error'", ",", "log_format", "=", "None", ",", "date_format", "=", "None", ",", "max_bytes", "=", "0", ",", "backup_count", "=", "0", ")", ":", "if", "is_logfile_configured", "(", ")", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "warning", "(", "'Logfile logging already configured'", ")", "return", "if", "log_path", "is", "None", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "warning", "(", "'log_path setting is set to `None`. Nothing else to do'", ")", "return", "# Remove the temporary logging handler", "__remove_temp_logging_handler", "(", ")", "if", "log_level", "is", "None", ":", "log_level", "=", "'warning'", "level", "=", "LOG_LEVELS", ".", "get", "(", "log_level", ".", "lower", "(", ")", ",", "logging", ".", "ERROR", ")", "parsed_log_path", "=", "urlparse", "(", "log_path", ")", "root_logger", "=", "logging", ".", "getLogger", "(", ")", "if", "parsed_log_path", ".", "scheme", "in", "(", "'tcp'", ",", "'udp'", ",", "'file'", ")", ":", "syslog_opts", "=", "{", "'facility'", ":", "SysLogHandler", ".", "LOG_USER", ",", "'socktype'", ":", "socket", ".", "SOCK_DGRAM", "}", "if", "parsed_log_path", ".", "scheme", "==", "'file'", "and", "parsed_log_path", ".", "path", ":", "facility_name", "=", "parsed_log_path", ".", "path", ".", "split", "(", "os", ".", "sep", ")", "[", "-", "1", "]", ".", "upper", "(", ")", "if", "not", "facility_name", ".", "startswith", "(", "'LOG_'", ")", ":", "# The user is not specifying a syslog facility", "facility_name", "=", "'LOG_USER'", "# Syslog default", "syslog_opts", "[", "'address'", "]", "=", "parsed_log_path", ".", "path", "else", ":", "# The user has set a syslog facility, let's update the path to", "# the logging socket", "syslog_opts", "[", "'address'", "]", "=", "os", ".", "sep", ".", "join", "(", "parsed_log_path", ".", "path", ".", "split", "(", "os", ".", "sep", ")", "[", ":", "-", "1", "]", ")", "elif", "parsed_log_path", ".", "path", ":", "# In case of udp or tcp with a facility specified", "facility_name", "=", "parsed_log_path", ".", "path", ".", "lstrip", "(", "os", ".", "sep", ")", ".", "upper", "(", ")", "if", "not", "facility_name", ".", "startswith", "(", "'LOG_'", ")", ":", "# Logging facilities start with LOG_ if this is not the case", "# fail right now!", "raise", "RuntimeError", "(", "'The syslog facility \\'{0}\\' is not known'", ".", "format", "(", "facility_name", ")", ")", "else", ":", "# This is the case of udp or tcp without a facility specified", "facility_name", "=", "'LOG_USER'", "# Syslog default", "facility", "=", "getattr", "(", "SysLogHandler", ",", "facility_name", ",", "None", ")", "if", "facility", "is", "None", ":", "# This python syslog version does not know about the user provided", "# facility name", "raise", "RuntimeError", "(", "'The syslog facility \\'{0}\\' is not known'", ".", "format", "(", "facility_name", ")", ")", "syslog_opts", "[", "'facility'", "]", "=", "facility", "if", "parsed_log_path", ".", "scheme", "==", "'tcp'", ":", "# tcp syslog support was only added on python versions >= 2.7", "if", "sys", ".", "version_info", "<", "(", "2", ",", "7", ")", ":", "raise", "RuntimeError", "(", "'Python versions lower than 2.7 do not support logging '", "'to syslog using tcp sockets'", ")", "syslog_opts", "[", "'socktype'", "]", "=", "socket", ".", "SOCK_STREAM", "if", "parsed_log_path", ".", "scheme", "in", "(", "'tcp'", ",", "'udp'", ")", ":", "syslog_opts", "[", "'address'", "]", "=", "(", 
"parsed_log_path", ".", "hostname", ",", "parsed_log_path", ".", "port", "or", "logging", ".", "handlers", ".", "SYSLOG_UDP_PORT", ")", "if", "sys", ".", "version_info", "<", "(", "2", ",", "7", ")", "or", "parsed_log_path", ".", "scheme", "==", "'file'", ":", "# There's not socktype support on python versions lower than 2.7", "syslog_opts", ".", "pop", "(", "'socktype'", ",", "None", ")", "try", ":", "# Et voilá! Finally our syslog handler instance", "handler", "=", "SysLogHandler", "(", "*", "*", "syslog_opts", ")", "except", "socket", ".", "error", "as", "err", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "error", "(", "'Failed to setup the Syslog logging handler: %s'", ",", "err", ")", "shutdown_multiprocessing_logging_listener", "(", ")", "sys", ".", "exit", "(", "2", ")", "else", ":", "# make sure, the logging directory exists and attempt to create it if necessary", "log_dir", "=", "os", ".", "path", ".", "dirname", "(", "log_path", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "log_dir", ")", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "info", "(", "'Log directory not found, trying to create it: %s'", ",", "log_dir", ")", "try", ":", "os", ".", "makedirs", "(", "log_dir", ",", "mode", "=", "0o700", ")", "except", "OSError", "as", "ose", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "warning", "(", "'Failed to create directory for log file: %s (%s)'", ",", "log_dir", ",", "ose", ")", "return", "try", ":", "# Logfile logging is UTF-8 on purpose.", "# Since salt uses YAML and YAML uses either UTF-8 or UTF-16, if a", "# user is not using plain ASCII, their system should be ready to", "# handle UTF-8.", "if", "max_bytes", ">", "0", ":", "handler", "=", "RotatingFileHandler", "(", "log_path", ",", "mode", "=", "'a'", ",", "maxBytes", "=", "max_bytes", ",", "backupCount", "=", "backup_count", ",", "encoding", "=", "'utf-8'", ",", "delay", "=", "0", ")", "else", ":", "handler", "=", "WatchedFileHandler", "(", "log_path", ",", "mode", "=", "'a'", ",", "encoding", "=", "'utf-8'", ",", "delay", "=", "0", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "logging", ".", "getLogger", "(", "__name__", ")", ".", "warning", "(", "'Failed to open log file, do you have permission to write to %s?'", ",", "log_path", ")", "# Do not proceed with any more configuration since it will fail, we", "# have the console logging already setup and the user should see", "# the error.", "return", "handler", ".", "setLevel", "(", "level", ")", "# Set the default console formatter config", "if", "not", "log_format", ":", "log_format", "=", "'%(asctime)s [%(name)-15s][%(levelname)-8s] %(message)s'", "if", "not", "date_format", ":", "date_format", "=", "'%Y-%m-%d %H:%M:%S'", "formatter", "=", "logging", ".", "Formatter", "(", "log_format", ",", "datefmt", "=", "date_format", ")", "handler", ".", "setFormatter", "(", "formatter", ")", "root_logger", ".", "addHandler", "(", "handler", ")", "global", "__LOGFILE_CONFIGURED", "global", "__LOGGING_LOGFILE_HANDLER", "__LOGFILE_CONFIGURED", "=", "True", "__LOGGING_LOGFILE_HANDLER", "=", "handler" ]
Setup the logfile logger Since version 0.10.6 we support logging to syslog, some examples: tcp://localhost:514/LOG_USER tcp://localhost/LOG_DAEMON udp://localhost:5145/LOG_KERN udp://localhost file:///dev/log file:///dev/log/LOG_SYSLOG file:///dev/log/LOG_DAEMON The above examples are self explanatory, but: <file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility> If you're thinking on doing remote logging you might also be thinking that you could point salt's logging to the remote syslog. **Please Don't!** An issue has been reported when doing this over TCP when the logged lines get concatenated. See #3061. The preferred way to do remote logging is setup a local syslog, point salt's logging to the local syslog(unix socket is much faster) and then have the local syslog forward the log messages to the remote syslog.
[ "Setup", "the", "logfile", "logger" ]
python
train
38.764045
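The docstring above recommends logging to a local syslog and letting syslogd forward to any remote collector. A usage sketch of what that looks like with this function, based only on the URL formats the docstring lists; the facility, level, and file path below are arbitrary example values, not defaults mandated by Salt:

from salt.log.setup import setup_logfile_logger

# Local syslog over the unix socket, LOG_DAEMON facility.
setup_logfile_logger('file:///dev/log/LOG_DAEMON', log_level='info')

# Alternatively, a plain rotating logfile (only one of the two calls should
# run; a second call is ignored once logfile logging is configured):
# setup_logfile_logger('/var/log/salt/minion', log_level='warning',
#                      max_bytes=10 * 1024 * 1024, backup_count=5)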
vaexio/vaex
packages/vaex-core/vaex/dataframe.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-core/vaex/dataframe.py#L1855-L1877
def col(self): """Gives direct access to the columns only (useful for tab completion). Convenient when working with ipython in combination with small DataFrames, since this gives tab-completion. Columns can be accessed by their names, which are attributes. The attributes are currently expressions, so you can do computations with them. Example >>> df = vaex.example() >>> df.plot(df.col.x, df.col.y) """ class ColumnList(object): pass data = ColumnList() for name in self.get_column_names(): expression = getattr(self, name, None) if not isinstance(expression, Expression): expression = Expression(self, name) setattr(data, name, expression) return data
[ "def", "col", "(", "self", ")", ":", "class", "ColumnList", "(", "object", ")", ":", "pass", "data", "=", "ColumnList", "(", ")", "for", "name", "in", "self", ".", "get_column_names", "(", ")", ":", "expression", "=", "getattr", "(", "self", ",", "name", ",", "None", ")", "if", "not", "isinstance", "(", "expression", ",", "Expression", ")", ":", "expression", "=", "Expression", "(", "self", ",", "name", ")", "setattr", "(", "data", ",", "name", ",", "expression", ")", "return", "data" ]
Gives direct access to the columns only (useful for tab completion). Convenient when working with ipython in combination with small DataFrames, since this gives tab-completion. Columns can be accessed by their names, which are attributes. The attributes are currently expressions, so you can do computations with them. Example >>> df = vaex.example() >>> df.plot(df.col.x, df.col.y)
[ "Gives", "direct", "access", "to", "the", "columns", "only", "(", "useful", "for", "tab", "completion", ")", "." ]
python
test
34.565217
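Building on the vaex record above, the docstring's point that col attributes are expressions means they compose with ordinary arithmetic. A short hedged example in the same doctest style, using the bundled example dataset (the virtual-column name r is an arbitrary choice):

>>> df = vaex.example()
>>> r = (df.col.x**2 + df.col.y**2)**0.5   # arithmetic on column expressions
>>> df['r'] = r                            # store the result as a virtual column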
Qiskit/qiskit-terra
qiskit/pulse/pulse_lib/discrete.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/pulse/pulse_lib/discrete.py#L39-L46
def zero(duration: int, name: str = None) -> SamplePulse: """Generates zero-sampled `SamplePulse`. Args: duration: Duration of pulse. Must be greater than zero. name: Name of pulse. """ return _sampled_zero_pulse(duration, name=name)
[ "def", "zero", "(", "duration", ":", "int", ",", "name", ":", "str", "=", "None", ")", "->", "SamplePulse", ":", "return", "_sampled_zero_pulse", "(", "duration", ",", "name", "=", "name", ")" ]
Generates zero-sampled `SamplePulse`. Args: duration: Duration of pulse. Must be greater than zero. name: Name of pulse.
[ "Generates", "zero", "-", "sampled", "SamplePulse", "." ]
python
test
32.375
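A usage sketch for the zero() helper in the last record, assuming the discrete pulse builders are importable from qiskit.pulse.pulse_lib as the file path suggests; the duration and pulse name are arbitrary example values:

from qiskit.pulse.pulse_lib import zero

# A 64-sample pulse whose amplitude is 0 everywhere, e.g. for padding a channel.
flat = zero(64, name='padding')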