column            type           value range
repo              stringlengths  7 to 54
path              stringlengths  4 to 192
url               stringlengths  87 to 284
code              stringlengths  78 to 104k
code_tokens       list
docstring         stringlengths  1 to 46.9k
docstring_tokens  list
language          stringclasses  1 value
partition         stringclasses  3 values
ecometrica/grandfatherson
grandfatherson/filters.py
https://github.com/ecometrica/grandfatherson/blob/b166e4e44887960c3066ebd28eecadfae19561e1/grandfatherson/filters.py#L124-L129
def mask(cls, dt, **options): """ Return a datetime with the same value as ``dt``, to a resolution of days. """ return dt.replace(hour=0, minute=0, second=0, microsecond=0)
[ "def", "mask", "(", "cls", ",", "dt", ",", "*", "*", "options", ")", ":", "return", "dt", ".", "replace", "(", "hour", "=", "0", ",", "minute", "=", "0", ",", "second", "=", "0", ",", "microsecond", "=", "0", ")" ]
Return a datetime with the same value as ``dt``, to a resolution of days.
[ "Return", "a", "datetime", "with", "the", "same", "value", "as", "dt", "to", "a", "resolution", "of", "days", "." ]
python
test
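The `mask` filter in the record above truncates a datetime to day resolution by zeroing the sub-day fields. A minimal standalone sketch of the same truncation, using only the standard library (the unused `cls` and `**options` arguments are dropped):

```python
from datetime import datetime

# Truncate a timestamp to day resolution, as grandfatherson's daily filter does.
dt = datetime(2024, 3, 15, 13, 45, 30, 123456)
masked = dt.replace(hour=0, minute=0, second=0, microsecond=0)
print(masked)  # 2024-03-15 00:00:00
```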
raiden-network/raiden
raiden/connection_manager.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/connection_manager.py#L101-L166
def connect( self, funds: typing.TokenAmount, initial_channel_target: int = 3, joinable_funds_target: float = 0.4, ): """Connect to the network. Subsequent calls to `connect` are allowed, but will only affect the spendable funds and the connection strategy parameters for the future. `connect` will not close any channels. Note: the ConnectionManager does not discriminate manually opened channels from automatically opened ones. If the user manually opened channels, those deposit amounts will affect the funding per channel and the number of new channels opened. Args: funds: Target amount of tokens spendable to join the network. initial_channel_target: Target number of channels to open. joinable_funds_target: Amount of funds not initially assigned. """ token = self.raiden.chain.token(self.token_address) token_balance = token.balance_of(self.raiden.address) if token_balance < funds: raise InvalidAmount( f'Insufficient balance for token {pex(self.token_address)}', ) if funds <= 0: raise InvalidAmount( 'The funds to use in the connection need to be a positive integer', ) if joinable_funds_target < 0 or joinable_funds_target > 1: raise InvalidAmount( f'joinable_funds_target should be between 0 and 1. Given: {joinable_funds_target}', ) with self.lock: self.funds = funds self.initial_channel_target = initial_channel_target self.joinable_funds_target = joinable_funds_target log_open_channels(self.raiden, self.registry_address, self.token_address, funds) qty_network_channels = views.count_token_network_channels( views.state_from_raiden(self.raiden), self.registry_address, self.token_address, ) if not qty_network_channels: log.info( 'Bootstrapping token network.', node=pex(self.raiden.address), network_id=pex(self.registry_address), token_id=pex(self.token_address), ) self.api.channel_open( self.registry_address, self.token_address, self.BOOTSTRAP_ADDR, ) else: self._open_channels()
[ "def", "connect", "(", "self", ",", "funds", ":", "typing", ".", "TokenAmount", ",", "initial_channel_target", ":", "int", "=", "3", ",", "joinable_funds_target", ":", "float", "=", "0.4", ",", ")", ":", "token", "=", "self", ".", "raiden", ".", "chain", ".", "token", "(", "self", ".", "token_address", ")", "token_balance", "=", "token", ".", "balance_of", "(", "self", ".", "raiden", ".", "address", ")", "if", "token_balance", "<", "funds", ":", "raise", "InvalidAmount", "(", "f'Insufficient balance for token {pex(self.token_address)}'", ",", ")", "if", "funds", "<=", "0", ":", "raise", "InvalidAmount", "(", "'The funds to use in the connection need to be a positive integer'", ",", ")", "if", "joinable_funds_target", "<", "0", "or", "joinable_funds_target", ">", "1", ":", "raise", "InvalidAmount", "(", "f'joinable_funds_target should be between 0 and 1. Given: {joinable_funds_target}'", ",", ")", "with", "self", ".", "lock", ":", "self", ".", "funds", "=", "funds", "self", ".", "initial_channel_target", "=", "initial_channel_target", "self", ".", "joinable_funds_target", "=", "joinable_funds_target", "log_open_channels", "(", "self", ".", "raiden", ",", "self", ".", "registry_address", ",", "self", ".", "token_address", ",", "funds", ")", "qty_network_channels", "=", "views", ".", "count_token_network_channels", "(", "views", ".", "state_from_raiden", "(", "self", ".", "raiden", ")", ",", "self", ".", "registry_address", ",", "self", ".", "token_address", ",", ")", "if", "not", "qty_network_channels", ":", "log", ".", "info", "(", "'Bootstrapping token network.'", ",", "node", "=", "pex", "(", "self", ".", "raiden", ".", "address", ")", ",", "network_id", "=", "pex", "(", "self", ".", "registry_address", ")", ",", "token_id", "=", "pex", "(", "self", ".", "token_address", ")", ",", ")", "self", ".", "api", ".", "channel_open", "(", "self", ".", "registry_address", ",", "self", ".", "token_address", ",", "self", ".", "BOOTSTRAP_ADDR", ",", ")", "else", ":", "self", ".", "_open_channels", "(", ")" ]
Connect to the network. Subsequent calls to `connect` are allowed, but will only affect the spendable funds and the connection strategy parameters for the future. `connect` will not close any channels. Note: the ConnectionManager does not discriminate manually opened channels from automatically opened ones. If the user manually opened channels, those deposit amounts will affect the funding per channel and the number of new channels opened. Args: funds: Target amount of tokens spendable to join the network. initial_channel_target: Target number of channels to open. joinable_funds_target: Amount of funds not initially assigned.
[ "Connect", "to", "the", "network", "." ]
python
train
hyperledger/indy-sdk
vcx/wrappers/python3/vcx/api/connection.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/vcx/wrappers/python3/vcx/api/connection.py#L144-L168
async def sign_data(self, msg: bytes) -> bytes: """ Sign data using connection's pairwise key :param msg: :return: signature """ def transform_cb(arr_ptr: POINTER(c_uint8), arr_len: c_uint32): return bytes(arr_ptr[:arr_len]), if not hasattr(Connection.sign_data, "cb"): self.logger.debug("vcx_connection_sign_data: Creating callback") Connection.sign_data.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, POINTER(c_uint8), c_uint32), transform_cb) c_connection_handle = c_uint32(self.handle) c_msg_len = c_uint32(len(msg)) result = await do_call('vcx_connection_sign_data', c_connection_handle, msg, c_msg_len, Connection.sign_data.cb) self.logger.debug("vcx_connection_sign_data completed") return result
[ "async", "def", "sign_data", "(", "self", ",", "msg", ":", "bytes", ")", "->", "bytes", ":", "def", "transform_cb", "(", "arr_ptr", ":", "POINTER", "(", "c_uint8", ")", ",", "arr_len", ":", "c_uint32", ")", ":", "return", "bytes", "(", "arr_ptr", "[", ":", "arr_len", "]", ")", ",", "if", "not", "hasattr", "(", "Connection", ".", "sign_data", ",", "\"cb\"", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"vcx_connection_sign_data: Creating callback\"", ")", "Connection", ".", "sign_data", ".", "cb", "=", "create_cb", "(", "CFUNCTYPE", "(", "None", ",", "c_uint32", ",", "c_uint32", ",", "POINTER", "(", "c_uint8", ")", ",", "c_uint32", ")", ",", "transform_cb", ")", "c_connection_handle", "=", "c_uint32", "(", "self", ".", "handle", ")", "c_msg_len", "=", "c_uint32", "(", "len", "(", "msg", ")", ")", "result", "=", "await", "do_call", "(", "'vcx_connection_sign_data'", ",", "c_connection_handle", ",", "msg", ",", "c_msg_len", ",", "Connection", ".", "sign_data", ".", "cb", ")", "self", ".", "logger", ".", "debug", "(", "\"vcx_connection_sign_data completed\"", ")", "return", "result" ]
Sign data using connection's pairwise key :param msg: :return: signature
[ "Sign", "data", "using", "connection", "s", "pairwise", "key", ":", "param", "msg", ":", ":", "return", ":", "signature" ]
python
train
pri22296/beautifultable
beautifultable/beautifultable.py
https://github.com/pri22296/beautifultable/blob/c9638f73dff4bb1f341c9ee783e4e47f26efba0b/beautifultable/beautifultable.py#L963-L1016
def insert_column(self, index, header, column): """Insert a column before `index` in the table. If length of column is bigger than number of rows, lets say `k`, only the first `k` values of `column` is considered. If column is shorter than 'k', ValueError is raised. Note that Table remains in consistent state even if column is too short. Any changes made by this method is rolled back before raising the exception. Parameters ---------- index : int List index rules apply. header : str Title of the column. column : iterable Any iterable of appropriate length. Raises ------ TypeError: If `header` is not of type `str`. ValueError: If length of `column` is shorter than number of rows. """ if self._column_count == 0: self.column_headers = HeaderData(self, [header]) self._table = [RowData(self, [i]) for i in column] else: if not isinstance(header, basestring): raise TypeError("header must be of type str") column_length = 0 for i, (row, new_item) in enumerate(zip(self._table, column)): row._insert(index, new_item) column_length = i if column_length == len(self._table) - 1: self._column_count += 1 self._column_headers._insert(index, header) self._column_alignments._insert(index, self.default_alignment) self._column_widths._insert(index, 0) self._left_padding_widths._insert(index, self.default_padding) self._right_padding_widths._insert(index, self.default_padding) else: # Roll back changes so that table remains in consistent state for j in range(column_length, -1, -1): self._table[j]._pop(index) raise ValueError(("length of 'column' should be atleast {}, " "got {}").format(len(self._table), column_length + 1))
[ "def", "insert_column", "(", "self", ",", "index", ",", "header", ",", "column", ")", ":", "if", "self", ".", "_column_count", "==", "0", ":", "self", ".", "column_headers", "=", "HeaderData", "(", "self", ",", "[", "header", "]", ")", "self", ".", "_table", "=", "[", "RowData", "(", "self", ",", "[", "i", "]", ")", "for", "i", "in", "column", "]", "else", ":", "if", "not", "isinstance", "(", "header", ",", "basestring", ")", ":", "raise", "TypeError", "(", "\"header must be of type str\"", ")", "column_length", "=", "0", "for", "i", ",", "(", "row", ",", "new_item", ")", "in", "enumerate", "(", "zip", "(", "self", ".", "_table", ",", "column", ")", ")", ":", "row", ".", "_insert", "(", "index", ",", "new_item", ")", "column_length", "=", "i", "if", "column_length", "==", "len", "(", "self", ".", "_table", ")", "-", "1", ":", "self", ".", "_column_count", "+=", "1", "self", ".", "_column_headers", ".", "_insert", "(", "index", ",", "header", ")", "self", ".", "_column_alignments", ".", "_insert", "(", "index", ",", "self", ".", "default_alignment", ")", "self", ".", "_column_widths", ".", "_insert", "(", "index", ",", "0", ")", "self", ".", "_left_padding_widths", ".", "_insert", "(", "index", ",", "self", ".", "default_padding", ")", "self", ".", "_right_padding_widths", ".", "_insert", "(", "index", ",", "self", ".", "default_padding", ")", "else", ":", "# Roll back changes so that table remains in consistent state", "for", "j", "in", "range", "(", "column_length", ",", "-", "1", ",", "-", "1", ")", ":", "self", ".", "_table", "[", "j", "]", ".", "_pop", "(", "index", ")", "raise", "ValueError", "(", "(", "\"length of 'column' should be atleast {}, \"", "\"got {}\"", ")", ".", "format", "(", "len", "(", "self", ".", "_table", ")", ",", "column_length", "+", "1", ")", ")" ]
Insert a column before `index` in the table. If length of column is bigger than number of rows, lets say `k`, only the first `k` values of `column` is considered. If column is shorter than 'k', ValueError is raised. Note that Table remains in consistent state even if column is too short. Any changes made by this method is rolled back before raising the exception. Parameters ---------- index : int List index rules apply. header : str Title of the column. column : iterable Any iterable of appropriate length. Raises ------ TypeError: If `header` is not of type `str`. ValueError: If length of `column` is shorter than number of rows.
[ "Insert", "a", "column", "before", "index", "in", "the", "table", "." ]
python
train
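The `insert_column` record above inserts a column before `index` and rolls back if the supplied column is shorter than the table. A hedged usage sketch, assuming the pre-1.0 beautifultable API that this source file belongs to (`column_headers`, `append_row`):

```python
from beautifultable import BeautifulTable

table = BeautifulTable()
table.column_headers = ["name", "age"]   # pre-1.0 property used by this module
table.append_row(["alice", 30])
table.append_row(["bob", 25])

# Insert a "city" column before index 1; it must cover every existing row,
# otherwise the method rolls back and raises ValueError.
table.insert_column(1, "city", ["Paris", "Berlin"])
print(table)
```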
volafiled/python-volapi
volapi/volapi.py
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L174-L195
async def on_open(self): """DingDongmaster the connection is open""" self.__ensure_barrier() while self.connected: try: if self.__lastping > self.__lastpong: raise IOError("Last ping remained unanswered") self.send_message("2") self.send_ack() self.__lastping = time.time() await asyncio.sleep(self.ping_interval) except Exception as ex: LOGGER.exception("Failed to ping") try: self.reraise(ex) except Exception: LOGGER.exception( "failed to force close connection after ping error" ) break
[ "async", "def", "on_open", "(", "self", ")", ":", "self", ".", "__ensure_barrier", "(", ")", "while", "self", ".", "connected", ":", "try", ":", "if", "self", ".", "__lastping", ">", "self", ".", "__lastpong", ":", "raise", "IOError", "(", "\"Last ping remained unanswered\"", ")", "self", ".", "send_message", "(", "\"2\"", ")", "self", ".", "send_ack", "(", ")", "self", ".", "__lastping", "=", "time", ".", "time", "(", ")", "await", "asyncio", ".", "sleep", "(", "self", ".", "ping_interval", ")", "except", "Exception", "as", "ex", ":", "LOGGER", ".", "exception", "(", "\"Failed to ping\"", ")", "try", ":", "self", ".", "reraise", "(", "ex", ")", "except", "Exception", ":", "LOGGER", ".", "exception", "(", "\"failed to force close connection after ping error\"", ")", "break" ]
DingDongmaster the connection is open
[ "DingDongmaster", "the", "connection", "is", "open" ]
python
train
blockstack-packages/blockstack-gpg
blockstack_gpg/gpg.py
https://github.com/blockstack-packages/blockstack-gpg/blob/e4d51e4e51678d9b946596ca9dec53e2d78c8710/blockstack_gpg/gpg.py#L175-L203
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ): """ Remove a public key locally from our local app keyring Return True on success Return False on error """ assert is_valid_appname(appname) if gpghome is None: config_dir = get_config_dir( config_dir ) keydir = get_gpg_home( appname, config_dir=config_dir ) else: keydir = gpghome gpg = gnupg.GPG( homedir=keydir ) res = gpg.delete_keys( [key_id] ) if res.status == 'Must delete secret key first': # this is a private key res = gpg.delete_keys( [key_id], secret=True ) try: assert res.status == 'ok', "Failed to delete key (%s)" % res except AssertionError, e: log.exception(e) log.error("Failed to delete key '%s'" % key_id) log.debug("res: %s" % res.__dict__) return False return True
[ "def", "gpg_unstash_key", "(", "appname", ",", "key_id", ",", "config_dir", "=", "None", ",", "gpghome", "=", "None", ")", ":", "assert", "is_valid_appname", "(", "appname", ")", "if", "gpghome", "is", "None", ":", "config_dir", "=", "get_config_dir", "(", "config_dir", ")", "keydir", "=", "get_gpg_home", "(", "appname", ",", "config_dir", "=", "config_dir", ")", "else", ":", "keydir", "=", "gpghome", "gpg", "=", "gnupg", ".", "GPG", "(", "homedir", "=", "keydir", ")", "res", "=", "gpg", ".", "delete_keys", "(", "[", "key_id", "]", ")", "if", "res", ".", "status", "==", "'Must delete secret key first'", ":", "# this is a private key ", "res", "=", "gpg", ".", "delete_keys", "(", "[", "key_id", "]", ",", "secret", "=", "True", ")", "try", ":", "assert", "res", ".", "status", "==", "'ok'", ",", "\"Failed to delete key (%s)\"", "%", "res", "except", "AssertionError", ",", "e", ":", "log", ".", "exception", "(", "e", ")", "log", ".", "error", "(", "\"Failed to delete key '%s'\"", "%", "key_id", ")", "log", ".", "debug", "(", "\"res: %s\"", "%", "res", ".", "__dict__", ")", "return", "False", "return", "True" ]
Remove a public key locally from our local app keyring Return True on success Return False on error
[ "Remove", "a", "public", "key", "locally", "from", "our", "local", "app", "keyring", "Return", "True", "on", "success", "Return", "False", "on", "error" ]
python
train
orb-framework/orb
orb/core/database.py
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/database.py#L138-L146
def interrupt(self, threadId=None): """ Interrupts the thread at the given id. :param threadId | <int> || None """ back = self.backend() if back: back.interrupt(threadId)
[ "def", "interrupt", "(", "self", ",", "threadId", "=", "None", ")", ":", "back", "=", "self", ".", "backend", "(", ")", "if", "back", ":", "back", ".", "interrupt", "(", "threadId", ")" ]
Interrupts the thread at the given id. :param threadId | <int> || None
[ "Interrupts", "the", "thread", "at", "the", "given", "id", ".", ":", "param", "threadId", "|", "<int", ">", "||", "None" ]
python
train
jaredLunde/vital-tools
vital/security/__init__.py
https://github.com/jaredLunde/vital-tools/blob/ea924c9bbb6ec22aa66f8095f018b1ee0099ac04/vital/security/__init__.py#L101-L120
def aes_decrypt(value, secret, block_size=AES.block_size): """ AES decrypt @value with @secret using the |CFB| mode of AES with a cryptographically secure initialization vector. -> (#str) AES decrypted @value .. from vital.security import aes_encrypt, aes_decrypt aes_encrypt("Hello, world", "aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW") # -> 'zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw=' aes_decrypt( "zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw=", "aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW") # -> 'Hello, world' .. """ if value is not None: cipher = AES.new(secret[:32], AES.MODE_CFB, value[:block_size]) return cipher.decrypt(uniorbytes(value[block_size * 2:], bytes))
[ "def", "aes_decrypt", "(", "value", ",", "secret", ",", "block_size", "=", "AES", ".", "block_size", ")", ":", "if", "value", "is", "not", "None", ":", "cipher", "=", "AES", ".", "new", "(", "secret", "[", ":", "32", "]", ",", "AES", ".", "MODE_CFB", ",", "value", "[", ":", "block_size", "]", ")", "return", "cipher", ".", "decrypt", "(", "uniorbytes", "(", "value", "[", "block_size", "*", "2", ":", "]", ",", "bytes", ")", ")" ]
AES decrypt @value with @secret using the |CFB| mode of AES with a cryptographically secure initialization vector. -> (#str) AES decrypted @value .. from vital.security import aes_encrypt, aes_decrypt aes_encrypt("Hello, world", "aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW") # -> 'zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw=' aes_decrypt( "zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw=", "aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW") # -> 'Hello, world' ..
[ "AES", "decrypt", "@value", "with", "@secret", "using", "the", "|CFB|", "mode", "of", "AES", "with", "a", "cryptographically", "secure", "initialization", "vector", "." ]
python
train
tritemio/PyBroMo
pybromo/utils/git.py
https://github.com/tritemio/PyBroMo/blob/b75f82a4551ff37e7c7a7e6954c536451f3e6d06/pybromo/utils/git.py#L80-L87
def get_last_commit_line(git_path=None): """ Get one-line description of HEAD commit for repository in current dir. """ if git_path is None: git_path = GIT_PATH output = check_output([git_path, "log", "--pretty=format:'%ad %h %s'", "--date=short", "-n1"]) return output.strip()[1:-1]
[ "def", "get_last_commit_line", "(", "git_path", "=", "None", ")", ":", "if", "git_path", "is", "None", ":", "git_path", "=", "GIT_PATH", "output", "=", "check_output", "(", "[", "git_path", ",", "\"log\"", ",", "\"--pretty=format:'%ad %h %s'\"", ",", "\"--date=short\"", ",", "\"-n1\"", "]", ")", "return", "output", ".", "strip", "(", ")", "[", "1", ":", "-", "1", "]" ]
Get one-line description of HEAD commit for repository in current dir.
[ "Get", "one", "-", "line", "description", "of", "HEAD", "commit", "for", "repository", "in", "current", "dir", "." ]
python
valid
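`get_last_commit_line` in the record above shells out to `git log` with a single-quoted pretty format and strips the surrounding quotes. A hedged standard-library equivalent, assuming `git` is on PATH and the current directory is a repository:

```python
from subprocess import check_output

output = check_output(["git", "log", "--pretty=format:'%ad %h %s'",
                       "--date=short", "-n1"])
# The format string is wrapped in literal single quotes, so drop the first and
# last byte, exactly as the helper's [1:-1] slice does.
print(output.strip()[1:-1].decode())
```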
prompt-toolkit/pyvim
pyvim/commands/commands.py
https://github.com/prompt-toolkit/pyvim/blob/5928b53b9d700863c1a06d2181a034a955f94594/pyvim/commands/commands.py#L53-L73
def location_cmd(name, accepts_force=False): """ Decorator that registers a command that takes a location as (optional) parameter. """ COMMANDS_TAKING_LOCATIONS.add(name) def decorator(func): @_cmd(name) def command_wrapper(editor, variables): location = variables.get('location') force = bool(variables['force']) if force and not accepts_force: editor.show_message('No ! allowed') elif accepts_force: func(editor, location, force=force) else: func(editor, location) return func return decorator
[ "def", "location_cmd", "(", "name", ",", "accepts_force", "=", "False", ")", ":", "COMMANDS_TAKING_LOCATIONS", ".", "add", "(", "name", ")", "def", "decorator", "(", "func", ")", ":", "@", "_cmd", "(", "name", ")", "def", "command_wrapper", "(", "editor", ",", "variables", ")", ":", "location", "=", "variables", ".", "get", "(", "'location'", ")", "force", "=", "bool", "(", "variables", "[", "'force'", "]", ")", "if", "force", "and", "not", "accepts_force", ":", "editor", ".", "show_message", "(", "'No ! allowed'", ")", "elif", "accepts_force", ":", "func", "(", "editor", ",", "location", ",", "force", "=", "force", ")", "else", ":", "func", "(", "editor", ",", "location", ")", "return", "func", "return", "decorator" ]
Decorator that registers a command that takes a location as (optional) parameter.
[ "Decorator", "that", "registers", "a", "command", "that", "takes", "a", "location", "as", "(", "optional", ")", "parameter", "." ]
python
train
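The `location_cmd` decorator above registers an editor command that optionally takes a location and a `!` force flag. A hedged sketch of declaring such a command with it; the command name and handler body are hypothetical, and `location_cmd` plus the pyvim `editor` object are assumed to be in scope from the module above:

```python
@location_cmd('writedemo', accepts_force=True)
def write_demo(editor, location, force=False):
    """Hypothetical handler: pretend to save the current buffer to ``location``."""
    # show_message is the same editor method the wrapper itself uses.
    editor.show_message('would write %s (force=%s)' % (location, force))
```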
kpn-digital/py-timeexecution
docs/apidoc.py
https://github.com/kpn-digital/py-timeexecution/blob/79b991e83f783196c41b830d0acef21ac5462596/docs/apidoc.py#L168-L178
def shall_skip(module, opts): """Check if we want to skip this module.""" # skip it if there is nothing (or just \n or \r\n) in the file if path.getsize(module) <= 2: return True # skip if it has a "private" name and this is selected filename = path.basename(module) if filename != '__init__.py' and filename.startswith('_') and \ not opts.includeprivate: return True return False
[ "def", "shall_skip", "(", "module", ",", "opts", ")", ":", "# skip it if there is nothing (or just \\n or \\r\\n) in the file", "if", "path", ".", "getsize", "(", "module", ")", "<=", "2", ":", "return", "True", "# skip if it has a \"private\" name and this is selected", "filename", "=", "path", ".", "basename", "(", "module", ")", "if", "filename", "!=", "'__init__.py'", "and", "filename", ".", "startswith", "(", "'_'", ")", "and", "not", "opts", ".", "includeprivate", ":", "return", "True", "return", "False" ]
Check if we want to skip this module.
[ "Check", "if", "we", "want", "to", "skip", "this", "module", "." ]
python
train
oasis-open/cti-taxii-client
taxii2client/__init__.py
https://github.com/oasis-open/cti-taxii-client/blob/b4c037fb61d8b8892af34423e2c67c81218d6f8e/taxii2client/__init__.py#L951-L980
def post(self, url, headers=None, params=None, **kwargs): """Send a JSON POST request with the given request headers, additional URL query parameters, and the given JSON in the request body. The extra query parameters are merged with any which already exist in the URL. The 'json' and 'data' parameters may not both be given. Args: url (str): URL to retrieve headers (dict): Any other headers to be added to the request. params: dictionary or bytes to be sent in the query string for the request. (optional) json: json to send in the body of the Request. This must be a JSON-serializable object. (optional) data: raw request body data. May be a dictionary, list of tuples, bytes, or file-like object to send in the body of the Request. (optional) """ if len(kwargs) > 1: raise InvalidArgumentsError("Too many extra args ({} > 1)".format( len(kwargs))) if kwargs: kwarg = next(iter(kwargs)) if kwarg not in ("json", "data"): raise InvalidArgumentsError("Invalid kwarg: " + kwarg) resp = self.session.post(url, headers=headers, params=params, **kwargs) resp.raise_for_status() return _to_json(resp)
[ "def", "post", "(", "self", ",", "url", ",", "headers", "=", "None", ",", "params", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "kwargs", ")", ">", "1", ":", "raise", "InvalidArgumentsError", "(", "\"Too many extra args ({} > 1)\"", ".", "format", "(", "len", "(", "kwargs", ")", ")", ")", "if", "kwargs", ":", "kwarg", "=", "next", "(", "iter", "(", "kwargs", ")", ")", "if", "kwarg", "not", "in", "(", "\"json\"", ",", "\"data\"", ")", ":", "raise", "InvalidArgumentsError", "(", "\"Invalid kwarg: \"", "+", "kwarg", ")", "resp", "=", "self", ".", "session", ".", "post", "(", "url", ",", "headers", "=", "headers", ",", "params", "=", "params", ",", "*", "*", "kwargs", ")", "resp", ".", "raise_for_status", "(", ")", "return", "_to_json", "(", "resp", ")" ]
Send a JSON POST request with the given request headers, additional URL query parameters, and the given JSON in the request body. The extra query parameters are merged with any which already exist in the URL. The 'json' and 'data' parameters may not both be given. Args: url (str): URL to retrieve headers (dict): Any other headers to be added to the request. params: dictionary or bytes to be sent in the query string for the request. (optional) json: json to send in the body of the Request. This must be a JSON-serializable object. (optional) data: raw request body data. May be a dictionary, list of tuples, bytes, or file-like object to send in the body of the Request. (optional)
[ "Send", "a", "JSON", "POST", "request", "with", "the", "given", "request", "headers", "additional", "URL", "query", "parameters", "and", "the", "given", "JSON", "in", "the", "request", "body", ".", "The", "extra", "query", "parameters", "are", "merged", "with", "any", "which", "already", "exist", "in", "the", "URL", ".", "The", "json", "and", "data", "parameters", "may", "not", "both", "be", "given", "." ]
python
valid
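The `post` helper above accepts at most one of `json` or `data` as the request body, merges extra query parameters into the URL, and returns the parsed JSON response. A hedged usage sketch; `conn` stands for an instance of the connection class this method belongs to, and the URL and parameter values are placeholders:

```python
# Hypothetical call; `conn` is created elsewhere with server credentials.
response = conn.post(
    "https://taxii.example.com/api1/collections/objects/",
    params={"added_after": "2016-01-01T00:00:00Z"},  # merged into the query string
    json={"objects": []},  # at most one of `json`/`data` may be supplied
)
print(response)
```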
briandilley/ebs-deploy
ebs_deploy/commands/swap_urls_command.py
https://github.com/briandilley/ebs-deploy/blob/4178c9c1282a9025fb987dab3470bea28c202e10/ebs_deploy/commands/swap_urls_command.py#L5-L10
def add_arguments(parser): """ adds arguments for the swap urls command """ parser.add_argument('-o', '--old-environment', help='Old environment name', required=True) parser.add_argument('-n', '--new-environment', help='New environment name', required=True)
[ "def", "add_arguments", "(", "parser", ")", ":", "parser", ".", "add_argument", "(", "'-o'", ",", "'--old-environment'", ",", "help", "=", "'Old environment name'", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "'-n'", ",", "'--new-environment'", ",", "help", "=", "'New environment name'", ",", "required", "=", "True", ")" ]
adds arguments for the swap urls command
[ "adds", "arguments", "for", "the", "swap", "urls", "command" ]
python
valid
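`add_arguments` above simply wires two required options onto an argparse parser. A minimal standalone sketch (the helper is restated so the snippet runs on its own):

```python
import argparse

def add_arguments(parser):
    """Same two options as the swap-urls command above."""
    parser.add_argument('-o', '--old-environment', help='Old environment name', required=True)
    parser.add_argument('-n', '--new-environment', help='New environment name', required=True)

parser = argparse.ArgumentParser(prog='swap_urls')
add_arguments(parser)
args = parser.parse_args(['-o', 'app-blue', '-n', 'app-green'])
print(args.old_environment, args.new_environment)  # app-blue app-green
```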
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/memory_profiler.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/memory_profiler.py#L239-L246
def disable_by_count(self): """ Disable the profiler if the number of disable requests matches the number of enable requests. """ if self.enable_count > 0: self.enable_count -= 1 if self.enable_count == 0: self.disable()
[ "def", "disable_by_count", "(", "self", ")", ":", "if", "self", ".", "enable_count", ">", "0", ":", "self", ".", "enable_count", "-=", "1", "if", "self", ".", "enable_count", "==", "0", ":", "self", ".", "disable", "(", ")" ]
Disable the profiler if the number of disable requests matches the number of enable requests.
[ "Disable", "the", "profiler", "if", "the", "number", "of", "disable", "requests", "matches", "the", "number", "of", "enable", "requests", "." ]
python
train
oasis-open/cti-stix-validator
stix2validator/output.py
https://github.com/oasis-open/cti-stix-validator/blob/a607014e3fa500a7678f8b61b278456ca581f9d0/stix2validator/output.py#L157-L169
def print_object_results(obj_result): """Print the results of validating an object. Args: obj_result: An ObjectValidationResults instance. """ print_results_header(obj_result.object_id, obj_result.is_valid) if obj_result.warnings: print_warning_results(obj_result, 1) if obj_result.errors: print_schema_results(obj_result, 1)
[ "def", "print_object_results", "(", "obj_result", ")", ":", "print_results_header", "(", "obj_result", ".", "object_id", ",", "obj_result", ".", "is_valid", ")", "if", "obj_result", ".", "warnings", ":", "print_warning_results", "(", "obj_result", ",", "1", ")", "if", "obj_result", ".", "errors", ":", "print_schema_results", "(", "obj_result", ",", "1", ")" ]
Print the results of validating an object. Args: obj_result: An ObjectValidationResults instance.
[ "Print", "the", "results", "of", "validating", "an", "object", "." ]
python
train
spotify/ulogger
ulogger/stackdriver.py
https://github.com/spotify/ulogger/blob/c59ced69e55b400e9c7a3688145fe3e8cb89db13/ulogger/stackdriver.py#L163-L171
def _set_worker_thread_level(self): """Sets logging level of the background logging thread to DEBUG or INFO """ bthread_logger = logging.getLogger( 'google.cloud.logging.handlers.transports.background_thread') if self.debug_thread_worker: bthread_logger.setLevel(logging.DEBUG) else: bthread_logger.setLevel(logging.INFO)
[ "def", "_set_worker_thread_level", "(", "self", ")", ":", "bthread_logger", "=", "logging", ".", "getLogger", "(", "'google.cloud.logging.handlers.transports.background_thread'", ")", "if", "self", ".", "debug_thread_worker", ":", "bthread_logger", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "else", ":", "bthread_logger", ".", "setLevel", "(", "logging", ".", "INFO", ")" ]
Sets logging level of the background logging thread to DEBUG or INFO
[ "Sets", "logging", "level", "of", "the", "background", "logging", "thread", "to", "DEBUG", "or", "INFO" ]
python
train
eqcorrscan/EQcorrscan
eqcorrscan/utils/correlate.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/correlate.py#L241-L312
def register_array_xcorr(name, func=None, is_default=False): """ Decorator for registering correlation functions. Each function must have the same interface as numpy_normxcorr, which is *f(templates, stream, pads, *args, **kwargs)* any number of specific kwargs can be used. Register_normxcorr can be used as a decorator (with or without arguments) or as a callable. :param name: The name of the function for quick access, or the callable that will be wrapped when used as a decorator. :type name: str, callable :param func: The function to register :type func: callable, optional :param is_default: True if this function should be marked as default normxcorr :type is_default: bool :return: callable """ valid_methods = set(list(XCOR_ARRAY_METHODS) + list(XCORR_STREAM_METHODS)) cache = {} def register(register_str): """ Register a function as an implementation. :param register_str: The registration designation :type register_str: str """ if register_str not in valid_methods: msg = 'register_name must be in %s' % valid_methods raise ValueError(msg) def _register(func): cache[register_str] = func setattr(cache['func'], register_str, func) return func return _register def wrapper(func, func_name=None): # register the functions in the XCOR fname = func_name or name.__name__ if callable(name) else str(name) XCOR_FUNCS[fname] = func # if is_default: # set function as default # XCOR_FUNCS['default'] = func # attach some attrs, this is a bit of a hack to avoid pickle problems func.register = register cache['func'] = func func.multithread = _general_multithread(func) func.multiprocess = _general_multiprocess(func) func.concurrent = _general_multithread(func) func.stream_xcorr = _general_serial(func) func.array_xcorr = func func.registered = True if is_default: # set function as default XCOR_FUNCS['default'] = copy.deepcopy(func) return func # used as a decorator if callable(name): return wrapper(name) # used as a normal function (called and passed a function) if callable(func): return wrapper(func, func_name=name) # called, then used as a decorator return wrapper
[ "def", "register_array_xcorr", "(", "name", ",", "func", "=", "None", ",", "is_default", "=", "False", ")", ":", "valid_methods", "=", "set", "(", "list", "(", "XCOR_ARRAY_METHODS", ")", "+", "list", "(", "XCORR_STREAM_METHODS", ")", ")", "cache", "=", "{", "}", "def", "register", "(", "register_str", ")", ":", "\"\"\"\n Register a function as an implementation.\n\n :param register_str: The registration designation\n :type register_str: str\n \"\"\"", "if", "register_str", "not", "in", "valid_methods", ":", "msg", "=", "'register_name must be in %s'", "%", "valid_methods", "raise", "ValueError", "(", "msg", ")", "def", "_register", "(", "func", ")", ":", "cache", "[", "register_str", "]", "=", "func", "setattr", "(", "cache", "[", "'func'", "]", ",", "register_str", ",", "func", ")", "return", "func", "return", "_register", "def", "wrapper", "(", "func", ",", "func_name", "=", "None", ")", ":", "# register the functions in the XCOR", "fname", "=", "func_name", "or", "name", ".", "__name__", "if", "callable", "(", "name", ")", "else", "str", "(", "name", ")", "XCOR_FUNCS", "[", "fname", "]", "=", "func", "# if is_default: # set function as default", "# XCOR_FUNCS['default'] = func", "# attach some attrs, this is a bit of a hack to avoid pickle problems", "func", ".", "register", "=", "register", "cache", "[", "'func'", "]", "=", "func", "func", ".", "multithread", "=", "_general_multithread", "(", "func", ")", "func", ".", "multiprocess", "=", "_general_multiprocess", "(", "func", ")", "func", ".", "concurrent", "=", "_general_multithread", "(", "func", ")", "func", ".", "stream_xcorr", "=", "_general_serial", "(", "func", ")", "func", ".", "array_xcorr", "=", "func", "func", ".", "registered", "=", "True", "if", "is_default", ":", "# set function as default", "XCOR_FUNCS", "[", "'default'", "]", "=", "copy", ".", "deepcopy", "(", "func", ")", "return", "func", "# used as a decorator", "if", "callable", "(", "name", ")", ":", "return", "wrapper", "(", "name", ")", "# used as a normal function (called and passed a function)", "if", "callable", "(", "func", ")", ":", "return", "wrapper", "(", "func", ",", "func_name", "=", "name", ")", "# called, then used as a decorator", "return", "wrapper" ]
Decorator for registering correlation functions. Each function must have the same interface as numpy_normxcorr, which is *f(templates, stream, pads, *args, **kwargs)* any number of specific kwargs can be used. Register_normxcorr can be used as a decorator (with or without arguments) or as a callable. :param name: The name of the function for quick access, or the callable that will be wrapped when used as a decorator. :type name: str, callable :param func: The function to register :type func: callable, optional :param is_default: True if this function should be marked as default normxcorr :type is_default: bool :return: callable
[ "Decorator", "for", "registering", "correlation", "functions", "." ]
python
train
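The docstring above says a registered backend must expose the `f(templates, stream, pads, *args, **kwargs)` interface. A hedged sketch of registering a do-nothing backend under a new name; the return contract is only assumed to mirror `numpy_normxcorr` (an array of correlations plus a per-template channel count), and eqcorrscan is assumed to be installed:

```python
import numpy as np
from eqcorrscan.utils.correlate import register_array_xcorr

@register_array_xcorr("toy_xcorr")
def toy_xcorr(templates, stream, pads, *args, **kwargs):
    """Placeholder backend with the documented signature (not a real correlator)."""
    n_templates = len(templates)
    n_samples = stream.shape[-1] - templates.shape[-1] + 1
    ccc = np.zeros((n_templates, n_samples), dtype=np.float32)
    return ccc, np.zeros(n_templates, dtype=int)
```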
shidenggui/easytrader
easytrader/helpers.py
https://github.com/shidenggui/easytrader/blob/e5ae4daeda4ea125763a95b280dd694c7f68257d/easytrader/helpers.py#L154-L200
def get_today_ipo_data(): """ 查询今天可以申购的新股信息 :return: 今日可申购新股列表 apply_code申购代码 price发行价格 """ agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:43.0) Gecko/20100101 Firefox/43.0" send_headers = { "Host": "xueqiu.com", "User-Agent": agent, "Accept": "application/json, text/javascript, */*; q=0.01", "Accept-Language": "zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3", "Accept-Encoding": "deflate", "Cache-Control": "no-cache", "X-Requested-With": "XMLHttpRequest", "Referer": "https://xueqiu.com/hq", "Connection": "keep-alive", } timestamp = random.randint(1000000000000, 9999999999999) home_page_url = "https://xueqiu.com" ipo_data_url = ( "https://xueqiu.com/proipo/query.json?column=symbol,name,onl_subcode,onl_subbegdate,actissqty,onl" "_actissqty,onl_submaxqty,iss_price,onl_lotwiner_stpub_date,onl_lotwinrt,onl_lotwin_amount,stock_" "income&orderBy=onl_subbegdate&order=desc&stockType=&page=1&size=30&_=%s" % (str(timestamp)) ) session = requests.session() session.get(home_page_url, headers=send_headers) # 产生cookies ipo_response = session.post(ipo_data_url, headers=send_headers) json_data = json.loads(ipo_response.text) today_ipo = [] for line in json_data["data"]: if datetime.datetime.now().strftime("%a %b %d") == line[3][:10]: today_ipo.append( { "stock_code": line[0], "stock_name": line[1], "apply_code": line[2], "price": line[7], } ) return today_ipo
[ "def", "get_today_ipo_data", "(", ")", ":", "agent", "=", "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:43.0) Gecko/20100101 Firefox/43.0\"", "send_headers", "=", "{", "\"Host\"", ":", "\"xueqiu.com\"", ",", "\"User-Agent\"", ":", "agent", ",", "\"Accept\"", ":", "\"application/json, text/javascript, */*; q=0.01\"", ",", "\"Accept-Language\"", ":", "\"zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3\"", ",", "\"Accept-Encoding\"", ":", "\"deflate\"", ",", "\"Cache-Control\"", ":", "\"no-cache\"", ",", "\"X-Requested-With\"", ":", "\"XMLHttpRequest\"", ",", "\"Referer\"", ":", "\"https://xueqiu.com/hq\"", ",", "\"Connection\"", ":", "\"keep-alive\"", ",", "}", "timestamp", "=", "random", ".", "randint", "(", "1000000000000", ",", "9999999999999", ")", "home_page_url", "=", "\"https://xueqiu.com\"", "ipo_data_url", "=", "(", "\"https://xueqiu.com/proipo/query.json?column=symbol,name,onl_subcode,onl_subbegdate,actissqty,onl\"", "\"_actissqty,onl_submaxqty,iss_price,onl_lotwiner_stpub_date,onl_lotwinrt,onl_lotwin_amount,stock_\"", "\"income&orderBy=onl_subbegdate&order=desc&stockType=&page=1&size=30&_=%s\"", "%", "(", "str", "(", "timestamp", ")", ")", ")", "session", "=", "requests", ".", "session", "(", ")", "session", ".", "get", "(", "home_page_url", ",", "headers", "=", "send_headers", ")", "# 产生cookies", "ipo_response", "=", "session", ".", "post", "(", "ipo_data_url", ",", "headers", "=", "send_headers", ")", "json_data", "=", "json", ".", "loads", "(", "ipo_response", ".", "text", ")", "today_ipo", "=", "[", "]", "for", "line", "in", "json_data", "[", "\"data\"", "]", ":", "if", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%a %b %d\"", ")", "==", "line", "[", "3", "]", "[", ":", "10", "]", ":", "today_ipo", ".", "append", "(", "{", "\"stock_code\"", ":", "line", "[", "0", "]", ",", "\"stock_name\"", ":", "line", "[", "1", "]", ",", "\"apply_code\"", ":", "line", "[", "2", "]", ",", "\"price\"", ":", "line", "[", "7", "]", ",", "}", ")", "return", "today_ipo" ]
查询今天可以申购的新股信息 :return: 今日可申购新股列表 apply_code申购代码 price发行价格
[ "查询今天可以申购的新股信息", ":", "return", ":", "今日可申购新股列表", "apply_code申购代码", "price发行价格" ]
python
train
alexras/pylsdj
pylsdj/instrument.py
https://github.com/alexras/pylsdj/blob/1c45a7919dd324e941f76b315558b9647892e4d5/pylsdj/instrument.py#L47-L52
def table(self): """a ```pylsdj.Table``` referencing the instrument's table, or None if the instrument doesn't have a table""" if hasattr(self.data, 'table_on') and self.data.table_on: assert_index_sane(self.data.table, len(self.song.tables)) return self.song.tables[self.data.table]
[ "def", "table", "(", "self", ")", ":", "if", "hasattr", "(", "self", ".", "data", ",", "'table_on'", ")", "and", "self", ".", "data", ".", "table_on", ":", "assert_index_sane", "(", "self", ".", "data", ".", "table", ",", "len", "(", "self", ".", "song", ".", "tables", ")", ")", "return", "self", ".", "song", ".", "tables", "[", "self", ".", "data", ".", "table", "]" ]
a ```pylsdj.Table``` referencing the instrument's table, or None if the instrument doesn't have a table
[ "a", "pylsdj", ".", "Table", "referencing", "the", "instrument", "s", "table", "or", "None", "if", "the", "instrument", "doesn", "t", "have", "a", "table" ]
python
train
greenbender/pynntp
nntp/date.py
https://github.com/greenbender/pynntp/blob/991a76331cdf5d8f9dbf5b18f6e29adc80749a2f/nntp/date.py#L383-L440
def datetimeobj(value, fmt=None): """Parse a datetime to a datetime object. Uses fast custom parsing for common datetime formats or the slow dateutil parser for other formats. This is a trade off between ease of use and speed and is very useful for fast parsing of timestamp strings whose format may standard but varied or unknown prior to parsing. Common formats include: 1 Feb 2010 12:00:00 GMT Mon, 1 Feb 2010 22:00:00 +1000 20100201120000 1383470155 (seconds since epoch) See the other datetimeobj_*() functions for more details. Args: value: A string representing a datetime. Returns: A datetime object. """ if fmt: return _datetimeobj_formats.get(fmt, lambda v: datetimeobj_fmt(v, fmt) )(value) l = len(value) if 19 <= l <= 24 and value[3] == " ": # '%d %b %Y %H:%M:%Sxxxx' try: return datetimeobj_d_b_Y_H_M_S(value) except (KeyError, ValueError): pass if 30 <= l <= 31: # '%a, %d %b %Y %H:%M:%S %z' try: return datetimeobj_a__d_b_Y_H_M_S_z(value) except (KeyError, ValueError): pass if l == 14: # '%Y%m%d%H%M%S' try: return datetimeobj_YmdHMS(value) except ValueError: pass # epoch timestamp try: return datetimeobj_epoch(value) except ValueError: pass # slow version return datetimeobj_any(value)
[ "def", "datetimeobj", "(", "value", ",", "fmt", "=", "None", ")", ":", "if", "fmt", ":", "return", "_datetimeobj_formats", ".", "get", "(", "fmt", ",", "lambda", "v", ":", "datetimeobj_fmt", "(", "v", ",", "fmt", ")", ")", "(", "value", ")", "l", "=", "len", "(", "value", ")", "if", "19", "<=", "l", "<=", "24", "and", "value", "[", "3", "]", "==", "\" \"", ":", "# '%d %b %Y %H:%M:%Sxxxx'", "try", ":", "return", "datetimeobj_d_b_Y_H_M_S", "(", "value", ")", "except", "(", "KeyError", ",", "ValueError", ")", ":", "pass", "if", "30", "<=", "l", "<=", "31", ":", "# '%a, %d %b %Y %H:%M:%S %z'", "try", ":", "return", "datetimeobj_a__d_b_Y_H_M_S_z", "(", "value", ")", "except", "(", "KeyError", ",", "ValueError", ")", ":", "pass", "if", "l", "==", "14", ":", "# '%Y%m%d%H%M%S'", "try", ":", "return", "datetimeobj_YmdHMS", "(", "value", ")", "except", "ValueError", ":", "pass", "# epoch timestamp", "try", ":", "return", "datetimeobj_epoch", "(", "value", ")", "except", "ValueError", ":", "pass", "# slow version", "return", "datetimeobj_any", "(", "value", ")" ]
Parse a datetime to a datetime object. Uses fast custom parsing for common datetime formats or the slow dateutil parser for other formats. This is a trade off between ease of use and speed and is very useful for fast parsing of timestamp strings whose format may standard but varied or unknown prior to parsing. Common formats include: 1 Feb 2010 12:00:00 GMT Mon, 1 Feb 2010 22:00:00 +1000 20100201120000 1383470155 (seconds since epoch) See the other datetimeobj_*() functions for more details. Args: value: A string representing a datetime. Returns: A datetime object.
[ "Parse", "a", "datetime", "to", "a", "datetime", "object", "." ]
python
test
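`datetimeobj` above dispatches on string length and separators for the common formats before falling back to the slower dateutil parser. A hedged illustration using the formats its docstring lists (assumes the `nntp` package from pynntp is installed):

```python
from nntp.date import datetimeobj

# Formats listed in the docstring; all should come back as datetime objects.
print(datetimeobj("1 Feb 2010 12:00:00 GMT"))
print(datetimeobj("Mon, 1 Feb 2010 22:00:00 +1000"))
print(datetimeobj("20100201120000"))   # %Y%m%d%H%M%S
print(datetimeobj("1383470155"))       # seconds since epoch
```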
globocom/GloboNetworkAPI-client-python
networkapiclient/EspecificacaoGrupoVirtual.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/EspecificacaoGrupoVirtual.py#L64-L79
def add_equipamento_remove(self, id, id_ip, ids_ips_vips): '''Adiciona um equipamento na lista de equipamentos para operação de remover um grupo virtual. :param id: Identificador do equipamento. :param id_ip: Identificador do IP do equipamento. :param ids_ips_vips: Lista com os identificadores de IPs criados para cada VIP e associados ao equipamento. :return: None ''' equipament_map = dict() equipament_map['id'] = id equipament_map['id_ip'] = id_ip equipament_map['vips'] = {'id_ip_vip': ids_ips_vips} self.lista_equipamentos_remove.append(equipament_map)
[ "def", "add_equipamento_remove", "(", "self", ",", "id", ",", "id_ip", ",", "ids_ips_vips", ")", ":", "equipament_map", "=", "dict", "(", ")", "equipament_map", "[", "'id'", "]", "=", "id", "equipament_map", "[", "'id_ip'", "]", "=", "id_ip", "equipament_map", "[", "'vips'", "]", "=", "{", "'id_ip_vip'", ":", "ids_ips_vips", "}", "self", ".", "lista_equipamentos_remove", ".", "append", "(", "equipament_map", ")" ]
Adiciona um equipamento na lista de equipamentos para operação de remover um grupo virtual. :param id: Identificador do equipamento. :param id_ip: Identificador do IP do equipamento. :param ids_ips_vips: Lista com os identificadores de IPs criados para cada VIP e associados ao equipamento. :return: None
[ "Adiciona", "um", "equipamento", "na", "lista", "de", "equipamentos", "para", "operação", "de", "remover", "um", "grupo", "virtual", "." ]
python
train
OCHA-DAP/hdx-python-api
src/hdx/data/dataset.py
https://github.com/OCHA-DAP/hdx-python-api/blob/212440f54f73805826a16db77dbcb6033b18a313/src/hdx/data/dataset.py#L1188-L1195
def get_maintainer(self): # type: () -> hdx.data.user.User """Get the dataset's maintainer. Returns: User: Dataset's maintainer """ return hdx.data.user.User.read_from_hdx(self.data['maintainer'], configuration=self.configuration)
[ "def", "get_maintainer", "(", "self", ")", ":", "# type: () -> hdx.data.user.User", "return", "hdx", ".", "data", ".", "user", ".", "User", ".", "read_from_hdx", "(", "self", ".", "data", "[", "'maintainer'", "]", ",", "configuration", "=", "self", ".", "configuration", ")" ]
Get the dataset's maintainer. Returns: User: Dataset's maintainer
[ "Get", "the", "dataset", "s", "maintainer", "." ]
python
train
zeromake/aiko
aiko/utils.py
https://github.com/zeromake/aiko/blob/53b246fa88652466a9e38ac3d1a99a6198195b0f/aiko/utils.py#L46-L64
def handle_async_gen(gen: Any, gen_obj: Any) -> Any: """ 处理异步生成器 """ if gen is None: return None if asyncio.iscoroutine(gen): try: temp = yield from gen gen_obj.send(temp) return except Exception as error: try: gen = gen_obj.throw(error) return (yield from handle_async_gen(gen, gen_obj)) except StopIteration: return None else: return gen
[ "def", "handle_async_gen", "(", "gen", ":", "Any", ",", "gen_obj", ":", "Any", ")", "->", "Any", ":", "if", "gen", "is", "None", ":", "return", "None", "if", "asyncio", ".", "iscoroutine", "(", "gen", ")", ":", "try", ":", "temp", "=", "yield", "from", "gen", "gen_obj", ".", "send", "(", "temp", ")", "return", "except", "Exception", "as", "error", ":", "try", ":", "gen", "=", "gen_obj", ".", "throw", "(", "error", ")", "return", "(", "yield", "from", "handle_async_gen", "(", "gen", ",", "gen_obj", ")", ")", "except", "StopIteration", ":", "return", "None", "else", ":", "return", "gen" ]
处理异步生成器
[ "处理异步生成器" ]
python
train
dcos/shakedown
shakedown/dcos/marathon.py
https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/marathon.py#L46-L63
def mom_version_less_than(version, name='marathon-user'): """ Returns True if MoM with the given {name} exists and has a version less than {version}. Note that if MoM does not exist False is returned. :param version: required version :type: string :param name: MoM name, default is 'marathon-user' :type: string :return: True if version < MoM version :rtype: bool """ if service_available_predicate(name): return mom_version() < LooseVersion(version) else: # We can either skip the corresponding test by returning False # or raise an exception. print('WARN: {} MoM not found. mom_version_less_than({}) is False'.format(name, version)) return False
[ "def", "mom_version_less_than", "(", "version", ",", "name", "=", "'marathon-user'", ")", ":", "if", "service_available_predicate", "(", "name", ")", ":", "return", "mom_version", "(", ")", "<", "LooseVersion", "(", "version", ")", "else", ":", "# We can either skip the corresponding test by returning False", "# or raise an exception.", "print", "(", "'WARN: {} MoM not found. mom_version_less_than({}) is False'", ".", "format", "(", "name", ",", "version", ")", ")", "return", "False" ]
Returns True if MoM with the given {name} exists and has a version less than {version}. Note that if MoM does not exist False is returned. :param version: required version :type: string :param name: MoM name, default is 'marathon-user' :type: string :return: True if version < MoM version :rtype: bool
[ "Returns", "True", "if", "MoM", "with", "the", "given", "{", "name", "}", "exists", "and", "has", "a", "version", "less", "than", "{", "version", "}", ".", "Note", "that", "if", "MoM", "does", "not", "exist", "False", "is", "returned", "." ]
python
train
twilio/twilio-python
twilio/rest/ip_messaging/v1/service/channel/message.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/ip_messaging/v1/service/channel/message.py#L293-L317
def update(self, body=values.unset, attributes=values.unset): """ Update the MessageInstance :param unicode body: The new message body string. :param unicode attributes: The new attributes metadata field you can use to store any data you wish. :returns: Updated MessageInstance :rtype: twilio.rest.chat.v1.service.channel.message.MessageInstance """ data = values.of({'Body': body, 'Attributes': attributes, }) payload = self._version.update( 'POST', self._uri, data=data, ) return MessageInstance( self._version, payload, service_sid=self._solution['service_sid'], channel_sid=self._solution['channel_sid'], sid=self._solution['sid'], )
[ "def", "update", "(", "self", ",", "body", "=", "values", ".", "unset", ",", "attributes", "=", "values", ".", "unset", ")", ":", "data", "=", "values", ".", "of", "(", "{", "'Body'", ":", "body", ",", "'Attributes'", ":", "attributes", ",", "}", ")", "payload", "=", "self", ".", "_version", ".", "update", "(", "'POST'", ",", "self", ".", "_uri", ",", "data", "=", "data", ",", ")", "return", "MessageInstance", "(", "self", ".", "_version", ",", "payload", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "channel_sid", "=", "self", ".", "_solution", "[", "'channel_sid'", "]", ",", "sid", "=", "self", ".", "_solution", "[", "'sid'", "]", ",", ")" ]
Update the MessageInstance :param unicode body: The new message body string. :param unicode attributes: The new attributes metadata field you can use to store any data you wish. :returns: Updated MessageInstance :rtype: twilio.rest.chat.v1.service.channel.message.MessageInstance
[ "Update", "the", "MessageInstance" ]
python
train
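`update` above POSTs the new body and attributes and rebuilds a `MessageInstance` from the response. A hedged end-to-end sketch using the public twilio client; the account credentials and SIDs are placeholders, and running it would issue a real API call:

```python
from twilio.rest import Client

client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")

# Chat (v1) message addressed by service, channel and message SID.
message = client.chat.v1 \
    .services("ISXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
    .channels("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
    .messages("IMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
    .update(body="edited text", attributes='{"edited": true}')

print(message.body)
```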
wmayner/pyphi
pyphi/connectivity.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/connectivity.py#L63-L130
def block_cm(cm): """Return whether ``cm`` can be arranged as a block connectivity matrix. If so, the corresponding mechanism/purview is trivially reducible. Technically, only square matrices are "block diagonal", but the notion of connectivity carries over. We test for block connectivity by trying to grow a block of nodes such that: - 'source' nodes only input to nodes in the block - 'sink' nodes only receive inputs from source nodes in the block For example, the following connectivity matrix represents connections from ``nodes1 = A, B, C`` to ``nodes2 = D, E, F, G`` (without loss of generality, note that ``nodes1`` and ``nodes2`` may share elements):: D E F G A [1, 1, 0, 0] B [1, 1, 0, 0] C [0, 0, 1, 1] Since nodes |AB| only connect to nodes |DE|, and node |C| only connects to nodes |FG|, the subgraph is reducible, because the cut :: A,B C ─── ✕ ─── D,E F,G does not change the structure of the graph. """ if np.any(cm.sum(1) == 0): return True if np.all(cm.sum(1) == 1): return True outputs = list(range(cm.shape[1])) # CM helpers: def outputs_of(nodes): """Return all nodes that `nodes` connect to (output to).""" return np.where(cm[nodes, :].sum(0))[0] def inputs_to(nodes): """Return all nodes which connect to (input to) `nodes`.""" return np.where(cm[:, nodes].sum(1))[0] # Start: source node with most outputs sources = [np.argmax(cm.sum(1))] sinks = outputs_of(sources) sink_inputs = inputs_to(sinks) while True: if np.array_equal(sink_inputs, sources): # sources exclusively connect to sinks. # There are no other nodes which connect sink nodes, # hence set(sources) + set(sinks) form a component # which is not connected to the rest of the graph return True # Recompute sources, sinks, and sink_inputs sources = sink_inputs sinks = outputs_of(sources) sink_inputs = inputs_to(sinks) # Considering all output nodes? if np.array_equal(sinks, outputs): return False
[ "def", "block_cm", "(", "cm", ")", ":", "if", "np", ".", "any", "(", "cm", ".", "sum", "(", "1", ")", "==", "0", ")", ":", "return", "True", "if", "np", ".", "all", "(", "cm", ".", "sum", "(", "1", ")", "==", "1", ")", ":", "return", "True", "outputs", "=", "list", "(", "range", "(", "cm", ".", "shape", "[", "1", "]", ")", ")", "# CM helpers:", "def", "outputs_of", "(", "nodes", ")", ":", "\"\"\"Return all nodes that `nodes` connect to (output to).\"\"\"", "return", "np", ".", "where", "(", "cm", "[", "nodes", ",", ":", "]", ".", "sum", "(", "0", ")", ")", "[", "0", "]", "def", "inputs_to", "(", "nodes", ")", ":", "\"\"\"Return all nodes which connect to (input to) `nodes`.\"\"\"", "return", "np", ".", "where", "(", "cm", "[", ":", ",", "nodes", "]", ".", "sum", "(", "1", ")", ")", "[", "0", "]", "# Start: source node with most outputs", "sources", "=", "[", "np", ".", "argmax", "(", "cm", ".", "sum", "(", "1", ")", ")", "]", "sinks", "=", "outputs_of", "(", "sources", ")", "sink_inputs", "=", "inputs_to", "(", "sinks", ")", "while", "True", ":", "if", "np", ".", "array_equal", "(", "sink_inputs", ",", "sources", ")", ":", "# sources exclusively connect to sinks.", "# There are no other nodes which connect sink nodes,", "# hence set(sources) + set(sinks) form a component", "# which is not connected to the rest of the graph", "return", "True", "# Recompute sources, sinks, and sink_inputs", "sources", "=", "sink_inputs", "sinks", "=", "outputs_of", "(", "sources", ")", "sink_inputs", "=", "inputs_to", "(", "sinks", ")", "# Considering all output nodes?", "if", "np", ".", "array_equal", "(", "sinks", ",", "outputs", ")", ":", "return", "False" ]
Return whether ``cm`` can be arranged as a block connectivity matrix. If so, the corresponding mechanism/purview is trivially reducible. Technically, only square matrices are "block diagonal", but the notion of connectivity carries over. We test for block connectivity by trying to grow a block of nodes such that: - 'source' nodes only input to nodes in the block - 'sink' nodes only receive inputs from source nodes in the block For example, the following connectivity matrix represents connections from ``nodes1 = A, B, C`` to ``nodes2 = D, E, F, G`` (without loss of generality, note that ``nodes1`` and ``nodes2`` may share elements):: D E F G A [1, 1, 0, 0] B [1, 1, 0, 0] C [0, 0, 1, 1] Since nodes |AB| only connect to nodes |DE|, and node |C| only connects to nodes |FG|, the subgraph is reducible, because the cut :: A,B C ─── ✕ ─── D,E F,G does not change the structure of the graph.
[ "Return", "whether", "cm", "can", "be", "arranged", "as", "a", "block", "connectivity", "matrix", "." ]
python
train
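The docstring's A,B,C to D,E,F,G example can be checked directly: that matrix is block-structured, so `block_cm` should report the subgraph as reducible. A small hedged check (assumes pyphi is installed):

```python
import numpy as np
from pyphi.connectivity import block_cm

# Connections from nodes A, B, C (rows) to D, E, F, G (columns),
# exactly as in the docstring above.
cm = np.array([
    [1, 1, 0, 0],   # A -> D, E
    [1, 1, 0, 0],   # B -> D, E
    [0, 0, 1, 1],   # C -> F, G
])
print(block_cm(cm))  # True: {A,B}->{D,E} and {C}->{F,G} form separate blocks
```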
SBRG/ssbio
ssbio/pipeline/gempro.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/gempro.py#L1593-L1604
def save_protein_pickles_and_reset_protein(self): """Save all Proteins as pickle files -- currently development code for parallelization purposes. Also clears the protein attribute in all genes!""" self.gene_protein_pickles = {} for g in tqdm(self.genes): if g.protein.representative_sequence: initproteinpickle = op.join(g.protein.protein_dir, '{}_protein.pckl'.format(g.id)) g.protein.save_pickle(initproteinpickle) self.gene_protein_pickles[g.id] = initproteinpickle g.reset_protein() else: g.reset_protein()
[ "def", "save_protein_pickles_and_reset_protein", "(", "self", ")", ":", "self", ".", "gene_protein_pickles", "=", "{", "}", "for", "g", "in", "tqdm", "(", "self", ".", "genes", ")", ":", "if", "g", ".", "protein", ".", "representative_sequence", ":", "initproteinpickle", "=", "op", ".", "join", "(", "g", ".", "protein", ".", "protein_dir", ",", "'{}_protein.pckl'", ".", "format", "(", "g", ".", "id", ")", ")", "g", ".", "protein", ".", "save_pickle", "(", "initproteinpickle", ")", "self", ".", "gene_protein_pickles", "[", "g", ".", "id", "]", "=", "initproteinpickle", "g", ".", "reset_protein", "(", ")", "else", ":", "g", ".", "reset_protein", "(", ")" ]
Save all Proteins as pickle files -- currently development code for parallelization purposes. Also clears the protein attribute in all genes!
[ "Save", "all", "Proteins", "as", "pickle", "files", "--", "currently", "development", "code", "for", "parallelization", "purposes", ".", "Also", "clears", "the", "protein", "attribute", "in", "all", "genes!" ]
python
train
rochacbruno/flask_simplelogin
example/manage.py
https://github.com/rochacbruno/flask_simplelogin/blob/5b319977053649352daa87a6b0632949eee0643c/example/manage.py#L117-L128
def runserver(app=None, reloader=None, debug=None, host=None, port=None): """Run the Flask development server i.e. app.run()""" debug = debug or app.config.get('DEBUG', False) reloader = reloader or app.config.get('RELOADER', False) host = host or app.config.get('HOST', '127.0.0.1') port = port or app.config.get('PORT', 5000) app.run( use_reloader=reloader, debug=debug, host=host, port=port )
[ "def", "runserver", "(", "app", "=", "None", ",", "reloader", "=", "None", ",", "debug", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ")", ":", "debug", "=", "debug", "or", "app", ".", "config", ".", "get", "(", "'DEBUG'", ",", "False", ")", "reloader", "=", "reloader", "or", "app", ".", "config", ".", "get", "(", "'RELOADER'", ",", "False", ")", "host", "=", "host", "or", "app", ".", "config", ".", "get", "(", "'HOST'", ",", "'127.0.0.1'", ")", "port", "=", "port", "or", "app", ".", "config", ".", "get", "(", "'PORT'", ",", "5000", ")", "app", ".", "run", "(", "use_reloader", "=", "reloader", ",", "debug", "=", "debug", ",", "host", "=", "host", ",", "port", "=", "port", ")" ]
Run the Flask development server i.e. app.run()
[ "Run", "the", "Flask", "development", "server", "i", ".", "e", ".", "app", ".", "run", "()" ]
python
train
pywavefront/PyWavefront
pywavefront/obj.py
https://github.com/pywavefront/PyWavefront/blob/39ee5186cb37750d4654d19ebe43f723ecd01e2f/pywavefront/obj.py#L283-L437
def consume_faces(self, collected_faces = None): """ Consume all consecutive faces If more than three vertices are specified, we triangulate by the following procedure: Let the face have n vertices in the order v_1 v_2 v_3 ... v_n, n >= 3. We emit the first face as usual: (v_1, v_2, v_3). For each remaining vertex v_j, j > 3, we emit (v_j, v_1, v_{j - 1}), e.g. (v_4, v_1, v_3), (v_5, v_1, v_4). In a perfect world we could consume all vertices straight forward and draw using GL_TRIANGLE_FAN (which exactly matches the procedure above). This is however rarely the case. * If the face is co-planar but concave, then you need to triangulate the face. * If the face is not-coplanar, you are screwed, because OBJ doesn't preserve enough information to know what tessellation was intended. We always triangulate to make it simple. :param collected_faces: A list into which all (possibly triangulated) faces will be written in the form of triples of the corresponding absolute vertex IDs. These IDs index the list self.wavefront.vertices. Specify None to prevent consuming faces (and thus saving memory usage). """ # Helper tuple and function Vertex = namedtuple('Vertex', 'idx pos color uv normal') def emit_vertex(vertex): # Just yield all the values except for the index for v in vertex.uv: yield v for v in vertex.color: yield v for v in vertex.normal: yield v for v in vertex.pos: yield v # Figure out the format of the first vertex # We raise an exception if any following vertex has a different format # NOTE: Order is always v/vt/vn where v is mandatory and vt and vn is optional has_vt = False has_vn = False has_colors = False parts = self.values[1].split('/') # We assume texture coordinates are present if len(parts) == 2: has_vt = True # We have a vn, but not necessarily a vt elif len(parts) == 3: # Check for empty vt "1//1" if parts[1] != '': has_vt = True has_vn = True # Are we referencing vertex with color info? vindex = int(parts[0]) if vindex < 0: vindex += len(self.wavefront.vertices) else: vindex -= 1 vertex = self.wavefront.vertices[vindex] has_colors = len(vertex) == 6 # Prepare vertex format string vertex_format = "_".join(e[0] for e in [ ("T2F", has_vt), ("C3F", has_colors), ("N3F", has_vn), ("V3F", True) ] if e[1]) # If the material already have vertex data, ensure the same format is used if self.material.vertex_format and self.material.vertex_format != vertex_format: raise ValueError(( "Trying to merge vertex data with different format: {}. " "Material {} has vertex format {}" ).format(vertex_format, self.material.name, self.material.vertex_format)) self.material.vertex_format = vertex_format # The first iteration processes the current/first f statement. 
# The loop continues until there are no more f-statements or StopIteration is raised by generator while True: # The very first vertex, the last encountered and the current one v1, vlast, vcurrent = None, None, None for i, v in enumerate(self.values[1:]): parts = v.split('/') v_index = (int(parts[0]) - 1) t_index = (int(parts[1]) - 1) if has_vt else None n_index = (int(parts[2]) - 1) if has_vn else None # Resolve negative index lookups if v_index < 0: v_index += len(self.wavefront.vertices) + 1 if has_vt and t_index < 0: t_index += len(self.tex_coords) + 1 if has_vn and n_index < 0: n_index += len(self.normals) + 1 vlast = vcurrent vcurrent = Vertex( idx = v_index, pos = self.wavefront.vertices[v_index][0:3] if has_colors else self.wavefront.vertices[v_index], color = self.wavefront.vertices[v_index][3:] if has_colors else (), uv = self.tex_coords[t_index] if has_vt and t_index < len(self.tex_coords) else (), normal = self.normals[n_index] if has_vn and n_index < len(self.normals) else () ) yield from emit_vertex(vcurrent) # Triangulation when more than 3 elements are present if i >= 3: # The current vertex has already been emitted. # Now just emit the first and the third vertices from the face yield from emit_vertex(v1) yield from emit_vertex(vlast) if i == 0: # Store the first vertex v1 = vcurrent if (collected_faces is not None) and (i >= 2): if i == 2: # Append the first triangle face in usual order (i.e. as specified in the Wavefront file) collected_faces.append([v1.idx, vlast.idx, vcurrent.idx]) if i >= 3: # Triangulate the remaining part of the face by putting the current, the first # and the last parsed vertex in that order as a new face. # This order coincides deliberately with the order from vertex yielding above. collected_faces.append([vcurrent.idx, v1.idx, vlast.idx]) # Break out of the loop when there are no more f statements try: self.next_line() except StopIteration: break if not self.values: break if self.values[0] != "f": break
[ "def", "consume_faces", "(", "self", ",", "collected_faces", "=", "None", ")", ":", "# Helper tuple and function", "Vertex", "=", "namedtuple", "(", "'Vertex'", ",", "'idx pos color uv normal'", ")", "def", "emit_vertex", "(", "vertex", ")", ":", "# Just yield all the values except for the index", "for", "v", "in", "vertex", ".", "uv", ":", "yield", "v", "for", "v", "in", "vertex", ".", "color", ":", "yield", "v", "for", "v", "in", "vertex", ".", "normal", ":", "yield", "v", "for", "v", "in", "vertex", ".", "pos", ":", "yield", "v", "# Figure out the format of the first vertex", "# We raise an exception if any following vertex has a different format", "# NOTE: Order is always v/vt/vn where v is mandatory and vt and vn is optional", "has_vt", "=", "False", "has_vn", "=", "False", "has_colors", "=", "False", "parts", "=", "self", ".", "values", "[", "1", "]", ".", "split", "(", "'/'", ")", "# We assume texture coordinates are present", "if", "len", "(", "parts", ")", "==", "2", ":", "has_vt", "=", "True", "# We have a vn, but not necessarily a vt", "elif", "len", "(", "parts", ")", "==", "3", ":", "# Check for empty vt \"1//1\"", "if", "parts", "[", "1", "]", "!=", "''", ":", "has_vt", "=", "True", "has_vn", "=", "True", "# Are we referencing vertex with color info?", "vindex", "=", "int", "(", "parts", "[", "0", "]", ")", "if", "vindex", "<", "0", ":", "vindex", "+=", "len", "(", "self", ".", "wavefront", ".", "vertices", ")", "else", ":", "vindex", "-=", "1", "vertex", "=", "self", ".", "wavefront", ".", "vertices", "[", "vindex", "]", "has_colors", "=", "len", "(", "vertex", ")", "==", "6", "# Prepare vertex format string", "vertex_format", "=", "\"_\"", ".", "join", "(", "e", "[", "0", "]", "for", "e", "in", "[", "(", "\"T2F\"", ",", "has_vt", ")", ",", "(", "\"C3F\"", ",", "has_colors", ")", ",", "(", "\"N3F\"", ",", "has_vn", ")", ",", "(", "\"V3F\"", ",", "True", ")", "]", "if", "e", "[", "1", "]", ")", "# If the material already have vertex data, ensure the same format is used", "if", "self", ".", "material", ".", "vertex_format", "and", "self", ".", "material", ".", "vertex_format", "!=", "vertex_format", ":", "raise", "ValueError", "(", "(", "\"Trying to merge vertex data with different format: {}. 
\"", "\"Material {} has vertex format {}\"", ")", ".", "format", "(", "vertex_format", ",", "self", ".", "material", ".", "name", ",", "self", ".", "material", ".", "vertex_format", ")", ")", "self", ".", "material", ".", "vertex_format", "=", "vertex_format", "# The first iteration processes the current/first f statement.", "# The loop continues until there are no more f-statements or StopIteration is raised by generator", "while", "True", ":", "# The very first vertex, the last encountered and the current one", "v1", ",", "vlast", ",", "vcurrent", "=", "None", ",", "None", ",", "None", "for", "i", ",", "v", "in", "enumerate", "(", "self", ".", "values", "[", "1", ":", "]", ")", ":", "parts", "=", "v", ".", "split", "(", "'/'", ")", "v_index", "=", "(", "int", "(", "parts", "[", "0", "]", ")", "-", "1", ")", "t_index", "=", "(", "int", "(", "parts", "[", "1", "]", ")", "-", "1", ")", "if", "has_vt", "else", "None", "n_index", "=", "(", "int", "(", "parts", "[", "2", "]", ")", "-", "1", ")", "if", "has_vn", "else", "None", "# Resolve negative index lookups", "if", "v_index", "<", "0", ":", "v_index", "+=", "len", "(", "self", ".", "wavefront", ".", "vertices", ")", "+", "1", "if", "has_vt", "and", "t_index", "<", "0", ":", "t_index", "+=", "len", "(", "self", ".", "tex_coords", ")", "+", "1", "if", "has_vn", "and", "n_index", "<", "0", ":", "n_index", "+=", "len", "(", "self", ".", "normals", ")", "+", "1", "vlast", "=", "vcurrent", "vcurrent", "=", "Vertex", "(", "idx", "=", "v_index", ",", "pos", "=", "self", ".", "wavefront", ".", "vertices", "[", "v_index", "]", "[", "0", ":", "3", "]", "if", "has_colors", "else", "self", ".", "wavefront", ".", "vertices", "[", "v_index", "]", ",", "color", "=", "self", ".", "wavefront", ".", "vertices", "[", "v_index", "]", "[", "3", ":", "]", "if", "has_colors", "else", "(", ")", ",", "uv", "=", "self", ".", "tex_coords", "[", "t_index", "]", "if", "has_vt", "and", "t_index", "<", "len", "(", "self", ".", "tex_coords", ")", "else", "(", ")", ",", "normal", "=", "self", ".", "normals", "[", "n_index", "]", "if", "has_vn", "and", "n_index", "<", "len", "(", "self", ".", "normals", ")", "else", "(", ")", ")", "yield", "from", "emit_vertex", "(", "vcurrent", ")", "# Triangulation when more than 3 elements are present", "if", "i", ">=", "3", ":", "# The current vertex has already been emitted.", "# Now just emit the first and the third vertices from the face", "yield", "from", "emit_vertex", "(", "v1", ")", "yield", "from", "emit_vertex", "(", "vlast", ")", "if", "i", "==", "0", ":", "# Store the first vertex", "v1", "=", "vcurrent", "if", "(", "collected_faces", "is", "not", "None", ")", "and", "(", "i", ">=", "2", ")", ":", "if", "i", "==", "2", ":", "# Append the first triangle face in usual order (i.e. 
as specified in the Wavefront file)", "collected_faces", ".", "append", "(", "[", "v1", ".", "idx", ",", "vlast", ".", "idx", ",", "vcurrent", ".", "idx", "]", ")", "if", "i", ">=", "3", ":", "# Triangulate the remaining part of the face by putting the current, the first", "# and the last parsed vertex in that order as a new face.", "# This order coincides deliberately with the order from vertex yielding above.", "collected_faces", ".", "append", "(", "[", "vcurrent", ".", "idx", ",", "v1", ".", "idx", ",", "vlast", ".", "idx", "]", ")", "# Break out of the loop when there are no more f statements", "try", ":", "self", ".", "next_line", "(", ")", "except", "StopIteration", ":", "break", "if", "not", "self", ".", "values", ":", "break", "if", "self", ".", "values", "[", "0", "]", "!=", "\"f\"", ":", "break" ]
Consume all consecutive faces If more than three vertices are specified, we triangulate by the following procedure: Let the face have n vertices in the order v_1 v_2 v_3 ... v_n, n >= 3. We emit the first face as usual: (v_1, v_2, v_3). For each remaining vertex v_j, j > 3, we emit (v_j, v_1, v_{j - 1}), e.g. (v_4, v_1, v_3), (v_5, v_1, v_4). In a perfect world we could consume all vertices straight forward and draw using GL_TRIANGLE_FAN (which exactly matches the procedure above). This is however rarely the case. * If the face is co-planar but concave, then you need to triangulate the face. * If the face is not-coplanar, you are screwed, because OBJ doesn't preserve enough information to know what tessellation was intended. We always triangulate to make it simple. :param collected_faces: A list into which all (possibly triangulated) faces will be written in the form of triples of the corresponding absolute vertex IDs. These IDs index the list self.wavefront.vertices. Specify None to prevent consuming faces (and thus saving memory usage).
[ "Consume", "all", "consecutive", "faces" ]
python
train
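The triangulation order described in the docstring above can be illustrated with a small standalone helper; this is only a sketch of the emission order, not the parser itself.

def fan_triangles(verts):
    # First face as given, then (v_j, v_1, v_{j-1}) for every later vertex,
    # matching the procedure described in the docstring.
    tris = [(verts[0], verts[1], verts[2])]
    for j in range(3, len(verts)):
        tris.append((verts[j], verts[0], verts[j - 1]))
    return tris

fan_triangles(["v1", "v2", "v3", "v4", "v5"])
# -> [('v1', 'v2', 'v3'), ('v4', 'v1', 'v3'), ('v5', 'v1', 'v4')]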
Azure/azure-cli-extensions
src/sqlvm-preview/azext_sqlvm_preview/custom.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/sqlvm-preview/azext_sqlvm_preview/custom.py#L31-L42
def sqlvm_list( client, resource_group_name=None): ''' Lists all SQL virtual machines in a resource group or subscription. ''' if resource_group_name: # List all sql vms in the resource group return client.list_by_resource_group(resource_group_name=resource_group_name) # List all sql vms in the subscription return client.list()
[ "def", "sqlvm_list", "(", "client", ",", "resource_group_name", "=", "None", ")", ":", "if", "resource_group_name", ":", "# List all sql vms in the resource group", "return", "client", ".", "list_by_resource_group", "(", "resource_group_name", "=", "resource_group_name", ")", "# List all sql vms in the subscription", "return", "client", ".", "list", "(", ")" ]
Lists all SQL virtual machines in a resource group or subscription.
[ "Lists", "all", "SQL", "virtual", "machines", "in", "a", "resource", "group", "or", "subscription", "." ]
python
train
AnalogJ/lexicon
lexicon/providers/netcup.py
https://github.com/AnalogJ/lexicon/blob/9330b871988753cad44fe2876a217b4c67b1fa0e/lexicon/providers/netcup.py#L48-L58
def _create_record(self, rtype, name, content): """Create record. If it already exists, do nothing.""" if not self._list_records(rtype, name, content): self._update_records([{}], { 'type': rtype, 'hostname': self._relative_name(name), 'destination': content, 'priority': self._get_lexicon_option('priority'), }) LOGGER.debug('create_record: %s', True) return True
[ "def", "_create_record", "(", "self", ",", "rtype", ",", "name", ",", "content", ")", ":", "if", "not", "self", ".", "_list_records", "(", "rtype", ",", "name", ",", "content", ")", ":", "self", ".", "_update_records", "(", "[", "{", "}", "]", ",", "{", "'type'", ":", "rtype", ",", "'hostname'", ":", "self", ".", "_relative_name", "(", "name", ")", ",", "'destination'", ":", "content", ",", "'priority'", ":", "self", ".", "_get_lexicon_option", "(", "'priority'", ")", ",", "}", ")", "LOGGER", ".", "debug", "(", "'create_record: %s'", ",", "True", ")", "return", "True" ]
Create record. If it already exists, do nothing.
[ "Create", "record", ".", "If", "it", "already", "exists", "do", "nothing", "." ]
python
train
ihmeuw/vivarium
src/vivarium/interface/interactive.py
https://github.com/ihmeuw/vivarium/blob/c5f5d50f775c8bf337d3aae1ff7c57c025a8e258/src/vivarium/interface/interactive.py#L190-L192
def get_components(self) -> List: """Get a list of all components in the simulation.""" return [component for component in self.component_manager._components + self.component_manager._managers]
[ "def", "get_components", "(", "self", ")", "->", "List", ":", "return", "[", "component", "for", "component", "in", "self", ".", "component_manager", ".", "_components", "+", "self", ".", "component_manager", ".", "_managers", "]" ]
Get a list of all components in the simulation.
[ "Get", "a", "list", "of", "all", "components", "in", "the", "simulation", "." ]
python
train
sryza/spark-timeseries
python/sparkts/timeseriesrdd.py
https://github.com/sryza/spark-timeseries/blob/280aa887dc08ab114411245268f230fdabb76eec/python/sparkts/timeseriesrdd.py#L148-L156
def to_pandas_dataframe(self): """ Pulls the contents of the RDD to the driver and places them in a Pandas DataFrame. Each record in the RDD becomes and column, and the DataFrame is indexed with a DatetimeIndex generated from this RDD's index. """ pd_index = self.index().to_pandas_index() return pd.DataFrame.from_items(self.collect()).set_index(pd_index)
[ "def", "to_pandas_dataframe", "(", "self", ")", ":", "pd_index", "=", "self", ".", "index", "(", ")", ".", "to_pandas_index", "(", ")", "return", "pd", ".", "DataFrame", ".", "from_items", "(", "self", ".", "collect", "(", ")", ")", ".", "set_index", "(", "pd_index", ")" ]
Pulls the contents of the RDD to the driver and places them in a Pandas DataFrame. Each record in the RDD becomes a column, and the DataFrame is indexed with a DatetimeIndex generated from this RDD's index.
[ "Pulls", "the", "contents", "of", "the", "RDD", "to", "the", "driver", "and", "places", "them", "in", "a", "Pandas", "DataFrame", ".", "Each", "record", "in", "the", "RDD", "becomes", "and", "column", "and", "the", "DataFrame", "is", "indexed", "with", "a", "DatetimeIndex", "generated", "from", "this", "RDD", "s", "index", "." ]
python
train
idlesign/django-sitecats
sitecats/toolbox.py
https://github.com/idlesign/django-sitecats/blob/9b45e91fc0dcb63a0011780437fe28145e3ecce9/sitecats/toolbox.py#L254-L280
def register_lists(self, category_lists, lists_init_kwargs=None, editor_init_kwargs=None): """Registers CategoryList objects to handle their requests. :param list category_lists: CategoryList objects :param dict lists_init_kwargs: Attributes to apply to each of CategoryList objects """ lists_init_kwargs = lists_init_kwargs or {} editor_init_kwargs = editor_init_kwargs or {} for lst in category_lists: if isinstance(lst, string_types): # Spawn CategoryList object from base category alias. lst = self.list_cls(lst, **lists_init_kwargs) elif not isinstance(lst, CategoryList): raise SitecatsConfigurationError( '`CategoryRequestHandler.register_lists()` accepts only ' '`CategoryList` objects or category aliases.' ) if self._obj: lst.set_obj(self._obj) for name, val in lists_init_kwargs.items(): # Setting CategoryList attributes from kwargs. setattr(lst, name, val) lst.enable_editor(**editor_init_kwargs) self._lists[lst.get_id()] = lst
[ "def", "register_lists", "(", "self", ",", "category_lists", ",", "lists_init_kwargs", "=", "None", ",", "editor_init_kwargs", "=", "None", ")", ":", "lists_init_kwargs", "=", "lists_init_kwargs", "or", "{", "}", "editor_init_kwargs", "=", "editor_init_kwargs", "or", "{", "}", "for", "lst", "in", "category_lists", ":", "if", "isinstance", "(", "lst", ",", "string_types", ")", ":", "# Spawn CategoryList object from base category alias.", "lst", "=", "self", ".", "list_cls", "(", "lst", ",", "*", "*", "lists_init_kwargs", ")", "elif", "not", "isinstance", "(", "lst", ",", "CategoryList", ")", ":", "raise", "SitecatsConfigurationError", "(", "'`CategoryRequestHandler.register_lists()` accepts only '", "'`CategoryList` objects or category aliases.'", ")", "if", "self", ".", "_obj", ":", "lst", ".", "set_obj", "(", "self", ".", "_obj", ")", "for", "name", ",", "val", "in", "lists_init_kwargs", ".", "items", "(", ")", ":", "# Setting CategoryList attributes from kwargs.", "setattr", "(", "lst", ",", "name", ",", "val", ")", "lst", ".", "enable_editor", "(", "*", "*", "editor_init_kwargs", ")", "self", ".", "_lists", "[", "lst", ".", "get_id", "(", ")", "]", "=", "lst" ]
Registers CategoryList objects to handle their requests. :param list category_lists: CategoryList objects :param dict lists_init_kwargs: Attributes to apply to each of CategoryList objects
[ "Registers", "CategoryList", "objects", "to", "handle", "their", "requests", "." ]
python
train
neuropsychology/NeuroKit.py
neurokit/bio/bio_emg.py
https://github.com/neuropsychology/NeuroKit.py/blob/c9589348fbbde0fa7e986048c48f38e6b488adfe/neurokit/bio/bio_emg.py#L16-L121
def emg_process(emg, sampling_rate=1000, emg_names=None, envelope_freqs=[10, 400], envelope_lfreq=4, activation_treshold="default", activation_n_above=0.25, activation_n_below=1): """ Automated processing of EMG signal. Parameters ---------- emg : list, array or DataFrame EMG signal array. Can include multiple channels. sampling_rate : int Sampling rate (samples/second). emg_names : list List of EMG channel names. envelope_freqs : list [fc_h, fc_l], optional cutoff frequencies for the band-pass filter (in Hz). envelope_lfreq : number, optional cutoff frequency for the low-pass filter (in Hz). activation_treshold : float minimum amplitude of `x` to detect. activation_n_above : float minimum continuous time (in s) greater than or equal to `threshold` to detect (but see the parameter `n_below`). activation_n_below : float minimum time (in s) below `threshold` that will be ignored in the detection of `x` >= `threshold`. Returns ---------- processed_emg : dict Dict containing processed EMG features. Contains the EMG raw signal, the filtered signal and pulse onsets. This function is mainly a wrapper for the biosppy.emg.emg() function. Credits go to its authors. Example ---------- >>> import neurokit as nk >>> >>> processed_emg = nk.emg_process(emg_signal) Notes ---------- *Authors* - Dominique Makowski (https://github.com/DominiqueMakowski) *Dependencies* - biosppy - numpy - pandas *See Also* - BioSPPy: https://github.com/PIA-Group/BioSPPy References ----------- - None """ if emg_names is None: if isinstance(emg, pd.DataFrame): emg_names = emg.columns.values emg = np.array(emg) if len(np.shape(emg)) == 1: emg = np.array(pd.DataFrame(emg)) if emg_names is None: if np.shape(emg)[1]>1: emg_names = [] for index in range(np.shape(emg)[1]): emg_names.append("EMG_" + str(index)) else: emg_names = ["EMG"] processed_emg = {"df": pd.DataFrame()} for index, emg_chan in enumerate(emg.T): # Store Raw signal processed_emg["df"][emg_names[index] + "_Raw"] = emg_chan # Compute several features using biosppy biosppy_emg = dict(biosppy.emg.emg(emg_chan, sampling_rate=sampling_rate, show=False)) # Store EMG pulse onsets pulse_onsets = np.array([np.nan]*len(emg)) if len(biosppy_emg['onsets']) > 0: pulse_onsets[biosppy_emg['onsets']] = 1 processed_emg["df"][emg_names[index] + "_Pulse_Onsets"] = pulse_onsets processed_emg["df"][emg_names[index] + "_Filtered"] = biosppy_emg["filtered"] processed_emg[emg_names[index]] = {} processed_emg[emg_names[index]]["EMG_Pulse_Onsets"] = biosppy_emg['onsets'] # Envelope envelope = emg_linear_envelope(biosppy_emg["filtered"], sampling_rate=sampling_rate, freqs=envelope_freqs, lfreq=envelope_lfreq) processed_emg["df"][emg_names[index] + "_Envelope"] = envelope # Activation if activation_treshold == "default": activation_treshold = 1*np.std(envelope) processed_emg["df"][emg_names[index] + "_Activation"] = emg_find_activation(envelope, sampling_rate=sampling_rate, threshold=1*np.std(envelope), n_above=activation_n_above, n_below=activation_n_below) return(processed_emg)
[ "def", "emg_process", "(", "emg", ",", "sampling_rate", "=", "1000", ",", "emg_names", "=", "None", ",", "envelope_freqs", "=", "[", "10", ",", "400", "]", ",", "envelope_lfreq", "=", "4", ",", "activation_treshold", "=", "\"default\"", ",", "activation_n_above", "=", "0.25", ",", "activation_n_below", "=", "1", ")", ":", "if", "emg_names", "is", "None", ":", "if", "isinstance", "(", "emg", ",", "pd", ".", "DataFrame", ")", ":", "emg_names", "=", "emg", ".", "columns", ".", "values", "emg", "=", "np", ".", "array", "(", "emg", ")", "if", "len", "(", "np", ".", "shape", "(", "emg", ")", ")", "==", "1", ":", "emg", "=", "np", ".", "array", "(", "pd", ".", "DataFrame", "(", "emg", ")", ")", "if", "emg_names", "is", "None", ":", "if", "np", ".", "shape", "(", "emg", ")", "[", "1", "]", ">", "1", ":", "emg_names", "=", "[", "]", "for", "index", "in", "range", "(", "np", ".", "shape", "(", "emg", ")", "[", "1", "]", ")", ":", "emg_names", ".", "append", "(", "\"EMG_\"", "+", "str", "(", "index", ")", ")", "else", ":", "emg_names", "=", "[", "\"EMG\"", "]", "processed_emg", "=", "{", "\"df\"", ":", "pd", ".", "DataFrame", "(", ")", "}", "for", "index", ",", "emg_chan", "in", "enumerate", "(", "emg", ".", "T", ")", ":", "# Store Raw signal", "processed_emg", "[", "\"df\"", "]", "[", "emg_names", "[", "index", "]", "+", "\"_Raw\"", "]", "=", "emg_chan", "# Compute several features using biosppy", "biosppy_emg", "=", "dict", "(", "biosppy", ".", "emg", ".", "emg", "(", "emg_chan", ",", "sampling_rate", "=", "sampling_rate", ",", "show", "=", "False", ")", ")", "# Store EMG pulse onsets", "pulse_onsets", "=", "np", ".", "array", "(", "[", "np", ".", "nan", "]", "*", "len", "(", "emg", ")", ")", "if", "len", "(", "biosppy_emg", "[", "'onsets'", "]", ")", ">", "0", ":", "pulse_onsets", "[", "biosppy_emg", "[", "'onsets'", "]", "]", "=", "1", "processed_emg", "[", "\"df\"", "]", "[", "emg_names", "[", "index", "]", "+", "\"_Pulse_Onsets\"", "]", "=", "pulse_onsets", "processed_emg", "[", "\"df\"", "]", "[", "emg_names", "[", "index", "]", "+", "\"_Filtered\"", "]", "=", "biosppy_emg", "[", "\"filtered\"", "]", "processed_emg", "[", "emg_names", "[", "index", "]", "]", "=", "{", "}", "processed_emg", "[", "emg_names", "[", "index", "]", "]", "[", "\"EMG_Pulse_Onsets\"", "]", "=", "biosppy_emg", "[", "'onsets'", "]", "# Envelope", "envelope", "=", "emg_linear_envelope", "(", "biosppy_emg", "[", "\"filtered\"", "]", ",", "sampling_rate", "=", "sampling_rate", ",", "freqs", "=", "envelope_freqs", ",", "lfreq", "=", "envelope_lfreq", ")", "processed_emg", "[", "\"df\"", "]", "[", "emg_names", "[", "index", "]", "+", "\"_Envelope\"", "]", "=", "envelope", "# Activation", "if", "activation_treshold", "==", "\"default\"", ":", "activation_treshold", "=", "1", "*", "np", ".", "std", "(", "envelope", ")", "processed_emg", "[", "\"df\"", "]", "[", "emg_names", "[", "index", "]", "+", "\"_Activation\"", "]", "=", "emg_find_activation", "(", "envelope", ",", "sampling_rate", "=", "sampling_rate", ",", "threshold", "=", "1", "*", "np", ".", "std", "(", "envelope", ")", ",", "n_above", "=", "activation_n_above", ",", "n_below", "=", "activation_n_below", ")", "return", "(", "processed_emg", ")" ]
Automated processing of EMG signal. Parameters ---------- emg : list, array or DataFrame EMG signal array. Can include multiple channels. sampling_rate : int Sampling rate (samples/second). emg_names : list List of EMG channel names. envelope_freqs : list [fc_h, fc_l], optional cutoff frequencies for the band-pass filter (in Hz). envelope_lfreq : number, optional cutoff frequency for the low-pass filter (in Hz). activation_treshold : float minimum amplitude of `x` to detect. activation_n_above : float minimum continuous time (in s) greater than or equal to `threshold` to detect (but see the parameter `n_below`). activation_n_below : float minimum time (in s) below `threshold` that will be ignored in the detection of `x` >= `threshold`. Returns ---------- processed_emg : dict Dict containing processed EMG features. Contains the EMG raw signal, the filtered signal and pulse onsets. This function is mainly a wrapper for the biosppy.emg.emg() function. Credits go to its authors. Example ---------- >>> import neurokit as nk >>> >>> processed_emg = nk.emg_process(emg_signal) Notes ---------- *Authors* - Dominique Makowski (https://github.com/DominiqueMakowski) *Dependencies* - biosppy - numpy - pandas *See Also* - BioSPPy: https://github.com/PIA-Group/BioSPPy References ----------- - None
[ "Automated", "processing", "of", "EMG", "signal", "." ]
python
train
maxweisspoker/simplebitcoinfuncs
simplebitcoinfuncs/miscbitcoinfuncs.py
https://github.com/maxweisspoker/simplebitcoinfuncs/blob/ad332433dfcc067e86d2e77fa0c8f1a27daffb63/simplebitcoinfuncs/miscbitcoinfuncs.py#L202-L223
def inttoLEB128(intinput): ''' Convert int/long to unsigned LEB128 format hex ''' binstr = str(bin(intinput)) \ .lstrip("0b").replace("b","").replace("L","") \ .replace("'","").replace('"',"") if len(binstr) % 7: binstr = binstr.zfill(len(binstr) + 7 - (len(binstr) % 7)) bytelist = "" for i in range(len(binstr) // 7): if i < ((len(binstr) // 7) - 1): pad = "1" else: pad = "0" currbyte = binstr[(len(binstr) - (7*i + 7)):(len(binstr) - (7*i))] currbyte = pad + currbyte currbyte = dechex(int(currbyte,2)) # assert len(currbyte) == 2 bytelist = bytelist + currbyte return bytelist
[ "def", "inttoLEB128", "(", "intinput", ")", ":", "binstr", "=", "str", "(", "bin", "(", "intinput", ")", ")", ".", "lstrip", "(", "\"0b\"", ")", ".", "replace", "(", "\"b\"", ",", "\"\"", ")", ".", "replace", "(", "\"L\"", ",", "\"\"", ")", ".", "replace", "(", "\"'\"", ",", "\"\"", ")", ".", "replace", "(", "'\"'", ",", "\"\"", ")", "if", "len", "(", "binstr", ")", "%", "7", ":", "binstr", "=", "binstr", ".", "zfill", "(", "len", "(", "binstr", ")", "+", "7", "-", "(", "len", "(", "binstr", ")", "%", "7", ")", ")", "bytelist", "=", "\"\"", "for", "i", "in", "range", "(", "len", "(", "binstr", ")", "//", "7", ")", ":", "if", "i", "<", "(", "(", "len", "(", "binstr", ")", "//", "7", ")", "-", "1", ")", ":", "pad", "=", "\"1\"", "else", ":", "pad", "=", "\"0\"", "currbyte", "=", "binstr", "[", "(", "len", "(", "binstr", ")", "-", "(", "7", "*", "i", "+", "7", ")", ")", ":", "(", "len", "(", "binstr", ")", "-", "(", "7", "*", "i", ")", ")", "]", "currbyte", "=", "pad", "+", "currbyte", "currbyte", "=", "dechex", "(", "int", "(", "currbyte", ",", "2", ")", ")", "# assert len(currbyte) == 2", "bytelist", "=", "bytelist", "+", "currbyte", "return", "bytelist" ]
Convert int/long to unsigned LEB128 format hex
[ "Convert", "int", "/", "long", "to", "unsigned", "LEB128", "format", "hex" ]
python
train
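As a worked example of the encoding above: the classic LEB128 test value 624485 has low-to-high 7-bit groups 0x65, 0x0E, 0x26; the first two carry a continuation bit, giving the byte sequence e5 8e 26. Assuming the module's dechex helper renders each byte as two lower-case hex digits, the call returns:

>>> inttoLEB128(624485)
'e58e26'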
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/module.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/module.py#L1007-L1032
def get_module_at_address(self, address): """ @type address: int @param address: Memory address to query. @rtype: L{Module} @return: C{Module} object that best matches the given address. Returns C{None} if no C{Module} can be found. """ bases = self.get_module_bases() bases.sort() bases.append(long(0x10000000000000000)) # max. 64 bit address + 1 if address >= bases[0]: i = 0 max_i = len(bases) - 1 while i < max_i: begin, end = bases[i:i+2] if begin <= address < end: module = self.get_module(begin) here = module.is_address_here(address) if here is False: break else: # True or None return module i = i + 1 return None
[ "def", "get_module_at_address", "(", "self", ",", "address", ")", ":", "bases", "=", "self", ".", "get_module_bases", "(", ")", "bases", ".", "sort", "(", ")", "bases", ".", "append", "(", "long", "(", "0x10000000000000000", ")", ")", "# max. 64 bit address + 1", "if", "address", ">=", "bases", "[", "0", "]", ":", "i", "=", "0", "max_i", "=", "len", "(", "bases", ")", "-", "1", "while", "i", "<", "max_i", ":", "begin", ",", "end", "=", "bases", "[", "i", ":", "i", "+", "2", "]", "if", "begin", "<=", "address", "<", "end", ":", "module", "=", "self", ".", "get_module", "(", "begin", ")", "here", "=", "module", ".", "is_address_here", "(", "address", ")", "if", "here", "is", "False", ":", "break", "else", ":", "# True or None", "return", "module", "i", "=", "i", "+", "1", "return", "None" ]
@type address: int @param address: Memory address to query. @rtype: L{Module} @return: C{Module} object that best matches the given address. Returns C{None} if no C{Module} can be found.
[ "@type", "address", ":", "int", "@param", "address", ":", "Memory", "address", "to", "query", "." ]
python
train
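The lookup above walks a sorted list of module base addresses and checks which [base, next_base) interval contains the address, then confirms with the module's own is_address_here. The interval idea on its own (base addresses below are illustrative) looks like this:

import bisect

bases = [0x00400000, 0x10000000, 0x7ff00000]   # sorted module base addresses
address = 0x10001234
i = bisect.bisect_right(bases, address) - 1    # greatest base <= address
candidate_base = bases[i] if i >= 0 else None  # -> 0x10000000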
tjcsl/cslbot
cslbot/commands/repost.py
https://github.com/tjcsl/cslbot/blob/aebe07be47141f61d7c180706bddfb707f19b2b5/cslbot/commands/repost.py#L25-L32
def cmd(send, msg, args): """Reposts a url. Syntax: {command} """ result = args['db'].query(Urls).order_by(func.random()).first() send("%s" % result.url)
[ "def", "cmd", "(", "send", ",", "msg", ",", "args", ")", ":", "result", "=", "args", "[", "'db'", "]", ".", "query", "(", "Urls", ")", ".", "order_by", "(", "func", ".", "random", "(", ")", ")", ".", "first", "(", ")", "send", "(", "\"%s\"", "%", "result", ".", "url", ")" ]
Reposts a url. Syntax: {command}
[ "Reposts", "a", "url", "." ]
python
train
mattja/sdeint
sdeint/wiener.py
https://github.com/mattja/sdeint/blob/7cf807cdf97b3bb39d29e1c2dc834b519499b601/sdeint/wiener.py#L144-L149
def _unvec(vecA, m=None): """inverse of _vec() operator""" N = vecA.shape[0] if m is None: m = np.sqrt(vecA.shape[1] + 0.25).astype(np.int64) return vecA.reshape((N, m, -1), order='F')
[ "def", "_unvec", "(", "vecA", ",", "m", "=", "None", ")", ":", "N", "=", "vecA", ".", "shape", "[", "0", "]", "if", "m", "is", "None", ":", "m", "=", "np", ".", "sqrt", "(", "vecA", ".", "shape", "[", "1", "]", "+", "0.25", ")", ".", "astype", "(", "np", ".", "int64", ")", "return", "vecA", ".", "reshape", "(", "(", "N", ",", "m", ",", "-", "1", ")", ",", "order", "=", "'F'", ")" ]
inverse of _vec() operator
[ "inverse", "of", "_vec", "()", "operator" ]
python
train
facelessuser/soupsieve
soupsieve/css_match.py
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/css_match.py#L1411-L1415
def select_one(self, tag): """Select a single tag.""" tags = self.select(tag, limit=1) return tags[0] if tags else None
[ "def", "select_one", "(", "self", ",", "tag", ")", ":", "tags", "=", "self", ".", "select", "(", "tag", ",", "limit", "=", "1", ")", "return", "tags", "[", "0", "]", "if", "tags", "else", "None" ]
Select a single tag.
[ "Select", "a", "single", "tag", "." ]
python
train
apriha/lineage
src/lineage/snps.py
https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L234-L269
def _read_ftdna(file): """ Read and parse Family Tree DNA (FTDNA) file. https://www.familytreedna.com Parameters ---------- file : str path to file Returns ------- pandas.DataFrame individual's genetic data normalized for use with `lineage` str name of data source """ df = pd.read_csv( file, skiprows=1, na_values="--", names=["rsid", "chrom", "pos", "genotype"], index_col=0, dtype={"chrom": object}, ) # remove incongruous data df = df.drop(df.loc[df["chrom"] == "0"].index) df = df.drop( df.loc[df.index == "RSID"].index ) # second header for concatenated data # if second header existed, pos dtype will be object (should be np.int64) df["pos"] = df["pos"].astype(np.int64) return sort_snps(df), "FTDNA"
[ "def", "_read_ftdna", "(", "file", ")", ":", "df", "=", "pd", ".", "read_csv", "(", "file", ",", "skiprows", "=", "1", ",", "na_values", "=", "\"--\"", ",", "names", "=", "[", "\"rsid\"", ",", "\"chrom\"", ",", "\"pos\"", ",", "\"genotype\"", "]", ",", "index_col", "=", "0", ",", "dtype", "=", "{", "\"chrom\"", ":", "object", "}", ",", ")", "# remove incongruous data", "df", "=", "df", ".", "drop", "(", "df", ".", "loc", "[", "df", "[", "\"chrom\"", "]", "==", "\"0\"", "]", ".", "index", ")", "df", "=", "df", ".", "drop", "(", "df", ".", "loc", "[", "df", ".", "index", "==", "\"RSID\"", "]", ".", "index", ")", "# second header for concatenated data", "# if second header existed, pos dtype will be object (should be np.int64)", "df", "[", "\"pos\"", "]", "=", "df", "[", "\"pos\"", "]", ".", "astype", "(", "np", ".", "int64", ")", "return", "sort_snps", "(", "df", ")", ",", "\"FTDNA\"" ]
Read and parse Family Tree DNA (FTDNA) file. https://www.familytreedna.com Parameters ---------- file : str path to file Returns ------- pandas.DataFrame individual's genetic data normalized for use with `lineage` str name of data source
[ "Read", "and", "parse", "Family", "Tree", "DNA", "(", "FTDNA", ")", "file", "." ]
python
train
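A self-contained sketch of the same pandas call on a hypothetical two-SNP excerpt in FTDNA's CSV layout (the header text and rsids here are illustrative, not taken from a real export):

import io
import numpy as np
import pandas as pd

raw = io.StringIO(
    "RSID,CHROMOSOME,POSITION,RESULT\n"   # header line, skipped below
    "rs0000001,1,752566,AA\n"
    "rs0000002,1,776546,--\n"             # '--' becomes NaN via na_values
)
df = pd.read_csv(raw, skiprows=1, na_values="--",
                 names=["rsid", "chrom", "pos", "genotype"],
                 index_col=0, dtype={"chrom": object})
df["pos"] = df["pos"].astype(np.int64)    # mirrors the cast in _read_ftdna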
tinybike/weightedstats
weightedstats/__init__.py
https://github.com/tinybike/weightedstats/blob/0e2638099dba7f288a1553a83e957a95522229da/weightedstats/__init__.py#L89-L104
def numpy_weighted_median(data, weights=None): """Calculate the weighted median of an array/list using numpy.""" import numpy as np if weights is None: return np.median(np.array(data).flatten()) data, weights = np.array(data).flatten(), np.array(weights).flatten() if any(weights > 0): sorted_data, sorted_weights = map(np.array, zip(*sorted(zip(data, weights)))) midpoint = 0.5 * sum(sorted_weights) if any(weights > midpoint): return (data[weights == np.max(weights)])[0] cumulative_weight = np.cumsum(sorted_weights) below_midpoint_index = np.where(cumulative_weight <= midpoint)[0][-1] if cumulative_weight[below_midpoint_index] - midpoint < sys.float_info.epsilon: return np.mean(sorted_data[below_midpoint_index:below_midpoint_index+2]) return sorted_data[below_midpoint_index+1]
[ "def", "numpy_weighted_median", "(", "data", ",", "weights", "=", "None", ")", ":", "import", "numpy", "as", "np", "if", "weights", "is", "None", ":", "return", "np", ".", "median", "(", "np", ".", "array", "(", "data", ")", ".", "flatten", "(", ")", ")", "data", ",", "weights", "=", "np", ".", "array", "(", "data", ")", ".", "flatten", "(", ")", ",", "np", ".", "array", "(", "weights", ")", ".", "flatten", "(", ")", "if", "any", "(", "weights", ">", "0", ")", ":", "sorted_data", ",", "sorted_weights", "=", "map", "(", "np", ".", "array", ",", "zip", "(", "*", "sorted", "(", "zip", "(", "data", ",", "weights", ")", ")", ")", ")", "midpoint", "=", "0.5", "*", "sum", "(", "sorted_weights", ")", "if", "any", "(", "weights", ">", "midpoint", ")", ":", "return", "(", "data", "[", "weights", "==", "np", ".", "max", "(", "weights", ")", "]", ")", "[", "0", "]", "cumulative_weight", "=", "np", ".", "cumsum", "(", "sorted_weights", ")", "below_midpoint_index", "=", "np", ".", "where", "(", "cumulative_weight", "<=", "midpoint", ")", "[", "0", "]", "[", "-", "1", "]", "if", "cumulative_weight", "[", "below_midpoint_index", "]", "-", "midpoint", "<", "sys", ".", "float_info", ".", "epsilon", ":", "return", "np", ".", "mean", "(", "sorted_data", "[", "below_midpoint_index", ":", "below_midpoint_index", "+", "2", "]", ")", "return", "sorted_data", "[", "below_midpoint_index", "+", "1", "]" ]
Calculate the weighted median of an array/list using numpy.
[ "Calculate", "the", "weighted", "median", "of", "an", "array", "/", "list", "using", "numpy", "." ]
python
train
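A quick check of the dominant-weight shortcut in the function above: an element holding more than half of the total weight is returned directly. The import name is assumed to be weightedstats, matching the repository layout.

import weightedstats as ws

# weights sum to 8 and the last element carries 5 > 4 = half the total,
# so the early "weights > midpoint" branch returns it directly:
ws.numpy_weighted_median([10, 20, 30, 40], weights=[1, 1, 1, 5])   # -> 40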
python-cmd2/cmd2
examples/paged_output.py
https://github.com/python-cmd2/cmd2/blob/b22c0bd891ed08c8b09df56df9d91f48166a5e2a/examples/paged_output.py#L28-L36
def do_page_wrap(self, args: List[str]): """Read in a text file and display its output in a pager, wrapping long lines if they don't fit. Usage: page_wrap <file_path> """ if not args: self.perror('page_wrap requires a path to a file as an argument', traceback_war=False) return self.page_file(args[0], chop=False)
[ "def", "do_page_wrap", "(", "self", ",", "args", ":", "List", "[", "str", "]", ")", ":", "if", "not", "args", ":", "self", ".", "perror", "(", "'page_wrap requires a path to a file as an argument'", ",", "traceback_war", "=", "False", ")", "return", "self", ".", "page_file", "(", "args", "[", "0", "]", ",", "chop", "=", "False", ")" ]
Read in a text file and display its output in a pager, wrapping long lines if they don't fit. Usage: page_wrap <file_path>
[ "Read", "in", "a", "text", "file", "and", "display", "its", "output", "in", "a", "pager", "wrapping", "long", "lines", "if", "they", "don", "t", "fit", "." ]
python
train
tcalmant/ipopo
pelix/threadpool.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/threadpool.py#L431-L447
def clear(self): """ Empties the current queue content. Returns once the queue have been emptied. """ with self.__lock: # Empty the current queue try: while True: self._queue.get_nowait() self._queue.task_done() except queue.Empty: # Queue is now empty pass # Wait for the tasks currently executed self.join()
[ "def", "clear", "(", "self", ")", ":", "with", "self", ".", "__lock", ":", "# Empty the current queue", "try", ":", "while", "True", ":", "self", ".", "_queue", ".", "get_nowait", "(", ")", "self", ".", "_queue", ".", "task_done", "(", ")", "except", "queue", ".", "Empty", ":", "# Queue is now empty", "pass", "# Wait for the tasks currently executed", "self", ".", "join", "(", ")" ]
Empties the current queue content. Returns once the queue has been emptied.
[ "Empties", "the", "current", "queue", "content", ".", "Returns", "once", "the", "queue", "have", "been", "emptied", "." ]
python
train
rocky/python3-trepan
trepan/inout/tcpserver.py
https://github.com/rocky/python3-trepan/blob/14e91bc0acce090d67be145b1ac040cab92ac5f3/trepan/inout/tcpserver.py#L150-L162
def write(self, msg): """ This method the debugger uses to write. In contrast to writeline, no newline is added to the end to `str'. Also msg doesn't have to be a string. """ if self.state != 'connected': self.wait_for_connect() pass buffer = Mtcpfns.pack_msg(msg) while len(buffer) > Mtcpfns.TCP_MAX_PACKET: self.conn.send(buffer[:Mtcpfns.TCP_MAX_PACKET]) buffer = buffer[Mtcpfns.TCP_MAX_PACKET:] return self.conn.send(buffer)
[ "def", "write", "(", "self", ",", "msg", ")", ":", "if", "self", ".", "state", "!=", "'connected'", ":", "self", ".", "wait_for_connect", "(", ")", "pass", "buffer", "=", "Mtcpfns", ".", "pack_msg", "(", "msg", ")", "while", "len", "(", "buffer", ")", ">", "Mtcpfns", ".", "TCP_MAX_PACKET", ":", "self", ".", "conn", ".", "send", "(", "buffer", "[", ":", "Mtcpfns", ".", "TCP_MAX_PACKET", "]", ")", "buffer", "=", "buffer", "[", "Mtcpfns", ".", "TCP_MAX_PACKET", ":", "]", "return", "self", ".", "conn", ".", "send", "(", "buffer", ")" ]
This method the debugger uses to write. In contrast to writeline, no newline is added to the end of `str'. Also msg doesn't have to be a string.
[ "This", "method", "the", "debugger", "uses", "to", "write", ".", "In", "contrast", "to", "writeline", "no", "newline", "is", "added", "to", "the", "end", "to", "str", ".", "Also", "msg", "doesn", "t", "have", "to", "be", "a", "string", "." ]
python
test
lanpa/tensorboardX
examples/chainer/plain_logger/net.py
https://github.com/lanpa/tensorboardX/blob/0bf6c07d97b0745654fd9fab8ee3261ec707f253/examples/chainer/plain_logger/net.py#L41-L65
def get_loss_func(self, C=1.0, k=1): """Get loss function of VAE. The loss value is equal to ELBO (Evidence Lower Bound) multiplied by -1. Args: C (int): Usually this is 1.0. Can be changed to control the second term of ELBO bound, which works as regularization. k (int): Number of Monte Carlo samples used in encoded vector. """ def lf(x): mu, ln_var = self.encode(x) batchsize = len(mu.data) # reconstruction loss rec_loss = 0 for l in six.moves.range(k): z = F.gaussian(mu, ln_var) rec_loss += F.bernoulli_nll(x, self.decode(z, sigmoid=False)) \ / (k * batchsize) self.rec_loss = rec_loss self.loss = self.rec_loss + \ C * gaussian_kl_divergence(mu, ln_var) / batchsize return self.loss return lf
[ "def", "get_loss_func", "(", "self", ",", "C", "=", "1.0", ",", "k", "=", "1", ")", ":", "def", "lf", "(", "x", ")", ":", "mu", ",", "ln_var", "=", "self", ".", "encode", "(", "x", ")", "batchsize", "=", "len", "(", "mu", ".", "data", ")", "# reconstruction loss", "rec_loss", "=", "0", "for", "l", "in", "six", ".", "moves", ".", "range", "(", "k", ")", ":", "z", "=", "F", ".", "gaussian", "(", "mu", ",", "ln_var", ")", "rec_loss", "+=", "F", ".", "bernoulli_nll", "(", "x", ",", "self", ".", "decode", "(", "z", ",", "sigmoid", "=", "False", ")", ")", "/", "(", "k", "*", "batchsize", ")", "self", ".", "rec_loss", "=", "rec_loss", "self", ".", "loss", "=", "self", ".", "rec_loss", "+", "C", "*", "gaussian_kl_divergence", "(", "mu", ",", "ln_var", ")", "/", "batchsize", "return", "self", ".", "loss", "return", "lf" ]
Get loss function of VAE. The loss value is equal to ELBO (Evidence Lower Bound) multiplied by -1. Args: C (int): Usually this is 1.0. Can be changed to control the second term of ELBO bound, which works as regularization. k (int): Number of Monte Carlo samples used in encoded vector.
[ "Get", "loss", "function", "of", "VAE", "." ]
python
train
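The loss assembled above is the negative ELBO: a Bernoulli reconstruction term plus C times the KL divergence between the encoder's diagonal Gaussian and a standard normal. A framework-free sketch of those two quantities (names here are illustrative, not Chainer's API):

import numpy as np

def kl_to_standard_normal(mu, ln_var):
    # KL( N(mu, exp(ln_var)) || N(0, I) ), summed over all latent dimensions
    return 0.5 * np.sum(np.exp(ln_var) + mu**2 - ln_var - 1.0)

def bernoulli_nll(x, logits):
    # -log p(x | sigmoid(logits)), summed; softplus(logits) - x*logits per element
    return np.sum(np.logaddexp(0.0, logits) - x * logits)

# For one Monte Carlo sample z and batch size B (k = 1 in the code above):
# loss = bernoulli_nll(x, decoder_logits) / B + C * kl_to_standard_normal(mu, ln_var) / B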
spyder-ide/spyder
spyder/plugins/console/plugin.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/console/plugin.py#L193-L198
def register_plugin(self): """Register plugin in Spyder's main window""" self.focus_changed.connect(self.main.plugin_focus_changed) self.main.add_dockwidget(self) # Connecting the following signal once the dockwidget has been created: self.shell.exception_occurred.connect(self.exception_occurred)
[ "def", "register_plugin", "(", "self", ")", ":", "self", ".", "focus_changed", ".", "connect", "(", "self", ".", "main", ".", "plugin_focus_changed", ")", "self", ".", "main", ".", "add_dockwidget", "(", "self", ")", "# Connecting the following signal once the dockwidget has been created:\r", "self", ".", "shell", ".", "exception_occurred", ".", "connect", "(", "self", ".", "exception_occurred", ")" ]
Register plugin in Spyder's main window
[ "Register", "plugin", "in", "Spyder", "s", "main", "window" ]
python
train
trailofbits/manticore
manticore/platforms/linux.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/platforms/linux.py#L1652-L1670
def sys_readlink(self, path, buf, bufsize): """ Read :rtype: int :param path: the "link path id" :param buf: the buffer where the bytes will be putted. :param bufsize: the max size for read the link. :todo: Out eax number of bytes actually sent | EAGAIN | EBADF | EFAULT | EINTR | errno.EINVAL | EIO | ENOSPC | EPIPE """ if bufsize <= 0: return -errno.EINVAL filename = self.current.read_string(path) if filename == '/proc/self/exe': data = os.path.abspath(self.program) else: data = os.readlink(filename)[:bufsize] self.current.write_bytes(buf, data) return len(data)
[ "def", "sys_readlink", "(", "self", ",", "path", ",", "buf", ",", "bufsize", ")", ":", "if", "bufsize", "<=", "0", ":", "return", "-", "errno", ".", "EINVAL", "filename", "=", "self", ".", "current", ".", "read_string", "(", "path", ")", "if", "filename", "==", "'/proc/self/exe'", ":", "data", "=", "os", ".", "path", ".", "abspath", "(", "self", ".", "program", ")", "else", ":", "data", "=", "os", ".", "readlink", "(", "filename", ")", "[", ":", "bufsize", "]", "self", ".", "current", ".", "write_bytes", "(", "buf", ",", "data", ")", "return", "len", "(", "data", ")" ]
Read :rtype: int :param path: the "link path id" :param buf: the buffer where the bytes will be putted. :param bufsize: the max size for read the link. :todo: Out eax number of bytes actually sent | EAGAIN | EBADF | EFAULT | EINTR | errno.EINVAL | EIO | ENOSPC | EPIPE
[ "Read", ":", "rtype", ":", "int" ]
python
valid
berkeley-cocosci/Wallace
wallace/command_line.py
https://github.com/berkeley-cocosci/Wallace/blob/3650c0bc3b0804d0adb1d178c5eba9992babb1b0/wallace/command_line.py#L724-L799
def export(app, local): """Export the data.""" print_header() log("Preparing to export the data...") id = str(app) subdata_path = os.path.join("data", id, "data") # Create the data package os.makedirs(subdata_path) # Copy the experiment code into a code/ subdirectory try: shutil.copyfile( os.path.join("snapshots", id + "-code.zip"), os.path.join("data", id, id + "-code.zip") ) except: pass # Copy in the DATA readme. # open(os.path.join(id, "README.txt"), "a").close() # Save the experiment id. with open(os.path.join("data", id, "experiment_id.md"), "a+") as file: file.write(id) if not local: # Export the logs subprocess.call( "heroku logs " + "-n 10000 > " + os.path.join("data", id, "server_logs.md") + " --app " + id, shell=True) dump_path = dump_database(id) subprocess.call( "pg_restore --verbose --clean -d wallace " + os.path.join("data", id) + "/data.dump", shell=True) all_tables = [ "node", "network", "vector", "info", "transformation", "transmission", "participant", "notification", "question" ] for table in all_tables: subprocess.call( "psql -d wallace --command=\"\\copy " + table + " to \'" + os.path.join(subdata_path, table) + ".csv\' csv header\"", shell=True) if not local: os.remove(dump_path) log("Zipping up the package...") shutil.make_archive( os.path.join("data", id + "-data"), "zip", os.path.join("data", id) ) shutil.rmtree(os.path.join("data", id)) log("Done. Data available in " + str(id) + ".zip")
[ "def", "export", "(", "app", ",", "local", ")", ":", "print_header", "(", ")", "log", "(", "\"Preparing to export the data...\"", ")", "id", "=", "str", "(", "app", ")", "subdata_path", "=", "os", ".", "path", ".", "join", "(", "\"data\"", ",", "id", ",", "\"data\"", ")", "# Create the data package", "os", ".", "makedirs", "(", "subdata_path", ")", "# Copy the experiment code into a code/ subdirectory", "try", ":", "shutil", ".", "copyfile", "(", "os", ".", "path", ".", "join", "(", "\"snapshots\"", ",", "id", "+", "\"-code.zip\"", ")", ",", "os", ".", "path", ".", "join", "(", "\"data\"", ",", "id", ",", "id", "+", "\"-code.zip\"", ")", ")", "except", ":", "pass", "# Copy in the DATA readme.", "# open(os.path.join(id, \"README.txt\"), \"a\").close()", "# Save the experiment id.", "with", "open", "(", "os", ".", "path", ".", "join", "(", "\"data\"", ",", "id", ",", "\"experiment_id.md\"", ")", ",", "\"a+\"", ")", "as", "file", ":", "file", ".", "write", "(", "id", ")", "if", "not", "local", ":", "# Export the logs", "subprocess", ".", "call", "(", "\"heroku logs \"", "+", "\"-n 10000 > \"", "+", "os", ".", "path", ".", "join", "(", "\"data\"", ",", "id", ",", "\"server_logs.md\"", ")", "+", "\" --app \"", "+", "id", ",", "shell", "=", "True", ")", "dump_path", "=", "dump_database", "(", "id", ")", "subprocess", ".", "call", "(", "\"pg_restore --verbose --clean -d wallace \"", "+", "os", ".", "path", ".", "join", "(", "\"data\"", ",", "id", ")", "+", "\"/data.dump\"", ",", "shell", "=", "True", ")", "all_tables", "=", "[", "\"node\"", ",", "\"network\"", ",", "\"vector\"", ",", "\"info\"", ",", "\"transformation\"", ",", "\"transmission\"", ",", "\"participant\"", ",", "\"notification\"", ",", "\"question\"", "]", "for", "table", "in", "all_tables", ":", "subprocess", ".", "call", "(", "\"psql -d wallace --command=\\\"\\\\copy \"", "+", "table", "+", "\" to \\'\"", "+", "os", ".", "path", ".", "join", "(", "subdata_path", ",", "table", ")", "+", "\".csv\\' csv header\\\"\"", ",", "shell", "=", "True", ")", "if", "not", "local", ":", "os", ".", "remove", "(", "dump_path", ")", "log", "(", "\"Zipping up the package...\"", ")", "shutil", ".", "make_archive", "(", "os", ".", "path", ".", "join", "(", "\"data\"", ",", "id", "+", "\"-data\"", ")", ",", "\"zip\"", ",", "os", ".", "path", ".", "join", "(", "\"data\"", ",", "id", ")", ")", "shutil", ".", "rmtree", "(", "os", ".", "path", ".", "join", "(", "\"data\"", ",", "id", ")", ")", "log", "(", "\"Done. Data available in \"", "+", "str", "(", "id", ")", "+", "\".zip\"", ")" ]
Export the data.
[ "Export", "the", "data", "." ]
python
train
Jasily/jasily-python
jasily/utils/__init__.py
https://github.com/Jasily/jasily-python/blob/1c821a120ebbbbc3c5761f5f1e8a73588059242a/jasily/utils/__init__.py#L21-L32
def get_parent(obj): ''' get parent from obj. ''' names = obj.__qualname__.split('.')[:-1] if '<locals>' in names: # locals function raise ValueError('cannot get parent from locals object.') module = sys.modules[obj.__module__] parent = module while names: parent = getattr(parent, names.pop(0)) return parent
[ "def", "get_parent", "(", "obj", ")", ":", "names", "=", "obj", ".", "__qualname__", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", "if", "'<locals>'", "in", "names", ":", "# locals function", "raise", "ValueError", "(", "'cannot get parent from locals object.'", ")", "module", "=", "sys", ".", "modules", "[", "obj", ".", "__module__", "]", "parent", "=", "module", "while", "names", ":", "parent", "=", "getattr", "(", "parent", ",", "names", ".", "pop", "(", "0", ")", ")", "return", "parent" ]
get parent from obj.
[ "get", "parent", "from", "obj", "." ]
python
test
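A small illustration of the __qualname__ walk performed above, assuming the classes are defined at module top level (an object defined inside a function would contain '<locals>' in its qualname and raise ValueError):

class Outer:
    class Inner:
        def method(self):
            pass

# Outer.Inner.method.__qualname__ == 'Outer.Inner.method', so the names to walk
# are ['Outer', 'Inner'] and the resolved parent is the Inner class:
assert get_parent(Outer.Inner.method) is Outer.Inner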
diging/tethne
tethne/classes/corpus.py
https://github.com/diging/tethne/blob/ba10eeb264b7a3f2dbcce71cfd5cb2d6bbf7055f/tethne/classes/corpus.py#L294-L326
def _generate_index(self, paper): """ If the ``index_by`` field is not set or not available, generate a unique identifier using the :class:`.Paper`\'s title and author names. """ if self.index_by is None or not hasattr(paper, self.index_by): if not hasattr(paper, 'hashIndex'): # Generate a new index for this paper. m = hashlib.md5() # If we dont have author name then we just use the title of the paper # to generate unique identifier. if paper.authors is None: hashable = paper.title else: if hasattr(paper, 'title'): title = paper.title else: title = '' if len(paper.authors) == 0: hashable = title else: authors = list(zip(*paper.authors))[0] hashable = u' '.join(list([title] + [l + f for l, f in authors])) m.update(hashable.encode('utf-8')) setattr(paper, 'hashIndex', m.hexdigest()) return getattr(paper, 'hashIndex') identifier = getattr(paper, self.index_by) if type(identifier) is list: identifier = identifier[0] if self.index_by == 'link': _, identifier = os.path.split(identifier) return identifier
[ "def", "_generate_index", "(", "self", ",", "paper", ")", ":", "if", "self", ".", "index_by", "is", "None", "or", "not", "hasattr", "(", "paper", ",", "self", ".", "index_by", ")", ":", "if", "not", "hasattr", "(", "paper", ",", "'hashIndex'", ")", ":", "# Generate a new index for this paper.", "m", "=", "hashlib", ".", "md5", "(", ")", "# If we dont have author name then we just use the title of the paper", "# to generate unique identifier.", "if", "paper", ".", "authors", "is", "None", ":", "hashable", "=", "paper", ".", "title", "else", ":", "if", "hasattr", "(", "paper", ",", "'title'", ")", ":", "title", "=", "paper", ".", "title", "else", ":", "title", "=", "''", "if", "len", "(", "paper", ".", "authors", ")", "==", "0", ":", "hashable", "=", "title", "else", ":", "authors", "=", "list", "(", "zip", "(", "*", "paper", ".", "authors", ")", ")", "[", "0", "]", "hashable", "=", "u' '", ".", "join", "(", "list", "(", "[", "title", "]", "+", "[", "l", "+", "f", "for", "l", ",", "f", "in", "authors", "]", ")", ")", "m", ".", "update", "(", "hashable", ".", "encode", "(", "'utf-8'", ")", ")", "setattr", "(", "paper", ",", "'hashIndex'", ",", "m", ".", "hexdigest", "(", ")", ")", "return", "getattr", "(", "paper", ",", "'hashIndex'", ")", "identifier", "=", "getattr", "(", "paper", ",", "self", ".", "index_by", ")", "if", "type", "(", "identifier", ")", "is", "list", ":", "identifier", "=", "identifier", "[", "0", "]", "if", "self", ".", "index_by", "==", "'link'", ":", "_", ",", "identifier", "=", "os", ".", "path", ".", "split", "(", "identifier", ")", "return", "identifier" ]
If the ``index_by`` field is not set or not available, generate a unique identifier using the :class:`.Paper`\'s title and author names.
[ "If", "the", "index_by", "field", "is", "not", "set", "or", "not", "available", "generate", "a", "unique", "identifier", "using", "the", ":", "class", ":", ".", "Paper", "\\", "s", "title", "and", "author", "names", "." ]
python
train
gccxml/pygccxml
pygccxml/parser/directory_cache.py
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/parser/directory_cache.py#L519-L526
def update_id_counter(self): """Update the `id_` counter so that it doesn't grow forever. """ if not self.__entries: self.__next_id = 1 else: self.__next_id = max(self.__entries.keys()) + 1
[ "def", "update_id_counter", "(", "self", ")", ":", "if", "not", "self", ".", "__entries", ":", "self", ".", "__next_id", "=", "1", "else", ":", "self", ".", "__next_id", "=", "max", "(", "self", ".", "__entries", ".", "keys", "(", ")", ")", "+", "1" ]
Update the `id_` counter so that it doesn't grow forever.
[ "Update", "the", "id_", "counter", "so", "that", "it", "doesn", "t", "grow", "forever", "." ]
python
train
nicolas-van/mailflash
mailflash.py
https://github.com/nicolas-van/mailflash/blob/794598d9df0e343bb1f64b03d09a68a540229774/mailflash.py#L463-L488
def record_messages(self): """Records all messages. Use in unit tests for example:: with mail.record_messages() as outbox: response = app.test_client.get("/email-sending-view/") assert len(outbox) == 1 assert outbox[0].subject == "testing" You must have blinker installed in order to use this feature. :versionadded: 0.4 """ if not email_dispatched: raise RuntimeError("blinker must be installed") outbox = [] def _record(message, mail): outbox.append(message) email_dispatched.connect(_record) try: yield outbox finally: email_dispatched.disconnect(_record)
[ "def", "record_messages", "(", "self", ")", ":", "if", "not", "email_dispatched", ":", "raise", "RuntimeError", "(", "\"blinker must be installed\"", ")", "outbox", "=", "[", "]", "def", "_record", "(", "message", ",", "mail", ")", ":", "outbox", ".", "append", "(", "message", ")", "email_dispatched", ".", "connect", "(", "_record", ")", "try", ":", "yield", "outbox", "finally", ":", "email_dispatched", ".", "disconnect", "(", "_record", ")" ]
Records all messages. Use in unit tests for example:: with mail.record_messages() as outbox: response = app.test_client.get("/email-sending-view/") assert len(outbox) == 1 assert outbox[0].subject == "testing" You must have blinker installed in order to use this feature. :versionadded: 0.4
[ "Records", "all", "messages", ".", "Use", "in", "unit", "tests", "for", "example", "::" ]
python
test
mbedmicro/pyOCD
pyocd/utility/progress.py
https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/utility/progress.py#L125-L146
def print_progress(file=None): """! @brief Progress printer factory. This factory function checks whether the output file is a TTY, and instantiates the appropriate subclass of ProgressReport. @param file The output file. Optional. If not provided, or if set to None, then sys.stdout will be used automatically. """ if file is None: file = sys.stdout try: istty = os.isatty(file.fileno()) except (OSError, AttributeError): # Either the file doesn't have a fileno method, or calling it returned an # error. In either case, just assume we're not connected to a TTY. istty = False klass = ProgressReportTTY if istty else ProgressReportNoTTY return klass(file)
[ "def", "print_progress", "(", "file", "=", "None", ")", ":", "if", "file", "is", "None", ":", "file", "=", "sys", ".", "stdout", "try", ":", "istty", "=", "os", ".", "isatty", "(", "file", ".", "fileno", "(", ")", ")", "except", "(", "OSError", ",", "AttributeError", ")", ":", "# Either the file doesn't have a fileno method, or calling it returned an", "# error. In either case, just assume we're not connected to a TTY.", "istty", "=", "False", "klass", "=", "ProgressReportTTY", "if", "istty", "else", "ProgressReportNoTTY", "return", "klass", "(", "file", ")" ]
! @brief Progress printer factory. This factory function checks whether the output file is a TTY, and instantiates the appropriate subclass of ProgressReport. @param file The output file. Optional. If not provided, or if set to None, then sys.stdout will be used automatically.
[ "!" ]
python
train
wummel/linkchecker
third_party/dnspython/dns/resolver.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/third_party/dnspython/dns/resolver.py#L231-L244
def get(self, key): """Get the answer associated with I{key}. Returns None if no answer is cached for the key. @param key: the key @type key: (dns.name.Name, int, int) tuple whose values are the query name, rdtype, and rdclass. @rtype: dns.resolver.Answer object or None """ self.maybe_clean() v = self.data.get(key) if v is None or v.expiration <= time.time(): return None return v
[ "def", "get", "(", "self", ",", "key", ")", ":", "self", ".", "maybe_clean", "(", ")", "v", "=", "self", ".", "data", ".", "get", "(", "key", ")", "if", "v", "is", "None", "or", "v", ".", "expiration", "<=", "time", ".", "time", "(", ")", ":", "return", "None", "return", "v" ]
Get the answer associated with I{key}. Returns None if no answer is cached for the key. @param key: the key @type key: (dns.name.Name, int, int) tuple whose values are the query name, rdtype, and rdclass. @rtype: dns.resolver.Answer object or None
[ "Get", "the", "answer", "associated", "with", "I", "{", "key", "}", ".", "Returns", "None", "if", "no", "answer", "is", "cached", "for", "the", "key", "." ]
python
train
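A hedged usage sketch for the cache lookup documented above, assuming a dns.resolver.Cache instance from dnspython; the key is the (name, rdtype, rdclass) tuple described in the docstring.

import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.resolver

cache = dns.resolver.Cache()
key = (dns.name.from_text('example.com.'), dns.rdatatype.A, dns.rdataclass.IN)
answer = cache.get(key)  # None if nothing is cached for the key or the entry expired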
eventable/vobject
docs/build/lib/vobject/base.py
https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/docs/build/lib/vobject/base.py#L927-L974
def foldOneLine(outbuf, input, lineLength = 75): """ Folding line procedure that ensures multi-byte utf-8 sequences are not broken across lines TO-DO: This all seems odd. Is it still needed, especially in python3? """ if len(input) < lineLength: # Optimize for unfolded line case try: outbuf.write(bytes(input, 'UTF-8')) except Exception: # fall back on py2 syntax outbuf.write(str_(input)) else: # Look for valid utf8 range and write that out start = 0 written = 0 counter = 0 # counts line size in bytes decoded = to_unicode(input) length = len(to_basestring(input)) while written < length: s = decoded[start] # take one char size = len(to_basestring(s)) # calculate it's size in bytes if counter + size > lineLength: try: outbuf.write(bytes("\r\n ", 'UTF-8')) except Exception: # fall back on py2 syntax outbuf.write("\r\n ") counter = 1 # one for space if str is unicode_type: outbuf.write(to_unicode(s)) else: # fall back on py2 syntax outbuf.write(s.encode('utf-8')) written += size counter += size start += 1 try: outbuf.write(bytes("\r\n", 'UTF-8')) except Exception: # fall back on py2 syntax outbuf.write("\r\n")
[ "def", "foldOneLine", "(", "outbuf", ",", "input", ",", "lineLength", "=", "75", ")", ":", "if", "len", "(", "input", ")", "<", "lineLength", ":", "# Optimize for unfolded line case", "try", ":", "outbuf", ".", "write", "(", "bytes", "(", "input", ",", "'UTF-8'", ")", ")", "except", "Exception", ":", "# fall back on py2 syntax", "outbuf", ".", "write", "(", "str_", "(", "input", ")", ")", "else", ":", "# Look for valid utf8 range and write that out", "start", "=", "0", "written", "=", "0", "counter", "=", "0", "# counts line size in bytes", "decoded", "=", "to_unicode", "(", "input", ")", "length", "=", "len", "(", "to_basestring", "(", "input", ")", ")", "while", "written", "<", "length", ":", "s", "=", "decoded", "[", "start", "]", "# take one char", "size", "=", "len", "(", "to_basestring", "(", "s", ")", ")", "# calculate it's size in bytes", "if", "counter", "+", "size", ">", "lineLength", ":", "try", ":", "outbuf", ".", "write", "(", "bytes", "(", "\"\\r\\n \"", ",", "'UTF-8'", ")", ")", "except", "Exception", ":", "# fall back on py2 syntax", "outbuf", ".", "write", "(", "\"\\r\\n \"", ")", "counter", "=", "1", "# one for space", "if", "str", "is", "unicode_type", ":", "outbuf", ".", "write", "(", "to_unicode", "(", "s", ")", ")", "else", ":", "# fall back on py2 syntax", "outbuf", ".", "write", "(", "s", ".", "encode", "(", "'utf-8'", ")", ")", "written", "+=", "size", "counter", "+=", "size", "start", "+=", "1", "try", ":", "outbuf", ".", "write", "(", "bytes", "(", "\"\\r\\n\"", ",", "'UTF-8'", ")", ")", "except", "Exception", ":", "# fall back on py2 syntax", "outbuf", ".", "write", "(", "\"\\r\\n\"", ")" ]
Folding line procedure that ensures multi-byte utf-8 sequences are not broken across lines TO-DO: This all seems odd. Is it still needed, especially in python3?
[ "Folding", "line", "procedure", "that", "ensures", "multi", "-", "byte", "utf", "-", "8", "sequences", "are", "not", "broken", "across", "lines" ]
python
train
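A small usage sketch for foldOneLine, assuming the Python 3 path that writes UTF-8 bytes; the short input stays on the fast, unfolded branch.

import io

buf = io.BytesIO()
# Lines shorter than lineLength are written unfolded, followed by CRLF.
foldOneLine(buf, u'SUMMARY:Team meeting')
print(buf.getvalue())  # b'SUMMARY:Team meeting\r\n'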
vertexproject/synapse
synapse/lib/cmdr.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/cmdr.py#L41-L51
async def runItemCmdr(item, outp=None, **opts): ''' Create a cmdr for the given item and run the cmd loop. Example: runItemCmdr(foo) ''' cmdr = await getItemCmdr(item, outp=outp, **opts) await cmdr.runCmdLoop()
[ "async", "def", "runItemCmdr", "(", "item", ",", "outp", "=", "None", ",", "*", "*", "opts", ")", ":", "cmdr", "=", "await", "getItemCmdr", "(", "item", ",", "outp", "=", "outp", ",", "*", "*", "opts", ")", "await", "cmdr", ".", "runCmdLoop", "(", ")" ]
Create a cmdr for the given item and run the cmd loop. Example: runItemCmdr(foo)
[ "Create", "a", "cmdr", "for", "the", "given", "item", "and", "run", "the", "cmd", "loop", "." ]
python
train
vpelletier/python-libusb1
usb1/__init__.py
https://github.com/vpelletier/python-libusb1/blob/740c9778e28523e4ec3543415d95f5400ae0fa24/usb1/__init__.py#L2437-L2453
def handleEventsTimeout(self, tv=0): """ Handle any pending event. If tv is 0, will return immediately after handling already-pending events. Otherwise, defines the maximum amount of time to wait for events, in seconds. """ if tv is None: tv = 0 tv_s = int(tv) real_tv = libusb1.timeval(tv_s, int((tv - tv_s) * 1000000)) mayRaiseUSBError( libusb1.libusb_handle_events_timeout( self.__context_p, byref(real_tv), ), )
[ "def", "handleEventsTimeout", "(", "self", ",", "tv", "=", "0", ")", ":", "if", "tv", "is", "None", ":", "tv", "=", "0", "tv_s", "=", "int", "(", "tv", ")", "real_tv", "=", "libusb1", ".", "timeval", "(", "tv_s", ",", "int", "(", "(", "tv", "-", "tv_s", ")", "*", "1000000", ")", ")", "mayRaiseUSBError", "(", "libusb1", ".", "libusb_handle_events_timeout", "(", "self", ".", "__context_p", ",", "byref", "(", "real_tv", ")", ",", ")", ",", ")" ]
Handle any pending event. If tv is 0, will return immediately after handling already-pending events. Otherwise, defines the maximum amount of time to wait for events, in seconds.
[ "Handle", "any", "pending", "event", ".", "If", "tv", "is", "0", "will", "return", "immediately", "after", "handling", "already", "-", "pending", "events", ".", "Otherwise", "defines", "the", "maximum", "amount", "of", "time", "to", "wait", "for", "events", "in", "seconds", "." ]
python
train
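A hedged usage sketch, assuming a usb1.USBContext from python-libusb1 used as a context manager; the timeout value is illustrative and given in seconds.

import usb1

with usb1.USBContext() as context:
    # Handle already-pending events and wait up to half a second for new ones.
    context.handleEventsTimeout(tv=0.5)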
taleinat/fuzzysearch
src/fuzzysearch/levenshtein_ngram.py
https://github.com/taleinat/fuzzysearch/blob/04be1b4490de92601400be5ecc999003ff2f621f/src/fuzzysearch/levenshtein_ngram.py#L23-L75
def _py_expand_short(subsequence, sequence, max_l_dist): """Straightforward implementation of partial match expansion.""" # The following diagram shows the score calculation step. # # Each new score is the minimum of: # * a OR a + 1 (substitution, if needed) # * b + 1 (deletion, i.e. skipping a sequence character) # * c + 1 (insertion, i.e. skipping a sub-sequence character) # # a -- +1 -> c # # | \ | # | \ | # +1 +1? +1 # | \ | # v ⌟ v # # b -- +1 -> scores[subseq_index] subseq_len = len(subsequence) if subseq_len == 0: return (0, 0) # Initialize the scores array with values for just skipping sub-sequence # chars. scores = list(range(1, subseq_len + 1)) min_score = subseq_len min_score_idx = -1 for seq_index, char in enumerate(sequence): # calculate scores, one for each character in the sub-sequence a = seq_index c = a + 1 for subseq_index in range(subseq_len): b = scores[subseq_index] c = scores[subseq_index] = min( a + (char != subsequence[subseq_index]), b + 1, c + 1, ) a = b # keep the minimum score found for matches of the entire sub-sequence if c <= min_score: min_score = c min_score_idx = seq_index # bail early when it is impossible to find a better expansion elif min(scores) >= min_score: break return (min_score, min_score_idx + 1) if min_score <= max_l_dist else (None, None)
[ "def", "_py_expand_short", "(", "subsequence", ",", "sequence", ",", "max_l_dist", ")", ":", "# The following diagram shows the score calculation step.", "#", "# Each new score is the minimum of:", "# * a OR a + 1 (substitution, if needed)", "# * b + 1 (deletion, i.e. skipping a sequence character)", "# * c + 1 (insertion, i.e. skipping a sub-sequence character)", "#", "# a -- +1 -> c", "#", "# | \\ |", "# | \\ |", "# +1 +1? +1", "# | \\ |", "# v ⌟ v", "#", "# b -- +1 -> scores[subseq_index]", "subseq_len", "=", "len", "(", "subsequence", ")", "if", "subseq_len", "==", "0", ":", "return", "(", "0", ",", "0", ")", "# Initialize the scores array with values for just skipping sub-sequence", "# chars.", "scores", "=", "list", "(", "range", "(", "1", ",", "subseq_len", "+", "1", ")", ")", "min_score", "=", "subseq_len", "min_score_idx", "=", "-", "1", "for", "seq_index", ",", "char", "in", "enumerate", "(", "sequence", ")", ":", "# calculate scores, one for each character in the sub-sequence", "a", "=", "seq_index", "c", "=", "a", "+", "1", "for", "subseq_index", "in", "range", "(", "subseq_len", ")", ":", "b", "=", "scores", "[", "subseq_index", "]", "c", "=", "scores", "[", "subseq_index", "]", "=", "min", "(", "a", "+", "(", "char", "!=", "subsequence", "[", "subseq_index", "]", ")", ",", "b", "+", "1", ",", "c", "+", "1", ",", ")", "a", "=", "b", "# keep the minimum score found for matches of the entire sub-sequence", "if", "c", "<=", "min_score", ":", "min_score", "=", "c", "min_score_idx", "=", "seq_index", "# bail early when it is impossible to find a better expansion", "elif", "min", "(", "scores", ")", ">=", "min_score", ":", "break", "return", "(", "min_score", ",", "min_score_idx", "+", "1", ")", "if", "min_score", "<=", "max_l_dist", "else", "(", "None", ",", "None", ")" ]
Straightforward implementation of partial match expansion.
[ "Straightforward", "implementation", "of", "partial", "match", "expansion", "." ]
python
train
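A worked call of the expansion routine above: with one allowed edit, matching 'abc' against 'axbc' skips the 'x' for a best score of 1 ending at index 4, while a hopeless sequence yields (None, None).

# (min_score, end_index) of the best partial match within max_l_dist edits
print(_py_expand_short('abc', 'axbc', max_l_dist=1))  # (1, 4)
print(_py_expand_short('abc', 'zzzz', max_l_dist=1))  # (None, None)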
blacktop/virustotal-api
virus_total_apis/api.py
https://github.com/blacktop/virustotal-api/blob/4e01e1c6d87255ec8370ac2a4ee16edce00e1e86/virus_total_apis/api.py#L887-L911
def get_intel_notifications_feed(self, page=None, timeout=None): """ Get notification feed in JSON for further processing. :param page: the next_page property of the results of a previously issued query to this API. This parameter should not be provided if it is the very first query to the API, i.e. if we are retrieving the first page of results. :param timeout: The amount of time in seconds the request should wait before timing out. :returns: The next page identifier, The results (JSON is possible with .json()) """ params = {'apikey': self.api_key, 'next': page} try: response = requests.get(self.base + 'hunting/notifications-feed/', params=params, proxies=self.proxies, timeout=timeout) # VT returns an empty result, len(content)==0, and status OK if there are no pending notifications. # To keep the API consistent we generate an empty object instead. # This might not be necessary with a later release of the VTI API. (bug has been submitted) if len(response.content) == 0: response.__dict__['_content'] = \ b'{"notifications":[],"verbose_msg":"No pending notification","result":0,"next":null}' except requests.RequestException as e: return dict(error=str(e)) return _return_response_and_status_code(response)
[ "def", "get_intel_notifications_feed", "(", "self", ",", "page", "=", "None", ",", "timeout", "=", "None", ")", ":", "params", "=", "{", "'apikey'", ":", "self", ".", "api_key", ",", "'next'", ":", "page", "}", "try", ":", "response", "=", "requests", ".", "get", "(", "self", ".", "base", "+", "'hunting/notifications-feed/'", ",", "params", "=", "params", ",", "proxies", "=", "self", ".", "proxies", ",", "timeout", "=", "timeout", ")", "# VT returns an empty result, len(content)==0, and status OK if there are no pending notifications.", "# To keep the API consistent we generate an empty object instead.", "# This might not be necessary with a later release of the VTI API. (bug has been submitted)", "if", "len", "(", "response", ".", "content", ")", "==", "0", ":", "response", ".", "__dict__", "[", "'_content'", "]", "=", "b'{\"notifications\":[],\"verbose_msg\":\"No pending notification\",\"result\":0,\"next\":null}'", "except", "requests", ".", "RequestException", "as", "e", ":", "return", "dict", "(", "error", "=", "str", "(", "e", ")", ")", "return", "_return_response_and_status_code", "(", "response", ")" ]
Get notification feed in JSON for further processing. :param page: the next_page property of the results of a previously issued query to this API. This parameter should not be provided if it is the very first query to the API, i.e. if we are retrieving the first page of results. :param timeout: The amount of time in seconds the request should wait before timing out. :returns: The next page identifier, The results (JSON is possible with .json())
[ "Get", "notification", "feed", "in", "JSON", "for", "further", "processing", "." ]
python
train
moderngl/moderngl
moderngl/context.py
https://github.com/moderngl/moderngl/blob/a8f5dce8dc72ae84a2f9523887fb5f6b620049b9/moderngl/context.py#L900-L925
def framebuffer(self, color_attachments=(), depth_attachment=None) -> 'Framebuffer': ''' A :py:class:`Framebuffer` is a collection of buffers that can be used as the destination for rendering. The buffers for Framebuffer objects reference images from either Textures or Renderbuffers. Args: color_attachments (list): A list of :py:class:`Texture` or :py:class:`Renderbuffer` objects. depth_attachment (Renderbuffer or Texture): The depth attachment. Returns: :py:class:`Framebuffer` object ''' if type(color_attachments) is Texture or type(color_attachments) is Renderbuffer: color_attachments = (color_attachments,) ca_mglo = tuple(x.mglo for x in color_attachments) da_mglo = None if depth_attachment is None else depth_attachment.mglo res = Framebuffer.__new__(Framebuffer) res.mglo, res._size, res._samples, res._glo = self.mglo.framebuffer(ca_mglo, da_mglo) res._color_attachments = tuple(color_attachments) res._depth_attachment = depth_attachment res.ctx = self res.extra = None return res
[ "def", "framebuffer", "(", "self", ",", "color_attachments", "=", "(", ")", ",", "depth_attachment", "=", "None", ")", "->", "'Framebuffer'", ":", "if", "type", "(", "color_attachments", ")", "is", "Texture", "or", "type", "(", "color_attachments", ")", "is", "Renderbuffer", ":", "color_attachments", "=", "(", "color_attachments", ",", ")", "ca_mglo", "=", "tuple", "(", "x", ".", "mglo", "for", "x", "in", "color_attachments", ")", "da_mglo", "=", "None", "if", "depth_attachment", "is", "None", "else", "depth_attachment", ".", "mglo", "res", "=", "Framebuffer", ".", "__new__", "(", "Framebuffer", ")", "res", ".", "mglo", ",", "res", ".", "_size", ",", "res", ".", "_samples", ",", "res", ".", "_glo", "=", "self", ".", "mglo", ".", "framebuffer", "(", "ca_mglo", ",", "da_mglo", ")", "res", ".", "_color_attachments", "=", "tuple", "(", "color_attachments", ")", "res", ".", "_depth_attachment", "=", "depth_attachment", "res", ".", "ctx", "=", "self", "res", ".", "extra", "=", "None", "return", "res" ]
A :py:class:`Framebuffer` is a collection of buffers that can be used as the destination for rendering. The buffers for Framebuffer objects reference images from either Textures or Renderbuffers. Args: color_attachments (list): A list of :py:class:`Texture` or :py:class:`Renderbuffer` objects. depth_attachment (Renderbuffer or Texture): The depth attachment. Returns: :py:class:`Framebuffer` object
[ "A", ":", "py", ":", "class", ":", "Framebuffer", "is", "a", "collection", "of", "buffers", "that", "can", "be", "used", "as", "the", "destination", "for", "rendering", ".", "The", "buffers", "for", "Framebuffer", "objects", "reference", "images", "from", "either", "Textures", "or", "Renderbuffers", "." ]
python
train
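A hedged usage sketch for the framebuffer factory above, assuming a standalone moderngl context; the size and attachment choices are illustrative.

import moderngl

ctx = moderngl.create_standalone_context()
color = ctx.texture((512, 512), 4)          # RGBA color attachment
depth = ctx.depth_renderbuffer((512, 512))  # depth attachment
fbo = ctx.framebuffer(color_attachments=[color], depth_attachment=depth)
fbo.use()  # subsequent rendering goes into this framebuffer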
openvax/isovar
isovar/variant_sequences.py
https://github.com/openvax/isovar/blob/b39b684920e3f6b344851d6598a1a1c67bce913b/isovar/variant_sequences.py#L117-L131
def add_reads(self, reads): """ Create another VariantSequence with more supporting reads. """ if len(reads) == 0: return self new_reads = self.reads.union(reads) if len(new_reads) > len(self.reads): return VariantSequence( prefix=self.prefix, alt=self.alt, suffix=self.suffix, reads=new_reads) else: return self
[ "def", "add_reads", "(", "self", ",", "reads", ")", ":", "if", "len", "(", "reads", ")", "==", "0", ":", "return", "self", "new_reads", "=", "self", ".", "reads", ".", "union", "(", "reads", ")", "if", "len", "(", "new_reads", ")", ">", "len", "(", "self", ".", "reads", ")", ":", "return", "VariantSequence", "(", "prefix", "=", "self", ".", "prefix", ",", "alt", "=", "self", ".", "alt", ",", "suffix", "=", "self", ".", "suffix", ",", "reads", "=", "new_reads", ")", "else", ":", "return", "self" ]
Create another VariantSequence with more supporting reads.
[ "Create", "another", "VariantSequence", "with", "more", "supporting", "reads", "." ]
python
train
ellmetha/django-machina
machina/apps/forum_moderation/views.py
https://github.com/ellmetha/django-machina/blob/89ac083c1eaf1cfdeae6686ee094cc86362e8c69/machina/apps/forum_moderation/views.py#L454-L456
def post(self, request, *args, **kwargs): """ Handles POST requests. """ return self.disapprove(request, *args, **kwargs)
[ "def", "post", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "disapprove", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Handles POST requests.
[ "Handles", "POST", "requests", "." ]
python
train
SKA-ScienceDataProcessor/integration-prototype
sip/science_pipeline_workflows/example_imager_spark/example_spark_imager.py
https://github.com/SKA-ScienceDataProcessor/integration-prototype/blob/8c8006de6ad71dcd44114b0338780738079c87d4/sip/science_pipeline_workflows/example_imager_spark/example_spark_imager.py#L237-L314
def main(): """Runs test imaging pipeline using Spark.""" # Check command line arguments. if len(sys.argv) < 3: raise RuntimeError( 'Usage: spark-submit spark_imager_test.py <settings_file> <dir> ' '[partitions]') # Create log object. log = logging.getLogger('pyspark') log.setLevel(logging.INFO) log.addHandler(logging.StreamHandler(sys.stdout)) # Load pipeline settings. with open(sys.argv[1]) as f: settings = json.load(f) # Get a list of input Measurement Sets to process. data_dir = str(sys.argv[2]) inputs = glob(join(data_dir, '*.ms')) + glob(join(data_dir, '*.MS')) inputs = filter(None, inputs) log.info('Found input Measurement Sets: %s', ', '.join(inputs)) # Get a Spark context. context = pyspark.SparkContext(appName="spark_imager_test") # Create the Spark RDD containing the input filenames, # suitably parallelized. partitions = int(sys.argv[3]) if len(sys.argv) > 3 else 2 rdd = context.parallelize(inputs, partitions) # Define Spark broadcast variables. bc_settings = context.broadcast(settings) bc_grid_weights = None # Process coordinates first if required. if (settings['combine'] and ( settings['imager']['weighting'] == 'Uniform' or settings['imager']['algorithm'] == 'W-projection')): # Create RDD to generate weights grids. rdd_coords = rdd.map( partial(node_run, coords_only=True, bc_settings=bc_settings, bc_grid_weights=None)) # Mark this RDD as persistent so it isn't computed more than once. rdd_coords.persist() # Get the maximum number of W-planes required, and update settings. num_w_planes = rdd_coords.map(lambda x: x[1]).max() settings['imager']['num_w_planes'] = num_w_planes # Get the combined grid of weights and broadcast it to nodes. output = rdd_coords.reduce(reduce_sequences) bc_grid_weights = context.broadcast(output[0]) # Delete this RDD. rdd_coords.unpersist() # Re-broadcast updated settings. bc_settings = context.broadcast(settings) # Run parallel pipeline on worker nodes and combine visibility grids. output = rdd.map( partial(node_run, coords_only=False, bc_settings=bc_settings, bc_grid_weights=bc_grid_weights)).reduce(reduce_sequences) # Finalise combined visibility grids if required. if settings['combine']: # Create an imager to finalise (FFT) the gridded data. imager = oskar.Imager(settings['precision']) for key, value in settings['imager'].items(): setattr(imager, key, value) # Finalise grid and save image. save_image(imager, output[0], output[1], settings['output_file']) log.info('Finished. Output file is %s', settings['output_file']) context.stop()
[ "def", "main", "(", ")", ":", "# Check command line arguments.", "if", "len", "(", "sys", ".", "argv", ")", "<", "3", ":", "raise", "RuntimeError", "(", "'Usage: spark-submit spark_imager_test.py <settings_file> <dir> '", "'[partitions]'", ")", "# Create log object.", "log", "=", "logging", ".", "getLogger", "(", "'pyspark'", ")", "log", ".", "setLevel", "(", "logging", ".", "INFO", ")", "log", ".", "addHandler", "(", "logging", ".", "StreamHandler", "(", "sys", ".", "stdout", ")", ")", "# Load pipeline settings.", "with", "open", "(", "sys", ".", "argv", "[", "1", "]", ")", "as", "f", ":", "settings", "=", "json", ".", "load", "(", "f", ")", "# Get a list of input Measurement Sets to process.", "data_dir", "=", "str", "(", "sys", ".", "argv", "[", "2", "]", ")", "inputs", "=", "glob", "(", "join", "(", "data_dir", ",", "'*.ms'", ")", ")", "+", "glob", "(", "join", "(", "data_dir", ",", "'*.MS'", ")", ")", "inputs", "=", "filter", "(", "None", ",", "inputs", ")", "log", ".", "info", "(", "'Found input Measurement Sets: %s'", ",", "', '", ".", "join", "(", "inputs", ")", ")", "# Get a Spark context.", "context", "=", "pyspark", ".", "SparkContext", "(", "appName", "=", "\"spark_imager_test\"", ")", "# Create the Spark RDD containing the input filenames,", "# suitably parallelized.", "partitions", "=", "int", "(", "sys", ".", "argv", "[", "3", "]", ")", "if", "len", "(", "sys", ".", "argv", ")", ">", "3", "else", "2", "rdd", "=", "context", ".", "parallelize", "(", "inputs", ",", "partitions", ")", "# Define Spark broadcast variables.", "bc_settings", "=", "context", ".", "broadcast", "(", "settings", ")", "bc_grid_weights", "=", "None", "# Process coordinates first if required.", "if", "(", "settings", "[", "'combine'", "]", "and", "(", "settings", "[", "'imager'", "]", "[", "'weighting'", "]", "==", "'Uniform'", "or", "settings", "[", "'imager'", "]", "[", "'algorithm'", "]", "==", "'W-projection'", ")", ")", ":", "# Create RDD to generate weights grids.", "rdd_coords", "=", "rdd", ".", "map", "(", "partial", "(", "node_run", ",", "coords_only", "=", "True", ",", "bc_settings", "=", "bc_settings", ",", "bc_grid_weights", "=", "None", ")", ")", "# Mark this RDD as persistent so it isn't computed more than once.", "rdd_coords", ".", "persist", "(", ")", "# Get the maximum number of W-planes required, and update settings.", "num_w_planes", "=", "rdd_coords", ".", "map", "(", "lambda", "x", ":", "x", "[", "1", "]", ")", ".", "max", "(", ")", "settings", "[", "'imager'", "]", "[", "'num_w_planes'", "]", "=", "num_w_planes", "# Get the combined grid of weights and broadcast it to nodes.", "output", "=", "rdd_coords", ".", "reduce", "(", "reduce_sequences", ")", "bc_grid_weights", "=", "context", ".", "broadcast", "(", "output", "[", "0", "]", ")", "# Delete this RDD.", "rdd_coords", ".", "unpersist", "(", ")", "# Re-broadcast updated settings.", "bc_settings", "=", "context", ".", "broadcast", "(", "settings", ")", "# Run parallel pipeline on worker nodes and combine visibility grids.", "output", "=", "rdd", ".", "map", "(", "partial", "(", "node_run", ",", "coords_only", "=", "False", ",", "bc_settings", "=", "bc_settings", ",", "bc_grid_weights", "=", "bc_grid_weights", ")", ")", ".", "reduce", "(", "reduce_sequences", ")", "# Finalise combined visibility grids if required.", "if", "settings", "[", "'combine'", "]", ":", "# Create an imager to finalise (FFT) the gridded data.", "imager", "=", "oskar", ".", "Imager", "(", "settings", "[", "'precision'", "]", ")", "for", "key", ",", "value", 
"in", "settings", "[", "'imager'", "]", ".", "items", "(", ")", ":", "setattr", "(", "imager", ",", "key", ",", "value", ")", "# Finalise grid and save image.", "save_image", "(", "imager", ",", "output", "[", "0", "]", ",", "output", "[", "1", "]", ",", "settings", "[", "'output_file'", "]", ")", "log", ".", "info", "(", "'Finished. Output file is %s'", ",", "settings", "[", "'output_file'", "]", ")", "context", ".", "stop", "(", ")" ]
Runs test imaging pipeline using Spark.
[ "Runs", "test", "imaging", "pipeline", "using", "Spark", "." ]
python
train
scikit-hep/root_numpy
root_numpy/_tree.py
https://github.com/scikit-hep/root_numpy/blob/3a9bfbcf89f90dc20ca6869480a63a85e1ceebb8/root_numpy/_tree.py#L120-L246
def root2array(filenames, treename=None, branches=None, selection=None, object_selection=None, start=None, stop=None, step=None, include_weight=False, weight_name='weight', cache_size=-1, warn_missing_tree=False): """Convert trees in ROOT files into a numpy structured array. Refer to the documentation of :func:`tree2array`. Parameters ---------- filenames : str or list ROOT file name pattern or list of patterns. Wildcarding is supported by Python globbing. treename : str, optional (default=None) Name of the tree to convert (optional if each file contains exactly one tree). branches : list of strings and tuples or a string or tuple, optional (default=None) List of branches and expressions to include as columns of the array or a single branch or expression in which case a nonstructured array is returned. If None then include all branches that can be converted. Branches or expressions that result in variable-length subarrays can be truncated at a fixed length by using the tuple ``(branch_or_expression, fill_value, length)`` or converted into a single value with ``(branch_or_expression, fill_value)`` where ``length==1`` is implied. ``fill_value`` is used when the original array is shorter than ``length``. This truncation is after any object selection performed with the ``object_selection`` argument. selection : str, optional (default=None) Only include entries fulfilling this condition. If the condition evaluates to multiple values per tree entry (e.g. conditions on array branches) then an entry will be included if the condition evaluates to true for at least one array element. object_selection : dict, optional (default=None) A dictionary mapping selection strings to branch names or lists of branch names. Only array elements passing the selection strings will be included in the output array per entry in the tree. The branches specified must be variable-length array-type branches and the length of the selection and branches it acts on must match for each tree entry. For example ``object_selection={'a > 0': ['a', 'b']}`` will include all elements of 'a' and corresponding elements of 'b' where 'a > 0' for each tree entry. 'a' and 'b' must have the same length in every tree entry. start, stop, step: int, optional (default=None) The meaning of the ``start``, ``stop`` and ``step`` parameters is the same as for Python slices. If a range is supplied (by setting some of the ``start``, ``stop`` or ``step`` parameters), only the entries in that range and fulfilling the ``selection`` condition (if defined) are used. include_weight : bool, optional (default=False) Include a column containing the tree weight ``TTree::GetWeight()``. Note that this will be the same value for all entries unless the tree is actually a TChain containing multiple trees with different weights. weight_name : str, optional (default='weight') The field name for the weight column if ``include_weight=True``. cache_size : int, optional (default=-1) Set the size (in bytes) of the TTreeCache used while reading a TTree. A value of -1 uses ROOT's default cache size. A value of 0 disables the cache. warn_missing_tree : bool, optional (default=False) If True, then warn when a tree is missing from an input file instead of raising an IOError. Notes ----- * Refer to the :ref:`type conversion table <conversion_table>`. 
See Also -------- tree2array array2tree array2root """ filenames = _glob(filenames) if not filenames: raise ValueError("specify at least one filename") if treename is None: trees = list_trees(filenames[0]) if len(trees) > 1: raise ValueError( "treename must be specified if the file " "contains more than one tree") elif not trees: raise IOError( "no trees present in {0}".format(filenames[0])) treename = trees[0] if isinstance(branches, string_types): # single branch selected flatten = branches branches = [branches] elif isinstance(branches, tuple): if len(branches) not in (2, 3): raise ValueError( "invalid branch tuple: {0}. " "A branch tuple must contain two elements " "(branch_name, fill_value) or three elements " "(branch_name, fill_value, length) " "to yield a single value or truncate, respectively".format(branches)) flatten = branches[0] branches = [branches] else: flatten = False arr = _librootnumpy.root2array_fromfile( filenames, treename, branches, selection, object_selection, start, stop, step, include_weight, weight_name, cache_size, warn_missing_tree) if flatten: # select single column return arr[flatten] return arr
[ "def", "root2array", "(", "filenames", ",", "treename", "=", "None", ",", "branches", "=", "None", ",", "selection", "=", "None", ",", "object_selection", "=", "None", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "step", "=", "None", ",", "include_weight", "=", "False", ",", "weight_name", "=", "'weight'", ",", "cache_size", "=", "-", "1", ",", "warn_missing_tree", "=", "False", ")", ":", "filenames", "=", "_glob", "(", "filenames", ")", "if", "not", "filenames", ":", "raise", "ValueError", "(", "\"specify at least one filename\"", ")", "if", "treename", "is", "None", ":", "trees", "=", "list_trees", "(", "filenames", "[", "0", "]", ")", "if", "len", "(", "trees", ")", ">", "1", ":", "raise", "ValueError", "(", "\"treename must be specified if the file \"", "\"contains more than one tree\"", ")", "elif", "not", "trees", ":", "raise", "IOError", "(", "\"no trees present in {0}\"", ".", "format", "(", "filenames", "[", "0", "]", ")", ")", "treename", "=", "trees", "[", "0", "]", "if", "isinstance", "(", "branches", ",", "string_types", ")", ":", "# single branch selected", "flatten", "=", "branches", "branches", "=", "[", "branches", "]", "elif", "isinstance", "(", "branches", ",", "tuple", ")", ":", "if", "len", "(", "branches", ")", "not", "in", "(", "2", ",", "3", ")", ":", "raise", "ValueError", "(", "\"invalid branch tuple: {0}. \"", "\"A branch tuple must contain two elements \"", "\"(branch_name, fill_value) or three elements \"", "\"(branch_name, fill_value, length) \"", "\"to yield a single value or truncate, respectively\"", ".", "format", "(", "branches", ")", ")", "flatten", "=", "branches", "[", "0", "]", "branches", "=", "[", "branches", "]", "else", ":", "flatten", "=", "False", "arr", "=", "_librootnumpy", ".", "root2array_fromfile", "(", "filenames", ",", "treename", ",", "branches", ",", "selection", ",", "object_selection", ",", "start", ",", "stop", ",", "step", ",", "include_weight", ",", "weight_name", ",", "cache_size", ",", "warn_missing_tree", ")", "if", "flatten", ":", "# select single column", "return", "arr", "[", "flatten", "]", "return", "arr" ]
Convert trees in ROOT files into a numpy structured array. Refer to the documentation of :func:`tree2array`. Parameters ---------- filenames : str or list ROOT file name pattern or list of patterns. Wildcarding is supported by Python globbing. treename : str, optional (default=None) Name of the tree to convert (optional if each file contains exactly one tree). branches : list of strings and tuples or a string or tuple, optional (default=None) List of branches and expressions to include as columns of the array or a single branch or expression in which case a nonstructured array is returned. If None then include all branches that can be converted. Branches or expressions that result in variable-length subarrays can be truncated at a fixed length by using the tuple ``(branch_or_expression, fill_value, length)`` or converted into a single value with ``(branch_or_expression, fill_value)`` where ``length==1`` is implied. ``fill_value`` is used when the original array is shorter than ``length``. This truncation is after any object selection performed with the ``object_selection`` argument. selection : str, optional (default=None) Only include entries fulfilling this condition. If the condition evaluates to multiple values per tree entry (e.g. conditions on array branches) then an entry will be included if the condition evaluates to true for at least one array element. object_selection : dict, optional (default=None) A dictionary mapping selection strings to branch names or lists of branch names. Only array elements passing the selection strings will be included in the output array per entry in the tree. The branches specified must be variable-length array-type branches and the length of the selection and branches it acts on must match for each tree entry. For example ``object_selection={'a > 0': ['a', 'b']}`` will include all elements of 'a' and corresponding elements of 'b' where 'a > 0' for each tree entry. 'a' and 'b' must have the same length in every tree entry. start, stop, step: int, optional (default=None) The meaning of the ``start``, ``stop`` and ``step`` parameters is the same as for Python slices. If a range is supplied (by setting some of the ``start``, ``stop`` or ``step`` parameters), only the entries in that range and fulfilling the ``selection`` condition (if defined) are used. include_weight : bool, optional (default=False) Include a column containing the tree weight ``TTree::GetWeight()``. Note that this will be the same value for all entries unless the tree is actually a TChain containing multiple trees with different weights. weight_name : str, optional (default='weight') The field name for the weight column if ``include_weight=True``. cache_size : int, optional (default=-1) Set the size (in bytes) of the TTreeCache used while reading a TTree. A value of -1 uses ROOT's default cache size. A value of 0 disables the cache. warn_missing_tree : bool, optional (default=False) If True, then warn when a tree is missing from an input file instead of raising an IOError. Notes ----- * Refer to the :ref:`type conversion table <conversion_table>`. See Also -------- tree2array array2tree array2root
[ "Convert", "trees", "in", "ROOT", "files", "into", "a", "numpy", "structured", "array", "." ]
python
train
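A hedged usage sketch for root2array; the file, tree, branch names, and selection are hypothetical.

from root_numpy import root2array

# Hypothetical ROOT file, tree, and branches.
arr = root2array('events.root', treename='events',
                 branches=['pt', 'eta'], selection='pt > 20')
print(arr.dtype.names)  # ('pt', 'eta')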
OCA/odoorpc
odoorpc/odoo.py
https://github.com/OCA/odoorpc/blob/d90aa0b2bc4fafbab8bd8f50d50e3fb0b9ba91f0/odoorpc/odoo.py#L365-L389
def logout(self): """Log out the user. >>> odoo.logout() True *Python 2:* :return: `True` if the operation succeed, `False` if no user was logged :raise: :class:`odoorpc.error.RPCError` :raise: `urllib2.URLError` (connection error) *Python 3:* :return: `True` if the operation succeed, `False` if no user was logged :raise: :class:`odoorpc.error.RPCError` :raise: `urllib.error.URLError` (connection error) """ if not self._env: return False self.json('/web/session/destroy', {}) self._env = None self._login = None self._password = None return True
[ "def", "logout", "(", "self", ")", ":", "if", "not", "self", ".", "_env", ":", "return", "False", "self", ".", "json", "(", "'/web/session/destroy'", ",", "{", "}", ")", "self", ".", "_env", "=", "None", "self", ".", "_login", "=", "None", "self", ".", "_password", "=", "None", "return", "True" ]
Log out the user. >>> odoo.logout() True *Python 2:* :return: `True` if the operation succeeds, `False` if no user was logged in :raise: :class:`odoorpc.error.RPCError` :raise: `urllib2.URLError` (connection error) *Python 3:* :return: `True` if the operation succeeds, `False` if no user was logged in :raise: :class:`odoorpc.error.RPCError` :raise: `urllib.error.URLError` (connection error)
[ "Log", "out", "the", "user", "." ]
python
train
formwork-io/lazarus
lazarus/_util.py
https://github.com/formwork-io/lazarus/blob/b2b6120fe06d69c23b4f41d55b6d71860a9fdeaa/lazarus/_util.py#L78-L84
def do_over(): '''Calls :py:func:`os.exec` with executable and args derived from sys.''' path = sys.executable args = [path] + sys.argv # And the rest, after a sudden wet thud, was silence. os.execv(path, args)
[ "def", "do_over", "(", ")", ":", "path", "=", "sys", ".", "executable", "args", "=", "[", "path", "]", "+", "sys", ".", "argv", "# And the rest, after a sudden wet thud, was silence.", "os", ".", "execv", "(", "path", ",", "args", ")" ]
Calls :py:func:`os.exec` with executable and args derived from sys.
[ "Calls", ":", "py", ":", "func", ":", "os", ".", "exec", "with", "executable", "and", "args", "derived", "from", "sys", "." ]
python
train
FutunnOpen/futuquant
futuquant/examples/TinyQuant/TinyStrateBase.py
https://github.com/FutunnOpen/futuquant/blob/1512b321845f92ec9c578ce2689aa4e8482669e4/futuquant/examples/TinyQuant/TinyStrateBase.py#L152-L173
def init_strate(self, global_setting, quant_frame, event_engine): """TinyQuantFrame 初始化策略的接口""" if type(self._quant_frame) is not int: return True self._quant_frame = quant_frame self._event_engine = event_engine init_ret = self.__loadSetting(global_setting) # 注册事件 self._event_engine.register(EVENT_BEFORE_TRADING, self.__event_before_trading) self._event_engine.register(EVENT_AFTER_TRADING, self.__event_after_trading) self._event_engine.register(EVENT_QUOTE_CHANGE, self.__event_quote_change) self._event_engine.register(EVENT_CUR_KLINE_BAR, self.__event_cur_kline_bar) self.log("init_strate '%s' ret = %s" % (self.name, init_ret)) # 对外通知初始化事件 self.on_init_strate() return init_ret
[ "def", "init_strate", "(", "self", ",", "global_setting", ",", "quant_frame", ",", "event_engine", ")", ":", "if", "type", "(", "self", ".", "_quant_frame", ")", "is", "not", "int", ":", "return", "True", "self", ".", "_quant_frame", "=", "quant_frame", "self", ".", "_event_engine", "=", "event_engine", "init_ret", "=", "self", ".", "__loadSetting", "(", "global_setting", ")", "# 注册事件", "self", ".", "_event_engine", ".", "register", "(", "EVENT_BEFORE_TRADING", ",", "self", ".", "__event_before_trading", ")", "self", ".", "_event_engine", ".", "register", "(", "EVENT_AFTER_TRADING", ",", "self", ".", "__event_after_trading", ")", "self", ".", "_event_engine", ".", "register", "(", "EVENT_QUOTE_CHANGE", ",", "self", ".", "__event_quote_change", ")", "self", ".", "_event_engine", ".", "register", "(", "EVENT_CUR_KLINE_BAR", ",", "self", ".", "__event_cur_kline_bar", ")", "self", ".", "log", "(", "\"init_strate '%s' ret = %s\"", "%", "(", "self", ".", "name", ",", "init_ret", ")", ")", "# 对外通知初始化事件", "self", ".", "on_init_strate", "(", ")", "return", "init_ret" ]
Interface for TinyQuantFrame to initialize the strategy
[ "TinyQuantFrame", "初始化策略的接口" ]
python
train
couchbase/couchbase-python-client
examples/docloader.py
https://github.com/couchbase/couchbase-python-client/blob/a7bada167785bf79a29c39f820d932a433a6a535/examples/docloader.py#L67-L92
def prepare_bucket(self): """ Resets and creates the destination bucket ( only called if --create is true). :return: """ self.logger.info('Deleting old bucket first') del_url = '{0}/buckets/{1}'.format(self.cluster_prefix, self.bucket) r = self._htsess.delete(del_url) try: r.raise_for_status() except: self.logger.exception("Couldn't delete bucket") cr_url = '{0}/buckets'.format(self.cluster_prefix) data = { 'name': self.bucket, 'ramQuotaMB': '{0}'.format(self.quota), 'bucketType': 'couchbase', 'authType': 'sasl', 'saslPassword': '', 'replicaNumber': '0' } r = self._htsess.post(cr_url, data) r.raise_for_status()
[ "def", "prepare_bucket", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "'Deleting old bucket first'", ")", "del_url", "=", "'{0}/buckets/{1}'", ".", "format", "(", "self", ".", "cluster_prefix", ",", "self", ".", "bucket", ")", "r", "=", "self", ".", "_htsess", ".", "delete", "(", "del_url", ")", "try", ":", "r", ".", "raise_for_status", "(", ")", "except", ":", "self", ".", "logger", ".", "exception", "(", "\"Couldn't delete bucket\"", ")", "cr_url", "=", "'{0}/buckets'", ".", "format", "(", "self", ".", "cluster_prefix", ")", "data", "=", "{", "'name'", ":", "self", ".", "bucket", ",", "'ramQuotaMB'", ":", "'{0}'", ".", "format", "(", "self", ".", "quota", ")", ",", "'bucketType'", ":", "'couchbase'", ",", "'authType'", ":", "'sasl'", ",", "'saslPassword'", ":", "''", ",", "'replicaNumber'", ":", "'0'", "}", "r", "=", "self", ".", "_htsess", ".", "post", "(", "cr_url", ",", "data", ")", "r", ".", "raise_for_status", "(", ")" ]
Resets and creates the destination bucket (only called if --create is true). :return:
[ "Resets", "and", "creates", "the", "destination", "bucket", "(", "only", "called", "if", "--", "create", "is", "true", ")", ".", ":", "return", ":" ]
python
train
mesbahamin/chronophore
chronophore/qtview.py
https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/qtview.py#L127-L135
def _set_signed_in(self): """Populate the signed_in list with the names of currently signed in users. """ names = [ controller.get_user_name(user, full_name=CONFIG['FULL_USER_NAMES']) for user in controller.signed_in_users() ] self.lbl_signedin_list.setText('\n'.join(sorted(names)))
[ "def", "_set_signed_in", "(", "self", ")", ":", "names", "=", "[", "controller", ".", "get_user_name", "(", "user", ",", "full_name", "=", "CONFIG", "[", "'FULL_USER_NAMES'", "]", ")", "for", "user", "in", "controller", ".", "signed_in_users", "(", ")", "]", "self", ".", "lbl_signedin_list", ".", "setText", "(", "'\\n'", ".", "join", "(", "sorted", "(", "names", ")", ")", ")" ]
Populate the signed_in list with the names of currently signed in users.
[ "Populate", "the", "signed_in", "list", "with", "the", "names", "of", "currently", "signed", "in", "users", "." ]
python
train
genialis/resolwe
resolwe/permissions/shortcuts.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/permissions/shortcuts.py#L61-L85
def _group_groups(perm_list): """Group permissions by group. Input is list of tuples of length 3, where each tuple is in following format:: (<group_id>, <group_name>, <single_permission>) Permissions are regrouped and returned in such way that there is only one tuple for each group:: (<group_id>, <group_name>, [<first_permission>, <second_permission>,...]) :param list perm_list: list of touples of length 3 :return: list tuples with grouped permissions :rtype: list """ perm_list = sorted(perm_list, key=lambda tup: tup[0]) grouped_perms = [] for key, group in groupby(perm_list, lambda tup: (tup[0], tup[1])): grouped_perms.append((key[0], key[1], [g[2] for g in group])) return grouped_perms
[ "def", "_group_groups", "(", "perm_list", ")", ":", "perm_list", "=", "sorted", "(", "perm_list", ",", "key", "=", "lambda", "tup", ":", "tup", "[", "0", "]", ")", "grouped_perms", "=", "[", "]", "for", "key", ",", "group", "in", "groupby", "(", "perm_list", ",", "lambda", "tup", ":", "(", "tup", "[", "0", "]", ",", "tup", "[", "1", "]", ")", ")", ":", "grouped_perms", ".", "append", "(", "(", "key", "[", "0", "]", ",", "key", "[", "1", "]", ",", "[", "g", "[", "2", "]", "for", "g", "in", "group", "]", ")", ")", "return", "grouped_perms" ]
Group permissions by group. Input is a list of tuples of length 3, where each tuple is in the following format:: (<group_id>, <group_name>, <single_permission>) Permissions are regrouped and returned in such a way that there is only one tuple for each group:: (<group_id>, <group_name>, [<first_permission>, <second_permission>,...]) :param list perm_list: list of tuples of length 3 :return: list of tuples with grouped permissions :rtype: list
[ "Group", "permissions", "by", "group", "." ]
python
train
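A worked example of the regrouping described above: tuples sharing a group id and name are collapsed into one tuple carrying a list of permissions (the group names and permission strings are illustrative).

perms = [
    (1, 'editors', 'view_collection'),
    (1, 'editors', 'edit_collection'),
    (2, 'viewers', 'view_collection'),
]
print(_group_groups(perms))
# [(1, 'editors', ['view_collection', 'edit_collection']),
#  (2, 'viewers', ['view_collection'])]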
mwhooker/jsonselect
jsonselect/jsonselect.py
https://github.com/mwhooker/jsonselect/blob/c64aa9ea930de0344797ff87b04c753c8fc096a6/jsonselect/jsonselect.py#L386-L430
def nth_child_production(self, lexeme, tokens): """Parse args and pass them to pclass_func_validator.""" args = self.match(tokens, 'expr') pat = self.nth_child_pat.match(args) if pat.group(5): a = 2 b = 1 if pat.group(5) == 'odd' else 0 elif pat.group(6): a = 0 b = int(pat.group(6)) else: sign = pat.group(1) if pat.group(1) else '+' coef = pat.group(2) if pat.group(2) else '1' a = eval(sign + coef) b = eval(pat.group(3) + pat.group(4)) if pat.group(3) else 0 reverse = False if lexeme == 'nth-last-child': reverse = True def validate(node): """This crazy function taken from jsonselect.js:444.""" if not node.siblings: return False idx = node.idx - 1 tot = node.siblings if reverse: idx = tot - idx else: idx += 1 if a == 0: m = b == idx else: mod = (idx - b) % a m = not mod and (idx * a + b) >= 0 return m return validate
[ "def", "nth_child_production", "(", "self", ",", "lexeme", ",", "tokens", ")", ":", "args", "=", "self", ".", "match", "(", "tokens", ",", "'expr'", ")", "pat", "=", "self", ".", "nth_child_pat", ".", "match", "(", "args", ")", "if", "pat", ".", "group", "(", "5", ")", ":", "a", "=", "2", "b", "=", "1", "if", "pat", ".", "group", "(", "5", ")", "==", "'odd'", "else", "0", "elif", "pat", ".", "group", "(", "6", ")", ":", "a", "=", "0", "b", "=", "int", "(", "pat", ".", "group", "(", "6", ")", ")", "else", ":", "sign", "=", "pat", ".", "group", "(", "1", ")", "if", "pat", ".", "group", "(", "1", ")", "else", "'+'", "coef", "=", "pat", ".", "group", "(", "2", ")", "if", "pat", ".", "group", "(", "2", ")", "else", "'1'", "a", "=", "eval", "(", "sign", "+", "coef", ")", "b", "=", "eval", "(", "pat", ".", "group", "(", "3", ")", "+", "pat", ".", "group", "(", "4", ")", ")", "if", "pat", ".", "group", "(", "3", ")", "else", "0", "reverse", "=", "False", "if", "lexeme", "==", "'nth-last-child'", ":", "reverse", "=", "True", "def", "validate", "(", "node", ")", ":", "\"\"\"This crazy function taken from jsonselect.js:444.\"\"\"", "if", "not", "node", ".", "siblings", ":", "return", "False", "idx", "=", "node", ".", "idx", "-", "1", "tot", "=", "node", ".", "siblings", "if", "reverse", ":", "idx", "=", "tot", "-", "idx", "else", ":", "idx", "+=", "1", "if", "a", "==", "0", ":", "m", "=", "b", "==", "idx", "else", ":", "mod", "=", "(", "idx", "-", "b", ")", "%", "a", "m", "=", "not", "mod", "and", "(", "idx", "*", "a", "+", "b", ")", ">=", "0", "return", "m", "return", "validate" ]
Parse args and pass them to pclass_func_validator.
[ "Parse", "args", "and", "pass", "them", "to", "pclass_func_validator", "." ]
python
test
pantsbuild/pants
src/python/pants/backend/python/interpreter_cache.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/python/interpreter_cache.py#L139-L145
def _setup_cached(self, filters=()): """Find all currently-cached interpreters.""" for interpreter_dir in os.listdir(self._cache_dir): pi = self._interpreter_from_relpath(interpreter_dir, filters=filters) if pi: logger.debug('Detected interpreter {}: {}'.format(pi.binary, str(pi.identity))) yield pi
[ "def", "_setup_cached", "(", "self", ",", "filters", "=", "(", ")", ")", ":", "for", "interpreter_dir", "in", "os", ".", "listdir", "(", "self", ".", "_cache_dir", ")", ":", "pi", "=", "self", ".", "_interpreter_from_relpath", "(", "interpreter_dir", ",", "filters", "=", "filters", ")", "if", "pi", ":", "logger", ".", "debug", "(", "'Detected interpreter {}: {}'", ".", "format", "(", "pi", ".", "binary", ",", "str", "(", "pi", ".", "identity", ")", ")", ")", "yield", "pi" ]
Find all currently-cached interpreters.
[ "Find", "all", "currently", "-", "cached", "interpreters", "." ]
python
train
MartinThoma/hwrt
hwrt/segmentation/segmentation.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/segmentation/segmentation.py#L472-L505
def get_stroke_features(recording, strokeid1, strokeid2): """Get the features used to decide if two strokes belong to the same symbol or not. Parameters ---------- recording : list A list of strokes strokeid1 : int strokeid2 : int Returns ------- list : A list of features which could be useful to decide if stroke1 and stroke2 belong to the same symbol. """ stroke1 = recording[strokeid1] stroke2 = recording[strokeid2] assert isinstance(stroke1, list), "stroke1 is a %s" % type(stroke1) X_i = [] for s in [stroke1, stroke2]: hw = HandwrittenData(json.dumps([s])) feat1 = features.ConstantPointCoordinates(strokes=1, points_per_stroke=20, fill_empty_with=0) feat2 = features.ReCurvature(strokes=1) feat3 = features.Ink() X_i += hw.feature_extraction([feat1, feat2, feat3]) X_i += [get_strokes_distance(stroke1, stroke2)] # Distance of strokes X_i += [get_time_distance(stroke1, stroke2)] # Time in between X_i += [abs(strokeid2-strokeid1)] # Strokes in between # X_i += [get_black_percentage()] return X_i
[ "def", "get_stroke_features", "(", "recording", ",", "strokeid1", ",", "strokeid2", ")", ":", "stroke1", "=", "recording", "[", "strokeid1", "]", "stroke2", "=", "recording", "[", "strokeid2", "]", "assert", "isinstance", "(", "stroke1", ",", "list", ")", ",", "\"stroke1 is a %s\"", "%", "type", "(", "stroke1", ")", "X_i", "=", "[", "]", "for", "s", "in", "[", "stroke1", ",", "stroke2", "]", ":", "hw", "=", "HandwrittenData", "(", "json", ".", "dumps", "(", "[", "s", "]", ")", ")", "feat1", "=", "features", ".", "ConstantPointCoordinates", "(", "strokes", "=", "1", ",", "points_per_stroke", "=", "20", ",", "fill_empty_with", "=", "0", ")", "feat2", "=", "features", ".", "ReCurvature", "(", "strokes", "=", "1", ")", "feat3", "=", "features", ".", "Ink", "(", ")", "X_i", "+=", "hw", ".", "feature_extraction", "(", "[", "feat1", ",", "feat2", ",", "feat3", "]", ")", "X_i", "+=", "[", "get_strokes_distance", "(", "stroke1", ",", "stroke2", ")", "]", "# Distance of strokes", "X_i", "+=", "[", "get_time_distance", "(", "stroke1", ",", "stroke2", ")", "]", "# Time in between", "X_i", "+=", "[", "abs", "(", "strokeid2", "-", "strokeid1", ")", "]", "# Strokes in between", "# X_i += [get_black_percentage()]", "return", "X_i" ]
Get the features used to decide if two strokes belong to the same symbol or not. Parameters ---------- recording : list A list of strokes strokeid1 : int strokeid2 : int Returns ------- list : A list of features which could be useful to decide if stroke1 and stroke2 belong to the same symbol.
[ "Get", "the", "features", "used", "to", "decide", "if", "two", "strokes", "belong", "to", "the", "same", "symbol", "or", "not", "." ]
python
train
Alignak-monitoring/alignak
alignak/basemodule.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/basemodule.py#L246-L276
def start(self, http_daemon=None): # pylint: disable=unused-argument """Actually restart the process if the module is external Try first to stop the process and create a new Process instance with target start_module. Finally start process. :param http_daemon: Not used here but can be used in other modules :type http_daemon: None | object :return: None """ if not self.is_external: return if self.process: self.stop_process() logger.info("Starting external process for module %s...", self.name) proc = Process(target=self.start_module, args=(), group=None) # Under windows we should not call start() on an object that got its process # as an object, so we remove it and we set it in a earlier start try: del self.properties['process'] except KeyError: pass proc.start() # We save the process data AFTER the fork() self.process = proc self.properties['process'] = proc logger.info("%s is now started (pid=%d)", self.name, proc.pid)
[ "def", "start", "(", "self", ",", "http_daemon", "=", "None", ")", ":", "# pylint: disable=unused-argument", "if", "not", "self", ".", "is_external", ":", "return", "if", "self", ".", "process", ":", "self", ".", "stop_process", "(", ")", "logger", ".", "info", "(", "\"Starting external process for module %s...\"", ",", "self", ".", "name", ")", "proc", "=", "Process", "(", "target", "=", "self", ".", "start_module", ",", "args", "=", "(", ")", ",", "group", "=", "None", ")", "# Under windows we should not call start() on an object that got its process", "# as an object, so we remove it and we set it in a earlier start", "try", ":", "del", "self", ".", "properties", "[", "'process'", "]", "except", "KeyError", ":", "pass", "proc", ".", "start", "(", ")", "# We save the process data AFTER the fork()", "self", ".", "process", "=", "proc", "self", ".", "properties", "[", "'process'", "]", "=", "proc", "logger", ".", "info", "(", "\"%s is now started (pid=%d)\"", ",", "self", ".", "name", ",", "proc", ".", "pid", ")" ]
Actually restart the process if the module is external. Try first to stop the process and create a new Process instance with target start_module. Finally start the process. :param http_daemon: Not used here but can be used in other modules :type http_daemon: None | object :return: None
[ "Actually", "restart", "the", "process", "if", "the", "module", "is", "external", "Try", "first", "to", "stop", "the", "process", "and", "create", "a", "new", "Process", "instance", "with", "target", "start_module", ".", "Finally", "start", "process", "." ]
python
train
biocommons/hgvs
hgvs/transcriptmapper.py
https://github.com/biocommons/hgvs/blob/4d16efb475e1802b2531a2f1c373e8819d8e533b/hgvs/transcriptmapper.py#L170-L192
def n_to_g(self, n_interval): """convert a transcript cDNA (n.) interval to a genomic (g.) interval""" assert self.strand in [1, -1], "strand = " + str(self.strand) + "; must be 1 or -1" if self.strand == 1: frs, fre = _hgvs_coord_to_ci(n_interval.start.base, n_interval.end.base) start_offset, end_offset = n_interval.start.offset, n_interval.end.offset elif self.strand == -1: frs, fre = _hgvs_coord_to_ci(n_interval.start.base, n_interval.end.base) fre, frs = self.tgt_len - frs, self.tgt_len - fre start_offset, end_offset = self.strand * n_interval.end.offset, self.strand * n_interval.start.offset # returns the genomic range start (grs) and end (gre) grs, gre = self.im.map_tgt_to_ref(frs, fre, max_extent=False) grs, gre = grs + self.gc_offset, gre + self.gc_offset gs, ge = grs + start_offset, gre + end_offset return hgvs.location.Interval( start=hgvs.location.SimplePosition( _ci_to_hgvs_coord(gs, ge)[0], uncertain=n_interval.start.uncertain), end=hgvs.location.SimplePosition( _ci_to_hgvs_coord(gs, ge)[1], uncertain=n_interval.end.uncertain), uncertain=n_interval.uncertain)
[ "def", "n_to_g", "(", "self", ",", "n_interval", ")", ":", "assert", "self", ".", "strand", "in", "[", "1", ",", "-", "1", "]", ",", "\"strand = \"", "+", "str", "(", "self", ".", "strand", ")", "+", "\"; must be 1 or -1\"", "if", "self", ".", "strand", "==", "1", ":", "frs", ",", "fre", "=", "_hgvs_coord_to_ci", "(", "n_interval", ".", "start", ".", "base", ",", "n_interval", ".", "end", ".", "base", ")", "start_offset", ",", "end_offset", "=", "n_interval", ".", "start", ".", "offset", ",", "n_interval", ".", "end", ".", "offset", "elif", "self", ".", "strand", "==", "-", "1", ":", "frs", ",", "fre", "=", "_hgvs_coord_to_ci", "(", "n_interval", ".", "start", ".", "base", ",", "n_interval", ".", "end", ".", "base", ")", "fre", ",", "frs", "=", "self", ".", "tgt_len", "-", "frs", ",", "self", ".", "tgt_len", "-", "fre", "start_offset", ",", "end_offset", "=", "self", ".", "strand", "*", "n_interval", ".", "end", ".", "offset", ",", "self", ".", "strand", "*", "n_interval", ".", "start", ".", "offset", "# returns the genomic range start (grs) and end (gre)", "grs", ",", "gre", "=", "self", ".", "im", ".", "map_tgt_to_ref", "(", "frs", ",", "fre", ",", "max_extent", "=", "False", ")", "grs", ",", "gre", "=", "grs", "+", "self", ".", "gc_offset", ",", "gre", "+", "self", ".", "gc_offset", "gs", ",", "ge", "=", "grs", "+", "start_offset", ",", "gre", "+", "end_offset", "return", "hgvs", ".", "location", ".", "Interval", "(", "start", "=", "hgvs", ".", "location", ".", "SimplePosition", "(", "_ci_to_hgvs_coord", "(", "gs", ",", "ge", ")", "[", "0", "]", ",", "uncertain", "=", "n_interval", ".", "start", ".", "uncertain", ")", ",", "end", "=", "hgvs", ".", "location", ".", "SimplePosition", "(", "_ci_to_hgvs_coord", "(", "gs", ",", "ge", ")", "[", "1", "]", ",", "uncertain", "=", "n_interval", ".", "end", ".", "uncertain", ")", ",", "uncertain", "=", "n_interval", ".", "uncertain", ")" ]
convert a transcript cDNA (n.) interval to a genomic (g.) interval
[ "convert", "a", "transcript", "cDNA", "(", "n", ".", ")", "interval", "to", "a", "genomic", "(", "g", ".", ")", "interval" ]
python
train
azraq27/neural
neural/alignment.py
https://github.com/azraq27/neural/blob/fe91bfeecbf73ad99708cf5dca66cb61fcd529f5/neural/alignment.py#L7-L13
def align_epi(anatomy,epis,suffix='_al',base=3,skull_strip=True): '''[[currently in progress]]: a simple replacement for the ``align_epi_anat.py`` script, because I've found it to be unreliable, in my usage''' for epi in epis: nl.tshift(epi,suffix='_tshift') nl.affine_align(nl.suffix(epi,'_tshift'),'%s[%d]'%(epis[0],base),skull_strip=False,epi=True,cost='crM',resample='wsinc5',grid_size=nl.dset_info(epi).voxel_size[0],suffix='_al') ss = [anatomy] if skull_strip else False nl.affine_align(anatomy,'%s[%d]'%(epis[0],base),skull_strip=ss,cost='lpa',grid_size=1,opts=['-interp','cubic'],suffix='_al-to-EPI')
[ "def", "align_epi", "(", "anatomy", ",", "epis", ",", "suffix", "=", "'_al'", ",", "base", "=", "3", ",", "skull_strip", "=", "True", ")", ":", "for", "epi", "in", "epis", ":", "nl", ".", "tshift", "(", "epi", ",", "suffix", "=", "'_tshift'", ")", "nl", ".", "affine_align", "(", "nl", ".", "suffix", "(", "epi", ",", "'_tshift'", ")", ",", "'%s[%d]'", "%", "(", "epis", "[", "0", "]", ",", "base", ")", ",", "skull_strip", "=", "False", ",", "epi", "=", "True", ",", "cost", "=", "'crM'", ",", "resample", "=", "'wsinc5'", ",", "grid_size", "=", "nl", ".", "dset_info", "(", "epi", ")", ".", "voxel_size", "[", "0", "]", ",", "suffix", "=", "'_al'", ")", "ss", "=", "[", "anatomy", "]", "if", "skull_strip", "else", "False", "nl", ".", "affine_align", "(", "anatomy", ",", "'%s[%d]'", "%", "(", "epis", "[", "0", "]", ",", "base", ")", ",", "skull_strip", "=", "ss", ",", "cost", "=", "'lpa'", ",", "grid_size", "=", "1", ",", "opts", "=", "[", "'-interp'", ",", "'cubic'", "]", ",", "suffix", "=", "'_al-to-EPI'", ")" ]
[[currently in progress]]: a simple replacement for the ``align_epi_anat.py`` script, because I've found it to be unreliable, in my usage
[ "[[", "currently", "in", "progress", "]]", ":", "a", "simple", "replacement", "for", "the", "align_epi_anat", ".", "py", "script", "because", "I", "ve", "found", "it", "to", "be", "unreliable", "in", "my", "usage" ]
python
train
marcomusy/vtkplotter
vtkplotter/shapes.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/shapes.py#L244-L294
def Line(p0, p1=None, lw=1, c="r", alpha=1, dotted=False): """ Build the line segment between points `p0` and `p1`. If `p0` is a list of points returns the line connecting them. A 2D set of coords can also be passed as p0=[x..], p1=[y..]. :param lw: line width. :param c: color name, number, or list of [R,G,B] colors. :type c: int, str, list :param float alpha: transparency in range [0,1]. :param bool dotted: draw a dotted line """ # detect if user is passing a 2D ist of points as p0=xlist, p1=ylist: if len(p0) > 3: if not utils.isSequence(p0[0]) and not utils.isSequence(p1[0]) and len(p0)==len(p1): # assume input is 2D xlist, ylist p0 = list(zip(p0, p1)) p1 = None # detect if user is passing a list of points: if utils.isSequence(p0[0]): ppoints = vtk.vtkPoints() # Generate the polyline dim = len((p0[0])) if dim == 2: for i, p in enumerate(p0): ppoints.InsertPoint(i, p[0], p[1], 0) else: ppoints.SetData(numpy_to_vtk(p0, deep=True)) lines = vtk.vtkCellArray() # Create the polyline. lines.InsertNextCell(len(p0)) for i in range(len(p0)): lines.InsertCellPoint(i) poly = vtk.vtkPolyData() poly.SetPoints(ppoints) poly.SetLines(lines) else: # or just 2 points to link lineSource = vtk.vtkLineSource() lineSource.SetPoint1(p0) lineSource.SetPoint2(p1) lineSource.Update() poly = lineSource.GetOutput() actor = Actor(poly, c, alpha) actor.GetProperty().SetLineWidth(lw) if dotted: actor.GetProperty().SetLineStipplePattern(0xF0F0) actor.GetProperty().SetLineStippleRepeatFactor(1) actor.base = np.array(p0) actor.top = np.array(p1) settings.collectable_actors.append(actor) return actor
[ "def", "Line", "(", "p0", ",", "p1", "=", "None", ",", "lw", "=", "1", ",", "c", "=", "\"r\"", ",", "alpha", "=", "1", ",", "dotted", "=", "False", ")", ":", "# detect if user is passing a 2D ist of points as p0=xlist, p1=ylist:", "if", "len", "(", "p0", ")", ">", "3", ":", "if", "not", "utils", ".", "isSequence", "(", "p0", "[", "0", "]", ")", "and", "not", "utils", ".", "isSequence", "(", "p1", "[", "0", "]", ")", "and", "len", "(", "p0", ")", "==", "len", "(", "p1", ")", ":", "# assume input is 2D xlist, ylist", "p0", "=", "list", "(", "zip", "(", "p0", ",", "p1", ")", ")", "p1", "=", "None", "# detect if user is passing a list of points:", "if", "utils", ".", "isSequence", "(", "p0", "[", "0", "]", ")", ":", "ppoints", "=", "vtk", ".", "vtkPoints", "(", ")", "# Generate the polyline", "dim", "=", "len", "(", "(", "p0", "[", "0", "]", ")", ")", "if", "dim", "==", "2", ":", "for", "i", ",", "p", "in", "enumerate", "(", "p0", ")", ":", "ppoints", ".", "InsertPoint", "(", "i", ",", "p", "[", "0", "]", ",", "p", "[", "1", "]", ",", "0", ")", "else", ":", "ppoints", ".", "SetData", "(", "numpy_to_vtk", "(", "p0", ",", "deep", "=", "True", ")", ")", "lines", "=", "vtk", ".", "vtkCellArray", "(", ")", "# Create the polyline.", "lines", ".", "InsertNextCell", "(", "len", "(", "p0", ")", ")", "for", "i", "in", "range", "(", "len", "(", "p0", ")", ")", ":", "lines", ".", "InsertCellPoint", "(", "i", ")", "poly", "=", "vtk", ".", "vtkPolyData", "(", ")", "poly", ".", "SetPoints", "(", "ppoints", ")", "poly", ".", "SetLines", "(", "lines", ")", "else", ":", "# or just 2 points to link", "lineSource", "=", "vtk", ".", "vtkLineSource", "(", ")", "lineSource", ".", "SetPoint1", "(", "p0", ")", "lineSource", ".", "SetPoint2", "(", "p1", ")", "lineSource", ".", "Update", "(", ")", "poly", "=", "lineSource", ".", "GetOutput", "(", ")", "actor", "=", "Actor", "(", "poly", ",", "c", ",", "alpha", ")", "actor", ".", "GetProperty", "(", ")", ".", "SetLineWidth", "(", "lw", ")", "if", "dotted", ":", "actor", ".", "GetProperty", "(", ")", ".", "SetLineStipplePattern", "(", "0xF0F0", ")", "actor", ".", "GetProperty", "(", ")", ".", "SetLineStippleRepeatFactor", "(", "1", ")", "actor", ".", "base", "=", "np", ".", "array", "(", "p0", ")", "actor", ".", "top", "=", "np", ".", "array", "(", "p1", ")", "settings", ".", "collectable_actors", ".", "append", "(", "actor", ")", "return", "actor" ]
Build the line segment between points `p0` and `p1`. If `p0` is a list of points returns the line connecting them. A 2D set of coords can also be passed as p0=[x..], p1=[y..]. :param lw: line width. :param c: color name, number, or list of [R,G,B] colors. :type c: int, str, list :param float alpha: transparency in range [0,1]. :param bool dotted: draw a dotted line
[ "Build", "the", "line", "segment", "between", "points", "p0", "and", "p1", ".", "If", "p0", "is", "a", "list", "of", "points", "returns", "the", "line", "connecting", "them", ".", "A", "2D", "set", "of", "coords", "can", "also", "be", "passed", "as", "p0", "=", "[", "x", "..", "]", "p1", "=", "[", "y", "..", "]", "." ]
python
train
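A few plausible calls to the Line() factory documented above, covering the two-point, point-list, and x-list/y-list forms; the top-level imports are assumed to match the vtkplotter release this record was taken from.

# Hypothetical usage of Line(); requires vtkplotter and a working VTK install.
from vtkplotter import Line, show

seg = Line((0, 0, 0), (1, 1, 1), lw=3, c='blue')               # two points
poly = Line([(0, 0, 0), (1, 1, 0), (2, 0, 0)], dotted=True)    # list of points
xy = Line([0, 1, 2, 3], [0, 1, 0, 1], c='red')                 # 2D x-list / y-list form
show(seg, poly, xy)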
kennethreitz/envoy
envoy/core.py
https://github.com/kennethreitz/envoy/blob/ab463a14da47bd8334cdf5e64f6b9dd2ba9dd28a/envoy/core.py#L175-L194
def expand_args(command): """Parses command strings and returns a Popen-ready list.""" # Prepare arguments. if isinstance(command, (str, unicode)): splitter = shlex.shlex(command.encode('utf-8')) splitter.whitespace = '|' splitter.whitespace_split = True command = [] while True: token = splitter.get_token() if token: command.append(token) else: break command = list(map(shlex.split, command)) return command
[ "def", "expand_args", "(", "command", ")", ":", "# Prepare arguments.", "if", "isinstance", "(", "command", ",", "(", "str", ",", "unicode", ")", ")", ":", "splitter", "=", "shlex", ".", "shlex", "(", "command", ".", "encode", "(", "'utf-8'", ")", ")", "splitter", ".", "whitespace", "=", "'|'", "splitter", ".", "whitespace_split", "=", "True", "command", "=", "[", "]", "while", "True", ":", "token", "=", "splitter", ".", "get_token", "(", ")", "if", "token", ":", "command", ".", "append", "(", "token", ")", "else", ":", "break", "command", "=", "list", "(", "map", "(", "shlex", ".", "split", ",", "command", ")", ")", "return", "command" ]
Parses command strings and returns a Popen-ready list.
[ "Parses", "command", "strings", "and", "returns", "a", "Popen", "-", "ready", "list", "." ]
python
valid
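Tracing the code above, a command string is first split on '|' and each stage is then shlex-split, so a pipeline becomes a list of argument lists. A small usage sketch, assuming Python 2 (the isinstance check references unicode) and the import path shown in this record:

# Hypothetical usage of expand_args() from envoy/core.py (Python 2).
from envoy.core import expand_args

print(expand_args("ls -la | grep py"))
# [['ls', '-la'], ['grep', 'py']]
print(expand_args("echo 'hello world'"))
# [['echo', 'hello world']]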
osrg/ryu
ryu/services/protocols/bgp/utils/bgp.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/utils/bgp.py#L257-L280
def create_l2vpnflowspec_actions(actions=None): """ Create list of traffic filtering actions for L2VPN Flow Specification. """ from ryu.services.protocols.bgp.api.prefix import ( FLOWSPEC_ACTION_TRAFFIC_RATE, FLOWSPEC_ACTION_TRAFFIC_ACTION, FLOWSPEC_ACTION_REDIRECT, FLOWSPEC_ACTION_TRAFFIC_MARKING, FLOWSPEC_ACTION_VLAN, FLOWSPEC_ACTION_TPID, ) # Supported action type for L2VPN. action_types = { FLOWSPEC_ACTION_TRAFFIC_RATE: BGPFlowSpecTrafficRateCommunity, FLOWSPEC_ACTION_TRAFFIC_ACTION: BGPFlowSpecTrafficActionCommunity, FLOWSPEC_ACTION_REDIRECT: BGPFlowSpecRedirectCommunity, FLOWSPEC_ACTION_TRAFFIC_MARKING: BGPFlowSpecTrafficMarkingCommunity, FLOWSPEC_ACTION_VLAN: BGPFlowSpecVlanActionCommunity, FLOWSPEC_ACTION_TPID: BGPFlowSpecTPIDActionCommunity, } return _create_actions(actions, action_types)
[ "def", "create_l2vpnflowspec_actions", "(", "actions", "=", "None", ")", ":", "from", "ryu", ".", "services", ".", "protocols", ".", "bgp", ".", "api", ".", "prefix", "import", "(", "FLOWSPEC_ACTION_TRAFFIC_RATE", ",", "FLOWSPEC_ACTION_TRAFFIC_ACTION", ",", "FLOWSPEC_ACTION_REDIRECT", ",", "FLOWSPEC_ACTION_TRAFFIC_MARKING", ",", "FLOWSPEC_ACTION_VLAN", ",", "FLOWSPEC_ACTION_TPID", ",", ")", "# Supported action type for L2VPN.", "action_types", "=", "{", "FLOWSPEC_ACTION_TRAFFIC_RATE", ":", "BGPFlowSpecTrafficRateCommunity", ",", "FLOWSPEC_ACTION_TRAFFIC_ACTION", ":", "BGPFlowSpecTrafficActionCommunity", ",", "FLOWSPEC_ACTION_REDIRECT", ":", "BGPFlowSpecRedirectCommunity", ",", "FLOWSPEC_ACTION_TRAFFIC_MARKING", ":", "BGPFlowSpecTrafficMarkingCommunity", ",", "FLOWSPEC_ACTION_VLAN", ":", "BGPFlowSpecVlanActionCommunity", ",", "FLOWSPEC_ACTION_TPID", ":", "BGPFlowSpecTPIDActionCommunity", ",", "}", "return", "_create_actions", "(", "actions", ",", "action_types", ")" ]
Create list of traffic filtering actions for L2VPN Flow Specification.
[ "Create", "list", "of", "traffic", "filtering", "actions", "for", "L2VPN", "Flow", "Specification", "." ]
python
train
wandb/client
wandb/apis/internal.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/apis/internal.py#L341-L363
def project(self, project, entity=None):
        """Retrieve project

        Args:
            project (str): The project to get details for
            entity (str, optional): The entity to scope this project to.

        Returns:
                [{"id","name","repo","dockerImage","description"}]
        """
        query = gql('''
        query Models($entity: String, $project: String!) {
            model(name: $project, entityName: $entity) {
                id
                name
                repo
                dockerImage
                description
            }
        }
        ''')
        return self.gql(query, variable_values={
            'entity': entity, 'project': project})['model']

[ "def", "project", "(", "self", ",", "project", ",", "entity", "=", "None", ")", ":", "query", "=", "gql", "(", "'''\n query Models($entity: String, $project: String!) {\n model(name: $project, entityName: $entity) {\n id\n name\n repo\n dockerImage\n description\n }\n }\n '''", ")", "return", "self", ".", "gql", "(", "query", ",", "variable_values", "=", "{", "'entity'", ":", "entity", ",", "'project'", ":", "project", "}", ")", "[", "'model'", "]" ]
Retrieve project

        Args:
            project (str): The project to get details for
            entity (str, optional): The entity to scope this project to.

        Returns:
                [{"id","name","repo","dockerImage","description"}]
[ "Retrive", "project" ]
python
train
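A hedged example of calling the project() query above; the Api class name is assumed from the wandb/apis/internal.py module path, and the entity/project values are placeholders.

# Hypothetical call; requires a configured wandb API key.
from wandb.apis.internal import Api

api = Api()
info = api.project('my-project', entity='my-team')
print(info['name'], info['repo'], info['dockerImage'], info['description'])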
google/grr
grr/core/grr_response_core/lib/parsers/chrome_history.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/parsers/chrome_history.py#L25-L46
def Parse(self, stat, file_object, knowledge_base): """Parse the History file.""" _ = knowledge_base # TODO(user): Convert this to use the far more intelligent plaso parser. chrome = ChromeParser(file_object) for timestamp, entry_type, url, data1, _, _ in chrome.Parse(): if entry_type == "CHROME_DOWNLOAD": yield rdf_webhistory.BrowserHistoryItem( url=url, domain=urlparse.urlparse(url).netloc, access_time=timestamp, program_name="Chrome", source_path=file_object.Path(), download_path=data1) elif entry_type == "CHROME_VISIT": yield rdf_webhistory.BrowserHistoryItem( url=url, domain=urlparse.urlparse(url).netloc, access_time=timestamp, program_name="Chrome", source_path=file_object.Path(), title=data1)
[ "def", "Parse", "(", "self", ",", "stat", ",", "file_object", ",", "knowledge_base", ")", ":", "_", "=", "knowledge_base", "# TODO(user): Convert this to use the far more intelligent plaso parser.", "chrome", "=", "ChromeParser", "(", "file_object", ")", "for", "timestamp", ",", "entry_type", ",", "url", ",", "data1", ",", "_", ",", "_", "in", "chrome", ".", "Parse", "(", ")", ":", "if", "entry_type", "==", "\"CHROME_DOWNLOAD\"", ":", "yield", "rdf_webhistory", ".", "BrowserHistoryItem", "(", "url", "=", "url", ",", "domain", "=", "urlparse", ".", "urlparse", "(", "url", ")", ".", "netloc", ",", "access_time", "=", "timestamp", ",", "program_name", "=", "\"Chrome\"", ",", "source_path", "=", "file_object", ".", "Path", "(", ")", ",", "download_path", "=", "data1", ")", "elif", "entry_type", "==", "\"CHROME_VISIT\"", ":", "yield", "rdf_webhistory", ".", "BrowserHistoryItem", "(", "url", "=", "url", ",", "domain", "=", "urlparse", ".", "urlparse", "(", "url", ")", ".", "netloc", ",", "access_time", "=", "timestamp", ",", "program_name", "=", "\"Chrome\"", ",", "source_path", "=", "file_object", ".", "Path", "(", ")", ",", "title", "=", "data1", ")" ]
Parse the History file.
[ "Parse", "the", "History", "file", "." ]
python
train
backtrader/backtrader
backtrader/plot/multicursor.py
https://github.com/backtrader/backtrader/blob/59ee9521f9887c2a1030c6f1db8c918a5816fd64/backtrader/plot/multicursor.py#L179-L182
def disconnect(self): """disconnect events""" self.canvas.mpl_disconnect(self._cidmotion) self.canvas.mpl_disconnect(self._ciddraw)
[ "def", "disconnect", "(", "self", ")", ":", "self", ".", "canvas", ".", "mpl_disconnect", "(", "self", ".", "_cidmotion", ")", "self", ".", "canvas", ".", "mpl_disconnect", "(", "self", ".", "_ciddraw", ")" ]
disconnect events
[ "disconnect", "events" ]
python
train
PythonSanSebastian/docstamp
docstamp/template.py
https://github.com/PythonSanSebastian/docstamp/blob/b43808f2e15351b0b2f0b7eade9c7ef319c9e646/docstamp/template.py#L204-L223
def fill(self, doc_contents):
        """ Fill the content of the document with the information in doc_contents.
        This is different from the TextDocument fill function, because this will
        check for symbols in the values of `doc_content` and replace them to
        good XML codes before filling the template.

        Parameters
        ----------
        doc_contents: dict
            Set of values to set the template document.

        Returns
        -------
        filled_doc: str
            The content of the document with the template information filled.
        """
        for key, content in doc_contents.items():
            doc_contents[key] = replace_chars_for_svg_code(content)

        return super(SVGDocument, self).fill(doc_contents=doc_contents)
[ "def", "fill", "(", "self", ",", "doc_contents", ")", ":", "for", "key", ",", "content", "in", "doc_contents", ".", "items", "(", ")", ":", "doc_contents", "[", "key", "]", "=", "replace_chars_for_svg_code", "(", "content", ")", "return", "super", "(", "SVGDocument", ",", "self", ")", ".", "fill", "(", "doc_contents", "=", "doc_contents", ")" ]
Fill the content of the document with the information in doc_contents.
        This is different from the TextDocument fill function, because this will
        check for symbols in the values of `doc_content` and replace them to
        good XML codes before filling the template.

        Parameters
        ----------
        doc_contents: dict
            Set of values to set the template document.

        Returns
        -------
        filled_doc: str
            The content of the document with the template information filled.
[ "Fill", "the", "content", "of", "the", "document", "with", "the", "information", "in", "doc_contents", ".", "This", "is", "different", "from", "the", "TextDocument", "fill", "function", "because", "this", "will", "check", "for", "symbools", "in", "the", "values", "of", "doc_content", "and", "replace", "them", "to", "good", "XML", "codes", "before", "filling", "the", "template", "." ]
python
test
MillionIntegrals/vel
vel/augmentations/random_crop.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/augmentations/random_crop.py#L88-L90
def create(width, height, padding=0, padding_mode='constant', mode='x', tags=None): """ Vel factory function """ return RandomCrop(size=(width, height), padding=padding, padding_mode=padding_mode, mode=mode, tags=tags)
[ "def", "create", "(", "width", ",", "height", ",", "padding", "=", "0", ",", "padding_mode", "=", "'constant'", ",", "mode", "=", "'x'", ",", "tags", "=", "None", ")", ":", "return", "RandomCrop", "(", "size", "=", "(", "width", ",", "height", ")", ",", "padding", "=", "padding", ",", "padding_mode", "=", "padding_mode", ",", "mode", "=", "mode", ",", "tags", "=", "tags", ")" ]
Vel factory function
[ "Vel", "factory", "function" ]
python
train
JarryShaw/PyPCAPKit
src/utilities/decorators.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/utilities/decorators.py#L63-L82
def beholder_ng(func): """Behold analysis procedure.""" @functools.wraps(func) def behold(file, length, *args, **kwargs): seek_cur = file.tell() try: return func(file, length, *args, **kwargs) except Exception: # from pcapkit.foundation.analysis import analyse from pcapkit.protocols.raw import Raw error = traceback.format_exc(limit=1).strip().split(os.linesep)[-1] # error = traceback.format_exc() file.seek(seek_cur, os.SEEK_SET) # raw = Raw(file, length, error=str(error)) # return analyse(raw.info, raw.protochain, raw.alias) next_ = Raw(file, length, error=error) return next_ return behold
[ "def", "beholder_ng", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "behold", "(", "file", ",", "length", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "seek_cur", "=", "file", ".", "tell", "(", ")", "try", ":", "return", "func", "(", "file", ",", "length", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", "Exception", ":", "# from pcapkit.foundation.analysis import analyse", "from", "pcapkit", ".", "protocols", ".", "raw", "import", "Raw", "error", "=", "traceback", ".", "format_exc", "(", "limit", "=", "1", ")", ".", "strip", "(", ")", ".", "split", "(", "os", ".", "linesep", ")", "[", "-", "1", "]", "# error = traceback.format_exc()", "file", ".", "seek", "(", "seek_cur", ",", "os", ".", "SEEK_SET", ")", "# raw = Raw(file, length, error=str(error))", "# return analyse(raw.info, raw.protochain, raw.alias)", "next_", "=", "Raw", "(", "file", ",", "length", ",", "error", "=", "error", ")", "return", "next_", "return", "behold" ]
Behold analysis procedure.
[ "Behold", "analysis", "procedure", "." ]
python
train
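The decorator above remembers the file offset, tries the wrapped parser, and on failure rewinds and substitutes a Raw protocol object. A simplified, pcapkit-free sketch of the same seek-and-fall-back idea, using a plain dict in place of Raw:

# Standalone sketch of the fallback pattern; not pcapkit's actual implementation.
import functools
import io
import os

def with_raw_fallback(func):
    @functools.wraps(func)
    def wrapper(file, length, *args, **kwargs):
        seek_cur = file.tell()
        try:
            return func(file, length, *args, **kwargs)
        except Exception as exc:
            # Rewind to where parsing started and hand back the raw bytes instead.
            file.seek(seek_cur, os.SEEK_SET)
            return {'protocol': 'raw', 'data': file.read(length), 'error': str(exc)}
    return wrapper

@with_raw_fallback
def parse_header(file, length):
    raise ValueError('malformed header')   # simulate a failing parser

buf = io.BytesIO(b'\xde\xad\xbe\xef')
print(parse_header(buf, 4))                # falls back to the raw payload plus the error text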
pypa/setuptools
setuptools/dist.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/dist.py#L124-L197
def write_pkg_file(self, file): """Write the PKG-INFO format data to a file object. """ version = self.get_metadata_version() if six.PY2: def write_field(key, value): file.write("%s: %s\n" % (key, self._encode_field(value))) else: def write_field(key, value): file.write("%s: %s\n" % (key, value)) write_field('Metadata-Version', str(version)) write_field('Name', self.get_name()) write_field('Version', self.get_version()) write_field('Summary', self.get_description()) write_field('Home-page', self.get_url()) if version < StrictVersion('1.2'): write_field('Author', self.get_contact()) write_field('Author-email', self.get_contact_email()) else: optional_fields = ( ('Author', 'author'), ('Author-email', 'author_email'), ('Maintainer', 'maintainer'), ('Maintainer-email', 'maintainer_email'), ) for field, attr in optional_fields: attr_val = getattr(self, attr) if attr_val is not None: write_field(field, attr_val) write_field('License', self.get_license()) if self.download_url: write_field('Download-URL', self.download_url) for project_url in self.project_urls.items(): write_field('Project-URL', '%s, %s' % project_url) long_desc = rfc822_escape(self.get_long_description()) write_field('Description', long_desc) keywords = ','.join(self.get_keywords()) if keywords: write_field('Keywords', keywords) if version >= StrictVersion('1.2'): for platform in self.get_platforms(): write_field('Platform', platform) else: self._write_list(file, 'Platform', self.get_platforms()) self._write_list(file, 'Classifier', self.get_classifiers()) # PEP 314 self._write_list(file, 'Requires', self.get_requires()) self._write_list(file, 'Provides', self.get_provides()) self._write_list(file, 'Obsoletes', self.get_obsoletes()) # Setuptools specific for PEP 345 if hasattr(self, 'python_requires'): write_field('Requires-Python', self.python_requires) # PEP 566 if self.long_description_content_type: write_field( 'Description-Content-Type', self.long_description_content_type ) if self.provides_extras: for extra in self.provides_extras: write_field('Provides-Extra', extra)
[ "def", "write_pkg_file", "(", "self", ",", "file", ")", ":", "version", "=", "self", ".", "get_metadata_version", "(", ")", "if", "six", ".", "PY2", ":", "def", "write_field", "(", "key", ",", "value", ")", ":", "file", ".", "write", "(", "\"%s: %s\\n\"", "%", "(", "key", ",", "self", ".", "_encode_field", "(", "value", ")", ")", ")", "else", ":", "def", "write_field", "(", "key", ",", "value", ")", ":", "file", ".", "write", "(", "\"%s: %s\\n\"", "%", "(", "key", ",", "value", ")", ")", "write_field", "(", "'Metadata-Version'", ",", "str", "(", "version", ")", ")", "write_field", "(", "'Name'", ",", "self", ".", "get_name", "(", ")", ")", "write_field", "(", "'Version'", ",", "self", ".", "get_version", "(", ")", ")", "write_field", "(", "'Summary'", ",", "self", ".", "get_description", "(", ")", ")", "write_field", "(", "'Home-page'", ",", "self", ".", "get_url", "(", ")", ")", "if", "version", "<", "StrictVersion", "(", "'1.2'", ")", ":", "write_field", "(", "'Author'", ",", "self", ".", "get_contact", "(", ")", ")", "write_field", "(", "'Author-email'", ",", "self", ".", "get_contact_email", "(", ")", ")", "else", ":", "optional_fields", "=", "(", "(", "'Author'", ",", "'author'", ")", ",", "(", "'Author-email'", ",", "'author_email'", ")", ",", "(", "'Maintainer'", ",", "'maintainer'", ")", ",", "(", "'Maintainer-email'", ",", "'maintainer_email'", ")", ",", ")", "for", "field", ",", "attr", "in", "optional_fields", ":", "attr_val", "=", "getattr", "(", "self", ",", "attr", ")", "if", "attr_val", "is", "not", "None", ":", "write_field", "(", "field", ",", "attr_val", ")", "write_field", "(", "'License'", ",", "self", ".", "get_license", "(", ")", ")", "if", "self", ".", "download_url", ":", "write_field", "(", "'Download-URL'", ",", "self", ".", "download_url", ")", "for", "project_url", "in", "self", ".", "project_urls", ".", "items", "(", ")", ":", "write_field", "(", "'Project-URL'", ",", "'%s, %s'", "%", "project_url", ")", "long_desc", "=", "rfc822_escape", "(", "self", ".", "get_long_description", "(", ")", ")", "write_field", "(", "'Description'", ",", "long_desc", ")", "keywords", "=", "','", ".", "join", "(", "self", ".", "get_keywords", "(", ")", ")", "if", "keywords", ":", "write_field", "(", "'Keywords'", ",", "keywords", ")", "if", "version", ">=", "StrictVersion", "(", "'1.2'", ")", ":", "for", "platform", "in", "self", ".", "get_platforms", "(", ")", ":", "write_field", "(", "'Platform'", ",", "platform", ")", "else", ":", "self", ".", "_write_list", "(", "file", ",", "'Platform'", ",", "self", ".", "get_platforms", "(", ")", ")", "self", ".", "_write_list", "(", "file", ",", "'Classifier'", ",", "self", ".", "get_classifiers", "(", ")", ")", "# PEP 314", "self", ".", "_write_list", "(", "file", ",", "'Requires'", ",", "self", ".", "get_requires", "(", ")", ")", "self", ".", "_write_list", "(", "file", ",", "'Provides'", ",", "self", ".", "get_provides", "(", ")", ")", "self", ".", "_write_list", "(", "file", ",", "'Obsoletes'", ",", "self", ".", "get_obsoletes", "(", ")", ")", "# Setuptools specific for PEP 345", "if", "hasattr", "(", "self", ",", "'python_requires'", ")", ":", "write_field", "(", "'Requires-Python'", ",", "self", ".", "python_requires", ")", "# PEP 566", "if", "self", ".", "long_description_content_type", ":", "write_field", "(", "'Description-Content-Type'", ",", "self", ".", "long_description_content_type", ")", "if", "self", ".", "provides_extras", ":", "for", "extra", "in", "self", ".", "provides_extras", ":", "write_field", "(", 
"'Provides-Extra'", ",", "extra", ")" ]
Write the PKG-INFO format data to a file object.
[ "Write", "the", "PKG", "-", "INFO", "format", "data", "to", "a", "file", "object", "." ]
python
train
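One plausible way to exercise write_pkg_file(): build a setuptools Distribution from an attrs dict and write its metadata out. The field values below are placeholders, and the call assumes the setuptools vintage shown in this record.

# Hypothetical usage; writes a PKG-INFO file to the current directory.
from setuptools.dist import Distribution

dist = Distribution({
    'name': 'demo',
    'version': '0.1.0',
    'description': 'Example package',
    'url': 'https://example.invalid/demo',
    'author': 'Jane Doe',
    'author_email': 'jane@example.invalid',
})
with open('PKG-INFO', 'w') as f:
    dist.metadata.write_pkg_file(f)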
rsgalloway/grit
grit/cmd/cmd.py
https://github.com/rsgalloway/grit/blob/e6434ad8a1f4ac5d0903ebad630c81f8a5164d78/grit/cmd/cmd.py#L500-L507
def stream_object_data(self, ref): """As get_object_header, but returns the data as a stream :return: (hexsha, type_string, size_as_int, stream) :note: This method is not threadsafe, you need one independent Command instance per thread to be safe !""" cmd = self.__get_persistent_cmd("cat_file_all", "cat_file", batch=True) hexsha, typename, size = self.__get_object_header(cmd, ref) return (hexsha, typename, size, self.CatFileContentStream(size, cmd.stdout))
[ "def", "stream_object_data", "(", "self", ",", "ref", ")", ":", "cmd", "=", "self", ".", "__get_persistent_cmd", "(", "\"cat_file_all\"", ",", "\"cat_file\"", ",", "batch", "=", "True", ")", "hexsha", ",", "typename", ",", "size", "=", "self", ".", "__get_object_header", "(", "cmd", ",", "ref", ")", "return", "(", "hexsha", ",", "typename", ",", "size", ",", "self", ".", "CatFileContentStream", "(", "size", ",", "cmd", ".", "stdout", ")", ")" ]
As get_object_header, but returns the data as a stream :return: (hexsha, type_string, size_as_int, stream) :note: This method is not threadsafe, you need one independent Command instance per thread to be safe !
[ "As", "get_object_header", "but", "returns", "the", "data", "as", "a", "stream", ":", "return", ":", "(", "hexsha", "type_string", "size_as_int", "stream", ")", ":", "note", ":", "This", "method", "is", "not", "threadsafe", "you", "need", "one", "independent", "Command", "instance", "per", "thread", "to", "be", "safe", "!" ]
python
train
SeleniumHQ/selenium
py/selenium/webdriver/remote/webdriver.py
https://github.com/SeleniumHQ/selenium/blob/df40c28b41d4b3953f90eaff84838a9ac052b84a/py/selenium/webdriver/remote/webdriver.py#L298-L324
def execute(self, driver_command, params=None): """ Sends a command to be executed by a command.CommandExecutor. :Args: - driver_command: The name of the command to execute as a string. - params: A dictionary of named parameters to send with the command. :Returns: The command's JSON response loaded into a dictionary object. """ if self.session_id is not None: if not params: params = {'sessionId': self.session_id} elif 'sessionId' not in params: params['sessionId'] = self.session_id params = self._wrap_value(params) response = self.command_executor.execute(driver_command, params) if response: self.error_handler.check_response(response) response['value'] = self._unwrap_value( response.get('value', None)) return response # If the server doesn't send a response, assume the command was # a success return {'success': 0, 'value': None, 'sessionId': self.session_id}
[ "def", "execute", "(", "self", ",", "driver_command", ",", "params", "=", "None", ")", ":", "if", "self", ".", "session_id", "is", "not", "None", ":", "if", "not", "params", ":", "params", "=", "{", "'sessionId'", ":", "self", ".", "session_id", "}", "elif", "'sessionId'", "not", "in", "params", ":", "params", "[", "'sessionId'", "]", "=", "self", ".", "session_id", "params", "=", "self", ".", "_wrap_value", "(", "params", ")", "response", "=", "self", ".", "command_executor", ".", "execute", "(", "driver_command", ",", "params", ")", "if", "response", ":", "self", ".", "error_handler", ".", "check_response", "(", "response", ")", "response", "[", "'value'", "]", "=", "self", ".", "_unwrap_value", "(", "response", ".", "get", "(", "'value'", ",", "None", ")", ")", "return", "response", "# If the server doesn't send a response, assume the command was", "# a success", "return", "{", "'success'", ":", "0", ",", "'value'", ":", "None", ",", "'sessionId'", ":", "self", ".", "session_id", "}" ]
Sends a command to be executed by a command.CommandExecutor. :Args: - driver_command: The name of the command to execute as a string. - params: A dictionary of named parameters to send with the command. :Returns: The command's JSON response loaded into a dictionary object.
[ "Sends", "a", "command", "to", "be", "executed", "by", "a", "command", ".", "CommandExecutor", "." ]
python
train
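Convenience methods such as driver.get() are thin wrappers over execute(); a direct call looks roughly like the sketch below, assuming a locally installed driver. The URL is a placeholder.

# Hypothetical direct use of execute(); requires a running browser/driver.
from selenium import webdriver
from selenium.webdriver.remote.command import Command

driver = webdriver.Firefox()
response = driver.execute(Command.GET, {'url': 'https://example.com'})
print(response)   # the command's JSON response loaded into a dictionary
driver.quit()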
matrix-org/matrix-python-sdk
matrix_client/api.py
https://github.com/matrix-org/matrix-python-sdk/blob/e734cce3ccd35f2d355c6a19a7a701033472498a/matrix_client/api.py#L1055-L1065
def key_changes(self, from_token, to_token): """Gets a list of users who have updated their device identity keys. Args: from_token (str): The desired start point of the list. Should be the next_batch field from a response to an earlier call to /sync. to_token (str): The desired end point of the list. Should be the next_batch field from a recent call to /sync - typically the most recent such call. """ params = {"from": from_token, "to": to_token} return self._send("GET", "/keys/changes", query_params=params)
[ "def", "key_changes", "(", "self", ",", "from_token", ",", "to_token", ")", ":", "params", "=", "{", "\"from\"", ":", "from_token", ",", "\"to\"", ":", "to_token", "}", "return", "self", ".", "_send", "(", "\"GET\"", ",", "\"/keys/changes\"", ",", "query_params", "=", "params", ")" ]
Gets a list of users who have updated their device identity keys. Args: from_token (str): The desired start point of the list. Should be the next_batch field from a response to an earlier call to /sync. to_token (str): The desired end point of the list. Should be the next_batch field from a recent call to /sync - typically the most recent such call.
[ "Gets", "a", "list", "of", "users", "who", "have", "updated", "their", "device", "identity", "keys", "." ]
python
train
ossobv/exactonline
exactonline/http.py
https://github.com/ossobv/exactonline/blob/f6bee418a9cb1fcf3ef17347ea7ab0dd3b573fde/exactonline/http.py#L251-L256
def http_post(url, data=None, opt=opt_default): """ Shortcut for urlopen (POST) + read. We'll probably want to add a nice timeout here later too. """ return _http_request(url, method='POST', data=_marshalled(data), opt=opt)
[ "def", "http_post", "(", "url", ",", "data", "=", "None", ",", "opt", "=", "opt_default", ")", ":", "return", "_http_request", "(", "url", ",", "method", "=", "'POST'", ",", "data", "=", "_marshalled", "(", "data", ")", ",", "opt", "=", "opt", ")" ]
Shortcut for urlopen (POST) + read. We'll probably want to add a nice timeout here later too.
[ "Shortcut", "for", "urlopen", "(", "POST", ")", "+", "read", ".", "We", "ll", "probably", "want", "to", "add", "a", "nice", "timeout", "here", "later", "too", "." ]
python
test
ibis-project/ibis
ibis/clickhouse/client.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/clickhouse/client.py#L372-L385
def exists_table(self, name, database=None): """ Determine if the indicated table or view exists Parameters ---------- name : string database : string, default None Returns ------- if_exists : boolean """ return len(self.list_tables(like=name, database=database)) > 0
[ "def", "exists_table", "(", "self", ",", "name", ",", "database", "=", "None", ")", ":", "return", "len", "(", "self", ".", "list_tables", "(", "like", "=", "name", ",", "database", "=", "database", ")", ")", ">", "0" ]
Determine if the indicated table or view exists Parameters ---------- name : string database : string, default None Returns ------- if_exists : boolean
[ "Determine", "if", "the", "indicated", "table", "or", "view", "exists" ]
python
train
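A small sketch of exists_table() against a ClickHouse backend of the same ibis era; host, port, database, and table names are placeholders.

# Hypothetical usage; assumes a reachable ClickHouse server.
import ibis

con = ibis.clickhouse.connect(host='localhost', port=9000, database='default')
if not con.exists_table('trades'):
    print('table "trades" not found in default')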
ibis-project/ibis
ibis/pandas/core.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/pandas/core.py#L374-L422
def execute_and_reset( expr, params=None, scope=None, aggcontext=None, **kwargs ): """Execute an expression against data that are bound to it. If no data are bound, raise an Exception. Notes ----- The difference between this function and :func:`~ibis.pandas.core.execute` is that this function resets the index of the result, if the result has an index. Parameters ---------- expr : ibis.expr.types.Expr The expression to execute params : Mapping[ibis.expr.types.Expr, object] The data that an unbound parameter in `expr` maps to scope : Mapping[ibis.expr.operations.Node, object] Additional scope, mapping ibis operations to data aggcontext : Optional[ibis.pandas.aggcontext.AggregationContext] An object indicating how to compute aggregations. For example, a rolling mean needs to be computed differently than the mean of a column. kwargs : Dict[str, object] Additional arguments that can potentially be used by individual node execution Returns ------- result : Union[ pandas.Series, pandas.DataFrame, ibis.pandas.core.simple_types ] Raises ------ ValueError * If no data are bound to the input expression """ result = execute( expr, params=params, scope=scope, aggcontext=aggcontext, **kwargs ) if isinstance(result, pd.DataFrame): schema = expr.schema() df = result.reset_index() return df.loc[:, schema.names] elif isinstance(result, pd.Series): return result.reset_index(drop=True) return result
[ "def", "execute_and_reset", "(", "expr", ",", "params", "=", "None", ",", "scope", "=", "None", ",", "aggcontext", "=", "None", ",", "*", "*", "kwargs", ")", ":", "result", "=", "execute", "(", "expr", ",", "params", "=", "params", ",", "scope", "=", "scope", ",", "aggcontext", "=", "aggcontext", ",", "*", "*", "kwargs", ")", "if", "isinstance", "(", "result", ",", "pd", ".", "DataFrame", ")", ":", "schema", "=", "expr", ".", "schema", "(", ")", "df", "=", "result", ".", "reset_index", "(", ")", "return", "df", ".", "loc", "[", ":", ",", "schema", ".", "names", "]", "elif", "isinstance", "(", "result", ",", "pd", ".", "Series", ")", ":", "return", "result", ".", "reset_index", "(", "drop", "=", "True", ")", "return", "result" ]
Execute an expression against data that are bound to it. If no data are bound, raise an Exception. Notes ----- The difference between this function and :func:`~ibis.pandas.core.execute` is that this function resets the index of the result, if the result has an index. Parameters ---------- expr : ibis.expr.types.Expr The expression to execute params : Mapping[ibis.expr.types.Expr, object] The data that an unbound parameter in `expr` maps to scope : Mapping[ibis.expr.operations.Node, object] Additional scope, mapping ibis operations to data aggcontext : Optional[ibis.pandas.aggcontext.AggregationContext] An object indicating how to compute aggregations. For example, a rolling mean needs to be computed differently than the mean of a column. kwargs : Dict[str, object] Additional arguments that can potentially be used by individual node execution Returns ------- result : Union[ pandas.Series, pandas.DataFrame, ibis.pandas.core.simple_types ] Raises ------ ValueError * If no data are bound to the input expression
[ "Execute", "an", "expression", "against", "data", "that", "are", "bound", "to", "it", ".", "If", "no", "data", "are", "bound", "raise", "an", "Exception", "." ]
python
train
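A rough illustration with the pandas backend of the same ibis generation; the connect call and import path are assumptions based on this record. The point is that the grouped result comes back with a plain RangeIndex because the index is reset.

# Hypothetical usage of execute_and_reset() with the pandas backend.
import pandas as pd
import ibis
from ibis.pandas.core import execute_and_reset

df = pd.DataFrame({'key': ['a', 'a', 'b'], 'value': [1, 2, 3]})
con = ibis.pandas.connect({'t': df})
t = con.table('t')
expr = t.group_by('key').aggregate(total=t.value.sum())
print(execute_and_reset(expr))   # two rows, 'key' and 'total' as ordinary columns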
pypyr/pypyr-cli
pypyr/dsl.py
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/dsl.py#L351-L366
def run_foreach_or_conditional(self, context): """Run the foreach sequence or the conditional evaluation. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate. """ logger.debug("starting") # friendly reminder [] list obj (i.e empty) evals False if self.foreach_items: self.foreach_loop(context) else: # since no looping required, don't pollute output with looping info self.run_conditional_decorators(context) logger.debug("done")
[ "def", "run_foreach_or_conditional", "(", "self", ",", "context", ")", ":", "logger", ".", "debug", "(", "\"starting\"", ")", "# friendly reminder [] list obj (i.e empty) evals False", "if", "self", ".", "foreach_items", ":", "self", ".", "foreach_loop", "(", "context", ")", "else", ":", "# since no looping required, don't pollute output with looping info", "self", ".", "run_conditional_decorators", "(", "context", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Run the foreach sequence or the conditional evaluation. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate.
[ "Run", "the", "foreach", "sequence", "or", "the", "conditional", "evaluation", "." ]
python
train
SBRG/ssbio
ssbio/pipeline/atlas2.py
https://github.com/SBRG/ssbio/blob/e9449e64ffc1a1f5ad07e5849aa12a650095f8a2/ssbio/pipeline/atlas2.py#L409-L427
def _load_sequences_to_strain(self, strain_id, force_rerun=False): """Load strain GEMPRO with functional genes defined, load sequences to it, save as new GEMPRO""" gp_seqs_path = op.join(self.model_dir, '{}_gp_withseqs.pckl'.format(strain_id)) if ssbio.utils.force_rerun(flag=force_rerun, outfile=gp_seqs_path): gp_noseqs = ssbio.io.load_pickle(self.strain_infodict[strain_id]['gp_noseqs_path']) strain_sequences = SeqIO.index(self.strain_infodict[strain_id]['genome_path'], 'fasta') for strain_gene in gp_noseqs.functional_genes: # Pull the gene ID of the strain from the orthology matrix strain_gene_key = self.df_orthology_matrix.at[strain_gene.id, strain_id] # Load into the strain GEM-PRO new_id = '{}_{}'.format(strain_gene.id, strain_id) if strain_gene.protein.sequences.has_id(new_id): continue strain_gene.protein.load_manual_sequence(seq=strain_sequences[strain_gene_key], ident=new_id, set_as_representative=True) gp_noseqs.save_pickle(outfile=gp_seqs_path) return strain_id, gp_seqs_path
[ "def", "_load_sequences_to_strain", "(", "self", ",", "strain_id", ",", "force_rerun", "=", "False", ")", ":", "gp_seqs_path", "=", "op", ".", "join", "(", "self", ".", "model_dir", ",", "'{}_gp_withseqs.pckl'", ".", "format", "(", "strain_id", ")", ")", "if", "ssbio", ".", "utils", ".", "force_rerun", "(", "flag", "=", "force_rerun", ",", "outfile", "=", "gp_seqs_path", ")", ":", "gp_noseqs", "=", "ssbio", ".", "io", ".", "load_pickle", "(", "self", ".", "strain_infodict", "[", "strain_id", "]", "[", "'gp_noseqs_path'", "]", ")", "strain_sequences", "=", "SeqIO", ".", "index", "(", "self", ".", "strain_infodict", "[", "strain_id", "]", "[", "'genome_path'", "]", ",", "'fasta'", ")", "for", "strain_gene", "in", "gp_noseqs", ".", "functional_genes", ":", "# Pull the gene ID of the strain from the orthology matrix", "strain_gene_key", "=", "self", ".", "df_orthology_matrix", ".", "at", "[", "strain_gene", ".", "id", ",", "strain_id", "]", "# Load into the strain GEM-PRO", "new_id", "=", "'{}_{}'", ".", "format", "(", "strain_gene", ".", "id", ",", "strain_id", ")", "if", "strain_gene", ".", "protein", ".", "sequences", ".", "has_id", "(", "new_id", ")", ":", "continue", "strain_gene", ".", "protein", ".", "load_manual_sequence", "(", "seq", "=", "strain_sequences", "[", "strain_gene_key", "]", ",", "ident", "=", "new_id", ",", "set_as_representative", "=", "True", ")", "gp_noseqs", ".", "save_pickle", "(", "outfile", "=", "gp_seqs_path", ")", "return", "strain_id", ",", "gp_seqs_path" ]
Load strain GEMPRO with functional genes defined, load sequences to it, save as new GEMPRO
[ "Load", "strain", "GEMPRO", "with", "functional", "genes", "defined", "load", "sequences", "to", "it", "save", "as", "new", "GEMPRO" ]
python
train
polyaxon/polyaxon-cli
polyaxon_cli/cli/config.py
https://github.com/polyaxon/polyaxon-cli/blob/a7f5eed74d4d909cad79059f3c21c58606881449/polyaxon_cli/cli/config.py#L23-L28
def config(list): # pylint:disable=redefined-builtin """Set and get the global configurations.""" if list: _config = GlobalConfigManager.get_config_or_default() Printer.print_header('Current config:') dict_tabulate(_config.to_dict())
[ "def", "config", "(", "list", ")", ":", "# pylint:disable=redefined-builtin", "if", "list", ":", "_config", "=", "GlobalConfigManager", ".", "get_config_or_default", "(", ")", "Printer", ".", "print_header", "(", "'Current config:'", ")", "dict_tabulate", "(", "_config", ".", "to_dict", "(", ")", ")" ]
Set and get the global configurations.
[ "Set", "and", "get", "the", "global", "configurations", "." ]
python
valid
CI-WATER/gsshapy
gsshapy/orm/prj.py
https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1180-L1199
def getIndexGrid(self, name):
        """
        Returns GDALGrid object of index map

        Parameters:
            name(str): Name of index map in 'cmt' file.

        Returns:
            GDALGrid
        """
        index_map = self.mapTableFile.indexMaps.filter_by(name=name).one()

        gssha_pro_card = self.getCard("#PROJECTION_FILE")
        if gssha_pro_card is None:
            raise ValueError("#PROJECTION_FILE card not found ...")

        with tmp_chdir(self.project_directory):
            # return gssha grid
            return GDALGrid(index_map.filename,
                            gssha_pro_card.value.strip('"').strip("'"))
[ "def", "getIndexGrid", "(", "self", ",", "name", ")", ":", "index_map", "=", "self", ".", "mapTableFile", ".", "indexMaps", ".", "filter_by", "(", "name", "=", "name", ")", ".", "one", "(", ")", "gssha_pro_card", "=", "self", ".", "getCard", "(", "\"#PROJECTION_FILE\"", ")", "if", "gssha_pro_card", "is", "None", ":", "raise", "ValueError", "(", "\"#PROJECTION_FILE card not found ...\"", ")", "with", "tmp_chdir", "(", "self", ".", "project_directory", ")", ":", "# return gssha grid", "return", "GDALGrid", "(", "index_map", ".", "filename", ",", "gssha_pro_card", ".", "value", ".", "strip", "(", "'\"'", ")", ".", "strip", "(", "\"'\"", ")", ")" ]
Returns GDALGrid object of index map

        Parameters:
            name(str): Name of index map in 'cmt' file.

        Returns:
            GDALGrid
[ "Returns", "GDALGrid", "object", "of", "index", "map" ]
python
train
sdispater/poetry
poetry/poetry.py
https://github.com/sdispater/poetry/blob/2d27acd76c165dd49f11934520a7973de7a3762a/poetry/poetry.py#L220-L265
def check(cls, config, strict=False): # type: (dict, bool) -> Dict[str, List[str]] """ Checks the validity of a configuration """ result = {"errors": [], "warnings": []} # Schema validation errors validation_errors = validate_object(config, "poetry-schema") result["errors"] += validation_errors if strict: # If strict, check the file more thoroughly # Checking license license = config.get("license") if license: try: license_by_id(license) except ValueError: result["errors"].append("{} is not a valid license".format(license)) if "dependencies" in config: python_versions = config["dependencies"]["python"] if python_versions == "*": result["warnings"].append( "A wildcard Python dependency is ambiguous. " "Consider specifying a more explicit one." ) # Checking for scripts with extras if "scripts" in config: scripts = config["scripts"] for name, script in scripts.items(): if not isinstance(script, dict): continue extras = script["extras"] for extra in extras: if extra not in config["extras"]: result["errors"].append( 'Script "{}" requires extra "{}" which is not defined.'.format( name, extra ) ) return result
[ "def", "check", "(", "cls", ",", "config", ",", "strict", "=", "False", ")", ":", "# type: (dict, bool) -> Dict[str, List[str]]", "result", "=", "{", "\"errors\"", ":", "[", "]", ",", "\"warnings\"", ":", "[", "]", "}", "# Schema validation errors", "validation_errors", "=", "validate_object", "(", "config", ",", "\"poetry-schema\"", ")", "result", "[", "\"errors\"", "]", "+=", "validation_errors", "if", "strict", ":", "# If strict, check the file more thoroughly", "# Checking license", "license", "=", "config", ".", "get", "(", "\"license\"", ")", "if", "license", ":", "try", ":", "license_by_id", "(", "license", ")", "except", "ValueError", ":", "result", "[", "\"errors\"", "]", ".", "append", "(", "\"{} is not a valid license\"", ".", "format", "(", "license", ")", ")", "if", "\"dependencies\"", "in", "config", ":", "python_versions", "=", "config", "[", "\"dependencies\"", "]", "[", "\"python\"", "]", "if", "python_versions", "==", "\"*\"", ":", "result", "[", "\"warnings\"", "]", ".", "append", "(", "\"A wildcard Python dependency is ambiguous. \"", "\"Consider specifying a more explicit one.\"", ")", "# Checking for scripts with extras", "if", "\"scripts\"", "in", "config", ":", "scripts", "=", "config", "[", "\"scripts\"", "]", "for", "name", ",", "script", "in", "scripts", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "script", ",", "dict", ")", ":", "continue", "extras", "=", "script", "[", "\"extras\"", "]", "for", "extra", "in", "extras", ":", "if", "extra", "not", "in", "config", "[", "\"extras\"", "]", ":", "result", "[", "\"errors\"", "]", ".", "append", "(", "'Script \"{}\" requires extra \"{}\" which is not defined.'", ".", "format", "(", "name", ",", "extra", ")", ")", "return", "result" ]
Checks the validity of a configuration
[ "Checks", "the", "validity", "of", "a", "configuration" ]
python
train
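A hedged example of feeding a [tool.poetry]-style dictionary to check(); the required keys are assumed from poetry's schema of that era, and with strict=True the wildcard Python constraint should surface as a warning rather than an error.

# Hypothetical usage; values are placeholders.
from poetry.poetry import Poetry

config = {
    'name': 'demo',
    'version': '0.1.0',
    'description': 'Example project',
    'authors': ['Jane Doe <jane@example.invalid>'],
    'license': 'MIT',
    'dependencies': {'python': '*'},
}
result = Poetry.check(config, strict=True)
print(result['errors'])      # expected: []
print(result['warnings'])    # expected: the wildcard-Python warning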
biocommons/bioutils
src/bioutils/digests.py
https://github.com/biocommons/bioutils/blob/88bcbdfa707268fed1110800e91b6d4f8e9475a0/src/bioutils/digests.py#L31-L52
def seq_seguid(seq, normalize=True): """returns seguid for sequence `seq` This seguid is compatible with BioPython's seguid. >>> seq_seguid('') '2jmj7l5rSw0yVb/vlWAYkK/YBwk' >>> seq_seguid('ACGT') 'IQiZThf2zKn/I1KtqStlEdsHYDQ' >>> seq_seguid('acgt') 'IQiZThf2zKn/I1KtqStlEdsHYDQ' >>> seq_seguid('acgt', normalize=False) 'lII0AoG1/I8qKY271rgv5CFZtsU' """ seq = normalize_sequence(seq) if normalize else seq bseq = seq.encode("ascii") return base64.b64encode(hashlib.sha1(bseq).digest()).decode("ascii").rstrip( '=')
[ "def", "seq_seguid", "(", "seq", ",", "normalize", "=", "True", ")", ":", "seq", "=", "normalize_sequence", "(", "seq", ")", "if", "normalize", "else", "seq", "bseq", "=", "seq", ".", "encode", "(", "\"ascii\"", ")", "return", "base64", ".", "b64encode", "(", "hashlib", ".", "sha1", "(", "bseq", ")", ".", "digest", "(", ")", ")", ".", "decode", "(", "\"ascii\"", ")", ".", "rstrip", "(", "'='", ")" ]
returns seguid for sequence `seq` This seguid is compatible with BioPython's seguid. >>> seq_seguid('') '2jmj7l5rSw0yVb/vlWAYkK/YBwk' >>> seq_seguid('ACGT') 'IQiZThf2zKn/I1KtqStlEdsHYDQ' >>> seq_seguid('acgt') 'IQiZThf2zKn/I1KtqStlEdsHYDQ' >>> seq_seguid('acgt', normalize=False) 'lII0AoG1/I8qKY271rgv5CFZtsU'
[ "returns", "seguid", "for", "sequence", "seq" ]
python
train
OSSOS/MOP
src/ossos/core/ossos/downloads/cutouts/downloader.py
https://github.com/OSSOS/MOP/blob/94f91d32ad5ec081d5a1ebd67604a838003465af/src/ossos/core/ossos/downloads/cutouts/downloader.py#L46-L99
def download_cutout(self, reading, focus=None, needs_apcor=False): """ Downloads a cutout of the FITS image for a given source reading. Args: reading: ossos.astrom.SourceReading The reading which will be the focus of the downloaded image. focus: tuple(int, int) The x, y coordinates that should be the focus of the downloaded image. These coordinates should be in terms of the source_reading parameter's coordinate system. Default value is None, in which case the source reading's x, y position is used as the focus. needs_apcor: bool If True, the apcor file with data needed for photometry calculations is downloaded in addition to the image. Defaults to False. Returns: cutout: ossos.downloads.data.SourceCutout """ logger.debug("Doing download_cutout with inputs: reading:{} focus:{} needs_apcor:{}".format(reading, focus, needs_apcor)) assert isinstance(reading, SourceReading) min_radius = config.read('CUTOUTS.SINGLETS.RADIUS') if not isinstance(min_radius, Quantity): min_radius = min_radius * units.arcsec radius = max(reading.uncertainty_ellipse.a, reading.uncertainty_ellipse.b) * 2.5 + min_radius logger.debug("got radius for cutout: {}".format(radius)) image_uri = reading.get_image_uri() logger.debug("Getting cutout at {} for {}".format(reading.reference_sky_coord, image_uri)) hdulist = storage._cutout_expnum(reading.obs, reading.reference_sky_coord, radius) # hdulist = storage.ra_dec_cutout(image_uri, reading.reference_sky_coord, radius) logger.debug("Getting the aperture correction.") source = SourceCutout(reading, hdulist, radius=radius) # Accessing the attribute here to trigger the download. try: apcor = source.apcor zmag = source.zmag source.reading.get_observation_header() except Exception as ex: if needs_apcor: import sys, traceback sys.stderr.write("Failed to retrieve apcor but apcor required. Raising error, see logs for more details") sys.stderr.write(traceback.print_exc()) pass logger.debug("Sending back the source reading.") return source
[ "def", "download_cutout", "(", "self", ",", "reading", ",", "focus", "=", "None", ",", "needs_apcor", "=", "False", ")", ":", "logger", ".", "debug", "(", "\"Doing download_cutout with inputs: reading:{} focus:{} needs_apcor:{}\"", ".", "format", "(", "reading", ",", "focus", ",", "needs_apcor", ")", ")", "assert", "isinstance", "(", "reading", ",", "SourceReading", ")", "min_radius", "=", "config", ".", "read", "(", "'CUTOUTS.SINGLETS.RADIUS'", ")", "if", "not", "isinstance", "(", "min_radius", ",", "Quantity", ")", ":", "min_radius", "=", "min_radius", "*", "units", ".", "arcsec", "radius", "=", "max", "(", "reading", ".", "uncertainty_ellipse", ".", "a", ",", "reading", ".", "uncertainty_ellipse", ".", "b", ")", "*", "2.5", "+", "min_radius", "logger", ".", "debug", "(", "\"got radius for cutout: {}\"", ".", "format", "(", "radius", ")", ")", "image_uri", "=", "reading", ".", "get_image_uri", "(", ")", "logger", ".", "debug", "(", "\"Getting cutout at {} for {}\"", ".", "format", "(", "reading", ".", "reference_sky_coord", ",", "image_uri", ")", ")", "hdulist", "=", "storage", ".", "_cutout_expnum", "(", "reading", ".", "obs", ",", "reading", ".", "reference_sky_coord", ",", "radius", ")", "# hdulist = storage.ra_dec_cutout(image_uri, reading.reference_sky_coord, radius)", "logger", ".", "debug", "(", "\"Getting the aperture correction.\"", ")", "source", "=", "SourceCutout", "(", "reading", ",", "hdulist", ",", "radius", "=", "radius", ")", "# Accessing the attribute here to trigger the download.", "try", ":", "apcor", "=", "source", ".", "apcor", "zmag", "=", "source", ".", "zmag", "source", ".", "reading", ".", "get_observation_header", "(", ")", "except", "Exception", "as", "ex", ":", "if", "needs_apcor", ":", "import", "sys", ",", "traceback", "sys", ".", "stderr", ".", "write", "(", "\"Failed to retrieve apcor but apcor required. Raising error, see logs for more details\"", ")", "sys", ".", "stderr", ".", "write", "(", "traceback", ".", "print_exc", "(", ")", ")", "pass", "logger", ".", "debug", "(", "\"Sending back the source reading.\"", ")", "return", "source" ]
Downloads a cutout of the FITS image for a given source reading. Args: reading: ossos.astrom.SourceReading The reading which will be the focus of the downloaded image. focus: tuple(int, int) The x, y coordinates that should be the focus of the downloaded image. These coordinates should be in terms of the source_reading parameter's coordinate system. Default value is None, in which case the source reading's x, y position is used as the focus. needs_apcor: bool If True, the apcor file with data needed for photometry calculations is downloaded in addition to the image. Defaults to False. Returns: cutout: ossos.downloads.data.SourceCutout
[ "Downloads", "a", "cutout", "of", "the", "FITS", "image", "for", "a", "given", "source", "reading", "." ]
python
train