Each record below lists its fields one per line, in order: repo (string, 7-54 chars), path (string, 4-192 chars), url (string, 87-284 chars), code (string, 78-104k chars), language (always python), and partition (one of train/valid/test).
paulovn/sparql-kernel
sparqlkernel/utils.py
https://github.com/paulovn/sparql-kernel/blob/1d2d155ff5da72070cb2a98fae33ea8113fac782/sparqlkernel/utils.py#L89-L111
def data_msg(msg, mtype=None):
    """
    Return a Jupyter display_data message, in both HTML & text formats, by
    formatting a given single message. The passed message may be:
      * An exception (including a KrnlException): will generate an error message
      * A list of messages (with \c mtype equal to \c multi)
      * A single message

    @param msg (str,list): a string, or a list of format string + args,
      or an iterable of (msg, mtype)
    @param mtype (str): the message type (used for the CSS class). If it's
      \c multi, then \c msg will be treated as a multi-message. If not
      passed, \c krn-error will be used for exceptions and \c msg for
      everything else
    """
    if isinstance(msg, KrnlException):
        return msg()  # a KrnlException knows how to format itself
    elif isinstance(msg, Exception):
        return KrnlException(msg)()
    elif mtype == 'multi':
        return data_msglist(msg)
    else:
        return data_msglist([(msg, mtype)])
python
train
ska-sa/purr
Purr/Plugins/local_pychart/chart_data.py
https://github.com/ska-sa/purr/blob/4c848768d0485d0f88b30850d0d5372221b21b66/Purr/Plugins/local_pychart/chart_data.py#L291-L309
def func(f, xmin, xmax, step=None):
    """Create sample points from function <f>, which must be a
    single-parameter function that returns a number (e.g., math.sin).
    Parameters <xmin> and <xmax> specify the first and last X values, and
    <step> specifies the sampling interval.

    >>> chart_data.func(math.sin, 0, math.pi * 4, math.pi / 2)
    [(0, 0.0), (1.5707963267948966, 1.0), (3.1415926535897931, 1.2246063538223773e-16), (4.7123889803846897, -1.0), (6.2831853071795862, -2.4492127076447545e-16), (7.8539816339744828, 1.0), (9.4247779607693793, 3.6738190614671318e-16), (10.995574287564276, -1.0)]
    """
    data = []
    x = xmin
    if not step:
        step = (xmax - xmin) / 100.0
    while x < xmax:
        data.append((x, f(x)))
        x += step
    return data
python
train
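A quick usage sketch for the sampler above (hypothetical call, assuming func is in scope; with the default step the range is split into 100 intervals):

import math

points = func(math.sin, 0, math.pi, step=math.pi / 4)
# 4 samples at 0, pi/4, pi/2, 3*pi/4 - xmax itself is excluded
# because the loop uses a strict `x < xmax` test
print(points)

Note that repeated `x += step` accumulates floating-point error, so the number of samples landing just below xmax can be off by one for some step choices.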
cherrypy/cheroot
cheroot/wsgi.py
https://github.com/cherrypy/cheroot/blob/2af3b1798d66da697957480d3a8b4831a405770b/cheroot/wsgi.py#L249-L318
def get_environ(self):
    """Return a new environ dict targeting the given wsgi.version."""
    req = self.req
    req_conn = req.conn
    env = {
        # set a non-standard environ entry so the WSGI app can know what
        # the *real* server protocol is (and what features to support).
        # See http://www.faqs.org/rfcs/rfc2145.html.
        'ACTUAL_SERVER_PROTOCOL': req.server.protocol,
        'PATH_INFO': bton(req.path),
        'QUERY_STRING': bton(req.qs),
        'REMOTE_ADDR': req_conn.remote_addr or '',
        'REMOTE_PORT': str(req_conn.remote_port or ''),
        'REQUEST_METHOD': bton(req.method),
        'REQUEST_URI': bton(req.uri),
        'SCRIPT_NAME': '',
        'SERVER_NAME': req.server.server_name,
        # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
        'SERVER_PROTOCOL': bton(req.request_protocol),
        'SERVER_SOFTWARE': req.server.software,
        'wsgi.errors': sys.stderr,
        'wsgi.input': req.rfile,
        'wsgi.input_terminated': bool(req.chunked_read),
        'wsgi.multiprocess': False,
        'wsgi.multithread': True,
        'wsgi.run_once': False,
        'wsgi.url_scheme': bton(req.scheme),
        'wsgi.version': self.version,
    }

    if isinstance(req.server.bind_addr, six.string_types):
        # AF_UNIX. This isn't really allowed by WSGI, which doesn't
        # address unix domain sockets. But it's better than nothing.
        env['SERVER_PORT'] = ''
        try:
            env['X_REMOTE_PID'] = str(req_conn.peer_pid)
            env['X_REMOTE_UID'] = str(req_conn.peer_uid)
            env['X_REMOTE_GID'] = str(req_conn.peer_gid)

            env['X_REMOTE_USER'] = str(req_conn.peer_user)
            env['X_REMOTE_GROUP'] = str(req_conn.peer_group)

            env['REMOTE_USER'] = env['X_REMOTE_USER']
        except RuntimeError:
            """Unable to retrieve peer creds data.

            Unsupported by current kernel or socket error happened, or
            unsupported socket type, or disabled.
            """
    else:
        env['SERVER_PORT'] = str(req.server.bind_addr[1])

    # Request headers
    env.update(
        ('HTTP_' + bton(k).upper().replace('-', '_'), bton(v))
        for k, v in req.inheaders.items()
    )

    # CONTENT_TYPE/CONTENT_LENGTH
    ct = env.pop('HTTP_CONTENT_TYPE', None)
    if ct is not None:
        env['CONTENT_TYPE'] = ct
    cl = env.pop('HTTP_CONTENT_LENGTH', None)
    if cl is not None:
        env['CONTENT_LENGTH'] = cl

    if req.conn.ssl_env:
        env.update(req.conn.ssl_env)

    return env
python
train
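The header-normalization step in get_environ is easy to exercise in isolation. A minimal sketch with a hypothetical header dict; plain str keys are used here so cheroot's bton bytes-to-native-string helper can be skipped:

headers = {'Content-Type': 'text/html', 'X-Forwarded-For': '10.0.0.1'}

env = {}
# each HTTP header becomes an HTTP_* key: uppercased, dashes to underscores
env.update(
    ('HTTP_' + k.upper().replace('-', '_'), v)
    for k, v in headers.items()
)
# CGI expects CONTENT_TYPE/CONTENT_LENGTH without the HTTP_ prefix
ct = env.pop('HTTP_CONTENT_TYPE', None)
if ct is not None:
    env['CONTENT_TYPE'] = ct
print(env)  # {'HTTP_X_FORWARDED_FOR': '10.0.0.1', 'CONTENT_TYPE': 'text/html'}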
googlefonts/fontbakery
Lib/fontbakery/callable.py
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/callable.py#L18-L29
def cached_getter(func):
    """Decorate a property by executing it at instantiation time and cache
    the result on the instance object."""
    @wraps(func)
    def wrapper(self):
        attribute = f'_{func.__name__}'
        value = getattr(self, attribute, None)
        if value is None:
            value = func(self)
            setattr(self, attribute, value)
        return value
    return wrapper
python
train
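A usage sketch for cached_getter (the Report class is hypothetical; the decorator above must be in scope, along with the `from functools import wraps` it relies on). The computed value lands under a leading-underscore attribute, so the body runs once per instance; a getter that legitimately returns None would be recomputed on every access, since None doubles as the cache sentinel:

class Report:
    @property
    @cached_getter
    def summary(self):
        print('computing once')
        return 'expensive result'

r = Report()
r.summary  # prints 'computing once', stores the result as r._summary
r.summary  # served from r._summary, no recomputation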
edx/bok-choy
bok_choy/query.py
https://github.com/edx/bok-choy/blob/cdd0d423419fc0c49d56a9226533aa1490b60afc/bok_choy/query.py#L452-L466
def is_focused(self):
    """
    Checks that *at least one* matched element is focused. More
    specifically, it checks whether the element is document.activeElement.
    If no matching element is focused, this returns `False`.

    Returns:
        bool
    """
    active_el = self.browser.execute_script("return document.activeElement")
    query_results = self.map(lambda el: el == active_el, 'focused').results

    if query_results:
        return any(query_results)

    return False
python
train
log2timeline/plaso
plaso/storage/sqlite/sqlite_file.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/storage/sqlite/sqlite_file.py#L829-L847
def GetEventTagByIdentifier(self, identifier):
    """Retrieves a specific event tag.

    Args:
      identifier (SQLTableIdentifier): event tag identifier.

    Returns:
      EventTag: event tag or None if not available.
    """
    event_tag = self._GetAttributeContainerByIndex(
        self._CONTAINER_TYPE_EVENT_TAG, identifier.row_identifier - 1)
    if event_tag:
        event_identifier = identifiers.SQLTableIdentifier(
            self._CONTAINER_TYPE_EVENT, event_tag.event_row_identifier)
        event_tag.SetEventIdentifier(event_identifier)

        del event_tag.event_row_identifier

    return event_tag
python
train
saltstack/salt
salt/modules/postgres.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/postgres.py#L1393-L1421
def installed_extensions(user=None,
                         host=None,
                         port=None,
                         maintenance_db=None,
                         password=None,
                         runas=None):
    '''
    List installed postgresql extensions

    CLI Example:

    .. code-block:: bash

        salt '*' postgres.installed_extensions
    '''
    query = (
        'select a.*, b.nspname as schema_name '
        'from pg_extension a, pg_namespace b where a.extnamespace = b.oid;'
    )
    ret = psql_query(query,
                     user=user,
                     host=host,
                     port=port,
                     maintenance_db=maintenance_db,
                     password=password,
                     runas=runas)
    exts = {}
    for row in ret:
        if 'extversion' in row and 'extname' in row:
            exts[row['extname']] = row
    return exts
python
train
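The post-processing at the end of installed_extensions is plain dict-building and can be exercised without a database; a sketch with hypothetical rows shaped like psql_query output:

rows = [
    {'extname': 'plpgsql', 'extversion': '1.0', 'schema_name': 'pg_catalog'},
    {'extname': 'hstore', 'extversion': '1.4', 'schema_name': 'public'},
    {'bogus': 'rows missing extname/extversion are skipped'},
]
exts = {}
for row in rows:
    # keep only rows that look like real extension records
    if 'extversion' in row and 'extname' in row:
        exts[row['extname']] = row
print(sorted(exts))  # ['hstore', 'plpgsql']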
raiden-network/raiden
raiden/network/transport/matrix/transport.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/network/transport/matrix/transport.py#L180-L254
def _check_and_send(self):
    """Check and send all pending/queued messages that are not waiting on retry timeout

    After composing the to-be-sent message, also clean the message queue of
    messages that are no longer present in the respective SendMessageEvent queue
    """
    if self.transport._stop_event.ready() or not self.transport.greenlet:
        self.log.error("Can't retry - stopped")
        return
    if self.transport._prioritize_global_messages:
        # During startup global messages have to be sent first
        self.transport._global_send_queue.join()
    self.log.debug('Retrying message', receiver=to_normalized_address(self.receiver))
    status = self.transport._address_mgr.get_address_reachability(self.receiver)
    if status is not AddressReachability.REACHABLE:
        # if partner is not reachable, return
        self.log.debug(
            'Partner not reachable. Skipping.',
            partner=pex(self.receiver),
            status=status,
        )
        return
    # sort output by channel_identifier (so global/unordered queue goes first)
    # inside queue, preserve order in which messages were enqueued
    ordered_queue = sorted(
        self._message_queue,
        key=lambda d: d.queue_identifier.channel_identifier,
    )
    message_texts = [
        data.text
        for data in ordered_queue
        # if expired_gen generator yields False, message was sent recently, so skip it
        if next(data.expiration_generator)
    ]

    def message_is_in_queue(data: _RetryQueue._MessageData) -> bool:
        return any(
            isinstance(data.message, RetrieableMessage) and
            send_event.message_identifier == data.message.message_identifier
            for send_event in self.transport._queueids_to_queues[data.queue_identifier]
        )

    # clean after composing, so any queued messages (e.g. Delivered) are sent at least once
    for msg_data in self._message_queue[:]:
        remove = False
        if isinstance(msg_data.message, (Delivered, Ping, Pong)):
            # e.g. Delivered, send only once and then clear
            # TODO: Is this correct? Will a missed Delivered be 'fixed' by the
            #       later `Processed` message?
            remove = True
        elif msg_data.queue_identifier not in self.transport._queueids_to_queues:
            remove = True
            self.log.debug(
                'Stopping message send retry',
                queue=msg_data.queue_identifier,
                message=msg_data.message,
                reason='Raiden queue is gone',
            )
        elif not message_is_in_queue(msg_data):
            remove = True
            self.log.debug(
                'Stopping message send retry',
                queue=msg_data.queue_identifier,
                message=msg_data.message,
                reason='Message was removed from queue',
            )

        if remove:
            self._message_queue.remove(msg_data)

    if message_texts:
        self.log.debug('Send', receiver=pex(self.receiver), messages=message_texts)
        self.transport._send_raw(self.receiver, '\n'.join(message_texts))
python
train
bivab/smbus-cffi
smbus/smbus.py
https://github.com/bivab/smbus-cffi/blob/7486931edf55fcdde84db38356331c65851a40b1/smbus/smbus.py#L102-L111
def read_byte(self, addr):
    """read_byte(addr) -> result

    Perform SMBus Read Byte transaction.
    """
    self._set_addr(addr)
    result = SMBUS.i2c_smbus_read_byte(self._fd)
    if result == -1:
        raise IOError(ffi.errno)
    return result
python
test
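A hedged usage sketch for read_byte (requires real I2C hardware and the smbus-cffi package; bus number 1 and device address 0x48 are hypothetical):

from smbus import SMBus

bus = SMBus(1)               # opens /dev/i2c-1
value = bus.read_byte(0x48)  # one-byte SMBus read from the device at 0x48
print(hex(value))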
diefans/objective
src/objective/core.py
https://github.com/diefans/objective/blob/e2de37f1cd4f5ad147ab3a5dee7dffd6806f2f88/src/objective/core.py#L134-L149
def node(self):
    """Create a :py:class:`Node` instance.

    All args and kwargs are forwarded to the node.

    :returns: a :py:class:`Node` instance
    """
    if self.node_class is None:
        raise ValueError("You have to create an ``Item`` by calling ``__init__`` with ``node_class`` argument"
                         " or by decorating a ``Node`` class.")

    node = self.node_class(*self.node_args, **self.node_kwargs)
    node.__item__ = self

    return node
python
train
SecurityInnovation/PGPy
pgpy/pgp.py
https://github.com/SecurityInnovation/PGPy/blob/f1c3d68e32c334f5aa14c34580925e97f17f4fde/pgpy/pgp.py#L586-L591
def selfsig(self):
    """
    This will be the most recent self-signature of this User ID or
    Attribute. If there isn't one, this will be ``None``.
    """
    if self.parent is not None:
        return next((sig for sig in reversed(self._signatures)
                     if sig.signer == self.parent.fingerprint.keyid), None)
python
train
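The next(... reversed ...) construction in selfsig is a general "latest matching element" idiom; a self-contained sketch of the same pattern:

def last_matching(items, predicate):
    # scan from the end; next() falls back to None when nothing matches
    return next((item for item in reversed(items) if predicate(item)), None)

sigs = ['other', 'self', 'other', 'self']
print(last_matching(sigs, lambda s: s == 'self'))  # 'self' (the rightmost hit)
print(last_matching(sigs, lambda s: s == 'none'))  # None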
RedHatInsights/insights-core
insights/parsers/system_time.py
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/parsers/system_time.py#L135-L153
def get_last(self, keyword, param=None, default=None):
    """
    Get the parameters for a given keyword, or default if keyword or
    parameter are not present in the configuration.

    This finds the last declaration of the given parameter (which is the
    one which takes effect). If no parameter is given, then the entire
    line is treated as the parameter and returned.

    Parameters:
        keyword(str): The keyword name, e.g. 'tinker' or 'driftfile'
        param(str): The parameter name, e.g. 'panic' or 'step'. If not
            given, the last definition of that keyword is given.

    Returns:
        str or None: The value of the given parameter, or None if not found.
    """
    return self.get_param(keyword, param, default)[-1]
python
train
ScriptSmith/socialreaper
socialreaper/tools.py
https://github.com/ScriptSmith/socialreaper/blob/87fcc3b74bbed6c4f8e7f49a5f0eb8a616cf38da/socialreaper/tools.py#L31-L49
def fill_gaps(list_dicts):
    """
    Fill gaps in a list of dictionaries. Add empty keys to dictionaries in
    the list that don't contain other entries' keys

    :param list_dicts: A list of dictionaries
    :return: A list of field names, a list of dictionaries with identical keys
    """
    field_names = []  # != set bc. preserving order is better for output
    for datum in list_dicts:
        for key in datum.keys():
            if key not in field_names:
                field_names.append(key)
    for datum in list_dicts:
        for key in field_names:
            if key not in datum:
                datum[key] = ''
    return list(field_names), list_dicts
python
valid
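A usage sketch for fill_gaps, e.g. when preparing uneven rows for csv.DictWriter (the input dicts are hypothetical; note the function also mutates the dicts in place):

rows = [{'id': 1, 'name': 'a'}, {'id': 2, 'score': 9}]
fields, filled = fill_gaps(rows)
print(fields)     # ['id', 'name', 'score'] - first-seen order preserved
print(filled[1])  # {'id': 2, 'score': 9, 'name': ''}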
jmgilman/Neolib
neolib/pyamf/__init__.py
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/__init__.py#L436-L460
def get_decoder(encoding, *args, **kwargs):
    """
    Returns a L{codec.Decoder} capable of decoding AMF[C{encoding}] streams.

    @raise ValueError: Unknown C{encoding}.
    """
    def _get_decoder_class():
        if encoding == AMF0:
            try:
                from cpyamf import amf0
            except ImportError:
                from pyamf import amf0

            return amf0.Decoder
        elif encoding == AMF3:
            try:
                from cpyamf import amf3
            except ImportError:
                from pyamf import amf3

            return amf3.Decoder

        raise ValueError("Unknown encoding %r" % (encoding,))

    return _get_decoder_class()(*args, **kwargs)
python
train
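The nested try/except ImportError in get_decoder is the standard "prefer the C extension, fall back to pure Python" pattern; a generic, self-contained sketch (the accelerator module name is hypothetical, so the fallback branch runs here):

try:
    import _fastjson as jsonimpl  # compiled accelerator, if built
except ImportError:
    import json as jsonimpl      # pure-Python fallback, same interface

print(jsonimpl.loads('{"ok": true}'))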
lrq3000/pyFileFixity
pyFileFixity/lib/sortedcontainers/sorteddict.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/sortedcontainers/sorteddict.py#L17-L27
def not26(func):
    """Function decorator for methods not implemented in Python 2.6."""
    @wraps(func)
    def errfunc(*args, **kwargs):
        raise NotImplementedError
    if hexversion < 0x02070000:
        return errfunc
    else:
        return func
python
train
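A usage sketch for not26 (assumes the decorator above plus the `wraps` and `hexversion` imports it relies on are in scope): the swap happens at decoration time, so on a 2.6 interpreter callers get the error stub, while anything newer gets the original function untouched.

@not26
def newer_api():
    return 'needs Python >= 2.7'

print(newer_api())  # raises NotImplementedError only on a 2.6 interpreter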
chaimleib/intervaltree
intervaltree/intervaltree.py
https://github.com/chaimleib/intervaltree/blob/ffb2b1667f8b832e89324a75a175be8440504c9d/intervaltree/intervaltree.py#L401-L410
def difference(self, other):
    """
    Returns a new tree, comprising all intervals in self but not in
    other.
    """
    ivs = set()
    for iv in self:
        if iv not in other:
            ivs.add(iv)
    return IntervalTree(ivs)
python
train
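A usage sketch (requires the intervaltree package):

from intervaltree import Interval, IntervalTree

t1 = IntervalTree([Interval(0, 5), Interval(5, 10)])
t2 = IntervalTree([Interval(5, 10)])
print(t1.difference(t2))  # IntervalTree([Interval(0, 5)])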
mosdef-hub/mbuild
mbuild/compound.py
https://github.com/mosdef-hub/mbuild/blob/dcb80a2becd5d0e6a7e3e7bcb1b59793c46a2dd3/mbuild/compound.py#L1537-L1648
def _energy_minimize_openbabel(self, tmp_dir, steps=1000, algorithm='cg',
                               forcefield='UFF'):
    """Perform an energy minimization on a Compound

    Utilizes Open Babel (http://openbabel.org/docs/dev/) to perform an
    energy minimization/geometry optimization on a Compound by applying
    a generic force field.

    This function is primarily intended to be used on smaller components,
    with sizes on the order of 10's to 100's of particles, as the energy
    minimization scales poorly with the number of particles.

    Parameters
    ----------
    steps : int, optional, default=1000
        The number of optimization iterations
    algorithm : str, optional, default='cg'
        The energy minimization algorithm. Valid options are 'steep',
        'cg', and 'md', corresponding to steepest descent, conjugate
        gradient, and equilibrium molecular dynamics respectively.
    forcefield : str, optional, default='UFF'
        The generic force field to apply to the Compound for minimization.
        Valid options are 'MMFF94', 'MMFF94s', 'UFF', 'GAFF', and 'Ghemical'.
        Please refer to the Open Babel documentation (http://open-babel.
        readthedocs.io/en/latest/Forcefields/Overview.html) when considering
        your choice of force field.

    References
    ----------
    .. [1] O'Boyle, N.M.; Banck, M.; James, C.A.; Morley, C.;
           Vandermeersch, T.; Hutchison, G.R. "Open Babel: An open chemical
           toolbox." (2011) J. Cheminf. 3, 33
    .. [2] Open Babel, version X.X.X http://openbabel.org, (installed
           Month Year)

    If using the 'MMFF94' force field please also cite the following:
    .. [3] T.A. Halgren, "Merck molecular force field. I. Basis, form,
           scope, parameterization, and performance of MMFF94." (1996)
           J. Comput. Chem. 17, 490-519
    .. [4] T.A. Halgren, "Merck molecular force field. II. MMFF94 van der
           Waals and electrostatic parameters for intermolecular
           interactions." (1996) J. Comput. Chem. 17, 520-552
    .. [5] T.A. Halgren, "Merck molecular force field. III. Molecular
           geometries and vibrational frequencies for MMFF94." (1996)
           J. Comput. Chem. 17, 553-586
    .. [6] T.A. Halgren and R.B. Nachbar, "Merck molecular force field.
           IV. Conformational energies and geometries for MMFF94." (1996)
           J. Comput. Chem. 17, 587-615
    .. [7] T.A. Halgren, "Merck molecular force field. V. Extension of
           MMFF94 using experimental data, additional computational data,
           and empirical rules." (1996) J. Comput. Chem. 17, 616-641

    If using the 'MMFF94s' force field please cite the above along with:
    .. [8] T.A. Halgren, "MMFF VI. MMFF94s option for energy minimization
           studies." (1999) J. Comput. Chem. 20, 720-729

    If using the 'UFF' force field please cite the following:
    .. [3] Rappe, A.K., Casewit, C.J., Colwell, K.S., Goddard, W.A. III,
           Skiff, W.M. "UFF, a full periodic table force field for
           molecular mechanics and molecular dynamics simulations." (1992)
           J. Am. Chem. Soc. 114, 10024-10039

    If using the 'GAFF' force field please cite the following:
    .. [3] Wang, J., Wolf, R.M., Caldwell, J.W., Kollman, P.A., Case, D.A.
           "Development and testing of a general AMBER force field" (2004)
           J. Comput. Chem. 25, 1157-1174

    If using the 'Ghemical' force field please cite the following:
    .. [3] T. Hassinen and M. Perakyla, "New energy terms for reduced
           protein models implemented in an off-lattice force field" (2001)
           J. Comput. Chem. 22, 1229-1242
    """
    openbabel = import_('openbabel')

    for particle in self.particles():
        try:
            get_by_symbol(particle.name)
        except KeyError:
            raise MBuildError("Element name {} not recognized. Cannot "
                              "perform minimization."
                              "".format(particle.name))

    obConversion = openbabel.OBConversion()
    obConversion.SetInAndOutFormats("mol2", "pdb")
    mol = openbabel.OBMol()

    obConversion.ReadFile(mol, os.path.join(tmp_dir, "un-minimized.mol2"))

    ff = openbabel.OBForceField.FindForceField(forcefield)
    if ff is None:
        raise MBuildError("Force field '{}' not supported for energy "
                          "minimization. Valid force fields are 'MMFF94', "
                          "'MMFF94s', 'UFF', 'GAFF', and 'Ghemical'."
                          "".format(forcefield))
    warn(
        "Performing energy minimization using the Open Babel package. Please "
        "refer to the documentation to find the appropriate citations for "
        "Open Babel and the {} force field".format(forcefield))
    ff.Setup(mol)
    if algorithm == 'steep':
        ff.SteepestDescent(steps)
    elif algorithm == 'md':
        ff.MolecularDynamicsTakeNSteps(steps, 300)
    elif algorithm == 'cg':
        ff.ConjugateGradients(steps)
    else:
        raise MBuildError("Invalid minimization algorithm. Valid options "
                          "are 'steep', 'cg', and 'md'.")
    ff.UpdateCoordinates(mol)

    obConversion.WriteFile(mol, os.path.join(tmp_dir, 'minimized.pdb'))
python
train
saltstack/salt
salt/modules/vsphere.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/vsphere.py#L8197-L8285
def _create_disks(service_instance, disks, scsi_controllers=None, parent=None):
    '''
    Returns a list of disk specs representing the disks to be created for a
    virtual machine

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter.
        Default is None.

    disks
        List of disks with properties

    scsi_controllers
        List of SCSI controllers

    parent
        Parent object reference

    .. code-block:: bash

        disk:
          adapter: 'Hard disk 1'
          size: 16
          unit: GB
          address: '0:0'
          controller: 'SCSI controller 0'
          thin_provision: False
          eagerly_scrub: False
          datastore: 'myshare'
          filename: 'vm/mydisk.vmdk'
    '''
    disk_specs = []
    keys = range(-2000, -2050, -1)
    if disks:
        devs = [disk['adapter'] for disk in disks]
        log.trace('Creating disks %s', devs)
    for disk, key in zip(disks, keys):
        # create the disk
        filename, datastore, datastore_ref = None, None, None
        size = float(disk['size'])
        # when creating both SCSI controller and Hard disk at the same time
        # we need the randomly assigned (temporary) key of the newly created
        # SCSI controller
        controller_key = 1000  # Default is the first SCSI controller
        if 'address' in disk:  # 0:0
            controller_bus_number, unit_number = disk['address'].split(':')
            controller_bus_number = int(controller_bus_number)
            unit_number = int(unit_number)
            controller_key = _get_scsi_controller_key(
                controller_bus_number,
                scsi_ctrls=scsi_controllers)
        elif 'controller' in disk:
            for contr in scsi_controllers:
                if contr['label'] == disk['controller']:
                    controller_key = contr['key']
                    break
            else:
                raise salt.exceptions.VMwareObjectNotFoundError(
                    'The given controller does not exist: '
                    '{0}'.format(disk['controller']))
        if 'datastore' in disk:
            datastore_ref = \
                salt.utils.vmware.get_datastores(
                    service_instance, parent,
                    datastore_names=[disk['datastore']])[0]
            datastore = disk['datastore']
        if 'filename' in disk:
            filename = disk['filename']
        # XOR filename, datastore
        if (not filename and datastore) or (filename and not datastore):
            raise salt.exceptions.ArgumentValueError(
                'You must specify both filename and datastore attributes'
                ' to place your disk to a specific datastore '
                '{0}, {1}'.format(datastore, filename))
        disk_spec = _apply_hard_disk(
            unit_number,
            key,
            disk_label=disk['adapter'],
            size=size,
            unit=disk['unit'],
            controller_key=controller_key,
            operation='add',
            thin_provision=disk['thin_provision'],
            eagerly_scrub=disk['eagerly_scrub'] if 'eagerly_scrub' in disk else None,
            datastore=datastore_ref,
            filename=filename)
        disk_specs.append(disk_spec)
        unit_number += 1
    return disk_specs
python
train
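The "XOR filename, datastore" check in _create_disks can be stated more directly with bool(); a small equivalence sketch over all four hypothetical cases:

def forms_agree(filename, datastore):
    original = (not filename and datastore) or (filename and not datastore)
    idiomatic = bool(filename) != bool(datastore)  # true XOR on truthiness
    return bool(original) == idiomatic

assert all(forms_agree(f, d)
           for f in ('', 'vm/mydisk.vmdk')
           for d in ('', 'myshare'))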
koalalorenzo/python-digitalocean
digitalocean/Droplet.py
https://github.com/koalalorenzo/python-digitalocean/blob/d0221b57856fb1e131cafecf99d826f7b07a947c/digitalocean/Droplet.py#L595-L604
def get_action(self, action_id):
    """Returns a specific Action by its ID.

    Args:
        action_id (int): id of action
    """
    return Action.get_object(
        api_token=self.token,
        action_id=action_id
    )
python
valid
astrorafael/twisted-mqtt
mqtt/pdu.py
https://github.com/astrorafael/twisted-mqtt/blob/5b322f7c2b82a502b1e1b70703ae45f1f668d07d/mqtt/pdu.py#L251-L289
def decode(self, packet):
    '''
    Decode a CONNECT control packet.
    '''
    self.encoded = packet

    # Strip the fixed header plus variable length field
    lenLen = 1
    while packet[lenLen] & 0x80:
        lenLen += 1
    packet_remaining = packet[lenLen+1:]

    # Variable Header
    version_str, packet_remaining = decodeString(packet_remaining)
    version_id = int(packet_remaining[0])
    if version_id == v31['level']:
        self.version = v31
    else:
        self.version = v311
    flags = packet_remaining[1]
    self.cleanStart = (flags & 0x02) != 0
    willFlag = (flags & 0x04) != 0
    willQoS = (flags >> 3) & 0x03
    willRetain = (flags & 0x20) != 0
    userFlag = (flags & 0x80) != 0
    passFlag = (flags & 0x40) != 0
    packet_remaining = packet_remaining[2:]
    self.keepalive = decode16Int(packet_remaining)

    # Payload
    packet_remaining = packet_remaining[2:]
    self.clientId, packet_remaining = decodeString(packet_remaining)
    if willFlag:
        self.willRetain = willRetain
        self.willQoS = willQoS
        self.willTopic, packet_remaining = decodeString(packet_remaining)
        self.willMessage, packet_remaining = decodeString(packet_remaining)
    if userFlag:
        self.username, packet_remaining = decodeString(packet_remaining)
    if passFlag:
        l = decode16Int(packet_remaining)
        self.password = packet_remaining[2:2+l]
python
test
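The `while packet[lenLen] & 0x80` loop above only skips past MQTT's variable-length Remaining Length field; decoding the value itself follows the spec's 7-bits-per-byte scheme, sketched standalone here:

def decode_remaining_length(packet):
    # MQTT Remaining Length: 7 bits per byte, low group first;
    # the top bit of each byte flags that another byte follows
    value, multiplier, i = 0, 1, 1  # byte 0 is the packet type/flags
    while True:
        byte = packet[i]
        value += (byte & 0x7F) * multiplier
        multiplier *= 128
        i += 1
        if not (byte & 0x80):
            return value, i  # i is where the variable header starts

print(decode_remaining_length(bytes([0x10, 0xC1, 0x02])))  # (321, 3)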
DAI-Lab/Copulas
copulas/bivariate/base.py
https://github.com/DAI-Lab/Copulas/blob/821df61c3d36a6b81ef2883935f935c2eaaa862c/copulas/bivariate/base.py#L352-L364
def load(cls, copula_path):
    """Create a new instance from a file.

    Args:
        copula_path: `str` file with the serialized copula.

    Returns:
        Bivariate: Instance with the parameters stored in the file.
    """
    with open(copula_path) as f:
        copula_dict = json.load(f)

    return cls.from_dict(copula_dict)
python
train
numenta/nupic
examples/opf/experiments/classification/makeDatasets.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/examples/opf/experiments/classification/makeDatasets.py#L36-L79
def _generateCategory(filename="simple.csv", numSequences=2, elementsPerSeq=1,
                      numRepeats=10, resets=False):
  """ Generate a simple dataset. This contains a bunch of non-overlapping
  sequences.

  Parameters:
  ----------------------------------------------------
  filename:       name of the file to produce, including extension. It will
                  be created in a 'datasets' sub-directory within the
                  directory containing this script.
  numSequences:   how many sequences to generate
  elementsPerSeq: length of each sequence
  numRepeats:     how many times to repeat each sequence in the output
  resets:         if True, turn on reset at start of each sequence
  """

  # Create the output file
  scriptDir = os.path.dirname(__file__)
  pathname = os.path.join(scriptDir, 'datasets', filename)
  print "Creating %s..." % (pathname)
  fields = [('reset', 'int', 'R'),
            ('category', 'int', 'C'),
            ('field1', 'string', '')]
  outFile = FileRecordStream(pathname, write=True, fields=fields)

  # Create the sequences
  sequences = []
  for i in range(numSequences):
    seq = [x for x in range(i*elementsPerSeq, (i+1)*elementsPerSeq)]
    sequences.append(seq)

  # Write out the sequences in random order
  seqIdxs = []
  for i in range(numRepeats):
    seqIdxs += range(numSequences)
  random.shuffle(seqIdxs)

  for seqIdx in seqIdxs:
    reset = int(resets)
    seq = sequences[seqIdx]
    for x in seq:
      outFile.appendRecord([reset, str(seqIdx), str(x)])
      reset = 0

  outFile.close()
python
valid
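A hedged invocation sketch for `_generateCategory` above (Python 2, matching the record); the CSV lands in a `datasets/` directory next to the script:

    # Four sequences of three elements each, repeated ten times, with a
    # reset flag at the start of every sequence.
    _generateCategory(filename='category_test.csv', numSequences=4,
                      elementsPerSeq=3, numRepeats=10, resets=True)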
tensorflow/tensor2tensor
tensor2tensor/utils/metrics.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/metrics.py#L417-L432
def softmax_cross_entropy_one_hot(logits, labels, weights_fn=None): """Calculate softmax cross entropy given one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: cross-entropy (scalar), weights """ with tf.variable_scope("softmax_cross_entropy_one_hot", values=[logits, labels]): del weights_fn cross_entropy = tf.losses.softmax_cross_entropy( onehot_labels=labels, logits=logits) return cross_entropy, tf.constant(1.0)
[ "def", "softmax_cross_entropy_one_hot", "(", "logits", ",", "labels", ",", "weights_fn", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "\"softmax_cross_entropy_one_hot\"", ",", "values", "=", "[", "logits", ",", "labels", "]", ")", ":", "del", "weights_fn", "cross_entropy", "=", "tf", ".", "losses", ".", "softmax_cross_entropy", "(", "onehot_labels", "=", "labels", ",", "logits", "=", "logits", ")", "return", "cross_entropy", ",", "tf", ".", "constant", "(", "1.0", ")" ]
Calculate softmax cross entropy given one-hot labels and logits. Args: logits: Tensor of size [batch-size, o=1, p=1, num-classes] labels: Tensor of size [batch-size, o=1, p=1, num-classes] weights_fn: Function that takes in labels and weighs examples (unused) Returns: cross-entropy (scalar), weights
[ "Calculate", "softmax", "cross", "entropy", "given", "one", "-", "hot", "labels", "and", "logits", "." ]
python
train
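A small graph-mode sketch exercising `softmax_cross_entropy_one_hot` above, assuming TensorFlow 1.x as used by tensor2tensor; shapes follow the docstring:

    import tensorflow as tf

    logits = tf.random_normal([8, 1, 1, 10])                     # [batch, 1, 1, classes]
    labels = tf.one_hot(tf.random_uniform([8, 1, 1], maxval=10,
                                          dtype=tf.int32), depth=10)
    xent, weight = softmax_cross_entropy_one_hot(logits, labels)
    with tf.Session() as sess:
        print(sess.run([xent, weight]))                          # scalar loss, weight 1.0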
inveniosoftware/invenio-files-rest
invenio_files_rest/models.py
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/models.py#L803-L815
def update_contents(self, stream, seek=0, size=None, chunk_size=None, progress_callback=None, **kwargs): """Save contents of stream to this file. :param obj: ObjectVersion instance from where this file is accessed from. :param stream: File-like stream. """ self.checksum = None return self.storage(**kwargs).update( stream, seek=seek, size=size, chunk_size=chunk_size, progress_callback=progress_callback )
[ "def", "update_contents", "(", "self", ",", "stream", ",", "seek", "=", "0", ",", "size", "=", "None", ",", "chunk_size", "=", "None", ",", "progress_callback", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "checksum", "=", "None", "return", "self", ".", "storage", "(", "*", "*", "kwargs", ")", ".", "update", "(", "stream", ",", "seek", "=", "seek", ",", "size", "=", "size", ",", "chunk_size", "=", "chunk_size", ",", "progress_callback", "=", "progress_callback", ")" ]
Save contents of stream to this file. :param obj: ObjectVersion instance from where this file is accessed from. :param stream: File-like stream.
[ "Save", "contents", "of", "stream", "to", "this", "file", "." ]
python
train
wdbm/scalar
scalar/__init__.py
https://github.com/wdbm/scalar/blob/c4d70e778a6151b95aad721ca2d3a8bfa38126da/scalar/__init__.py#L71-L101
def setup(path_config="~/.config/scalar/config.yaml", configuration_name=None): """ Load a configuration from a default or specified configuration file, accessing a default or specified configuration name. """ global config global client global token global room # config file path_config = Path(path_config).expanduser() log.debug("load config {path}".format(path = path_config)) if not path_config.exists(): log.error("no config {path} found".format(path = path_config)) sys.exit() else: with open(str(path_config), "r") as _file: config = yaml.load(_file) if not configuration_name: for configuration in list(config["configurations"].items()): if configuration[1]["default"]: config = configuration[1] else: config = config["configurations"][configuration_name] # connect to homeserver and room log.debug("Matrix username: " + config["username"]) log.debug("connect to homeserver " + config["homeserver"]) client = MatrixClient(config["homeserver"]) token = client.login_with_password(username = config["username"], password = config["passcode"]) log.debug("connect to room " + config["room_alias"]) room = client.join_room(config["room_alias"])
[ "def", "setup", "(", "path_config", "=", "\"~/.config/scalar/config.yaml\"", ",", "configuration_name", "=", "None", ")", ":", "global", "config", "global", "client", "global", "token", "global", "room", "# config file", "path_config", "=", "Path", "(", "path_config", ")", ".", "expanduser", "(", ")", "log", ".", "debug", "(", "\"load config {path}\"", ".", "format", "(", "path", "=", "path_config", ")", ")", "if", "not", "path_config", ".", "exists", "(", ")", ":", "log", ".", "error", "(", "\"no config {path} found\"", ".", "format", "(", "path", "=", "path_config", ")", ")", "sys", ".", "exit", "(", ")", "else", ":", "with", "open", "(", "str", "(", "path_config", ")", ",", "\"r\"", ")", "as", "_file", ":", "config", "=", "yaml", ".", "load", "(", "_file", ")", "if", "not", "configuration_name", ":", "for", "configuration", "in", "list", "(", "config", "[", "\"configurations\"", "]", ".", "items", "(", ")", ")", ":", "if", "configuration", "[", "1", "]", "[", "\"default\"", "]", ":", "config", "=", "configuration", "[", "1", "]", "else", ":", "config", "[", "\"configurations\"", "]", "[", "configuration_name", "]", "# connect to homeserver and room", "log", ".", "debug", "(", "\"Matrix username: \"", "+", "config", "[", "\"username\"", "]", ")", "log", ".", "debug", "(", "\"connect to homeserver \"", "+", "config", "[", "\"homeserver\"", "]", ")", "client", "=", "MatrixClient", "(", "config", "[", "\"homeserver\"", "]", ")", "token", "=", "client", ".", "login_with_password", "(", "username", "=", "config", "[", "\"username\"", "]", ",", "password", "=", "config", "[", "\"passcode\"", "]", ")", "log", ".", "debug", "(", "\"connect to room \"", "+", "config", "[", "\"room_alias\"", "]", ")", "room", "=", "client", ".", "join_room", "(", "config", "[", "\"room_alias\"", "]", ")" ]
Load a configuration from a default or specified configuration file, accessing a default or specified configuration name.
[ "Load", "a", "configuration", "from", "a", "default", "or", "specified", "configuration", "file", "accessing", "a", "default", "or", "specified", "configuration", "name", "." ]
python
train
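The `setup` function above reads a YAML file keyed by `configurations`; a hedged sketch with a hypothetical config (all values invented):

    # ~/.config/scalar/config.yaml (hypothetical contents):
    # configurations:
    #   main:
    #     default: true
    #     username: "@bot:matrix.org"
    #     passcode: "secret"
    #     homeserver: "https://matrix.org"
    #     room_alias: "#demo:matrix.org"
    setup()  # loads the default configuration, logs in, and joins the room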
fracpete/python-weka-wrapper3
python/weka/flow/conversion.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/flow/conversion.py#L220-L229
def convert(self): """ Performs the actual conversion. :return: None if successful, otherwise errors message :rtype: str """ cname = str(self.config["wrapper"]) self._output = classes.from_commandline(self._input, classname=cname) return None
[ "def", "convert", "(", "self", ")", ":", "cname", "=", "str", "(", "self", ".", "config", "[", "\"wrapper\"", "]", ")", "self", ".", "_output", "=", "classes", ".", "from_commandline", "(", "self", ".", "_input", ",", "classname", "=", "cname", ")", "return", "None" ]
Performs the actual conversion. :return: None if successful, otherwise an error message :rtype: str
[ "Performs", "the", "actual", "conversion", "." ]
python
train
Dynatrace/OneAgent-SDK-for-Python
src/oneagent/sdk/__init__.py
https://github.com/Dynatrace/OneAgent-SDK-for-Python/blob/f7b121b492f25b1c5b27316798e1a70b6be2bd01/src/oneagent/sdk/__init__.py#L149-L217
def trace_incoming_web_request( self, webapp_info, url, method, headers=None, remote_address=None, str_tag=None, byte_tag=None): '''Create a tracer for an incoming webrequest. :param WebapplicationInfoHandle webapp_info: Web application information (see :meth:`create_web_application_info`). :param str url: The requested URL (including scheme, hostname/port, path and query). :param str method: The HTTP method of the request (e.g., GET or POST). :param headers: The HTTP headers of the request. Can be either a dictionary mapping header name to value (:class:`str` to :class:`str`) or a tuple containing a sequence of string header names as first element, an equally long sequence of corresponding values as second element and optionally a count as third element (this will default to the :func:`len` of the header names). Some headers can appear multiple times in an HTTP request. To capture all the values, either use the tuple-form and provide the name and corresponding values for each, or if possible for that particular header, set the value to an appropriately concatenated string. .. warning:: If you use Python 2, be sure to use the UTF-8 encoding or the :class:`unicode` type! See :ref:`here <http-encoding-warning>` for more information. :type headers: \ dict[str, str] or \ tuple[~typing.Collection[str], ~typing.Collection[str]] or \ tuple[~typing.Iterable[str], ~typing.Iterable[str], int]] :param str remote_address: The remote (client) IP address (of the peer of the socket connection via which the request was received). The remote address is useful to gain information about load balancers, proxies and ultimately the end user that is sending the request. For the other parameters, see :ref:`tagging`. :rtype: tracers.IncomingWebRequestTracer ''' assert isinstance(webapp_info, WebapplicationInfoHandle) result = tracers.IncomingWebRequestTracer( self._nsdk, self._nsdk.incomingwebrequesttracer_create( webapp_info.handle, url, method)) if not result: return result try: if headers: self._nsdk.incomingwebrequesttracer_add_request_headers( result.handle, *_get_kvc(headers)) if remote_address: self._nsdk.incomingwebrequesttracer_set_remote_address( result.handle, remote_address) self._applytag(result, str_tag, byte_tag) except: result.end() raise return result
[ "def", "trace_incoming_web_request", "(", "self", ",", "webapp_info", ",", "url", ",", "method", ",", "headers", "=", "None", ",", "remote_address", "=", "None", ",", "str_tag", "=", "None", ",", "byte_tag", "=", "None", ")", ":", "assert", "isinstance", "(", "webapp_info", ",", "WebapplicationInfoHandle", ")", "result", "=", "tracers", ".", "IncomingWebRequestTracer", "(", "self", ".", "_nsdk", ",", "self", ".", "_nsdk", ".", "incomingwebrequesttracer_create", "(", "webapp_info", ".", "handle", ",", "url", ",", "method", ")", ")", "if", "not", "result", ":", "return", "result", "try", ":", "if", "headers", ":", "self", ".", "_nsdk", ".", "incomingwebrequesttracer_add_request_headers", "(", "result", ".", "handle", ",", "*", "_get_kvc", "(", "headers", ")", ")", "if", "remote_address", ":", "self", ".", "_nsdk", ".", "incomingwebrequesttracer_set_remote_address", "(", "result", ".", "handle", ",", "remote_address", ")", "self", ".", "_applytag", "(", "result", ",", "str_tag", ",", "byte_tag", ")", "except", ":", "result", ".", "end", "(", ")", "raise", "return", "result" ]
Create a tracer for an incoming webrequest. :param WebapplicationInfoHandle webapp_info: Web application information (see :meth:`create_web_application_info`). :param str url: The requested URL (including scheme, hostname/port, path and query). :param str method: The HTTP method of the request (e.g., GET or POST). :param headers: The HTTP headers of the request. Can be either a dictionary mapping header name to value (:class:`str` to :class:`str`) or a tuple containing a sequence of string header names as first element, an equally long sequence of corresponding values as second element and optionally a count as third element (this will default to the :func:`len` of the header names). Some headers can appear multiple times in an HTTP request. To capture all the values, either use the tuple-form and provide the name and corresponding values for each, or if possible for that particular header, set the value to an appropriately concatenated string. .. warning:: If you use Python 2, be sure to use the UTF-8 encoding or the :class:`unicode` type! See :ref:`here <http-encoding-warning>` for more information. :type headers: \ dict[str, str] or \ tuple[~typing.Collection[str], ~typing.Collection[str]] or \ tuple[~typing.Iterable[str], ~typing.Iterable[str], int]] :param str remote_address: The remote (client) IP address (of the peer of the socket connection via which the request was received). The remote address is useful to gain information about load balancers, proxies and ultimately the end user that is sending the request. For the other parameters, see :ref:`tagging`. :rtype: tracers.IncomingWebRequestTracer
[ "Create", "a", "tracer", "for", "an", "incoming", "webrequest", "." ]
python
train
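A hedged sketch of tracing an incoming request with the method above; `sdk` is an initialized OneAgent SDK instance, and the tracer's context-manager use and `set_status_code` call are assumptions based on the SDK's tracer pattern:

    wai = sdk.create_web_application_info(
        virtual_host='example.com', application_id='MyWebApp', context_root='/')
    with sdk.trace_incoming_web_request(
            wai, 'http://example.com/search?q=x', 'GET',
            headers={'Host': 'example.com'},
            remote_address='203.0.113.7') as tracer:
        tracer.set_status_code(200)  # assumed tracer API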
uw-it-aca/uw-restclients
restclients/mailman/course_list.py
https://github.com/uw-it-aca/uw-restclients/blob/e12dcd32bf5296b6ebdf71798031594afb7852cb/restclients/mailman/course_list.py#L15-L26
def get_course_list_name(curriculum_abbr, course_number, section_id, quarter, year): """ Return the list address of UW course email list """ return "%s%s%s_%s%s" % ( _get_list_name_curr_abbr(curriculum_abbr), course_number, section_id.lower(), quarter.lower()[:2], str(year)[-2:] )
[ "def", "get_course_list_name", "(", "curriculum_abbr", ",", "course_number", ",", "section_id", ",", "quarter", ",", "year", ")", ":", "return", "\"%s%s%s_%s%s\"", "%", "(", "_get_list_name_curr_abbr", "(", "curriculum_abbr", ")", ",", "course_number", ",", "section_id", ".", "lower", "(", ")", ",", "quarter", ".", "lower", "(", ")", "[", ":", "2", "]", ",", "str", "(", "year", ")", "[", "-", "2", ":", "]", ")" ]
Return the list address of a UW course email list
[ "Return", "the", "list", "address", "of", "UW", "course", "email", "list" ]
python
train
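A worked example of the formatting above, assuming the `_get_list_name_curr_abbr` helper lowercases the curriculum abbreviation:

    get_course_list_name("CSE", 142, "A", "autumn", 2013)
    # "autumn"[:2] -> "au", str(2013)[-2:] -> "13"
    # -> "cse142a_au13" (given the helper returns "cse")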
square/pylink
pylink/jlink.py
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L4197-L4210
def trace_stop(self): """Stops collecting trace data. Args: self (JLink): the ``JLink`` instance. Returns: ``None`` """ cmd = enums.JLinkTraceCommand.STOP res = self._dll.JLINKARM_TRACE_Control(cmd, 0) if (res == 1): raise errors.JLinkException('Failed to stop trace.') return None
[ "def", "trace_stop", "(", "self", ")", ":", "cmd", "=", "enums", ".", "JLinkTraceCommand", ".", "STOP", "res", "=", "self", ".", "_dll", ".", "JLINKARM_TRACE_Control", "(", "cmd", ",", "0", ")", "if", "(", "res", "==", "1", ")", ":", "raise", "errors", ".", "JLinkException", "(", "'Failed to stop trace.'", ")", "return", "None" ]
Stops collecting trace data. Args: self (JLink): the ``JLink`` instance. Returns: ``None``
[ "Stops", "collecting", "trace", "data", "." ]
python
train
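A hedged usage sketch for `trace_stop` above; the target device name is hypothetical:

    import pylink

    jlink = pylink.JLink()
    jlink.open()
    jlink.connect('STM32F407VE')  # hypothetical target MCU
    jlink.trace_stop()            # raises JLinkException on failure, else returns None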
eqcorrscan/EQcorrscan
setup.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/setup.py#L181-L186
def export_symbols(*path): """ Required for windows systems - functions defined in libutils.def. """ lines = open(os.path.join(*path), 'r').readlines()[2:] return [s.strip() for s in lines if s.strip() != '']
[ "def", "export_symbols", "(", "*", "path", ")", ":", "lines", "=", "open", "(", "os", ".", "path", ".", "join", "(", "*", "path", ")", ",", "'r'", ")", ".", "readlines", "(", ")", "[", "2", ":", "]", "return", "[", "s", ".", "strip", "(", ")", "for", "s", "in", "lines", "if", "s", ".", "strip", "(", ")", "!=", "''", "]" ]
Required for Windows systems - functions defined in libutils.def.
[ "Required", "for", "windows", "systems", "-", "functions", "defined", "in", "libutils", ".", "def", "." ]
python
train
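An illustration of the helper above: the first two lines of the `.def` file (library name and the EXPORTS header) are skipped, and the remaining lines come back stripped. The file contents are hypothetical:

    # libutils.def (hypothetical):
    #   LIBRARY libutils
    #   EXPORTS
    #       normxcorr
    #       multi_normxcorr
    export_symbols('eqcorrscan', 'lib', 'libutils.def')
    # -> ['normxcorr', 'multi_normxcorr']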
draperunner/fjlc
fjlc/classifier/classifier.py
https://github.com/draperunner/fjlc/blob/d2cc8cf1244984e7caf0bf95b11ed1677a94c994/fjlc/classifier/classifier.py#L26-L40
def classify(self, tweet): """ Classifies the tweet into one of three classes (negative, neutral or positive) depending on the sentiment value of the tweet and the thresholds specified in the classifier_options :param tweet: String tweet to classify :return: Sentiment classification (negative, neutral or positive) """ sentiment_value = self.calculate_sentiment(tweet) return Classification.classify_from_thresholds(sentiment_value, classifier_options.get_variable( classifier_options.Variable.CLASSIFICATION_THRESHOLD_LOWER), classifier_options.get_variable( classifier_options.Variable.CLASSIFICATION_THRESHOLD_HIGHER))
[ "def", "classify", "(", "self", ",", "tweet", ")", ":", "sentiment_value", "=", "self", ".", "calculate_sentiment", "(", "tweet", ")", "return", "Classification", ".", "classify_from_thresholds", "(", "sentiment_value", ",", "classifier_options", ".", "get_variable", "(", "classifier_options", ".", "Variable", ".", "CLASSIFICATION_THRESHOLD_LOWER", ")", ",", "classifier_options", ".", "get_variable", "(", "classifier_options", ".", "Variable", ".", "CLASSIFICATION_THRESHOLD_HIGHER", ")", ")" ]
Classifies the tweet into one of three classes (negative, neutral or positive) depending on the sentiment value of the tweet and the thresholds specified in the classifier_options :param tweet: String tweet to classify :return: Sentiment classification (negative, neutral or positive)
[ "Classifies", "the", "tweet", "into", "one", "of", "three", "classes", "(", "negative", "neutral", "or", "positive", ")", "depending", "on", "the", "sentiment", "value", "of", "the", "tweet", "and", "the", "thresholds", "specified", "in", "the", "classifier_options" ]
python
train
ManiacalLabs/PixelWeb
pixelweb/bottle.py
https://github.com/ManiacalLabs/PixelWeb/blob/9eacbfd40a1d35011c2dcea15c303da9636c6b9e/pixelweb/bottle.py#L2413-L2422
def redirect(url, code=None): """ Aborts execution and causes a 303 or 302 redirect, depending on the HTTP protocol version. """ if not code: code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302 res = response.copy(cls=HTTPResponse) res.status = code res.body = "" res.set_header('Location', urljoin(request.url, url)) raise res
[ "def", "redirect", "(", "url", ",", "code", "=", "None", ")", ":", "if", "not", "code", ":", "code", "=", "303", "if", "request", ".", "get", "(", "'SERVER_PROTOCOL'", ")", "==", "\"HTTP/1.1\"", "else", "302", "res", "=", "response", ".", "copy", "(", "cls", "=", "HTTPResponse", ")", "res", ".", "status", "=", "code", "res", ".", "body", "=", "\"\"", "res", ".", "set_header", "(", "'Location'", ",", "urljoin", "(", "request", ".", "url", ",", "url", ")", ")", "raise", "res" ]
Aborts execution and causes a 303 or 302 redirect, depending on the HTTP protocol version.
[ "Aborts", "execution", "and", "causes", "a", "303", "or", "302", "redirect", "depending", "on", "the", "HTTP", "protocol", "version", "." ]
python
train
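A typical Bottle-style usage sketch for `redirect` above:

    @route('/old-path')
    def old_path():
        redirect('/new-path')            # 303 under HTTP/1.1, 302 otherwise

    @route('/moved')
    def moved():
        redirect('/new-home', code=301)  # explicit permanent redirect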
jf-parent/brome
brome/core/proxy_driver.py
https://github.com/jf-parent/brome/blob/784f45d96b83b703dd2181cb59ca8ea777c2510e/brome/core/proxy_driver.py#L1078-L1117
def assert_present(self, selector, testid=None, **kwargs): """Assert that the element is present in the dom Args: selector (str): the selector used to find the element test_id (str): the test_id or a str Kwargs: wait_until_present (bool) Returns: bool: True is the assertion succeed; False otherwise. """ self.info_log( "Assert present selector(%s) testid(%s)" % (selector, testid) ) wait_until_present = kwargs.get( 'wait_until_present', BROME_CONFIG['proxy_driver']['wait_until_present_before_assert_present'] # noqa ) self.debug_log( "effective wait_until_present: %s" % wait_until_present ) if wait_until_present: element = self.wait_until_present(selector, raise_exception=False) else: element = self.is_present(selector) if element: if testid is not None: self.create_test_result(testid, True) return True else: if testid is not None: self.create_test_result(testid, False) return False
[ "def", "assert_present", "(", "self", ",", "selector", ",", "testid", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "info_log", "(", "\"Assert present selector(%s) testid(%s)\"", "%", "(", "selector", ",", "testid", ")", ")", "wait_until_present", "=", "kwargs", ".", "get", "(", "'wait_until_present'", ",", "BROME_CONFIG", "[", "'proxy_driver'", "]", "[", "'wait_until_present_before_assert_present'", "]", "# noqa", ")", "self", ".", "debug_log", "(", "\"effective wait_until_present: %s\"", "%", "wait_until_present", ")", "if", "wait_until_present", ":", "element", "=", "self", ".", "wait_until_present", "(", "selector", ",", "raise_exception", "=", "False", ")", "else", ":", "element", "=", "self", ".", "is_present", "(", "selector", ")", "if", "element", ":", "if", "testid", "is", "not", "None", ":", "self", ".", "create_test_result", "(", "testid", ",", "True", ")", "return", "True", "else", ":", "if", "testid", "is", "not", "None", ":", "self", ".", "create_test_result", "(", "testid", ",", "False", ")", "return", "False" ]
Assert that the element is present in the dom Args: selector (str): the selector used to find the element test_id (str): the test_id or a str Kwargs: wait_until_present (bool) Returns: bool: True if the assertion succeeded; False otherwise.
[ "Assert", "that", "the", "element", "is", "present", "in", "the", "dom" ]
python
train
cltk/cltk
cltk/inflection/old_norse/nouns.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/inflection/old_norse/nouns.py#L55-L66
def get_representative_cases(self): """ >>> armr = OldNorseNoun("armr", decl_utils.Gender.masculine) >>> armr.set_representative_cases("armr", "arms", "armar") >>> armr.get_representative_cases() ('armr', 'arms', 'armar') :return: nominative singular, genetive singular, nominative plural """ return (self.get_declined(decl_utils.Case.nominative, decl_utils.Number.singular), self.get_declined(decl_utils.Case.genitive, decl_utils.Number.singular), self.get_declined(decl_utils.Case.nominative, decl_utils.Number.plural))
[ "def", "get_representative_cases", "(", "self", ")", ":", "return", "(", "self", ".", "get_declined", "(", "decl_utils", ".", "Case", ".", "nominative", ",", "decl_utils", ".", "Number", ".", "singular", ")", ",", "self", ".", "get_declined", "(", "decl_utils", ".", "Case", ".", "genitive", ",", "decl_utils", ".", "Number", ".", "singular", ")", ",", "self", ".", "get_declined", "(", "decl_utils", ".", "Case", ".", "nominative", ",", "decl_utils", ".", "Number", ".", "plural", ")", ")" ]
>>> armr = OldNorseNoun("armr", decl_utils.Gender.masculine) >>> armr.set_representative_cases("armr", "arms", "armar") >>> armr.get_representative_cases() ('armr', 'arms', 'armar') :return: nominative singular, genitive singular, nominative plural
[ ">>>", "armr", "=", "OldNorseNoun", "(", "armr", "decl_utils", ".", "Gender", ".", "masculine", ")", ">>>", "armr", ".", "set_representative_cases", "(", "armr", "arms", "armar", ")", ">>>", "armr", ".", "get_representative_cases", "()", "(", "armr", "arms", "armar", ")" ]
python
train
sentinelsat/sentinelsat
sentinelsat/sentinel.py
https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L889-L940
def geojson_to_wkt(geojson_obj, feature_number=0, decimals=4): """Convert a GeoJSON object to Well-Known Text. Intended for use with OpenSearch queries. In case of FeatureCollection, only one of the features is used (the first by default). 3D points are converted to 2D. Parameters ---------- geojson_obj : dict a GeoJSON object feature_number : int, optional Feature to extract polygon from (in case of MultiPolygon FeatureCollection), defaults to first Feature decimals : int, optional Number of decimal figures after point to round coordinate to. Defaults to 4 (about 10 meters). Returns ------- polygon coordinates string of comma separated coordinate tuples (lon, lat) to be used by SentinelAPI """ if 'coordinates' in geojson_obj: geometry = geojson_obj elif 'geometry' in geojson_obj: geometry = geojson_obj['geometry'] else: geometry = geojson_obj['features'][feature_number]['geometry'] def ensure_2d(geometry): if isinstance(geometry[0], (list, tuple)): return list(map(ensure_2d, geometry)) else: return geometry[:2] def check_bounds(geometry): if isinstance(geometry[0], (list, tuple)): return list(map(check_bounds, geometry)) else: if geometry[0] > 180 or geometry[0] < -180: raise ValueError('Longitude is out of bounds, check your JSON format or data') if geometry[1] > 90 or geometry[1] < -90: raise ValueError('Latitude is out of bounds, check your JSON format or data') # Discard z-coordinate, if it exists geometry['coordinates'] = ensure_2d(geometry['coordinates']) check_bounds(geometry['coordinates']) wkt = geomet.wkt.dumps(geometry, decimals=decimals) # Strip unnecessary spaces wkt = re.sub(r'(?<!\d) ', '', wkt) return wkt
[ "def", "geojson_to_wkt", "(", "geojson_obj", ",", "feature_number", "=", "0", ",", "decimals", "=", "4", ")", ":", "if", "'coordinates'", "in", "geojson_obj", ":", "geometry", "=", "geojson_obj", "elif", "'geometry'", "in", "geojson_obj", ":", "geometry", "=", "geojson_obj", "[", "'geometry'", "]", "else", ":", "geometry", "=", "geojson_obj", "[", "'features'", "]", "[", "feature_number", "]", "[", "'geometry'", "]", "def", "ensure_2d", "(", "geometry", ")", ":", "if", "isinstance", "(", "geometry", "[", "0", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "list", "(", "map", "(", "ensure_2d", ",", "geometry", ")", ")", "else", ":", "return", "geometry", "[", ":", "2", "]", "def", "check_bounds", "(", "geometry", ")", ":", "if", "isinstance", "(", "geometry", "[", "0", "]", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "list", "(", "map", "(", "check_bounds", ",", "geometry", ")", ")", "else", ":", "if", "geometry", "[", "0", "]", ">", "180", "or", "geometry", "[", "0", "]", "<", "-", "180", ":", "raise", "ValueError", "(", "'Longitude is out of bounds, check your JSON format or data'", ")", "if", "geometry", "[", "1", "]", ">", "90", "or", "geometry", "[", "1", "]", "<", "-", "90", ":", "raise", "ValueError", "(", "'Latitude is out of bounds, check your JSON format or data'", ")", "# Discard z-coordinate, if it exists", "geometry", "[", "'coordinates'", "]", "=", "ensure_2d", "(", "geometry", "[", "'coordinates'", "]", ")", "check_bounds", "(", "geometry", "[", "'coordinates'", "]", ")", "wkt", "=", "geomet", ".", "wkt", ".", "dumps", "(", "geometry", ",", "decimals", "=", "decimals", ")", "# Strip unnecessary spaces", "wkt", "=", "re", ".", "sub", "(", "r'(?<!\\d) '", ",", "''", ",", "wkt", ")", "return", "wkt" ]
Convert a GeoJSON object to Well-Known Text. Intended for use with OpenSearch queries. In case of FeatureCollection, only one of the features is used (the first by default). 3D points are converted to 2D. Parameters ---------- geojson_obj : dict a GeoJSON object feature_number : int, optional Feature to extract polygon from (in case of MultiPolygon FeatureCollection), defaults to first Feature decimals : int, optional Number of decimal figures after point to round coordinate to. Defaults to 4 (about 10 meters). Returns ------- polygon coordinates string of comma separated coordinate tuples (lon, lat) to be used by SentinelAPI
[ "Convert", "a", "GeoJSON", "object", "to", "Well", "-", "Known", "Text", ".", "Intended", "for", "use", "with", "OpenSearch", "queries", "." ]
python
train
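A worked sketch for `geojson_to_wkt` above: 3D points are flattened to 2D and spaces not preceded by a digit are stripped (exact decimal formatting depends on geomet):

    polygon = {
        "type": "Polygon",
        "coordinates": [[[0.0, 0.0, 5.0], [1.0, 0.0, 5.0],
                         [1.0, 1.0, 5.0], [0.0, 0.0, 5.0]]],
    }
    geojson_to_wkt(polygon)
    # -> roughly 'POLYGON((0.0000 0.0000,1.0000 0.0000,1.0000 1.0000,0.0000 0.0000))'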
ph4r05/monero-serialize
monero_serialize/xmrobj.py
https://github.com/ph4r05/monero-serialize/blob/cebb3ba2aaf2e9211b1dcc6db2bab02946d06e42/monero_serialize/xmrobj.py#L70-L88
async def dump_blob(elem, elem_type=None): """ Dumps blob message. Supports both blob and raw value. :param writer: :param elem: :param elem_type: :param params: :return: """ elem_is_blob = isinstance(elem, x.BlobType) data = getattr(elem, x.BlobType.DATA_ATTR) if elem_is_blob else elem if data is None or len(data) == 0: return b'' if isinstance(data, (bytes, bytearray, list)): return base64.b16encode(bytes(data)) else: raise ValueError('Unknown blob type')
[ "async", "def", "dump_blob", "(", "elem", ",", "elem_type", "=", "None", ")", ":", "elem_is_blob", "=", "isinstance", "(", "elem", ",", "x", ".", "BlobType", ")", "data", "=", "getattr", "(", "elem", ",", "x", ".", "BlobType", ".", "DATA_ATTR", ")", "if", "elem_is_blob", "else", "elem", "if", "data", "is", "None", "or", "len", "(", "data", ")", "==", "0", ":", "return", "b''", "if", "isinstance", "(", "data", ",", "(", "bytes", ",", "bytearray", ",", "list", ")", ")", ":", "return", "base64", ".", "b16encode", "(", "bytes", "(", "data", ")", ")", "else", ":", "raise", "ValueError", "(", "'Unknown blob type'", ")" ]
Dumps blob message. Supports both blob and raw value. :param elem: Blob instance or raw bytes-like value to dump. :param elem_type: Optional element type (unused here). :return: Base16-encoded bytes, or b'' for empty input.
[ "Dumps", "blob", "message", ".", "Supports", "both", "blob", "and", "raw", "value", "." ]
python
train
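A minimal driver for the coroutine above; raw bytes are accepted directly and come back base16-encoded:

    import asyncio

    result = asyncio.get_event_loop().run_until_complete(dump_blob(b'\x01\xff'))
    # -> b'01FF'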
quantopian/pyfolio
pyfolio/perf_attrib.py
https://github.com/quantopian/pyfolio/blob/712716ab0cdebbec9fabb25eea3bf40e4354749d/pyfolio/perf_attrib.py#L620-L647
def _stack_positions(positions, pos_in_dollars=True): """ Convert positions to percentages if necessary, and change them to long format. Parameters ---------- positions: pd.DataFrame Daily holdings (in dollars or percentages), indexed by date. Will be converted to percentages if positions are in dollars. Short positions show up as cash in the 'cash' column. pos_in_dollars : bool Flag indicating whether `positions` are in dollars or percentages If True, positions are in dollars. """ if pos_in_dollars: # convert holdings to percentages positions = get_percent_alloc(positions) # remove cash after normalizing positions positions = positions.drop('cash', axis='columns') # convert positions to long format positions = positions.stack() positions.index = positions.index.set_names(['dt', 'ticker']) return positions
[ "def", "_stack_positions", "(", "positions", ",", "pos_in_dollars", "=", "True", ")", ":", "if", "pos_in_dollars", ":", "# convert holdings to percentages", "positions", "=", "get_percent_alloc", "(", "positions", ")", "# remove cash after normalizing positions", "positions", "=", "positions", ".", "drop", "(", "'cash'", ",", "axis", "=", "'columns'", ")", "# convert positions to long format", "positions", "=", "positions", ".", "stack", "(", ")", "positions", ".", "index", "=", "positions", ".", "index", ".", "set_names", "(", "[", "'dt'", ",", "'ticker'", "]", ")", "return", "positions" ]
Convert positions to percentages if necessary, and change them to long format. Parameters ---------- positions: pd.DataFrame Daily holdings (in dollars or percentages), indexed by date. Will be converted to percentages if positions are in dollars. Short positions show up as cash in the 'cash' column. pos_in_dollars : bool Flag indicating whether `positions` are in dollars or percentages If True, positions are in dollars.
[ "Convert", "positions", "to", "percentages", "if", "necessary", "and", "change", "them", "to", "long", "format", "." ]
python
valid
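A hedged pandas sketch for `_stack_positions` above; `get_percent_alloc` is assumed to normalize each row to fractions of total holdings:

    import pandas as pd

    positions = pd.DataFrame(
        {'AAPL': [100.0, 150.0], 'cash': [900.0, 850.0]},
        index=pd.to_datetime(['2019-01-02', '2019-01-03']))
    stacked = _stack_positions(positions, pos_in_dollars=True)
    # Series indexed by (dt, ticker) with percent allocations; 'cash' dropped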
spyder-ide/spyder
spyder/widgets/calltip.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/calltip.py#L105-L108
def leaveEvent(self, event): """Override Qt method to hide the tooltip on leave.""" super(ToolTipWidget, self).leaveEvent(event) self.hide()
[ "def", "leaveEvent", "(", "self", ",", "event", ")", ":", "super", "(", "ToolTipWidget", ",", "self", ")", ".", "leaveEvent", "(", "event", ")", "self", ".", "hide", "(", ")" ]
Override Qt method to hide the tooltip on leave.
[ "Override", "Qt", "method", "to", "hide", "the", "tooltip", "on", "leave", "." ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L511-L537
def bodvar(body, item, dim): """ Deprecated: This routine has been superseded by :func:`bodvcd` and :func:`bodvrd`. This routine is supported for purposes of backward compatibility only. Return the values of some item for any body in the kernel pool. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bodvar_c.html :param body: ID code of body. :type body: int :param item: Item for which values are desired, ("RADII", "NUT_PREC_ANGLES", etc.) :type item: str :param dim: Number of values returned. :type dim: int :return: values :rtype: Array of floats """ body = ctypes.c_int(body) dim = ctypes.c_int(dim) item = stypes.stringToCharP(item) values = stypes.emptyDoubleVector(dim.value) libspice.bodvar_c(body, item, ctypes.byref(dim), values) return stypes.cVectorToPython(values)
[ "def", "bodvar", "(", "body", ",", "item", ",", "dim", ")", ":", "body", "=", "ctypes", ".", "c_int", "(", "body", ")", "dim", "=", "ctypes", ".", "c_int", "(", "dim", ")", "item", "=", "stypes", ".", "stringToCharP", "(", "item", ")", "values", "=", "stypes", ".", "emptyDoubleVector", "(", "dim", ".", "value", ")", "libspice", ".", "bodvar_c", "(", "body", ",", "item", ",", "ctypes", ".", "byref", "(", "dim", ")", ",", "values", ")", "return", "stypes", ".", "cVectorToPython", "(", "values", ")" ]
Deprecated: This routine has been superseded by :func:`bodvcd` and :func:`bodvrd`. This routine is supported for purposes of backward compatibility only. Return the values of some item for any body in the kernel pool. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bodvar_c.html :param body: ID code of body. :type body: int :param item: Item for which values are desired, ("RADII", "NUT_PREC_ANGLES", etc.) :type item: str :param dim: Number of values returned. :type dim: int :return: values :rtype: Array of floats
[ "Deprecated", ":", "This", "routine", "has", "been", "superseded", "by", ":", "func", ":", "bodvcd", "and", ":", "func", ":", "bodvrd", ".", "This", "routine", "is", "supported", "for", "purposes", "of", "backward", "compatibility", "only", "." ]
python
train
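A hedged usage sketch for the deprecated `bodvar` above; the kernel path is hypothetical and `bodvrd` is the recommended replacement:

    import spiceypy as spice

    spice.furnsh('pck00010.tpc')           # hypothetical planetary-constants kernel
    radii = spice.bodvar(399, 'RADII', 3)  # three radii of Earth, in km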
tcalmant/ipopo
pelix/ipopo/handlers/temporal.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/ipopo/handlers/temporal.py#L312-L345
def on_service_departure(self, svc_ref): """ Called when a service has been unregistered from the framework :param svc_ref: A service reference """ with self._lock: if svc_ref is self.reference: # Forget about the service self._value.unset_service() # Clear the reference self.reference = None # Look for a replacement self._pending_ref = self._context.get_service_reference( self.requirement.specification, self.requirement.filter ) if self._pending_ref is None: # No replacement found yet, wait a little self.__still_valid = True self.__timer_args = (self._value, svc_ref) self.__timer = threading.Timer( self.__timeout, self.__unbind_call, (False,) ) self.__timer.start() else: # Notify iPOPO immediately self._ipopo_instance.unbind(self, self._value, svc_ref) return True return None
[ "def", "on_service_departure", "(", "self", ",", "svc_ref", ")", ":", "with", "self", ".", "_lock", ":", "if", "svc_ref", "is", "self", ".", "reference", ":", "# Forget about the service", "self", ".", "_value", ".", "unset_service", "(", ")", "# Clear the reference", "self", ".", "reference", "=", "None", "# Look for a replacement", "self", ".", "_pending_ref", "=", "self", ".", "_context", ".", "get_service_reference", "(", "self", ".", "requirement", ".", "specification", ",", "self", ".", "requirement", ".", "filter", ")", "if", "self", ".", "_pending_ref", "is", "None", ":", "# No replacement found yet, wait a little", "self", ".", "__still_valid", "=", "True", "self", ".", "__timer_args", "=", "(", "self", ".", "_value", ",", "svc_ref", ")", "self", ".", "__timer", "=", "threading", ".", "Timer", "(", "self", ".", "__timeout", ",", "self", ".", "__unbind_call", ",", "(", "False", ",", ")", ")", "self", ".", "__timer", ".", "start", "(", ")", "else", ":", "# Notify iPOPO immediately", "self", ".", "_ipopo_instance", ".", "unbind", "(", "self", ",", "self", ".", "_value", ",", "svc_ref", ")", "return", "True", "return", "None" ]
Called when a service has been unregistered from the framework :param svc_ref: A service reference
[ "Called", "when", "a", "service", "has", "been", "unregistered", "from", "the", "framework" ]
python
train
pyca/pyopenssl
src/OpenSSL/SSL.py
https://github.com/pyca/pyopenssl/blob/1fbe064c50fd030948141d7d630673761525b0d0/src/OpenSSL/SSL.py#L812-L859
def set_default_verify_paths(self): """ Specify that the platform provided CA certificates are to be used for verification purposes. This method has some caveats related to the binary wheels that cryptography (pyOpenSSL's primary dependency) ships: * macOS will only load certificates using this method if the user has the ``[email protected]`` `Homebrew <https://brew.sh>`_ formula installed in the default location. * Windows will not work. * manylinux1 cryptography wheels will work on most common Linux distributions in pyOpenSSL 17.1.0 and above. pyOpenSSL detects the manylinux1 wheel and attempts to load roots via a fallback path. :return: None """ # SSL_CTX_set_default_verify_paths will attempt to load certs from # both a cafile and capath that are set at compile time. However, # it will first check environment variables and, if present, load # those paths instead set_result = _lib.SSL_CTX_set_default_verify_paths(self._context) _openssl_assert(set_result == 1) # After attempting to set default_verify_paths we need to know whether # to go down the fallback path. # First we'll check to see if any env vars have been set. If so, # we won't try to do anything else because the user has set the path # themselves. dir_env_var = _ffi.string( _lib.X509_get_default_cert_dir_env() ).decode("ascii") file_env_var = _ffi.string( _lib.X509_get_default_cert_file_env() ).decode("ascii") if not self._check_env_vars_set(dir_env_var, file_env_var): default_dir = _ffi.string(_lib.X509_get_default_cert_dir()) default_file = _ffi.string(_lib.X509_get_default_cert_file()) # Now we check to see if the default_dir and default_file are set # to the exact values we use in our manylinux1 builds. If they are # then we know to load the fallbacks if ( default_dir == _CRYPTOGRAPHY_MANYLINUX1_CA_DIR and default_file == _CRYPTOGRAPHY_MANYLINUX1_CA_FILE ): # This is manylinux1, let's load our fallback paths self._fallback_default_verify_paths( _CERTIFICATE_FILE_LOCATIONS, _CERTIFICATE_PATH_LOCATIONS )
[ "def", "set_default_verify_paths", "(", "self", ")", ":", "# SSL_CTX_set_default_verify_paths will attempt to load certs from", "# both a cafile and capath that are set at compile time. However,", "# it will first check environment variables and, if present, load", "# those paths instead", "set_result", "=", "_lib", ".", "SSL_CTX_set_default_verify_paths", "(", "self", ".", "_context", ")", "_openssl_assert", "(", "set_result", "==", "1", ")", "# After attempting to set default_verify_paths we need to know whether", "# to go down the fallback path.", "# First we'll check to see if any env vars have been set. If so,", "# we won't try to do anything else because the user has set the path", "# themselves.", "dir_env_var", "=", "_ffi", ".", "string", "(", "_lib", ".", "X509_get_default_cert_dir_env", "(", ")", ")", ".", "decode", "(", "\"ascii\"", ")", "file_env_var", "=", "_ffi", ".", "string", "(", "_lib", ".", "X509_get_default_cert_file_env", "(", ")", ")", ".", "decode", "(", "\"ascii\"", ")", "if", "not", "self", ".", "_check_env_vars_set", "(", "dir_env_var", ",", "file_env_var", ")", ":", "default_dir", "=", "_ffi", ".", "string", "(", "_lib", ".", "X509_get_default_cert_dir", "(", ")", ")", "default_file", "=", "_ffi", ".", "string", "(", "_lib", ".", "X509_get_default_cert_file", "(", ")", ")", "# Now we check to see if the default_dir and default_file are set", "# to the exact values we use in our manylinux1 builds. If they are", "# then we know to load the fallbacks", "if", "(", "default_dir", "==", "_CRYPTOGRAPHY_MANYLINUX1_CA_DIR", "and", "default_file", "==", "_CRYPTOGRAPHY_MANYLINUX1_CA_FILE", ")", ":", "# This is manylinux1, let's load our fallback paths", "self", ".", "_fallback_default_verify_paths", "(", "_CERTIFICATE_FILE_LOCATIONS", ",", "_CERTIFICATE_PATH_LOCATIONS", ")" ]
Specify that the platform provided CA certificates are to be used for verification purposes. This method has some caveats related to the binary wheels that cryptography (pyOpenSSL's primary dependency) ships: * macOS will only load certificates using this method if the user has the ``[email protected]`` `Homebrew <https://brew.sh>`_ formula installed in the default location. * Windows will not work. * manylinux1 cryptography wheels will work on most common Linux distributions in pyOpenSSL 17.1.0 and above. pyOpenSSL detects the manylinux1 wheel and attempts to load roots via a fallback path. :return: None
[ "Specify", "that", "the", "platform", "provided", "CA", "certificates", "are", "to", "be", "used", "for", "verification", "purposes", ".", "This", "method", "has", "some", "caveats", "related", "to", "the", "binary", "wheels", "that", "cryptography", "(", "pyOpenSSL", "s", "primary", "dependency", ")", "ships", ":" ]
python
test
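A usage sketch for `set_default_verify_paths` above, loading platform roots into a verifying context (subject to the caveats in the docstring):

    from OpenSSL import SSL

    ctx = SSL.Context(SSL.TLSv1_2_METHOD)
    ctx.set_default_verify_paths()  # platform CA roots (or manylinux1 fallback)
    ctx.set_verify(SSL.VERIFY_PEER,
                   lambda conn, cert, errno, depth, ok: bool(ok))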
ANTsX/ANTsPy
ants/utils/iMath.py
https://github.com/ANTsX/ANTsPy/blob/638020af2cdfc5ff4bdb9809ffe67aa505727a3b/ants/utils/iMath.py#L70-L107
def iMath(image, operation, *args): """ Perform various (often mathematical) operations on the input image/s. Additional parameters should be specific for each operation. See the the full iMath in ANTs, on which this function is based. ANTsR function: `iMath` Arguments --------- image : ANTsImage input object, usually antsImage operation a string e.g. "GetLargestComponent" ... the special case of "GetOperations" or "GetOperationsFull" will return a list of operations and brief description. Some operations may not be valid (WIP), but most are. *args : non-keyword arguments additional parameters specific to the operation Example ------- >>> import ants >>> img = ants.image_read(ants.get_ants_data('r16')) >>> img2 = ants.iMath(img, 'Canny', 1, 5, 12) """ if operation not in _iMathOps: raise ValueError('Operation not recognized') imagedim = image.dimension outimage = image.clone() args = [imagedim, outimage, operation, image] + [a for a in args] processed_args = _int_antsProcessArguments(args) libfn = utils.get_lib_fn('iMath') libfn(processed_args) return outimage
[ "def", "iMath", "(", "image", ",", "operation", ",", "*", "args", ")", ":", "if", "operation", "not", "in", "_iMathOps", ":", "raise", "ValueError", "(", "'Operation not recognized'", ")", "imagedim", "=", "image", ".", "dimension", "outimage", "=", "image", ".", "clone", "(", ")", "args", "=", "[", "imagedim", ",", "outimage", ",", "operation", ",", "image", "]", "+", "[", "a", "for", "a", "in", "args", "]", "processed_args", "=", "_int_antsProcessArguments", "(", "args", ")", "libfn", "=", "utils", ".", "get_lib_fn", "(", "'iMath'", ")", "libfn", "(", "processed_args", ")", "return", "outimage" ]
Perform various (often mathematical) operations on the input image/s. Additional parameters should be specific for each operation. See the full iMath in ANTs, on which this function is based. ANTsR function: `iMath` Arguments --------- image : ANTsImage input object, usually antsImage operation a string e.g. "GetLargestComponent" ... the special case of "GetOperations" or "GetOperationsFull" will return a list of operations and brief description. Some operations may not be valid (WIP), but most are. *args : non-keyword arguments additional parameters specific to the operation Example ------- >>> import ants >>> img = ants.image_read(ants.get_ants_data('r16')) >>> img2 = ants.iMath(img, 'Canny', 1, 5, 12)
[ "Perform", "various", "(", "often", "mathematical", ")", "operations", "on", "the", "input", "image", "/", "s", ".", "Additional", "parameters", "should", "be", "specific", "for", "each", "operation", ".", "See", "the", "the", "full", "iMath", "in", "ANTs", "on", "which", "this", "function", "is", "based", "." ]
python
train
cloud-custodian/cloud-custodian
c7n/policy.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/policy.py#L118-L123
def resource_types(self): """resource types used by the collection.""" rtypes = set() for p in self.policies: rtypes.add(p.resource_type) return rtypes
[ "def", "resource_types", "(", "self", ")", ":", "rtypes", "=", "set", "(", ")", "for", "p", "in", "self", ".", "policies", ":", "rtypes", ".", "add", "(", "p", ".", "resource_type", ")", "return", "rtypes" ]
resource types used by the collection.
[ "resource", "types", "used", "by", "the", "collection", "." ]
python
train
jquast/wcwidth
setup.py
https://github.com/jquast/wcwidth/blob/78800b68911880ef4ef95ae83886154710441871/setup.py#L271-L307
def main(): """Setup.py entry point.""" import codecs setuptools.setup( name='wcwidth', version='0.1.7', description=("Measures number of Terminal column cells " "of wide-character codes"), long_description=codecs.open( os.path.join(HERE, 'README.rst'), 'r', 'utf8').read(), author='Jeff Quast', author_email='[email protected]', license='MIT', packages=['wcwidth', 'wcwidth.tests'], url='https://github.com/jquast/wcwidth', include_package_data=True, test_suite='wcwidth.tests', zip_safe=True, classifiers=[ 'Intended Audience :: Developers', 'Natural Language :: English', 'Development Status :: 3 - Alpha', 'Environment :: Console', 'License :: OSI Approved :: MIT License', 'Operating System :: POSIX', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Localization', 'Topic :: Software Development :: Internationalization', 'Topic :: Terminals' ], keywords=['terminal', 'emulator', 'wcwidth', 'wcswidth', 'cjk', 'combining', 'xterm', 'console', ], cmdclass={'update': SetupUpdate}, )
[ "def", "main", "(", ")", ":", "import", "codecs", "setuptools", ".", "setup", "(", "name", "=", "'wcwidth'", ",", "version", "=", "'0.1.7'", ",", "description", "=", "(", "\"Measures number of Terminal column cells \"", "\"of wide-character codes\"", ")", ",", "long_description", "=", "codecs", ".", "open", "(", "os", ".", "path", ".", "join", "(", "HERE", ",", "'README.rst'", ")", ",", "'r'", ",", "'utf8'", ")", ".", "read", "(", ")", ",", "author", "=", "'Jeff Quast'", ",", "author_email", "=", "'[email protected]'", ",", "license", "=", "'MIT'", ",", "packages", "=", "[", "'wcwidth'", ",", "'wcwidth.tests'", "]", ",", "url", "=", "'https://github.com/jquast/wcwidth'", ",", "include_package_data", "=", "True", ",", "test_suite", "=", "'wcwidth.tests'", ",", "zip_safe", "=", "True", ",", "classifiers", "=", "[", "'Intended Audience :: Developers'", ",", "'Natural Language :: English'", ",", "'Development Status :: 3 - Alpha'", ",", "'Environment :: Console'", ",", "'License :: OSI Approved :: MIT License'", ",", "'Operating System :: POSIX'", ",", "'Programming Language :: Python :: 2.7'", ",", "'Programming Language :: Python :: 3.4'", ",", "'Programming Language :: Python :: 3.5'", ",", "'Topic :: Software Development :: Libraries'", ",", "'Topic :: Software Development :: Localization'", ",", "'Topic :: Software Development :: Internationalization'", ",", "'Topic :: Terminals'", "]", ",", "keywords", "=", "[", "'terminal'", ",", "'emulator'", ",", "'wcwidth'", ",", "'wcswidth'", ",", "'cjk'", ",", "'combining'", ",", "'xterm'", ",", "'console'", ",", "]", ",", "cmdclass", "=", "{", "'update'", ":", "SetupUpdate", "}", ",", ")" ]
Setup.py entry point.
[ "Setup", ".", "py", "entry", "point", "." ]
python
train
cfobel/si-prefix
si_prefix/__init__.py
https://github.com/cfobel/si-prefix/blob/274fdf47f65d87d0b7a2e3c80f267db63d042c59/si_prefix/__init__.py#L109-L125
def prefix(expof10): ''' Args: expof10 : Exponent of a power of 10 associated with an SI unit character. Returns: str : One of the characters in "yzafpnum kMGTPEZY". ''' prefix_levels = (len(SI_PREFIX_UNITS) - 1) // 2 si_level = expof10 // 3 if abs(si_level) > prefix_levels: raise ValueError("Exponent out of range of available prefixes.") return SI_PREFIX_UNITS[si_level + prefix_levels]
[ "def", "prefix", "(", "expof10", ")", ":", "prefix_levels", "=", "(", "len", "(", "SI_PREFIX_UNITS", ")", "-", "1", ")", "//", "2", "si_level", "=", "expof10", "//", "3", "if", "abs", "(", "si_level", ")", ">", "prefix_levels", ":", "raise", "ValueError", "(", "\"Exponent out range of available prefixes.\"", ")", "return", "SI_PREFIX_UNITS", "[", "si_level", "+", "prefix_levels", "]" ]
Args: expof10 : Exponent of a power of 10 associated with an SI unit character. Returns: str : One of the characters in "yzafpnum kMGTPEZY".
[ "Args", ":" ]
python
train
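Worked values for `prefix` above, following the index arithmetic in the code (prefix_levels is 8 for the 17-character unit string):

    prefix(9)    # 9 // 3 = 3; index 3 + 8 = 11 -> 'G' (giga)
    prefix(-6)   # -6 // 3 = -2; index 6 -> 'u' (micro)
    prefix(0)    # index 8 -> ' ' (no prefix)
    prefix(27)   # si_level 9 > 8 -> ValueError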
nickmckay/LiPD-utilities
Python/lipd/misc.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/misc.py#L505-L519
def path_type(path, target): """ Determine if given path is file, directory, or other. Compare with target to see if it's the type we wanted. :param str path: Path :param str target: Target type wanted :return bool: Path is what it claims to be (True) or mismatch (False) """ if os.path.isfile(path) and target == "file": return True elif os.path.isdir(path) and target == "directory": return True else: print("Error: Path given is not a {}: {}".format(target, path)) return False
[ "def", "path_type", "(", "path", ",", "target", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "path", ")", "and", "target", "==", "\"file\"", ":", "return", "True", "elif", "os", ".", "path", ".", "isdir", "(", "path", ")", "and", "target", "==", "\"directory\"", ":", "return", "True", "else", ":", "print", "(", "\"Error: Path given is not a {}: {}\"", ".", "format", "(", "target", ",", "path", ")", ")", "return", "False" ]
Determine if given path is file, directory, or other. Compare with target to see if it's the type we wanted. :param str path: Path :param str target: Target type wanted :return bool: True if the path is of the target type, False otherwise
[ "Determine", "if", "given", "path", "is", "file", "directory", "or", "other", ".", "Compare", "with", "target", "to", "see", "if", "it", "s", "the", "type", "we", "wanted", "." ]
python
train
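Small examples of the check above on a typical POSIX system:

    path_type('/etc/hosts', 'file')  # -> True
    path_type('/etc', 'file')        # prints an error message, -> False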
dmbee/seglearn
seglearn/pipe.py
https://github.com/dmbee/seglearn/blob/d8d7039e92c4c6571a70350c03298aceab8dbeec/seglearn/pipe.py#L310-L325
def decision_function(self, X): """ Apply transforms, and decision_function of the final estimator Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. Returns ------- y_score : array-like, shape = [n_samples, n_classes] """ Xt, _, _ = self._transform(X) return self._final_estimator.decision_function(Xt)
[ "def", "decision_function", "(", "self", ",", "X", ")", ":", "Xt", ",", "_", ",", "_", "=", "self", ".", "_transform", "(", "X", ")", "return", "self", ".", "_final_estimator", ".", "decision_function", "(", "Xt", ")" ]
Apply transforms, and decision_function of the final estimator Parameters ---------- X : iterable Data to predict on. Must fulfill input requirements of first step of the pipeline. Returns ------- y_score : array-like, shape = [n_samples, n_classes]
[ "Apply", "transforms", "and", "decision_function", "of", "the", "final", "estimator" ]
python
train
bitcraft/PyTMX
pytmx/pytmx.py
https://github.com/bitcraft/PyTMX/blob/3fb9788dd66ecfd0c8fa0e9f38c582337d89e1d9/pytmx/pytmx.py#L196-L213
def parse_properties(node): """ Parse a Tiled xml node and return a dict that represents a tiled "property" :param node: etree element :return: dict """ d = dict() for child in node.findall('properties'): for subnode in child.findall('property'): cls = None try: if "type" in subnode.keys(): module = importlib.import_module('builtins') cls = getattr(module, subnode.get("type")) except AttributeError: logger.info("Type {} not a built-in type. Defaulting to string-cast.".format(subnode.get("type"))) d[subnode.get('name')] = cls(subnode.get('value')) if cls is not None else subnode.get('value') return d
[ "def", "parse_properties", "(", "node", ")", ":", "d", "=", "dict", "(", ")", "for", "child", "in", "node", ".", "findall", "(", "'properties'", ")", ":", "for", "subnode", "in", "child", ".", "findall", "(", "'property'", ")", ":", "cls", "=", "None", "try", ":", "if", "\"type\"", "in", "subnode", ".", "keys", "(", ")", ":", "module", "=", "importlib", ".", "import_module", "(", "'builtins'", ")", "cls", "=", "getattr", "(", "module", ",", "subnode", ".", "get", "(", "\"type\"", ")", ")", "except", "AttributeError", ":", "logger", ".", "info", "(", "\"Type [} Not a built-in type. Defaulting to string-cast.\"", ")", "d", "[", "subnode", ".", "get", "(", "'name'", ")", "]", "=", "cls", "(", "subnode", ".", "get", "(", "'value'", ")", ")", "if", "cls", "is", "not", "None", "else", "subnode", ".", "get", "(", "'value'", ")", "return", "d" ]
Parse a Tiled xml node and return a dict that represents a tiled "property" :param node: etree element :return: dict
[ "Parse", "a", "Tiled", "xml", "node", "and", "return", "a", "dict", "that", "represents", "a", "tiled", "property" ]
python
train
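A worked sketch for `parse_properties` above on a hand-built Tiled-style node; the `type` attribute selects a builtin used to cast the value:

    import xml.etree.ElementTree as ET

    node = ET.fromstring(
        '<object><properties>'
        '<property name="hp" type="int" value="10"/>'
        '<property name="label" value="spawn"/>'
        '</properties></object>')
    parse_properties(node)  # -> {'hp': 10, 'label': 'spawn'}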
turicas/rows
rows/plugins/plugin_csv.py
https://github.com/turicas/rows/blob/c74da41ae9ed091356b803a64f8a30c641c5fc45/rows/plugins/plugin_csv.py#L139-L201
def export_to_csv( table, filename_or_fobj=None, encoding="utf-8", dialect=unicodecsv.excel, batch_size=100, callback=None, *args, **kwargs ): """Export a `rows.Table` to a CSV file. If a file-like object is provided it MUST be in binary mode, like in `open(filename, mode='wb')`. If not filename/fobj is provided, the function returns a string with CSV contents. """ # TODO: will work only if table.fields is OrderedDict # TODO: should use fobj? What about creating a method like json.dumps? return_data, should_close = False, None if filename_or_fobj is None: filename_or_fobj = BytesIO() return_data = should_close = True source = Source.from_file( filename_or_fobj, plugin_name="csv", mode="wb", encoding=encoding, should_close=should_close, ) # TODO: may use `io.BufferedWriter` instead of `ipartition` so user can # choose the real size (in Bytes) when to flush to the file system, instead # number of rows writer = unicodecsv.writer(source.fobj, encoding=encoding, dialect=dialect) if callback is None: for batch in ipartition(serialize(table, *args, **kwargs), batch_size): writer.writerows(batch) else: serialized = serialize(table, *args, **kwargs) writer.writerow(next(serialized)) # First, write the header total = 0 for batch in ipartition(serialized, batch_size): writer.writerows(batch) total += len(batch) callback(total) if return_data: source.fobj.seek(0) result = source.fobj.read() else: source.fobj.flush() result = source.fobj if source.should_close: source.fobj.close() return result
[ "def", "export_to_csv", "(", "table", ",", "filename_or_fobj", "=", "None", ",", "encoding", "=", "\"utf-8\"", ",", "dialect", "=", "unicodecsv", ".", "excel", ",", "batch_size", "=", "100", ",", "callback", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# TODO: will work only if table.fields is OrderedDict", "# TODO: should use fobj? What about creating a method like json.dumps?", "return_data", ",", "should_close", "=", "False", ",", "None", "if", "filename_or_fobj", "is", "None", ":", "filename_or_fobj", "=", "BytesIO", "(", ")", "return_data", "=", "should_close", "=", "True", "source", "=", "Source", ".", "from_file", "(", "filename_or_fobj", ",", "plugin_name", "=", "\"csv\"", ",", "mode", "=", "\"wb\"", ",", "encoding", "=", "encoding", ",", "should_close", "=", "should_close", ",", ")", "# TODO: may use `io.BufferedWriter` instead of `ipartition` so user can", "# choose the real size (in Bytes) when to flush to the file system, instead", "# number of rows", "writer", "=", "unicodecsv", ".", "writer", "(", "source", ".", "fobj", ",", "encoding", "=", "encoding", ",", "dialect", "=", "dialect", ")", "if", "callback", "is", "None", ":", "for", "batch", "in", "ipartition", "(", "serialize", "(", "table", ",", "*", "args", ",", "*", "*", "kwargs", ")", ",", "batch_size", ")", ":", "writer", ".", "writerows", "(", "batch", ")", "else", ":", "serialized", "=", "serialize", "(", "table", ",", "*", "args", ",", "*", "*", "kwargs", ")", "writer", ".", "writerow", "(", "next", "(", "serialized", ")", ")", "# First, write the header", "total", "=", "0", "for", "batch", "in", "ipartition", "(", "serialized", ",", "batch_size", ")", ":", "writer", ".", "writerows", "(", "batch", ")", "total", "+=", "len", "(", "batch", ")", "callback", "(", "total", ")", "if", "return_data", ":", "source", ".", "fobj", ".", "seek", "(", "0", ")", "result", "=", "source", ".", "fobj", ".", "read", "(", ")", "else", ":", "source", ".", "fobj", ".", "flush", "(", ")", "result", "=", "source", ".", "fobj", "if", "source", ".", "should_close", ":", "source", ".", "fobj", ".", "close", "(", ")", "return", "result" ]
Export a `rows.Table` to a CSV file. If a file-like object is provided it MUST be in binary mode, like in `open(filename, mode='wb')`. If no filename/fobj is provided, the function returns a string with the CSV contents.
[ "Export", "a", "rows", ".", "Table", "to", "a", "CSV", "file", "." ]
python
train
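A minimal usage sketch for the `export_to_csv` record above, assuming the `rows` package is installed; the field names and sample rows are invented for illustration:

from collections import OrderedDict

import rows
from rows import fields

# Build a small in-memory table (fields should be an OrderedDict, per the TODO above).
table = rows.Table(fields=OrderedDict([('name', fields.TextField),
                                       ('age', fields.IntegerField)]))
table.append({'name': 'Alice', 'age': 30})
table.append({'name': 'Bob', 'age': 25})

# With no filename/fobj, export_to_csv returns the CSV contents as bytes.
csv_bytes = rows.export_to_csv(table)
print(csv_bytes.decode('utf-8'))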
edx/edx-django-release-util
scripts/update_repos_version.py
https://github.com/edx/edx-django-release-util/blob/de0fde41d6a19885ab7dc309472b94fd0fccbc1d/scripts/update_repos_version.py#L55-L153
def bump_repos_version(module_name, new_version, local_only): """ Changes the pinned version number in the requirements files of all repos which have the specified Python module as a dependency. This script assumes that GITHUB_TOKEN is set for GitHub authentication. """ # Make the cloning directory and change directories into it. tmp_dir = tempfile.mkdtemp(dir=os.getcwd()) # Iterate through each repository. for owner, repo_name in REPOS_TO_CHANGE: repo_url = REPO_URL_FORMAT.format(owner, repo_name) gh = GitHubApiUtils(owner, repo_name) os.chdir(tmp_dir) # Clone the repo. ret_code = subprocess.call(['git', 'clone', '{}.git'.format(repo_url)]) if ret_code: logging.error('Failed to clone repo {}'.format(repo_url)) continue # Change into the cloned repo dir. os.chdir(repo_name) # Create a branch, using the version number. branch_name = '{}/{}'.format(module_name, new_version) ret_code = subprocess.call(['git', 'checkout', '-b', branch_name]) if ret_code: logging.error('Failed to create branch in repo {}'.format(repo_url)) continue # Search through all TXT files to find all lines with the module name, changing the pinned version. files_changed = False for root, _dirs, files in os.walk('.'): for file in files: if file.endswith('.txt') and (('requirements' in file) or ('requirements' in root)): found = False filepath = os.path.join(root, file) with open(filepath) as f: if '{}=='.format(module_name) in f.read(): found = True if found: files_changed = True # Change the file in-place. for line in fileinput.input(filepath, inplace=True): if '{}=='.format(module_name) in line: print '{}=={}'.format(module_name, new_version) else: print line, if not files_changed: # Module name wasn't found in the requirements files. logging.info("Module name '{}' not found in repo {} - skipping.".format(module_name, repo_url)) continue # Add/commit the files. ret_code = subprocess.call(['git', 'commit', '-am', 'Updating {} requirement to version {}'.format(module_name, new_version)]) if ret_code: logging.error("Failed to add and commit changed files to repo {}".format(repo_url)) continue if local_only: # For local_only, don't push the branch to the remote and create the PR - leave all changes local for review. continue # Push the branch. ret_code = subprocess.call(['git', 'push', '--set-upstream', 'origin', branch_name]) if ret_code: logging.error("Failed to push branch {} upstream for repo {}".format(branch_name, repo_url)) continue # Create a PR with an automated message. rollback_branch_push = False try: # The GitHub "mention" below does not work via the API - unfortunately... response = gh.create_pull( title='Change {} version.'.format(module_name), body='Change the required version of {} to {}.\n\n@edx-ops/pipeline-team Please review and tag appropriate parties.'.format(module_name, new_version), head=branch_name, base='master' ) except: logging.error('Failed to create PR for repo {} - did you set GITHUB_TOKEN?'.format(repo_url)) rollback_branch_push = True else: logging.info('Created PR #{} for repo {}: {}'.format(response.number, repo_url, response.html_url)) if rollback_branch_push: # Since the PR creation failed, delete the branch in the remote repo as well. ret_code = subprocess.call(['git', 'push', 'origin', '--delete', branch_name]) if ret_code: logging.error("ROLLBACK: Failed to delete upstream branch {} for repo {}".format(branch_name, repo_url)) if not local_only: # Remove the temp directory containing all the cloned repos. shutil.rmtree(tmp_dir)
[ "def", "bump_repos_version", "(", "module_name", ",", "new_version", ",", "local_only", ")", ":", "# Make the cloning directory and change directories into it.", "tmp_dir", "=", "tempfile", ".", "mkdtemp", "(", "dir", "=", "os", ".", "getcwd", "(", ")", ")", "# Iterate through each repository.", "for", "owner", ",", "repo_name", "in", "REPOS_TO_CHANGE", ":", "repo_url", "=", "REPO_URL_FORMAT", ".", "format", "(", "owner", ",", "repo_name", ")", "gh", "=", "GitHubApiUtils", "(", "owner", ",", "repo_name", ")", "os", ".", "chdir", "(", "tmp_dir", ")", "# Clone the repo.", "ret_code", "=", "subprocess", ".", "call", "(", "[", "'git'", ",", "'clone'", ",", "'{}.git'", ".", "format", "(", "repo_url", ")", "]", ")", "if", "ret_code", ":", "logging", ".", "error", "(", "'Failed to clone repo {}'", ".", "format", "(", "repo_url", ")", ")", "continue", "# Change into the cloned repo dir.", "os", ".", "chdir", "(", "repo_name", ")", "# Create a branch, using the version number.", "branch_name", "=", "'{}/{}'", ".", "format", "(", "module_name", ",", "new_version", ")", "ret_code", "=", "subprocess", ".", "call", "(", "[", "'git'", ",", "'checkout'", ",", "'-b'", ",", "branch_name", "]", ")", "if", "ret_code", ":", "logging", ".", "error", "(", "'Failed to create branch in repo {}'", ".", "format", "(", "repo_url", ")", ")", "continue", "# Search through all TXT files to find all lines with the module name, changing the pinned version.", "files_changed", "=", "False", "for", "root", ",", "_dirs", ",", "files", "in", "os", ".", "walk", "(", "'.'", ")", ":", "for", "file", "in", "files", ":", "if", "file", ".", "endswith", "(", "'.txt'", ")", "and", "(", "(", "'requirements'", "in", "file", ")", "or", "(", "'requirements'", "in", "root", ")", ")", ":", "found", "=", "False", "filepath", "=", "os", ".", "path", ".", "join", "(", "root", ",", "file", ")", "with", "open", "(", "filepath", ")", "as", "f", ":", "if", "'{}=='", ".", "format", "(", "module_name", ")", "in", "f", ".", "read", "(", ")", ":", "found", "=", "True", "if", "found", ":", "files_changed", "=", "True", "# Change the file in-place.", "for", "line", "in", "fileinput", ".", "input", "(", "filepath", ",", "inplace", "=", "True", ")", ":", "if", "'{}=='", ".", "format", "(", "module_name", ")", "in", "line", ":", "print", "'{}=={}'", ".", "format", "(", "module_name", ",", "new_version", ")", "else", ":", "print", "line", ",", "if", "not", "files_changed", ":", "# Module name wasn't found in the requirements files.", "logging", ".", "info", "(", "\"Module name '{}' not found in repo {} - skipping.\"", ".", "format", "(", "module_name", ",", "repo_url", ")", ")", "continue", "# Add/commit the files.", "ret_code", "=", "subprocess", ".", "call", "(", "[", "'git'", ",", "'commit'", ",", "'-am'", ",", "'Updating {} requirement to version {}'", ".", "format", "(", "module_name", ",", "new_version", ")", "]", ")", "if", "ret_code", ":", "logging", ".", "error", "(", "\"Failed to add and commit changed files to repo {}\"", ".", "format", "(", "repo_url", ")", ")", "continue", "if", "local_only", ":", "# For local_only, don't push the branch to the remote and create the PR - leave all changes local for review.", "continue", "# Push the branch.", "ret_code", "=", "subprocess", ".", "call", "(", "[", "'git'", ",", "'push'", ",", "'--set-upstream'", ",", "'origin'", ",", "branch_name", "]", ")", "if", "ret_code", ":", "logging", ".", "error", "(", "\"Failed to push branch {} upstream for repo {}\"", ".", "format", "(", "branch_name", ",", 
"repo_url", ")", ")", "continue", "# Create a PR with an automated message.", "rollback_branch_push", "=", "False", "try", ":", "# The GitHub \"mention\" below does not work via the API - unfortunately...", "response", "=", "gh", ".", "create_pull", "(", "title", "=", "'Change {} version.'", ".", "format", "(", "module_name", ")", ",", "body", "=", "'Change the required version of {} to {}.\\n\\n@edx-ops/pipeline-team Please review and tag appropriate parties.'", ".", "format", "(", "module_name", ",", "new_version", ")", ",", "head", "=", "branch_name", ",", "base", "=", "'master'", ")", "except", ":", "logging", ".", "error", "(", "'Failed to create PR for repo {} - did you set GITHUB_TOKEN?'", ".", "format", "(", "repo_url", ")", ")", "rollback_branch_push", "=", "True", "else", ":", "logging", ".", "info", "(", "'Created PR #{} for repo {}: {}'", ".", "format", "(", "response", ".", "number", ",", "repo_url", ",", "response", ".", "html_url", ")", ")", "if", "rollback_branch_push", ":", "# Since the PR creation failed, delete the branch in the remote repo as well.", "ret_code", "=", "subprocess", ".", "call", "(", "[", "'git'", ",", "'push'", ",", "'origin'", ",", "'--delete'", ",", "branch_name", "]", ")", "if", "ret_code", ":", "logging", ".", "error", "(", "\"ROLLBACK: Failed to delete upstream branch {} for repo {}\"", ".", "format", "(", "branch_name", ",", "repo_url", ")", ")", "if", "not", "local_only", ":", "# Remove the temp directory containing all the cloned repos.", "shutil", ".", "rmtree", "(", "tmp_dir", ")" ]
Changes the pinned version number in the requirements files of all repos which have the specified Python module as a dependency. This script assumes that GITHUB_TOKEN is set for GitHub authentication.
[ "Changes", "the", "pinned", "version", "number", "in", "the", "requirements", "files", "of", "all", "repos", "which", "have", "the", "specified", "Python", "module", "as", "a", "dependency", "." ]
python
train
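A hedged sketch of how `bump_repos_version` might be driven; `REPOS_TO_CHANGE`, `REPO_URL_FORMAT` and `GitHubApiUtils` are module-level globals of the script, the module/version values below are invented, and note that the function body uses Python 2 print statements, so it must run under Python 2:

import os

# GITHUB_TOKEN is required for PR creation; local_only=True skips the push/PR
# step and leaves the edited clones on disk for review.
os.environ.setdefault('GITHUB_TOKEN', '<token>')
bump_repos_version('edx-django-release-util', '0.5.1', local_only=True)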
zerotk/easyfs
zerotk/easyfs/_easyfs.py
https://github.com/zerotk/easyfs/blob/140923db51fb91d5a5847ad17412e8bce51ba3da/zerotk/easyfs/_easyfs.py#L802-L837
def MoveDirectory(source_dir, target_dir): ''' Moves a directory. :param unicode source_dir: :param unicode target_dir: :raises NotImplementedError: If trying to move anything other than: Local dir -> local dir FTP dir -> FTP dir (same host) ''' if not IsDir(source_dir): from ._exceptions import DirectoryNotFoundError raise DirectoryNotFoundError(source_dir) if Exists(target_dir): from ._exceptions import DirectoryAlreadyExistsError raise DirectoryAlreadyExistsError(target_dir) from six.moves.urllib.parse import urlparse source_url = urlparse(source_dir) target_url = urlparse(target_dir) # Local to local if _UrlIsLocal(source_url) and _UrlIsLocal(target_url): import shutil shutil.move(source_dir, target_dir) # FTP to FTP elif source_url.scheme == 'ftp' and target_url.scheme == 'ftp': from ._exceptions import NotImplementedProtocol raise NotImplementedProtocol(target_url.scheme) else: raise NotImplementedError('Can only move directories local->local or ftp->ftp')
[ "def", "MoveDirectory", "(", "source_dir", ",", "target_dir", ")", ":", "if", "not", "IsDir", "(", "source_dir", ")", ":", "from", ".", "_exceptions", "import", "DirectoryNotFoundError", "raise", "DirectoryNotFoundError", "(", "source_dir", ")", "if", "Exists", "(", "target_dir", ")", ":", "from", ".", "_exceptions", "import", "DirectoryAlreadyExistsError", "raise", "DirectoryAlreadyExistsError", "(", "target_dir", ")", "from", "six", ".", "moves", ".", "urllib", ".", "parse", "import", "urlparse", "source_url", "=", "urlparse", "(", "source_dir", ")", "target_url", "=", "urlparse", "(", "target_dir", ")", "# Local to local", "if", "_UrlIsLocal", "(", "source_url", ")", "and", "_UrlIsLocal", "(", "target_url", ")", ":", "import", "shutil", "shutil", ".", "move", "(", "source_dir", ",", "target_dir", ")", "# FTP to FTP", "elif", "source_url", ".", "scheme", "==", "'ftp'", "and", "target_url", ".", "scheme", "==", "'ftp'", ":", "from", ".", "_exceptions", "import", "NotImplementedProtocol", "raise", "NotImplementedProtocol", "(", "target_url", ".", "scheme", ")", "else", ":", "raise", "NotImplementedError", "(", "'Can only move directories local->local or ftp->ftp'", ")" ]
Moves a directory. :param unicode source_dir: :param unicode target_dir: :raises NotImplementedError: If trying to move anything other than: Local dir -> local dir FTP dir -> FTP dir (same host)
[ "Moves", "a", "directory", "." ]
python
valid
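A quick local-to-local sketch for `MoveDirectory`; the paths are illustrative and the top-level import is an assumption about the package's public API:

import os

from zerotk.easyfs import MoveDirectory

os.makedirs('/tmp/easyfs_demo/source')
MoveDirectory('/tmp/easyfs_demo/source', '/tmp/easyfs_demo/target')
# Raises DirectoryAlreadyExistsError if the target already exists, and
# NotImplementedProtocol/NotImplementedError for unsupported URL schemes.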
koalalorenzo/python-digitalocean
digitalocean/Volume.py
https://github.com/koalalorenzo/python-digitalocean/blob/d0221b57856fb1e131cafecf99d826f7b07a947c/digitalocean/Volume.py#L74-L108
def create_from_snapshot(self, *args, **kwargs): """ Creates a Block Storage volume Note: Every argument and parameter given to this method will be assigned to the object. Args: name: string - a name for the volume snapshot_id: string - unique identifier for the volume snapshot size_gigabytes: int - size of the Block Storage volume in GiB filesystem_type: string, optional - name of the filesystem type the volume will be formated with ('ext4' or 'xfs') filesystem_label: string, optional - the label to be applied to the filesystem, only used in conjunction with filesystem_type Optional Args: description: string - text field to describe a volume """ data = self.get_data('volumes/', type=POST, params={'name': self.name, 'snapshot_id': self.snapshot_id, 'region': self.region, 'size_gigabytes': self.size_gigabytes, 'description': self.description, 'filesystem_type': self.filesystem_type, 'filesystem_label': self.filesystem_label }) if data: self.id = data['volume']['id'] self.created_at = data['volume']['created_at'] return self
[ "def", "create_from_snapshot", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "data", "=", "self", ".", "get_data", "(", "'volumes/'", ",", "type", "=", "POST", ",", "params", "=", "{", "'name'", ":", "self", ".", "name", ",", "'snapshot_id'", ":", "self", ".", "snapshot_id", ",", "'region'", ":", "self", ".", "region", ",", "'size_gigabytes'", ":", "self", ".", "size_gigabytes", ",", "'description'", ":", "self", ".", "description", ",", "'filesystem_type'", ":", "self", ".", "filesystem_type", ",", "'filesystem_label'", ":", "self", ".", "filesystem_label", "}", ")", "if", "data", ":", "self", ".", "id", "=", "data", "[", "'volume'", "]", "[", "'id'", "]", "self", ".", "created_at", "=", "data", "[", "'volume'", "]", "[", "'created_at'", "]", "return", "self" ]
Creates a Block Storage volume Note: Every argument and parameter given to this method will be assigned to the object. Args: name: string - a name for the volume snapshot_id: string - unique identifier for the volume snapshot size_gigabytes: int - size of the Block Storage volume in GiB filesystem_type: string, optional - name of the filesystem type the volume will be formatted with ('ext4' or 'xfs') filesystem_label: string, optional - the label to be applied to the filesystem, only used in conjunction with filesystem_type Optional Args: description: string - text field to describe a volume
[ "Creates", "a", "Block", "Storage", "volume" ]
python
valid
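A hedged sketch of restoring a volume from a snapshot with python-digitalocean; the token, snapshot id and region are placeholders, and passing attributes through the constructor follows the library's usual kwargs-to-attributes pattern:

import digitalocean

volume = digitalocean.Volume(token='<api-token>',
                             name='restored-volume',
                             snapshot_id='<snapshot-id>',
                             region='nyc1',
                             size_gigabytes=10,
                             description='restored from snapshot',
                             filesystem_type='ext4',
                             filesystem_label='restored')
volume.create_from_snapshot()
print(volume.id, volume.created_at)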
rjdkmr/do_x3dna
dnaMD/dnaMD/dnaMD.py
https://github.com/rjdkmr/do_x3dna/blob/fe910335eefcada76737f9e7cd6f25036cd32ab6/dnaMD/dnaMD/dnaMD.py#L481-L581
def parameter_distribution(self, parameter, bp, bins=30, merge=False, merge_method='mean', masked=False): """To get the parameter distribution of either a specific base-pair/step or a DNA segment over the MD simulation. parameters ---------- parameter : str Name of a base-pair or base-step or helical parameter For details about accepted keywords, see ``parameter`` in the method :meth:`DNA.get_parameters`. bp : 1D list or array base-pairs to analyze Example: :: bp = [6] # merge = False bp = [4,15] # merge = True bins int Number of bins to calculate histogram merge : bool ``Default=False``: As shown above, if ``True``, bp should a list of range otherwise a list of single value. If ``bp = True``, the parameter for the respective DNA segment could be merged or calculated by ``merge_method``. merge_method : str Method to calculate the parameter of a DNA segment from local parameters of all base-pairs/steps that are between the range given through ``bp``. Currently accepted keywords are as follows: * ``merge_method = mean``: Average of local parameters * ``merge_method = sum``: Sum of local parameters masked : bool ``Default=False``. To skip specific frames/snapshots. ``DNA.mask`` array should be set to use this functionality. This array contains boolean (either ``True`` or ``False``) value for each frame to mask the frames. Presently, mask array is automatically generated during :meth:`DNA.generate_smooth_axis` to skip those frames where 3D fitting curve was not successful within the given criteria. Returns ------- values : 1D array Array containing parameter values density : 1D array Array containing density for respective parameter values """ if not (isinstance(bp, list) or isinstance(bp, np.ndarray)): raise AssertionError( "type %s is not list or np.ndarray" % type(bp)) if (len(bp) > 1) and (merge == False): raise AssertionError( "bp %s contains more than two values, whereas merge=False. Use either one value in bp or merge=True" % bp) exit(1) if len(bp) == 1: merge = False if (merge == True) and not ((merge_method == 'mean') or (merge_method == 'sum')): raise AssertionError( "merge method %s is not available." % merge_method) exit(1) if len(bp) == 1: param_value, bp_idx = self.get_parameters( parameter, bp, bp_range=False, masked=masked) else: param_value, bp_idx = self.get_parameters( parameter, bp, bp_range=True, masked=masked) if (merge == True) and (merge_method == 'mean'): param_value = np.mean(param_value, axis=0) elif (merge == True) and (merge_method == 'sum'): param_value = np.sum(param_value, axis=0) else: param_value = param_value[0] density, bin_edges = np.histogram(param_value, bins=bins, density=True) bin_width = bin_edges[1] - bin_edges[0] density = np.insert(density, 0, 0.0) density = np.append(density, 0.0) values = [] for i in range(len(bin_edges) - 1): values.append((bin_edges[i] + bin_edges[i + 1]) / 2) values = np.asarray(values) values = np.append(values, values[-1] + bin_width) values = np.insert(values, 0, values[0] - bin_width) return np.array(values), density
[ "def", "parameter_distribution", "(", "self", ",", "parameter", ",", "bp", ",", "bins", "=", "30", ",", "merge", "=", "False", ",", "merge_method", "=", "'mean'", ",", "masked", "=", "False", ")", ":", "if", "not", "(", "isinstance", "(", "bp", ",", "list", ")", "or", "isinstance", "(", "bp", ",", "np", ".", "ndarray", ")", ")", ":", "raise", "AssertionError", "(", "\"type %s is not list or np.ndarray\"", "%", "type", "(", "bp", ")", ")", "if", "(", "len", "(", "bp", ")", ">", "1", ")", "and", "(", "merge", "==", "False", ")", ":", "raise", "AssertionError", "(", "\"bp %s contains more than two values, whereas merge=False. Use either one value in bp or merge=True\"", "%", "bp", ")", "exit", "(", "1", ")", "if", "len", "(", "bp", ")", "==", "1", ":", "merge", "=", "False", "if", "(", "merge", "==", "True", ")", "and", "not", "(", "(", "merge_method", "==", "'mean'", ")", "or", "(", "merge_method", "==", "'sum'", ")", ")", ":", "raise", "AssertionError", "(", "\"merge method %s is not available.\"", "%", "merge_method", ")", "exit", "(", "1", ")", "if", "len", "(", "bp", ")", "==", "1", ":", "param_value", ",", "bp_idx", "=", "self", ".", "get_parameters", "(", "parameter", ",", "bp", ",", "bp_range", "=", "False", ",", "masked", "=", "masked", ")", "else", ":", "param_value", ",", "bp_idx", "=", "self", ".", "get_parameters", "(", "parameter", ",", "bp", ",", "bp_range", "=", "True", ",", "masked", "=", "masked", ")", "if", "(", "merge", "==", "True", ")", "and", "(", "merge_method", "==", "'mean'", ")", ":", "param_value", "=", "np", ".", "mean", "(", "param_value", ",", "axis", "=", "0", ")", "elif", "(", "merge", "==", "True", ")", "and", "(", "merge_method", "==", "'sum'", ")", ":", "param_value", "=", "np", ".", "sum", "(", "param_value", ",", "axis", "=", "0", ")", "else", ":", "param_value", "=", "param_value", "[", "0", "]", "density", ",", "bin_edges", "=", "np", ".", "histogram", "(", "param_value", ",", "bins", "=", "bins", ",", "density", "=", "True", ")", "bin_width", "=", "bin_edges", "[", "1", "]", "-", "bin_edges", "[", "0", "]", "density", "=", "np", ".", "insert", "(", "density", ",", "0", ",", "0.0", ")", "density", "=", "np", ".", "append", "(", "density", ",", "0.0", ")", "values", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "bin_edges", ")", "-", "1", ")", ":", "values", ".", "append", "(", "(", "bin_edges", "[", "i", "]", "+", "bin_edges", "[", "i", "+", "1", "]", ")", "/", "2", ")", "values", "=", "np", ".", "asarray", "(", "values", ")", "values", "=", "np", ".", "append", "(", "values", ",", "values", "[", "-", "1", "]", "+", "bin_width", ")", "values", "=", "np", ".", "insert", "(", "values", ",", "0", ",", "values", "[", "0", "]", "-", "bin_width", ")", "return", "np", ".", "array", "(", "values", ")", ",", "density" ]
To get the parameter distribution of either a specific base-pair/step or a DNA segment over the MD simulation.

Parameters
----------
parameter : str
    Name of a base-pair or base-step or helical parameter. For details about accepted keywords, see ``parameter`` in the method :meth:`DNA.get_parameters`.

bp : 1D list or array
    base-pairs to analyze
    Example: ::

        bp = [6]      # merge = False
        bp = [4,15]   # merge = True

bins : int
    Number of bins to calculate histogram

merge : bool
    ``Default=False``: As shown above, if ``True``, bp should be a list of range, otherwise a list of a single value. If ``merge = True``, the parameter for the respective DNA segment could be merged or calculated by ``merge_method``.

merge_method : str
    Method to calculate the parameter of a DNA segment from local parameters of all base-pairs/steps that are between the range given through ``bp``. Currently accepted keywords are as follows:

    * ``merge_method = mean``: Average of local parameters
    * ``merge_method = sum``: Sum of local parameters

masked : bool
    ``Default=False``. To skip specific frames/snapshots. ``DNA.mask`` array should be set to use this functionality. This array contains boolean (either ``True`` or ``False``) value for each frame to mask the frames. Presently, mask array is automatically generated during :meth:`DNA.generate_smooth_axis` to skip those frames where 3D fitting curve was not successful within the given criteria.

Returns
-------
values : 1D array
    Array containing parameter values

density : 1D array
    Array containing density for respective parameter values
[ "To", "get", "the", "parameter", "distribution", "of", "either", "a", "specific", "base", "-", "pair", "/", "step", "or", "a", "DNA", "segment", "over", "the", "MD", "simulation", "." ]
python
train
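A hedged sketch of calling `parameter_distribution`; the setup call, parameter file name and the 'rise' keyword are assumptions based on do_x3dna's typical workflow and may differ in your version:

from dnaMD import DNA

dna = DNA(60)  # hypothetical 60 base-pair DNA object

# Assumed loader for base-step parameters from a do_x3dna output file.
dna.set_base_step_parameters('L-BPS_cdm.dat', bp_step=[1, 59],
                             parameters=['rise'], step_range=True)

# Distribution of rise summed over base-steps 20-30 of the segment.
values, density = dna.parameter_distribution('rise', bp=[20, 30], bins=30,
                                             merge=True, merge_method='sum')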
pypa/setuptools
setuptools/monkey.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/monkey.py#L132-L179
def patch_for_msvc_specialized_compiler():
    """
    Patch functions in distutils to use standalone Microsoft Visual C++
    compilers.
    """
    # import late to avoid circular imports on Python < 3.5
    msvc = import_module('setuptools.msvc')

    if platform.system() != 'Windows':
        # Compilers are only available on Microsoft Windows
        return

    def patch_params(mod_name, func_name):
        """
        Prepare the parameters for patch_func to patch indicated function.
        """
        repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_'
        repl_name = repl_prefix + func_name.lstrip('_')
        repl = getattr(msvc, repl_name)
        mod = import_module(mod_name)
        if not hasattr(mod, func_name):
            raise ImportError(func_name)
        return repl, mod, func_name

    # Python 2.7 to 3.4
    msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler')

    # Python 3.5+
    msvc14 = functools.partial(patch_params, 'distutils._msvccompiler')

    try:
        # Patch distutils.msvc9compiler
        patch_func(*msvc9('find_vcvarsall'))
        patch_func(*msvc9('query_vcvarsall'))
    except ImportError:
        pass

    try:
        # Patch distutils._msvccompiler._get_vc_env
        patch_func(*msvc14('_get_vc_env'))
    except ImportError:
        pass

    try:
        # Patch distutils._msvccompiler.gen_lib_options for Numpy
        patch_func(*msvc14('gen_lib_options'))
    except ImportError:
        pass
[ "def", "patch_for_msvc_specialized_compiler", "(", ")", ":", "# import late to avoid circular imports on Python < 3.5", "msvc", "=", "import_module", "(", "'setuptools.msvc'", ")", "if", "platform", ".", "system", "(", ")", "!=", "'Windows'", ":", "# Compilers only availables on Microsoft Windows", "return", "def", "patch_params", "(", "mod_name", ",", "func_name", ")", ":", "\"\"\"\n Prepare the parameters for patch_func to patch indicated function.\n \"\"\"", "repl_prefix", "=", "'msvc9_'", "if", "'msvc9'", "in", "mod_name", "else", "'msvc14_'", "repl_name", "=", "repl_prefix", "+", "func_name", ".", "lstrip", "(", "'_'", ")", "repl", "=", "getattr", "(", "msvc", ",", "repl_name", ")", "mod", "=", "import_module", "(", "mod_name", ")", "if", "not", "hasattr", "(", "mod", ",", "func_name", ")", ":", "raise", "ImportError", "(", "func_name", ")", "return", "repl", ",", "mod", ",", "func_name", "# Python 2.7 to 3.4", "msvc9", "=", "functools", ".", "partial", "(", "patch_params", ",", "'distutils.msvc9compiler'", ")", "# Python 3.5+", "msvc14", "=", "functools", ".", "partial", "(", "patch_params", ",", "'distutils._msvccompiler'", ")", "try", ":", "# Patch distutils.msvc9compiler", "patch_func", "(", "*", "msvc9", "(", "'find_vcvarsall'", ")", ")", "patch_func", "(", "*", "msvc9", "(", "'query_vcvarsall'", ")", ")", "except", "ImportError", ":", "pass", "try", ":", "# Patch distutils._msvccompiler._get_vc_env", "patch_func", "(", "*", "msvc14", "(", "'_get_vc_env'", ")", ")", "except", "ImportError", ":", "pass", "try", ":", "# Patch distutils._msvccompiler.gen_lib_options for Numpy", "patch_func", "(", "*", "msvc14", "(", "'gen_lib_options'", ")", ")", "except", "ImportError", ":", "pass" ]
Patch functions in distutils to use standalone Microsoft Visual C++ compilers.
[ "Patch", "functions", "in", "distutils", "to", "use", "standalone", "Microsoft", "Visual", "C", "++", "compilers", "." ]
python
train
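The patch function takes no arguments and is applied for its side effects; setuptools normally invokes it internally, but a manual call would look like:

import platform

from setuptools.monkey import patch_for_msvc_specialized_compiler

if platform.system() == 'Windows':
    # Swap distutils' MSVC lookup helpers for the setuptools.msvc versions.
    patch_for_msvc_specialized_compiler()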
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L5702-L5705
def resolveURI(self, URI):
    """Do a complete resolution lookup of a URI """
    ret = libxml2mod.xmlACatalogResolveURI(self._o, URI)
    return ret
[ "def", "resolveURI", "(", "self", ",", "URI", ")", ":", "ret", "=", "libxml2mod", ".", "xmlACatalogResolveURI", "(", "self", ".", "_o", ",", "URI", ")", "return", "ret" ]
Do a complete resolution lookup of a URI
[ "Do", "a", "complete", "resolution", "lookup", "of", "an", "URI" ]
python
train
jic-dtool/dtoolcore
dtoolcore/storagebroker.py
https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/storagebroker.py#L273-L278
def put_overlay(self, overlay_name, overlay): """Store the overlay.""" logger.debug("Putting overlay: {}".format(overlay_name)) key = self.get_overlay_key(overlay_name) text = json.dumps(overlay, indent=2) self.put_text(key, text)
[ "def", "put_overlay", "(", "self", ",", "overlay_name", ",", "overlay", ")", ":", "logger", ".", "debug", "(", "\"Putting overlay: {}\"", ".", "format", "(", "overlay_name", ")", ")", "key", "=", "self", ".", "get_overlay_key", "(", "overlay_name", ")", "text", "=", "json", ".", "dumps", "(", "overlay", ",", "indent", "=", "2", ")", "self", ".", "put_text", "(", "key", ",", "text", ")" ]
Store the overlay.
[ "Store", "the", "overlay", "." ]
python
train
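A short sketch of `put_overlay` via the higher-level dtoolcore API; the dataset URI is a placeholder, and the overlay keys must match the dataset's item identifiers:

from dtoolcore import DataSet

dataset = DataSet.from_uri('file:///tmp/example_dataset')

# Boolean overlay: which items are CSV files?
is_csv = {i: dataset.item_properties(i)['relpath'].endswith('.csv')
          for i in dataset.identifiers}
dataset.put_overlay('is_csv', is_csv)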
gholt/swiftly
swiftly/cli/get.py
https://github.com/gholt/swiftly/blob/5bcc1c65323b1caf1f85adbefd9fc4988c072149/swiftly/cli/get.py#L79-L161
def cli_get_account_listing(context): """ Performs a GET on the account as a listing request. See :py:mod:`swiftly.cli.get` for context usage information. See :py:class:`CLIGet` for more information. """ limit = context.query.get('limit') delimiter = context.query.get('delimiter') prefix = context.query.get('prefix') marker = context.query.get('marker') end_marker = context.query.get('end_marker') if context.raw: with context.client_manager.with_client() as client: status, reason, headers, contents = client.get_account( decode_json=False, headers=context.headers, limit=limit, marker=marker, end_marker=end_marker, query=context.query, cdn=context.cdn) if hasattr(contents, 'read'): contents = contents.read() if status // 100 != 2: if status == 404 and context.ignore_404: return raise ReturnCode('listing account: %s %s' % (status, reason)) with context.io_manager.with_stdout() as fp: if context.output_headers: context.write_headers( fp, headers, context.muted_account_headers) fp.write(contents) fp.flush() return with context.client_manager.with_client() as client: status, reason, headers, contents = client.get_account( headers=context.headers, limit=limit, marker=marker, end_marker=end_marker, query=context.query, cdn=context.cdn) if status // 100 != 2: if status == 404 and context.ignore_404: return if hasattr(contents, 'read'): contents.read() raise ReturnCode('listing account: %s %s' % (status, reason)) if context.output_headers and not context.all_objects: with context.io_manager.with_stdout() as fp: context.write_headers( fp, headers, context.muted_account_headers) while contents: if context.all_objects: new_context = context.copy() new_context.query = dict(new_context.query) for remove in ( 'limit', 'delimiter', 'prefix', 'marker', 'end_marker'): if remove in new_context.query: del new_context.query[remove] for item in contents: if 'name' in item: new_path = item['name'].encode('utf8') cli_get_container_listing(new_context, new_path) else: with context.io_manager.with_stdout() as fp: for item in contents: if context.full: fp.write('%13s %13s ' % ( item.get('bytes', '-'), item.get('count', '-'))) fp.write(item.get( 'name', item.get('subdir'))) fp.write('\n') fp.flush() if limit: break marker = contents[-1].get('name', contents[-1].get('subdir', '')) with context.client_manager.with_client() as client: status, reason, headers, contents = client.get_account( headers=context.headers, limit=limit, delimiter=delimiter, prefix=prefix, end_marker=end_marker, marker=marker, query=context.query, cdn=context.cdn) if status // 100 != 2: if status == 404 and context.ignore_404: return if hasattr(contents, 'read'): contents.read() raise ReturnCode('listing account: %s %s' % (status, reason))
[ "def", "cli_get_account_listing", "(", "context", ")", ":", "limit", "=", "context", ".", "query", ".", "get", "(", "'limit'", ")", "delimiter", "=", "context", ".", "query", ".", "get", "(", "'delimiter'", ")", "prefix", "=", "context", ".", "query", ".", "get", "(", "'prefix'", ")", "marker", "=", "context", ".", "query", ".", "get", "(", "'marker'", ")", "end_marker", "=", "context", ".", "query", ".", "get", "(", "'end_marker'", ")", "if", "context", ".", "raw", ":", "with", "context", ".", "client_manager", ".", "with_client", "(", ")", "as", "client", ":", "status", ",", "reason", ",", "headers", ",", "contents", "=", "client", ".", "get_account", "(", "decode_json", "=", "False", ",", "headers", "=", "context", ".", "headers", ",", "limit", "=", "limit", ",", "marker", "=", "marker", ",", "end_marker", "=", "end_marker", ",", "query", "=", "context", ".", "query", ",", "cdn", "=", "context", ".", "cdn", ")", "if", "hasattr", "(", "contents", ",", "'read'", ")", ":", "contents", "=", "contents", ".", "read", "(", ")", "if", "status", "//", "100", "!=", "2", ":", "if", "status", "==", "404", "and", "context", ".", "ignore_404", ":", "return", "raise", "ReturnCode", "(", "'listing account: %s %s'", "%", "(", "status", ",", "reason", ")", ")", "with", "context", ".", "io_manager", ".", "with_stdout", "(", ")", "as", "fp", ":", "if", "context", ".", "output_headers", ":", "context", ".", "write_headers", "(", "fp", ",", "headers", ",", "context", ".", "muted_account_headers", ")", "fp", ".", "write", "(", "contents", ")", "fp", ".", "flush", "(", ")", "return", "with", "context", ".", "client_manager", ".", "with_client", "(", ")", "as", "client", ":", "status", ",", "reason", ",", "headers", ",", "contents", "=", "client", ".", "get_account", "(", "headers", "=", "context", ".", "headers", ",", "limit", "=", "limit", ",", "marker", "=", "marker", ",", "end_marker", "=", "end_marker", ",", "query", "=", "context", ".", "query", ",", "cdn", "=", "context", ".", "cdn", ")", "if", "status", "//", "100", "!=", "2", ":", "if", "status", "==", "404", "and", "context", ".", "ignore_404", ":", "return", "if", "hasattr", "(", "contents", ",", "'read'", ")", ":", "contents", ".", "read", "(", ")", "raise", "ReturnCode", "(", "'listing account: %s %s'", "%", "(", "status", ",", "reason", ")", ")", "if", "context", ".", "output_headers", "and", "not", "context", ".", "all_objects", ":", "with", "context", ".", "io_manager", ".", "with_stdout", "(", ")", "as", "fp", ":", "context", ".", "write_headers", "(", "fp", ",", "headers", ",", "context", ".", "muted_account_headers", ")", "while", "contents", ":", "if", "context", ".", "all_objects", ":", "new_context", "=", "context", ".", "copy", "(", ")", "new_context", ".", "query", "=", "dict", "(", "new_context", ".", "query", ")", "for", "remove", "in", "(", "'limit'", ",", "'delimiter'", ",", "'prefix'", ",", "'marker'", ",", "'end_marker'", ")", ":", "if", "remove", "in", "new_context", ".", "query", ":", "del", "new_context", ".", "query", "[", "remove", "]", "for", "item", "in", "contents", ":", "if", "'name'", "in", "item", ":", "new_path", "=", "item", "[", "'name'", "]", ".", "encode", "(", "'utf8'", ")", "cli_get_container_listing", "(", "new_context", ",", "new_path", ")", "else", ":", "with", "context", ".", "io_manager", ".", "with_stdout", "(", ")", "as", "fp", ":", "for", "item", "in", "contents", ":", "if", "context", ".", "full", ":", "fp", ".", "write", "(", "'%13s %13s '", "%", "(", "item", ".", "get", "(", "'bytes'", ",", "'-'", 
")", ",", "item", ".", "get", "(", "'count'", ",", "'-'", ")", ")", ")", "fp", ".", "write", "(", "item", ".", "get", "(", "'name'", ",", "item", ".", "get", "(", "'subdir'", ")", ")", ")", "fp", ".", "write", "(", "'\\n'", ")", "fp", ".", "flush", "(", ")", "if", "limit", ":", "break", "marker", "=", "contents", "[", "-", "1", "]", ".", "get", "(", "'name'", ",", "contents", "[", "-", "1", "]", ".", "get", "(", "'subdir'", ",", "''", ")", ")", "with", "context", ".", "client_manager", ".", "with_client", "(", ")", "as", "client", ":", "status", ",", "reason", ",", "headers", ",", "contents", "=", "client", ".", "get_account", "(", "headers", "=", "context", ".", "headers", ",", "limit", "=", "limit", ",", "delimiter", "=", "delimiter", ",", "prefix", "=", "prefix", ",", "end_marker", "=", "end_marker", ",", "marker", "=", "marker", ",", "query", "=", "context", ".", "query", ",", "cdn", "=", "context", ".", "cdn", ")", "if", "status", "//", "100", "!=", "2", ":", "if", "status", "==", "404", "and", "context", ".", "ignore_404", ":", "return", "if", "hasattr", "(", "contents", ",", "'read'", ")", ":", "contents", ".", "read", "(", ")", "raise", "ReturnCode", "(", "'listing account: %s %s'", "%", "(", "status", ",", "reason", ")", ")" ]
Performs a GET on the account as a listing request. See :py:mod:`swiftly.cli.get` for context usage information. See :py:class:`CLIGet` for more information.
[ "Performs", "a", "GET", "on", "the", "account", "as", "a", "listing", "request", "." ]
python
test
cloudify-cosmo/repex
repex.py
https://github.com/cloudify-cosmo/repex/blob/589e442857fa4a99fa88670d7df1a72f983bbd28/repex.py#L328-L340
def _match_tags(repex_tags, path_tags): """Check for matching tags between what the user provided and the tags set in the config. If `any` is chosen, match. If no tags are chosen and none are configured, match. If the user provided tags match any of the configured tags, match. """ if 'any' in repex_tags or (not repex_tags and not path_tags): return True elif set(repex_tags) & set(path_tags): return True return False
[ "def", "_match_tags", "(", "repex_tags", ",", "path_tags", ")", ":", "if", "'any'", "in", "repex_tags", "or", "(", "not", "repex_tags", "and", "not", "path_tags", ")", ":", "return", "True", "elif", "set", "(", "repex_tags", ")", "&", "set", "(", "path_tags", ")", ":", "return", "True", "return", "False" ]
Check for matching tags between what the user provided and the tags set in the config. If `any` is chosen, match. If no tags are chosen and none are configured, match. If the user-provided tags match any of the configured tags, match.
[ "Check", "for", "matching", "tags", "between", "what", "the", "user", "provided", "and", "the", "tags", "set", "in", "the", "config", "." ]
python
train
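Since `_match_tags` is a pure function, its rules are easy to demonstrate (assuming it is importable from the repex module):

from repex import _match_tags

assert _match_tags(['any'], ['prod'])            # 'any' always matches
assert _match_tags([], [])                       # nothing requested, nothing configured
assert _match_tags(['dev', 'prod'], ['prod'])    # non-empty intersection
assert not _match_tags(['dev'], ['prod'])        # disjoint tag sets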
polyaxon/polyaxon
polyaxon/pipelines/dags.py
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/pipelines/dags.py#L50-L77
def sort_topologically(dag):
    """Sort the dag breadth first topologically.

    Only the nodes inside the dag are returned, i.e. the nodes that are also keys.

    Returns:
        a topological ordering of the DAG.
    Raises:
        an error if this is not possible (graph is not valid).
    """
    dag = copy.deepcopy(dag)
    sorted_nodes = []
    independent_nodes = deque(get_independent_nodes(dag))
    while independent_nodes:
        node = independent_nodes.popleft()
        sorted_nodes.append(node)

        # this alters the dag so that we are sure we are visiting the nodes only once
        downstream_nodes = dag[node]
        while downstream_nodes:
            downstream_node = downstream_nodes.pop(0)
            if downstream_node not in dag:
                continue
            if not has_dependencies(downstream_node, dag):
                independent_nodes.append(downstream_node)

    if len(sorted_nodes) != len(dag.keys()):
        raise ValueError('graph is not acyclic')
    return sorted_nodes
[ "def", "sort_topologically", "(", "dag", ")", ":", "dag", "=", "copy", ".", "deepcopy", "(", "dag", ")", "sorted_nodes", "=", "[", "]", "independent_nodes", "=", "deque", "(", "get_independent_nodes", "(", "dag", ")", ")", "while", "independent_nodes", ":", "node", "=", "independent_nodes", ".", "popleft", "(", ")", "sorted_nodes", ".", "append", "(", "node", ")", "# this alters the dag so that we are sure we are visiting the nodes only once", "downstream_nodes", "=", "dag", "[", "node", "]", "while", "downstream_nodes", ":", "downstream_node", "=", "downstream_nodes", ".", "pop", "(", "0", ")", "if", "downstream_node", "not", "in", "dag", ":", "continue", "if", "not", "has_dependencies", "(", "downstream_node", ",", "dag", ")", ":", "independent_nodes", ".", "append", "(", "downstream_node", ")", "if", "len", "(", "sorted_nodes", ")", "!=", "len", "(", "dag", ".", "keys", "(", ")", ")", ":", "raise", "ValueError", "(", "'graph is not acyclic'", ")", "return", "sorted_nodes" ]
Sort the dag breadth first topologically. Only the nodes inside the dag are returned, i.e. the nodes that are also keys. Returns: a topological ordering of the DAG. Raises: an error if this is not possible (graph is not valid).
[ "Sort", "the", "dag", "breath", "first", "topologically", "." ]
python
train
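A quick demonstration of `sort_topologically` on an adjacency-list dag where every node appears as a key (it relies on the module's companion helpers `get_independent_nodes` and `has_dependencies`); a cycle would raise ValueError:

dag = {
    'a': ['b', 'c'],
    'b': ['d'],
    'c': ['d'],
    'd': [],
}
print(sort_topologically(dag))  # ['a', 'b', 'c', 'd']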
numenta/nupic
src/nupic/database/connection.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/connection.py#L373-L409
def _trackInstanceAndCheckForConcurrencyViolation(self): """ Check for concurrency violation and add self to _clsOutstandingInstances. ASSUMPTION: Called from constructor BEFORE _clsNumOutstanding is incremented """ global g_max_concurrency, g_max_concurrency_raise_exception assert g_max_concurrency is not None assert self not in self._clsOutstandingInstances, repr(self) # Populate diagnostic info self._creationTracebackString = traceback.format_stack() # Check for concurrency violation if self._clsNumOutstanding >= g_max_concurrency: # NOTE: It's possible for _clsNumOutstanding to be greater than # len(_clsOutstandingInstances) if concurrency check was enabled after # unrelease allocations. errorMsg = ("With numOutstanding=%r, exceeded concurrency limit=%r " "when requesting %r. OTHER TRACKED UNRELEASED " "INSTANCES (%s): %r") % ( self._clsNumOutstanding, g_max_concurrency, self, len(self._clsOutstandingInstances), self._clsOutstandingInstances,) self._logger.error(errorMsg) if g_max_concurrency_raise_exception: raise ConcurrencyExceededError(errorMsg) # Add self to tracked instance set self._clsOutstandingInstances.add(self) self._addedToInstanceSet = True return
[ "def", "_trackInstanceAndCheckForConcurrencyViolation", "(", "self", ")", ":", "global", "g_max_concurrency", ",", "g_max_concurrency_raise_exception", "assert", "g_max_concurrency", "is", "not", "None", "assert", "self", "not", "in", "self", ".", "_clsOutstandingInstances", ",", "repr", "(", "self", ")", "# Populate diagnostic info", "self", ".", "_creationTracebackString", "=", "traceback", ".", "format_stack", "(", ")", "# Check for concurrency violation", "if", "self", ".", "_clsNumOutstanding", ">=", "g_max_concurrency", ":", "# NOTE: It's possible for _clsNumOutstanding to be greater than", "# len(_clsOutstandingInstances) if concurrency check was enabled after", "# unrelease allocations.", "errorMsg", "=", "(", "\"With numOutstanding=%r, exceeded concurrency limit=%r \"", "\"when requesting %r. OTHER TRACKED UNRELEASED \"", "\"INSTANCES (%s): %r\"", ")", "%", "(", "self", ".", "_clsNumOutstanding", ",", "g_max_concurrency", ",", "self", ",", "len", "(", "self", ".", "_clsOutstandingInstances", ")", ",", "self", ".", "_clsOutstandingInstances", ",", ")", "self", ".", "_logger", ".", "error", "(", "errorMsg", ")", "if", "g_max_concurrency_raise_exception", ":", "raise", "ConcurrencyExceededError", "(", "errorMsg", ")", "# Add self to tracked instance set", "self", ".", "_clsOutstandingInstances", ".", "add", "(", "self", ")", "self", ".", "_addedToInstanceSet", "=", "True", "return" ]
Check for concurrency violation and add self to _clsOutstandingInstances. ASSUMPTION: Called from constructor BEFORE _clsNumOutstanding is incremented
[ "Check", "for", "concurrency", "violation", "and", "add", "self", "to", "_clsOutstandingInstances", "." ]
python
valid
clusterpoint/python-client-api
pycps/converters.py
https://github.com/clusterpoint/python-client-api/blob/fabf9bd8355aa54ba08fd6649e48f16e2c35eacd/pycps/converters.py#L178-L204
def to_raw_xml(source):
    """ Convert various representations of an XML structure to a normal XML string.

        Args:
            source -- The source object to be converted - ET.Element, dict or string.

        Returns:
            A raw xml string matching the source object.

    >>> to_raw_xml("<content/>")
    '<content/>'

    >>> to_raw_xml({'document': {'title': 'foo', 'list': [{'li':1}, {'li':2}]}})
    '<document><list><li>1</li><li>2</li></list><title>foo</title></document>'

    >>> to_raw_xml(ET.Element('root'))
    '<root/>'
    """
    if isinstance(source, basestring):
        return source
    elif hasattr(source, 'getiterator'):  # Element or ElementTree.
        return ET.tostring(source, encoding="utf-8")
    elif hasattr(source, 'keys'):  # Dict.
        xml_root = dict_to_etree(source)
        return ET.tostring(xml_root, encoding="utf-8")
    else:
        raise TypeError("Accepted representations of a document are string, dict and etree")
[ "def", "to_raw_xml", "(", "source", ")", ":", "if", "isinstance", "(", "source", ",", "basestring", ")", ":", "return", "source", "elif", "hasattr", "(", "source", ",", "'getiterator'", ")", ":", "# Element or ElementTree.", "return", "ET", ".", "tostring", "(", "source", ",", "encoding", "=", "\"utf-8\"", ")", "elif", "hasattr", "(", "source", ",", "'keys'", ")", ":", "# Dict.", "xml_root", "=", "dict_to_etree", "(", "source", ")", "return", "ET", ".", "tostring", "(", "xml_root", ",", "encoding", "=", "\"utf-8\"", ")", "else", ":", "raise", "TypeError", "(", "\"Accepted representations of a document are string, dict and etree\"", ")" ]
Convert various representations of an XML structure to a normal XML string.

Args:
    source -- The source object to be converted - ET.Element, dict or string.

Returns:
    A raw xml string matching the source object.

>>> to_raw_xml("<content/>")
'<content/>'

>>> to_raw_xml({'document': {'title': 'foo', 'list': [{'li':1}, {'li':2}]}})
'<document><list><li>1</li><li>2</li></list><title>foo</title></document>'

>>> to_raw_xml(ET.Element('root'))
'<root/>'
[ "Convert", "various", "representations", "of", "an", "XML", "structure", "to", "a", "normal", "XML", "string", "." ]
python
train
monarch-initiative/dipper
dipper/sources/UDP.py
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/UDP.py#L770-L821
def _get_rs_id(variant, rs_map, variant_type): """ Given a variant dict, return unambiguous RS ID TODO Some sequence alterations appear to have mappings to dbsnp's notation for example, reference allele: TTTTTTTTTTTTTT variant allele: TTTTTTTTTTTTTTT Is theoretically the same as -/T, we should clarify with UDP and then add functionality to map this notation to the more common -/T :param variant: :param rs_map: :param type: snp or indel :return: """ rs_id = None if variant_type == 'snp': variant_key = "{0}-{1}".format(variant['chromosome'], variant['position']) if variant_key in rs_map: snp_candidates = [ rs_dict for rs_dict in rs_map[variant_key] if rs_dict['type'] == 'snp'] if len(snp_candidates) == 1: rs_id = snp_candidates[0]["rs_id"] elif variant_type == 'indel': rs_candidates = [] variant_key = "{0}-{1}".format(variant['chromosome'], variant['position']) if variant_key in rs_map: snp_candidates = [ rs_dict for rs_dict in rs_map[variant_key] if rs_dict['type'] == 'in-del'] for candidate in snp_candidates: alleles = candidate['alleles'].split('/') if variant['reference_allele'] in alleles \ and variant['variant_allele'] in alleles: rs_candidates.append(candidate['rs_id']) if len(rs_candidates) == 1: rs_id = rs_candidates[0] elif len(rs_candidates) > 1: LOG.info( "ambiguous rs mapping for: %s\ncandidate ids: %s", variant, rs_candidates) else: LOG.info( "rs at coordinate but no match found" " for variant %s\n candidate ids: %s", variant, rs_map[variant_key]) else: LOG.warning("type: %s unsupported", variant_type) return rs_id
[ "def", "_get_rs_id", "(", "variant", ",", "rs_map", ",", "variant_type", ")", ":", "rs_id", "=", "None", "if", "variant_type", "==", "'snp'", ":", "variant_key", "=", "\"{0}-{1}\"", ".", "format", "(", "variant", "[", "'chromosome'", "]", ",", "variant", "[", "'position'", "]", ")", "if", "variant_key", "in", "rs_map", ":", "snp_candidates", "=", "[", "rs_dict", "for", "rs_dict", "in", "rs_map", "[", "variant_key", "]", "if", "rs_dict", "[", "'type'", "]", "==", "'snp'", "]", "if", "len", "(", "snp_candidates", ")", "==", "1", ":", "rs_id", "=", "snp_candidates", "[", "0", "]", "[", "\"rs_id\"", "]", "elif", "variant_type", "==", "'indel'", ":", "rs_candidates", "=", "[", "]", "variant_key", "=", "\"{0}-{1}\"", ".", "format", "(", "variant", "[", "'chromosome'", "]", ",", "variant", "[", "'position'", "]", ")", "if", "variant_key", "in", "rs_map", ":", "snp_candidates", "=", "[", "rs_dict", "for", "rs_dict", "in", "rs_map", "[", "variant_key", "]", "if", "rs_dict", "[", "'type'", "]", "==", "'in-del'", "]", "for", "candidate", "in", "snp_candidates", ":", "alleles", "=", "candidate", "[", "'alleles'", "]", ".", "split", "(", "'/'", ")", "if", "variant", "[", "'reference_allele'", "]", "in", "alleles", "and", "variant", "[", "'variant_allele'", "]", "in", "alleles", ":", "rs_candidates", ".", "append", "(", "candidate", "[", "'rs_id'", "]", ")", "if", "len", "(", "rs_candidates", ")", "==", "1", ":", "rs_id", "=", "rs_candidates", "[", "0", "]", "elif", "len", "(", "rs_candidates", ")", ">", "1", ":", "LOG", ".", "info", "(", "\"ambiguous rs mapping for: %s\\ncandidate ids: %s\"", ",", "variant", ",", "rs_candidates", ")", "else", ":", "LOG", ".", "info", "(", "\"rs at coordinate but no match found\"", "\" for variant %s\\n candidate ids: %s\"", ",", "variant", ",", "rs_map", "[", "variant_key", "]", ")", "else", ":", "LOG", ".", "warning", "(", "\"type: %s unsupported\"", ",", "variant_type", ")", "return", "rs_id" ]
Given a variant dict, return an unambiguous RS ID.

TODO: Some sequence alterations appear to have mappings to dbsnp's notation; for example, reference allele TTTTTTTTTTTTTT with variant allele TTTTTTTTTTTTTTT is theoretically the same as -/T. We should clarify with UDP and then add functionality to map this notation to the more common -/T.

:param variant:
:param rs_map:
:param variant_type: snp or indel
:return:
[ "Given", "a", "variant", "dict", "return", "unambiguous", "RS", "ID", "TODO", "Some", "sequence", "alterations", "appear", "to", "have", "mappings", "to", "dbsnp", "s", "notation", "for", "example", "reference", "allele", ":", "TTTTTTTTTTTTTT", "variant", "allele", ":", "TTTTTTTTTTTTTTT", "Is", "theoretically", "the", "same", "as", "-", "/", "T", "we", "should", "clarify", "with", "UDP", "and", "then", "add", "functionality", "to", "map", "this", "notation", "to", "the", "more", "common", "-", "/", "T", ":", "param", "variant", ":", ":", "param", "rs_map", ":", ":", "param", "type", ":", "snp", "or", "indel", ":", "return", ":" ]
python
train
bitshares/uptick
uptick/wallet.py
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/wallet.py#L158-L201
def importaccount(ctx, account, role): """ Import an account using an account password """ from bitsharesbase.account import PasswordKey password = click.prompt("Account Passphrase", hide_input=True) account = Account(account, bitshares_instance=ctx.bitshares) imported = False if role == "owner": owner_key = PasswordKey(account["name"], password, role="owner") owner_pubkey = format( owner_key.get_public_key(), ctx.bitshares.rpc.chain_params["prefix"] ) if owner_pubkey in [x[0] for x in account["owner"]["key_auths"]]: print_message("Importing owner key!") owner_privkey = owner_key.get_private_key() ctx.bitshares.wallet.addPrivateKey(owner_privkey) imported = True if role == "active": active_key = PasswordKey(account["name"], password, role="active") active_pubkey = format( active_key.get_public_key(), ctx.bitshares.rpc.chain_params["prefix"] ) if active_pubkey in [x[0] for x in account["active"]["key_auths"]]: print_message("Importing active key!") active_privkey = active_key.get_private_key() ctx.bitshares.wallet.addPrivateKey(active_privkey) imported = True if role == "memo": memo_key = PasswordKey(account["name"], password, role=role) memo_pubkey = format( memo_key.get_public_key(), ctx.bitshares.rpc.chain_params["prefix"] ) if memo_pubkey == account["memo_key"]: print_message("Importing memo key!") memo_privkey = memo_key.get_private_key() ctx.bitshares.wallet.addPrivateKey(memo_privkey) imported = True if not imported: print_message("No matching key(s) found. Password correct?", "error")
[ "def", "importaccount", "(", "ctx", ",", "account", ",", "role", ")", ":", "from", "bitsharesbase", ".", "account", "import", "PasswordKey", "password", "=", "click", ".", "prompt", "(", "\"Account Passphrase\"", ",", "hide_input", "=", "True", ")", "account", "=", "Account", "(", "account", ",", "bitshares_instance", "=", "ctx", ".", "bitshares", ")", "imported", "=", "False", "if", "role", "==", "\"owner\"", ":", "owner_key", "=", "PasswordKey", "(", "account", "[", "\"name\"", "]", ",", "password", ",", "role", "=", "\"owner\"", ")", "owner_pubkey", "=", "format", "(", "owner_key", ".", "get_public_key", "(", ")", ",", "ctx", ".", "bitshares", ".", "rpc", ".", "chain_params", "[", "\"prefix\"", "]", ")", "if", "owner_pubkey", "in", "[", "x", "[", "0", "]", "for", "x", "in", "account", "[", "\"owner\"", "]", "[", "\"key_auths\"", "]", "]", ":", "print_message", "(", "\"Importing owner key!\"", ")", "owner_privkey", "=", "owner_key", ".", "get_private_key", "(", ")", "ctx", ".", "bitshares", ".", "wallet", ".", "addPrivateKey", "(", "owner_privkey", ")", "imported", "=", "True", "if", "role", "==", "\"active\"", ":", "active_key", "=", "PasswordKey", "(", "account", "[", "\"name\"", "]", ",", "password", ",", "role", "=", "\"active\"", ")", "active_pubkey", "=", "format", "(", "active_key", ".", "get_public_key", "(", ")", ",", "ctx", ".", "bitshares", ".", "rpc", ".", "chain_params", "[", "\"prefix\"", "]", ")", "if", "active_pubkey", "in", "[", "x", "[", "0", "]", "for", "x", "in", "account", "[", "\"active\"", "]", "[", "\"key_auths\"", "]", "]", ":", "print_message", "(", "\"Importing active key!\"", ")", "active_privkey", "=", "active_key", ".", "get_private_key", "(", ")", "ctx", ".", "bitshares", ".", "wallet", ".", "addPrivateKey", "(", "active_privkey", ")", "imported", "=", "True", "if", "role", "==", "\"memo\"", ":", "memo_key", "=", "PasswordKey", "(", "account", "[", "\"name\"", "]", ",", "password", ",", "role", "=", "role", ")", "memo_pubkey", "=", "format", "(", "memo_key", ".", "get_public_key", "(", ")", ",", "ctx", ".", "bitshares", ".", "rpc", ".", "chain_params", "[", "\"prefix\"", "]", ")", "if", "memo_pubkey", "==", "account", "[", "\"memo_key\"", "]", ":", "print_message", "(", "\"Importing memo key!\"", ")", "memo_privkey", "=", "memo_key", ".", "get_private_key", "(", ")", "ctx", ".", "bitshares", ".", "wallet", ".", "addPrivateKey", "(", "memo_privkey", ")", "imported", "=", "True", "if", "not", "imported", ":", "print_message", "(", "\"No matching key(s) found. Password correct?\"", ",", "\"error\"", ")" ]
Import an account using an account password
[ "Import", "an", "account", "using", "an", "account", "password" ]
python
train
NASA-AMMOS/AIT-Core
ait/core/tlm.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/tlm.py#L960-L967
def _assertField(self, name): """Raise AttributeError when PacketHistory has no field with the given name. """ if name not in self._names: msg = 'PacketHistory "%s" has no field "%s"' values = self._defn.name, name raise AttributeError(msg % values)
[ "def", "_assertField", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "_names", ":", "msg", "=", "'PacketHistory \"%s\" has no field \"%s\"'", "values", "=", "self", ".", "_defn", ".", "name", ",", "name", "raise", "AttributeError", "(", "msg", "%", "values", ")" ]
Raise AttributeError when PacketHistory has no field with the given name.
[ "Raise", "AttributeError", "when", "PacketHistory", "has", "no", "field", "with", "the", "given", "name", "." ]
python
train
coderholic/pyradio
pyradio/config.py
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/config.py#L383-L401
def read_playlists(self): self.playlists = [] self.selected_playlist = -1 files = glob.glob(path.join(self.stations_dir, '*.csv')) if len(files) == 0: return 0, -1 else: for a_file in files: a_file_name = ''.join(path.basename(a_file).split('.')[:-1]) a_file_size = self._bytes_to_human(path.getsize(a_file)) a_file_time = ctime(path.getmtime(a_file)) self.playlists.append([a_file_name, a_file_time, a_file_size, a_file]) self.playlists.sort() """ get already loaded playlist id """ for i, a_playlist in enumerate(self.playlists): if a_playlist[-1] == self.stations_file: self.selected_playlist = i break return len(self.playlists), self.selected_playlist
[ "def", "read_playlists", "(", "self", ")", ":", "self", ".", "playlists", "=", "[", "]", "self", ".", "selected_playlist", "=", "-", "1", "files", "=", "glob", ".", "glob", "(", "path", ".", "join", "(", "self", ".", "stations_dir", ",", "'*.csv'", ")", ")", "if", "len", "(", "files", ")", "==", "0", ":", "return", "0", ",", "-", "1", "else", ":", "for", "a_file", "in", "files", ":", "a_file_name", "=", "''", ".", "join", "(", "path", ".", "basename", "(", "a_file", ")", ".", "split", "(", "'.'", ")", "[", ":", "-", "1", "]", ")", "a_file_size", "=", "self", ".", "_bytes_to_human", "(", "path", ".", "getsize", "(", "a_file", ")", ")", "a_file_time", "=", "ctime", "(", "path", ".", "getmtime", "(", "a_file", ")", ")", "self", ".", "playlists", ".", "append", "(", "[", "a_file_name", ",", "a_file_time", ",", "a_file_size", ",", "a_file", "]", ")", "self", ".", "playlists", ".", "sort", "(", ")", "for", "i", ",", "a_playlist", "in", "enumerate", "(", "self", ".", "playlists", ")", ":", "if", "a_playlist", "[", "-", "1", "]", "==", "self", ".", "stations_file", ":", "self", ".", "selected_playlist", "=", "i", "break", "return", "len", "(", "self", ".", "playlists", ")", ",", "self", ".", "selected_playlist" ]
get already loaded playlist id
[ "get", "already", "loaded", "playlist", "id" ]
python
train
saltstack/salt
salt/utils/msazure.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/msazure.py#L123-L167
def get_blob(storage_conn=None, **kwargs): ''' .. versionadded:: 2015.8.0 Download a blob ''' if not storage_conn: storage_conn = get_storage_conn(opts=kwargs) if 'container' not in kwargs: raise SaltSystemExit(code=42, msg='The blob container name must be specified as "container"') if 'name' not in kwargs: raise SaltSystemExit(code=42, msg='The blob name must be specified as "name"') if 'local_path' not in kwargs and 'return_content' not in kwargs: raise SaltSystemExit( code=42, msg='Either a local path needs to be passed in as "local_path", ' 'or "return_content" to return the blob contents directly' ) blob_kwargs = { 'container_name': kwargs['container'], 'blob_name': kwargs['name'], 'snapshot': kwargs.get('snapshot', None), 'x_ms_lease_id': kwargs.get('lease_id', None), 'progress_callback': kwargs.get('progress_callback', None), 'max_connections': kwargs.get('max_connections', 1), 'max_retries': kwargs.get('max_retries', 5), 'retry_wait': kwargs.get('retry_wait', 1), } if 'local_path' in kwargs: data = storage_conn.get_blob_to_path( file_path=kwargs['local_path'], open_mode=kwargs.get('open_mode', 'wb'), **blob_kwargs ) elif 'return_content' in kwargs: data = storage_conn.get_blob_to_bytes( **blob_kwargs ) return data
[ "def", "get_blob", "(", "storage_conn", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "storage_conn", ":", "storage_conn", "=", "get_storage_conn", "(", "opts", "=", "kwargs", ")", "if", "'container'", "not", "in", "kwargs", ":", "raise", "SaltSystemExit", "(", "code", "=", "42", ",", "msg", "=", "'The blob container name must be specified as \"container\"'", ")", "if", "'name'", "not", "in", "kwargs", ":", "raise", "SaltSystemExit", "(", "code", "=", "42", ",", "msg", "=", "'The blob name must be specified as \"name\"'", ")", "if", "'local_path'", "not", "in", "kwargs", "and", "'return_content'", "not", "in", "kwargs", ":", "raise", "SaltSystemExit", "(", "code", "=", "42", ",", "msg", "=", "'Either a local path needs to be passed in as \"local_path\", '", "'or \"return_content\" to return the blob contents directly'", ")", "blob_kwargs", "=", "{", "'container_name'", ":", "kwargs", "[", "'container'", "]", ",", "'blob_name'", ":", "kwargs", "[", "'name'", "]", ",", "'snapshot'", ":", "kwargs", ".", "get", "(", "'snapshot'", ",", "None", ")", ",", "'x_ms_lease_id'", ":", "kwargs", ".", "get", "(", "'lease_id'", ",", "None", ")", ",", "'progress_callback'", ":", "kwargs", ".", "get", "(", "'progress_callback'", ",", "None", ")", ",", "'max_connections'", ":", "kwargs", ".", "get", "(", "'max_connections'", ",", "1", ")", ",", "'max_retries'", ":", "kwargs", ".", "get", "(", "'max_retries'", ",", "5", ")", ",", "'retry_wait'", ":", "kwargs", ".", "get", "(", "'retry_wait'", ",", "1", ")", ",", "}", "if", "'local_path'", "in", "kwargs", ":", "data", "=", "storage_conn", ".", "get_blob_to_path", "(", "file_path", "=", "kwargs", "[", "'local_path'", "]", ",", "open_mode", "=", "kwargs", ".", "get", "(", "'open_mode'", ",", "'wb'", ")", ",", "*", "*", "blob_kwargs", ")", "elif", "'return_content'", "in", "kwargs", ":", "data", "=", "storage_conn", ".", "get_blob_to_bytes", "(", "*", "*", "blob_kwargs", ")", "return", "data" ]
.. versionadded:: 2015.8.0

Download a blob
[ "..", "versionadded", "::", "2015", ".", "8", ".", "0" ]
python
train
kxgames/vecrec
vecrec/collisions.py
https://github.com/kxgames/vecrec/blob/18b0841419de21a644b4511e2229af853ed09529/vecrec/collisions.py#L3-L40
def circle_touching_line(center, radius, start, end):
    """ Return true if the given circle intersects the given segment.

    Note that this checks for intersection with a line segment, and not an
    actual line.

    :param center: Center of the circle.
    :type center: Vector
    :param radius: Radius of the circle.
    :type radius: float
    :param start: The first end of the line segment.
    :type start: Vector
    :param end: The second end of the line segment.
    :type end: Vector
    """
    C, R = center, radius
    A, B = start, end

    a = (B.x - A.x)**2 + (B.y - A.y)**2
    b = 2 * (B.x - A.x) * (A.x - C.x) \
      + 2 * (B.y - A.y) * (A.y - C.y)
    c = C.x**2 + C.y**2 + A.x**2 + A.y**2 \
      - 2 * (C.x * A.x + C.y * A.y) - R**2

    discriminant = b**2 - 4 * a * c
    if discriminant < 0:
        return False
    elif discriminant == 0:
        u = v = -b / float(2 * a)
    else:
        u = (-b + math.sqrt(discriminant)) / float(2 * a)
        v = (-b - math.sqrt(discriminant)) / float(2 * a)

    if u < 0 and v < 0:
        return False
    if u > 1 and v > 1:
        return False

    return True
[ "def", "circle_touching_line", "(", "center", ",", "radius", ",", "start", ",", "end", ")", ":", "C", ",", "R", "=", "center", ",", "radius", "A", ",", "B", "=", "start", ",", "end", "a", "=", "(", "B", ".", "x", "-", "A", ".", "x", ")", "**", "2", "+", "(", "B", ".", "y", "-", "A", ".", "y", ")", "**", "2", "b", "=", "2", "*", "(", "B", ".", "x", "-", "A", ".", "x", ")", "*", "(", "A", ".", "x", "-", "C", ".", "x", ")", "+", "2", "*", "(", "B", ".", "y", "-", "A", ".", "y", ")", "*", "(", "A", ".", "y", "-", "C", ".", "y", ")", "c", "=", "C", ".", "x", "**", "2", "+", "C", ".", "y", "**", "2", "+", "A", ".", "x", "**", "2", "+", "A", ".", "y", "**", "2", "-", "2", "*", "(", "C", ".", "x", "*", "A", ".", "x", "+", "C", ".", "y", "*", "A", ".", "y", ")", "-", "R", "**", "2", "discriminant", "=", "b", "**", "2", "-", "4", "*", "a", "*", "c", "if", "discriminant", "<", "0", ":", "return", "False", "elif", "discriminant", "==", "0", ":", "u", "=", "v", "=", "-", "b", "/", "float", "(", "2", "*", "a", ")", "else", ":", "u", "=", "(", "-", "b", "+", "math", ".", "sqrt", "(", "discriminant", ")", ")", "/", "float", "(", "2", "*", "a", ")", "v", "=", "(", "-", "b", "-", "math", ".", "sqrt", "(", "discriminant", ")", ")", "/", "float", "(", "2", "*", "a", ")", "if", "u", "<", "0", "and", "v", "<", "0", ":", "return", "False", "if", "u", ">", "1", "and", "v", ">", "1", ":", "return", "False", "return", "True" ]
Return true if the given circle intersects the given segment.

Note that this checks for intersection with a line segment, and not an
actual line.

:param center: Center of the circle.
:type center: Vector
:param radius: Radius of the circle.
:type radius: float
:param start: The first end of the line segment.
:type start: Vector
:param end: The second end of the line segment.
:type end: Vector
[ "Return", "true", "if", "the", "given", "circle", "intersects", "the", "given", "segment", ".", "Note", "that", "this", "checks", "for", "intersection", "with", "a", "line", "segment", "and", "not", "an", "actual", "line", "." ]
python
train
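A quick usage sketch of the record above; the namedtuple Vector here is a stand-in assumption (only .x and .y are needed), and circle_touching_line is assumed to be in scope with math imported.

import math
from collections import namedtuple

Vector = namedtuple('Vector', ['x', 'y'])   # hypothetical stand-in for vecrec's Vector

# Unit circle at the origin vs. a horizontal segment passing through it:
# a = 16, b = -16, c = 3, discriminant = 64 > 0, u = 0.75, v = 0.25 -> True
print(circle_touching_line(Vector(0, 0), 1.0, Vector(-2, 0), Vector(2, 0)))   # True

# Same circle vs. a segment well above it: discriminant < 0 -> False
print(circle_touching_line(Vector(0, 0), 1.0, Vector(-2, 5), Vector(2, 5)))   # False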
DLR-RM/RAFCON
source/rafcon/gui/models/selection.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/models/selection.py#L157-L167
def _check_model_types(self, models):
    """ Check types of passed models for correctness and in case raise exception

    :rtype: set
    :returns: set of models that are valid for the class"""
    if not hasattr(models, "__iter__"):
        models = {models}
    if not all([isinstance(model, (AbstractStateModel, StateElementModel)) for model in models]):
        raise TypeError("The selection supports only models with base class AbstractStateModel or "
                        "StateElementModel, see handed elements {0}".format(models))
    return models if isinstance(models, set) else set(models)
[ "def", "_check_model_types", "(", "self", ",", "models", ")", ":", "if", "not", "hasattr", "(", "models", ",", "\"__iter__\"", ")", ":", "models", "=", "{", "models", "}", "if", "not", "all", "(", "[", "isinstance", "(", "model", ",", "(", "AbstractStateModel", ",", "StateElementModel", ")", ")", "for", "model", "in", "models", "]", ")", ":", "raise", "TypeError", "(", "\"The selection supports only models with base class AbstractStateModel or \"", "\"StateElementModel, see handed elements {0}\"", ".", "format", "(", "models", ")", ")", "return", "models", "if", "isinstance", "(", "models", ",", "set", ")", "else", "set", "(", "models", ")" ]
Check types of passed models for correctness and in case raise exception

:rtype: set
:returns: set of models that are valid for the class
[ "Check", "types", "of", "passed", "models", "for", "correctness", "and", "in", "case", "raise", "exception" ]
python
train
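Illustrative calls for the record above, assuming `selection` is the owning Selection model and `state_m` is an AbstractStateModel instance (names are assumptions for the sketch):

selection._check_model_types(state_m)          # single model   -> {state_m}
selection._check_model_types([state_m])        # list           -> coerced to {state_m}
selection._check_model_types(["not a model"])  # wrong type     -> raises TypeError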
cbclab/MOT
mot/library_functions/__init__.py
https://github.com/cbclab/MOT/blob/fb3243b65025705842e82704705c00902f9a35af/mot/library_functions/__init__.py#L498-L504
def get_kernel_data(self):
    """Get the kernel data needed for this optimization routine to work."""
    return {
        'nmsimplex_scratch': LocalMemory(
            'mot_float_type', self._nmr_parameters * 2 + (self._nmr_parameters + 1) ** 2 + 1),
        'initial_simplex_scale': LocalMemory('mot_float_type', self._nmr_parameters)
    }
[ "def", "get_kernel_data", "(", "self", ")", ":", "return", "{", "'nmsimplex_scratch'", ":", "LocalMemory", "(", "'mot_float_type'", ",", "self", ".", "_nmr_parameters", "*", "2", "+", "(", "self", ".", "_nmr_parameters", "+", "1", ")", "**", "2", "+", "1", ")", ",", "'initial_simplex_scale'", ":", "LocalMemory", "(", "'mot_float_type'", ",", "self", ".", "_nmr_parameters", ")", "}" ]
Get the kernel data needed for this optimization routine to work.
[ "Get", "the", "kernel", "data", "needed", "for", "this", "optimization", "routine", "to", "work", "." ]
python
train
manahl/arctic
arctic/store/version_store.py
https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/store/version_store.py#L716-L761
def write_metadata(self, symbol, metadata, prune_previous_version=True, **kwargs):
    """
    Write 'metadata' under the specified 'symbol' name to this library.
    The data will remain unchanged. A new version will be created.
    If the symbol is missing, it causes a write with empty data
    (None, pickled, can't append) and the supplied metadata.
    Returns a VersionedItem object only with a metadata element.
    Fast operation: Zero data/segment read/write operations.

    Parameters
    ----------
    symbol : `str`
        symbol name for the item
    metadata : `dict` or `None`
        dictionary of metadata to persist along with the symbol
    prune_previous_version : `bool`
        Removes previous (non-snapshotted) versions from the database.
        Default: True
    kwargs :
        passed through to the write handler (only used if symbol does not
        already exist or is deleted)

    Returns
    -------
    `VersionedItem`
        VersionedItem named tuple containing the metadata of the written
        symbol's version document in the store.
    """
    # Make a normal write with empty data and supplied metadata if symbol does not exist
    try:
        previous_version = self._read_metadata(symbol)
    except NoDataFoundException:
        return self.write(symbol, data=None, metadata=metadata,
                          prune_previous_version=prune_previous_version, **kwargs)

    # Reaching here means that data and/or metadata exist and we are set to update the metadata
    new_version_num = self._version_nums.find_one_and_update({'symbol': symbol},
                                                             {'$inc': {'version': 1}},
                                                             upsert=True, new=True)['version']

    # Populate the new version entry, preserving existing data, and updating with the supplied metadata
    version = {k: previous_version[k] for k in previous_version.keys() if k != 'parent'}  # don't copy snapshots
    version['_id'] = bson.ObjectId()
    version['version'] = new_version_num
    version['metadata'] = metadata
    version['base_version_id'] = previous_version.get('base_version_id', previous_version['_id'])

    return self._add_new_version_using_reference(symbol, version, previous_version, prune_previous_version)
[ "def", "write_metadata", "(", "self", ",", "symbol", ",", "metadata", ",", "prune_previous_version", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# Make a normal write with empty data and supplied metadata if symbol does not exist", "try", ":", "previous_version", "=", "self", ".", "_read_metadata", "(", "symbol", ")", "except", "NoDataFoundException", ":", "return", "self", ".", "write", "(", "symbol", ",", "data", "=", "None", ",", "metadata", "=", "metadata", ",", "prune_previous_version", "=", "prune_previous_version", ",", "*", "*", "kwargs", ")", "# Reaching here means that and/or metadata exist and we are set to update the metadata", "new_version_num", "=", "self", ".", "_version_nums", ".", "find_one_and_update", "(", "{", "'symbol'", ":", "symbol", "}", ",", "{", "'$inc'", ":", "{", "'version'", ":", "1", "}", "}", ",", "upsert", "=", "True", ",", "new", "=", "True", ")", "[", "'version'", "]", "# Populate the new version entry, preserving existing data, and updating with the supplied metadata", "version", "=", "{", "k", ":", "previous_version", "[", "k", "]", "for", "k", "in", "previous_version", ".", "keys", "(", ")", "if", "k", "!=", "'parent'", "}", "# don't copy snapshots", "version", "[", "'_id'", "]", "=", "bson", ".", "ObjectId", "(", ")", "version", "[", "'version'", "]", "=", "new_version_num", "version", "[", "'metadata'", "]", "=", "metadata", "version", "[", "'base_version_id'", "]", "=", "previous_version", ".", "get", "(", "'base_version_id'", ",", "previous_version", "[", "'_id'", "]", ")", "return", "self", ".", "_add_new_version_using_reference", "(", "symbol", ",", "version", ",", "previous_version", ",", "prune_previous_version", ")" ]
Write 'metadata' under the specified 'symbol' name to this library.
The data will remain unchanged. A new version will be created.
If the symbol is missing, it causes a write with empty data
(None, pickled, can't append) and the supplied metadata.
Returns a VersionedItem object only with a metadata element.
Fast operation: Zero data/segment read/write operations.

Parameters
----------
symbol : `str`
    symbol name for the item
metadata : `dict` or `None`
    dictionary of metadata to persist along with the symbol
prune_previous_version : `bool`
    Removes previous (non-snapshotted) versions from the database.
    Default: True
kwargs :
    passed through to the write handler (only used if symbol does not
    already exist or is deleted)

Returns
-------
`VersionedItem`
    VersionedItem named tuple containing the metadata of the written
    symbol's version document in the store.
[ "Write", "metadata", "under", "the", "specified", "symbol", "name", "to", "this", "library", ".", "The", "data", "will", "remain", "unchanged", ".", "A", "new", "version", "will", "be", "created", ".", "If", "the", "symbol", "is", "missing", "it", "causes", "a", "write", "with", "empty", "data", "(", "None", "pickled", "can", "t", "append", ")", "and", "the", "supplied", "metadata", ".", "Returns", "a", "VersionedItem", "object", "only", "with", "a", "metadata", "element", ".", "Fast", "operation", ":", "Zero", "data", "/", "segment", "read", "/", "write", "operations", "." ]
python
train
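A hedged usage sketch for the record above; `lib` is assumed to be an arctic VersionStore handle and the symbol name is illustrative (read_metadata is another method on the same class):

item = lib.write_metadata('EURUSD', {'source': 'reuters', 'quality': 'tick'})
print(item.version)                            # new version number; stored data itself is untouched
print(lib.read_metadata('EURUSD').metadata)    # {'source': 'reuters', 'quality': 'tick'}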
saltstack/salt
salt/states/infoblox_cname.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/infoblox_cname.py#L104-L124
def absent(name=None, canonical=None, **api_opts):
    '''
    Ensure the CNAME with the given name or canonical name is removed
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
    obj = __salt__['infoblox.get_cname'](name=name, canonical=canonical, **api_opts)

    if not obj:
        ret['result'] = True
        ret['comment'] = 'infoblox already removed'
        return ret

    if __opts__['test']:
        ret['result'] = None
        ret['changes'] = {'old': obj, 'new': 'absent'}
        return ret

    if __salt__['infoblox.delete_cname'](name=name, canonical=canonical, **api_opts):
        ret['result'] = True
        ret['changes'] = {'old': obj, 'new': 'absent'}
    return ret
[ "def", "absent", "(", "name", "=", "None", ",", "canonical", "=", "None", ",", "*", "*", "api_opts", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "False", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "obj", "=", "__salt__", "[", "'infoblox.get_cname'", "]", "(", "name", "=", "name", ",", "canonical", "=", "canonical", ",", "*", "*", "api_opts", ")", "if", "not", "obj", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'comment'", "]", "=", "'infoblox already removed'", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "obj", ",", "'new'", ":", "'absent'", "}", "return", "ret", "if", "__salt__", "[", "'infoblox.delete_cname'", "]", "(", "name", "=", "name", ",", "canonical", "=", "canonical", ",", "*", "*", "api_opts", ")", ":", "ret", "[", "'result'", "]", "=", "True", "ret", "[", "'changes'", "]", "=", "{", "'old'", ":", "obj", ",", "'new'", ":", "'absent'", "}", "return", "ret" ]
Ensure the CNAME with the given name or canonical name is removed
[ "Ensure", "the", "CNAME", "with", "the", "given", "name", "or", "canonical", "name", "is", "removed" ]
python
train
unixsurfer/anycast_healthchecker
anycast_healthchecker/utils.py
https://github.com/unixsurfer/anycast_healthchecker/blob/3ab9c1d65d550eb30621ced2434252f61d1fdd33/anycast_healthchecker/utils.py#L725-L778
def reconfigure_bird(cmd):
    """Reconfigure BIRD daemon.

    Arguments:
        cmd (string): A command to trigger a reconfiguration of Bird daemon

    Notes:
        Runs 'birdc configure' to reconfigure BIRD. Some useful information
        on how birdc tool works:
        -- Returns a non-zero exit code only when it can't access BIRD
           daemon via the control socket (/var/run/bird.ctl). This happens
           when BIRD daemon is either down or when the caller of birdc
           doesn't have access to the control socket.
        -- Returns zero exit code when reconfigure fails due to invalid
           configuration. Thus, we catch this case by looking at the output
           and not at the exit code.
        -- Returns zero exit code when reconfigure was successful.
        -- Should never timeout, if it does then it is a bug.
    """
    log = logging.getLogger(PROGRAM_NAME)
    cmd = shlex.split(cmd)
    log.info("reconfiguring BIRD by running %s", ' '.join(cmd))
    try:
        output = subprocess.check_output(
            cmd,
            timeout=2,
            stderr=subprocess.STDOUT,
            universal_newlines=True,
        )
    except subprocess.TimeoutExpired:
        log.error("reconfiguring bird timed out")
        return
    except subprocess.CalledProcessError as error:
        # birdc returns 0 even when it fails due to invalid config,
        # but it returns 1 when BIRD is down.
        log.error("reconfiguring BIRD failed, either BIRD daemon is down or "
                  "we don't have privileges to reconfigure it (sudo problems?)"
                  ":%s", error.output.strip())
        return
    except FileNotFoundError as error:
        log.error("reconfiguring BIRD failed with: %s", error)
        return

    # 'Reconfigured' string will be in the output if and only if conf is valid.
    pattern = re.compile('^Reconfigured$', re.MULTILINE)
    if pattern.search(str(output)):
        log.info('reconfigured BIRD daemon')
    else:
        # We will end up here only if we generated an invalid conf
        # or someone broke bird.conf.
        log.error("reconfiguring BIRD returned error, most likely we generated"
                  " an invalid configuration file or Bird configuration is "
                  "broken:%s", output)
[ "def", "reconfigure_bird", "(", "cmd", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "PROGRAM_NAME", ")", "cmd", "=", "shlex", ".", "split", "(", "cmd", ")", "log", ".", "info", "(", "\"reconfiguring BIRD by running %s\"", ",", "' '", ".", "join", "(", "cmd", ")", ")", "try", ":", "output", "=", "subprocess", ".", "check_output", "(", "cmd", ",", "timeout", "=", "2", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "universal_newlines", "=", "True", ",", ")", "except", "subprocess", ".", "TimeoutExpired", ":", "log", ".", "error", "(", "\"reconfiguring bird timed out\"", ")", "return", "except", "subprocess", ".", "CalledProcessError", "as", "error", ":", "# birdc returns 0 even when it fails due to invalid config,", "# but it returns 1 when BIRD is down.", "log", ".", "error", "(", "\"reconfiguring BIRD failed, either BIRD daemon is down or \"", "\"we don't have privileges to reconfigure it (sudo problems?)\"", "\":%s\"", ",", "error", ".", "output", ".", "strip", "(", ")", ")", "return", "except", "FileNotFoundError", "as", "error", ":", "log", ".", "error", "(", "\"reconfiguring BIRD failed with: %s\"", ",", "error", ")", "return", "# 'Reconfigured' string will be in the output if and only if conf is valid.", "pattern", "=", "re", ".", "compile", "(", "'^Reconfigured$'", ",", "re", ".", "MULTILINE", ")", "if", "pattern", ".", "search", "(", "str", "(", "output", ")", ")", ":", "log", ".", "info", "(", "'reconfigured BIRD daemon'", ")", "else", ":", "# We will end up here only if we generated an invalid conf", "# or someone broke bird.conf.", "log", ".", "error", "(", "\"reconfiguring BIRD returned error, most likely we generated\"", "\" an invalid configuration file or Bird configuration in is \"", "\"broken:%s\"", ",", "output", ")" ]
Reconfigure BIRD daemon.

Arguments:
    cmd (string): A command to trigger a reconfiguration of Bird daemon

Notes:
    Runs 'birdc configure' to reconfigure BIRD. Some useful information
    on how birdc tool works:
    -- Returns a non-zero exit code only when it can't access BIRD
       daemon via the control socket (/var/run/bird.ctl). This happens
       when BIRD daemon is either down or when the caller of birdc
       doesn't have access to the control socket.
    -- Returns zero exit code when reconfigure fails due to invalid
       configuration. Thus, we catch this case by looking at the output
       and not at the exit code.
    -- Returns zero exit code when reconfigure was successful.
    -- Should never timeout, if it does then it is a bug.
[ "Reconfigure", "BIRD", "daemon", "." ]
python
train
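The output-based success check from the record above, in isolation: birdc exits 0 even on a bad config, so the function keys off one exact line. The sample output strings are illustrative assumptions.

import re

sample_ok = "Reading configuration from /etc/bird.conf\nReconfigured\n"
sample_bad = "Reading configuration from /etc/bird.conf\nsyntax error\n"
pattern = re.compile('^Reconfigured$', re.MULTILINE)
print(bool(pattern.search(sample_ok)))    # True  -> reconfigure succeeded
print(bool(pattern.search(sample_bad)))   # False -> invalid config despite exit code 0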
jupyterhub/kubespawner
kubespawner/spawner.py
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1824-L1837
def _options_form_default(self):
    '''
    Build the form template according to the `profile_list` setting.

    Returns:
        '' when no `profile_list` has been defined
        The rendered template (using jinja2) when `profile_list` is defined.
    '''
    if not self.profile_list:
        return ''
    if callable(self.profile_list):
        return self._render_options_form_dynamically
    else:
        return self._render_options_form(self.profile_list)
[ "def", "_options_form_default", "(", "self", ")", ":", "if", "not", "self", ".", "profile_list", ":", "return", "''", "if", "callable", "(", "self", ".", "profile_list", ")", ":", "return", "self", ".", "_render_options_form_dynamically", "else", ":", "return", "self", ".", "_render_options_form", "(", "self", ".", "profile_list", ")" ]
Build the form template according to the `profile_list` setting.

Returns:
    '' when no `profile_list` has been defined
    The rendered template (using jinja2) when `profile_list` is defined.
[ "Build", "the", "form", "template", "according", "to", "the", "profile_list", "setting", "." ]
python
train
saltstack/salt
salt/modules/win_smtp_server.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_smtp_server.py#L403-L458
def set_connection_ip_list(addresses=None, grant_by_default=False, server=_DEFAULT_SERVER):
    '''
    Set the IPGrant list for the SMTP virtual server.

    :param str addresses: A dictionary of IP + subnet pairs.
    :param bool grant_by_default: Whether the addresses should be a blacklist or whitelist.
    :param str server: The SMTP server name.

    :return: A boolean representing whether the change succeeded.
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}"
    '''
    setting = 'IPGrant'
    formatted_addresses = list()

    # It's okay to accept an empty list for set_connection_ip_list,
    # since an empty list may be desirable.
    if not addresses:
        addresses = dict()
        _LOG.debug('Empty %s specified.', setting)

    # Convert addresses to the 'ip_address, subnet' format used by
    # IIsIPSecuritySetting.
    for address in addresses:
        formatted_addresses.append('{0}, {1}'.format(address.strip(),
                                                     addresses[address].strip()))

    current_addresses = get_connection_ip_list(as_wmi_format=True, server=server)

    # Order is not important, so compare to the current addresses as unordered sets.
    if set(formatted_addresses) == set(current_addresses):
        _LOG.debug('%s already contains the provided addresses.', setting)
        return True

    # First we should check GrantByDefault, and change it if necessary.
    current_grant_by_default = _get_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', server)

    if grant_by_default != current_grant_by_default:
        _LOG.debug('Setting GrantByDefault to: %s', grant_by_default)
        _set_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', grant_by_default, server)

    _set_wmi_setting('IIsIPSecuritySetting', setting, formatted_addresses, server)

    new_addresses = get_connection_ip_list(as_wmi_format=True, server=server)
    ret = set(formatted_addresses) == set(new_addresses)

    if ret:
        _LOG.debug('%s configured successfully: %s', setting, formatted_addresses)
        return ret
    _LOG.error('Unable to configure %s with value: %s', setting, formatted_addresses)
    return ret
[ "def", "set_connection_ip_list", "(", "addresses", "=", "None", ",", "grant_by_default", "=", "False", ",", "server", "=", "_DEFAULT_SERVER", ")", ":", "setting", "=", "'IPGrant'", "formatted_addresses", "=", "list", "(", ")", "# It's okay to accept an empty list for set_connection_ip_list,", "# since an empty list may be desirable.", "if", "not", "addresses", ":", "addresses", "=", "dict", "(", ")", "_LOG", ".", "debug", "(", "'Empty %s specified.'", ",", "setting", ")", "# Convert addresses to the 'ip_address, subnet' format used by", "# IIsIPSecuritySetting.", "for", "address", "in", "addresses", ":", "formatted_addresses", ".", "append", "(", "'{0}, {1}'", ".", "format", "(", "address", ".", "strip", "(", ")", ",", "addresses", "[", "address", "]", ".", "strip", "(", ")", ")", ")", "current_addresses", "=", "get_connection_ip_list", "(", "as_wmi_format", "=", "True", ",", "server", "=", "server", ")", "# Order is not important, so compare to the current addresses as unordered sets.", "if", "set", "(", "formatted_addresses", ")", "==", "set", "(", "current_addresses", ")", ":", "_LOG", ".", "debug", "(", "'%s already contains the provided addresses.'", ",", "setting", ")", "return", "True", "# First we should check GrantByDefault, and change it if necessary.", "current_grant_by_default", "=", "_get_wmi_setting", "(", "'IIsIPSecuritySetting'", ",", "'GrantByDefault'", ",", "server", ")", "if", "grant_by_default", "!=", "current_grant_by_default", ":", "_LOG", ".", "debug", "(", "'Setting GrantByDefault to: %s'", ",", "grant_by_default", ")", "_set_wmi_setting", "(", "'IIsIPSecuritySetting'", ",", "'GrantByDefault'", ",", "grant_by_default", ",", "server", ")", "_set_wmi_setting", "(", "'IIsIPSecuritySetting'", ",", "setting", ",", "formatted_addresses", ",", "server", ")", "new_addresses", "=", "get_connection_ip_list", "(", "as_wmi_format", "=", "True", ",", "server", "=", "server", ")", "ret", "=", "set", "(", "formatted_addresses", ")", "==", "set", "(", "new_addresses", ")", "if", "ret", ":", "_LOG", ".", "debug", "(", "'%s configured successfully: %s'", ",", "setting", ",", "formatted_addresses", ")", "return", "ret", "_LOG", ".", "error", "(", "'Unable to configure %s with value: %s'", ",", "setting", ",", "formatted_addresses", ")", "return", "ret" ]
Set the IPGrant list for the SMTP virtual server.

:param str addresses: A dictionary of IP + subnet pairs.
:param bool grant_by_default: Whether the addresses should be a blacklist or whitelist.
:param str server: The SMTP server name.

:return: A boolean representing whether the change succeeded.
:rtype: bool

CLI Example:

.. code-block:: bash

    salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}"
[ "Set", "the", "IPGrant", "list", "for", "the", "SMTP", "virtual", "server", "." ]
python
train
micahhausler/container-transform
container_transform/kubernetes.py
https://github.com/micahhausler/container-transform/blob/68223fae98f30b8bb2ce0f02ba9e58afbc80f196/container_transform/kubernetes.py#L267-L310
def ingest_memory(self, memory):
    """
    Transform the memory into bytes

    :param memory: Compose memory definition. (1g, 24k)
    :type memory: memory string or integer
    :return: The memory in bytes
    :rtype: int
    """
    def lshift(num, shift):
        return num << shift

    def k(num, thousands):
        return num * thousands

    # if isinstance(memory, int):
    #     # Memory was specified as an integer, meaning it is in bytes
    memory = str(memory)

    bit_shift = {
        'E': {'func': k, 'shift': 10e17},
        'P': {'func': k, 'shift': 10e14},
        'T': {'func': k, 'shift': 10e11},
        'G': {'func': k, 'shift': 10e8},
        'M': {'func': k, 'shift': 10e5},
        'K': {'func': k, 'shift': 10e2},
        'Ei': {'func': lshift, 'shift': 60},
        'Pi': {'func': lshift, 'shift': 50},
        'Ti': {'func': lshift, 'shift': 40},
        'Gi': {'func': lshift, 'shift': 30},
        'Mi': {'func': lshift, 'shift': 20},
        'Ki': {'func': lshift, 'shift': 10},
    }
    if len(memory) > 2 and memory[-2:] in bit_shift.keys():
        unit = memory[-2:]
        number = int(memory[:-2])
        memory = bit_shift[unit]['func'](number, bit_shift[unit]['shift'])
    elif len(memory) > 1 and memory[-1:] in bit_shift.keys():
        unit = memory[-1]
        number = int(memory[:-1])
        memory = bit_shift[unit]['func'](number, bit_shift[unit]['shift'])

    # Cast to a float to properly consume scientific notation
    return int(float(memory))
[ "def", "ingest_memory", "(", "self", ",", "memory", ")", ":", "def", "lshift", "(", "num", ",", "shift", ")", ":", "return", "num", "<<", "shift", "def", "k", "(", "num", ",", "thousands", ")", ":", "return", "num", "*", "thousands", "# if isinstance(memory, int):", "# # Memory was specified as an integer, meaning it is in bytes", "memory", "=", "str", "(", "memory", ")", "bit_shift", "=", "{", "'E'", ":", "{", "'func'", ":", "k", ",", "'shift'", ":", "10e17", "}", ",", "'P'", ":", "{", "'func'", ":", "k", ",", "'shift'", ":", "10e14", "}", ",", "'T'", ":", "{", "'func'", ":", "k", ",", "'shift'", ":", "10e11", "}", ",", "'G'", ":", "{", "'func'", ":", "k", ",", "'shift'", ":", "10e8", "}", ",", "'M'", ":", "{", "'func'", ":", "k", ",", "'shift'", ":", "10e5", "}", ",", "'K'", ":", "{", "'func'", ":", "k", ",", "'shift'", ":", "10e2", "}", ",", "'Ei'", ":", "{", "'func'", ":", "lshift", ",", "'shift'", ":", "60", "}", ",", "'Pi'", ":", "{", "'func'", ":", "lshift", ",", "'shift'", ":", "50", "}", ",", "'Ti'", ":", "{", "'func'", ":", "lshift", ",", "'shift'", ":", "40", "}", ",", "'Gi'", ":", "{", "'func'", ":", "lshift", ",", "'shift'", ":", "30", "}", ",", "'Mi'", ":", "{", "'func'", ":", "lshift", ",", "'shift'", ":", "20", "}", ",", "'Ki'", ":", "{", "'func'", ":", "lshift", ",", "'shift'", ":", "10", "}", ",", "}", "if", "len", "(", "memory", ")", ">", "2", "and", "memory", "[", "-", "2", ":", "]", "in", "bit_shift", ".", "keys", "(", ")", ":", "unit", "=", "memory", "[", "-", "2", ":", "]", "number", "=", "int", "(", "memory", "[", ":", "-", "2", "]", ")", "memory", "=", "bit_shift", "[", "unit", "]", "[", "'func'", "]", "(", "number", ",", "bit_shift", "[", "unit", "]", "[", "'shift'", "]", ")", "elif", "len", "(", "memory", ")", ">", "1", "and", "memory", "[", "-", "1", ":", "]", "in", "bit_shift", ".", "keys", "(", ")", ":", "unit", "=", "memory", "[", "-", "1", "]", "number", "=", "int", "(", "memory", "[", ":", "-", "1", "]", ")", "memory", "=", "bit_shift", "[", "unit", "]", "[", "'func'", "]", "(", "number", ",", "bit_shift", "[", "unit", "]", "[", "'shift'", "]", ")", "# Cast to a float to properly consume scientific notation", "return", "int", "(", "float", "(", "memory", ")", ")" ]
Transform the memory into bytes

:param memory: Compose memory definition. (1g, 24k)
:type memory: memory string or integer
:return: The memory in bytes
:rtype: int
[ "Transform", "the", "memory", "into", "bytes" ]
python
train
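The suffix arithmetic from the record above, worked standalone. Note the lookup table is case-sensitive as written, so a lowercase suffix such as the docstring's '24k' would miss the table and fail in the final float cast.

# Binary suffixes bit-shift; decimal suffixes multiply (10e5 == 1e6):
print(1 << 30)             # 'Gi' -> 1073741824 bytes (one gibibyte)
print(int(128 * 10e5))     # 'M'  -> 128000000 bytes (128 decimal megabytes)
print(int(float('2048')))  # bare integers pass straight through -> 2048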
materialsvirtuallab/monty
monty/json.py
https://github.com/materialsvirtuallab/monty/blob/d99d6f3c68372d83489d28ff515566c93cd569e2/monty/json.py#L74-L120
def as_dict(self):
    """
    A JSON serializable dict representation of an object.
    """
    d = {"@module": self.__class__.__module__,
         "@class": self.__class__.__name__}

    try:
        parent_module = self.__class__.__module__.split('.')[0]
        module_version = import_module(parent_module).__version__
        d["@version"] = u"{}".format(module_version)
    except AttributeError:
        d["@version"] = None

    args = getargspec(self.__class__.__init__).args

    def recursive_as_dict(obj):
        if isinstance(obj, (list, tuple)):
            return [recursive_as_dict(it) for it in obj]
        elif isinstance(obj, dict):
            return {kk: recursive_as_dict(vv) for kk, vv in obj.items()}
        elif hasattr(obj, "as_dict"):
            return obj.as_dict()
        return obj

    for c in args:
        if c != "self":
            try:
                a = self.__getattribute__(c)
            except AttributeError:
                try:
                    a = self.__getattribute__("_" + c)
                except AttributeError:
                    raise NotImplementedError(
                        "Unable to automatically determine as_dict "
                        "format from class. MSONAble requires all "
                        "args to be present as either self.argname or "
                        "self._argname, and kwargs to be present under"
                        "a self.kwargs variable to automatically "
                        "determine the dict format. Alternatively, "
                        "you can implement both as_dict and from_dict.")
            d[c] = recursive_as_dict(a)
    if hasattr(self, "kwargs"):
        d.update(**self.kwargs)
    if hasattr(self, "_kwargs"):
        d.update(**self._kwargs)
    return d
[ "def", "as_dict", "(", "self", ")", ":", "d", "=", "{", "\"@module\"", ":", "self", ".", "__class__", ".", "__module__", ",", "\"@class\"", ":", "self", ".", "__class__", ".", "__name__", "}", "try", ":", "parent_module", "=", "self", ".", "__class__", ".", "__module__", ".", "split", "(", "'.'", ")", "[", "0", "]", "module_version", "=", "import_module", "(", "parent_module", ")", ".", "__version__", "d", "[", "\"@version\"", "]", "=", "u\"{}\"", ".", "format", "(", "module_version", ")", "except", "AttributeError", ":", "d", "[", "\"@version\"", "]", "=", "None", "args", "=", "getargspec", "(", "self", ".", "__class__", ".", "__init__", ")", ".", "args", "def", "recursive_as_dict", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "[", "recursive_as_dict", "(", "it", ")", "for", "it", "in", "obj", "]", "elif", "isinstance", "(", "obj", ",", "dict", ")", ":", "return", "{", "kk", ":", "recursive_as_dict", "(", "vv", ")", "for", "kk", ",", "vv", "in", "obj", ".", "items", "(", ")", "}", "elif", "hasattr", "(", "obj", ",", "\"as_dict\"", ")", ":", "return", "obj", ".", "as_dict", "(", ")", "return", "obj", "for", "c", "in", "args", ":", "if", "c", "!=", "\"self\"", ":", "try", ":", "a", "=", "self", ".", "__getattribute__", "(", "c", ")", "except", "AttributeError", ":", "try", ":", "a", "=", "self", ".", "__getattribute__", "(", "\"_\"", "+", "c", ")", "except", "AttributeError", ":", "raise", "NotImplementedError", "(", "\"Unable to automatically determine as_dict \"", "\"format from class. MSONAble requires all \"", "\"args to be present as either self.argname or \"", "\"self._argname, and kwargs to be present under\"", "\"a self.kwargs variable to automatically \"", "\"determine the dict format. Alternatively, \"", "\"you can implement both as_dict and from_dict.\"", ")", "d", "[", "c", "]", "=", "recursive_as_dict", "(", "a", ")", "if", "hasattr", "(", "self", ",", "\"kwargs\"", ")", ":", "d", ".", "update", "(", "*", "*", "self", ".", "kwargs", ")", "if", "hasattr", "(", "self", ",", "\"_kwargs\"", ")", ":", "d", ".", "update", "(", "*", "*", "self", ".", "_kwargs", ")", "return", "d" ]
A JSON serializable dict representation of an object.
[ "A", "JSON", "serializable", "dict", "representation", "of", "an", "object", "." ]
python
train
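A minimal sketch of the introspection in the record above: __init__ args are mirrored as attributes, so serialization is automatic. The Point class is illustrative, not from monty.

from monty.json import MSONable

class Point(MSONable):
    def __init__(self, x, y):
        self.x = x   # attribute name matches the __init__ arg, so as_dict finds it
        self.y = y

p = Point(1, 2)
print(p.as_dict())
# {'@module': '__main__', '@class': 'Point', '@version': None, 'x': 1, 'y': 2}
# ('@version' is None here because '__main__' has no __version__ attribute)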
markuskiller/textblob-de
textblob_de/tokenizers.py
https://github.com/markuskiller/textblob-de/blob/1b427b2cdd7e5e9fd3697677a98358fae4aa6ad1/textblob_de/tokenizers.py#L90-L132
def word_tokenize(self, text, include_punc=True):
    """The Treebank tokenizer uses regular expressions to tokenize text as
    in Penn Treebank.

    It assumes that the text has already been segmented into sentences,
    e.g. using ``self.sent_tokenize()``.

    This tokenizer performs the following steps:

    - split standard contractions, e.g. ``don't`` -> ``do n't``
      and ``they'll`` -> ``they 'll``
    - treat most punctuation characters as separate tokens
    - split off commas and single quotes, when followed by whitespace
    - separate periods that appear at the end of line

    Source: NLTK's docstring of ``TreebankWordTokenizer`` (accessed: 02/10/2014)
    """
    #: Do not process empty strings (Issue #3)
    if text.strip() == "":
        return []
    _tokens = self.word_tok.tokenize(text)
    #: Handle strings consisting of a single punctuation mark separately (Issue #4)
    if len(_tokens) == 1:
        if _tokens[0] in PUNCTUATION:
            if include_punc:
                return _tokens
            else:
                return []
    if include_punc:
        return _tokens
    else:
        # Return each word token
        # Strips punctuation unless the word comes from a contraction
        # e.g. "gibt's" => ["gibt", "'s"] in "Heute gibt's viel zu tun!"
        # e.g. "hat's" => ["hat", "'s"]
        # e.g. "home." => ['home']
        words = [
            word if word.startswith("'") else strip_punc(word, all=False)
            for word in _tokens if strip_punc(word, all=False)]
        return list(words)
[ "def", "word_tokenize", "(", "self", ",", "text", ",", "include_punc", "=", "True", ")", ":", "#: Do not process empty strings (Issue #3)", "if", "text", ".", "strip", "(", ")", "==", "\"\"", ":", "return", "[", "]", "_tokens", "=", "self", ".", "word_tok", ".", "tokenize", "(", "text", ")", "#: Handle strings consisting of a single punctuation mark seperately (Issue #4)", "if", "len", "(", "_tokens", ")", "==", "1", ":", "if", "_tokens", "[", "0", "]", "in", "PUNCTUATION", ":", "if", "include_punc", ":", "return", "_tokens", "else", ":", "return", "[", "]", "if", "include_punc", ":", "return", "_tokens", "else", ":", "# Return each word token", "# Strips punctuation unless the word comes from a contraction", "# e.g. \"gibt's\" => [\"gibt\", \"'s\"] in \"Heute gibt's viel zu tun!\"", "# e.g. \"hat's\" => [\"hat\", \"'s\"]", "# e.g. \"home.\" => ['home']", "words", "=", "[", "word", "if", "word", ".", "startswith", "(", "\"'\"", ")", "else", "strip_punc", "(", "word", ",", "all", "=", "False", ")", "for", "word", "in", "_tokens", "if", "strip_punc", "(", "word", ",", "all", "=", "False", ")", "]", "return", "list", "(", "words", ")" ]
The Treebank tokenizer uses regular expressions to tokenize text as in
Penn Treebank.

It assumes that the text has already been segmented into sentences,
e.g. using ``self.sent_tokenize()``.

This tokenizer performs the following steps:

- split standard contractions, e.g. ``don't`` -> ``do n't``
  and ``they'll`` -> ``they 'll``
- treat most punctuation characters as separate tokens
- split off commas and single quotes, when followed by whitespace
- separate periods that appear at the end of line

Source: NLTK's docstring of ``TreebankWordTokenizer`` (accessed: 02/10/2014)
[ "The", "Treebank", "tokenizer", "uses", "regular", "expressions", "to", "tokenize", "text", "as", "in", "Penn", "Treebank", "." ]
python
train
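An illustrative call for the record above; the tokenizer class name is an assumption about textblob-de's public API, and the expected outputs follow the record's own inline comments:

tok = NLTKPunktTokenizer()   # class name assumed from textblob_de.tokenizers
print(tok.word_tokenize("Heute gibt's viel zu tun!"))
# ['Heute', 'gibt', "'s", 'viel', 'zu', 'tun', '!']
print(tok.word_tokenize("Heute gibt's viel zu tun!", include_punc=False))
# ['Heute', 'gibt', "'s", 'viel', 'zu', 'tun']   (punctuation stripped, contraction kept)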
sibirrer/lenstronomy
lenstronomy/Util/prob_density.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/Util/prob_density.py#L59-L69
def _w_sigma_delta(self, sigma, delta):
    """
    invert variance
    :param sigma:
    :param delta:
    :return: w parameter
    """
    sigma2 = sigma**2
    w2 = sigma2 / (1 - 2*delta**2/np.pi)
    w = np.sqrt(w2)
    return w
[ "def", "_w_sigma_delta", "(", "self", ",", "sigma", ",", "delta", ")", ":", "sigma2", "=", "sigma", "**", "2", "w2", "=", "sigma2", "/", "(", "1", "-", "2", "*", "delta", "**", "2", "/", "np", ".", "pi", ")", "w", "=", "np", ".", "sqrt", "(", "w2", ")", "return", "w" ]
invert variance

:param sigma:
:param delta:
:return: w parameter
[ "invert", "variance", ":", "param", "sigma", ":", ":", "param", "delta", ":", ":", "return", ":", "w", "parameter" ]
python
train
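For context, this appears to be the standard skew-normal variance relation sigma^2 = w^2 (1 - 2 delta^2 / pi), solved for the scale w = sigma / sqrt(1 - 2 delta^2 / pi). A quick round-trip check (delta = 0 collapses to a plain normal with w = sigma):

import numpy as np

sigma, delta = 2.0, 0.5
w = np.sqrt(sigma**2 / (1 - 2*delta**2/np.pi))
# The variance implied by (w, delta) recovers sigma**2:
assert np.isclose(w**2 * (1 - 2*delta**2/np.pi), sigma**2)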
jaraco/keyrings.alt
keyrings/alt/multi.py
https://github.com/jaraco/keyrings.alt/blob/5b71223d12bf9ac6abd05b1b395f1efccb5ea660/keyrings/alt/multi.py#L24-L41
def get_password(self, service, username):
    """Get password of the username for the service
    """
    init_part = self._keyring.get_password(service, username)
    if init_part:
        parts = [init_part]
        i = 1
        while True:
            next_part = self._keyring.get_password(
                service,
                '%s{{part_%d}}' % (username, i))
            if next_part:
                parts.append(next_part)
                i += 1
            else:
                break
        return ''.join(parts)
    return None
[ "def", "get_password", "(", "self", ",", "service", ",", "username", ")", ":", "init_part", "=", "self", ".", "_keyring", ".", "get_password", "(", "service", ",", "username", ")", "if", "init_part", ":", "parts", "=", "[", "init_part", "]", "i", "=", "1", "while", "True", ":", "next_part", "=", "self", ".", "_keyring", ".", "get_password", "(", "service", ",", "'%s{{part_%d}}'", "%", "(", "username", ",", "i", ")", ")", "if", "next_part", ":", "parts", ".", "append", "(", "next_part", ")", "i", "+=", "1", "else", ":", "break", "return", "''", ".", "join", "(", "parts", ")", "return", "None" ]
Get password of the username for the service
[ "Get", "password", "of", "the", "username", "for", "the", "service" ]
python
train
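The chunk-reassembly loop from the record above, replayed against an illustrative stand-in for the backing keyring's stored entries:

stored = {
    ('svc', 'alice'): 'abc',                 # initial part under the plain username
    ('svc', 'alice{{part_1}}'): 'def',       # overflow chunks use the {{part_N}} suffix
    ('svc', 'alice{{part_2}}'): 'ghi',
}

parts = [stored[('svc', 'alice')]]
i = 1
while ('svc', 'alice{{part_%d}}' % i) in stored:
    parts.append(stored[('svc', 'alice{{part_%d}}' % i)])
    i += 1
print(''.join(parts))   # 'abcdefghi'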
evhub/coconut
coconut/compiler/grammar.py
https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/grammar.py#L563-L574
def subscriptgroup_handle(tokens):
    """Process subscriptgroups."""
    internal_assert(0 < len(tokens) <= 3, "invalid slice args", tokens)
    args = []
    for arg in tokens:
        if not arg:
            arg = "None"
        args.append(arg)
    if len(args) == 1:
        return args[0]
    else:
        return "_coconut.slice(" + ", ".join(args) + ")"
[ "def", "subscriptgroup_handle", "(", "tokens", ")", ":", "internal_assert", "(", "0", "<", "len", "(", "tokens", ")", "<=", "3", ",", "\"invalid slice args\"", ",", "tokens", ")", "args", "=", "[", "]", "for", "arg", "in", "tokens", ":", "if", "not", "arg", ":", "arg", "=", "\"None\"", "args", ".", "append", "(", "arg", ")", "if", "len", "(", "args", ")", "==", "1", ":", "return", "args", "[", "0", "]", "else", ":", "return", "\"_coconut.slice(\"", "+", "\", \"", ".", "join", "(", "args", ")", "+", "\")\"" ]
Process subscriptgroups.
[ "Process", "subscriptgroups", "." ]
python
train
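Illustrative input/output pairs for the handler above (`_coconut.slice` is Coconut's runtime alias for the builtin slice; an empty string stands for an omitted slice part):

# tokens           -> returned code string
# ['x']            -> 'x'                              # plain subscript, a[x]
# ['1', '10']      -> '_coconut.slice(1, 10)'          # a[1:10]
# ['', '', '2']    -> '_coconut.slice(None, None, 2)'  # a[::2]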
tomplus/kubernetes_asyncio
kubernetes_asyncio/client/api/apiextensions_v1beta1_api.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/client/api/apiextensions_v1beta1_api.py#L143-L171
def delete_collection_custom_resource_definition(self, **kwargs):  # noqa: E501
    """delete_collection_custom_resource_definition  # noqa: E501

    delete collection of CustomResourceDefinition  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_collection_custom_resource_definition(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param bool include_uninitialized: If true, partially initialized resources are included in the response.
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
    :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
    :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
    :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
    :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
    :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
    :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
    :return: V1Status
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_collection_custom_resource_definition_with_http_info(**kwargs)  # noqa: E501
    else:
        (data) = self.delete_collection_custom_resource_definition_with_http_info(**kwargs)  # noqa: E501
        return data
[ "def", "delete_collection_custom_resource_definition", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "delete_collection_custom_resource_definition_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "delete_collection_custom_resource_definition_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
delete_collection_custom_resource_definition  # noqa: E501

delete collection of CustomResourceDefinition  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_custom_resource_definition(async_req=True)
>>> result = thread.get()

:param async_req bool
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
         If the method is called asynchronously,
         returns the request thread.
[ "delete_collection_custom_resource_definition", "#", "noqa", ":", "E501" ]
python
train
wdbm/abstraction
es-1.py
https://github.com/wdbm/abstraction/blob/58c81e73954cc6b4cd2f79b2216467528a96376b/es-1.py#L117-L175
def deepdream(
    net,
    base_image,
    iter_n       = 10,
    octave_n     = 4,
    octave_scale = 1.4,
    end          = "inception_4c/output",
    clip         = True,
    **step_params
    ):
    '''
    an ascent through different scales called "octaves"
    '''
    # Prepare base images for all octaves.
    octaves = [preprocess(net, base_image)]
    for i in xrange(octave_n - 1):
        octaves.append(nd.zoom(
            octaves[-1],
            (1, 1.0 / octave_scale, 1.0 / octave_scale),
            order = 1
        ))
    src = net.blobs["data"]
    # Allocate image for network-produced details.
    detail = np.zeros_like(octaves[-1])
    for octave, octave_base in enumerate(octaves[::-1]):
        h, w = octave_base.shape[-2:]
        if octave > 0:
            # Upscale details from the previous octave.
            h1, w1 = detail.shape[-2:]
            detail = nd.zoom(
                detail,
                (1, 1.0 * h / h1, 1.0 * w / w1),
                order = 1
            )
        # Resize the network input image size.
        src.reshape(1, 3, h, w)
        src.data[0] = octave_base + detail
        for i in xrange(iter_n):
            make_step(net, end = end, clip = clip, **step_params)
            # visualisation
            vis = deprocess(net, src.data[0])
            # If clipping is disabled, adjust image contrast.
            if not clip:
                vis = vis * (255.0 / np.percentile(vis, 99.98))
            log.info("octave: {octave}, index: {index}, blob/layer: {end}, dimensions: {shape}".format(
                octave = octave,
                index  = i,
                end    = end,
                shape  = vis.shape,
            ))
        # Extract details produced on the current octave.
        detail = src.data[0] - octave_base
    # Return the resulting image.
    return deprocess(net, src.data[0])
[ "def", "deepdream", "(", "net", ",", "base_image", ",", "iter_n", "=", "10", ",", "octave_n", "=", "4", ",", "octave_scale", "=", "1.4", ",", "end", "=", "\"inception_4c/output\"", ",", "clip", "=", "True", ",", "*", "*", "step_params", ")", ":", "# Prepare base images for all octaves.", "octaves", "=", "[", "preprocess", "(", "net", ",", "base_image", ")", "]", "for", "i", "in", "xrange", "(", "octave_n", "-", "1", ")", ":", "octaves", ".", "append", "(", "nd", ".", "zoom", "(", "octaves", "[", "-", "1", "]", ",", "(", "1", ",", "1.0", "/", "octave_scale", ",", "1.0", "/", "octave_scale", ")", ",", "order", "=", "1", ")", ")", "src", "=", "net", ".", "blobs", "[", "\"data\"", "]", "# Allocate image for network-produced details.", "detail", "=", "np", ".", "zeros_like", "(", "octaves", "[", "-", "1", "]", ")", "for", "octave", ",", "octave_base", "in", "enumerate", "(", "octaves", "[", ":", ":", "-", "1", "]", ")", ":", "h", ",", "w", "=", "octave_base", ".", "shape", "[", "-", "2", ":", "]", "if", "octave", ">", "0", ":", "# Upscale details from the previous octave.", "h1", ",", "w1", "=", "detail", ".", "shape", "[", "-", "2", ":", "]", "detail", "=", "nd", ".", "zoom", "(", "detail", ",", "(", "1", ",", "1.0", "*", "h", "/", "h1", ",", "1.0", "*", "w", "/", "w1", ")", ",", "order", "=", "1", ")", "# Resize the network input image size.", "src", ".", "reshape", "(", "1", ",", "3", ",", "h", ",", "w", ")", "src", ".", "data", "[", "0", "]", "=", "octave_base", "+", "detail", "for", "i", "in", "xrange", "(", "iter_n", ")", ":", "make_step", "(", "net", ",", "end", "=", "end", ",", "clip", "=", "clip", ",", "*", "*", "step_params", ")", "# visualisation", "vis", "=", "deprocess", "(", "net", ",", "src", ".", "data", "[", "0", "]", ")", "# If clipping is disabled, adjust image contrast.", "if", "not", "clip", ":", "vis", "=", "vis", "*", "(", "255.0", "/", "np", ".", "percentile", "(", "vis", ",", "99.98", ")", ")", "log", ".", "info", "(", "\"octave: {octave}, index: {index}, blob/layer: {end}, dimensions: {shape}\"", ".", "format", "(", "octave", "=", "octave", ",", "index", "=", "i", ",", "end", "=", "end", ",", "shape", "=", "vis", ".", "shape", ",", ")", ")", "# Extract details produced on the current octave.", "detail", "=", "src", ".", "data", "[", "0", "]", "-", "octave_base", "# Return the resulting image.", "return", "deprocess", "(", "net", ",", "src", ".", "data", "[", "0", "]", ")" ]
an ascent through different scales called "octaves"
[ "an", "ascent", "through", "different", "scales", "called", "octaves" ]
python
train
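The octave pyramid arithmetic from the record above, in isolation: each level shrinks height and width by octave_scale, and the dream then ascends from the smallest level back up. The image size is illustrative, and the real code uses nd.zoom repeatedly, which rounds at each step, so actual sizes can differ by a pixel or two.

octave_n, octave_scale = 4, 1.4
h = w = 560
for i in range(octave_n):
    print(i, int(h / octave_scale**i), int(w / octave_scale**i))
# 0 560 560
# 1 400 400
# 2 285 285
# 3 204 204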
freakboy3742/pyxero
xero/auth.py
https://github.com/freakboy3742/pyxero/blob/5566f17fa06ed1f2fb9426c112951a72276b0f9a/xero/auth.py#L260-L279
def verify(self, verifier):
    "Verify an OAuth token"

    # Construct the credentials for the verification request
    oauth = OAuth1(
        self.consumer_key,
        client_secret=self.consumer_secret,
        resource_owner_key=self.oauth_token,
        resource_owner_secret=self.oauth_token_secret,
        verifier=verifier,
        rsa_key=self.rsa_key,
        signature_method=self._signature_method
    )

    # Make the verification request, getting back an access token
    url = self.base_url + ACCESS_TOKEN_URL
    headers = {'User-Agent': self.user_agent}
    response = requests.post(url=url, headers=headers, auth=oauth)
    self._process_oauth_response(response)
    self.verified = True
[ "def", "verify", "(", "self", ",", "verifier", ")", ":", "# Construct the credentials for the verification request", "oauth", "=", "OAuth1", "(", "self", ".", "consumer_key", ",", "client_secret", "=", "self", ".", "consumer_secret", ",", "resource_owner_key", "=", "self", ".", "oauth_token", ",", "resource_owner_secret", "=", "self", ".", "oauth_token_secret", ",", "verifier", "=", "verifier", ",", "rsa_key", "=", "self", ".", "rsa_key", ",", "signature_method", "=", "self", ".", "_signature_method", ")", "# Make the verification request, gettiung back an access token", "url", "=", "self", ".", "base_url", "+", "ACCESS_TOKEN_URL", "headers", "=", "{", "'User-Agent'", ":", "self", ".", "user_agent", "}", "response", "=", "requests", ".", "post", "(", "url", "=", "url", ",", "headers", "=", "headers", ",", "auth", "=", "oauth", ")", "self", ".", "_process_oauth_response", "(", "response", ")", "self", ".", "verified", "=", "True" ]
Verify an OAuth token
[ "Verify", "an", "OAuth", "token" ]
python
train
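A hedged sketch of where this method sits in pyxero's public-credentials flow; the verifier value is illustrative:

# credentials = PublicCredentials(consumer_key, consumer_secret)
# 1. send the user to credentials.url to authorize the app with Xero
# 2. Xero displays a short verifier code, which completes the handshake:
# credentials.verify('123456')
# assert credentials.verified   # set by the method in the record above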
ninuxorg/nodeshot
nodeshot/core/nodes/views.py
https://github.com/ninuxorg/nodeshot/blob/2466f0a55f522b2696026f196436ce7ba3f1e5c6/nodeshot/core/nodes/views.py#L56-L79
def get_queryset(self):
    """
    Optionally restricts the returned nodes
    by filtering against a `search` query parameter in the URL.
    """
    # retrieve all nodes which are published and accessible to current user
    # and use joins to retrieve related fields
    queryset = super(NodeList, self).get_queryset().select_related('status', 'user', 'layer')

    # query string params
    search = self.request.query_params.get('search', None)
    layers = self.request.query_params.get('layers', None)

    if search is not None:
        search_query = (
            Q(name__icontains=search) |
            Q(slug__icontains=search) |
            Q(description__icontains=search) |
            Q(address__icontains=search)
        )
        # add instructions for search to queryset
        queryset = queryset.filter(search_query)

    if layers is not None:
        # look for nodes that are assigned to the specified layers
        queryset = queryset.filter(Q(layer__slug__in=layers.split(',')))

    return queryset
[ "def", "get_queryset", "(", "self", ")", ":", "# retrieve all nodes which are published and accessible to current user", "# and use joins to retrieve related fields", "queryset", "=", "super", "(", "NodeList", ",", "self", ")", ".", "get_queryset", "(", ")", ".", "select_related", "(", "'status'", ",", "'user'", ",", "'layer'", ")", "# query string params", "search", "=", "self", ".", "request", ".", "query_params", ".", "get", "(", "'search'", ",", "None", ")", "layers", "=", "self", ".", "request", ".", "query_params", ".", "get", "(", "'layers'", ",", "None", ")", "if", "search", "is", "not", "None", ":", "search_query", "=", "(", "Q", "(", "name__icontains", "=", "search", ")", "|", "Q", "(", "slug__icontains", "=", "search", ")", "|", "Q", "(", "description__icontains", "=", "search", ")", "|", "Q", "(", "address__icontains", "=", "search", ")", ")", "# add instructions for search to queryset", "queryset", "=", "queryset", ".", "filter", "(", "search_query", ")", "if", "layers", "is", "not", "None", ":", "# look for nodes that are assigned to the specified layers", "queryset", "=", "queryset", ".", "filter", "(", "Q", "(", "layer__slug__in", "=", "layers", ".", "split", "(", "','", ")", ")", ")", "return", "queryset" ]
Optionally restricts the returned nodes by filtering against a `search` query parameter in the URL.
[ "Optionally", "restricts", "the", "returned", "nodes", "by", "filtering", "against", "a", "search", "query", "parameter", "in", "the", "URL", "." ]
python
train
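The same Q-object filtering in isolation, as a sketch; it assumes a configured Django project and a queryset whose model has name, slug, description, address and layer fields, so it is not runnable standalone.

from django.db.models import Q

def filter_nodes(queryset, search=None, layers=None):
    # Mirrors the record's filtering: OR across text fields for search,
    # membership test for a comma-separated layer list.
    if search is not None:
        queryset = queryset.filter(
            Q(name__icontains=search) |
            Q(slug__icontains=search) |
            Q(description__icontains=search) |
            Q(address__icontains=search)
        )
    if layers is not None:
        queryset = queryset.filter(layer__slug__in=layers.split(','))
    return queryset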
bitprophet/ssh
ssh/channel.py
https://github.com/bitprophet/ssh/blob/e8bdad4c82a50158a749233dca58c29e47c60b76/ssh/channel.py#L767-L791
def sendall(self, s): """ Send data to the channel, without allowing partial results. Unlike L{send}, this method continues to send data from the given string until either all data has been sent or an error occurs. Nothing is returned. @param s: data to send. @type s: str @raise socket.timeout: if sending stalled for longer than the timeout set by L{settimeout}. @raise socket.error: if an error occurred before the entire string was sent. @note: If the channel is closed while only part of the data has been sent, there is no way to determine how much data (if any) was sent. This is irritating, but identically follows python's API. """ while s: if self.closed: # this doesn't seem useful, but it is the documented behavior of Socket raise socket.error('Socket is closed') sent = self.send(s) s = s[sent:] return None
[ "def", "sendall", "(", "self", ",", "s", ")", ":", "while", "s", ":", "if", "self", ".", "closed", ":", "# this doesn't seem useful, but it is the documented behavior of Socket", "raise", "socket", ".", "error", "(", "'Socket is closed'", ")", "sent", "=", "self", ".", "send", "(", "s", ")", "s", "=", "s", "[", "sent", ":", "]", "return", "None" ]
Send data to the channel, without allowing partial results. Unlike L{send}, this method continues to send data from the given string until either all data has been sent or an error occurs. Nothing is returned. @param s: data to send. @type s: str @raise socket.timeout: if sending stalled for longer than the timeout set by L{settimeout}. @raise socket.error: if an error occurred before the entire string was sent. @note: If the channel is closed while only part of the data has been sent, there is no way to determine how much data (if any) was sent. This is irritating, but identically follows python's API.
[ "Send", "data", "to", "the", "channel", "without", "allowing", "partial", "results", ".", "Unlike", "L", "{", "send", "}", "this", "method", "continues", "to", "send", "data", "from", "the", "given", "string", "until", "either", "all", "data", "has", "been", "sent", "or", "an", "error", "occurs", ".", "Nothing", "is", "returned", "." ]
python
train
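The same keep-sending loop over a plain socket, as a minimal sketch; the host and port in the commented usage are hypothetical.

import socket

def send_all(sock, data):
    # Keep calling send() until every byte is accepted, the same loop
    # the channel method above runs over its own send().
    while data:
        sent = sock.send(data)
        data = data[sent:]

# Hypothetical usage against a local echo server:
# sock = socket.create_connection(("127.0.0.1", 9000))
# send_all(sock, b"payload")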
mojaie/chorus
chorus/model/graphmol.py
https://github.com/mojaie/chorus/blob/fc7fe23a0272554c67671645ab07830b315eeb1b/chorus/model/graphmol.py#L145-L166
def add_molecule(self, mol, bond=None, base=None, target=None): """connect atom group (for SMILES parser) May require recalculation of 2D coordinate for drawing Args: mol: graphmol.Compound() the original object will be copied. bond: Bond object to be connected. the original will not be copied so be careful. base: index of atom in self to connect target: index of atom in group to be connected Raises: TypeError """ ai = self.available_idx() mapping = {n: n + ai - 1 for n, _ in mol.atoms_iter()} relabeled = nx.relabel_nodes(mol.graph, mapping) # copy=True self.graph.add_nodes_from(relabeled.nodes(data=True)) self.graph.add_edges_from(relabeled.edges(data=True)) if bond: self.add_bond(base, mapping[target], bond)
[ "def", "add_molecule", "(", "self", ",", "mol", ",", "bond", "=", "None", ",", "base", "=", "None", ",", "target", "=", "None", ")", ":", "ai", "=", "self", ".", "available_idx", "(", ")", "mapping", "=", "{", "n", ":", "n", "+", "ai", "-", "1", "for", "n", ",", "_", "in", "mol", ".", "atoms_iter", "(", ")", "}", "relabeled", "=", "nx", ".", "relabel_nodes", "(", "mol", ".", "graph", ",", "mapping", ")", "# copy=True", "self", ".", "graph", ".", "add_nodes_from", "(", "relabeled", ".", "nodes", "(", "data", "=", "True", ")", ")", "self", ".", "graph", ".", "add_edges_from", "(", "relabeled", ".", "edges", "(", "data", "=", "True", ")", ")", "if", "bond", ":", "self", ".", "add_bond", "(", "base", ",", "mapping", "[", "target", "]", ",", "bond", ")" ]
connect atom group (for SMILES parser) May require recalculation of 2D coordinate for drawing Args: mol: graphmol.Compound() the original object will be copied. bond: Bond object to be connected. the original will not be copied so be careful. base: index of atom in self to connect target: index of atom in group to be connected Raises: TypeError
[ "connect", "atom", "group", "(", "for", "SMILES", "parser", ")" ]
python
train
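A runnable networkx sketch of the relabel-and-merge step this record performs, with toy graphs standing in for molecule objects.

import networkx as nx

g1 = nx.Graph([(1, 2), (2, 3)])
g2 = nx.Graph([(1, 2)])

# Shift g2's node labels past g1's highest index, then merge,
# as add_molecule() does before bonding the fragments.
offset = max(g1.nodes)
mapping = {n: n + offset for n in g2.nodes}
relabeled = nx.relabel_nodes(g2, mapping)  # returns a copy
g1.add_nodes_from(relabeled.nodes(data=True))
g1.add_edges_from(relabeled.edges(data=True))
g1.add_edge(3, mapping[1])  # the connecting "bond"
print(sorted(g1.edges))     # [(1, 2), (2, 3), (3, 4), (4, 5)]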
MacHu-GWU/single_file_module-project
sfm/ziplib.py
https://github.com/MacHu-GWU/single_file_module-project/blob/01f7a6b250853bebfd73de275895bf274325cfc1/sfm/ziplib.py#L52-L73
def compress(obj, level=6, return_type="bytes"): """Compress anything to bytes or string. :param obj: could be any object, usually it could be binary, string, or regular python object. :param level: :param return_type: if bytes, then return bytes; if str, then return base64.b64encode bytes in utf-8 string. """ if isinstance(obj, binary_type): b = _compress_bytes(obj, level) elif isinstance(obj, string_types): b = _compress_str(obj, level) else: b = _compress_obj(obj, level) if return_type == "bytes": return b elif return_type == "str": return base64.b64encode(b).decode("utf-8") else: raise ValueError("'return_type' has to be one of 'bytes', 'str'!")
[ "def", "compress", "(", "obj", ",", "level", "=", "6", ",", "return_type", "=", "\"bytes\"", ")", ":", "if", "isinstance", "(", "obj", ",", "binary_type", ")", ":", "b", "=", "_compress_bytes", "(", "obj", ",", "level", ")", "elif", "isinstance", "(", "obj", ",", "string_types", ")", ":", "b", "=", "_compress_str", "(", "obj", ",", "level", ")", "else", ":", "b", "=", "_compress_obj", "(", "obj", ",", "level", ")", "if", "return_type", "==", "\"bytes\"", ":", "return", "b", "elif", "return_type", "==", "\"str\"", ":", "return", "base64", ".", "b64encode", "(", "b", ")", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "raise", "ValueError", "(", "\"'return_type' has to be one of 'bytes', 'str'!\"", ")" ]
Compress anything to bytes or string. :param obj: could be any object, usually it could be binary, string, or regular python object. :param level: :param return_type: if bytes, then return bytes; if str, then return base64.b64encode bytes in utf-8 string.
[ "Compress", "anything", "to", "bytes", "or", "string", "." ]
python
train
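A simplified, runnable round-trip sketch of the compress helper; unlike the original it pickles strings rather than utf-8-encoding them, so treat it as an approximation of the record's behavior.

import base64
import pickle
import zlib

def compress_obj(obj, level=6, return_type="bytes"):
    # Simplified stand-in for the record's helpers: pickle anything
    # non-bytes, deflate it, optionally wrap it in base64 text.
    raw = obj if isinstance(obj, bytes) else pickle.dumps(obj)
    b = zlib.compress(raw, level)
    if return_type == "bytes":
        return b
    return base64.b64encode(b).decode("utf-8")

token = compress_obj({"a": [1, 2, 3]}, return_type="str")
restored = pickle.loads(zlib.decompress(base64.b64decode(token)))
print(restored)  # {'a': [1, 2, 3]}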
thebjorn/pydeps
pydeps/pydeps.py
https://github.com/thebjorn/pydeps/blob/1e6715b7bea47a40e8042821b57937deaaa0fdc3/pydeps/pydeps.py#L66-L98
def externals(trgt, **kwargs): """Return a list of direct external dependencies of ``pkgname``. Called for the ``pydeps --externals`` command. """ kw = dict( T='svg', config=None, debug=False, display=None, exclude=[], externals=True, format='svg', max_bacon=2**65, no_config=True, nodot=False, noise_level=2**65, noshow=True, output=None, pylib=True, pylib_all=True, show=False, show_cycles=False, show_deps=False, show_dot=False, show_raw_deps=False, verbose=0, include_missing=True, ) kw.update(kwargs) depgraph = py2depgraph.py2dep(trgt, **kw) pkgname = trgt.fname log.info("DEPGRAPH: %s", depgraph) pkgname = os.path.splitext(pkgname)[0] res = {} ext = set() for k, src in list(depgraph.sources.items()): if k.startswith('_'): continue if not k.startswith(pkgname): continue if src.imports: imps = [imp for imp in src.imports if not imp.startswith(pkgname)] if imps: for imp in imps: ext.add(imp.split('.')[0]) res[k] = imps # return res # debug return list(sorted(ext))
[ "def", "externals", "(", "trgt", ",", "*", "*", "kwargs", ")", ":", "kw", "=", "dict", "(", "T", "=", "'svg'", ",", "config", "=", "None", ",", "debug", "=", "False", ",", "display", "=", "None", ",", "exclude", "=", "[", "]", ",", "externals", "=", "True", ",", "format", "=", "'svg'", ",", "max_bacon", "=", "2", "**", "65", ",", "no_config", "=", "True", ",", "nodot", "=", "False", ",", "noise_level", "=", "2", "**", "65", ",", "noshow", "=", "True", ",", "output", "=", "None", ",", "pylib", "=", "True", ",", "pylib_all", "=", "True", ",", "show", "=", "False", ",", "show_cycles", "=", "False", ",", "show_deps", "=", "False", ",", "show_dot", "=", "False", ",", "show_raw_deps", "=", "False", ",", "verbose", "=", "0", ",", "include_missing", "=", "True", ",", ")", "kw", ".", "update", "(", "kwargs", ")", "depgraph", "=", "py2depgraph", ".", "py2dep", "(", "trgt", ",", "*", "*", "kw", ")", "pkgname", "=", "trgt", ".", "fname", "log", ".", "info", "(", "\"DEPGRAPH: %s\"", ",", "depgraph", ")", "pkgname", "=", "os", ".", "path", ".", "splitext", "(", "pkgname", ")", "[", "0", "]", "res", "=", "{", "}", "ext", "=", "set", "(", ")", "for", "k", ",", "src", "in", "list", "(", "depgraph", ".", "sources", ".", "items", "(", ")", ")", ":", "if", "k", ".", "startswith", "(", "'_'", ")", ":", "continue", "if", "not", "k", ".", "startswith", "(", "pkgname", ")", ":", "continue", "if", "src", ".", "imports", ":", "imps", "=", "[", "imp", "for", "imp", "in", "src", ".", "imports", "if", "not", "imp", ".", "startswith", "(", "pkgname", ")", "]", "if", "imps", ":", "for", "imp", "in", "imps", ":", "ext", ".", "add", "(", "imp", ".", "split", "(", "'.'", ")", "[", "0", "]", ")", "res", "[", "k", "]", "=", "imps", "# return res # debug", "return", "list", "(", "sorted", "(", "ext", ")", ")" ]
Return a list of direct external dependencies of ``pkgname``. Called for the ``pydeps --externals`` command.
[ "Return", "a", "list", "of", "direct", "external", "dependencies", "of", "pkgname", ".", "Called", "for", "the", "pydeps", "--", "externals", "command", "." ]
python
train
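A runnable sketch of the dependency-filtering loop at the end of externals(), with a hypothetical import map standing in for the depgraph sources.

pkgname = "mypkg"
imports_by_module = {
    "mypkg.core": ["os.path", "requests.sessions", "mypkg.util"],
    "mypkg.util": ["json"],
    "_private": ["sys"],
}

ext = set()
for mod, imps in imports_by_module.items():
    # Skip private modules and anything outside the target package,
    # then keep only the top-level names of external imports.
    if mod.startswith("_") or not mod.startswith(pkgname):
        continue
    for imp in imps:
        if not imp.startswith(pkgname):
            ext.add(imp.split(".")[0])
print(sorted(ext))  # ['json', 'os', 'requests']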
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Debug.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Debug.py#L144-L162
def caller_trace(back=0): """ Trace caller stack and save info into global dicts, which are printed automatically at the end of SCons execution. """ global caller_bases, caller_dicts import traceback tb = traceback.extract_stack(limit=3+back) tb.reverse() callee = tb[1][:3] caller_bases[callee] = caller_bases.get(callee, 0) + 1 for caller in tb[2:]: caller = callee + caller[:3] try: entry = caller_dicts[callee] except KeyError: caller_dicts[callee] = entry = {} entry[caller] = entry.get(caller, 0) + 1 callee = caller
[ "def", "caller_trace", "(", "back", "=", "0", ")", ":", "global", "caller_bases", ",", "caller_dicts", "import", "traceback", "tb", "=", "traceback", ".", "extract_stack", "(", "limit", "=", "3", "+", "back", ")", "tb", ".", "reverse", "(", ")", "callee", "=", "tb", "[", "1", "]", "[", ":", "3", "]", "caller_bases", "[", "callee", "]", "=", "caller_bases", ".", "get", "(", "callee", ",", "0", ")", "+", "1", "for", "caller", "in", "tb", "[", "2", ":", "]", ":", "caller", "=", "callee", "+", "caller", "[", ":", "3", "]", "try", ":", "entry", "=", "caller_dicts", "[", "callee", "]", "except", "KeyError", ":", "caller_dicts", "[", "callee", "]", "=", "entry", "=", "{", "}", "entry", "[", "caller", "]", "=", "entry", ".", "get", "(", "caller", ",", "0", ")", "+", "1", "callee", "=", "caller" ]
Trace caller stack and save info into global dicts, which are printed automatically at the end of SCons execution.
[ "Trace", "caller", "stack", "and", "save", "info", "into", "global", "dicts", "which", "are", "printed", "automatically", "at", "the", "end", "of", "SCons", "execution", "." ]
python
train
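A minimal runnable sketch of the same idea: sample the stack with traceback.extract_stack and count call sites, the raw material caller_trace() aggregates into its global dicts.

import traceback
from collections import Counter

calls = Counter()

def traced():
    # Record (file, line, function) of the immediate caller.
    frame = traceback.extract_stack(limit=2)[0]
    calls[(frame.filename, frame.lineno, frame.name)] += 1

def a(): traced()
def b(): traced()

a(); a(); b()
for site, n in calls.items():
    print(site, n)  # a's call site counted twice, b's once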
numenta/htmresearch
htmresearch/frameworks/grid_cell_learning/CAN.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/grid_cell_learning/CAN.py#L531-L641
def learn(self, runs, dir=1, periodic=False, recurrent=True, randomSpeed=False): """ Traverses a sinusoidal trajectory across the environment, learning during the process. A pair of runs across the environment (one in each direction) takes 10 seconds if in a periodic larger environment, and 4 seconds in a smaller nonperiodic environment. :param runs: How many runs across the environment to do. Each "run" is defined as a full sweep across the environment in each direction. :param dir: Which direction to move in first. Valid values are 1 and -1. :param periodic: Whether or not the learning environment should be periodic (toroidal). :param recurrent: Whether or not recurrent connections should be active during learning. Warning: True can lead to instability. :param randomSpeed: Whether or not to use a random maximum speed for each run, to better simulate real learning. Can degrade performance. Only supported in periodic environments. """ # Set up plotting if self.plotting: self.fig = plt.figure() self.ax1 = self.fig.add_subplot(411) self.ax2 = self.fig.add_subplot(412) self.ax3 = self.fig.add_subplot(212) plt.ion() plt.tight_layout() self.ax3.set_xlabel("Inhibitory-Inhibitory connections") self.fig.show() self.fig.canvas.draw() # Set up the trajectories and running times. if not periodic: time = 4.*runs timings = [np.arange(0, time, self.dt)] trajectories = [(np.sin(dir*(times*np.pi/2 - np.pi/2.))+1)/2] else: # Space the starting points of the runs out. This tends to improve the # translation-invariance of the weight profiles, and thus gives better # overall path integration. startingPoint = 0 trajectories = [] timings = [] time = 0 residTime = 0 for run in xrange(runs): if randomSpeed: speed = np.random.random() + 0.5 else: speed = 1. length = 10./speed runTimes = np.arange(0, length, self.dt) trajectory = (np.sin(dir*(runTimes*np.pi/(5/speed) - np.pi/2.)) + 1)*\ 2.5 + startingPoint trajectories.append(trajectory) timings.append(runTimes + time) time += length startingPoint += 1./runs for trajectory, timing in zip(trajectories, timings): self.activationsI = np.zeros(self.activationsI.shape) self.activationsER = np.zeros(self.activationsER.shape) self.activationsEL = np.zeros(self.activationsEL.shape) velocities = np.diff(trajectory)/self.dt for i, t in enumerate(timing[:-1]): x = trajectory[i] % 1 v = velocities[i] feedforwardInputI = np.exp(-1.*(self.placeCodeI - x)**2 / (2*self.sigmaLoc**2)) feedforwardInputI *= self.placeGainI feedforwardInputI += self.globalTonicMagnitude feedforwardInputE = np.exp(-1.*(self.placeCodeE - x)**2 / (2*self.sigmaLoc**2)) feedforwardInputE *= self.placeGainE feedforwardInputE += self.globalTonicMagnitude self.update(feedforwardInputI, feedforwardInputE, v, recurrent=recurrent, envelope=(not periodic), iSpeedTuning=periodic, enforceDale=True, ) self.stdpUpdate(time=i) if self.plotting: residTime += self.dt if residTime > PLOT_INTERVAL: residTime -= PLOT_INTERVAL self.ax3.matshow(self.weightsII, cmap=plt.cm.coolwarm) self.plotActivation(position=x, time=t) # Carry out any hanging STDP updates. self.stdpUpdate(time=i, clearBuffer=True) # Finally, enforce Dale's law. Inhibitory neurons must be inhibitory, # excitatory neurons must be excitatory. # This could be handled through update, but it's faster to do it here. np.minimum(self.weightsII, 0, self.weightsII) np.minimum(self.weightsIER, 0, self.weightsIER) np.minimum(self.weightsIEL, 0, self.weightsIEL) np.maximum(self.weightsELI, 0, self.weightsELI) np.maximum(self.weightsERI, 0, self.weightsERI)
[ "def", "learn", "(", "self", ",", "runs", ",", "dir", "=", "1", ",", "periodic", "=", "False", ",", "recurrent", "=", "True", ",", "randomSpeed", "=", "False", ")", ":", "# Set up plotting", "if", "self", ".", "plotting", ":", "self", ".", "fig", "=", "plt", ".", "figure", "(", ")", "self", ".", "ax1", "=", "self", ".", "fig", ".", "add_subplot", "(", "411", ")", "self", ".", "ax2", "=", "self", ".", "fig", ".", "add_subplot", "(", "412", ")", "self", ".", "ax3", "=", "self", ".", "fig", ".", "add_subplot", "(", "212", ")", "plt", ".", "ion", "(", ")", "plt", ".", "tight_layout", "(", ")", "self", ".", "ax3", ".", "set_xlabel", "(", "\"Inhibitory-Inhibitory connections\"", ")", "self", ".", "fig", ".", "show", "(", ")", "self", ".", "fig", ".", "canvas", ".", "draw", "(", ")", "# Set up the trajectories and running times.", "if", "not", "periodic", ":", "time", "=", "4.", "*", "runs", "timings", "=", "[", "np", ".", "arange", "(", "0", ",", "time", ",", "self", ".", "dt", ")", "]", "trajectories", "=", "[", "(", "np", ".", "sin", "(", "dir", "*", "(", "times", "*", "np", ".", "pi", "/", "2", "-", "np", ".", "pi", "/", "2.", ")", ")", "+", "1", ")", "/", "2", "]", "else", ":", "# Space the starting points of the runs out. This tends to improve the", "# translation-invariance of the weight profiles, and thus gives better", "# overall path integration.", "startingPoint", "=", "0", "trajectories", "=", "[", "]", "timings", "=", "[", "]", "time", "=", "0", "residTime", "=", "0", "for", "run", "in", "xrange", "(", "runs", ")", ":", "if", "randomSpeed", ":", "speed", "=", "np", ".", "random", ".", "random", "(", ")", "+", "0.5", "else", ":", "speed", "=", "1.", "length", "=", "10.", "/", "speed", "runTimes", "=", "np", ".", "arange", "(", "0", ",", "length", ",", "self", ".", "dt", ")", "trajectory", "=", "(", "np", ".", "sin", "(", "dir", "*", "(", "runTimes", "*", "np", ".", "pi", "/", "(", "5", "/", "speed", ")", "-", "np", ".", "pi", "/", "2.", ")", ")", "+", "1", ")", "*", "2.5", "+", "startingPoint", "trajectories", ".", "append", "(", "trajectory", ")", "timings", ".", "append", "(", "runTimes", "+", "time", ")", "time", "+=", "length", "startingPoint", "+=", "1.", "/", "runs", "for", "trajectory", ",", "timing", "in", "zip", "(", "trajectories", ",", "timings", ")", ":", "self", ".", "activationsI", "=", "np", ".", "zeros", "(", "self", ".", "activationsI", ".", "shape", ")", "self", ".", "activationsER", "=", "np", ".", "zeros", "(", "self", ".", "activationsER", ".", "shape", ")", "self", ".", "activationsEL", "=", "np", ".", "zeros", "(", "self", ".", "activationsEL", ".", "shape", ")", "velocities", "=", "np", ".", "diff", "(", "trajectory", ")", "/", "self", ".", "dt", "for", "i", ",", "t", "in", "enumerate", "(", "timing", "[", ":", "-", "1", "]", ")", ":", "x", "=", "trajectory", "[", "i", "]", "%", "1", "v", "=", "velocities", "[", "i", "]", "feedforwardInputI", "=", "np", ".", "exp", "(", "-", "1.", "*", "(", "self", ".", "placeCodeI", "-", "x", ")", "**", "2", "/", "(", "2", "*", "self", ".", "sigmaLoc", "**", "2", ")", ")", "feedforwardInputI", "*=", "self", ".", "placeGainI", "feedforwardInputI", "+=", "self", ".", "globalTonicMagnitude", "feedforwardInputE", "=", "np", ".", "exp", "(", "-", "1.", "*", "(", "self", ".", "placeCodeE", "-", "x", ")", "**", "2", "/", "(", "2", "*", "self", ".", "sigmaLoc", "**", "2", ")", ")", "feedforwardInputE", "*=", "self", ".", "placeGainE", "feedforwardInputE", "+=", "self", ".", "globalTonicMagnitude", "self", ".", "update", "(", 
"feedforwardInputI", ",", "feedforwardInputE", ",", "v", ",", "recurrent", "=", "recurrent", ",", "envelope", "=", "(", "not", "periodic", ")", ",", "iSpeedTuning", "=", "periodic", ",", "enforceDale", "=", "True", ",", ")", "self", ".", "stdpUpdate", "(", "time", "=", "i", ")", "if", "self", ".", "plotting", ":", "residTime", "+=", "self", ".", "dt", "if", "residTime", ">", "PLOT_INTERVAL", ":", "residTime", "-=", "PLOT_INTERVAL", "self", ".", "ax3", ".", "matshow", "(", "self", ".", "weightsII", ",", "cmap", "=", "plt", ".", "cm", ".", "coolwarm", ")", "self", ".", "plotActivation", "(", "position", "=", "x", ",", "time", "=", "t", ")", "# Carry out any hanging STDP updates.", "self", ".", "stdpUpdate", "(", "time", "=", "i", ",", "clearBuffer", "=", "True", ")", "# Finally, enforce Dale's law. Inhibitory neurons must be inhibitory,", "# excitatory neurons must be excitatory.", "# This could be handled through update, but it's faster to do it here.", "np", ".", "minimum", "(", "self", ".", "weightsII", ",", "0", ",", "self", ".", "weightsII", ")", "np", ".", "minimum", "(", "self", ".", "weightsIER", ",", "0", ",", "self", ".", "weightsIER", ")", "np", ".", "minimum", "(", "self", ".", "weightsIEL", ",", "0", ",", "self", ".", "weightsIEL", ")", "np", ".", "maximum", "(", "self", ".", "weightsELI", ",", "0", ",", "self", ".", "weightsELI", ")", "np", ".", "maximum", "(", "self", ".", "weightsERI", ",", "0", ",", "self", ".", "weightsERI", ")" ]
Traverses a sinusoidal trajectory across the environment, learning during the process. A pair of runs across the environment (one in each direction) takes 10 seconds if in a periodic larger environment, and 4 seconds in a smaller nonperiodic environment. :param runs: How many runs across the environment to do. Each "run" is defined as a full sweep across the environment in each direction. :param dir: Which direction to move in first. Valid values are 1 and -1. :param periodic: Whether or not the learning environment should be periodic (toroidal). :param recurrent: Whether or not recurrent connections should be active during learning. Warning: True can lead to instability. :param randomSpeed: Whether or not to use a random maximum speed for each run, to better simulate real learning. Can degrade performance. Only supported in periodic environments.
[ "Traverses", "a", "sinusoidal", "trajectory", "across", "the", "environment", "learning", "during", "the", "process", ".", "A", "pair", "of", "runs", "across", "the", "environment", "(", "one", "in", "each", "direction", ")", "takes", "10", "seconds", "if", "in", "a", "periodic", "larger", "environment", "and", "4", "seconds", "in", "a", "smaller", "nonperiodic", "environment", ".", ":", "param", "runs", ":", "How", "many", "runs", "across", "the", "environment", "to", "do", ".", "Each", "run", "is", "defined", "as", "a", "full", "sweep", "across", "the", "environment", "in", "each", "direction", ".", ":", "param", "dir", ":", "Which", "direction", "to", "move", "in", "first", ".", "Valid", "values", "are", "1", "and", "-", "1", ".", ":", "param", "periodic", ":", "Whether", "or", "not", "the", "learning", "environment", "should", "be", "periodic", "(", "toroidal", ")", ".", ":", "param", "recurrent", ":", "Whether", "or", "not", "recurrent", "connections", "should", "be", "active", "during", "learning", ".", "Warning", ":", "True", "can", "lead", "to", "instability", ".", ":", "param", "randomSpeed", ":", "Whether", "or", "not", "to", "use", "a", "random", "maximum", "speed", "for", "each", "run", "to", "better", "simulate", "real", "learning", ".", "Can", "degrade", "performance", ".", "Only", "supported", "in", "periodic", "environments", "." ]
python
train
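A runnable sketch of one periodic sweep trajectory from the learn record, with hypothetical timestep and speed values replacing the object state.

import numpy as np

dt = 0.01                  # hypothetical timestep
speed = 1.0
starting_point = 0.0
length = 10. / speed
run_times = np.arange(0, length, dt)
# One sinusoidal sweep across the environment and back.
trajectory = (np.sin(run_times * np.pi / (5 / speed) - np.pi / 2.) + 1) \
             * 2.5 + starting_point
print(trajectory.min(), trajectory.max())  # sweeps over 0.0 .. 5.0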
PyGithub/PyGithub
github/Organization.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Organization.py#L789-L799
def get_teams(self): """ :calls: `GET /orgs/:org/teams <http://developer.github.com/v3/orgs/teams>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team` """ return github.PaginatedList.PaginatedList( github.Team.Team, self._requester, self.url + "/teams", None )
[ "def", "get_teams", "(", "self", ")", ":", "return", "github", ".", "PaginatedList", ".", "PaginatedList", "(", "github", ".", "Team", ".", "Team", ",", "self", ".", "_requester", ",", "self", ".", "url", "+", "\"/teams\"", ",", "None", ")" ]
:calls: `GET /orgs/:org/teams <http://developer.github.com/v3/orgs/teams>`_ :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team`
[ ":", "calls", ":", "GET", "/", "orgs", "/", ":", "org", "/", "teams", "<http", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "orgs", "/", "teams", ">", "_", ":", "rtype", ":", ":", "class", ":", "github", ".", "PaginatedList", ".", "PaginatedList", "of", ":", "class", ":", "github", ".", "Team", ".", "Team" ]
python
train
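Typical usage of the method through PyGithub; the token and organization name below are placeholders.

from github import Github

gh = Github("ghp_example_token")          # hypothetical token
org = gh.get_organization("example-org")  # hypothetical org
for team in org.get_teams():  # PaginatedList, fetched page by page
    print(team.name)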
hydpy-dev/hydpy
hydpy/core/sequencetools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/sequencetools.py#L910-L914
def seriesshape(self): """Shape of the whole time series (time being the first dimension).""" seriesshape = [len(hydpy.pub.timegrids.init)] seriesshape.extend(self.shape) return tuple(seriesshape)
[ "def", "seriesshape", "(", "self", ")", ":", "seriesshape", "=", "[", "len", "(", "hydpy", ".", "pub", ".", "timegrids", ".", "init", ")", "]", "seriesshape", ".", "extend", "(", "self", ".", "shape", ")", "return", "tuple", "(", "seriesshape", ")" ]
Shape of the whole time series (time being the first dimension).
[ "Shape", "of", "the", "whole", "time", "series", "(", "time", "being", "the", "first", "dimension", ")", "." ]
python
train
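The shape composition in isolation, with hypothetical numbers replacing the timegrid length.

# Prepend the number of timesteps to a per-entry shape, as the
# property does with len(hydpy.pub.timegrids.init).
timesteps = 365            # hypothetical length of the init period
shape = (4,)               # hypothetical per-timestep sequence shape
seriesshape = tuple([timesteps] + list(shape))
print(seriesshape)         # (365, 4)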
quora/qcore
qcore/decorators.py
https://github.com/quora/qcore/blob/fa5cd438eea554db35fd29cbc8dfbde69f09961c/qcore/decorators.py#L277-L304
def decorator_of_context_manager(ctxt): """Converts a context manager into a decorator. This decorator will run the decorated function in the context of the manager. :param ctxt: Context to run the function in. :return: Wrapper around the original function. """ def decorator_fn(*outer_args, **outer_kwargs): def decorator(fn): @functools.wraps(fn) def wrapper(*args, **kwargs): with ctxt(*outer_args, **outer_kwargs): return fn(*args, **kwargs) return wrapper return decorator if getattr(ctxt, "__doc__", None) is None: msg = "Decorator that runs the inner function in the context of %s" decorator_fn.__doc__ = msg % ctxt else: decorator_fn.__doc__ = ctxt.__doc__ return decorator_fn
[ "def", "decorator_of_context_manager", "(", "ctxt", ")", ":", "def", "decorator_fn", "(", "*", "outer_args", ",", "*", "*", "outer_kwargs", ")", ":", "def", "decorator", "(", "fn", ")", ":", "@", "functools", ".", "wraps", "(", "fn", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "ctxt", "(", "*", "outer_args", ",", "*", "*", "outer_kwargs", ")", ":", "return", "fn", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper", "return", "decorator", "if", "getattr", "(", "ctxt", ",", "\"__doc__\"", ",", "None", ")", "is", "None", ":", "msg", "=", "\"Decorator that runs the inner function in the context of %s\"", "decorator_fn", ".", "__doc__", "=", "msg", "%", "ctxt", "else", ":", "decorator_fn", ".", "__doc__", "=", "ctxt", ".", "__doc__", "return", "decorator_fn" ]
Converts a context manager into a decorator. This decorator will run the decorated function in the context of the manager. :param ctxt: Context to run the function in. :return: Wrapper around the original function.
[ "Converts", "a", "context", "manager", "into", "a", "decorator", "." ]
python
train
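A runnable usage sketch; the import assumes the qcore package is installed, and the announce context manager is a made-up example.

from contextlib import contextmanager
from qcore.decorators import decorator_of_context_manager  # assumes qcore installed

@contextmanager
def announce(label):  # hypothetical context manager
    print("enter", label)
    yield
    print("exit", label)

announce_decorator = decorator_of_context_manager(announce)

@announce_decorator("step-1")
def work():
    print("working")

work()  # prints: enter step-1 / working / exit step-1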
hugapi/hug
examples/html_serve.py
https://github.com/hugapi/hug/blob/080901c81576657f82e2432fd4a82f1d0d2f370c/examples/html_serve.py#L10-L15
def nagiosCommandHelp(**kwargs): """ Returns command help document when no command is specified """ with open(os.path.join(DIRECTORY, 'document.html')) as document: return document.read()
[ "def", "nagiosCommandHelp", "(", "*", "*", "kwargs", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "DIRECTORY", ",", "'document.html'", ")", ")", "as", "document", ":", "return", "document", ".", "read", "(", ")" ]
Returns command help document when no command is specified
[ "Returns", "command", "help", "document", "when", "no", "command", "is", "specified" ]
python
train
f3at/feat
src/feat/extern/log/log.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/extern/log/log.py#L377-L382
def warningObject(object, cat, format, *args): """ Log a warning message in the given category. This is used for non-fatal problems. """ doLog(WARN, object, cat, format, args)
[ "def", "warningObject", "(", "object", ",", "cat", ",", "format", ",", "*", "args", ")", ":", "doLog", "(", "WARN", ",", "object", ",", "cat", ",", "format", ",", "args", ")" ]
Log a warning message in the given category. This is used for non-fatal problems.
[ "Log", "a", "warning", "message", "in", "the", "given", "category", ".", "This", "is", "used", "for", "non", "-", "fatal", "problems", "." ]
python
train
juju/charm-helpers
charmhelpers/contrib/openstack/amulet/utils.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/amulet/utils.py#L1101-L1120
def get_ceph_pool_sample(self, sentry_unit, pool_id=0): """Take a sample of attributes of a ceph pool, returning ceph pool name, object count and disk space used for the specified pool ID number. :param sentry_unit: Pointer to amulet sentry instance (juju unit) :param pool_id: Ceph pool ID :returns: List of pool name, object count, kb disk space used """ df = self.get_ceph_df(sentry_unit) for pool in df['pools']: if pool['id'] == pool_id: pool_name = pool['name'] obj_count = pool['stats']['objects'] kb_used = pool['stats']['kb_used'] self.log.debug('Ceph {} pool (ID {}): {} objects, ' '{} kb used'.format(pool_name, pool_id, obj_count, kb_used)) return pool_name, obj_count, kb_used
[ "def", "get_ceph_pool_sample", "(", "self", ",", "sentry_unit", ",", "pool_id", "=", "0", ")", ":", "df", "=", "self", ".", "get_ceph_df", "(", "sentry_unit", ")", "for", "pool", "in", "df", "[", "'pools'", "]", ":", "if", "pool", "[", "'id'", "]", "==", "pool_id", ":", "pool_name", "=", "pool", "[", "'name'", "]", "obj_count", "=", "pool", "[", "'stats'", "]", "[", "'objects'", "]", "kb_used", "=", "pool", "[", "'stats'", "]", "[", "'kb_used'", "]", "self", ".", "log", ".", "debug", "(", "'Ceph {} pool (ID {}): {} objects, '", "'{} kb used'", ".", "format", "(", "pool_name", ",", "pool_id", ",", "obj_count", ",", "kb_used", ")", ")", "return", "pool_name", ",", "obj_count", ",", "kb_used" ]
Take a sample of attributes of a ceph pool, returning ceph pool name, object count and disk space used for the specified pool ID number. :param sentry_unit: Pointer to amulet sentry instance (juju unit) :param pool_id: Ceph pool ID :returns: List of pool name, object count, kb disk space used
[ "Take", "a", "sample", "of", "attributes", "of", "a", "ceph", "pool", "returning", "ceph", "pool", "name", "object", "count", "and", "disk", "space", "used", "for", "the", "specified", "pool", "ID", "number", "." ]
python
train
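The pool lookup in isolation, over a hypothetical `ceph df` payload trimmed to the fields the sampler reads.

df = {"pools": [
    {"id": 0, "name": "rbd",
     "stats": {"objects": 120, "kb_used": 4096}},
    {"id": 1, "name": "cephfs_data",
     "stats": {"objects": 8, "kb_used": 64}},
]}

pool_id = 1
for pool in df["pools"]:
    if pool["id"] == pool_id:
        # Same triple the method returns: name, object count, kb used.
        print(pool["name"], pool["stats"]["objects"], pool["stats"]["kb_used"])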
jantman/pypi-download-stats
pypi_download_stats/projectstats.py
https://github.com/jantman/pypi-download-stats/blob/44a7a6bbcd61a9e7f02bd02c52584a98183f80c5/pypi_download_stats/projectstats.py#L208-L222
def per_version_data(self): """ Return download data by version. :return: dict of cache data; keys are datetime objects, values are dict of version (str) to count (int) :rtype: dict """ ret = {} for cache_date in self.cache_dates: data = self._cache_get(cache_date) if len(data['by_version']) == 0: data['by_version'] = {'other': 0} ret[cache_date] = data['by_version'] return ret
[ "def", "per_version_data", "(", "self", ")", ":", "ret", "=", "{", "}", "for", "cache_date", "in", "self", ".", "cache_dates", ":", "data", "=", "self", ".", "_cache_get", "(", "cache_date", ")", "if", "len", "(", "data", "[", "'by_version'", "]", ")", "==", "0", ":", "data", "[", "'by_version'", "]", "=", "{", "'other'", ":", "0", "}", "ret", "[", "cache_date", "]", "=", "data", "[", "'by_version'", "]", "return", "ret" ]
Return download data by version. :return: dict of cache data; keys are datetime objects, values are dict of version (str) to count (int) :rtype: dict
[ "Return", "download", "data", "by", "version", "." ]
python
train
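A runnable sketch of the empty-dict fallback, with a hypothetical cache in place of _cache_get.

# Hypothetical cache entries keyed by date, as _cache_get would return.
cache = {
    "2016-08-01": {"by_version": {"1.0": 10, "1.1": 3}},
    "2016-08-02": {"by_version": {}},
}

ret = {}
for cache_date, data in cache.items():
    # Empty per-version data collapses to a single "other" bucket.
    ret[cache_date] = data["by_version"] or {"other": 0}
print(ret)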
santoshphilip/eppy
eppy/simpleread.py
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/simpleread.py#L37-L53
def idf2txt(txt): """convert the idf text to a simple text""" astr = nocomment(txt) objs = astr.split(';') objs = [obj.split(',') for obj in objs] objs = [[line.strip() for line in obj] for obj in objs] objs = [[_tofloat(line) for line in obj] for obj in objs] objs = [tuple(obj) for obj in objs] objs.sort() lst = [] for obj in objs: for field in obj[:-1]: lst.append('%s,' % (field, )) lst.append('%s;\n' % (obj[-1], )) return '\n'.join(lst)
[ "def", "idf2txt", "(", "txt", ")", ":", "astr", "=", "nocomment", "(", "txt", ")", "objs", "=", "astr", ".", "split", "(", "';'", ")", "objs", "=", "[", "obj", ".", "split", "(", "','", ")", "for", "obj", "in", "objs", "]", "objs", "=", "[", "[", "line", ".", "strip", "(", ")", "for", "line", "in", "obj", "]", "for", "obj", "in", "objs", "]", "objs", "=", "[", "[", "_tofloat", "(", "line", ")", "for", "line", "in", "obj", "]", "for", "obj", "in", "objs", "]", "objs", "=", "[", "tuple", "(", "obj", ")", "for", "obj", "in", "objs", "]", "objs", ".", "sort", "(", ")", "lst", "=", "[", "]", "for", "obj", "in", "objs", ":", "for", "field", "in", "obj", "[", ":", "-", "1", "]", ":", "lst", ".", "append", "(", "'%s,'", "%", "(", "field", ",", ")", ")", "lst", ".", "append", "(", "'%s;\\n'", "%", "(", "obj", "[", "-", "1", "]", ",", ")", ")", "return", "'\\n'", ".", "join", "(", "lst", ")" ]
convert the idf text to a simple text
[ "convert", "the", "idf", "text", "to", "a", "simple", "text" ]
python
train
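A self-contained sketch of the normalization; nocomment and _tofloat here are minimal stand-ins for the module's real helpers.

import re

def nocomment(txt):
    # Minimal stand-in: drop "!" comments to end of line.
    return re.sub(r'!.*', '', txt)

def _tofloat(field):
    try:
        return float(field)
    except ValueError:
        return field

idf = "Timestep, 4; ! four per hour\nVersion, 8.9;"
astr = nocomment(idf)
# Split into objects, split objects into fields, coerce numbers, sort.
objs = sorted(tuple(_tofloat(f.strip()) for f in obj.split(','))
              for obj in astr.split(';') if obj.strip())
print(objs)  # [('Timestep', 4.0), ('Version', 8.9)]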
quantopian/pyfolio
pyfolio/round_trips.py
https://github.com/quantopian/pyfolio/blob/712716ab0cdebbec9fabb25eea3bf40e4354749d/pyfolio/round_trips.py#L322-L346
def apply_sector_mappings_to_round_trips(round_trips, sector_mappings): """ Translates round trip symbols to sectors. Parameters ---------- round_trips : pd.DataFrame DataFrame with one row per round trip trade. - See full explanation in round_trips.extract_round_trips sector_mappings : dict or pd.Series, optional Security identifier to sector mapping. Security ids as keys, sectors as values. Returns ------- sector_round_trips : pd.DataFrame Round trips with symbol names replaced by sector names. """ sector_round_trips = round_trips.copy() sector_round_trips.symbol = sector_round_trips.symbol.apply( lambda x: sector_mappings.get(x, 'No Sector Mapping')) sector_round_trips = sector_round_trips.dropna(axis=0) return sector_round_trips
[ "def", "apply_sector_mappings_to_round_trips", "(", "round_trips", ",", "sector_mappings", ")", ":", "sector_round_trips", "=", "round_trips", ".", "copy", "(", ")", "sector_round_trips", ".", "symbol", "=", "sector_round_trips", ".", "symbol", ".", "apply", "(", "lambda", "x", ":", "sector_mappings", ".", "get", "(", "x", ",", "'No Sector Mapping'", ")", ")", "sector_round_trips", "=", "sector_round_trips", ".", "dropna", "(", "axis", "=", "0", ")", "return", "sector_round_trips" ]
Translates round trip symbols to sectors. Parameters ---------- round_trips : pd.DataFrame DataFrame with one row per round trip trade. - See full explanation in round_trips.extract_round_trips sector_mappings : dict or pd.Series, optional Security identifier to sector mapping. Security ids as keys, sectors as values. Returns ------- sector_round_trips : pd.DataFrame Round trips with symbol names replaced by sector names.
[ "Translates", "round", "trip", "symbols", "to", "sectors", "." ]
python
valid
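A runnable pandas sketch of the same translation, with made-up round trips and mappings.

import pandas as pd

round_trips = pd.DataFrame({
    "symbol": ["AAPL", "XOM", "ZZZ"],
    "pnl": [120.0, -35.5, 10.0],
})
sector_mappings = {"AAPL": "Tech", "XOM": "Energy"}

# Unmapped symbols fall back to the same sentinel the record uses.
sector_round_trips = round_trips.copy()
sector_round_trips.symbol = sector_round_trips.symbol.apply(
    lambda x: sector_mappings.get(x, "No Sector Mapping"))
print(sector_round_trips)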
Kozea/cairocffi
cairocffi/patterns.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/patterns.py#L43-L61
def _from_pointer(pointer, incref): """Wrap an existing :c:type:`cairo_pattern_t *` cdata pointer. :type incref: bool :param incref: Whether to increase the :ref:`reference count <refcounting>` now. :return: A new instance of :class:`Pattern` or one of its sub-classes, depending on the pattern’s type. """ if pointer == ffi.NULL: raise ValueError('Null pointer') if incref: cairo.cairo_pattern_reference(pointer) self = object.__new__(PATTERN_TYPE_TO_CLASS.get( cairo.cairo_pattern_get_type(pointer), Pattern)) Pattern.__init__(self, pointer) # Skip the subclass’s __init__ return self
[ "def", "_from_pointer", "(", "pointer", ",", "incref", ")", ":", "if", "pointer", "==", "ffi", ".", "NULL", ":", "raise", "ValueError", "(", "'Null pointer'", ")", "if", "incref", ":", "cairo", ".", "cairo_pattern_reference", "(", "pointer", ")", "self", "=", "object", ".", "__new__", "(", "PATTERN_TYPE_TO_CLASS", ".", "get", "(", "cairo", ".", "cairo_pattern_get_type", "(", "pointer", ")", ",", "Pattern", ")", ")", "Pattern", ".", "__init__", "(", "self", ",", "pointer", ")", "# Skip the subclass’s __init__", "return", "self" ]
Wrap an existing :c:type:`cairo_pattern_t *` cdata pointer. :type incref: bool :param incref: Whether to increase the :ref:`reference count <refcounting>` now. :return: A new instance of :class:`Pattern` or one of its sub-classes, depending on the pattern’s type.
[ "Wrap", "an", "existing", ":", "c", ":", "type", ":", "cairo_pattern_t", "*", "cdata", "pointer", "." ]
python
train
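A generic, cairo-free sketch of the wrap pattern this record uses: dispatch on a type tag, allocate with object.__new__, and run only the base initializer so the subclass's normal constructor is skipped.

class Base:
    def __init__(self, handle):
        self.handle = handle

class Solid(Base):
    def __init__(self):
        # Normal construction is skipped when wrapping an existing handle.
        raise RuntimeError("use from_handle() for existing handles")

TYPE_TO_CLASS = {"solid": Solid}  # hypothetical type-tag registry

def from_handle(kind, handle):
    # Pick the subclass from the native type tag, allocate without
    # calling its __init__, then run only the base initializer.
    cls = TYPE_TO_CLASS.get(kind, Base)
    self = object.__new__(cls)
    Base.__init__(self, handle)
    return self

p = from_handle("solid", 0xBEEF)
print(type(p).__name__, hex(p.handle))  # Solid 0xbeef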