Dataset columns:
  repo              stringlengths 7-54
  path              stringlengths 4-192
  url               stringlengths 87-284
  code              stringlengths 78-104k
  code_tokens       sequence
  docstring         stringlengths 1-46.9k
  docstring_tokens  sequence
  language          stringclasses (1 value)
  partition         stringclasses (3 values)
angr/claripy
claripy/backends/__init__.py
https://github.com/angr/claripy/blob/4ed61924880af1ea8fb778047d896ec0156412a6/claripy/backends/__init__.py#L630-L639
def satisfiable(self, extra_constraints=(), solver=None, model_callback=None): """ This function does a constraint check and checks if the solver is in a sat state. :param solver: The backend solver object. :param extra_constraints: Extra constraints (as ASTs) to add to s for this solve :param model_callback: a function that will be executed with recovered models (if any) :return: True if sat, otherwise false """ return self._satisfiable(extra_constraints=self.convert_list(extra_constraints), solver=solver, model_callback=model_callback)
[ "def", "satisfiable", "(", "self", ",", "extra_constraints", "=", "(", ")", ",", "solver", "=", "None", ",", "model_callback", "=", "None", ")", ":", "return", "self", ".", "_satisfiable", "(", "extra_constraints", "=", "self", ".", "convert_list", "(", "extra_constraints", ")", ",", "solver", "=", "solver", ",", "model_callback", "=", "model_callback", ")" ]
This function does a constraint check and checks if the solver is in a sat state. :param solver: The backend solver object. :param extra_constraints: Extra constraints (as ASTs) to add to s for this solve :param model_callback: a function that will be executed with recovered models (if any) :return: True if sat, otherwise false
[ "This", "function", "does", "a", "constraint", "check", "and", "checks", "if", "the", "solver", "is", "in", "a", "sat", "state", "." ]
python
train
trp07/messages
messages/email_.py
https://github.com/trp07/messages/blob/7789ebc960335a59ea5d319fceed3dd349023648/messages/email_.py#L185-L190
def _generate_email(self): """Put the parts of the email together.""" self.message = MIMEMultipart() self._add_header() self._add_body() self._add_attachments()
[ "def", "_generate_email", "(", "self", ")", ":", "self", ".", "message", "=", "MIMEMultipart", "(", ")", "self", ".", "_add_header", "(", ")", "self", ".", "_add_body", "(", ")", "self", ".", "_add_attachments", "(", ")" ]
Put the parts of the email together.
[ "Put", "the", "parts", "of", "the", "email", "together", "." ]
python
test
doakey3/DashTable
dashtable/simple2data/truncate_empty_lines.py
https://github.com/doakey3/DashTable/blob/744cfb6a717fa75a8092c83ebcd49b2668023681/dashtable/simple2data/truncate_empty_lines.py#L1-L21
def truncate_empty_lines(lines): """ Removes all empty lines from above and below the text. We can't just use text.strip() because that would remove the leading space for the table. Parameters ---------- lines : list of str Returns ------- lines : list of str The text lines without empty lines above or below """ while lines[0].rstrip() == '': lines.pop(0) while lines[len(lines) - 1].rstrip() == '': lines.pop(-1) return lines
[ "def", "truncate_empty_lines", "(", "lines", ")", ":", "while", "lines", "[", "0", "]", ".", "rstrip", "(", ")", "==", "''", ":", "lines", ".", "pop", "(", "0", ")", "while", "lines", "[", "len", "(", "lines", ")", "-", "1", "]", ".", "rstrip", "(", ")", "==", "''", ":", "lines", ".", "pop", "(", "-", "1", ")", "return", "lines" ]
Removes all empty lines from above and below the text. We can't just use text.strip() because that would remove the leading space for the table. Parameters ---------- lines : list of str Returns ------- lines : list of str The text lines without empty lines above or below
[ "Removes", "all", "empty", "lines", "from", "above", "and", "below", "the", "text", "." ]
python
train
garnaat/placebo
placebo/pill.py
https://github.com/garnaat/placebo/blob/1e8ab91b92fa7c5bb1fdbce2331f0c55455093d7/placebo/pill.py#L233-L259
def get_next_file_path(self, service, operation): """ Returns a tuple with the next file to read and the serializer format used """ base_name = '{0}.{1}'.format(service, operation) if self.prefix: base_name = '{0}.{1}'.format(self.prefix, base_name) LOG.debug('get_next_file_path: %s', base_name) next_file = None serializer_format = None index = self._index.setdefault(base_name, 1) while not next_file: file_name = os.path.join( self._data_path, base_name + '_{0}'.format(index)) next_file, serializer_format = self.find_file_format(file_name) if next_file: self._index[base_name] += 1 elif index != 1: index = 1 self._index[base_name] = 1 else: raise IOError('response file ({0}.[{1}]) not found'.format( file_name, "|".join(Format.ALLOWED))) return next_file, serializer_format
[ "def", "get_next_file_path", "(", "self", ",", "service", ",", "operation", ")", ":", "base_name", "=", "'{0}.{1}'", ".", "format", "(", "service", ",", "operation", ")", "if", "self", ".", "prefix", ":", "base_name", "=", "'{0}.{1}'", ".", "format", "(", "self", ".", "prefix", ",", "base_name", ")", "LOG", ".", "debug", "(", "'get_next_file_path: %s'", ",", "base_name", ")", "next_file", "=", "None", "serializer_format", "=", "None", "index", "=", "self", ".", "_index", ".", "setdefault", "(", "base_name", ",", "1", ")", "while", "not", "next_file", ":", "file_name", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_data_path", ",", "base_name", "+", "'_{0}'", ".", "format", "(", "index", ")", ")", "next_file", ",", "serializer_format", "=", "self", ".", "find_file_format", "(", "file_name", ")", "if", "next_file", ":", "self", ".", "_index", "[", "base_name", "]", "+=", "1", "elif", "index", "!=", "1", ":", "index", "=", "1", "self", ".", "_index", "[", "base_name", "]", "=", "1", "else", ":", "raise", "IOError", "(", "'response file ({0}.[{1}]) not found'", ".", "format", "(", "file_name", ",", "\"|\"", ".", "join", "(", "Format", ".", "ALLOWED", ")", ")", ")", "return", "next_file", ",", "serializer_format" ]
Returns a tuple with the next file to read and the serializer format used
[ "Returns", "a", "tuple", "with", "the", "next", "file", "to", "read", "and", "the", "serializer", "format", "used" ]
python
train
jobovy/galpy
galpy/actionAngle/actionAngleAxi.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/actionAngle/actionAngleAxi.py#L220-L241
def JR(self,**kwargs): """ NAME: JR PURPOSE: Calculate the radial action INPUT: +scipy.integrate.quad keywords OUTPUT: J_R(R,vT,vT)/ro/vc + estimate of the error HISTORY: 2010-12-01 - Written - Bovy (NYU) """ if hasattr(self,'_JR'): #pragma: no cover return self._JR (rperi,rap)= self.calcRapRperi(**kwargs) EL= self.calcEL(**kwargs) E, L= EL self._JR= 1./nu.pi*integrate.quad(_JRAxiIntegrand,rperi,rap, args=(E,L,self._pot), **kwargs)[0] return self._JR
[ "def", "JR", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "hasattr", "(", "self", ",", "'_JR'", ")", ":", "#pragma: no cover", "return", "self", ".", "_JR", "(", "rperi", ",", "rap", ")", "=", "self", ".", "calcRapRperi", "(", "*", "*", "kwargs", ")", "EL", "=", "self", ".", "calcEL", "(", "*", "*", "kwargs", ")", "E", ",", "L", "=", "EL", "self", ".", "_JR", "=", "1.", "/", "nu", ".", "pi", "*", "integrate", ".", "quad", "(", "_JRAxiIntegrand", ",", "rperi", ",", "rap", ",", "args", "=", "(", "E", ",", "L", ",", "self", ".", "_pot", ")", ",", "*", "*", "kwargs", ")", "[", "0", "]", "return", "self", ".", "_JR" ]
NAME: JR PURPOSE: Calculate the radial action INPUT: +scipy.integrate.quad keywords OUTPUT: J_R(R,vT,vT)/ro/vc + estimate of the error HISTORY: 2010-12-01 - Written - Bovy (NYU)
[ "NAME", ":", "JR", "PURPOSE", ":", "Calculate", "the", "radial", "action", "INPUT", ":", "+", "scipy", ".", "integrate", ".", "quad", "keywords", "OUTPUT", ":", "J_R", "(", "R", "vT", "vT", ")", "/", "ro", "/", "vc", "+", "estimate", "of", "the", "error", "HISTORY", ":", "2010", "-", "12", "-", "01", "-", "Written", "-", "Bovy", "(", "NYU", ")" ]
python
train
galaxyproject/pulsar
pulsar/managers/base/base_drmaa.py
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/managers/base/base_drmaa.py#L31-L37
def shutdown(self, timeout=None): """Cleanup DRMAA session and call shutdown of parent.""" try: super(BaseDrmaaManager, self).shutdown(timeout) except Exception: pass self.drmaa_session.close()
[ "def", "shutdown", "(", "self", ",", "timeout", "=", "None", ")", ":", "try", ":", "super", "(", "BaseDrmaaManager", ",", "self", ")", ".", "shutdown", "(", "timeout", ")", "except", "Exception", ":", "pass", "self", ".", "drmaa_session", ".", "close", "(", ")" ]
Cleanup DRMAA session and call shutdown of parent.
[ "Cleanup", "DRMAA", "session", "and", "call", "shutdown", "of", "parent", "." ]
python
train
quodlibet/mutagen
mutagen/id3/_file.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/id3/_file.py#L378-L395
def add_tags(self, ID3=None): """Add an empty ID3 tag to the file. Args: ID3 (ID3): An ID3 subclass to use or `None` to use the one that used when loading. A custom tag reader may be used in instead of the default `ID3` object, e.g. an `mutagen.easyid3.EasyID3` reader. """ if ID3 is None: ID3 = self.ID3 if self.tags is None: self.ID3 = ID3 self.tags = ID3() else: raise error("an ID3 tag already exists")
[ "def", "add_tags", "(", "self", ",", "ID3", "=", "None", ")", ":", "if", "ID3", "is", "None", ":", "ID3", "=", "self", ".", "ID3", "if", "self", ".", "tags", "is", "None", ":", "self", ".", "ID3", "=", "ID3", "self", ".", "tags", "=", "ID3", "(", ")", "else", ":", "raise", "error", "(", "\"an ID3 tag already exists\"", ")" ]
Add an empty ID3 tag to the file. Args: ID3 (ID3): An ID3 subclass to use or `None` to use the one that used when loading. A custom tag reader may be used in instead of the default `ID3` object, e.g. an `mutagen.easyid3.EasyID3` reader.
[ "Add", "an", "empty", "ID3", "tag", "to", "the", "file", "." ]
python
train
philklei/tahoma-api
tahoma_api/tahoma_api.py
https://github.com/philklei/tahoma-api/blob/fc84f6ba3b673d0cd0e9e618777834a74a3c7b64/tahoma_api/tahoma_api.py#L529-L560
def launch_action_group(self, action_id): """Start action group.""" header = BASE_HEADERS.copy() header['Cookie'] = self.__cookie request = requests.get( BASE_URL + 'launchActionGroup?oid=' + action_id, headers=header, timeout=10) if request.status_code != 200: self.__logged_in = False self.login() self.launch_action_group(action_id) return try: result = request.json() except ValueError as error: raise Exception( "Not a valid result for launch" + "action group, protocol error: " + request.status_code + ' - ' + request.reason + " (" + error + ")") if 'actionGroup' not in result.keys(): raise Exception( "Could not launch action" + "group, missing execId.") return result['actionGroup'][0]['execId']
[ "def", "launch_action_group", "(", "self", ",", "action_id", ")", ":", "header", "=", "BASE_HEADERS", ".", "copy", "(", ")", "header", "[", "'Cookie'", "]", "=", "self", ".", "__cookie", "request", "=", "requests", ".", "get", "(", "BASE_URL", "+", "'launchActionGroup?oid='", "+", "action_id", ",", "headers", "=", "header", ",", "timeout", "=", "10", ")", "if", "request", ".", "status_code", "!=", "200", ":", "self", ".", "__logged_in", "=", "False", "self", ".", "login", "(", ")", "self", ".", "launch_action_group", "(", "action_id", ")", "return", "try", ":", "result", "=", "request", ".", "json", "(", ")", "except", "ValueError", "as", "error", ":", "raise", "Exception", "(", "\"Not a valid result for launch\"", "+", "\"action group, protocol error: \"", "+", "request", ".", "status_code", "+", "' - '", "+", "request", ".", "reason", "+", "\" (\"", "+", "error", "+", "\")\"", ")", "if", "'actionGroup'", "not", "in", "result", ".", "keys", "(", ")", ":", "raise", "Exception", "(", "\"Could not launch action\"", "+", "\"group, missing execId.\"", ")", "return", "result", "[", "'actionGroup'", "]", "[", "0", "]", "[", "'execId'", "]" ]
Start action group.
[ "Start", "action", "group", "." ]
python
train
yatiml/yatiml
yatiml/recognizer.py
https://github.com/yatiml/yatiml/blob/4f55c058b72388350f0af3076ac3ea9bc1c142b0/yatiml/recognizer.py#L34-L51
def __recognize_scalar(self, node: yaml.Node, expected_type: Type) -> RecResult: """Recognize a node that we expect to be a scalar. Args: node: The node to recognize. expected_type: The type it is expected to be. Returns: A list of recognized types and an error message """ logger.debug('Recognizing as a scalar') if (isinstance(node, yaml.ScalarNode) and node.tag == scalar_type_to_tag[expected_type]): return [expected_type], '' message = 'Failed to recognize a {}\n{}\n'.format( type_to_desc(expected_type), node.start_mark) return [], message
[ "def", "__recognize_scalar", "(", "self", ",", "node", ":", "yaml", ".", "Node", ",", "expected_type", ":", "Type", ")", "->", "RecResult", ":", "logger", ".", "debug", "(", "'Recognizing as a scalar'", ")", "if", "(", "isinstance", "(", "node", ",", "yaml", ".", "ScalarNode", ")", "and", "node", ".", "tag", "==", "scalar_type_to_tag", "[", "expected_type", "]", ")", ":", "return", "[", "expected_type", "]", ",", "''", "message", "=", "'Failed to recognize a {}\\n{}\\n'", ".", "format", "(", "type_to_desc", "(", "expected_type", ")", ",", "node", ".", "start_mark", ")", "return", "[", "]", ",", "message" ]
Recognize a node that we expect to be a scalar. Args: node: The node to recognize. expected_type: The type it is expected to be. Returns: A list of recognized types and an error message
[ "Recognize", "a", "node", "that", "we", "expect", "to", "be", "a", "scalar", "." ]
python
train
InfoAgeTech/django-core
django_core/forms/fields.py
https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/forms/fields.py#L162-L172
def to_python(self, value): """Validates that the input can be converted to a list of decimals.""" if not value: return None if isinstance(value, list): for index, position_val in enumerate(value): val = super(MultipleDecimalField, self).to_python(position_val) value[index] = val return value
[ "def", "to_python", "(", "self", ",", "value", ")", ":", "if", "not", "value", ":", "return", "None", "if", "isinstance", "(", "value", ",", "list", ")", ":", "for", "index", ",", "position_val", "in", "enumerate", "(", "value", ")", ":", "val", "=", "super", "(", "MultipleDecimalField", ",", "self", ")", ".", "to_python", "(", "position_val", ")", "value", "[", "index", "]", "=", "val", "return", "value" ]
Validates that the input can be converted to a list of decimals.
[ "Validates", "that", "the", "input", "can", "be", "converted", "to", "a", "list", "of", "decimals", "." ]
python
train
singularityhub/singularity-cli
spython/main/parse/singularity.py
https://github.com/singularityhub/singularity-cli/blob/cb36b4504812ca87e29c6a40b222a545d1865799/spython/main/parse/singularity.py#L38-L61
def _setup(self, lines): '''setup required adding content from the host to the rootfs, so we try to capture with with ADD. ''' bot.warning('SETUP is error prone, please check output.') for line in lines: # For all lines, replace rootfs with actual root / line = re.sub('[$]{?SINGULARITY_ROOTFS}?', '', '$SINGULARITY_ROOTFS') # If we have nothing left, don't continue if line in ['', None]: continue # If the line starts with copy or move, assume is file from host if re.search('(^cp|^mv)', line): line = re.sub('(^cp|^mv)', '', line) self.files.append(line) # If it's a general command, add to install routine else: self.install.append(line)
[ "def", "_setup", "(", "self", ",", "lines", ")", ":", "bot", ".", "warning", "(", "'SETUP is error prone, please check output.'", ")", "for", "line", "in", "lines", ":", "# For all lines, replace rootfs with actual root /", "line", "=", "re", ".", "sub", "(", "'[$]{?SINGULARITY_ROOTFS}?'", ",", "''", ",", "'$SINGULARITY_ROOTFS'", ")", "# If we have nothing left, don't continue", "if", "line", "in", "[", "''", ",", "None", "]", ":", "continue", "# If the line starts with copy or move, assume is file from host", "if", "re", ".", "search", "(", "'(^cp|^mv)'", ",", "line", ")", ":", "line", "=", "re", ".", "sub", "(", "'(^cp|^mv)'", ",", "''", ",", "line", ")", "self", ".", "files", ".", "append", "(", "line", ")", "# If it's a general command, add to install routine", "else", ":", "self", ".", "install", ".", "append", "(", "line", ")" ]
setup required adding content from the host to the rootfs, so we try to capture with with ADD.
[ "setup", "required", "adding", "content", "from", "the", "host", "to", "the", "rootfs", "so", "we", "try", "to", "capture", "with", "with", "ADD", "." ]
python
train
dankelley/nota
nota/notaclass.py
https://github.com/dankelley/nota/blob/245cd575db60daaea6eebd5edc1d048c5fe23c9b/nota/notaclass.py#L289-L297
def list_books(self): ''' Return the list of book names ''' names = [] try: for n in self.cur.execute("SELECT name FROM book;").fetchall(): names.extend(n) except: self.error("ERROR: cannot find database table 'book'") return(names)
[ "def", "list_books", "(", "self", ")", ":", "names", "=", "[", "]", "try", ":", "for", "n", "in", "self", ".", "cur", ".", "execute", "(", "\"SELECT name FROM book;\"", ")", ".", "fetchall", "(", ")", ":", "names", ".", "extend", "(", "n", ")", "except", ":", "self", ".", "error", "(", "\"ERROR: cannot find database table 'book'\"", ")", "return", "(", "names", ")" ]
Return the list of book names
[ "Return", "the", "list", "of", "book", "names" ]
python
train
nats-io/python-nats
nats/io/client.py
https://github.com/nats-io/python-nats/blob/4a409319c409e7e55ce8377b64b406375c5f455b/nats/io/client.py#L909-L981
def _process_connect_init(self): """ Handles the initial part of the NATS protocol, moving from the (RE)CONNECTING to CONNECTED states when establishing a connection with the server. """ # INFO {...} line = yield self.io.read_until(_CRLF_, max_bytes=None) _, args = line.split(INFO_OP + _SPC_, 1) self._server_info = tornado.escape.json_decode((args)) if 'max_payload' in self._server_info: self._max_payload_size = self._server_info["max_payload"] # Check whether we need to upgrade to TLS first of all if 'tls_required' in self._server_info and self._server_info['tls_required']: # Detach and prepare for upgrading the TLS connection. self._loop.remove_handler(self._socket.fileno()) tls_opts = {} if "tls" in self.options: # Allow customizing the TLS version though default # to one that the server supports at least. tls_opts = self.options["tls"] # Rewrap using a TLS connection, can't do handshake on connect # as the socket is non blocking. self._socket = ssl.wrap_socket( self._socket, do_handshake_on_connect=False, **tls_opts) # Use the TLS stream instead from now self.io = tornado.iostream.SSLIOStream(self._socket) self.io.set_close_callback(self._process_op_err) self.io._do_ssl_handshake() # Refresh state of the parser upon reconnect. if self.is_reconnecting: self._ps.reset() # CONNECT then send a PING expecting a PONG to make a # roundtrip to the server and assert that sent commands sent # this far have been processed already. cmd = self.connect_command() yield self.io.write(cmd) yield self.io.write(PING_PROTO) # FIXME: Add readline timeout for these. next_op = yield self.io.read_until( _CRLF_, max_bytes=MAX_CONTROL_LINE_SIZE) if self.options["verbose"] and OK_OP in next_op: next_op = yield self.io.read_until( _CRLF_, max_bytes=MAX_CONTROL_LINE_SIZE) if ERR_OP in next_op: err_line = next_op.decode() _, err_msg = err_line.split(_SPC_, 1) # FIXME: Maybe handling could be more special here, # checking for ErrAuthorization for example. # yield from self._process_err(err_msg) raise NatsError("nats: " + err_msg.rstrip('\r\n')) if PONG_PROTO in next_op: self._status = Client.CONNECTED self._loop.spawn_callback(self._read_loop) self._pongs = [] self._pings_outstanding = 0 self._ping_timer = tornado.ioloop.PeriodicCallback( self._ping_interval, self.options["ping_interval"] * 1000) self._ping_timer.start() # Queue and flusher for coalescing writes to the server. self._flush_queue = tornado.queues.Queue(maxsize=1024) self._loop.spawn_callback(self._flusher_loop)
[ "def", "_process_connect_init", "(", "self", ")", ":", "# INFO {...}", "line", "=", "yield", "self", ".", "io", ".", "read_until", "(", "_CRLF_", ",", "max_bytes", "=", "None", ")", "_", ",", "args", "=", "line", ".", "split", "(", "INFO_OP", "+", "_SPC_", ",", "1", ")", "self", ".", "_server_info", "=", "tornado", ".", "escape", ".", "json_decode", "(", "(", "args", ")", ")", "if", "'max_payload'", "in", "self", ".", "_server_info", ":", "self", ".", "_max_payload_size", "=", "self", ".", "_server_info", "[", "\"max_payload\"", "]", "# Check whether we need to upgrade to TLS first of all", "if", "'tls_required'", "in", "self", ".", "_server_info", "and", "self", ".", "_server_info", "[", "'tls_required'", "]", ":", "# Detach and prepare for upgrading the TLS connection.", "self", ".", "_loop", ".", "remove_handler", "(", "self", ".", "_socket", ".", "fileno", "(", ")", ")", "tls_opts", "=", "{", "}", "if", "\"tls\"", "in", "self", ".", "options", ":", "# Allow customizing the TLS version though default", "# to one that the server supports at least.", "tls_opts", "=", "self", ".", "options", "[", "\"tls\"", "]", "# Rewrap using a TLS connection, can't do handshake on connect", "# as the socket is non blocking.", "self", ".", "_socket", "=", "ssl", ".", "wrap_socket", "(", "self", ".", "_socket", ",", "do_handshake_on_connect", "=", "False", ",", "*", "*", "tls_opts", ")", "# Use the TLS stream instead from now", "self", ".", "io", "=", "tornado", ".", "iostream", ".", "SSLIOStream", "(", "self", ".", "_socket", ")", "self", ".", "io", ".", "set_close_callback", "(", "self", ".", "_process_op_err", ")", "self", ".", "io", ".", "_do_ssl_handshake", "(", ")", "# Refresh state of the parser upon reconnect.", "if", "self", ".", "is_reconnecting", ":", "self", ".", "_ps", ".", "reset", "(", ")", "# CONNECT then send a PING expecting a PONG to make a", "# roundtrip to the server and assert that sent commands sent", "# this far have been processed already.", "cmd", "=", "self", ".", "connect_command", "(", ")", "yield", "self", ".", "io", ".", "write", "(", "cmd", ")", "yield", "self", ".", "io", ".", "write", "(", "PING_PROTO", ")", "# FIXME: Add readline timeout for these.", "next_op", "=", "yield", "self", ".", "io", ".", "read_until", "(", "_CRLF_", ",", "max_bytes", "=", "MAX_CONTROL_LINE_SIZE", ")", "if", "self", ".", "options", "[", "\"verbose\"", "]", "and", "OK_OP", "in", "next_op", ":", "next_op", "=", "yield", "self", ".", "io", ".", "read_until", "(", "_CRLF_", ",", "max_bytes", "=", "MAX_CONTROL_LINE_SIZE", ")", "if", "ERR_OP", "in", "next_op", ":", "err_line", "=", "next_op", ".", "decode", "(", ")", "_", ",", "err_msg", "=", "err_line", ".", "split", "(", "_SPC_", ",", "1", ")", "# FIXME: Maybe handling could be more special here,", "# checking for ErrAuthorization for example.", "# yield from self._process_err(err_msg)", "raise", "NatsError", "(", "\"nats: \"", "+", "err_msg", ".", "rstrip", "(", "'\\r\\n'", ")", ")", "if", "PONG_PROTO", "in", "next_op", ":", "self", ".", "_status", "=", "Client", ".", "CONNECTED", "self", ".", "_loop", ".", "spawn_callback", "(", "self", ".", "_read_loop", ")", "self", ".", "_pongs", "=", "[", "]", "self", ".", "_pings_outstanding", "=", "0", "self", ".", "_ping_timer", "=", "tornado", ".", "ioloop", ".", "PeriodicCallback", "(", "self", ".", "_ping_interval", ",", "self", ".", "options", "[", "\"ping_interval\"", "]", "*", "1000", ")", "self", ".", "_ping_timer", ".", "start", "(", ")", "# Queue and flusher for coalescing writes to the 
server.", "self", ".", "_flush_queue", "=", "tornado", ".", "queues", ".", "Queue", "(", "maxsize", "=", "1024", ")", "self", ".", "_loop", ".", "spawn_callback", "(", "self", ".", "_flusher_loop", ")" ]
Handles the initial part of the NATS protocol, moving from the (RE)CONNECTING to CONNECTED states when establishing a connection with the server.
[ "Handles", "the", "initial", "part", "of", "the", "NATS", "protocol", "moving", "from", "the", "(", "RE", ")", "CONNECTING", "to", "CONNECTED", "states", "when", "establishing", "a", "connection", "with", "the", "server", "." ]
python
train
ajenhl/tacl
tacl/results.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/results.py#L88-L116
def add_label_work_count(self): """Adds to each result row a count of the number of works within the label contain that n-gram. This counts works that have at least one witness carrying the n-gram. This correctly handles cases where an n-gram has only zero counts for a given work (possible with zero-fill followed by filtering by maximum count). """ self._logger.info('Adding label work count') def add_label_text_count(df): work_maxima = df.groupby(constants.WORK_FIELDNAME, sort=False).any() df.loc[:, constants.LABEL_WORK_COUNT_FIELDNAME] = work_maxima[ constants.COUNT_FIELDNAME].sum() return df if self._matches.empty: self._matches[constants.LABEL_WORK_COUNT_FIELDNAME] = 0 else: self._matches.loc[:, constants.LABEL_WORK_COUNT_FIELDNAME] = 0 self._matches = self._matches.groupby( [constants.LABEL_FIELDNAME, constants.NGRAM_FIELDNAME], sort=False).apply(add_label_text_count) self._logger.info('Finished adding label work count')
[ "def", "add_label_work_count", "(", "self", ")", ":", "self", ".", "_logger", ".", "info", "(", "'Adding label work count'", ")", "def", "add_label_text_count", "(", "df", ")", ":", "work_maxima", "=", "df", ".", "groupby", "(", "constants", ".", "WORK_FIELDNAME", ",", "sort", "=", "False", ")", ".", "any", "(", ")", "df", ".", "loc", "[", ":", ",", "constants", ".", "LABEL_WORK_COUNT_FIELDNAME", "]", "=", "work_maxima", "[", "constants", ".", "COUNT_FIELDNAME", "]", ".", "sum", "(", ")", "return", "df", "if", "self", ".", "_matches", ".", "empty", ":", "self", ".", "_matches", "[", "constants", ".", "LABEL_WORK_COUNT_FIELDNAME", "]", "=", "0", "else", ":", "self", ".", "_matches", ".", "loc", "[", ":", ",", "constants", ".", "LABEL_WORK_COUNT_FIELDNAME", "]", "=", "0", "self", ".", "_matches", "=", "self", ".", "_matches", ".", "groupby", "(", "[", "constants", ".", "LABEL_FIELDNAME", ",", "constants", ".", "NGRAM_FIELDNAME", "]", ",", "sort", "=", "False", ")", ".", "apply", "(", "add_label_text_count", ")", "self", ".", "_logger", ".", "info", "(", "'Finished adding label work count'", ")" ]
Adds to each result row a count of the number of works within the label contain that n-gram. This counts works that have at least one witness carrying the n-gram. This correctly handles cases where an n-gram has only zero counts for a given work (possible with zero-fill followed by filtering by maximum count).
[ "Adds", "to", "each", "result", "row", "a", "count", "of", "the", "number", "of", "works", "within", "the", "label", "contain", "that", "n", "-", "gram", "." ]
python
train
hydraplatform/hydra-base
hydra_base/db/model.py
https://github.com/hydraplatform/hydra-base/blob/9251ff7946505f7a272c87837390acd1c435bc6e/hydra_base/db/model.py#L1702-L1710
def is_admin(self): """ Check that the user has a role with the code 'admin' """ for ur in self.roleusers: if ur.role.code == 'admin': return True return False
[ "def", "is_admin", "(", "self", ")", ":", "for", "ur", "in", "self", ".", "roleusers", ":", "if", "ur", ".", "role", ".", "code", "==", "'admin'", ":", "return", "True", "return", "False" ]
Check that the user has a role with the code 'admin'
[ "Check", "that", "the", "user", "has", "a", "role", "with", "the", "code", "admin" ]
python
train
saltstack/salt
salt/modules/boto_ec2.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_ec2.py#L150-L194
def get_unassociated_eip_address(domain='standard', region=None, key=None, keyid=None, profile=None): ''' Return the first unassociated EIP domain Indicates whether the address is an EC2 address or a VPC address (standard|vpc). CLI Example: .. code-block:: bash salt-call boto_ec2.get_unassociated_eip_address .. versionadded:: 2016.3.0 ''' eip = None for address in get_all_eip_addresses(region=region, key=key, keyid=keyid, profile=profile): address_info = get_eip_address_info(addresses=address, region=region, key=key, keyid=keyid, profile=profile)[0] if address_info['instance_id']: log.debug('%s is already associated with the instance %s', address, address_info['instance_id']) continue if address_info['network_interface_id']: log.debug('%s is already associated with the network interface %s', address, address_info['network_interface_id']) continue if address_info['domain'] == domain: log.debug( "The first unassociated EIP address in the domain '%s' is %s", domain, address ) eip = address break if not eip: log.debug('No unassociated Elastic IP found!') return eip
[ "def", "get_unassociated_eip_address", "(", "domain", "=", "'standard'", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "eip", "=", "None", "for", "address", "in", "get_all_eip_addresses", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", ":", "address_info", "=", "get_eip_address_info", "(", "addresses", "=", "address", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "[", "0", "]", "if", "address_info", "[", "'instance_id'", "]", ":", "log", ".", "debug", "(", "'%s is already associated with the instance %s'", ",", "address", ",", "address_info", "[", "'instance_id'", "]", ")", "continue", "if", "address_info", "[", "'network_interface_id'", "]", ":", "log", ".", "debug", "(", "'%s is already associated with the network interface %s'", ",", "address", ",", "address_info", "[", "'network_interface_id'", "]", ")", "continue", "if", "address_info", "[", "'domain'", "]", "==", "domain", ":", "log", ".", "debug", "(", "\"The first unassociated EIP address in the domain '%s' is %s\"", ",", "domain", ",", "address", ")", "eip", "=", "address", "break", "if", "not", "eip", ":", "log", ".", "debug", "(", "'No unassociated Elastic IP found!'", ")", "return", "eip" ]
Return the first unassociated EIP domain Indicates whether the address is an EC2 address or a VPC address (standard|vpc). CLI Example: .. code-block:: bash salt-call boto_ec2.get_unassociated_eip_address .. versionadded:: 2016.3.0
[ "Return", "the", "first", "unassociated", "EIP" ]
python
train
jborean93/ntlm-auth
ntlm_auth/compute_keys.py
https://github.com/jborean93/ntlm-auth/blob/2c7cd81516d9bfd42e8ff473a534d876b21ebb38/ntlm_auth/compute_keys.py#L11-L52
def _get_exchange_key_ntlm_v1(negotiate_flags, session_base_key, server_challenge, lm_challenge_response, lm_hash): """ [MS-NLMP] v28.0 2016-07-14 3.4.5.1 KXKEY Calculates the Key Exchange Key for NTLMv1 authentication. Used for signing and sealing messages :param negotiate_flags: The negotiated NTLM flags :param session_base_key: A session key calculated from the user password challenge :param server_challenge: A random 8-byte response generated by the server in the CHALLENGE_MESSAGE :param lm_challenge_response: The LmChallengeResponse value computed in ComputeResponse :param lm_hash: The LMOWF computed in Compute Response :return: The Key Exchange Key (KXKEY) used to sign and seal messages and compute the ExportedSessionKey """ if negotiate_flags & \ NegotiateFlags.NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY: key_exchange_key = hmac.new( session_base_key, server_challenge + lm_challenge_response[:8], digestmod=hashlib.md5 ).digest() elif negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_LM_KEY: des_handler = DES(DES.key56_to_key64(lm_hash[:7])) first_des = des_handler.encrypt(lm_challenge_response[:8]) second_des_key = lm_hash[7:8] + b"\xbd\xbd\xbd\xbd\xbd\xbd" des_handler = DES(DES.key56_to_key64(second_des_key)) second_des = des_handler.encrypt(lm_challenge_response[:8]) key_exchange_key = first_des + second_des elif negotiate_flags & NegotiateFlags.NTLMSSP_REQUEST_NON_NT_SESSION_KEY: key_exchange_key = lm_hash[:8] + b'\0' * 8 else: key_exchange_key = session_base_key return key_exchange_key
[ "def", "_get_exchange_key_ntlm_v1", "(", "negotiate_flags", ",", "session_base_key", ",", "server_challenge", ",", "lm_challenge_response", ",", "lm_hash", ")", ":", "if", "negotiate_flags", "&", "NegotiateFlags", ".", "NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY", ":", "key_exchange_key", "=", "hmac", ".", "new", "(", "session_base_key", ",", "server_challenge", "+", "lm_challenge_response", "[", ":", "8", "]", ",", "digestmod", "=", "hashlib", ".", "md5", ")", ".", "digest", "(", ")", "elif", "negotiate_flags", "&", "NegotiateFlags", ".", "NTLMSSP_NEGOTIATE_LM_KEY", ":", "des_handler", "=", "DES", "(", "DES", ".", "key56_to_key64", "(", "lm_hash", "[", ":", "7", "]", ")", ")", "first_des", "=", "des_handler", ".", "encrypt", "(", "lm_challenge_response", "[", ":", "8", "]", ")", "second_des_key", "=", "lm_hash", "[", "7", ":", "8", "]", "+", "b\"\\xbd\\xbd\\xbd\\xbd\\xbd\\xbd\"", "des_handler", "=", "DES", "(", "DES", ".", "key56_to_key64", "(", "second_des_key", ")", ")", "second_des", "=", "des_handler", ".", "encrypt", "(", "lm_challenge_response", "[", ":", "8", "]", ")", "key_exchange_key", "=", "first_des", "+", "second_des", "elif", "negotiate_flags", "&", "NegotiateFlags", ".", "NTLMSSP_REQUEST_NON_NT_SESSION_KEY", ":", "key_exchange_key", "=", "lm_hash", "[", ":", "8", "]", "+", "b'\\0'", "*", "8", "else", ":", "key_exchange_key", "=", "session_base_key", "return", "key_exchange_key" ]
[MS-NLMP] v28.0 2016-07-14 3.4.5.1 KXKEY Calculates the Key Exchange Key for NTLMv1 authentication. Used for signing and sealing messages :param negotiate_flags: The negotiated NTLM flags :param session_base_key: A session key calculated from the user password challenge :param server_challenge: A random 8-byte response generated by the server in the CHALLENGE_MESSAGE :param lm_challenge_response: The LmChallengeResponse value computed in ComputeResponse :param lm_hash: The LMOWF computed in Compute Response :return: The Key Exchange Key (KXKEY) used to sign and seal messages and compute the ExportedSessionKey
[ "[", "MS", "-", "NLMP", "]", "v28", ".", "0", "2016", "-", "07", "-", "14" ]
python
train
openstax/cnx-archive
cnxarchive/views/legacy_redirect.py
https://github.com/openstax/cnx-archive/blob/d31d34aa8bbc8a9fde6cd4227a0df92726e8daf4/cnxarchive/views/legacy_redirect.py#L72-L119
def redirect_legacy_content(request): """Redirect from legacy /content/id/version to new /contents/uuid@version. Handles collection context (book) as well. """ routing_args = request.matchdict objid = routing_args['objid'] objver = routing_args.get('objver') filename = routing_args.get('filename') id, version = _convert_legacy_id(objid, objver) if not id: raise httpexceptions.HTTPNotFound() # We always use 301 redirects (HTTPMovedPermanently) here # because we want search engines to move to the newer links # We cache these redirects only briefly because, even when versioned, # legacy collection versions don't include the minor version, # so the latest archive url could change if filename: with db_connect() as db_connection: with db_connection.cursor() as cursor: args = dict(id=id, version=version, filename=filename) cursor.execute(SQL['get-resourceid-by-filename'], args) try: res = cursor.fetchone() resourceid = res[0] raise httpexceptions.HTTPMovedPermanently( request.route_path('resource', hash=resourceid, ignore=u'/{}'.format(filename)), headers=[("Cache-Control", "max-age=60, public")]) except TypeError: # None returned raise httpexceptions.HTTPNotFound() ident_hash = join_ident_hash(id, version) params = request.params if params.get('collection'): # page in book objid, objver = split_legacy_hash(params['collection']) book_uuid, book_version = _convert_legacy_id(objid, objver) if book_uuid: id, ident_hash = \ _get_page_in_book(id, version, book_uuid, book_version) raise httpexceptions.HTTPMovedPermanently( request.route_path('content', ident_hash=ident_hash), headers=[("Cache-Control", "max-age=60, public")])
[ "def", "redirect_legacy_content", "(", "request", ")", ":", "routing_args", "=", "request", ".", "matchdict", "objid", "=", "routing_args", "[", "'objid'", "]", "objver", "=", "routing_args", ".", "get", "(", "'objver'", ")", "filename", "=", "routing_args", ".", "get", "(", "'filename'", ")", "id", ",", "version", "=", "_convert_legacy_id", "(", "objid", ",", "objver", ")", "if", "not", "id", ":", "raise", "httpexceptions", ".", "HTTPNotFound", "(", ")", "# We always use 301 redirects (HTTPMovedPermanently) here", "# because we want search engines to move to the newer links", "# We cache these redirects only briefly because, even when versioned,", "# legacy collection versions don't include the minor version,", "# so the latest archive url could change", "if", "filename", ":", "with", "db_connect", "(", ")", "as", "db_connection", ":", "with", "db_connection", ".", "cursor", "(", ")", "as", "cursor", ":", "args", "=", "dict", "(", "id", "=", "id", ",", "version", "=", "version", ",", "filename", "=", "filename", ")", "cursor", ".", "execute", "(", "SQL", "[", "'get-resourceid-by-filename'", "]", ",", "args", ")", "try", ":", "res", "=", "cursor", ".", "fetchone", "(", ")", "resourceid", "=", "res", "[", "0", "]", "raise", "httpexceptions", ".", "HTTPMovedPermanently", "(", "request", ".", "route_path", "(", "'resource'", ",", "hash", "=", "resourceid", ",", "ignore", "=", "u'/{}'", ".", "format", "(", "filename", ")", ")", ",", "headers", "=", "[", "(", "\"Cache-Control\"", ",", "\"max-age=60, public\"", ")", "]", ")", "except", "TypeError", ":", "# None returned", "raise", "httpexceptions", ".", "HTTPNotFound", "(", ")", "ident_hash", "=", "join_ident_hash", "(", "id", ",", "version", ")", "params", "=", "request", ".", "params", "if", "params", ".", "get", "(", "'collection'", ")", ":", "# page in book", "objid", ",", "objver", "=", "split_legacy_hash", "(", "params", "[", "'collection'", "]", ")", "book_uuid", ",", "book_version", "=", "_convert_legacy_id", "(", "objid", ",", "objver", ")", "if", "book_uuid", ":", "id", ",", "ident_hash", "=", "_get_page_in_book", "(", "id", ",", "version", ",", "book_uuid", ",", "book_version", ")", "raise", "httpexceptions", ".", "HTTPMovedPermanently", "(", "request", ".", "route_path", "(", "'content'", ",", "ident_hash", "=", "ident_hash", ")", ",", "headers", "=", "[", "(", "\"Cache-Control\"", ",", "\"max-age=60, public\"", ")", "]", ")" ]
Redirect from legacy /content/id/version to new /contents/uuid@version. Handles collection context (book) as well.
[ "Redirect", "from", "legacy", "/", "content", "/", "id", "/", "version", "to", "new", "/", "contents", "/", "uuid@version", "." ]
python
train
DataBiosphere/dsub
dsub/providers/google_v2.py
https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/providers/google_v2.py#L872-L931
def submit_job(self, job_descriptor, skip_if_output_present): """Submit the job (or tasks) to be executed. Args: job_descriptor: all parameters needed to launch all job tasks skip_if_output_present: (boolean) if true, skip tasks whose output is present (see --skip flag for more explanation). Returns: A dictionary containing the 'user-id', 'job-id', and 'task-id' list. For jobs that are not task array jobs, the task-id list should be empty. Raises: ValueError: if job resources or task data contain illegal values. """ # Validate task data and resources. param_util.validate_submit_args_or_fail( job_descriptor, provider_name=_PROVIDER_NAME, input_providers=_SUPPORTED_INPUT_PROVIDERS, output_providers=_SUPPORTED_OUTPUT_PROVIDERS, logging_providers=_SUPPORTED_LOGGING_PROVIDERS) # Prepare and submit jobs. launched_tasks = [] requests = [] for task_view in job_model.task_view_generator(job_descriptor): job_params = task_view.job_params task_params = task_view.task_descriptors[0].task_params outputs = job_params['outputs'] | task_params['outputs'] if skip_if_output_present: # check whether the output's already there if dsub_util.outputs_are_present(outputs): print('Skipping task because its outputs are present') continue request = self._build_pipeline_request(task_view) if self._dry_run: requests.append(request) else: task_id = self._submit_pipeline(request) launched_tasks.append(task_id) # If this is a dry-run, emit all the pipeline request objects if self._dry_run: print( json.dumps( requests, indent=2, sort_keys=True, separators=(',', ': '))) if not requests and not launched_tasks: return {'job-id': dsub_util.NO_JOB} return { 'job-id': job_descriptor.job_metadata['job-id'], 'user-id': job_descriptor.job_metadata['user-id'], 'task-id': [task_id for task_id in launched_tasks if task_id], }
[ "def", "submit_job", "(", "self", ",", "job_descriptor", ",", "skip_if_output_present", ")", ":", "# Validate task data and resources.", "param_util", ".", "validate_submit_args_or_fail", "(", "job_descriptor", ",", "provider_name", "=", "_PROVIDER_NAME", ",", "input_providers", "=", "_SUPPORTED_INPUT_PROVIDERS", ",", "output_providers", "=", "_SUPPORTED_OUTPUT_PROVIDERS", ",", "logging_providers", "=", "_SUPPORTED_LOGGING_PROVIDERS", ")", "# Prepare and submit jobs.", "launched_tasks", "=", "[", "]", "requests", "=", "[", "]", "for", "task_view", "in", "job_model", ".", "task_view_generator", "(", "job_descriptor", ")", ":", "job_params", "=", "task_view", ".", "job_params", "task_params", "=", "task_view", ".", "task_descriptors", "[", "0", "]", ".", "task_params", "outputs", "=", "job_params", "[", "'outputs'", "]", "|", "task_params", "[", "'outputs'", "]", "if", "skip_if_output_present", ":", "# check whether the output's already there", "if", "dsub_util", ".", "outputs_are_present", "(", "outputs", ")", ":", "print", "(", "'Skipping task because its outputs are present'", ")", "continue", "request", "=", "self", ".", "_build_pipeline_request", "(", "task_view", ")", "if", "self", ".", "_dry_run", ":", "requests", ".", "append", "(", "request", ")", "else", ":", "task_id", "=", "self", ".", "_submit_pipeline", "(", "request", ")", "launched_tasks", ".", "append", "(", "task_id", ")", "# If this is a dry-run, emit all the pipeline request objects", "if", "self", ".", "_dry_run", ":", "print", "(", "json", ".", "dumps", "(", "requests", ",", "indent", "=", "2", ",", "sort_keys", "=", "True", ",", "separators", "=", "(", "','", ",", "': '", ")", ")", ")", "if", "not", "requests", "and", "not", "launched_tasks", ":", "return", "{", "'job-id'", ":", "dsub_util", ".", "NO_JOB", "}", "return", "{", "'job-id'", ":", "job_descriptor", ".", "job_metadata", "[", "'job-id'", "]", ",", "'user-id'", ":", "job_descriptor", ".", "job_metadata", "[", "'user-id'", "]", ",", "'task-id'", ":", "[", "task_id", "for", "task_id", "in", "launched_tasks", "if", "task_id", "]", ",", "}" ]
Submit the job (or tasks) to be executed. Args: job_descriptor: all parameters needed to launch all job tasks skip_if_output_present: (boolean) if true, skip tasks whose output is present (see --skip flag for more explanation). Returns: A dictionary containing the 'user-id', 'job-id', and 'task-id' list. For jobs that are not task array jobs, the task-id list should be empty. Raises: ValueError: if job resources or task data contain illegal values.
[ "Submit", "the", "job", "(", "or", "tasks", ")", "to", "be", "executed", "." ]
python
valid
relekang/python-semantic-release
semantic_release/cli.py
https://github.com/relekang/python-semantic-release/blob/76123f410180599a19e7c48da413880185bbea20/semantic_release/cli.py#L96-L129
def changelog(**kwargs): """ Generates the changelog since the last release. :raises ImproperConfigurationError: if there is no current version """ current_version = get_current_version() debug('changelog got current_version', current_version) if current_version is None: raise ImproperConfigurationError( "Unable to get the current version. " "Make sure semantic_release.version_variable " "is setup correctly" ) previous_version = get_previous_version(current_version) debug('changelog got previous_version', previous_version) log = generate_changelog(previous_version, current_version) click.echo(markdown_changelog(current_version, log, header=False)) debug('noop={}, post={}'.format(kwargs.get('noop'), kwargs.get('post'))) if not kwargs.get('noop') and kwargs.get('post'): if check_token(): owner, name = get_repository_owner_and_name() click.echo('Updating changelog') post_changelog( owner, name, current_version, markdown_changelog(current_version, log, header=False) ) else: click.echo( click.style('Missing token: cannot post changelog', 'red'), err=True)
[ "def", "changelog", "(", "*", "*", "kwargs", ")", ":", "current_version", "=", "get_current_version", "(", ")", "debug", "(", "'changelog got current_version'", ",", "current_version", ")", "if", "current_version", "is", "None", ":", "raise", "ImproperConfigurationError", "(", "\"Unable to get the current version. \"", "\"Make sure semantic_release.version_variable \"", "\"is setup correctly\"", ")", "previous_version", "=", "get_previous_version", "(", "current_version", ")", "debug", "(", "'changelog got previous_version'", ",", "previous_version", ")", "log", "=", "generate_changelog", "(", "previous_version", ",", "current_version", ")", "click", ".", "echo", "(", "markdown_changelog", "(", "current_version", ",", "log", ",", "header", "=", "False", ")", ")", "debug", "(", "'noop={}, post={}'", ".", "format", "(", "kwargs", ".", "get", "(", "'noop'", ")", ",", "kwargs", ".", "get", "(", "'post'", ")", ")", ")", "if", "not", "kwargs", ".", "get", "(", "'noop'", ")", "and", "kwargs", ".", "get", "(", "'post'", ")", ":", "if", "check_token", "(", ")", ":", "owner", ",", "name", "=", "get_repository_owner_and_name", "(", ")", "click", ".", "echo", "(", "'Updating changelog'", ")", "post_changelog", "(", "owner", ",", "name", ",", "current_version", ",", "markdown_changelog", "(", "current_version", ",", "log", ",", "header", "=", "False", ")", ")", "else", ":", "click", ".", "echo", "(", "click", ".", "style", "(", "'Missing token: cannot post changelog'", ",", "'red'", ")", ",", "err", "=", "True", ")" ]
Generates the changelog since the last release. :raises ImproperConfigurationError: if there is no current version
[ "Generates", "the", "changelog", "since", "the", "last", "release", ".", ":", "raises", "ImproperConfigurationError", ":", "if", "there", "is", "no", "current", "version" ]
python
train
dade-ai/snipy
snipy/io/fileutil.py
https://github.com/dade-ai/snipy/blob/408520867179f99b3158b57520e2619f3fecd69b/snipy/io/fileutil.py#L334-L345
def findfolder(toppath, match='*', exclude=''): """ recursively find folder path from toppath. patterns to decide to walk folder path or not :type toppath: str :type match: str or list(str) :type exclude: str or list(str) :rtype: generator for path str """ pred = _pred_pattern(match, exclude) return (p for p in walkfolder(toppath, pred))
[ "def", "findfolder", "(", "toppath", ",", "match", "=", "'*'", ",", "exclude", "=", "''", ")", ":", "pred", "=", "_pred_pattern", "(", "match", ",", "exclude", ")", "return", "(", "p", "for", "p", "in", "walkfolder", "(", "toppath", ",", "pred", ")", ")" ]
recursively find folder path from toppath. patterns to decide to walk folder path or not :type toppath: str :type match: str or list(str) :type exclude: str or list(str) :rtype: generator for path str
[ "recursively", "find", "folder", "path", "from", "toppath", ".", "patterns", "to", "decide", "to", "walk", "folder", "path", "or", "not", ":", "type", "toppath", ":", "str", ":", "type", "match", ":", "str", "or", "list", "(", "str", ")", ":", "type", "exclude", ":", "str", "or", "list", "(", "str", ")", ":", "rtype", ":", "generator", "for", "path", "str" ]
python
valid
googledatalab/pydatalab
google/datalab/bigquery/_query.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/google/datalab/bigquery/_query.py#L122-L167
def _expanded_sql(self, sampling=None): """Get the expanded SQL of this object, including all subqueries, UDFs, and external datasources Returns: The expanded SQL string of this object """ # use lists to preserve the order of subqueries, bigquery will not like listing subqueries # out of order if they depend on each other. for example. the following will be rejected: # WITH q2 as (SELECT * FROM q1), # q1 as (SELECT * FROM mytable), # SELECT * FROM q2 # so when we're getting the dependencies, use recursion into a list to maintain the order udfs = [] subqueries = [] expanded_sql = '' def _recurse_subqueries(query): """Recursively scan subqueries and add their pieces to global scope udfs and subqueries """ if query._subqueries: for subquery in query._subqueries: _recurse_subqueries(subquery[1]) subqueries.extend([s for s in query._subqueries if s not in subqueries]) if query._udfs: # query._udfs is a list of (name, UDF) tuples; we just want the UDF. udfs.extend([u[1] for u in query._udfs if u[1] not in udfs]) _recurse_subqueries(self) if udfs: expanded_sql += '\n'.join([udf._expanded_sql() for udf in udfs]) expanded_sql += '\n' def _indent_query(subquery): return ' ' + subquery._sql.replace('\n', '\n ') if subqueries: expanded_sql += 'WITH ' + \ '\n),\n'.join(['%s AS (\n%s' % (sq[0], _indent_query(sq[1])) for sq in subqueries]) expanded_sql += '\n)\n\n' expanded_sql += sampling(self._sql) if sampling else self._sql return expanded_sql
[ "def", "_expanded_sql", "(", "self", ",", "sampling", "=", "None", ")", ":", "# use lists to preserve the order of subqueries, bigquery will not like listing subqueries", "# out of order if they depend on each other. for example. the following will be rejected:", "# WITH q2 as (SELECT * FROM q1),", "# q1 as (SELECT * FROM mytable),", "# SELECT * FROM q2", "# so when we're getting the dependencies, use recursion into a list to maintain the order", "udfs", "=", "[", "]", "subqueries", "=", "[", "]", "expanded_sql", "=", "''", "def", "_recurse_subqueries", "(", "query", ")", ":", "\"\"\"Recursively scan subqueries and add their pieces to global scope udfs and subqueries\n \"\"\"", "if", "query", ".", "_subqueries", ":", "for", "subquery", "in", "query", ".", "_subqueries", ":", "_recurse_subqueries", "(", "subquery", "[", "1", "]", ")", "subqueries", ".", "extend", "(", "[", "s", "for", "s", "in", "query", ".", "_subqueries", "if", "s", "not", "in", "subqueries", "]", ")", "if", "query", ".", "_udfs", ":", "# query._udfs is a list of (name, UDF) tuples; we just want the UDF.", "udfs", ".", "extend", "(", "[", "u", "[", "1", "]", "for", "u", "in", "query", ".", "_udfs", "if", "u", "[", "1", "]", "not", "in", "udfs", "]", ")", "_recurse_subqueries", "(", "self", ")", "if", "udfs", ":", "expanded_sql", "+=", "'\\n'", ".", "join", "(", "[", "udf", ".", "_expanded_sql", "(", ")", "for", "udf", "in", "udfs", "]", ")", "expanded_sql", "+=", "'\\n'", "def", "_indent_query", "(", "subquery", ")", ":", "return", "' '", "+", "subquery", ".", "_sql", ".", "replace", "(", "'\\n'", ",", "'\\n '", ")", "if", "subqueries", ":", "expanded_sql", "+=", "'WITH '", "+", "'\\n),\\n'", ".", "join", "(", "[", "'%s AS (\\n%s'", "%", "(", "sq", "[", "0", "]", ",", "_indent_query", "(", "sq", "[", "1", "]", ")", ")", "for", "sq", "in", "subqueries", "]", ")", "expanded_sql", "+=", "'\\n)\\n\\n'", "expanded_sql", "+=", "sampling", "(", "self", ".", "_sql", ")", "if", "sampling", "else", "self", ".", "_sql", "return", "expanded_sql" ]
Get the expanded SQL of this object, including all subqueries, UDFs, and external datasources Returns: The expanded SQL string of this object
[ "Get", "the", "expanded", "SQL", "of", "this", "object", "including", "all", "subqueries", "UDFs", "and", "external", "datasources" ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/controllers/execution_ticker.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/execution_ticker.py#L72-L82
def on_config_value_changed(self, config_m, prop_name, info): """Callback when a config value has been changed :param ConfigModel config_m: The config model that has been changed :param str prop_name: Should always be 'config' :param dict info: Information e.g. about the changed config key """ config_key = info['args'][1] if config_key in ["EXECUTION_TICKER_ENABLED"]: self.check_configuration()
[ "def", "on_config_value_changed", "(", "self", ",", "config_m", ",", "prop_name", ",", "info", ")", ":", "config_key", "=", "info", "[", "'args'", "]", "[", "1", "]", "if", "config_key", "in", "[", "\"EXECUTION_TICKER_ENABLED\"", "]", ":", "self", ".", "check_configuration", "(", ")" ]
Callback when a config value has been changed :param ConfigModel config_m: The config model that has been changed :param str prop_name: Should always be 'config' :param dict info: Information e.g. about the changed config key
[ "Callback", "when", "a", "config", "value", "has", "been", "changed" ]
python
train
erdc/RAPIDpy
RAPIDpy/helper_functions.py
https://github.com/erdc/RAPIDpy/blob/50e14e130554b254a00ff23b226cd7e4c6cfe91a/RAPIDpy/helper_functions.py#L108-L112
def compare_csv_timeseries_files(file1, file2, header=True): """ This function compares two csv files """ return compare_csv_decimal_files(file1, file2, header, True)
[ "def", "compare_csv_timeseries_files", "(", "file1", ",", "file2", ",", "header", "=", "True", ")", ":", "return", "compare_csv_decimal_files", "(", "file1", ",", "file2", ",", "header", ",", "True", ")" ]
This function compares two csv files
[ "This", "function", "compares", "two", "csv", "files" ]
python
train
raiden-network/raiden-contracts
raiden_contracts/deploy/__main__.py
https://github.com/raiden-network/raiden-contracts/blob/a7e72a9477f2204b03f3706360ea8d9c0a8e7063/raiden_contracts/deploy/__main__.py#L49-L84
def common_options(func): """A decorator that combines commonly appearing @click.option decorators.""" @click.option( '--private-key', required=True, help='Path to a private key store.', ) @click.option( '--rpc-provider', default='http://127.0.0.1:8545', help='Address of the Ethereum RPC provider', ) @click.option( '--wait', default=300, help='Max tx wait time in s.', ) @click.option( '--gas-price', default=5, type=int, help='Gas price to use in gwei', ) @click.option( '--gas-limit', default=5_500_000, ) @click.option( '--contracts-version', default=None, help='Contracts version to verify. Current version will be used by default.', ) @functools.wraps(func) def wrapper(*args, **kwargs): return func(*args, **kwargs) return wrapper
[ "def", "common_options", "(", "func", ")", ":", "@", "click", ".", "option", "(", "'--private-key'", ",", "required", "=", "True", ",", "help", "=", "'Path to a private key store.'", ",", ")", "@", "click", ".", "option", "(", "'--rpc-provider'", ",", "default", "=", "'http://127.0.0.1:8545'", ",", "help", "=", "'Address of the Ethereum RPC provider'", ",", ")", "@", "click", ".", "option", "(", "'--wait'", ",", "default", "=", "300", ",", "help", "=", "'Max tx wait time in s.'", ",", ")", "@", "click", ".", "option", "(", "'--gas-price'", ",", "default", "=", "5", ",", "type", "=", "int", ",", "help", "=", "'Gas price to use in gwei'", ",", ")", "@", "click", ".", "option", "(", "'--gas-limit'", ",", "default", "=", "5_500_000", ",", ")", "@", "click", ".", "option", "(", "'--contracts-version'", ",", "default", "=", "None", ",", "help", "=", "'Contracts version to verify. Current version will be used by default.'", ",", ")", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
A decorator that combines commonly appearing @click.option decorators.
[ "A", "decorator", "that", "combines", "commonly", "appearing" ]
python
train
thelabnyc/wagtail_blog
blog/wp_xml_parser.py
https://github.com/thelabnyc/wagtail_blog/blob/7e092c02d10ec427c9a2c4b5dcbe910d88c628cf/blog/wp_xml_parser.py#L155-L178
def translate_item(self, item_dict): """cleanup item keys to match API json format""" if not item_dict.get('title'): return None # Skip attachments if item_dict.get('{wp}post_type', None) == 'attachment': return None ret_dict = {} # slugify post title if no slug exists ret_dict['slug']= item_dict.get('{wp}post_name') or re.sub(item_dict['title'],' ','-') ret_dict['ID']= item_dict['guid'] ret_dict['title']= item_dict['title'] ret_dict['description']= item_dict['description'] ret_dict['content']= item_dict['{content}encoded'] # fake user object ret_dict['author']= {'username':item_dict['{dc}creator'], 'first_name':'', 'last_name':''} ret_dict['terms']= item_dict.get('terms') ret_dict['date']= self.convert_date( item_dict['pubDate'], fallback=item_dict.get('{wp}post_date','') ) return ret_dict
[ "def", "translate_item", "(", "self", ",", "item_dict", ")", ":", "if", "not", "item_dict", ".", "get", "(", "'title'", ")", ":", "return", "None", "# Skip attachments", "if", "item_dict", ".", "get", "(", "'{wp}post_type'", ",", "None", ")", "==", "'attachment'", ":", "return", "None", "ret_dict", "=", "{", "}", "# slugify post title if no slug exists", "ret_dict", "[", "'slug'", "]", "=", "item_dict", ".", "get", "(", "'{wp}post_name'", ")", "or", "re", ".", "sub", "(", "item_dict", "[", "'title'", "]", ",", "' '", ",", "'-'", ")", "ret_dict", "[", "'ID'", "]", "=", "item_dict", "[", "'guid'", "]", "ret_dict", "[", "'title'", "]", "=", "item_dict", "[", "'title'", "]", "ret_dict", "[", "'description'", "]", "=", "item_dict", "[", "'description'", "]", "ret_dict", "[", "'content'", "]", "=", "item_dict", "[", "'{content}encoded'", "]", "# fake user object", "ret_dict", "[", "'author'", "]", "=", "{", "'username'", ":", "item_dict", "[", "'{dc}creator'", "]", ",", "'first_name'", ":", "''", ",", "'last_name'", ":", "''", "}", "ret_dict", "[", "'terms'", "]", "=", "item_dict", ".", "get", "(", "'terms'", ")", "ret_dict", "[", "'date'", "]", "=", "self", ".", "convert_date", "(", "item_dict", "[", "'pubDate'", "]", ",", "fallback", "=", "item_dict", ".", "get", "(", "'{wp}post_date'", ",", "''", ")", ")", "return", "ret_dict" ]
cleanup item keys to match API json format
[ "cleanup", "item", "keys", "to", "match", "API", "json", "format" ]
python
train
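Illustrative data shape (not part of the original record; all values invented): an item_dict in the form this parser expects, and the API-style dict translate_item builds from it. The date value depends on convert_date and is left symbolic.

item = {
    'title': 'Hello world',
    '{wp}post_type': 'post',
    '{wp}post_name': 'hello-world',
    'guid': 'https://example.com/?p=1',
    'description': '',
    '{content}encoded': '<p>First post.</p>',
    '{dc}creator': 'alice',
    'terms': None,
    'pubDate': 'Mon, 01 Jan 2018 00:00:00 +0000',
}
# translate_item(item) would return roughly:
# {'slug': 'hello-world', 'ID': 'https://example.com/?p=1', 'title': 'Hello world',
#  'description': '', 'content': '<p>First post.</p>',
#  'author': {'username': 'alice', 'first_name': '', 'last_name': ''},
#  'terms': None, 'date': <result of convert_date on pubDate>}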
JoeVirtual/KonFoo
konfoo/core.py
https://github.com/JoeVirtual/KonFoo/blob/0c62ef5c2bed4deaf908b34082e4de2544532fdc/konfoo/core.py#L1105-L1121
def read_from(self, provider, **options): """ All :class:`Pointer` fields in the `Sequence` read the necessary number of bytes from the data :class:`Provider` for their referenced :attr:`~Pointer.data` object. Null pointer are ignored. :param Provider provider: data :class:`Provider`. :keyword bool nested: if ``True`` all :class:`Pointer` fields in the :attr:`~Pointer.data` objects of all :class:`Pointer` fields in the `Sequence` reads their referenced :attr:`~Pointer.data` object as well (chained method call). Each :class:`Pointer` field stores the bytes for its referenced :attr:`~Pointer.data` object in its :attr:`~Pointer.bytestream`. """ for item in iter(self): # Container or Pointer if is_mixin(item): item.read_from(provider, **options)
[ "def", "read_from", "(", "self", ",", "provider", ",", "*", "*", "options", ")", ":", "for", "item", "in", "iter", "(", "self", ")", ":", "# Container or Pointer", "if", "is_mixin", "(", "item", ")", ":", "item", ".", "read_from", "(", "provider", ",", "*", "*", "options", ")" ]
All :class:`Pointer` fields in the `Sequence` read the necessary number of bytes from the data :class:`Provider` for their referenced :attr:`~Pointer.data` object. Null pointer are ignored. :param Provider provider: data :class:`Provider`. :keyword bool nested: if ``True`` all :class:`Pointer` fields in the :attr:`~Pointer.data` objects of all :class:`Pointer` fields in the `Sequence` reads their referenced :attr:`~Pointer.data` object as well (chained method call). Each :class:`Pointer` field stores the bytes for its referenced :attr:`~Pointer.data` object in its :attr:`~Pointer.bytestream`.
[ "All", ":", "class", ":", "Pointer", "fields", "in", "the", "Sequence", "read", "the", "necessary", "number", "of", "bytes", "from", "the", "data", ":", "class", ":", "Provider", "for", "their", "referenced", ":", "attr", ":", "~Pointer", ".", "data", "object", ".", "Null", "pointer", "are", "ignored", "." ]
python
train
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/_pipeline.py
https://github.com/trec-kba/streamcorpus-pipeline/blob/8bb82ea1beb83c6b40ed03fa1659df2897c2292a/streamcorpus_pipeline/_pipeline.py#L514-L650
def run(self, i_str, start_count=0, start_chunk_time=None): '''Run the pipeline. This runs all of the steps described in the pipeline constructor, reading from some input and writing to some output. :param str i_str: name of the input file, or other reader-specific description of where to get input :param int start_count: index of the first stream item :param int start_chunk_time: timestamp for the first stream item ''' try: if not os.path.exists(self.tmp_dir_path): os.makedirs(self.tmp_dir_path) if start_chunk_time is None: start_chunk_time = time.time() ## the reader returns generators of StreamItems i_chunk = self.reader(i_str) ## t_path points to the currently in-progress temp chunk t_path = None ## loop over all docs in the chunk processing and cutting ## smaller chunks if needed len_clean_visible = 0 sources = set() next_idx = 0 ## how many have we input and actually done processing on? input_item_count = 0 for si in i_chunk: # TODO: break out a _process_stream_item function? next_idx += 1 ## yield to the gevent hub to allow other things to run if gevent: gevent.sleep(0) ## skip forward until we reach start_count if next_idx <= start_count: continue if next_idx % self.rate_log_interval == 0: ## indexing is zero-based, so next_idx corresponds ## to length of list of SIs processed so far elapsed = time.time() - start_chunk_time if elapsed > 0: rate = float(next_idx) / elapsed logger.info('%d in %.1f --> %.1f per sec on ' '(pre-partial_commit) %s', next_idx - start_count, elapsed, rate, i_str) if not self.t_chunk: ## make a temporary chunk at a temporary path # (Lazy allocation after we've read an item that might get processed out to the new chunk file) # TODO: make this EVEN LAZIER by not opening the t_chunk until inside _run_incremental_transforms whe the first output si is ready t_path = os.path.join(self.tmp_dir_path, 't_chunk-%s' % uuid.uuid4().hex) self.t_chunk = streamcorpus.Chunk(path=t_path, mode='wb') assert self.t_chunk.message == streamcorpus.StreamItem_v0_3_0, self.t_chunk.message # TODO: a set of incremental transforms is equivalent # to a batch transform. Make the pipeline explicitly # configurable as such: # # batch_transforms: [[incr set 1], batch op, [incr set 2], ...] 
# # OR: for some list of transforms (mixed incremental # and batch) pipeline can detect and batchify as needed ## incremental transforms populate t_chunk ## let the incremental transforms destroy the si by ## returning None si = self._run_incremental_transforms( si, self.incremental_transforms) ## insist that every chunk has only one source string if si: sources.add(si.source) if self.assert_single_source and len(sources) != 1: raise InvalidStreamItem( 'stream item %r had source %r, not %r ' '(set assert_single_source: false to suppress)' % (si.stream_id, si.source, sources)) if si and si.body and si.body.clean_visible: len_clean_visible += len(si.body.clean_visible) ## log binned clean_visible lengths, for quick stats estimates #logger.debug('len(si.body.clean_visible)=%d' % int(10 * int(math.floor(float(len(si.body.clean_visible)) / 2**10)/10))) #logger.debug('len(si.body.clean_visible)=%d' % len(si.body.clean_visible)) if ((self.output_chunk_max_count is not None and len(self.t_chunk) == self.output_chunk_max_count)): logger.info('reached output_chunk_max_count (%d) at: %d', len(self.t_chunk), next_idx) self._process_output_chunk( start_count, next_idx, sources, i_str, t_path) start_count = next_idx elif (self.output_max_clean_visible_bytes is not None and len_clean_visible >= self.output_chunk_max_clean_visible_bytes): logger.info( 'reached output_chunk_max_clean_visible_bytes ' '(%d) at: %d', self.output_chunk_max_clean_visible_bytes, len_clean_visible) len_clean_visible = 0 self._process_output_chunk( start_count, next_idx, sources, i_str, t_path) start_count = next_idx input_item_count += 1 if (((self.input_item_limit is not None) and (input_item_count > self.input_item_limit))): break if self.t_chunk is not None: self._process_output_chunk( start_count, next_idx, sources, i_str, t_path) ## return how many stream items we processed return next_idx finally: if self.t_chunk is not None: self.t_chunk.close() for transform in self.batch_transforms: transform.shutdown() if self.cleanup_tmp_files: rmtree(self.tmp_dir_path)
[ "def", "run", "(", "self", ",", "i_str", ",", "start_count", "=", "0", ",", "start_chunk_time", "=", "None", ")", ":", "try", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "tmp_dir_path", ")", ":", "os", ".", "makedirs", "(", "self", ".", "tmp_dir_path", ")", "if", "start_chunk_time", "is", "None", ":", "start_chunk_time", "=", "time", ".", "time", "(", ")", "## the reader returns generators of StreamItems", "i_chunk", "=", "self", ".", "reader", "(", "i_str", ")", "## t_path points to the currently in-progress temp chunk", "t_path", "=", "None", "## loop over all docs in the chunk processing and cutting", "## smaller chunks if needed", "len_clean_visible", "=", "0", "sources", "=", "set", "(", ")", "next_idx", "=", "0", "## how many have we input and actually done processing on?", "input_item_count", "=", "0", "for", "si", "in", "i_chunk", ":", "# TODO: break out a _process_stream_item function?", "next_idx", "+=", "1", "## yield to the gevent hub to allow other things to run", "if", "gevent", ":", "gevent", ".", "sleep", "(", "0", ")", "## skip forward until we reach start_count", "if", "next_idx", "<=", "start_count", ":", "continue", "if", "next_idx", "%", "self", ".", "rate_log_interval", "==", "0", ":", "## indexing is zero-based, so next_idx corresponds", "## to length of list of SIs processed so far", "elapsed", "=", "time", ".", "time", "(", ")", "-", "start_chunk_time", "if", "elapsed", ">", "0", ":", "rate", "=", "float", "(", "next_idx", ")", "/", "elapsed", "logger", ".", "info", "(", "'%d in %.1f --> %.1f per sec on '", "'(pre-partial_commit) %s'", ",", "next_idx", "-", "start_count", ",", "elapsed", ",", "rate", ",", "i_str", ")", "if", "not", "self", ".", "t_chunk", ":", "## make a temporary chunk at a temporary path", "# (Lazy allocation after we've read an item that might get processed out to the new chunk file)", "# TODO: make this EVEN LAZIER by not opening the t_chunk until inside _run_incremental_transforms whe the first output si is ready", "t_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "tmp_dir_path", ",", "'t_chunk-%s'", "%", "uuid", ".", "uuid4", "(", ")", ".", "hex", ")", "self", ".", "t_chunk", "=", "streamcorpus", ".", "Chunk", "(", "path", "=", "t_path", ",", "mode", "=", "'wb'", ")", "assert", "self", ".", "t_chunk", ".", "message", "==", "streamcorpus", ".", "StreamItem_v0_3_0", ",", "self", ".", "t_chunk", ".", "message", "# TODO: a set of incremental transforms is equivalent", "# to a batch transform. 
Make the pipeline explicitly", "# configurable as such:", "#", "# batch_transforms: [[incr set 1], batch op, [incr set 2], ...]", "#", "# OR: for some list of transforms (mixed incremental", "# and batch) pipeline can detect and batchify as needed", "## incremental transforms populate t_chunk", "## let the incremental transforms destroy the si by", "## returning None", "si", "=", "self", ".", "_run_incremental_transforms", "(", "si", ",", "self", ".", "incremental_transforms", ")", "## insist that every chunk has only one source string", "if", "si", ":", "sources", ".", "add", "(", "si", ".", "source", ")", "if", "self", ".", "assert_single_source", "and", "len", "(", "sources", ")", "!=", "1", ":", "raise", "InvalidStreamItem", "(", "'stream item %r had source %r, not %r '", "'(set assert_single_source: false to suppress)'", "%", "(", "si", ".", "stream_id", ",", "si", ".", "source", ",", "sources", ")", ")", "if", "si", "and", "si", ".", "body", "and", "si", ".", "body", ".", "clean_visible", ":", "len_clean_visible", "+=", "len", "(", "si", ".", "body", ".", "clean_visible", ")", "## log binned clean_visible lengths, for quick stats estimates", "#logger.debug('len(si.body.clean_visible)=%d' % int(10 * int(math.floor(float(len(si.body.clean_visible)) / 2**10)/10)))", "#logger.debug('len(si.body.clean_visible)=%d' % len(si.body.clean_visible))", "if", "(", "(", "self", ".", "output_chunk_max_count", "is", "not", "None", "and", "len", "(", "self", ".", "t_chunk", ")", "==", "self", ".", "output_chunk_max_count", ")", ")", ":", "logger", ".", "info", "(", "'reached output_chunk_max_count (%d) at: %d'", ",", "len", "(", "self", ".", "t_chunk", ")", ",", "next_idx", ")", "self", ".", "_process_output_chunk", "(", "start_count", ",", "next_idx", ",", "sources", ",", "i_str", ",", "t_path", ")", "start_count", "=", "next_idx", "elif", "(", "self", ".", "output_max_clean_visible_bytes", "is", "not", "None", "and", "len_clean_visible", ">=", "self", ".", "output_chunk_max_clean_visible_bytes", ")", ":", "logger", ".", "info", "(", "'reached output_chunk_max_clean_visible_bytes '", "'(%d) at: %d'", ",", "self", ".", "output_chunk_max_clean_visible_bytes", ",", "len_clean_visible", ")", "len_clean_visible", "=", "0", "self", ".", "_process_output_chunk", "(", "start_count", ",", "next_idx", ",", "sources", ",", "i_str", ",", "t_path", ")", "start_count", "=", "next_idx", "input_item_count", "+=", "1", "if", "(", "(", "(", "self", ".", "input_item_limit", "is", "not", "None", ")", "and", "(", "input_item_count", ">", "self", ".", "input_item_limit", ")", ")", ")", ":", "break", "if", "self", ".", "t_chunk", "is", "not", "None", ":", "self", ".", "_process_output_chunk", "(", "start_count", ",", "next_idx", ",", "sources", ",", "i_str", ",", "t_path", ")", "## return how many stream items we processed", "return", "next_idx", "finally", ":", "if", "self", ".", "t_chunk", "is", "not", "None", ":", "self", ".", "t_chunk", ".", "close", "(", ")", "for", "transform", "in", "self", ".", "batch_transforms", ":", "transform", ".", "shutdown", "(", ")", "if", "self", ".", "cleanup_tmp_files", ":", "rmtree", "(", "self", ".", "tmp_dir_path", ")" ]
Run the pipeline. This runs all of the steps described in the pipeline constructor, reading from some input and writing to some output. :param str i_str: name of the input file, or other reader-specific description of where to get input :param int start_count: index of the first stream item :param int start_chunk_time: timestamp for the first stream item
[ "Run", "the", "pipeline", "." ]
python
test
tonysimpson/nanomsg-python
nanomsg/__init__.py
https://github.com/tonysimpson/nanomsg-python/blob/3acd9160f90f91034d4a43ce603aaa19fbaf1f2e/nanomsg/__init__.py#L94-L127
def poll(in_sockets, out_sockets, timeout=-1): """ Poll a list of sockets :param in_sockets: sockets for reading :param out_sockets: sockets for writing :param timeout: poll timeout in seconds, -1 is infinite wait :return: tuple (read socket list, write socket list) """ sockets = {} # reverse map fd => socket fd_sockets = {} for s in in_sockets: sockets[s.fd] = POLLIN fd_sockets[s.fd] = s for s in out_sockets: modes = sockets.get(s.fd, 0) sockets[s.fd] = modes | POLLOUT fd_sockets[s.fd] = s # convert to milliseconds or -1 if timeout >= 0: timeout_ms = int(timeout*1000) else: timeout_ms = -1 res, sockets = wrapper.nn_poll(sockets, timeout_ms) _nn_check_positive_rtn(res) read_list, write_list = [], [] for fd, result in sockets.items(): if (result & POLLIN) != 0: read_list.append(fd_sockets[fd]) if (result & POLLOUT) != 0: write_list.append(fd_sockets[fd]) return read_list, write_list
[ "def", "poll", "(", "in_sockets", ",", "out_sockets", ",", "timeout", "=", "-", "1", ")", ":", "sockets", "=", "{", "}", "# reverse map fd => socket", "fd_sockets", "=", "{", "}", "for", "s", "in", "in_sockets", ":", "sockets", "[", "s", ".", "fd", "]", "=", "POLLIN", "fd_sockets", "[", "s", ".", "fd", "]", "=", "s", "for", "s", "in", "out_sockets", ":", "modes", "=", "sockets", ".", "get", "(", "s", ".", "fd", ",", "0", ")", "sockets", "[", "s", ".", "fd", "]", "=", "modes", "|", "POLLOUT", "fd_sockets", "[", "s", ".", "fd", "]", "=", "s", "# convert to milliseconds or -1", "if", "timeout", ">=", "0", ":", "timeout_ms", "=", "int", "(", "timeout", "*", "1000", ")", "else", ":", "timeout_ms", "=", "-", "1", "res", ",", "sockets", "=", "wrapper", ".", "nn_poll", "(", "sockets", ",", "timeout_ms", ")", "_nn_check_positive_rtn", "(", "res", ")", "read_list", ",", "write_list", "=", "[", "]", ",", "[", "]", "for", "fd", ",", "result", "in", "sockets", ".", "items", "(", ")", ":", "if", "(", "result", "&", "POLLIN", ")", "!=", "0", ":", "read_list", ".", "append", "(", "fd_sockets", "[", "fd", "]", ")", "if", "(", "result", "&", "POLLOUT", ")", "!=", "0", ":", "write_list", ".", "append", "(", "fd_sockets", "[", "fd", "]", ")", "return", "read_list", ",", "write_list" ]
Poll a list of sockets :param in_sockets: sockets for reading :param out_sockets: sockets for writing :param timeout: poll timeout in seconds, -1 is infinite wait :return: tuple (read socket list, write socket list)
[ "Poll", "a", "list", "of", "sockets", ":", "param", "in_sockets", ":", "sockets", "for", "reading", ":", "param", "out_sockets", ":", "sockets", "for", "writing", ":", "param", "timeout", ":", "poll", "timeout", "in", "seconds", "-", "1", "is", "infinite", "wait", ":", "return", ":", "tuple", "(", "read", "socket", "list", "write", "socket", "list", ")" ]
python
train
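Usage sketch (not part of the original record; assumes the same nanomsg package provides Socket and PAIR, and the inproc address is invented):

from nanomsg import Socket, PAIR, poll

s1, s2 = Socket(PAIR), Socket(PAIR)
s1.bind('inproc://example')
s2.connect('inproc://example')
s1.send(b'ping')

# Wait up to one second for s2 to become readable and s1 writable.
readable, writable = poll([s2], [s1], timeout=1.0)
if s2 in readable:
    print(s2.recv())          # b'ping'

s1.close()
s2.close()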
sernst/cauldron
cauldron/session/writing/components/definitions.py
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/session/writing/components/definitions.py#L61-L84
def combine_lists_reducer( key: str, merged_list: list, component: COMPONENT ) -> list: """ Reducer function to combine the lists for the specified key into a single, flat list :param key: The key on the COMPONENT instances to operate upon :param merged_list: The accumulated list of values populated by previous calls to this reducer function :param component: The COMPONENT instance from which to append values to the merged_list :return: The updated merged_list with the values for the COMPONENT added onto it """ merged_list.extend(getattr(component, key)) return merged_list
[ "def", "combine_lists_reducer", "(", "key", ":", "str", ",", "merged_list", ":", "list", ",", "component", ":", "COMPONENT", ")", "->", "list", ":", "merged_list", ".", "extend", "(", "getattr", "(", "component", ",", "key", ")", ")", "return", "merged_list" ]
Reducer function to combine the lists for the specified key into a single, flat list :param key: The key on the COMPONENT instances to operate upon :param merged_list: The accumulated list of values populated by previous calls to this reducer function :param component: The COMPONENT instance from which to append values to the merged_list :return: The updated merged_list with the values for the COMPONENT added onto it
[ "Reducer", "function", "to", "combine", "the", "lists", "for", "the", "specified", "key", "into", "a", "single", "flat", "list" ]
python
train
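Usage sketch (not part of the original record): the function is shaped for functools.reduce over a list of components; a stand-in namedtuple plays the role of COMPONENT, which is not shown in this record.

import functools
from collections import namedtuple

Component = namedtuple('Component', ['includes'])   # stand-in for COMPONENT
components = [
    Component(includes=['a.js']),
    Component(includes=['b.js', 'c.js']),
]

merged = functools.reduce(
    lambda acc, comp: combine_lists_reducer('includes', acc, comp),
    components,
    [],
)
print(merged)   # ['a.js', 'b.js', 'c.js']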
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavextra.py#L716-L722
def demix2(servo1, servo2, gain=0.5): '''de-mix a mixed servo output''' s1 = servo1 - 1500 s2 = servo2 - 1500 out1 = (s1+s2)*gain out2 = (s1-s2)*gain return out2+1500
[ "def", "demix2", "(", "servo1", ",", "servo2", ",", "gain", "=", "0.5", ")", ":", "s1", "=", "servo1", "-", "1500", "s2", "=", "servo2", "-", "1500", "out1", "=", "(", "s1", "+", "s2", ")", "*", "gain", "out2", "=", "(", "s1", "-", "s2", ")", "*", "gain", "return", "out2", "+", "1500" ]
de-mix a mixed servo output
[ "de", "-", "mix", "a", "mixed", "servo", "output" ]
python
train
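Worked example (not part of the original record; servo values invented, around the 1500 µs neutral point):

# s1 = 100, s2 = -100  ->  out2 = (100 - (-100)) * 0.5 + 1500
print(demix2(1600, 1400))            # 1600.0
print(demix2(1600, 1400, gain=1.0))  # 1700.0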
saltstack/salt
salt/modules/lxd.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/lxd.py#L2492-L2539
def image_list(list_aliases=False, remote_addr=None, cert=None, key=None, verify_cert=True): ''' Lists all images from the LXD. list_aliases : Return a dict with the fingerprint as key and a list of aliases as value instead. remote_addr : An URL to a remote Server, you also have to give cert and key if you provide remote_addr and its a TCP Address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Wherever to verify the cert, this is by default True but in the most cases you want to set it off as LXD normaly uses self-signed certificates. CLI Examples: .. code-block:: bash $ salt '*' lxd.image_list true --out=json $ salt '*' lxd.image_list --out=json ''' client = pylxd_client_get(remote_addr, cert, key, verify_cert) images = client.images.all() if list_aliases: return {i.fingerprint: [a['name'] for a in i.aliases] for i in images} return map(_pylxd_model_to_dict, images)
[ "def", "image_list", "(", "list_aliases", "=", "False", ",", "remote_addr", "=", "None", ",", "cert", "=", "None", ",", "key", "=", "None", ",", "verify_cert", "=", "True", ")", ":", "client", "=", "pylxd_client_get", "(", "remote_addr", ",", "cert", ",", "key", ",", "verify_cert", ")", "images", "=", "client", ".", "images", ".", "all", "(", ")", "if", "list_aliases", ":", "return", "{", "i", ".", "fingerprint", ":", "[", "a", "[", "'name'", "]", "for", "a", "in", "i", ".", "aliases", "]", "for", "i", "in", "images", "}", "return", "map", "(", "_pylxd_model_to_dict", ",", "images", ")" ]
Lists all images from the LXD. list_aliases : Return a dict with the fingerprint as key and a list of aliases as value instead. remote_addr : An URL to a remote Server, you also have to give cert and key if you provide remote_addr and its a TCP Address! Examples: https://myserver.lan:8443 /var/lib/mysocket.sock cert : PEM Formatted SSL Certificate. Examples: ~/.config/lxc/client.crt key : PEM Formatted SSL Key. Examples: ~/.config/lxc/client.key verify_cert : True Wherever to verify the cert, this is by default True but in the most cases you want to set it off as LXD normaly uses self-signed certificates. CLI Examples: .. code-block:: bash $ salt '*' lxd.image_list true --out=json $ salt '*' lxd.image_list --out=json
[ "Lists", "all", "images", "from", "the", "LXD", "." ]
python
train
adafruit/Adafruit_Python_PN532
Adafruit_PN532/PN532.py
https://github.com/adafruit/Adafruit_Python_PN532/blob/343521a8ec842ea82f680a5ed868fee16e9609bd/Adafruit_PN532/PN532.py#L241-L274
def _read_frame(self, length): """Read a response frame from the PN532 of at most length bytes in size. Returns the data inside the frame if found, otherwise raises an exception if there is an error parsing the frame. Note that less than length bytes might be returned! """ # Read frame with expected length of data. response = self._read_data(length+8) logger.debug('Read frame: 0x{0}'.format(binascii.hexlify(response))) # Check frame starts with 0x01 and then has 0x00FF (preceeded by optional # zeros). if response[0] != 0x01: raise RuntimeError('Response frame does not start with 0x01!') # Swallow all the 0x00 values that preceed 0xFF. offset = 1 while response[offset] == 0x00: offset += 1 if offset >= len(response): raise RuntimeError('Response frame preamble does not contain 0x00FF!') if response[offset] != 0xFF: raise RuntimeError('Response frame preamble does not contain 0x00FF!') offset += 1 if offset >= len(response): raise RuntimeError('Response contains no data!') # Check length & length checksum match. frame_len = response[offset] if (frame_len + response[offset+1]) & 0xFF != 0: raise RuntimeError('Response length checksum did not match length!') # Check frame checksum value matches bytes. checksum = reduce(self._uint8_add, response[offset+2:offset+2+frame_len+1], 0) if checksum != 0: raise RuntimeError('Response checksum did not match expected value!') # Return frame data. return response[offset+2:offset+2+frame_len]
[ "def", "_read_frame", "(", "self", ",", "length", ")", ":", "# Read frame with expected length of data.", "response", "=", "self", ".", "_read_data", "(", "length", "+", "8", ")", "logger", ".", "debug", "(", "'Read frame: 0x{0}'", ".", "format", "(", "binascii", ".", "hexlify", "(", "response", ")", ")", ")", "# Check frame starts with 0x01 and then has 0x00FF (preceeded by optional", "# zeros).", "if", "response", "[", "0", "]", "!=", "0x01", ":", "raise", "RuntimeError", "(", "'Response frame does not start with 0x01!'", ")", "# Swallow all the 0x00 values that preceed 0xFF.", "offset", "=", "1", "while", "response", "[", "offset", "]", "==", "0x00", ":", "offset", "+=", "1", "if", "offset", ">=", "len", "(", "response", ")", ":", "raise", "RuntimeError", "(", "'Response frame preamble does not contain 0x00FF!'", ")", "if", "response", "[", "offset", "]", "!=", "0xFF", ":", "raise", "RuntimeError", "(", "'Response frame preamble does not contain 0x00FF!'", ")", "offset", "+=", "1", "if", "offset", ">=", "len", "(", "response", ")", ":", "raise", "RuntimeError", "(", "'Response contains no data!'", ")", "# Check length & length checksum match.", "frame_len", "=", "response", "[", "offset", "]", "if", "(", "frame_len", "+", "response", "[", "offset", "+", "1", "]", ")", "&", "0xFF", "!=", "0", ":", "raise", "RuntimeError", "(", "'Response length checksum did not match length!'", ")", "# Check frame checksum value matches bytes.", "checksum", "=", "reduce", "(", "self", ".", "_uint8_add", ",", "response", "[", "offset", "+", "2", ":", "offset", "+", "2", "+", "frame_len", "+", "1", "]", ",", "0", ")", "if", "checksum", "!=", "0", ":", "raise", "RuntimeError", "(", "'Response checksum did not match expected value!'", ")", "# Return frame data.", "return", "response", "[", "offset", "+", "2", ":", "offset", "+", "2", "+", "frame_len", "]" ]
Read a response frame from the PN532 of at most length bytes in size. Returns the data inside the frame if found, otherwise raises an exception if there is an error parsing the frame. Note that less than length bytes might be returned!
[ "Read", "a", "response", "frame", "from", "the", "PN532", "of", "at", "most", "length", "bytes", "in", "size", ".", "Returns", "the", "data", "inside", "the", "frame", "if", "found", "otherwise", "raises", "an", "exception", "if", "there", "is", "an", "error", "parsing", "the", "frame", ".", "Note", "that", "less", "than", "length", "bytes", "might", "be", "returned!" ]
python
train
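Illustrative input (not part of the original record; bytes invented) that satisfies every check in _read_frame: start byte, zero padding, the 0x00FF preamble, length plus length checksum, payload, and a data checksum that makes the 8-bit running sum zero.

frame = bytearray([
    0x01,               # start byte checked first
    0x00, 0x00, 0xFF,   # zero padding, then the 0x00FF preamble
    0x02, 0xFE,         # frame length 2 and its checksum (0x02 + 0xFE == 0x100)
    0xD5, 0x03,         # two payload bytes
    0x28,               # data checksum: (0xD5 + 0x03 + 0x28) & 0xFF == 0x00
    0x00,               # postamble
])
assert (0x02 + 0xFE) & 0xFF == 0
assert (0xD5 + 0x03 + 0x28) & 0xFF == 0
# For this input _read_frame would return the payload slice [0xD5, 0x03].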
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L78-L84
def Unpack(self, msg): """Unpacks the current Any message into specified message.""" descriptor = msg.DESCRIPTOR if not self.Is(descriptor): return False msg.ParseFromString(self.value) return True
[ "def", "Unpack", "(", "self", ",", "msg", ")", ":", "descriptor", "=", "msg", ".", "DESCRIPTOR", "if", "not", "self", ".", "Is", "(", "descriptor", ")", ":", "return", "False", "msg", ".", "ParseFromString", "(", "self", ".", "value", ")", "return", "True" ]
Unpacks the current Any message into specified message.
[ "Unpacks", "the", "current", "Any", "message", "into", "specified", "message", "." ]
python
train
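Usage sketch (not part of the original record; assumes the protobuf runtime with its well-known types is installed): pack a Timestamp into an Any, then unpack it into a fresh message.

from google.protobuf.any_pb2 import Any
from google.protobuf.timestamp_pb2 import Timestamp

ts = Timestamp()
ts.GetCurrentTime()

box = Any()
box.Pack(ts)                 # stores the type URL plus the serialized bytes

out = Timestamp()
if box.Unpack(out):          # returns False if the type URL does not match
    print(out.ToJsonString())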
quantopian/pyfolio
pyfolio/plotting.py
https://github.com/quantopian/pyfolio/blob/712716ab0cdebbec9fabb25eea3bf40e4354749d/pyfolio/plotting.py#L1114-L1194
def show_and_plot_top_positions(returns, positions_alloc, show_and_plot=2, hide_positions=False, legend_loc='real_best', ax=None, **kwargs): """ Prints and/or plots the exposures of the top 10 held positions of all time. Parameters ---------- returns : pd.Series Daily returns of the strategy, noncumulative. - See full explanation in tears.create_full_tear_sheet. positions_alloc : pd.DataFrame Portfolio allocation of positions. See pos.get_percent_alloc. show_and_plot : int, optional By default, this is 2, and both prints and plots. If this is 0, it will only plot; if 1, it will only print. hide_positions : bool, optional If True, will not output any symbol names. legend_loc : matplotlib.loc, optional The location of the legend on the plot. By default, the legend will display below the plot. ax : matplotlib.Axes, optional Axes upon which to plot. **kwargs, optional Passed to plotting function. Returns ------- ax : matplotlib.Axes, conditional The axes that were plotted on. """ positions_alloc = positions_alloc.copy() positions_alloc.columns = positions_alloc.columns.map(utils.format_asset) df_top_long, df_top_short, df_top_abs = pos.get_top_long_short_abs( positions_alloc) if show_and_plot == 1 or show_and_plot == 2: utils.print_table(pd.DataFrame(df_top_long * 100, columns=['max']), float_format='{0:.2f}%'.format, name='Top 10 long positions of all time') utils.print_table(pd.DataFrame(df_top_short * 100, columns=['max']), float_format='{0:.2f}%'.format, name='Top 10 short positions of all time') utils.print_table(pd.DataFrame(df_top_abs * 100, columns=['max']), float_format='{0:.2f}%'.format, name='Top 10 positions of all time') if show_and_plot == 0 or show_and_plot == 2: if ax is None: ax = plt.gca() positions_alloc[df_top_abs.index].plot( title='Portfolio allocation over time, only top 10 holdings', alpha=0.5, ax=ax, **kwargs) # Place legend below plot, shrink plot by 20% if legend_loc == 'real_best': box = ax.get_position() ax.set_position([box.x0, box.y0 + box.height * 0.1, box.width, box.height * 0.9]) # Put a legend below current axis ax.legend(loc='upper center', frameon=True, framealpha=0.5, bbox_to_anchor=(0.5, -0.14), ncol=5) else: ax.legend(loc=legend_loc) ax.set_xlim((returns.index[0], returns.index[-1])) ax.set_ylabel('Exposure by holding') if hide_positions: ax.legend_.remove() return ax
[ "def", "show_and_plot_top_positions", "(", "returns", ",", "positions_alloc", ",", "show_and_plot", "=", "2", ",", "hide_positions", "=", "False", ",", "legend_loc", "=", "'real_best'", ",", "ax", "=", "None", ",", "*", "*", "kwargs", ")", ":", "positions_alloc", "=", "positions_alloc", ".", "copy", "(", ")", "positions_alloc", ".", "columns", "=", "positions_alloc", ".", "columns", ".", "map", "(", "utils", ".", "format_asset", ")", "df_top_long", ",", "df_top_short", ",", "df_top_abs", "=", "pos", ".", "get_top_long_short_abs", "(", "positions_alloc", ")", "if", "show_and_plot", "==", "1", "or", "show_and_plot", "==", "2", ":", "utils", ".", "print_table", "(", "pd", ".", "DataFrame", "(", "df_top_long", "*", "100", ",", "columns", "=", "[", "'max'", "]", ")", ",", "float_format", "=", "'{0:.2f}%'", ".", "format", ",", "name", "=", "'Top 10 long positions of all time'", ")", "utils", ".", "print_table", "(", "pd", ".", "DataFrame", "(", "df_top_short", "*", "100", ",", "columns", "=", "[", "'max'", "]", ")", ",", "float_format", "=", "'{0:.2f}%'", ".", "format", ",", "name", "=", "'Top 10 short positions of all time'", ")", "utils", ".", "print_table", "(", "pd", ".", "DataFrame", "(", "df_top_abs", "*", "100", ",", "columns", "=", "[", "'max'", "]", ")", ",", "float_format", "=", "'{0:.2f}%'", ".", "format", ",", "name", "=", "'Top 10 positions of all time'", ")", "if", "show_and_plot", "==", "0", "or", "show_and_plot", "==", "2", ":", "if", "ax", "is", "None", ":", "ax", "=", "plt", ".", "gca", "(", ")", "positions_alloc", "[", "df_top_abs", ".", "index", "]", ".", "plot", "(", "title", "=", "'Portfolio allocation over time, only top 10 holdings'", ",", "alpha", "=", "0.5", ",", "ax", "=", "ax", ",", "*", "*", "kwargs", ")", "# Place legend below plot, shrink plot by 20%", "if", "legend_loc", "==", "'real_best'", ":", "box", "=", "ax", ".", "get_position", "(", ")", "ax", ".", "set_position", "(", "[", "box", ".", "x0", ",", "box", ".", "y0", "+", "box", ".", "height", "*", "0.1", ",", "box", ".", "width", ",", "box", ".", "height", "*", "0.9", "]", ")", "# Put a legend below current axis", "ax", ".", "legend", "(", "loc", "=", "'upper center'", ",", "frameon", "=", "True", ",", "framealpha", "=", "0.5", ",", "bbox_to_anchor", "=", "(", "0.5", ",", "-", "0.14", ")", ",", "ncol", "=", "5", ")", "else", ":", "ax", ".", "legend", "(", "loc", "=", "legend_loc", ")", "ax", ".", "set_xlim", "(", "(", "returns", ".", "index", "[", "0", "]", ",", "returns", ".", "index", "[", "-", "1", "]", ")", ")", "ax", ".", "set_ylabel", "(", "'Exposure by holding'", ")", "if", "hide_positions", ":", "ax", ".", "legend_", ".", "remove", "(", ")", "return", "ax" ]
Prints and/or plots the exposures of the top 10 held positions of all time. Parameters ---------- returns : pd.Series Daily returns of the strategy, noncumulative. - See full explanation in tears.create_full_tear_sheet. positions_alloc : pd.DataFrame Portfolio allocation of positions. See pos.get_percent_alloc. show_and_plot : int, optional By default, this is 2, and both prints and plots. If this is 0, it will only plot; if 1, it will only print. hide_positions : bool, optional If True, will not output any symbol names. legend_loc : matplotlib.loc, optional The location of the legend on the plot. By default, the legend will display below the plot. ax : matplotlib.Axes, optional Axes upon which to plot. **kwargs, optional Passed to plotting function. Returns ------- ax : matplotlib.Axes, conditional The axes that were plotted on.
[ "Prints", "and", "/", "or", "plots", "the", "exposures", "of", "the", "top", "10", "held", "positions", "of", "all", "time", "." ]
python
valid
maxpumperla/elephas
elephas/spark_model.py
https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/spark_model.py#L156-L197
def _fit(self, rdd, epochs, batch_size, verbose, validation_split): """Protected train method to make wrapping of modes easier """ self._master_network.compile(optimizer=self.master_optimizer, loss=self.master_loss, metrics=self.master_metrics) if self.mode in ['asynchronous', 'hogwild']: self.start_server() train_config = self.get_train_config( epochs, batch_size, verbose, validation_split) mode = self.parameter_server_mode freq = self.frequency optimizer = self.master_optimizer loss = self.master_loss metrics = self.master_metrics custom = self.custom_objects yaml = self._master_network.to_yaml() init = self._master_network.get_weights() parameters = rdd.context.broadcast(init) if self.mode in ['asynchronous', 'hogwild']: print('>>> Initialize workers') worker = AsynchronousSparkWorker( yaml, parameters, mode, train_config, freq, optimizer, loss, metrics, custom) print('>>> Distribute load') rdd.mapPartitions(worker.train).collect() print('>>> Async training complete.') new_parameters = self.client.get_parameters() elif self.mode == 'synchronous': worker = SparkWorker(yaml, parameters, train_config, optimizer, loss, metrics, custom) gradients = rdd.mapPartitions(worker.train).collect() new_parameters = self._master_network.get_weights() for grad in gradients: # simply accumulate gradients one by one new_parameters = subtract_params(new_parameters, grad) print('>>> Synchronous training complete.') else: raise ValueError("Unsupported mode {}".format(self.mode)) self._master_network.set_weights(new_parameters) if self.mode in ['asynchronous', 'hogwild']: self.stop_server()
[ "def", "_fit", "(", "self", ",", "rdd", ",", "epochs", ",", "batch_size", ",", "verbose", ",", "validation_split", ")", ":", "self", ".", "_master_network", ".", "compile", "(", "optimizer", "=", "self", ".", "master_optimizer", ",", "loss", "=", "self", ".", "master_loss", ",", "metrics", "=", "self", ".", "master_metrics", ")", "if", "self", ".", "mode", "in", "[", "'asynchronous'", ",", "'hogwild'", "]", ":", "self", ".", "start_server", "(", ")", "train_config", "=", "self", ".", "get_train_config", "(", "epochs", ",", "batch_size", ",", "verbose", ",", "validation_split", ")", "mode", "=", "self", ".", "parameter_server_mode", "freq", "=", "self", ".", "frequency", "optimizer", "=", "self", ".", "master_optimizer", "loss", "=", "self", ".", "master_loss", "metrics", "=", "self", ".", "master_metrics", "custom", "=", "self", ".", "custom_objects", "yaml", "=", "self", ".", "_master_network", ".", "to_yaml", "(", ")", "init", "=", "self", ".", "_master_network", ".", "get_weights", "(", ")", "parameters", "=", "rdd", ".", "context", ".", "broadcast", "(", "init", ")", "if", "self", ".", "mode", "in", "[", "'asynchronous'", ",", "'hogwild'", "]", ":", "print", "(", "'>>> Initialize workers'", ")", "worker", "=", "AsynchronousSparkWorker", "(", "yaml", ",", "parameters", ",", "mode", ",", "train_config", ",", "freq", ",", "optimizer", ",", "loss", ",", "metrics", ",", "custom", ")", "print", "(", "'>>> Distribute load'", ")", "rdd", ".", "mapPartitions", "(", "worker", ".", "train", ")", ".", "collect", "(", ")", "print", "(", "'>>> Async training complete.'", ")", "new_parameters", "=", "self", ".", "client", ".", "get_parameters", "(", ")", "elif", "self", ".", "mode", "==", "'synchronous'", ":", "worker", "=", "SparkWorker", "(", "yaml", ",", "parameters", ",", "train_config", ",", "optimizer", ",", "loss", ",", "metrics", ",", "custom", ")", "gradients", "=", "rdd", ".", "mapPartitions", "(", "worker", ".", "train", ")", ".", "collect", "(", ")", "new_parameters", "=", "self", ".", "_master_network", ".", "get_weights", "(", ")", "for", "grad", "in", "gradients", ":", "# simply accumulate gradients one by one", "new_parameters", "=", "subtract_params", "(", "new_parameters", ",", "grad", ")", "print", "(", "'>>> Synchronous training complete.'", ")", "else", ":", "raise", "ValueError", "(", "\"Unsupported mode {}\"", ".", "format", "(", "self", ".", "mode", ")", ")", "self", ".", "_master_network", ".", "set_weights", "(", "new_parameters", ")", "if", "self", ".", "mode", "in", "[", "'asynchronous'", ",", "'hogwild'", "]", ":", "self", ".", "stop_server", "(", ")" ]
Protected train method to make wrapping of modes easier
[ "Protected", "train", "method", "to", "make", "wrapping", "of", "modes", "easier" ]
python
train
Grk0/python-libconf
libconf.py
https://github.com/Grk0/python-libconf/blob/9c4cf5f56d56ebbc1fe0e1596807218b7d5d5da4/libconf.py#L476-L498
def load(f, filename=None, includedir=''): '''Load the contents of ``f`` (a file-like object) to a Python object The returned object is a subclass of ``dict`` that exposes string keys as attributes as well. Example: >>> with open('test/example.cfg') as f: ... config = libconf.load(f) >>> config['window']['title'] 'libconfig example' >>> config.window.title 'libconfig example' ''' if isinstance(f.read(0), bytes): raise TypeError("libconf.load() input file must by unicode") tokenstream = TokenStream.from_file(f, filename=filename, includedir=includedir) return Parser(tokenstream).parse()
[ "def", "load", "(", "f", ",", "filename", "=", "None", ",", "includedir", "=", "''", ")", ":", "if", "isinstance", "(", "f", ".", "read", "(", "0", ")", ",", "bytes", ")", ":", "raise", "TypeError", "(", "\"libconf.load() input file must by unicode\"", ")", "tokenstream", "=", "TokenStream", ".", "from_file", "(", "f", ",", "filename", "=", "filename", ",", "includedir", "=", "includedir", ")", "return", "Parser", "(", "tokenstream", ")", ".", "parse", "(", ")" ]
Load the contents of ``f`` (a file-like object) to a Python object The returned object is a subclass of ``dict`` that exposes string keys as attributes as well. Example: >>> with open('test/example.cfg') as f: ... config = libconf.load(f) >>> config['window']['title'] 'libconfig example' >>> config.window.title 'libconfig example'
[ "Load", "the", "contents", "of", "f", "(", "a", "file", "-", "like", "object", ")", "to", "a", "Python", "object" ]
python
train
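Usage sketch (not part of the original record; the configuration text is invented): a self-contained variant of the docstring's example that feeds the parser from an in-memory unicode stream.

import io
import libconf

cfg_text = u'''
window: {
    title: "libconfig example";
    size: { w: 640; h: 480; };
};
'''
config = libconf.load(io.StringIO(cfg_text), filename='<string>')
print(config.window.title)             # libconfig example
print(config['window']['size']['w'])   # 640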
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/FortranCommon.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/FortranCommon.py#L234-L248
def add_f03_to_env(env): """Add Builders and construction variables for f03 to an Environment.""" try: F03Suffixes = env['F03FILESUFFIXES'] except KeyError: F03Suffixes = ['.f03'] #print("Adding %s to f95 suffixes" % F95Suffixes) try: F03PPSuffixes = env['F03PPFILESUFFIXES'] except KeyError: F03PPSuffixes = [] DialectAddToEnv(env, "F03", F03Suffixes, F03PPSuffixes, support_module = 1)
[ "def", "add_f03_to_env", "(", "env", ")", ":", "try", ":", "F03Suffixes", "=", "env", "[", "'F03FILESUFFIXES'", "]", "except", "KeyError", ":", "F03Suffixes", "=", "[", "'.f03'", "]", "#print(\"Adding %s to f95 suffixes\" % F95Suffixes)", "try", ":", "F03PPSuffixes", "=", "env", "[", "'F03PPFILESUFFIXES'", "]", "except", "KeyError", ":", "F03PPSuffixes", "=", "[", "]", "DialectAddToEnv", "(", "env", ",", "\"F03\"", ",", "F03Suffixes", ",", "F03PPSuffixes", ",", "support_module", "=", "1", ")" ]
Add Builders and construction variables for f03 to an Environment.
[ "Add", "Builders", "and", "construction", "variables", "for", "f03", "to", "an", "Environment", "." ]
python
train
sixty-north/cosmic-ray
src/cosmic_ray/commands/execute.py
https://github.com/sixty-north/cosmic-ray/blob/c654e074afbb7b7fcbc23359083c1287c0d3e991/src/cosmic_ray/commands/execute.py#L33-L61
def execute(db_name): """Execute any pending work in the database stored in `db_name`, recording the results. This looks for any work in `db_name` which has no results, schedules it to be executed, and records any results that arrive. """ try: with use_db(db_name, mode=WorkDB.Mode.open) as work_db: _update_progress(work_db) config = work_db.get_config() engine = get_execution_engine(config.execution_engine_name) def on_task_complete(job_id, work_result): work_db.set_result(job_id, work_result) _update_progress(work_db) log.info("Job %s complete", job_id) log.info("Beginning execution") engine( work_db.pending_work_items, config, on_task_complete=on_task_complete) log.info("Execution finished") except FileNotFoundError as exc: raise FileNotFoundError( str(exc).replace('Requested file', 'Corresponding database', 1)) from exc
[ "def", "execute", "(", "db_name", ")", ":", "try", ":", "with", "use_db", "(", "db_name", ",", "mode", "=", "WorkDB", ".", "Mode", ".", "open", ")", "as", "work_db", ":", "_update_progress", "(", "work_db", ")", "config", "=", "work_db", ".", "get_config", "(", ")", "engine", "=", "get_execution_engine", "(", "config", ".", "execution_engine_name", ")", "def", "on_task_complete", "(", "job_id", ",", "work_result", ")", ":", "work_db", ".", "set_result", "(", "job_id", ",", "work_result", ")", "_update_progress", "(", "work_db", ")", "log", ".", "info", "(", "\"Job %s complete\"", ",", "job_id", ")", "log", ".", "info", "(", "\"Beginning execution\"", ")", "engine", "(", "work_db", ".", "pending_work_items", ",", "config", ",", "on_task_complete", "=", "on_task_complete", ")", "log", ".", "info", "(", "\"Execution finished\"", ")", "except", "FileNotFoundError", "as", "exc", ":", "raise", "FileNotFoundError", "(", "str", "(", "exc", ")", ".", "replace", "(", "'Requested file'", ",", "'Corresponding database'", ",", "1", ")", ")", "from", "exc" ]
Execute any pending work in the database stored in `db_name`, recording the results. This looks for any work in `db_name` which has no results, schedules it to be executed, and records any results that arrive.
[ "Execute", "any", "pending", "work", "in", "the", "database", "stored", "in", "db_name", "recording", "the", "results", "." ]
python
train
Yelp/kafka-utils
kafka_utils/util/offsets.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/util/offsets.py#L234-L320
def get_topics_watermarks(kafka_client, topics, raise_on_error=True): """ Get current topic watermarks. NOTE: This method does not refresh client metadata. It is up to the caller to use avoid using stale metadata. If any partition leader is not available, the request fails for all the other topics. This is the tradeoff of sending all topic requests in batch and save both in performance and Kafka load. :param kafka_client: a connected KafkaToolClient :param topics: topic list or dict {<topic>: [partitions]} :param raise_on_error: if False the method ignores missing topics and missing partitions. It still may fail on the request send. :returns: a dict topic: partition: Part :raises: :py:class:`~kafka_utils.util.error.UnknownTopic`: upon missing topics and raise_on_error=True :py:class:`~kafka_utils.util.error.UnknownPartition`: upon missing partitions and raise_on_error=True FailedPayloadsError: upon send request error. """ topics = _verify_topics_and_partitions( kafka_client, topics, raise_on_error, ) highmark_offset_reqs = [] lowmark_offset_reqs = [] for topic, partitions in six.iteritems(topics): # Batch watermark requests for partition in partitions: # Request the the latest offset highmark_offset_reqs.append( OffsetRequestPayload( topic, partition, -1, max_offsets=1 ) ) # Request the earliest offset lowmark_offset_reqs.append( OffsetRequestPayload( topic, partition, -2, max_offsets=1 ) ) watermark_offsets = {} if not (len(highmark_offset_reqs) + len(lowmark_offset_reqs)): return watermark_offsets # fail_on_error = False does not prevent network errors highmark_resps = kafka_client.send_offset_request( highmark_offset_reqs, fail_on_error=False, callback=_check_fetch_response_error, ) lowmark_resps = kafka_client.send_offset_request( lowmark_offset_reqs, fail_on_error=False, callback=_check_fetch_response_error, ) # At this point highmark and lowmark should ideally have the same length. assert len(highmark_resps) == len(lowmark_resps) aggregated_offsets = defaultdict(lambda: defaultdict(dict)) for resp in highmark_resps: aggregated_offsets[resp.topic][resp.partition]['highmark'] = \ resp.offsets[0] for resp in lowmark_resps: aggregated_offsets[resp.topic][resp.partition]['lowmark'] = \ resp.offsets[0] for topic, partition_watermarks in six.iteritems(aggregated_offsets): for partition, watermarks in six.iteritems(partition_watermarks): watermark_offsets.setdefault( topic, {}, )[partition] = PartitionOffsets( topic, partition, watermarks['highmark'], watermarks['lowmark'], ) return watermark_offsets
[ "def", "get_topics_watermarks", "(", "kafka_client", ",", "topics", ",", "raise_on_error", "=", "True", ")", ":", "topics", "=", "_verify_topics_and_partitions", "(", "kafka_client", ",", "topics", ",", "raise_on_error", ",", ")", "highmark_offset_reqs", "=", "[", "]", "lowmark_offset_reqs", "=", "[", "]", "for", "topic", ",", "partitions", "in", "six", ".", "iteritems", "(", "topics", ")", ":", "# Batch watermark requests", "for", "partition", "in", "partitions", ":", "# Request the the latest offset", "highmark_offset_reqs", ".", "append", "(", "OffsetRequestPayload", "(", "topic", ",", "partition", ",", "-", "1", ",", "max_offsets", "=", "1", ")", ")", "# Request the earliest offset", "lowmark_offset_reqs", ".", "append", "(", "OffsetRequestPayload", "(", "topic", ",", "partition", ",", "-", "2", ",", "max_offsets", "=", "1", ")", ")", "watermark_offsets", "=", "{", "}", "if", "not", "(", "len", "(", "highmark_offset_reqs", ")", "+", "len", "(", "lowmark_offset_reqs", ")", ")", ":", "return", "watermark_offsets", "# fail_on_error = False does not prevent network errors", "highmark_resps", "=", "kafka_client", ".", "send_offset_request", "(", "highmark_offset_reqs", ",", "fail_on_error", "=", "False", ",", "callback", "=", "_check_fetch_response_error", ",", ")", "lowmark_resps", "=", "kafka_client", ".", "send_offset_request", "(", "lowmark_offset_reqs", ",", "fail_on_error", "=", "False", ",", "callback", "=", "_check_fetch_response_error", ",", ")", "# At this point highmark and lowmark should ideally have the same length.", "assert", "len", "(", "highmark_resps", ")", "==", "len", "(", "lowmark_resps", ")", "aggregated_offsets", "=", "defaultdict", "(", "lambda", ":", "defaultdict", "(", "dict", ")", ")", "for", "resp", "in", "highmark_resps", ":", "aggregated_offsets", "[", "resp", ".", "topic", "]", "[", "resp", ".", "partition", "]", "[", "'highmark'", "]", "=", "resp", ".", "offsets", "[", "0", "]", "for", "resp", "in", "lowmark_resps", ":", "aggregated_offsets", "[", "resp", ".", "topic", "]", "[", "resp", ".", "partition", "]", "[", "'lowmark'", "]", "=", "resp", ".", "offsets", "[", "0", "]", "for", "topic", ",", "partition_watermarks", "in", "six", ".", "iteritems", "(", "aggregated_offsets", ")", ":", "for", "partition", ",", "watermarks", "in", "six", ".", "iteritems", "(", "partition_watermarks", ")", ":", "watermark_offsets", ".", "setdefault", "(", "topic", ",", "{", "}", ",", ")", "[", "partition", "]", "=", "PartitionOffsets", "(", "topic", ",", "partition", ",", "watermarks", "[", "'highmark'", "]", ",", "watermarks", "[", "'lowmark'", "]", ",", ")", "return", "watermark_offsets" ]
Get current topic watermarks. NOTE: This method does not refresh client metadata. It is up to the caller to use avoid using stale metadata. If any partition leader is not available, the request fails for all the other topics. This is the tradeoff of sending all topic requests in batch and save both in performance and Kafka load. :param kafka_client: a connected KafkaToolClient :param topics: topic list or dict {<topic>: [partitions]} :param raise_on_error: if False the method ignores missing topics and missing partitions. It still may fail on the request send. :returns: a dict topic: partition: Part :raises: :py:class:`~kafka_utils.util.error.UnknownTopic`: upon missing topics and raise_on_error=True :py:class:`~kafka_utils.util.error.UnknownPartition`: upon missing partitions and raise_on_error=True FailedPayloadsError: upon send request error.
[ "Get", "current", "topic", "watermarks", "." ]
python
train
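Call sketch (not part of the original record; broker address and topic layout are invented, and the KafkaToolClient import path is assumed from the same package):

from kafka_utils.util.client import KafkaToolClient
from kafka_utils.util.offsets import get_topics_watermarks

client = KafkaToolClient(['kafka01.example.com:9092'])
watermarks = get_topics_watermarks(client, {'my_topic': [0, 1]})
for topic, partitions in watermarks.items():
    for partition, offsets in partitions.items():
        print(topic, partition, offsets.lowmark, offsets.highmark)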
zartstrom/snaptime
snaptime/main.py
https://github.com/zartstrom/snaptime/blob/b05ae09d4dccb1b5c8c4ace9c1937b8139672a3c/snaptime/main.py#L119-L131
def apply_to_with_tz(self, dttm, timezone): """We make sure that after truncating we use the correct timezone, even if we 'jump' over a daylight saving time switch. I.e. if we apply "@d" to `Sun Oct 30 04:30:00 CET 2016` (1477798200) we want to have `Sun Oct 30 00:00:00 CEST 2016` (1477778400) but not `Sun Oct 30 00:00:00 CET 2016` (1477782000) """ result = self.apply_to(dttm) if self.unit in [DAYS, WEEKS, MONTHS, YEARS]: naive_dttm = datetime(result.year, result.month, result.day) result = timezone.localize(naive_dttm) return result
[ "def", "apply_to_with_tz", "(", "self", ",", "dttm", ",", "timezone", ")", ":", "result", "=", "self", ".", "apply_to", "(", "dttm", ")", "if", "self", ".", "unit", "in", "[", "DAYS", ",", "WEEKS", ",", "MONTHS", ",", "YEARS", "]", ":", "naive_dttm", "=", "datetime", "(", "result", ".", "year", ",", "result", ".", "month", ",", "result", ".", "day", ")", "result", "=", "timezone", ".", "localize", "(", "naive_dttm", ")", "return", "result" ]
We make sure that after truncating we use the correct timezone, even if we 'jump' over a daylight saving time switch. I.e. if we apply "@d" to `Sun Oct 30 04:30:00 CET 2016` (1477798200) we want to have `Sun Oct 30 00:00:00 CEST 2016` (1477778400) but not `Sun Oct 30 00:00:00 CET 2016` (1477782000)
[ "We", "make", "sure", "that", "after", "truncating", "we", "use", "the", "correct", "timezone", "even", "if", "we", "jump", "over", "a", "daylight", "saving", "time", "switch", "." ]
python
test
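Worked illustration (not part of the original record) of the DST point in the docstring, using plain pytz: zero the time fields and re-localize, so the result keeps the pre-switch CEST offset.

import pytz
from datetime import datetime

berlin = pytz.timezone('Europe/Berlin')
dttm = berlin.localize(datetime(2016, 10, 30, 4, 30))   # CET (UTC+01), after the switch

naive_midnight = datetime(dttm.year, dttm.month, dttm.day)
truncated = berlin.localize(naive_midnight)

print(truncated)                      # 2016-10-30 00:00:00+02:00 (CEST)
print(int(truncated.timestamp()))     # 1477778400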
Chilipp/psyplot
psyplot/project.py
https://github.com/Chilipp/psyplot/blob/75a0a15a9a1dd018e79d2df270d56c4bf5f311d5/psyplot/project.py#L1103-L1126
def from_dataset(cls, *args, **kwargs): """Construct an ArrayList instance from an existing base dataset Parameters ---------- %(ArrayList.from_dataset.parameters)s main: Project The main project that this project corresponds to Other Parameters ---------------- %(ArrayList.from_dataset.other_parameters)s Returns ------- Project The newly created project instance """ main = kwargs.pop('main', None) ret = super(Project, cls).from_dataset(*args, **kwargs) if main is not None: ret.main = main main.extend(ret, new_name=False) return ret
[ "def", "from_dataset", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "main", "=", "kwargs", ".", "pop", "(", "'main'", ",", "None", ")", "ret", "=", "super", "(", "Project", ",", "cls", ")", ".", "from_dataset", "(", "*", "args", ",", "*", "*", "kwargs", ")", "if", "main", "is", "not", "None", ":", "ret", ".", "main", "=", "main", "main", ".", "extend", "(", "ret", ",", "new_name", "=", "False", ")", "return", "ret" ]
Construct an ArrayList instance from an existing base dataset Parameters ---------- %(ArrayList.from_dataset.parameters)s main: Project The main project that this project corresponds to Other Parameters ---------------- %(ArrayList.from_dataset.other_parameters)s Returns ------- Project The newly created project instance
[ "Construct", "an", "ArrayList", "instance", "from", "an", "existing", "base", "dataset" ]
python
train
srevenant/onetimejwt
onetimejwt/__init__.py
https://github.com/srevenant/onetimejwt/blob/f3ed561253eb4a8e1522c64f59bf64d275e9d315/onetimejwt/__init__.py#L140-L174
def valid(self, token): """is this token valid?""" now = time.time() if 'Bearer ' in token: token = token[7:] data = None for secret in self.secrets: try: data = jwt.decode(token, secret) break except jwt.DecodeError: continue except jwt.ExpiredSignatureError: raise JwtFailed("Jwt expired") if not data: raise JwtFailed("Jwt cannot be decoded") exp = data.get('exp') if not exp: raise JwtFailed("Jwt missing expiration (exp)") if now - exp > self.age: raise JwtFailed("Jwt bad expiration - greater than I want to accept") jti = data.get('jti') if not jti: raise JwtFailed("Jwt missing one-time id (jti)") if self.already_used(jti): raise JwtFailed("Jwt re-use disallowed (jti={})".format(jti)) return data
[ "def", "valid", "(", "self", ",", "token", ")", ":", "now", "=", "time", ".", "time", "(", ")", "if", "'Bearer '", "in", "token", ":", "token", "=", "token", "[", "7", ":", "]", "data", "=", "None", "for", "secret", "in", "self", ".", "secrets", ":", "try", ":", "data", "=", "jwt", ".", "decode", "(", "token", ",", "secret", ")", "break", "except", "jwt", ".", "DecodeError", ":", "continue", "except", "jwt", ".", "ExpiredSignatureError", ":", "raise", "JwtFailed", "(", "\"Jwt expired\"", ")", "if", "not", "data", ":", "raise", "JwtFailed", "(", "\"Jwt cannot be decoded\"", ")", "exp", "=", "data", ".", "get", "(", "'exp'", ")", "if", "not", "exp", ":", "raise", "JwtFailed", "(", "\"Jwt missing expiration (exp)\"", ")", "if", "now", "-", "exp", ">", "self", ".", "age", ":", "raise", "JwtFailed", "(", "\"Jwt bad expiration - greater than I want to accept\"", ")", "jti", "=", "data", ".", "get", "(", "'jti'", ")", "if", "not", "jti", ":", "raise", "JwtFailed", "(", "\"Jwt missing one-time id (jti)\"", ")", "if", "self", ".", "already_used", "(", "jti", ")", ":", "raise", "JwtFailed", "(", "\"Jwt re-use disallowed (jti={})\"", ".", "format", "(", "jti", ")", ")", "return", "data" ]
is this token valid?
[ "is", "this", "token", "valid?" ]
python
test
gawel/panoramisk
panoramisk/utils.py
https://github.com/gawel/panoramisk/blob/2ccb5d18be28a8e8f444dc0cd3a3bfb59aa19a8e/panoramisk/utils.py#L137-L141
def get_instances(self): """Mostly used for debugging""" return ["<%s prefix:%s (uid:%s)>" % (self.__class__.__name__, i.prefix, self.uid) for i in self.instances]
[ "def", "get_instances", "(", "self", ")", ":", "return", "[", "\"<%s prefix:%s (uid:%s)>\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "i", ".", "prefix", ",", "self", ".", "uid", ")", "for", "i", "in", "self", ".", "instances", "]" ]
Mostly used for debugging
[ "Mostly", "used", "for", "debugging" ]
python
test
BernardFW/bernard
src/bernard/engine/fsm.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/engine/fsm.py#L159-L169
def _make_allowed_states(self) -> Iterator[Text]: """ Sometimes we load states from the database. In order to avoid loading an arbitrary class, we list here the state classes that are allowed. """ for trans in self.transitions: yield trans.dest.name() if trans.origin: yield trans.origin.name()
[ "def", "_make_allowed_states", "(", "self", ")", "->", "Iterator", "[", "Text", "]", ":", "for", "trans", "in", "self", ".", "transitions", ":", "yield", "trans", ".", "dest", ".", "name", "(", ")", "if", "trans", ".", "origin", ":", "yield", "trans", ".", "origin", ".", "name", "(", ")" ]
Sometimes we load states from the database. In order to avoid loading an arbitrary class, we list here the state classes that are allowed.
[ "Sometimes", "we", "load", "states", "from", "the", "database", ".", "In", "order", "to", "avoid", "loading", "an", "arbitrary", "class", "we", "list", "here", "the", "state", "classes", "that", "are", "allowed", "." ]
python
train
mdickinson/bigfloat
bigfloat/core.py
https://github.com/mdickinson/bigfloat/blob/e5fdd1048615191ed32a2b7460e14b3b3ff24662/bigfloat/core.py#L2117-L2129
def jn(n, x, context=None): """ Return the value of the first kind Bessel function of order ``n`` at ``x``. ``n`` should be a Python integer. """ return _apply_function_in_current_context( BigFloat, mpfr.mpfr_jn, (n, BigFloat._implicit_convert(x)), context, )
[ "def", "jn", "(", "n", ",", "x", ",", "context", "=", "None", ")", ":", "return", "_apply_function_in_current_context", "(", "BigFloat", ",", "mpfr", ".", "mpfr_jn", ",", "(", "n", ",", "BigFloat", ".", "_implicit_convert", "(", "x", ")", ")", ",", "context", ",", ")" ]
Return the value of the first kind Bessel function of order ``n`` at ``x``. ``n`` should be a Python integer.
[ "Return", "the", "value", "of", "the", "first", "kind", "Bessel", "function", "of", "order", "n", "at", "x", "." ]
python
train
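A short usage sketch for the jn wrapper above, assuming the bigfloat package is installed and that precision() is the usual context manager exported by the same module:

    from bigfloat import jn, precision

    # First-kind Bessel function J_2 evaluated at 3.5, using 80 bits of precision.
    with precision(80):
        value = jn(2, 3.5)
    print(value)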
vsoch/helpme
helpme/main/discourse/__init__.py
https://github.com/vsoch/helpme/blob/e609172260b10cddadb2d2023ab26da8082a9feb/helpme/main/discourse/__init__.py#L65-L77
def check_env(self, envar, value): '''ensure that variable envar is set to some value, otherwise exit on error. Parameters ========== envar: the environment variable name value: the setting that shouldn't be None ''' if value is None: bot.error('You must export %s to use Discourse' % envar) print('https://vsoch.github.io/helpme/helper-discourse') sys.exit(1)
[ "def", "check_env", "(", "self", ",", "envar", ",", "value", ")", ":", "if", "value", "is", "None", ":", "bot", ".", "error", "(", "'You must export %s to use Discourse'", "%", "envar", ")", "print", "(", "'https://vsoch.github.io/helpme/helper-discourse'", ")", "sys", ".", "exit", "(", "1", ")" ]
ensure that variable envar is set to some value, otherwise exit on error. Parameters ========== envar: the environment variable name value: the setting that shouldn't be None
[ "ensure", "that", "variable", "envar", "is", "set", "to", "some", "value", "otherwise", "exit", "on", "error", ".", "Parameters", "==========", "envar", ":", "the", "environment", "variable", "name", "value", ":", "the", "setting", "that", "shouldn", "t", "be", "None" ]
python
train
kurtbrose/faststat
faststat/faststat.py
https://github.com/kurtbrose/faststat/blob/5060c0e10acaafd4a48de3f16869bfccc1deb44a/faststat/faststat.py#L310-L323
def _commit(self, ref): 'commit a walkers data after it is collected' path_times = self._weakref_path_map[ref] path_times.append(nanotime()) del self._weakref_path_map[ref] path = tuple(path_times[1::2]) times = path_times[::2] if path not in self.path_stats: # tuple to save a tiny bit of memory self.path_stats[path] = tuple([ Duration(interval=False) for i in range(len(path))]) path_stats = self.path_stats[path] for i in range(1, len(times)): path_stats[i - 1]._stats.add(times[i] - times[i - 1])
[ "def", "_commit", "(", "self", ",", "ref", ")", ":", "path_times", "=", "self", ".", "_weakref_path_map", "[", "ref", "]", "path_times", ".", "append", "(", "nanotime", "(", ")", ")", "del", "self", ".", "_weakref_path_map", "[", "ref", "]", "path", "=", "tuple", "(", "path_times", "[", "1", ":", ":", "2", "]", ")", "times", "=", "path_times", "[", ":", ":", "2", "]", "if", "path", "not", "in", "self", ".", "path_stats", ":", "# tuple to save a tiny bit of memory\r", "self", ".", "path_stats", "[", "path", "]", "=", "tuple", "(", "[", "Duration", "(", "interval", "=", "False", ")", "for", "i", "in", "range", "(", "len", "(", "path", ")", ")", "]", ")", "path_stats", "=", "self", ".", "path_stats", "[", "path", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "times", ")", ")", ":", "path_stats", "[", "i", "-", "1", "]", ".", "_stats", ".", "add", "(", "times", "[", "i", "]", "-", "times", "[", "i", "-", "1", "]", ")" ]
commit a walkers data after it is collected
[ "commit", "a", "walkers", "data", "after", "it", "is", "collected" ]
python
train
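The _commit body above depends on path_times interleaving timestamps (even indices) with path labels (odd indices); a standalone sketch of that layout and the slicing used to split it, with made-up numbers:

    path_times = [1000, "parse", 1040, "render", 1100]   # t0, label, t1, label, t2
    path = tuple(path_times[1::2])                        # ('parse', 'render')
    times = path_times[::2]                               # [1000, 1040, 1100]
    durations = [times[i] - times[i - 1] for i in range(1, len(times))]
    print(path, durations)                                # ('parse', 'render') [40, 60]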
pymupdf/PyMuPDF
fitz/fitz.py
https://github.com/pymupdf/PyMuPDF/blob/917f2d83482510e26ba0ff01fd2392c26f3a8e90/fitz/fitz.py#L2745-L2756
def addFreetextAnnot(self, rect, text, fontsize=12, fontname=None, color=None, rotate=0): """Add a 'FreeText' annotation in rectangle 'rect'.""" CheckParent(self) val = _fitz.Page_addFreetextAnnot(self, rect, text, fontsize, fontname, color, rotate) if not val: return val.thisown = True val.parent = weakref.proxy(self) self._annot_refs[id(val)] = val return val
[ "def", "addFreetextAnnot", "(", "self", ",", "rect", ",", "text", ",", "fontsize", "=", "12", ",", "fontname", "=", "None", ",", "color", "=", "None", ",", "rotate", "=", "0", ")", ":", "CheckParent", "(", "self", ")", "val", "=", "_fitz", ".", "Page_addFreetextAnnot", "(", "self", ",", "rect", ",", "text", ",", "fontsize", ",", "fontname", ",", "color", ",", "rotate", ")", "if", "not", "val", ":", "return", "val", ".", "thisown", "=", "True", "val", ".", "parent", "=", "weakref", ".", "proxy", "(", "self", ")", "self", ".", "_annot_refs", "[", "id", "(", "val", ")", "]", "=", "val", "return", "val" ]
Add a 'FreeText' annotation in rectangle 'rect'.
[ "Add", "a", "FreeText", "annotation", "in", "rectangle", "rect", "." ]
python
train
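A usage sketch for the addFreetextAnnot wrapper above; it assumes an old-style PyMuPDF build where fitz.open(), Document.newPage() and fitz.Rect are available, and the rectangle and text are placeholders:

    import fitz  # PyMuPDF

    doc = fitz.open()                      # new, empty PDF
    page = doc.newPage()                   # one blank page
    rect = fitz.Rect(72, 72, 300, 120)     # annotation rectangle in points
    annot = page.addFreetextAnnot(rect, "Review this paragraph.", fontsize=11)
    doc.save("annotated.pdf")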
binux/pyspider
pyspider/fetcher/tornado_fetcher.py
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/fetcher/tornado_fetcher.py#L831-L846
def on_result(self, type, task, result): '''Called after task fetched''' status_code = result.get('status_code', 599) if status_code != 599: status_code = (int(status_code) / 100 * 100) self._cnt['5m'].event((task.get('project'), status_code), +1) self._cnt['1h'].event((task.get('project'), status_code), +1) if type in ('http', 'phantomjs') and result.get('time'): content_len = len(result.get('content', '')) self._cnt['5m'].event((task.get('project'), 'speed'), float(content_len) / result.get('time')) self._cnt['1h'].event((task.get('project'), 'speed'), float(content_len) / result.get('time')) self._cnt['5m'].event((task.get('project'), 'time'), result.get('time')) self._cnt['1h'].event((task.get('project'), 'time'), result.get('time'))
[ "def", "on_result", "(", "self", ",", "type", ",", "task", ",", "result", ")", ":", "status_code", "=", "result", ".", "get", "(", "'status_code'", ",", "599", ")", "if", "status_code", "!=", "599", ":", "status_code", "=", "(", "int", "(", "status_code", ")", "/", "100", "*", "100", ")", "self", ".", "_cnt", "[", "'5m'", "]", ".", "event", "(", "(", "task", ".", "get", "(", "'project'", ")", ",", "status_code", ")", ",", "+", "1", ")", "self", ".", "_cnt", "[", "'1h'", "]", ".", "event", "(", "(", "task", ".", "get", "(", "'project'", ")", ",", "status_code", ")", ",", "+", "1", ")", "if", "type", "in", "(", "'http'", ",", "'phantomjs'", ")", "and", "result", ".", "get", "(", "'time'", ")", ":", "content_len", "=", "len", "(", "result", ".", "get", "(", "'content'", ",", "''", ")", ")", "self", ".", "_cnt", "[", "'5m'", "]", ".", "event", "(", "(", "task", ".", "get", "(", "'project'", ")", ",", "'speed'", ")", ",", "float", "(", "content_len", ")", "/", "result", ".", "get", "(", "'time'", ")", ")", "self", ".", "_cnt", "[", "'1h'", "]", ".", "event", "(", "(", "task", ".", "get", "(", "'project'", ")", ",", "'speed'", ")", ",", "float", "(", "content_len", ")", "/", "result", ".", "get", "(", "'time'", ")", ")", "self", ".", "_cnt", "[", "'5m'", "]", ".", "event", "(", "(", "task", ".", "get", "(", "'project'", ")", ",", "'time'", ")", ",", "result", ".", "get", "(", "'time'", ")", ")", "self", ".", "_cnt", "[", "'1h'", "]", ".", "event", "(", "(", "task", ".", "get", "(", "'project'", ")", ",", "'time'", ")", ",", "result", ".", "get", "(", "'time'", ")", ")" ]
Called after task fetched
[ "Called", "after", "task", "fetched" ]
python
train
tanghaibao/jcvi
jcvi/annotation/maker.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/annotation/maker.py#L425-L479
def split(args): """ %prog split split.bed evidences.bed predictor1.gff predictor2.gff fastafile Split MAKER models by checking against predictors (such as AUGUSTUS and FGENESH). For each region covered by a working model. Find out the combination of predictors that gives the best accuracy against evidences (such as PASA). `split.bed` can be generated by pulling out subset from a list of ids $ python -m jcvi.formats.base join split.ids working.bed --column=0,3 --noheader | cut -f2-7 > split.bed """ from jcvi.formats.bed import Bed p = OptionParser(split.__doc__) p.add_option("--key", default="Name", help="Key in the attributes to extract predictor.gff [default: %default]") p.add_option("--parents", default="match", help="list of features to extract, use comma to separate (e.g." "'gene,mRNA') [default: %default]") p.add_option("--children", default="match_part", help="list of features to extract, use comma to separate (e.g." "'five_prime_UTR,CDS,three_prime_UTR') [default: %default]") opts, args = p.parse_args(args) if len(args) != 5: sys.exit(not p.print_help()) split_bed, evidences_bed, p1_gff, p2_gff, fastafile = args parents = opts.parents children = opts.children key = opts.key bed = Bed(split_bed) s1 = get_splits(split_bed, p1_gff, parents, key) s2 = get_splits(split_bed, p2_gff, parents, key) for b in bed: query = "{0}:{1}-{2}".format(b.seqid, b.start, b.end) b1 = get_accuracy(query, p1_gff, evidences_bed, fastafile, children, key) b2 = get_accuracy(query, p2_gff, evidences_bed, fastafile, children, key) accn = b.accn c1 = "|".join(s1[accn]) c2 = "|".join(s2[accn]) ac1 = b1.accuracy ac2 = b2.accuracy tag = p1_gff if ac1 >= ac2 else p2_gff tag = tag.split(".")[0] ac1 = "{0:.3f}".format(ac1) ac2 = "{0:.3f}".format(ac2) print("\t".join((accn, tag, ac1, ac2, c1, c2)))
[ "def", "split", "(", "args", ")", ":", "from", "jcvi", ".", "formats", ".", "bed", "import", "Bed", "p", "=", "OptionParser", "(", "split", ".", "__doc__", ")", "p", ".", "add_option", "(", "\"--key\"", ",", "default", "=", "\"Name\"", ",", "help", "=", "\"Key in the attributes to extract predictor.gff [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--parents\"", ",", "default", "=", "\"match\"", ",", "help", "=", "\"list of features to extract, use comma to separate (e.g.\"", "\"'gene,mRNA') [default: %default]\"", ")", "p", ".", "add_option", "(", "\"--children\"", ",", "default", "=", "\"match_part\"", ",", "help", "=", "\"list of features to extract, use comma to separate (e.g.\"", "\"'five_prime_UTR,CDS,three_prime_UTR') [default: %default]\"", ")", "opts", ",", "args", "=", "p", ".", "parse_args", "(", "args", ")", "if", "len", "(", "args", ")", "!=", "5", ":", "sys", ".", "exit", "(", "not", "p", ".", "print_help", "(", ")", ")", "split_bed", ",", "evidences_bed", ",", "p1_gff", ",", "p2_gff", ",", "fastafile", "=", "args", "parents", "=", "opts", ".", "parents", "children", "=", "opts", ".", "children", "key", "=", "opts", ".", "key", "bed", "=", "Bed", "(", "split_bed", ")", "s1", "=", "get_splits", "(", "split_bed", ",", "p1_gff", ",", "parents", ",", "key", ")", "s2", "=", "get_splits", "(", "split_bed", ",", "p2_gff", ",", "parents", ",", "key", ")", "for", "b", "in", "bed", ":", "query", "=", "\"{0}:{1}-{2}\"", ".", "format", "(", "b", ".", "seqid", ",", "b", ".", "start", ",", "b", ".", "end", ")", "b1", "=", "get_accuracy", "(", "query", ",", "p1_gff", ",", "evidences_bed", ",", "fastafile", ",", "children", ",", "key", ")", "b2", "=", "get_accuracy", "(", "query", ",", "p2_gff", ",", "evidences_bed", ",", "fastafile", ",", "children", ",", "key", ")", "accn", "=", "b", ".", "accn", "c1", "=", "\"|\"", ".", "join", "(", "s1", "[", "accn", "]", ")", "c2", "=", "\"|\"", ".", "join", "(", "s2", "[", "accn", "]", ")", "ac1", "=", "b1", ".", "accuracy", "ac2", "=", "b2", ".", "accuracy", "tag", "=", "p1_gff", "if", "ac1", ">=", "ac2", "else", "p2_gff", "tag", "=", "tag", ".", "split", "(", "\".\"", ")", "[", "0", "]", "ac1", "=", "\"{0:.3f}\"", ".", "format", "(", "ac1", ")", "ac2", "=", "\"{0:.3f}\"", ".", "format", "(", "ac2", ")", "print", "(", "\"\\t\"", ".", "join", "(", "(", "accn", ",", "tag", ",", "ac1", ",", "ac2", ",", "c1", ",", "c2", ")", ")", ")" ]
%prog split split.bed evidences.bed predictor1.gff predictor2.gff fastafile Split MAKER models by checking against predictors (such as AUGUSTUS and FGENESH). For each region covered by a working model. Find out the combination of predictors that gives the best accuracy against evidences (such as PASA). `split.bed` can be generated by pulling out subset from a list of ids $ python -m jcvi.formats.base join split.ids working.bed --column=0,3 --noheader | cut -f2-7 > split.bed
[ "%prog", "split", "split", ".", "bed", "evidences", ".", "bed", "predictor1", ".", "gff", "predictor2", ".", "gff", "fastafile" ]
python
train
saltstack/salt
salt/modules/sqlite3.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/sqlite3.py#L77-L94
def fetch(db=None, sql=None): ''' Retrieve data from an sqlite3 db (returns all rows, be careful!) CLI Example: .. code-block:: bash salt '*' sqlite3.fetch /root/test.db 'SELECT * FROM test;' ''' cur = _connect(db) if not cur: return False cur.execute(sql) rows = cur.fetchall() return rows
[ "def", "fetch", "(", "db", "=", "None", ",", "sql", "=", "None", ")", ":", "cur", "=", "_connect", "(", "db", ")", "if", "not", "cur", ":", "return", "False", "cur", ".", "execute", "(", "sql", ")", "rows", "=", "cur", ".", "fetchall", "(", ")", "return", "rows" ]
Retrieve data from an sqlite3 db (returns all rows, be careful!) CLI Example: .. code-block:: bash salt '*' sqlite3.fetch /root/test.db 'SELECT * FROM test;'
[ "Retrieve", "data", "from", "an", "sqlite3", "db", "(", "returns", "all", "rows", "be", "careful!", ")" ]
python
train
newville/wxmplot
wxmplot/basepanel.py
https://github.com/newville/wxmplot/blob/8e0dc037453e5cdf18c968dc5a3d29efd761edee/wxmplot/basepanel.py#L268-L275
def onLeftUp(self, event=None): """ left button up""" if event is None: return self.cursor_mode_action('leftup', event=event) self.canvas.draw_idle() self.canvas.draw() self.ForwardEvent(event=event.guiEvent)
[ "def", "onLeftUp", "(", "self", ",", "event", "=", "None", ")", ":", "if", "event", "is", "None", ":", "return", "self", ".", "cursor_mode_action", "(", "'leftup'", ",", "event", "=", "event", ")", "self", ".", "canvas", ".", "draw_idle", "(", ")", "self", ".", "canvas", ".", "draw", "(", ")", "self", ".", "ForwardEvent", "(", "event", "=", "event", ".", "guiEvent", ")" ]
left button up
[ "left", "button", "up" ]
python
train
uw-it-aca/uw-restclients-canvas
uw_canvas/terms.py
https://github.com/uw-it-aca/uw-restclients-canvas/blob/9845faf33d49a8f06908efc22640c001116d6ea2/uw_canvas/terms.py#L7-L23
def get_all_terms(self): """ Return all of the terms in the account. https://canvas.instructure.com/doc/api/enrollment_terms.html#method.terms_api.index """ if not self._canvas_account_id: raise MissingAccountID() params = {"workflow_state": 'all', 'per_page': 500} url = ACCOUNTS_API.format(self._canvas_account_id) + "/terms" data_key = 'enrollment_terms' terms = [] response = self._get_paged_resource(url, params, data_key) for data in response[data_key]: terms.append(CanvasTerm(data=data)) return terms
[ "def", "get_all_terms", "(", "self", ")", ":", "if", "not", "self", ".", "_canvas_account_id", ":", "raise", "MissingAccountID", "(", ")", "params", "=", "{", "\"workflow_state\"", ":", "'all'", ",", "'per_page'", ":", "500", "}", "url", "=", "ACCOUNTS_API", ".", "format", "(", "self", ".", "_canvas_account_id", ")", "+", "\"/terms\"", "data_key", "=", "'enrollment_terms'", "terms", "=", "[", "]", "response", "=", "self", ".", "_get_paged_resource", "(", "url", ",", "params", ",", "data_key", ")", "for", "data", "in", "response", "[", "data_key", "]", ":", "terms", ".", "append", "(", "CanvasTerm", "(", "data", "=", "data", ")", ")", "return", "terms" ]
Return all of the terms in the account. https://canvas.instructure.com/doc/api/enrollment_terms.html#method.terms_api.index
[ "Return", "all", "of", "the", "terms", "in", "the", "account", ".", "https", ":", "//", "canvas", ".", "instructure", ".", "com", "/", "doc", "/", "api", "/", "enrollment_terms", ".", "html#method", ".", "terms_api", ".", "index" ]
python
test
iotaledger/iota.lib.py
examples/address_generator.py
https://github.com/iotaledger/iota.lib.py/blob/97cdd1e241498446b46157b79b2a1ea2ec6d387a/examples/address_generator.py#L59-L80
def output_seed(seed): # type: (Seed) -> None """ Outputs the user's seed to stdout, along with lots of warnings about security. """ print( 'WARNING: Anyone who has your seed can spend your IOTAs! ' 'Clear the screen after recording your seed!' ) compat.input('') print('Your seed is:') print('') print(binary_type(seed).decode('ascii')) print('') print( 'Clear the screen to prevent shoulder surfing, ' 'and press return to continue.' ) print('https://en.wikipedia.org/wiki/Shoulder_surfing_(computer_security)') compat.input('')
[ "def", "output_seed", "(", "seed", ")", ":", "# type: (Seed) -> None", "print", "(", "'WARNING: Anyone who has your seed can spend your IOTAs! '", "'Clear the screen after recording your seed!'", ")", "compat", ".", "input", "(", "''", ")", "print", "(", "'Your seed is:'", ")", "print", "(", "''", ")", "print", "(", "binary_type", "(", "seed", ")", ".", "decode", "(", "'ascii'", ")", ")", "print", "(", "''", ")", "print", "(", "'Clear the screen to prevent shoulder surfing, '", "'and press return to continue.'", ")", "print", "(", "'https://en.wikipedia.org/wiki/Shoulder_surfing_(computer_security)'", ")", "compat", ".", "input", "(", "''", ")" ]
Outputs the user's seed to stdout, along with lots of warnings about security.
[ "Outputs", "the", "user", "s", "seed", "to", "stdout", "along", "with", "lots", "of", "warnings", "about", "security", "." ]
python
test
google/grr
grr/server/grr_response_server/check_lib/triggers.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/check_lib/triggers.py#L88-L101
def Artifacts(self, os_name=None, cpe=None, label=None): """Whether the conditions applies, modulo host data. Args: os_name: An OS string. cpe: A CPE string. label: A label string. Returns: True if os_name, cpe or labels match. Empty values are ignored. """ hit = lambda x: x[0] == x[1] or not x[0] seq = [(self.os_name, os_name), (self.cpe, cpe), (self.label, label)] return all(map(hit, seq))
[ "def", "Artifacts", "(", "self", ",", "os_name", "=", "None", ",", "cpe", "=", "None", ",", "label", "=", "None", ")", ":", "hit", "=", "lambda", "x", ":", "x", "[", "0", "]", "==", "x", "[", "1", "]", "or", "not", "x", "[", "0", "]", "seq", "=", "[", "(", "self", ".", "os_name", ",", "os_name", ")", ",", "(", "self", ".", "cpe", ",", "cpe", ")", ",", "(", "self", ".", "label", ",", "label", ")", "]", "return", "all", "(", "map", "(", "hit", ",", "seq", ")", ")" ]
Whether the conditions applies, modulo host data. Args: os_name: An OS string. cpe: A CPE string. label: A label string. Returns: True if os_name, cpe or labels match. Empty values are ignored.
[ "Whether", "the", "conditions", "applies", "modulo", "host", "data", "." ]
python
train
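The matching rule in Artifacts() above (an empty condition field matches anything) is easiest to see with concrete calls; the Condition class below is only a stand-in that copies the same hit logic:

    class Condition(object):
        """Stand-in carrying the same matching rule as Artifacts() above."""
        def __init__(self, os_name="", cpe="", label=""):
            self.os_name, self.cpe, self.label = os_name, cpe, label

        def Artifacts(self, os_name=None, cpe=None, label=None):
            hit = lambda x: x[0] == x[1] or not x[0]
            seq = [(self.os_name, os_name), (self.cpe, cpe), (self.label, label)]
            return all(map(hit, seq))

    c = Condition(os_name="Linux")          # cpe and label left empty, so they are ignored
    print(c.Artifacts(os_name="Linux"))     # True
    print(c.Artifacts(os_name="Windows"))   # False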
osrg/ryu
ryu/lib/igmplib.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/igmplib.py#L297-L308
def packet_in_handler(self, req_igmp, msg): """the process when the querier received IGMP.""" ofproto = msg.datapath.ofproto if ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION: in_port = msg.in_port else: in_port = msg.match['in_port'] if (igmp.IGMP_TYPE_REPORT_V1 == req_igmp.msgtype or igmp.IGMP_TYPE_REPORT_V2 == req_igmp.msgtype): self._do_report(req_igmp, in_port, msg) elif igmp.IGMP_TYPE_LEAVE == req_igmp.msgtype: self._do_leave(req_igmp, in_port, msg)
[ "def", "packet_in_handler", "(", "self", ",", "req_igmp", ",", "msg", ")", ":", "ofproto", "=", "msg", ".", "datapath", ".", "ofproto", "if", "ofproto", ".", "OFP_VERSION", "==", "ofproto_v1_0", ".", "OFP_VERSION", ":", "in_port", "=", "msg", ".", "in_port", "else", ":", "in_port", "=", "msg", ".", "match", "[", "'in_port'", "]", "if", "(", "igmp", ".", "IGMP_TYPE_REPORT_V1", "==", "req_igmp", ".", "msgtype", "or", "igmp", ".", "IGMP_TYPE_REPORT_V2", "==", "req_igmp", ".", "msgtype", ")", ":", "self", ".", "_do_report", "(", "req_igmp", ",", "in_port", ",", "msg", ")", "elif", "igmp", ".", "IGMP_TYPE_LEAVE", "==", "req_igmp", ".", "msgtype", ":", "self", ".", "_do_leave", "(", "req_igmp", ",", "in_port", ",", "msg", ")" ]
the process when the querier received IGMP.
[ "the", "process", "when", "the", "querier", "received", "IGMP", "." ]
python
train
bear/ronkyuu
ronkyuu/webmention.py
https://github.com/bear/ronkyuu/blob/91a05fbe220b661760467b7b3d3d780a9ba28afa/ronkyuu/webmention.py#L41-L112
def findMentions(sourceURL, targetURL=None, exclude_domains=[], content=None, test_urls=True, headers={}, timeout=None): """Find all <a /> elements in the given html for a post. Only scan html element matching all criteria in look_in. optionally the content to be scanned can be given as an argument. If any have an href attribute that is not from the one of the items in exclude_domains, append it to our lists. :param sourceURL: the URL for the post we are scanning :param exclude_domains: a list of domains to exclude from the search :type exclude_domains: list :param content: the content to be scanned for mentions :param look_in: dictionary with name, id and class_. only element matching all of these will be scanned :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers: dict :param timeout: optional timeout for web requests :type timeout float :rtype: dictionary of Mentions """ __doc__ = None if test_urls: URLValidator(message='invalid source URL')(sourceURL) if content: result = {'status': requests.codes.ok, 'headers': None, } else: r = requests.get(sourceURL, verify=True, headers=headers, timeout=timeout) result = {'status': r.status_code, 'headers': r.headers } # Check for character encodings and use 'correct' data if 'charset' in r.headers.get('content-type', ''): content = r.text else: content = r.content result.update({'refs': set(), 'post-url': sourceURL}) if result['status'] == requests.codes.ok: # Allow passing BS doc as content if isinstance(content, BeautifulSoup): __doc__ = content # result.update({'content': unicode(__doc__)}) result.update({'content': str(__doc__)}) else: __doc__ = BeautifulSoup(content, _html_parser) result.update({'content': content}) # try to find first h-entry else use full document entry = __doc__.find(class_="h-entry") or __doc__ # Allow finding particular URL if targetURL: # find only targetURL all_links = entry.find_all('a', href=targetURL) else: # find all links with a href all_links = entry.find_all('a', href=True) for link in all_links: href = link.get('href', None) if href: url = urlparse(href) if url.scheme in ('http', 'https'): if url.hostname and url.hostname not in exclude_domains: result['refs'].add(href) return result
[ "def", "findMentions", "(", "sourceURL", ",", "targetURL", "=", "None", ",", "exclude_domains", "=", "[", "]", ",", "content", "=", "None", ",", "test_urls", "=", "True", ",", "headers", "=", "{", "}", ",", "timeout", "=", "None", ")", ":", "__doc__", "=", "None", "if", "test_urls", ":", "URLValidator", "(", "message", "=", "'invalid source URL'", ")", "(", "sourceURL", ")", "if", "content", ":", "result", "=", "{", "'status'", ":", "requests", ".", "codes", ".", "ok", ",", "'headers'", ":", "None", ",", "}", "else", ":", "r", "=", "requests", ".", "get", "(", "sourceURL", ",", "verify", "=", "True", ",", "headers", "=", "headers", ",", "timeout", "=", "timeout", ")", "result", "=", "{", "'status'", ":", "r", ".", "status_code", ",", "'headers'", ":", "r", ".", "headers", "}", "# Check for character encodings and use 'correct' data", "if", "'charset'", "in", "r", ".", "headers", ".", "get", "(", "'content-type'", ",", "''", ")", ":", "content", "=", "r", ".", "text", "else", ":", "content", "=", "r", ".", "content", "result", ".", "update", "(", "{", "'refs'", ":", "set", "(", ")", ",", "'post-url'", ":", "sourceURL", "}", ")", "if", "result", "[", "'status'", "]", "==", "requests", ".", "codes", ".", "ok", ":", "# Allow passing BS doc as content", "if", "isinstance", "(", "content", ",", "BeautifulSoup", ")", ":", "__doc__", "=", "content", "# result.update({'content': unicode(__doc__)})", "result", ".", "update", "(", "{", "'content'", ":", "str", "(", "__doc__", ")", "}", ")", "else", ":", "__doc__", "=", "BeautifulSoup", "(", "content", ",", "_html_parser", ")", "result", ".", "update", "(", "{", "'content'", ":", "content", "}", ")", "# try to find first h-entry else use full document", "entry", "=", "__doc__", ".", "find", "(", "class_", "=", "\"h-entry\"", ")", "or", "__doc__", "# Allow finding particular URL", "if", "targetURL", ":", "# find only targetURL", "all_links", "=", "entry", ".", "find_all", "(", "'a'", ",", "href", "=", "targetURL", ")", "else", ":", "# find all links with a href", "all_links", "=", "entry", ".", "find_all", "(", "'a'", ",", "href", "=", "True", ")", "for", "link", "in", "all_links", ":", "href", "=", "link", ".", "get", "(", "'href'", ",", "None", ")", "if", "href", ":", "url", "=", "urlparse", "(", "href", ")", "if", "url", ".", "scheme", "in", "(", "'http'", ",", "'https'", ")", ":", "if", "url", ".", "hostname", "and", "url", ".", "hostname", "not", "in", "exclude_domains", ":", "result", "[", "'refs'", "]", ".", "add", "(", "href", ")", "return", "result" ]
Find all <a /> elements in the given html for a post. Only scan html element matching all criteria in look_in. optionally the content to be scanned can be given as an argument. If any have an href attribute that is not from the one of the items in exclude_domains, append it to our lists. :param sourceURL: the URL for the post we are scanning :param exclude_domains: a list of domains to exclude from the search :type exclude_domains: list :param content: the content to be scanned for mentions :param look_in: dictionary with name, id and class_. only element matching all of these will be scanned :param test_urls: optional flag to test URLs for validation :param headers: optional headers to send with any web requests :type headers: dict :param timeout: optional timeout for web requests :type timeout float :rtype: dictionary of Mentions
[ "Find", "all", "<a", "/", ">", "elements", "in", "the", "given", "html", "for", "a", "post", ".", "Only", "scan", "html", "element", "matching", "all", "criteria", "in", "look_in", "." ]
python
train
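A minimal call sketch for findMentions above; the URLs are placeholders, a live HTTP fetch is implied because no content is passed, and the top-level import path is an assumption:

    from ronkyuu import findMentions  # assumed re-export of the helper above

    result = findMentions(
        "https://example.com/posts/hello",
        targetURL="https://example.net/notes/42",
        exclude_domains=["example.com"],
        timeout=10,
    )
    print(result["status"], sorted(result["refs"]))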
mitsei/dlkit
dlkit/json_/resource/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/sessions.py#L1476-L1493
def get_bins_by_resource(self, resource_id): """Gets the list of ``Bin`` objects mapped to a ``Resource``. arg: resource_id (osid.id.Id): ``Id`` of a ``Resource`` return: (osid.resource.BinList) - list of bins raise: NotFound - ``resource_id`` is not found raise: NullArgument - ``resource_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinSession.get_bins_by_resource mgr = self._get_provider_manager('RESOURCE', local=True) lookup_session = mgr.get_bin_lookup_session(proxy=self._proxy) return lookup_session.get_bins_by_ids( self.get_bin_ids_by_resource(resource_id))
[ "def", "get_bins_by_resource", "(", "self", ",", "resource_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceBinSession.get_bins_by_resource", "mgr", "=", "self", ".", "_get_provider_manager", "(", "'RESOURCE'", ",", "local", "=", "True", ")", "lookup_session", "=", "mgr", ".", "get_bin_lookup_session", "(", "proxy", "=", "self", ".", "_proxy", ")", "return", "lookup_session", ".", "get_bins_by_ids", "(", "self", ".", "get_bin_ids_by_resource", "(", "resource_id", ")", ")" ]
Gets the list of ``Bin`` objects mapped to a ``Resource``. arg: resource_id (osid.id.Id): ``Id`` of a ``Resource`` return: (osid.resource.BinList) - list of bins raise: NotFound - ``resource_id`` is not found raise: NullArgument - ``resource_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "list", "of", "Bin", "objects", "mapped", "to", "a", "Resource", "." ]
python
train
numenta/htmresearch
projects/sequence_prediction/continuous_sequence/run_lstm_suite.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/projects/sequence_prediction/continuous_sequence/run_lstm_suite.py#L39-L66
def readDataSet(dataSet, noise=0): """ :param dataSet: dataset name :param noise: amount of noise added to the dataset :return: """ filePath = 'data/'+dataSet+'.csv' if dataSet == 'nyc_taxi' or dataSet == 'nyc_taxi_perturb' or dataSet == 'nyc_taxi_perturb_baseline': seq = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=['time', 'data', 'timeofday', 'dayofweek']) seq['time'] = pd.to_datetime(seq['time']) if noise > 0: for i in xrange(len(seq)): value = seq['data'][i] noiseValue = np.random.normal(scale=(value * noise)) value += noiseValue value = max(0, value) value = min(40000, value) seq['data'][i] = value # elif dataSet == 'sine': # df = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=['time', 'data']) # sequence = df['data'] # seq = pd.DataFrame(np.array(sequence), columns=['data']) else: raise(' unrecognized dataset type ') return seq
[ "def", "readDataSet", "(", "dataSet", ",", "noise", "=", "0", ")", ":", "filePath", "=", "'data/'", "+", "dataSet", "+", "'.csv'", "if", "dataSet", "==", "'nyc_taxi'", "or", "dataSet", "==", "'nyc_taxi_perturb'", "or", "dataSet", "==", "'nyc_taxi_perturb_baseline'", ":", "seq", "=", "pd", ".", "read_csv", "(", "filePath", ",", "header", "=", "0", ",", "skiprows", "=", "[", "1", ",", "2", "]", ",", "names", "=", "[", "'time'", ",", "'data'", ",", "'timeofday'", ",", "'dayofweek'", "]", ")", "seq", "[", "'time'", "]", "=", "pd", ".", "to_datetime", "(", "seq", "[", "'time'", "]", ")", "if", "noise", ">", "0", ":", "for", "i", "in", "xrange", "(", "len", "(", "seq", ")", ")", ":", "value", "=", "seq", "[", "'data'", "]", "[", "i", "]", "noiseValue", "=", "np", ".", "random", ".", "normal", "(", "scale", "=", "(", "value", "*", "noise", ")", ")", "value", "+=", "noiseValue", "value", "=", "max", "(", "0", ",", "value", ")", "value", "=", "min", "(", "40000", ",", "value", ")", "seq", "[", "'data'", "]", "[", "i", "]", "=", "value", "# elif dataSet == 'sine':", "# df = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=['time', 'data'])", "# sequence = df['data']", "# seq = pd.DataFrame(np.array(sequence), columns=['data'])", "else", ":", "raise", "(", "' unrecognized dataset type '", ")", "return", "seq" ]
:param dataSet: dataset name :param noise: amount of noise added to the dataset :return:
[ ":", "param", "dataSet", ":", "dataset", "name", ":", "param", "noise", ":", "amount", "of", "noise", "added", "to", "the", "dataset", ":", "return", ":" ]
python
train
mkouhei/bootstrap-py
bootstrap_py/update.py
https://github.com/mkouhei/bootstrap-py/blob/95d56ed98ef409fd9f019dc352fd1c3711533275/bootstrap_py/update.py#L37-L43
def show_message(self): """Show message updatable.""" print( 'current version: {current_version}\n' 'latest version : {latest_version}'.format( current_version=self.current_version, latest_version=self.latest_version))
[ "def", "show_message", "(", "self", ")", ":", "print", "(", "'current version: {current_version}\\n'", "'latest version : {latest_version}'", ".", "format", "(", "current_version", "=", "self", ".", "current_version", ",", "latest_version", "=", "self", ".", "latest_version", ")", ")" ]
Show message updatable.
[ "Show", "message", "updatable", "." ]
python
train
OpenAssets/openassets
openassets/transactions.py
https://github.com/OpenAssets/openassets/blob/e8eb5b80b9703c80980cb275dd85f17d50e39c60/openassets/transactions.py#L66-L115
def transfer(self, asset_transfer_specs, btc_transfer_spec, fees): """ Creates a transaction for sending assets and bitcoins. :param list[(bytes, TransferParameters)] asset_transfer_specs: A list of tuples. In each tuple: - The first element is the ID of an asset. - The second element is the parameters of the transfer. :param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred. :param int fees: The fees to include in the transaction. :return: An unsigned transaction for sending assets and bitcoins. :rtype: CTransaction """ inputs = [] outputs = [] asset_quantities = [] for asset_id, transfer_spec in asset_transfer_specs: colored_outputs, collected_amount = self._collect_colored_outputs( transfer_spec.unspent_outputs, asset_id, transfer_spec.amount) inputs.extend(colored_outputs) outputs.append(self._get_colored_output(transfer_spec.to_script)) asset_quantities.append(transfer_spec.amount) if collected_amount > transfer_spec.amount: outputs.append(self._get_colored_output(transfer_spec.change_script)) asset_quantities.append(collected_amount - transfer_spec.amount) btc_excess = sum([input.output.value for input in inputs]) - sum([output.nValue for output in outputs]) if btc_excess < btc_transfer_spec.amount + fees: # Not enough bitcoin inputs uncolored_outputs, total_amount = self._collect_uncolored_outputs( btc_transfer_spec.unspent_outputs, btc_transfer_spec.amount + fees - btc_excess) inputs.extend(uncolored_outputs) btc_excess += total_amount change = btc_excess - btc_transfer_spec.amount - fees if change > 0: # Too much bitcoin in input, send it back as change outputs.append(self._get_uncolored_output(btc_transfer_spec.change_script, change)) if btc_transfer_spec.amount > 0: outputs.append(self._get_uncolored_output(btc_transfer_spec.to_script, btc_transfer_spec.amount)) if asset_quantities: outputs.insert(0, self._get_marker_output(asset_quantities, b'')) return bitcoin.core.CTransaction( vin=[bitcoin.core.CTxIn(item.out_point, item.output.script) for item in inputs], vout=outputs )
[ "def", "transfer", "(", "self", ",", "asset_transfer_specs", ",", "btc_transfer_spec", ",", "fees", ")", ":", "inputs", "=", "[", "]", "outputs", "=", "[", "]", "asset_quantities", "=", "[", "]", "for", "asset_id", ",", "transfer_spec", "in", "asset_transfer_specs", ":", "colored_outputs", ",", "collected_amount", "=", "self", ".", "_collect_colored_outputs", "(", "transfer_spec", ".", "unspent_outputs", ",", "asset_id", ",", "transfer_spec", ".", "amount", ")", "inputs", ".", "extend", "(", "colored_outputs", ")", "outputs", ".", "append", "(", "self", ".", "_get_colored_output", "(", "transfer_spec", ".", "to_script", ")", ")", "asset_quantities", ".", "append", "(", "transfer_spec", ".", "amount", ")", "if", "collected_amount", ">", "transfer_spec", ".", "amount", ":", "outputs", ".", "append", "(", "self", ".", "_get_colored_output", "(", "transfer_spec", ".", "change_script", ")", ")", "asset_quantities", ".", "append", "(", "collected_amount", "-", "transfer_spec", ".", "amount", ")", "btc_excess", "=", "sum", "(", "[", "input", ".", "output", ".", "value", "for", "input", "in", "inputs", "]", ")", "-", "sum", "(", "[", "output", ".", "nValue", "for", "output", "in", "outputs", "]", ")", "if", "btc_excess", "<", "btc_transfer_spec", ".", "amount", "+", "fees", ":", "# Not enough bitcoin inputs", "uncolored_outputs", ",", "total_amount", "=", "self", ".", "_collect_uncolored_outputs", "(", "btc_transfer_spec", ".", "unspent_outputs", ",", "btc_transfer_spec", ".", "amount", "+", "fees", "-", "btc_excess", ")", "inputs", ".", "extend", "(", "uncolored_outputs", ")", "btc_excess", "+=", "total_amount", "change", "=", "btc_excess", "-", "btc_transfer_spec", ".", "amount", "-", "fees", "if", "change", ">", "0", ":", "# Too much bitcoin in input, send it back as change", "outputs", ".", "append", "(", "self", ".", "_get_uncolored_output", "(", "btc_transfer_spec", ".", "change_script", ",", "change", ")", ")", "if", "btc_transfer_spec", ".", "amount", ">", "0", ":", "outputs", ".", "append", "(", "self", ".", "_get_uncolored_output", "(", "btc_transfer_spec", ".", "to_script", ",", "btc_transfer_spec", ".", "amount", ")", ")", "if", "asset_quantities", ":", "outputs", ".", "insert", "(", "0", ",", "self", ".", "_get_marker_output", "(", "asset_quantities", ",", "b''", ")", ")", "return", "bitcoin", ".", "core", ".", "CTransaction", "(", "vin", "=", "[", "bitcoin", ".", "core", ".", "CTxIn", "(", "item", ".", "out_point", ",", "item", ".", "output", ".", "script", ")", "for", "item", "in", "inputs", "]", ",", "vout", "=", "outputs", ")" ]
Creates a transaction for sending assets and bitcoins. :param list[(bytes, TransferParameters)] asset_transfer_specs: A list of tuples. In each tuple: - The first element is the ID of an asset. - The second element is the parameters of the transfer. :param TransferParameters btc_transfer_spec: The parameters of the bitcoins being transferred. :param int fees: The fees to include in the transaction. :return: An unsigned transaction for sending assets and bitcoins. :rtype: CTransaction
[ "Creates", "a", "transaction", "for", "sending", "assets", "and", "bitcoins", "." ]
python
train
SFDO-Tooling/CumulusCI
cumulusci/core/keychain/BaseProjectKeychain.py
https://github.com/SFDO-Tooling/CumulusCI/blob/e19047921ca771a297e045f22f0bb201651bb6f7/cumulusci/core/keychain/BaseProjectKeychain.py#L184-L190
def set_service(self, name, service_config, project=False): """ Store a ServiceConfig in the keychain """ if not self.project_config.services or name not in self.project_config.services: self._raise_service_not_valid(name) self._validate_service(name, service_config) self._set_service(name, service_config, project) self._load_services()
[ "def", "set_service", "(", "self", ",", "name", ",", "service_config", ",", "project", "=", "False", ")", ":", "if", "not", "self", ".", "project_config", ".", "services", "or", "name", "not", "in", "self", ".", "project_config", ".", "services", ":", "self", ".", "_raise_service_not_valid", "(", "name", ")", "self", ".", "_validate_service", "(", "name", ",", "service_config", ")", "self", ".", "_set_service", "(", "name", ",", "service_config", ",", "project", ")", "self", ".", "_load_services", "(", ")" ]
Store a ServiceConfig in the keychain
[ "Store", "a", "ServiceConfig", "in", "the", "keychain" ]
python
train
spyder-ide/spyder
spyder/plugins/editor/utils/editor.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/utils/editor.py#L1007-L1021
def set_collapsed(block, val): """ Sets the fold trigger state (collapsed or expanded). :param block: The block to modify :param val: The new trigger state (True=collapsed, False=expanded) """ if block is None: return state = block.userState() if state == -1: state = 0 state &= 0x77FFFFFF state |= int(val) << 27 block.setUserState(state)
[ "def", "set_collapsed", "(", "block", ",", "val", ")", ":", "if", "block", "is", "None", ":", "return", "state", "=", "block", ".", "userState", "(", ")", "if", "state", "==", "-", "1", ":", "state", "=", "0", "state", "&=", "0x77FFFFFF", "state", "|=", "int", "(", "val", ")", "<<", "27", "block", ".", "setUserState", "(", "state", ")" ]
Sets the fold trigger state (collapsed or expanded). :param block: The block to modify :param val: The new trigger state (True=collapsed, False=expanded)
[ "Sets", "the", "fold", "trigger", "state", "(", "collapsed", "or", "expanded", ")", "." ]
python
train
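The bit arithmetic in set_collapsed above stores the collapsed flag in bit 27 of the block's user state; the same masking on a plain integer, with a made-up starting state:

    state = 0x0000001F               # pre-existing fold data in the low bits (made up)
    state &= 0x77FFFFFF              # clear bits 31 and 27
    state |= 1 << 27                 # set the collapsed flag
    print(hex(state))                # 0x800001f
    print(bool(state & (1 << 27)))   # True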
mesbahamin/chronophore
chronophore/qtview.py
https://github.com/mesbahamin/chronophore/blob/ee140c61b4dfada966f078de8304bac737cec6f7/chronophore/qtview.py#L159-L228
def _sign_button_press(self): """Validate input from ent_id, then sign in to the Timesheet.""" user_id = self.ent_id.text().strip() try: status = controller.sign(user_id) # ERROR: User type is unknown (!student and !tutor) except ValueError as e: logger.error(e, exc_info=True) QMessageBox.critical( self, __title__ + ' Error', str(e), buttons=QMessageBox.Ok, defaultButton=QMessageBox.Ok, ) # ERROR: User is unregistered except controller.UnregisteredUser as e: logger.debug(e) QMessageBox.warning( self, 'Unregistered User', str(e), buttons=QMessageBox.Ok, defaultButton=QMessageBox.Ok, ) # User needs to select type except controller.AmbiguousUserType as e: logger.debug(e) u = QtUserTypeSelectionDialog('Select User Type: ', self) if u.exec_() == QDialog.Accepted: status = controller.sign(user_id, user_type=u.user_type) self._show_feedback_label( 'Signed {}: {} ({})'.format( status.in_or_out, status.user_name, status.user_type ) ) # User has signed in or out normally else: sign_choice_confirmed = QMessageBox.question( self, 'Confirm Sign-{}'.format(status.in_or_out), 'Sign {}: {}?'.format(status.in_or_out, status.user_name), buttons=QMessageBox.Yes | QMessageBox.No, defaultButton=QMessageBox.Yes, ) logger.debug('Sign {} confirmed: {}'.format( status.in_or_out, sign_choice_confirmed )) if sign_choice_confirmed == QMessageBox.No: # Undo sign-in or sign-out if status.in_or_out == 'in': controller.undo_sign_in(status.entry) elif status.in_or_out == 'out': controller.undo_sign_out(status.entry) else: self._show_feedback_label( 'Signed {}: {}'.format(status.in_or_out, status.user_name) ) finally: self._set_signed_in() self.ent_id.clear() self.ent_id.setFocus()
[ "def", "_sign_button_press", "(", "self", ")", ":", "user_id", "=", "self", ".", "ent_id", ".", "text", "(", ")", ".", "strip", "(", ")", "try", ":", "status", "=", "controller", ".", "sign", "(", "user_id", ")", "# ERROR: User type is unknown (!student and !tutor)", "except", "ValueError", "as", "e", ":", "logger", ".", "error", "(", "e", ",", "exc_info", "=", "True", ")", "QMessageBox", ".", "critical", "(", "self", ",", "__title__", "+", "' Error'", ",", "str", "(", "e", ")", ",", "buttons", "=", "QMessageBox", ".", "Ok", ",", "defaultButton", "=", "QMessageBox", ".", "Ok", ",", ")", "# ERROR: User is unregistered", "except", "controller", ".", "UnregisteredUser", "as", "e", ":", "logger", ".", "debug", "(", "e", ")", "QMessageBox", ".", "warning", "(", "self", ",", "'Unregistered User'", ",", "str", "(", "e", ")", ",", "buttons", "=", "QMessageBox", ".", "Ok", ",", "defaultButton", "=", "QMessageBox", ".", "Ok", ",", ")", "# User needs to select type", "except", "controller", ".", "AmbiguousUserType", "as", "e", ":", "logger", ".", "debug", "(", "e", ")", "u", "=", "QtUserTypeSelectionDialog", "(", "'Select User Type: '", ",", "self", ")", "if", "u", ".", "exec_", "(", ")", "==", "QDialog", ".", "Accepted", ":", "status", "=", "controller", ".", "sign", "(", "user_id", ",", "user_type", "=", "u", ".", "user_type", ")", "self", ".", "_show_feedback_label", "(", "'Signed {}: {} ({})'", ".", "format", "(", "status", ".", "in_or_out", ",", "status", ".", "user_name", ",", "status", ".", "user_type", ")", ")", "# User has signed in or out normally", "else", ":", "sign_choice_confirmed", "=", "QMessageBox", ".", "question", "(", "self", ",", "'Confirm Sign-{}'", ".", "format", "(", "status", ".", "in_or_out", ")", ",", "'Sign {}: {}?'", ".", "format", "(", "status", ".", "in_or_out", ",", "status", ".", "user_name", ")", ",", "buttons", "=", "QMessageBox", ".", "Yes", "|", "QMessageBox", ".", "No", ",", "defaultButton", "=", "QMessageBox", ".", "Yes", ",", ")", "logger", ".", "debug", "(", "'Sign {} confirmed: {}'", ".", "format", "(", "status", ".", "in_or_out", ",", "sign_choice_confirmed", ")", ")", "if", "sign_choice_confirmed", "==", "QMessageBox", ".", "No", ":", "# Undo sign-in or sign-out", "if", "status", ".", "in_or_out", "==", "'in'", ":", "controller", ".", "undo_sign_in", "(", "status", ".", "entry", ")", "elif", "status", ".", "in_or_out", "==", "'out'", ":", "controller", ".", "undo_sign_out", "(", "status", ".", "entry", ")", "else", ":", "self", ".", "_show_feedback_label", "(", "'Signed {}: {}'", ".", "format", "(", "status", ".", "in_or_out", ",", "status", ".", "user_name", ")", ")", "finally", ":", "self", ".", "_set_signed_in", "(", ")", "self", ".", "ent_id", ".", "clear", "(", ")", "self", ".", "ent_id", ".", "setFocus", "(", ")" ]
Validate input from ent_id, then sign in to the Timesheet.
[ "Validate", "input", "from", "ent_id", "then", "sign", "in", "to", "the", "Timesheet", "." ]
python
train
ianmiell/shutit
shutit_class.py
https://github.com/ianmiell/shutit/blob/19cd64cdfb23515b106b40213dccff4101617076/shutit_class.py#L3195-L3237
def config_collection(self): """Collect core config from config files for all seen modules. """ shutit_global.shutit_global_object.yield_to_draw() self.log('In config_collection',level=logging.DEBUG) cfg = self.cfg for module_id in self.module_ids(): # Default to None so we can interpret as ifneeded self.get_config(module_id, 'shutit.core.module.build', None, boolean=True, forcenone=True) self.get_config(module_id, 'shutit.core.module.remove', False, boolean=True) self.get_config(module_id, 'shutit.core.module.tag', False, boolean=True) # Default to allow any image self.get_config(module_id, 'shutit.core.module.allowed_images', [".*"]) module = self.shutit_map[module_id] cfg_file = os.path.dirname(get_module_file(self,module)) + '/configs/build.cnf' if os.path.isfile(cfg_file): # use self.get_config, forcing the passed-in default config_parser = ConfigParser.ConfigParser() config_parser.read(cfg_file) for section in config_parser.sections(): if section == module_id: for option in config_parser.options(section): if option == 'shutit.core.module.allowed_images': override = False for mod, opt, val in self.build['config_overrides']: val = val # pylint # skip overrides if mod == module_id and opt == option: override = True if override: continue value = config_parser.get(section,option) if option == 'shutit.core.module.allowed_images': value = json.loads(value) self.get_config(module_id, option, value, forceask=True) # ifneeded will (by default) only take effect if 'build' is not # specified. It can, however, be forced to a value, but this # should be unusual. if cfg[module_id]['shutit.core.module.build'] is None: self.get_config(module_id, 'shutit.core.module.build_ifneeded', True, boolean=True) cfg[module_id]['shutit.core.module.build'] = False else: self.get_config(module_id, 'shutit.core.module.build_ifneeded', False, boolean=True)
[ "def", "config_collection", "(", "self", ")", ":", "shutit_global", ".", "shutit_global_object", ".", "yield_to_draw", "(", ")", "self", ".", "log", "(", "'In config_collection'", ",", "level", "=", "logging", ".", "DEBUG", ")", "cfg", "=", "self", ".", "cfg", "for", "module_id", "in", "self", ".", "module_ids", "(", ")", ":", "# Default to None so we can interpret as ifneeded", "self", ".", "get_config", "(", "module_id", ",", "'shutit.core.module.build'", ",", "None", ",", "boolean", "=", "True", ",", "forcenone", "=", "True", ")", "self", ".", "get_config", "(", "module_id", ",", "'shutit.core.module.remove'", ",", "False", ",", "boolean", "=", "True", ")", "self", ".", "get_config", "(", "module_id", ",", "'shutit.core.module.tag'", ",", "False", ",", "boolean", "=", "True", ")", "# Default to allow any image", "self", ".", "get_config", "(", "module_id", ",", "'shutit.core.module.allowed_images'", ",", "[", "\".*\"", "]", ")", "module", "=", "self", ".", "shutit_map", "[", "module_id", "]", "cfg_file", "=", "os", ".", "path", ".", "dirname", "(", "get_module_file", "(", "self", ",", "module", ")", ")", "+", "'/configs/build.cnf'", "if", "os", ".", "path", ".", "isfile", "(", "cfg_file", ")", ":", "# use self.get_config, forcing the passed-in default", "config_parser", "=", "ConfigParser", ".", "ConfigParser", "(", ")", "config_parser", ".", "read", "(", "cfg_file", ")", "for", "section", "in", "config_parser", ".", "sections", "(", ")", ":", "if", "section", "==", "module_id", ":", "for", "option", "in", "config_parser", ".", "options", "(", "section", ")", ":", "if", "option", "==", "'shutit.core.module.allowed_images'", ":", "override", "=", "False", "for", "mod", ",", "opt", ",", "val", "in", "self", ".", "build", "[", "'config_overrides'", "]", ":", "val", "=", "val", "# pylint", "# skip overrides", "if", "mod", "==", "module_id", "and", "opt", "==", "option", ":", "override", "=", "True", "if", "override", ":", "continue", "value", "=", "config_parser", ".", "get", "(", "section", ",", "option", ")", "if", "option", "==", "'shutit.core.module.allowed_images'", ":", "value", "=", "json", ".", "loads", "(", "value", ")", "self", ".", "get_config", "(", "module_id", ",", "option", ",", "value", ",", "forceask", "=", "True", ")", "# ifneeded will (by default) only take effect if 'build' is not", "# specified. It can, however, be forced to a value, but this", "# should be unusual.", "if", "cfg", "[", "module_id", "]", "[", "'shutit.core.module.build'", "]", "is", "None", ":", "self", ".", "get_config", "(", "module_id", ",", "'shutit.core.module.build_ifneeded'", ",", "True", ",", "boolean", "=", "True", ")", "cfg", "[", "module_id", "]", "[", "'shutit.core.module.build'", "]", "=", "False", "else", ":", "self", ".", "get_config", "(", "module_id", ",", "'shutit.core.module.build_ifneeded'", ",", "False", ",", "boolean", "=", "True", ")" ]
Collect core config from config files for all seen modules.
[ "Collect", "core", "config", "from", "config", "files", "for", "all", "seen", "modules", "." ]
python
train
dls-controls/pymalcolm
malcolm/core/notifier.py
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/core/notifier.py#L193-L215
def _update_data(self, data): # type: (Any) -> Dict[str, List] """Set our data and notify any subscribers of children what has changed Args: data (object): The new data Returns: dict: {child_name: [path_list, optional child_data]} of the change that needs to be passed to a child as a result of this """ self.data = data child_change_dict = {} # Reflect change of data to children for name in self.children: child_data = getattr(data, name, None) if child_data is None: # Deletion child_change_dict[name] = [[]] else: # Change child_change_dict[name] = [[], child_data] return child_change_dict
[ "def", "_update_data", "(", "self", ",", "data", ")", ":", "# type: (Any) -> Dict[str, List]", "self", ".", "data", "=", "data", "child_change_dict", "=", "{", "}", "# Reflect change of data to children", "for", "name", "in", "self", ".", "children", ":", "child_data", "=", "getattr", "(", "data", ",", "name", ",", "None", ")", "if", "child_data", "is", "None", ":", "# Deletion", "child_change_dict", "[", "name", "]", "=", "[", "[", "]", "]", "else", ":", "# Change", "child_change_dict", "[", "name", "]", "=", "[", "[", "]", ",", "child_data", "]", "return", "child_change_dict" ]
Set our data and notify any subscribers of children what has changed Args: data (object): The new data Returns: dict: {child_name: [path_list, optional child_data]} of the change that needs to be passed to a child as a result of this
[ "Set", "our", "data", "and", "notify", "any", "subscribers", "of", "children", "what", "has", "changed" ]
python
train
OpenGov/carpenter
carpenter/blocks/block.py
https://github.com/OpenGov/carpenter/blob/0ab3c54c05133b9b0468c63e834a7ce3a6fb575b/carpenter/blocks/block.py#L365-L392
def _validate_rows(self): ''' Checks for any missing data row by row. It also checks for changes in cell type and flags multiple switches as an error. ''' for row_index in range(self.start[0], self.end[0]): table_row = self.table[row_index] used_row = self.used_cells[row_index] row_type = None if self.end[1] > self.start[1]: row_type = get_cell_type(table_row[self.start[1]]) num_type_changes = 0 for column_index in range(self.start[1], self.end[1]): if used_row[column_index]: self.flag_change(self.flags, 'error', (row_index, column_index), self.worksheet, self.FLAGS['used']) if not check_cell_type(table_row[column_index], row_type): row_type = get_cell_type(table_row[column_index]) num_type_changes += 1 if num_type_changes > 1: self.flag_change(self.flags, 'warning', (row_index, column_index-1), self.worksheet, self.FLAGS['unexpected-change']) # Decrement this to catch other cells which change again num_type_changes -= 1 # Mark this cell as used used_row[column_index] = True
[ "def", "_validate_rows", "(", "self", ")", ":", "for", "row_index", "in", "range", "(", "self", ".", "start", "[", "0", "]", ",", "self", ".", "end", "[", "0", "]", ")", ":", "table_row", "=", "self", ".", "table", "[", "row_index", "]", "used_row", "=", "self", ".", "used_cells", "[", "row_index", "]", "row_type", "=", "None", "if", "self", ".", "end", "[", "1", "]", ">", "self", ".", "start", "[", "1", "]", ":", "row_type", "=", "get_cell_type", "(", "table_row", "[", "self", ".", "start", "[", "1", "]", "]", ")", "num_type_changes", "=", "0", "for", "column_index", "in", "range", "(", "self", ".", "start", "[", "1", "]", ",", "self", ".", "end", "[", "1", "]", ")", ":", "if", "used_row", "[", "column_index", "]", ":", "self", ".", "flag_change", "(", "self", ".", "flags", ",", "'error'", ",", "(", "row_index", ",", "column_index", ")", ",", "self", ".", "worksheet", ",", "self", ".", "FLAGS", "[", "'used'", "]", ")", "if", "not", "check_cell_type", "(", "table_row", "[", "column_index", "]", ",", "row_type", ")", ":", "row_type", "=", "get_cell_type", "(", "table_row", "[", "column_index", "]", ")", "num_type_changes", "+=", "1", "if", "num_type_changes", ">", "1", ":", "self", ".", "flag_change", "(", "self", ".", "flags", ",", "'warning'", ",", "(", "row_index", ",", "column_index", "-", "1", ")", ",", "self", ".", "worksheet", ",", "self", ".", "FLAGS", "[", "'unexpected-change'", "]", ")", "# Decrement this to catch other cells which change again", "num_type_changes", "-=", "1", "# Mark this cell as used", "used_row", "[", "column_index", "]", "=", "True" ]
Checks for any missing data row by row. It also checks for changes in cell type and flags multiple switches as an error.
[ "Checks", "for", "any", "missing", "data", "row", "by", "row", ".", "It", "also", "checks", "for", "changes", "in", "cell", "type", "and", "flags", "multiple", "switches", "as", "an", "error", "." ]
python
train
QUANTAXIS/QUANTAXIS
QUANTAXIS/QAFetch/QATdx.py
https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/QATdx.py#L336-L409
def QA_fetch_get_stock_day(code, start_date, end_date, if_fq='00', frequence='day', ip=None, port=None): """获取日线及以上级别的数据 Arguments: code {str:6} -- code 是一个单独的code 6位长度的str start_date {str:10} -- 10位长度的日期 比如'2017-01-01' end_date {str:10} -- 10位长度的日期 比如'2018-01-01' Keyword Arguments: if_fq {str} -- '00'/'bfq' -- 不复权 '01'/'qfq' -- 前复权 '02'/'hfq' -- 后复权 '03'/'ddqfq' -- 定点前复权 '04'/'ddhfq' --定点后复权 frequency {str} -- day/week/month/quarter/year 也可以是简写 D/W/M/Q/Y ip {str} -- [description] (default: None) ip可以通过select_best_ip()函数重新获取 port {int} -- [description] (default: {None}) Returns: pd.DataFrame/None -- 返回的是dataframe,如果出错比如只获取了一天,而当天停牌,返回None Exception: 如果出现网络问题/服务器拒绝, 会出现socket:time out 尝试再次获取/更换ip即可, 本函数不做处理 """ ip, port = get_mainmarket_ip(ip, port) api = TdxHq_API() try: with api.connect(ip, port, time_out=0.7): if frequence in ['day', 'd', 'D', 'DAY', 'Day']: frequence = 9 elif frequence in ['w', 'W', 'Week', 'week']: frequence = 5 elif frequence in ['month', 'M', 'm', 'Month']: frequence = 6 elif frequence in ['quarter', 'Q', 'Quarter', 'q']: frequence = 10 elif frequence in ['y', 'Y', 'year', 'Year']: frequence = 11 start_date = str(start_date)[0:10] today_ = datetime.date.today() lens = QA_util_get_trade_gap(start_date, today_) data = pd.concat([api.to_df(api.get_security_bars(frequence, _select_market_code( code), code, (int(lens / 800) - i) * 800, 800)) for i in range(int(lens / 800) + 1)], axis=0) # 这里的问题是: 如果只取了一天的股票,而当天停牌, 那么就直接返回None了 if len(data) < 1: return None data = data[data['open'] != 0] data = data.assign(date=data['datetime'].apply(lambda x: str(x[0:10])), code=str(code), date_stamp=data['datetime'].apply(lambda x: QA_util_date_stamp(str(x)[0:10]))) \ .set_index('date', drop=False, inplace=False) end_date = str(end_date)[0:10] data = data.drop(['year', 'month', 'day', 'hour', 'minute', 'datetime'], axis=1)[ start_date:end_date] if if_fq in ['00', 'bfq']: return data else: print('CURRENTLY NOT SUPPORT REALTIME FUQUAN') return None # xdxr = QA_fetch_get_stock_xdxr(code) # if if_fq in ['01','qfq']: # return QA_data_make_qfq(data,xdxr) # elif if_fq in ['02','hfq']: # return QA_data_make_hfq(data,xdxr) except Exception as e: if isinstance(e, TypeError): print('Tushare内置的pytdx版本和QUANTAXIS使用的pytdx 版本不同, 请重新安装pytdx以解决此问题') print('pip uninstall pytdx') print('pip install pytdx') else: print(e)
[ "def", "QA_fetch_get_stock_day", "(", "code", ",", "start_date", ",", "end_date", ",", "if_fq", "=", "'00'", ",", "frequence", "=", "'day'", ",", "ip", "=", "None", ",", "port", "=", "None", ")", ":", "ip", ",", "port", "=", "get_mainmarket_ip", "(", "ip", ",", "port", ")", "api", "=", "TdxHq_API", "(", ")", "try", ":", "with", "api", ".", "connect", "(", "ip", ",", "port", ",", "time_out", "=", "0.7", ")", ":", "if", "frequence", "in", "[", "'day'", ",", "'d'", ",", "'D'", ",", "'DAY'", ",", "'Day'", "]", ":", "frequence", "=", "9", "elif", "frequence", "in", "[", "'w'", ",", "'W'", ",", "'Week'", ",", "'week'", "]", ":", "frequence", "=", "5", "elif", "frequence", "in", "[", "'month'", ",", "'M'", ",", "'m'", ",", "'Month'", "]", ":", "frequence", "=", "6", "elif", "frequence", "in", "[", "'quarter'", ",", "'Q'", ",", "'Quarter'", ",", "'q'", "]", ":", "frequence", "=", "10", "elif", "frequence", "in", "[", "'y'", ",", "'Y'", ",", "'year'", ",", "'Year'", "]", ":", "frequence", "=", "11", "start_date", "=", "str", "(", "start_date", ")", "[", "0", ":", "10", "]", "today_", "=", "datetime", ".", "date", ".", "today", "(", ")", "lens", "=", "QA_util_get_trade_gap", "(", "start_date", ",", "today_", ")", "data", "=", "pd", ".", "concat", "(", "[", "api", ".", "to_df", "(", "api", ".", "get_security_bars", "(", "frequence", ",", "_select_market_code", "(", "code", ")", ",", "code", ",", "(", "int", "(", "lens", "/", "800", ")", "-", "i", ")", "*", "800", ",", "800", ")", ")", "for", "i", "in", "range", "(", "int", "(", "lens", "/", "800", ")", "+", "1", ")", "]", ",", "axis", "=", "0", ")", "# 这里的问题是: 如果只取了一天的股票,而当天停牌, 那么就直接返回None了", "if", "len", "(", "data", ")", "<", "1", ":", "return", "None", "data", "=", "data", "[", "data", "[", "'open'", "]", "!=", "0", "]", "data", "=", "data", ".", "assign", "(", "date", "=", "data", "[", "'datetime'", "]", ".", "apply", "(", "lambda", "x", ":", "str", "(", "x", "[", "0", ":", "10", "]", ")", ")", ",", "code", "=", "str", "(", "code", ")", ",", "date_stamp", "=", "data", "[", "'datetime'", "]", ".", "apply", "(", "lambda", "x", ":", "QA_util_date_stamp", "(", "str", "(", "x", ")", "[", "0", ":", "10", "]", ")", ")", ")", ".", "set_index", "(", "'date'", ",", "drop", "=", "False", ",", "inplace", "=", "False", ")", "end_date", "=", "str", "(", "end_date", ")", "[", "0", ":", "10", "]", "data", "=", "data", ".", "drop", "(", "[", "'year'", ",", "'month'", ",", "'day'", ",", "'hour'", ",", "'minute'", ",", "'datetime'", "]", ",", "axis", "=", "1", ")", "[", "start_date", ":", "end_date", "]", "if", "if_fq", "in", "[", "'00'", ",", "'bfq'", "]", ":", "return", "data", "else", ":", "print", "(", "'CURRENTLY NOT SUPPORT REALTIME FUQUAN'", ")", "return", "None", "# xdxr = QA_fetch_get_stock_xdxr(code)", "# if if_fq in ['01','qfq']:", "# return QA_data_make_qfq(data,xdxr)", "# elif if_fq in ['02','hfq']:", "# return QA_data_make_hfq(data,xdxr)", "except", "Exception", "as", "e", ":", "if", "isinstance", "(", "e", ",", "TypeError", ")", ":", "print", "(", "'Tushare内置的pytdx版本和QUANTAXIS使用的pytdx 版本不同, 请重新安装pytdx以解决此问题')", "", "print", "(", "'pip uninstall pytdx'", ")", "print", "(", "'pip install pytdx'", ")", "else", ":", "print", "(", "e", ")" ]
获取日线及以上级别的数据 Arguments: code {str:6} -- code 是一个单独的code 6位长度的str start_date {str:10} -- 10位长度的日期 比如'2017-01-01' end_date {str:10} -- 10位长度的日期 比如'2018-01-01' Keyword Arguments: if_fq {str} -- '00'/'bfq' -- 不复权 '01'/'qfq' -- 前复权 '02'/'hfq' -- 后复权 '03'/'ddqfq' -- 定点前复权 '04'/'ddhfq' --定点后复权 frequency {str} -- day/week/month/quarter/year 也可以是简写 D/W/M/Q/Y ip {str} -- [description] (default: None) ip可以通过select_best_ip()函数重新获取 port {int} -- [description] (default: {None}) Returns: pd.DataFrame/None -- 返回的是dataframe,如果出错比如只获取了一天,而当天停牌,返回None Exception: 如果出现网络问题/服务器拒绝, 会出现socket:time out 尝试再次获取/更换ip即可, 本函数不做处理
[ "获取日线及以上级别的数据" ]
python
train
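A minimal call sketch for the QA_fetch_get_stock_day record above. The import path and the example stock code are assumptions (not taken from the record), and a real call needs a reachable TDX quote server, so treat this as illustration rather than a verified invocation.

    # Hypothetical usage; the import path and the code '000001' are assumptions.
    from QUANTAXIS.QAFetch.QATdx import QA_fetch_get_stock_day

    # Daily bars, unadjusted ('00'/'bfq'); returns a DataFrame indexed by date,
    # or None if nothing came back (e.g. a single suspended trading day).
    df = QA_fetch_get_stock_day('000001', '2017-01-01', '2018-01-01',
                                if_fq='00', frequence='day')
    if df is not None:
        print(df.head())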
rossant/ipymd
ipymd/formats/markdown.py
https://github.com/rossant/ipymd/blob/d87c9ebc59d67fe78b0139ee00e0e5307682e303/ipymd/formats/markdown.py#L201-L206
def _code_cell(self, source): """Split the source into input and output.""" input, output = self._prompt.to_cell(source) return {'cell_type': 'code', 'input': input, 'output': output}
[ "def", "_code_cell", "(", "self", ",", "source", ")", ":", "input", ",", "output", "=", "self", ".", "_prompt", ".", "to_cell", "(", "source", ")", "return", "{", "'cell_type'", ":", "'code'", ",", "'input'", ":", "input", ",", "'output'", ":", "output", "}" ]
Split the source into input and output.
[ "Split", "the", "source", "into", "input", "and", "output", "." ]
python
train
spyder-ide/spyder-kernels
spyder_kernels/utils/nsview.py
https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/utils/nsview.py#L580-L597
def globalsfilter(input_dict, check_all=False, filters=None, exclude_private=None, exclude_capitalized=None, exclude_uppercase=None, exclude_unsupported=None, excluded_names=None): """Keep only objects that can be pickled""" output_dict = {} for key, value in list(input_dict.items()): excluded = (exclude_private and key.startswith('_')) or \ (exclude_capitalized and key[0].isupper()) or \ (exclude_uppercase and key.isupper() and len(key) > 1 and not key[1:].isdigit()) or \ (key in excluded_names) or \ (exclude_unsupported and \ not is_supported(value, check_all=check_all, filters=filters)) if not excluded: output_dict[key] = value return output_dict
[ "def", "globalsfilter", "(", "input_dict", ",", "check_all", "=", "False", ",", "filters", "=", "None", ",", "exclude_private", "=", "None", ",", "exclude_capitalized", "=", "None", ",", "exclude_uppercase", "=", "None", ",", "exclude_unsupported", "=", "None", ",", "excluded_names", "=", "None", ")", ":", "output_dict", "=", "{", "}", "for", "key", ",", "value", "in", "list", "(", "input_dict", ".", "items", "(", ")", ")", ":", "excluded", "=", "(", "exclude_private", "and", "key", ".", "startswith", "(", "'_'", ")", ")", "or", "(", "exclude_capitalized", "and", "key", "[", "0", "]", ".", "isupper", "(", ")", ")", "or", "(", "exclude_uppercase", "and", "key", ".", "isupper", "(", ")", "and", "len", "(", "key", ")", ">", "1", "and", "not", "key", "[", "1", ":", "]", ".", "isdigit", "(", ")", ")", "or", "(", "key", "in", "excluded_names", ")", "or", "(", "exclude_unsupported", "and", "not", "is_supported", "(", "value", ",", "check_all", "=", "check_all", ",", "filters", "=", "filters", ")", ")", "if", "not", "excluded", ":", "output_dict", "[", "key", "]", "=", "value", "return", "output_dict" ]
Keep only objects that can be pickled
[ "Keep", "only", "objects", "that", "can", "be", "pickled" ]
python
train
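A small usage sketch for the globalsfilter record above, assuming the function is importable from spyder_kernels.utils.nsview (the path named in the record); the namespace dict is made up for illustration. With exclude_unsupported=False the pickling check (is_supported) is never exercised.

    from spyder_kernels.utils.nsview import globalsfilter

    ns = {'x': 1, '_hidden': 2, 'PI': 3.14, 'Name': 'abc'}
    kept = globalsfilter(ns,
                         exclude_private=True,      # drops '_hidden'
                         exclude_capitalized=True,  # drops 'Name' (and 'PI')
                         exclude_uppercase=True,
                         exclude_unsupported=False,
                         excluded_names=[])
    print(kept)   # {'x': 1}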
coreGreenberet/homematicip-rest-api
homematicip/home.py
https://github.com/coreGreenberet/homematicip-rest-api/blob/d4c8df53281577e01709f75cacb78b1a5a1d00db/homematicip/home.py#L403-L415
def search_device_by_id(self, deviceID) -> Device: """ searches a device by given id Args: deviceID(str): the device to search for Returns the Device object or None if it couldn't find a device """ for d in self.devices: if d.id == deviceID: return d return None
[ "def", "search_device_by_id", "(", "self", ",", "deviceID", ")", "->", "Device", ":", "for", "d", "in", "self", ".", "devices", ":", "if", "d", ".", "id", "==", "deviceID", ":", "return", "d", "return", "None" ]
searches a device by given id Args: deviceID(str): the device to search for Returns the Device object or None if it couldn't find a device
[ "searches", "a", "device", "by", "given", "id", "Args", ":", "deviceID", "(", "str", ")", ":", "the", "device", "to", "search", "for", "Returns", "the", "Device", "object", "or", "None", "if", "it", "couldn", "t", "find", "a", "device" ]
python
train
rochacbruno/flasgger
flasgger/base.py
https://github.com/rochacbruno/flasgger/blob/fef154f61d7afca548067be0c758c3dd71cc4c97/flasgger/base.py#L246-L255
def get_def_models(self, definition_filter=None): """ Used for class based definitions """ model_filter = definition_filter or (lambda tag: True) return { definition.name: definition.obj for definition in self.definition_models if model_filter(definition) }
[ "def", "get_def_models", "(", "self", ",", "definition_filter", "=", "None", ")", ":", "model_filter", "=", "definition_filter", "or", "(", "lambda", "tag", ":", "True", ")", "return", "{", "definition", ".", "name", ":", "definition", ".", "obj", "for", "definition", "in", "self", ".", "definition_models", "if", "model_filter", "(", "definition", ")", "}" ]
Used for class based definitions
[ "Used", "for", "class", "based", "definitions" ]
python
train
bcbio/bcbio-nextgen
bcbio/galaxy/api.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/galaxy/api.py#L52-L64
def run_details(self, run_bc, run_date=None): """Next Gen LIMS specific API functionality. """ try: details = self._get("/nglims/api_run_details", dict(run=run_bc)) except ValueError: raise ValueError("Could not find information in Galaxy for run: %s" % run_bc) if "error" in details and run_date is not None: try: details = self._get("/nglims/api_run_details", dict(run=run_date)) except ValueError: raise ValueError("Could not find information in Galaxy for run: %s" % run_date) return details
[ "def", "run_details", "(", "self", ",", "run_bc", ",", "run_date", "=", "None", ")", ":", "try", ":", "details", "=", "self", ".", "_get", "(", "\"/nglims/api_run_details\"", ",", "dict", "(", "run", "=", "run_bc", ")", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Could not find information in Galaxy for run: %s\"", "%", "run_bc", ")", "if", "\"error\"", "in", "details", "and", "run_date", "is", "not", "None", ":", "try", ":", "details", "=", "self", ".", "_get", "(", "\"/nglims/api_run_details\"", ",", "dict", "(", "run", "=", "run_date", ")", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "\"Could not find information in Galaxy for run: %s\"", "%", "run_date", ")", "return", "details" ]
Next Gen LIMS specific API functionality.
[ "Next", "Gen", "LIMS", "specific", "API", "functionality", "." ]
python
train
metachris/RPIO
source/RPIO/_RPIO.py
https://github.com/metachris/RPIO/blob/be1942a69b2592ddacd9dc833d2668a19aafd8d2/source/RPIO/_RPIO.py#L273-L322
def wait_for_interrupts(self, epoll_timeout=1): """ Blocking loop to listen for GPIO interrupts and distribute them to associated callbacks. epoll_timeout is an easy way to shutdown the blocking function. Per default the timeout is set to 1 second; if `_is_waiting_for_interrupts` is set to False the loop will exit. If an exception occurs while waiting for interrupts, the interrupt gpio interfaces will be cleaned up (/sys/class/gpio unexports). In this case all interrupts will be reset and you'd need to add the callbacks again before using `wait_for_interrupts(..)` again. """ self._is_waiting_for_interrupts = True while self._is_waiting_for_interrupts: events = self._epoll.poll(epoll_timeout) for fileno, event in events: debug("- epoll event on fd %s: %s" % (fileno, event)) if fileno in self._tcp_server_sockets: # New client connection to socket server serversocket, cb = self._tcp_server_sockets[fileno] connection, address = serversocket.accept() connection.setblocking(0) f = connection.fileno() self._epoll.register(f, select.EPOLLIN) self._tcp_client_sockets[f] = (connection, cb) elif event & select.EPOLLIN: # Input from TCP socket socket, cb = self._tcp_client_sockets[fileno] content = socket.recv(1024) if not content or not content.strip(): # No content means quitting self.close_tcp_client(fileno) else: sock, cb = self._tcp_client_sockets[fileno] cb(self._tcp_client_sockets[fileno][0], \ content.strip()) elif event & select.EPOLLHUP: # TCP Socket Hangup self.close_tcp_client(fileno) elif event & select.EPOLLPRI: # GPIO interrupts f = self._map_fileno_to_file[fileno] # read() is workaround for not getting new values # with read(1) val = f.read().strip() f.seek(0) self._handle_interrupt(fileno, val)
[ "def", "wait_for_interrupts", "(", "self", ",", "epoll_timeout", "=", "1", ")", ":", "self", ".", "_is_waiting_for_interrupts", "=", "True", "while", "self", ".", "_is_waiting_for_interrupts", ":", "events", "=", "self", ".", "_epoll", ".", "poll", "(", "epoll_timeout", ")", "for", "fileno", ",", "event", "in", "events", ":", "debug", "(", "\"- epoll event on fd %s: %s\"", "%", "(", "fileno", ",", "event", ")", ")", "if", "fileno", "in", "self", ".", "_tcp_server_sockets", ":", "# New client connection to socket server", "serversocket", ",", "cb", "=", "self", ".", "_tcp_server_sockets", "[", "fileno", "]", "connection", ",", "address", "=", "serversocket", ".", "accept", "(", ")", "connection", ".", "setblocking", "(", "0", ")", "f", "=", "connection", ".", "fileno", "(", ")", "self", ".", "_epoll", ".", "register", "(", "f", ",", "select", ".", "EPOLLIN", ")", "self", ".", "_tcp_client_sockets", "[", "f", "]", "=", "(", "connection", ",", "cb", ")", "elif", "event", "&", "select", ".", "EPOLLIN", ":", "# Input from TCP socket", "socket", ",", "cb", "=", "self", ".", "_tcp_client_sockets", "[", "fileno", "]", "content", "=", "socket", ".", "recv", "(", "1024", ")", "if", "not", "content", "or", "not", "content", ".", "strip", "(", ")", ":", "# No content means quitting", "self", ".", "close_tcp_client", "(", "fileno", ")", "else", ":", "sock", ",", "cb", "=", "self", ".", "_tcp_client_sockets", "[", "fileno", "]", "cb", "(", "self", ".", "_tcp_client_sockets", "[", "fileno", "]", "[", "0", "]", ",", "content", ".", "strip", "(", ")", ")", "elif", "event", "&", "select", ".", "EPOLLHUP", ":", "# TCP Socket Hangup", "self", ".", "close_tcp_client", "(", "fileno", ")", "elif", "event", "&", "select", ".", "EPOLLPRI", ":", "# GPIO interrupts", "f", "=", "self", ".", "_map_fileno_to_file", "[", "fileno", "]", "# read() is workaround for not getting new values", "# with read(1)", "val", "=", "f", ".", "read", "(", ")", ".", "strip", "(", ")", "f", ".", "seek", "(", "0", ")", "self", ".", "_handle_interrupt", "(", "fileno", ",", "val", ")" ]
Blocking loop to listen for GPIO interrupts and distribute them to associated callbacks. epoll_timeout is an easy way to shutdown the blocking function. Per default the timeout is set to 1 second; if `_is_waiting_for_interrupts` is set to False the loop will exit. If an exception occurs while waiting for interrupts, the interrupt gpio interfaces will be cleaned up (/sys/class/gpio unexports). In this case all interrupts will be reset and you'd need to add the callbacks again before using `wait_for_interrupts(..)` again.
[ "Blocking", "loop", "to", "listen", "for", "GPIO", "interrupts", "and", "distribute", "them", "to", "associated", "callbacks", ".", "epoll_timeout", "is", "an", "easy", "way", "to", "shutdown", "the", "blocking", "function", ".", "Per", "default", "the", "timeout", "is", "set", "to", "1", "second", ";", "if", "_is_waiting_for_interrupts", "is", "set", "to", "False", "the", "loop", "will", "exit", "." ]
python
train
fedora-python/pyp2rpm
pyp2rpm/metadata_extractors.py
https://github.com/fedora-python/pyp2rpm/blob/853eb3d226689a5ccdcdb9358b1a3394fafbd2b5/pyp2rpm/metadata_extractors.py#L235-L243
def srcname(self): """Return srcname for the macro if the pypi name should be changed. Those cases are: - name was provided with -r option - pypi name is like python-<name> """ if self.rpm_name or self.name.startswith(('python-', 'Python-')): return self.name_convertor.base_name(self.rpm_name or self.name)
[ "def", "srcname", "(", "self", ")", ":", "if", "self", ".", "rpm_name", "or", "self", ".", "name", ".", "startswith", "(", "(", "'python-'", ",", "'Python-'", ")", ")", ":", "return", "self", ".", "name_convertor", ".", "base_name", "(", "self", ".", "rpm_name", "or", "self", ".", "name", ")" ]
Return srcname for the macro if the pypi name should be changed. Those cases are: - name was provided with -r option - pypi name is like python-<name>
[ "Return", "srcname", "for", "the", "macro", "if", "the", "pypi", "name", "should", "be", "changed", "." ]
python
train
google/grr
grr/server/grr_response_server/hunts/standard.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/hunts/standard.py#L90-L102
def Start(self): """Create the hunt, in the paused state.""" # Anyone can create the hunt but it will be created in the paused # state. Permissions are required to actually start it. with implementation.StartHunt( runner_args=self.args.hunt_runner_args, args=self.args.hunt_args, token=self.token) as hunt_obj: # Nothing really to do here - hunts are always created in the paused # state. self.Log("User %s created a new %s hunt (%s)", self.token.username, hunt_obj.args.flow_runner_args.flow_name, hunt_obj.urn)
[ "def", "Start", "(", "self", ")", ":", "# Anyone can create the hunt but it will be created in the paused", "# state. Permissions are required to actually start it.", "with", "implementation", ".", "StartHunt", "(", "runner_args", "=", "self", ".", "args", ".", "hunt_runner_args", ",", "args", "=", "self", ".", "args", ".", "hunt_args", ",", "token", "=", "self", ".", "token", ")", "as", "hunt_obj", ":", "# Nothing really to do here - hunts are always created in the paused", "# state.", "self", ".", "Log", "(", "\"User %s created a new %s hunt (%s)\"", ",", "self", ".", "token", ".", "username", ",", "hunt_obj", ".", "args", ".", "flow_runner_args", ".", "flow_name", ",", "hunt_obj", ".", "urn", ")" ]
Create the hunt, in the paused state.
[ "Create", "the", "hunt", "in", "the", "paused", "state", "." ]
python
train
NuGrid/NuGridPy
nugridpy/grain.py
https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/grain.py#L1156-L1170
def get_svnpath(): ''' This subroutine gives back the path of the whole svn tree installation, which is necessary for the script to run. ''' svnpathtmp = __file__ splitsvnpath = svnpathtmp.split('/') if len(splitsvnpath) == 1: svnpath = os.path.abspath('.') + '/../../' else: svnpath = '' for i in range(len(splitsvnpath)-3): svnpath += splitsvnpath[i] + '/' return svnpath
[ "def", "get_svnpath", "(", ")", ":", "svnpathtmp", "=", "__file__", "splitsvnpath", "=", "svnpathtmp", ".", "split", "(", "'/'", ")", "if", "len", "(", "splitsvnpath", ")", "==", "1", ":", "svnpath", "=", "os", ".", "path", ".", "abspath", "(", "'.'", ")", "+", "'/../../'", "else", ":", "svnpath", "=", "''", "for", "i", "in", "range", "(", "len", "(", "splitsvnpath", ")", "-", "3", ")", ":", "svnpath", "+=", "splitsvnpath", "[", "i", "]", "+", "'/'", "return", "svnpath" ]
This subroutine gives back the path of the whole svn tree installation, which is necessary for the script to run.
[ "This", "subroutine", "gives", "back", "the", "path", "of", "the", "whole", "svn", "tree", "installation", "which", "is", "necessary", "for", "the", "script", "to", "run", "." ]
python
train
RRZE-HPC/kerncraft
kerncraft/machinemodel.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/machinemodel.py#L193-L218
def get_compiler(self, compiler=None, flags=None): """ Return tuple of compiler and compiler flags. Selects compiler and flags from machine description file, commandline arguments or call arguements. """ if self._args: compiler = compiler or self._args.compiler flags = flags or self._args.compiler_flags if compiler is None: # Select first available compiler in machine description file's compiler dict for c in self['compiler'].keys(): # Making sure compiler is available: if find_executable(c) is not None: compiler = c break else: raise RuntimeError("No compiler ({}) was found. Add different one in machine file, " "via --compiler argument or make sure it will be found in " "$PATH.".format(list(self['compiler'].keys()))) if flags is None: # Select from machine description file flags = self['compiler'].get(compiler, '') return compiler, flags.split(' ')
[ "def", "get_compiler", "(", "self", ",", "compiler", "=", "None", ",", "flags", "=", "None", ")", ":", "if", "self", ".", "_args", ":", "compiler", "=", "compiler", "or", "self", ".", "_args", ".", "compiler", "flags", "=", "flags", "or", "self", ".", "_args", ".", "compiler_flags", "if", "compiler", "is", "None", ":", "# Select first available compiler in machine description file's compiler dict", "for", "c", "in", "self", "[", "'compiler'", "]", ".", "keys", "(", ")", ":", "# Making sure compiler is available:", "if", "find_executable", "(", "c", ")", "is", "not", "None", ":", "compiler", "=", "c", "break", "else", ":", "raise", "RuntimeError", "(", "\"No compiler ({}) was found. Add different one in machine file, \"", "\"via --compiler argument or make sure it will be found in \"", "\"$PATH.\"", ".", "format", "(", "list", "(", "self", "[", "'compiler'", "]", ".", "keys", "(", ")", ")", ")", ")", "if", "flags", "is", "None", ":", "# Select from machine description file", "flags", "=", "self", "[", "'compiler'", "]", ".", "get", "(", "compiler", ",", "''", ")", "return", "compiler", ",", "flags", ".", "split", "(", "' '", ")" ]
Return tuple of compiler and compiler flags. Selects compiler and flags from machine description file, commandline arguments or call arguements.
[ "Return", "tuple", "of", "compiler", "and", "compiler", "flags", "." ]
python
test
digidotcom/python-wvalib
wva/http_client.py
https://github.com/digidotcom/python-wvalib/blob/4252735e2775f80ebaffd813fbe84046d26906b3/wva/http_client.py#L79-L97
def raw_request(self, method, uri, **kwargs): """Perform a WVA web services request and return the raw response object :param method: The HTTP method to use when making this request :param uri: The path past /ws to request. That is, the path requested for a relpath of `a/b/c` would be `/ws/a/b/c`. :raises WVAHttpSocketError: if there was an error making the HTTP request. That is, the request was unable to make it to the WVA for some reason. """ with warnings.catch_warnings(): # catch warning about certs not being verified warnings.simplefilter("ignore", urllib3.exceptions.InsecureRequestWarning) warnings.simplefilter("ignore", urllib3.exceptions.InsecurePlatformWarning) try: response = self._get_session().request(method, self._get_ws_url(uri), **kwargs) except requests.RequestException as e: # e.g. raise new_exc from old_exc six.raise_from(WVAHttpRequestError(e), e) else: return response
[ "def", "raw_request", "(", "self", ",", "method", ",", "uri", ",", "*", "*", "kwargs", ")", ":", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "# catch warning about certs not being verified", "warnings", ".", "simplefilter", "(", "\"ignore\"", ",", "urllib3", ".", "exceptions", ".", "InsecureRequestWarning", ")", "warnings", ".", "simplefilter", "(", "\"ignore\"", ",", "urllib3", ".", "exceptions", ".", "InsecurePlatformWarning", ")", "try", ":", "response", "=", "self", ".", "_get_session", "(", ")", ".", "request", "(", "method", ",", "self", ".", "_get_ws_url", "(", "uri", ")", ",", "*", "*", "kwargs", ")", "except", "requests", ".", "RequestException", "as", "e", ":", "# e.g. raise new_exc from old_exc", "six", ".", "raise_from", "(", "WVAHttpRequestError", "(", "e", ")", ",", "e", ")", "else", ":", "return", "response" ]
Perform a WVA web services request and return the raw response object :param method: The HTTP method to use when making this request :param uri: The path past /ws to request. That is, the path requested for a relpath of `a/b/c` would be `/ws/a/b/c`. :raises WVAHttpSocketError: if there was an error making the HTTP request. That is, the request was unable to make it to the WVA for some reason.
[ "Perform", "a", "WVA", "web", "services", "request", "and", "return", "the", "raw", "response", "object" ]
python
train
quantmind/pulsar
pulsar/utils/structures/zset.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/structures/zset.py#L71-L76
def update(self, score_vals): '''Update the :class:`zset` with an iterable over pairs of scores and values.''' add = self.add for score, value in score_vals: add(score, value)
[ "def", "update", "(", "self", ",", "score_vals", ")", ":", "add", "=", "self", ".", "add", "for", "score", ",", "value", "in", "score_vals", ":", "add", "(", "score", ",", "value", ")" ]
Update the :class:`zset` with an iterable over pairs of scores and values.
[ "Update", "the", ":", "class", ":", "zset", "with", "an", "iterable", "over", "pairs", "of", "scores", "and", "values", "." ]
python
train
rdussurget/py-altimetry
altimetry/data/hydro.py
https://github.com/rdussurget/py-altimetry/blob/57ce7f2d63c6bbc4993821af0bbe46929e3a2d98/altimetry/data/hydro.py#L644-L656
def get_currentDim(self): ''' returns the current dimensions of the object ''' selfDim = self._dimensions.copy() if not isinstance(selfDim,dimStr): if selfDim.has_key('_ndims') : nself = selfDim.pop('_ndims') else : self.warning(1, 'self._dimensions does not have the _ndims key') nself = len(selfDim) else : nself = selfDim['_ndims'] curDim = [[key for key in selfDim.keys()],[selfDim[key] for key in selfDim.keys()]] return curDim, nself
[ "def", "get_currentDim", "(", "self", ")", ":", "selfDim", "=", "self", ".", "_dimensions", ".", "copy", "(", ")", "if", "not", "isinstance", "(", "selfDim", ",", "dimStr", ")", ":", "if", "selfDim", ".", "has_key", "(", "'_ndims'", ")", ":", "nself", "=", "selfDim", ".", "pop", "(", "'_ndims'", ")", "else", ":", "self", ".", "warning", "(", "1", ",", "'self._dimensions does not have the _ndims key'", ")", "nself", "=", "len", "(", "selfDim", ")", "else", ":", "nself", "=", "selfDim", "[", "'_ndims'", "]", "curDim", "=", "[", "[", "key", "for", "key", "in", "selfDim", ".", "keys", "(", ")", "]", ",", "[", "selfDim", "[", "key", "]", "for", "key", "in", "selfDim", ".", "keys", "(", ")", "]", "]", "return", "curDim", ",", "nself" ]
returns the current dimensions of the object
[ "returns", "the", "current", "dimensions", "of", "the", "object" ]
python
train
PmagPy/PmagPy
pmagpy/builder2.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/builder2.py#L508-L581
def get_data(self): """ attempt to read measurements file in working directory. """ meas_file = os.path.join(self.WD, 'magic_measurements.txt') if not os.path.isfile(meas_file): print("-I- No magic_measurements.txt file") return {} try: meas_data, file_type = pmag.magic_read(meas_file) except IOError: print("-I- No magic_measurements.txt file") return {} if file_type == 'bad_file': print("-E- ERROR: Can't read magic_measurements.txt file. File is corrupted.") old_specimen_name = '' #start_time = time.time() meas_name_list = [measurement.name for measurement in self.measurements] for rec in meas_data: # get citation information citation = rec.get('er_citation_names', 'This study') if 'This study' not in citation: citation = citation.strip() + ':This study' er_data = {'er_citation_names': citation} pmag_data = {'er_citation_names': 'This study'} specimen_name = rec["er_specimen_name"] # ignore measurement if there is no specimen if specimen_name == "" or specimen_name == " ": continue # if we've moved onto a new specimen, make sure a sample/site/location # exists for that specimen if specimen_name != old_specimen_name: sample_name = rec["er_sample_name"] site_name = rec["er_site_name"] location_name = rec["er_location_name"] # add items and parents location = self.find_by_name(location_name, self.locations) if location_name and not location: location = self.add_location(location_name, er_data=er_data, pmag_data=pmag_data) site = self.find_by_name(site_name, self.sites) if site_name and not site: site = self.add_site(site_name, location_name, er_data, pmag_data) sample = self.find_by_name(sample_name, self.samples) if sample_name and not sample: sample = self.add_sample(sample_name, site_name, er_data, pmag_data) specimen = self.find_by_name(specimen_name, self.specimens) if specimen_name and not specimen: specimen = self.add_specimen(specimen_name, sample_name, er_data, pmag_data) # add child_items if sample and not self.find_by_name(specimen_name, sample.specimens): sample.specimens.append(specimen) if site and not self.find_by_name(sample_name, site.samples): site.samples.append(sample) if location and not self.find_by_name(site_name, location.sites): location.sites.append(site) exp_name = rec['magic_experiment_name'] meas_num = rec['measurement_number'] meas_name = exp_name + '_' + str(meas_num) measurement = self.find_by_name(meas_name, self.measurements, meas_name_list) if not measurement: self.add_measurement(exp_name, meas_num, specimen.name, rec) meas_name_list.append(meas_name) old_specimen_name = specimen_name
[ "def", "get_data", "(", "self", ")", ":", "meas_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "WD", ",", "'magic_measurements.txt'", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "meas_file", ")", ":", "print", "(", "\"-I- No magic_measurements.txt file\"", ")", "return", "{", "}", "try", ":", "meas_data", ",", "file_type", "=", "pmag", ".", "magic_read", "(", "meas_file", ")", "except", "IOError", ":", "print", "(", "\"-I- No magic_measurements.txt file\"", ")", "return", "{", "}", "if", "file_type", "==", "'bad_file'", ":", "print", "(", "\"-E- ERROR: Can't read magic_measurements.txt file. File is corrupted.\"", ")", "old_specimen_name", "=", "''", "#start_time = time.time()", "meas_name_list", "=", "[", "measurement", ".", "name", "for", "measurement", "in", "self", ".", "measurements", "]", "for", "rec", "in", "meas_data", ":", "# get citation information", "citation", "=", "rec", ".", "get", "(", "'er_citation_names'", ",", "'This study'", ")", "if", "'This study'", "not", "in", "citation", ":", "citation", "=", "citation", ".", "strip", "(", ")", "+", "':This study'", "er_data", "=", "{", "'er_citation_names'", ":", "citation", "}", "pmag_data", "=", "{", "'er_citation_names'", ":", "'This study'", "}", "specimen_name", "=", "rec", "[", "\"er_specimen_name\"", "]", "# ignore measurement if there is no specimen", "if", "specimen_name", "==", "\"\"", "or", "specimen_name", "==", "\" \"", ":", "continue", "# if we've moved onto a new specimen, make sure a sample/site/location", "# exists for that specimen", "if", "specimen_name", "!=", "old_specimen_name", ":", "sample_name", "=", "rec", "[", "\"er_sample_name\"", "]", "site_name", "=", "rec", "[", "\"er_site_name\"", "]", "location_name", "=", "rec", "[", "\"er_location_name\"", "]", "# add items and parents", "location", "=", "self", ".", "find_by_name", "(", "location_name", ",", "self", ".", "locations", ")", "if", "location_name", "and", "not", "location", ":", "location", "=", "self", ".", "add_location", "(", "location_name", ",", "er_data", "=", "er_data", ",", "pmag_data", "=", "pmag_data", ")", "site", "=", "self", ".", "find_by_name", "(", "site_name", ",", "self", ".", "sites", ")", "if", "site_name", "and", "not", "site", ":", "site", "=", "self", ".", "add_site", "(", "site_name", ",", "location_name", ",", "er_data", ",", "pmag_data", ")", "sample", "=", "self", ".", "find_by_name", "(", "sample_name", ",", "self", ".", "samples", ")", "if", "sample_name", "and", "not", "sample", ":", "sample", "=", "self", ".", "add_sample", "(", "sample_name", ",", "site_name", ",", "er_data", ",", "pmag_data", ")", "specimen", "=", "self", ".", "find_by_name", "(", "specimen_name", ",", "self", ".", "specimens", ")", "if", "specimen_name", "and", "not", "specimen", ":", "specimen", "=", "self", ".", "add_specimen", "(", "specimen_name", ",", "sample_name", ",", "er_data", ",", "pmag_data", ")", "# add child_items", "if", "sample", "and", "not", "self", ".", "find_by_name", "(", "specimen_name", ",", "sample", ".", "specimens", ")", ":", "sample", ".", "specimens", ".", "append", "(", "specimen", ")", "if", "site", "and", "not", "self", ".", "find_by_name", "(", "sample_name", ",", "site", ".", "samples", ")", ":", "site", ".", "samples", ".", "append", "(", "sample", ")", "if", "location", "and", "not", "self", ".", "find_by_name", "(", "site_name", ",", "location", ".", "sites", ")", ":", "location", ".", "sites", ".", "append", "(", "site", ")", "exp_name", "=", "rec", "[", "'magic_experiment_name'", 
"]", "meas_num", "=", "rec", "[", "'measurement_number'", "]", "meas_name", "=", "exp_name", "+", "'_'", "+", "str", "(", "meas_num", ")", "measurement", "=", "self", ".", "find_by_name", "(", "meas_name", ",", "self", ".", "measurements", ",", "meas_name_list", ")", "if", "not", "measurement", ":", "self", ".", "add_measurement", "(", "exp_name", ",", "meas_num", ",", "specimen", ".", "name", ",", "rec", ")", "meas_name_list", ".", "append", "(", "meas_name", ")", "old_specimen_name", "=", "specimen_name" ]
attempt to read measurements file in working directory.
[ "attempt", "to", "read", "measurements", "file", "in", "working", "directory", "." ]
python
train
CyberReboot/vent
vent/menus/add_options.py
https://github.com/CyberReboot/vent/blob/9956a09146b11a89a0eabab3bc7ce8906d124885/vent/menus/add_options.py#L16-L39
def repo_values(self): """ Set the appropriate repo dir and get the branches and commits of it """ branches = [] commits = {} m_helper = Tools() status = m_helper.repo_branches(self.parentApp.repo_value['repo']) # branches and commits must both be retrieved successfully if status[0]: branches = status[1] status = m_helper.repo_commits(self.parentApp.repo_value['repo']) if status[0]: r_commits = status[1] for commit in r_commits: commits[commit[0]] = commit[1] else: # if commits failed, return commit errors return status else: # if branch failed, return branch errors return status # if everything is good, return branches with commits return branches, commits
[ "def", "repo_values", "(", "self", ")", ":", "branches", "=", "[", "]", "commits", "=", "{", "}", "m_helper", "=", "Tools", "(", ")", "status", "=", "m_helper", ".", "repo_branches", "(", "self", ".", "parentApp", ".", "repo_value", "[", "'repo'", "]", ")", "# branches and commits must both be retrieved successfully", "if", "status", "[", "0", "]", ":", "branches", "=", "status", "[", "1", "]", "status", "=", "m_helper", ".", "repo_commits", "(", "self", ".", "parentApp", ".", "repo_value", "[", "'repo'", "]", ")", "if", "status", "[", "0", "]", ":", "r_commits", "=", "status", "[", "1", "]", "for", "commit", "in", "r_commits", ":", "commits", "[", "commit", "[", "0", "]", "]", "=", "commit", "[", "1", "]", "else", ":", "# if commits failed, return commit errors", "return", "status", "else", ":", "# if branch failed, return branch errors", "return", "status", "# if everything is good, return branches with commits", "return", "branches", ",", "commits" ]
Set the appropriate repo dir and get the branches and commits of it
[ "Set", "the", "appropriate", "repo", "dir", "and", "get", "the", "branches", "and", "commits", "of", "it" ]
python
train
Microsoft/botbuilder-python
libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker.py
https://github.com/Microsoft/botbuilder-python/blob/274663dd91c811bae6ac4488915ba5880771b0a7/libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker.py#L174-L197
async def get_answers( self, context: TurnContext, options: QnAMakerOptions = None, telemetry_properties: Dict[str,str] = None, telemetry_metrics: Dict[str,int] = None ) -> [QueryResult]: """ Generates answers from the knowledge base. :return: A list of answers for the user's query, sorted in decreasing order of ranking score. :rtype: [QueryResult] """ hydrated_options = self._hydrate_options(options) self._validate_options(hydrated_options) result = self._query_qna_service(context.activity, hydrated_options) await self._emit_trace_info(context, result, hydrated_options) return result
[ "async", "def", "get_answers", "(", "self", ",", "context", ":", "TurnContext", ",", "options", ":", "QnAMakerOptions", "=", "None", ",", "telemetry_properties", ":", "Dict", "[", "str", ",", "str", "]", "=", "None", ",", "telemetry_metrics", ":", "Dict", "[", "str", ",", "int", "]", "=", "None", ")", "->", "[", "QueryResult", "]", ":", "hydrated_options", "=", "self", ".", "_hydrate_options", "(", "options", ")", "self", ".", "_validate_options", "(", "hydrated_options", ")", "result", "=", "self", ".", "_query_qna_service", "(", "context", ".", "activity", ",", "hydrated_options", ")", "await", "self", ".", "_emit_trace_info", "(", "context", ",", "result", ",", "hydrated_options", ")", "return", "result" ]
Generates answers from the knowledge base. :return: A list of answers for the user's query, sorted in decreasing order of ranking score. :rtype: [QueryResult]
[ "Generates", "answers", "from", "the", "knowledge", "base", ".", ":", "return", ":", "A", "list", "of", "answers", "for", "the", "user", "s", "query", "sorted", "in", "decreasing", "order", "of", "ranking", "score", ".", ":", "rtype", ":", "[", "QueryResult", "]" ]
python
test
bitesofcode/projexui
projexui/widgets/xdocktoolbar.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xdocktoolbar.py#L197-L208
def actionAt(self, pos): """ Returns the action at the given position. :param pos | <QPoint> :return <QAction> || None """ child = self.childAt(pos) if child: return child.action() return None
[ "def", "actionAt", "(", "self", ",", "pos", ")", ":", "child", "=", "self", ".", "childAt", "(", "pos", ")", "if", "child", ":", "return", "child", ".", "action", "(", ")", "return", "None" ]
Returns the action at the given position. :param pos | <QPoint> :return <QAction> || None
[ "Returns", "the", "action", "at", "the", "given", "position", ".", ":", "param", "pos", "|", "<QPoint", ">", ":", "return", "<QAction", ">", "||", "None" ]
python
train
deepmind/pysc2
pysc2/lib/features.py
https://github.com/deepmind/pysc2/blob/df4cc4b00f07a2242be9ba153d4a7f4ad2017897/pysc2/lib/features.py#L774-L832
def observation_spec(self): """The observation spec for the SC2 environment. It's worth noting that the image-like observations are in y,x/row,column order which is different than the actions which are in x,y order. This is due to conflicting conventions, and to facilitate printing of the images. Returns: The dict of observation names to their tensor shapes. Shapes with a 0 can vary in length, for example the number of valid actions depends on which units you have selected. """ obs_spec = named_array.NamedDict({ "action_result": (0,), # See error.proto: ActionResult. "alerts": (0,), # See sc2api.proto: Alert. "available_actions": (0,), "build_queue": (0, len(UnitLayer)), # pytype: disable=wrong-arg-types "cargo": (0, len(UnitLayer)), # pytype: disable=wrong-arg-types "cargo_slots_available": (1,), "control_groups": (10, 2), "game_loop": (1,), "last_actions": (0,), "multi_select": (0, len(UnitLayer)), # pytype: disable=wrong-arg-types "player": (len(Player),), # pytype: disable=wrong-arg-types "score_cumulative": (len(ScoreCumulative),), # pytype: disable=wrong-arg-types "score_by_category": (len(ScoreByCategory), len(ScoreCategories)), # pytype: disable=wrong-arg-types "score_by_vital": (len(ScoreByVital), len(ScoreVitals)), # pytype: disable=wrong-arg-types "single_select": (0, len(UnitLayer)), # Only (n, 7) for n in (0, 1). # pytype: disable=wrong-arg-types }) aif = self._agent_interface_format if aif.feature_dimensions: obs_spec["feature_screen"] = (len(SCREEN_FEATURES), aif.feature_dimensions.screen.y, aif.feature_dimensions.screen.x) obs_spec["feature_minimap"] = (len(MINIMAP_FEATURES), aif.feature_dimensions.minimap.y, aif.feature_dimensions.minimap.x) if aif.rgb_dimensions: obs_spec["rgb_screen"] = (aif.rgb_dimensions.screen.y, aif.rgb_dimensions.screen.x, 3) obs_spec["rgb_minimap"] = (aif.rgb_dimensions.minimap.y, aif.rgb_dimensions.minimap.x, 3) if aif.use_feature_units: obs_spec["feature_units"] = (0, len(FeatureUnit)) # pytype: disable=wrong-arg-types if aif.use_raw_units: obs_spec["raw_units"] = (0, len(FeatureUnit)) if aif.use_unit_counts: obs_spec["unit_counts"] = (0, len(UnitCounts)) if aif.use_camera_position: obs_spec["camera_position"] = (2,) return obs_spec
[ "def", "observation_spec", "(", "self", ")", ":", "obs_spec", "=", "named_array", ".", "NamedDict", "(", "{", "\"action_result\"", ":", "(", "0", ",", ")", ",", "# See error.proto: ActionResult.", "\"alerts\"", ":", "(", "0", ",", ")", ",", "# See sc2api.proto: Alert.", "\"available_actions\"", ":", "(", "0", ",", ")", ",", "\"build_queue\"", ":", "(", "0", ",", "len", "(", "UnitLayer", ")", ")", ",", "# pytype: disable=wrong-arg-types", "\"cargo\"", ":", "(", "0", ",", "len", "(", "UnitLayer", ")", ")", ",", "# pytype: disable=wrong-arg-types", "\"cargo_slots_available\"", ":", "(", "1", ",", ")", ",", "\"control_groups\"", ":", "(", "10", ",", "2", ")", ",", "\"game_loop\"", ":", "(", "1", ",", ")", ",", "\"last_actions\"", ":", "(", "0", ",", ")", ",", "\"multi_select\"", ":", "(", "0", ",", "len", "(", "UnitLayer", ")", ")", ",", "# pytype: disable=wrong-arg-types", "\"player\"", ":", "(", "len", "(", "Player", ")", ",", ")", ",", "# pytype: disable=wrong-arg-types", "\"score_cumulative\"", ":", "(", "len", "(", "ScoreCumulative", ")", ",", ")", ",", "# pytype: disable=wrong-arg-types", "\"score_by_category\"", ":", "(", "len", "(", "ScoreByCategory", ")", ",", "len", "(", "ScoreCategories", ")", ")", ",", "# pytype: disable=wrong-arg-types", "\"score_by_vital\"", ":", "(", "len", "(", "ScoreByVital", ")", ",", "len", "(", "ScoreVitals", ")", ")", ",", "# pytype: disable=wrong-arg-types", "\"single_select\"", ":", "(", "0", ",", "len", "(", "UnitLayer", ")", ")", ",", "# Only (n, 7) for n in (0, 1). # pytype: disable=wrong-arg-types", "}", ")", "aif", "=", "self", ".", "_agent_interface_format", "if", "aif", ".", "feature_dimensions", ":", "obs_spec", "[", "\"feature_screen\"", "]", "=", "(", "len", "(", "SCREEN_FEATURES", ")", ",", "aif", ".", "feature_dimensions", ".", "screen", ".", "y", ",", "aif", ".", "feature_dimensions", ".", "screen", ".", "x", ")", "obs_spec", "[", "\"feature_minimap\"", "]", "=", "(", "len", "(", "MINIMAP_FEATURES", ")", ",", "aif", ".", "feature_dimensions", ".", "minimap", ".", "y", ",", "aif", ".", "feature_dimensions", ".", "minimap", ".", "x", ")", "if", "aif", ".", "rgb_dimensions", ":", "obs_spec", "[", "\"rgb_screen\"", "]", "=", "(", "aif", ".", "rgb_dimensions", ".", "screen", ".", "y", ",", "aif", ".", "rgb_dimensions", ".", "screen", ".", "x", ",", "3", ")", "obs_spec", "[", "\"rgb_minimap\"", "]", "=", "(", "aif", ".", "rgb_dimensions", ".", "minimap", ".", "y", ",", "aif", ".", "rgb_dimensions", ".", "minimap", ".", "x", ",", "3", ")", "if", "aif", ".", "use_feature_units", ":", "obs_spec", "[", "\"feature_units\"", "]", "=", "(", "0", ",", "len", "(", "FeatureUnit", ")", ")", "# pytype: disable=wrong-arg-types", "if", "aif", ".", "use_raw_units", ":", "obs_spec", "[", "\"raw_units\"", "]", "=", "(", "0", ",", "len", "(", "FeatureUnit", ")", ")", "if", "aif", ".", "use_unit_counts", ":", "obs_spec", "[", "\"unit_counts\"", "]", "=", "(", "0", ",", "len", "(", "UnitCounts", ")", ")", "if", "aif", ".", "use_camera_position", ":", "obs_spec", "[", "\"camera_position\"", "]", "=", "(", "2", ",", ")", "return", "obs_spec" ]
The observation spec for the SC2 environment. It's worth noting that the image-like observations are in y,x/row,column order which is different than the actions which are in x,y order. This is due to conflicting conventions, and to facilitate printing of the images. Returns: The dict of observation names to their tensor shapes. Shapes with a 0 can vary in length, for example the number of valid actions depends on which units you have selected.
[ "The", "observation", "spec", "for", "the", "SC2", "environment", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/layers/common_layers.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L1157-L1188
def add_timing_signal(x, min_timescale=1, max_timescale=1e4, num_timescales=16): """Adds a bunch of sinusoids of different frequencies to a Tensor. This allows attention to learn to use absolute and relative positions. The timing signal should be added to some precursor of both the source and the target of the attention. The use of relative position is possible because sin(x+y) and cos(x+y) can be expressed in terms of y, sin(x) and cos(x). In particular, we use a geometric sequence of timescales starting with min_timescale and ending with max_timescale. For each timescale, we generate the two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale). All of these sinusoids are concatenated in the depth dimension, padded with zeros to be the same depth as the input, and added into input. Args: x: a Tensor with shape [?, length, ?, depth] min_timescale: a float max_timescale: a float num_timescales: an int <= depth/2 Returns: a Tensor the same shape as x. """ length = shape_list(x)[1] depth = shape_list(x)[3] signal = get_timing_signal(length, min_timescale, max_timescale, num_timescales) padded_signal = tf.pad(signal, [[0, 0], [0, depth - 2 * num_timescales]]) return x + tf.reshape(padded_signal, [1, length, 1, depth])
[ "def", "add_timing_signal", "(", "x", ",", "min_timescale", "=", "1", ",", "max_timescale", "=", "1e4", ",", "num_timescales", "=", "16", ")", ":", "length", "=", "shape_list", "(", "x", ")", "[", "1", "]", "depth", "=", "shape_list", "(", "x", ")", "[", "3", "]", "signal", "=", "get_timing_signal", "(", "length", ",", "min_timescale", ",", "max_timescale", ",", "num_timescales", ")", "padded_signal", "=", "tf", ".", "pad", "(", "signal", ",", "[", "[", "0", ",", "0", "]", ",", "[", "0", ",", "depth", "-", "2", "*", "num_timescales", "]", "]", ")", "return", "x", "+", "tf", ".", "reshape", "(", "padded_signal", ",", "[", "1", ",", "length", ",", "1", ",", "depth", "]", ")" ]
Adds a bunch of sinusoids of different frequencies to a Tensor. This allows attention to learn to use absolute and relative positions. The timing signal should be added to some precursor of both the source and the target of the attention. The use of relative position is possible because sin(x+y) and cos(x+y) can be expressed in terms of y, sin(x) and cos(x). In particular, we use a geometric sequence of timescales starting with min_timescale and ending with max_timescale. For each timescale, we generate the two sinusoidal signals sin(timestep/timescale) and cos(timestep/timescale). All of these sinusoids are concatenated in the depth dimension, padded with zeros to be the same depth as the input, and added into input. Args: x: a Tensor with shape [?, length, ?, depth] min_timescale: a float max_timescale: a float num_timescales: an int <= depth/2 Returns: a Tensor the same shape as x.
[ "Adds", "a", "bunch", "of", "sinusoids", "of", "different", "frequencies", "to", "a", "Tensor", "." ]
python
train
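The add_timing_signal record above describes a geometric ladder of timescales with sin/cos pairs concatenated along the depth dimension. Below is a rough NumPy re-derivation of just that signal construction (not the tensor2tensor implementation itself); the exact increment formula in the library may differ slightly.

    import numpy as np

    def timing_signal(length, min_timescale=1.0, max_timescale=1e4, num_timescales=16):
        # positions 0..length-1, one inverse timescale per sinusoid pair
        position = np.arange(length, dtype=np.float32)
        log_step = np.log(max_timescale / min_timescale) / max(num_timescales - 1, 1)
        inv_timescales = (1.0 / min_timescale) * np.exp(
            -np.arange(num_timescales, dtype=np.float32) * log_step)
        scaled = position[:, None] * inv_timescales[None, :]   # [length, num_timescales]
        # sin and cos concatenated in the depth dimension
        return np.concatenate([np.sin(scaled), np.cos(scaled)], axis=1)

    print(timing_signal(8).shape)   # (8, 32); padded with zeros up to `depth` before being added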
alexa/alexa-skills-kit-sdk-for-python
ask-sdk-runtime/ask_sdk_runtime/skill_builder.py
https://github.com/alexa/alexa-skills-kit-sdk-for-python/blob/097b6406aa12d5ca0b825b00c936861b530cbf39/ask-sdk-runtime/ask_sdk_runtime/skill_builder.py#L216-L250
def global_response_interceptor(self): # type: () -> Callable """Decorator that can be used to add global response interceptors easily to the builder. The returned wrapper function can be applied as a decorator on any function that processes the input and the response generated by the request handler. The function should follow the signature of the process function in :py:class:`ask_sdk_runtime.dispatch_components.request_components.AbstractResponseInterceptor` class. :return: Wrapper function that can be decorated on a interceptor process function. """ def wrapper(process_func): if not callable(process_func): raise SkillBuilderException( "Global Response Interceptor process_func input " "parameter should be callable") class_attributes = { "process": ( lambda self, handler_input, response: process_func( handler_input, response)) } response_interceptor = type( "ResponseInterceptor{}".format( process_func.__name__.title().replace("_", "")), (AbstractResponseInterceptor,), class_attributes) self.add_global_response_interceptor( response_interceptor=response_interceptor()) return wrapper
[ "def", "global_response_interceptor", "(", "self", ")", ":", "# type: () -> Callable", "def", "wrapper", "(", "process_func", ")", ":", "if", "not", "callable", "(", "process_func", ")", ":", "raise", "SkillBuilderException", "(", "\"Global Response Interceptor process_func input \"", "\"parameter should be callable\"", ")", "class_attributes", "=", "{", "\"process\"", ":", "(", "lambda", "self", ",", "handler_input", ",", "response", ":", "process_func", "(", "handler_input", ",", "response", ")", ")", "}", "response_interceptor", "=", "type", "(", "\"ResponseInterceptor{}\"", ".", "format", "(", "process_func", ".", "__name__", ".", "title", "(", ")", ".", "replace", "(", "\"_\"", ",", "\"\"", ")", ")", ",", "(", "AbstractResponseInterceptor", ",", ")", ",", "class_attributes", ")", "self", ".", "add_global_response_interceptor", "(", "response_interceptor", "=", "response_interceptor", "(", ")", ")", "return", "wrapper" ]
Decorator that can be used to add global response interceptors easily to the builder. The returned wrapper function can be applied as a decorator on any function that processes the input and the response generated by the request handler. The function should follow the signature of the process function in :py:class:`ask_sdk_runtime.dispatch_components.request_components.AbstractResponseInterceptor` class. :return: Wrapper function that can be decorated on a interceptor process function.
[ "Decorator", "that", "can", "be", "used", "to", "add", "global", "response", "interceptors", "easily", "to", "the", "builder", "." ]
python
train
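A sketch of how the global_response_interceptor decorator above is typically applied; the SkillBuilder import path is an assumption, since the concrete builder class lives outside this record.

    # Assumed import path; the record above only shows the runtime builder mixin.
    from ask_sdk_core.skill_builder import SkillBuilder

    sb = SkillBuilder()

    @sb.global_response_interceptor()
    def log_response(handler_input, response):
        # Runs after every request handler; the signature follows the
        # AbstractResponseInterceptor.process contract described above.
        print("Response generated: {}".format(response))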
mcs07/ChemDataExtractor
chemdataextractor/text/__init__.py
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/text/__init__.py#L232-L258
def levenshtein(s1, s2, allow_substring=False): """Return the Levenshtein distance between two strings. The Levenshtein distance (a.k.a "edit difference") is the number of characters that need to be substituted, inserted or deleted to transform s1 into s2. Setting the `allow_substring` parameter to True allows s1 to be a substring of s2, so that, for example, "hello" and "hello there" would have a distance of zero. :param string s1: The first string :param string s2: The second string :param bool allow_substring: Whether to allow s1 to be a substring of s2 :returns: Levenshtein distance. :rtype int """ len1, len2 = len(s1), len(s2) lev = [] for i in range(len1 + 1): lev.append([0] * (len2 + 1)) for i in range(len1 + 1): lev[i][0] = i for j in range(len2 + 1): lev[0][j] = 0 if allow_substring else j for i in range(len1): for j in range(len2): lev[i + 1][j + 1] = min(lev[i][j + 1] + 1, lev[i + 1][j] + 1, lev[i][j] + (s1[i] != s2[j])) return min(lev[len1]) if allow_substring else lev[len1][len2]
[ "def", "levenshtein", "(", "s1", ",", "s2", ",", "allow_substring", "=", "False", ")", ":", "len1", ",", "len2", "=", "len", "(", "s1", ")", ",", "len", "(", "s2", ")", "lev", "=", "[", "]", "for", "i", "in", "range", "(", "len1", "+", "1", ")", ":", "lev", ".", "append", "(", "[", "0", "]", "*", "(", "len2", "+", "1", ")", ")", "for", "i", "in", "range", "(", "len1", "+", "1", ")", ":", "lev", "[", "i", "]", "[", "0", "]", "=", "i", "for", "j", "in", "range", "(", "len2", "+", "1", ")", ":", "lev", "[", "0", "]", "[", "j", "]", "=", "0", "if", "allow_substring", "else", "j", "for", "i", "in", "range", "(", "len1", ")", ":", "for", "j", "in", "range", "(", "len2", ")", ":", "lev", "[", "i", "+", "1", "]", "[", "j", "+", "1", "]", "=", "min", "(", "lev", "[", "i", "]", "[", "j", "+", "1", "]", "+", "1", ",", "lev", "[", "i", "+", "1", "]", "[", "j", "]", "+", "1", ",", "lev", "[", "i", "]", "[", "j", "]", "+", "(", "s1", "[", "i", "]", "!=", "s2", "[", "j", "]", ")", ")", "return", "min", "(", "lev", "[", "len1", "]", ")", "if", "allow_substring", "else", "lev", "[", "len1", "]", "[", "len2", "]" ]
Return the Levenshtein distance between two strings. The Levenshtein distance (a.k.a "edit difference") is the number of characters that need to be substituted, inserted or deleted to transform s1 into s2. Setting the `allow_substring` parameter to True allows s1 to be a substring of s2, so that, for example, "hello" and "hello there" would have a distance of zero. :param string s1: The first string :param string s2: The second string :param bool allow_substring: Whether to allow s1 to be a substring of s2 :returns: Levenshtein distance. :rtype int
[ "Return", "the", "Levenshtein", "distance", "between", "two", "strings", "." ]
python
train
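A quick usage sketch for the levenshtein record above, importing from the chemdataextractor.text package named in the record; the word pairs are arbitrary examples (the last one mirrors the substring case mentioned in the docstring).

    from chemdataextractor.text import levenshtein

    print(levenshtein('kitten', 'sitting'))     # 3 (two substitutions, one insertion)
    print(levenshtein('hello', 'hello there'))  # 6 -- the trailing ' there' counts
    print(levenshtein('hello', 'hello there', allow_substring=True))  # 0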
quantumlib/Cirq
cirq/google/sim/xmon_stepper.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/google/sim/xmon_stepper.py#L36-L46
def ensure_pool(func): """Decorator that ensures a pool is available for a stepper.""" def func_wrapper(*args, **kwargs): if len(args) == 0 or not isinstance(args[0], Stepper): raise Exception('@ensure_pool can only be used on Stepper methods.') if args[0]._pool is None: with args[0]: return func(*args, **kwargs) else: return func(*args, **kwargs) return func_wrapper
[ "def", "ensure_pool", "(", "func", ")", ":", "def", "func_wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "len", "(", "args", ")", "==", "0", "or", "not", "isinstance", "(", "args", "[", "0", "]", ",", "Stepper", ")", ":", "raise", "Exception", "(", "'@ensure_pool can only be used on Stepper methods.'", ")", "if", "args", "[", "0", "]", ".", "_pool", "is", "None", ":", "with", "args", "[", "0", "]", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "else", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "func_wrapper" ]
Decorator that ensures a pool is available for a stepper.
[ "Decorator", "that", "ensures", "a", "pool", "is", "available", "for", "a", "stepper", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/rich_ipython_widget.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/rich_ipython_widget.py#L78-L98
def _context_menu_make(self, pos): """ Reimplemented to return a custom context menu for images. """ format = self._control.cursorForPosition(pos).charFormat() name = format.stringProperty(QtGui.QTextFormat.ImageName) if name: menu = QtGui.QMenu() menu.addAction('Copy Image', lambda: self._copy_image(name)) menu.addAction('Save Image As...', lambda: self._save_image(name)) menu.addSeparator() svg = self._name_to_svg_map.get(name, None) if svg is not None: menu.addSeparator() menu.addAction('Copy SVG', lambda: svg_to_clipboard(svg)) menu.addAction('Save SVG As...', lambda: save_svg(svg, self._control)) else: menu = super(RichIPythonWidget, self)._context_menu_make(pos) return menu
[ "def", "_context_menu_make", "(", "self", ",", "pos", ")", ":", "format", "=", "self", ".", "_control", ".", "cursorForPosition", "(", "pos", ")", ".", "charFormat", "(", ")", "name", "=", "format", ".", "stringProperty", "(", "QtGui", ".", "QTextFormat", ".", "ImageName", ")", "if", "name", ":", "menu", "=", "QtGui", ".", "QMenu", "(", ")", "menu", ".", "addAction", "(", "'Copy Image'", ",", "lambda", ":", "self", ".", "_copy_image", "(", "name", ")", ")", "menu", ".", "addAction", "(", "'Save Image As...'", ",", "lambda", ":", "self", ".", "_save_image", "(", "name", ")", ")", "menu", ".", "addSeparator", "(", ")", "svg", "=", "self", ".", "_name_to_svg_map", ".", "get", "(", "name", ",", "None", ")", "if", "svg", "is", "not", "None", ":", "menu", ".", "addSeparator", "(", ")", "menu", ".", "addAction", "(", "'Copy SVG'", ",", "lambda", ":", "svg_to_clipboard", "(", "svg", ")", ")", "menu", ".", "addAction", "(", "'Save SVG As...'", ",", "lambda", ":", "save_svg", "(", "svg", ",", "self", ".", "_control", ")", ")", "else", ":", "menu", "=", "super", "(", "RichIPythonWidget", ",", "self", ")", ".", "_context_menu_make", "(", "pos", ")", "return", "menu" ]
Reimplemented to return a custom context menu for images.
[ "Reimplemented", "to", "return", "a", "custom", "context", "menu", "for", "images", "." ]
python
test
mapillary/mapillary_tools
mapillary_tools/geo.py
https://github.com/mapillary/mapillary_tools/blob/816785e90c589cae6e8e34a5530ce8417d29591c/mapillary_tools/geo.py#L191-L202
def normalize_bearing(bearing, check_hex=False): ''' Normalize bearing and convert from hex if ''' if bearing > 360 and check_hex: # fix negative value wrongly parsed in exifread # -360 degree -> 4294966935 when converting from hex bearing = bin(int(bearing))[2:] bearing = ''.join([str(int(int(a) == 0)) for a in bearing]) bearing = -float(int(bearing, 2)) bearing %= 360 return bearing
[ "def", "normalize_bearing", "(", "bearing", ",", "check_hex", "=", "False", ")", ":", "if", "bearing", ">", "360", "and", "check_hex", ":", "# fix negative value wrongly parsed in exifread", "# -360 degree -> 4294966935 when converting from hex", "bearing", "=", "bin", "(", "int", "(", "bearing", ")", ")", "[", "2", ":", "]", "bearing", "=", "''", ".", "join", "(", "[", "str", "(", "int", "(", "int", "(", "a", ")", "==", "0", ")", ")", "for", "a", "in", "bearing", "]", ")", "bearing", "=", "-", "float", "(", "int", "(", "bearing", ",", "2", ")", ")", "bearing", "%=", "360", "return", "bearing" ]
Normalize bearing and convert from hex if
[ "Normalize", "bearing", "and", "convert", "from", "hex", "if" ]
python
train
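A short usage sketch for the normalize_bearing record above (mapillary_tools.geo per the record's path); the sample values are made up.

    from mapillary_tools.geo import normalize_bearing

    print(normalize_bearing(370))    # 10 -- wrapped into [0, 360)
    print(normalize_bearing(-90))    # 270
    # hex-style value produced by exifread for -360 degrees (see the comment in the record)
    print(normalize_bearing(4294966935, check_hex=True))   # 0.0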
Azure/azure-cosmos-python
azure/cosmos/execution_context/document_producer.py
https://github.com/Azure/azure-cosmos-python/blob/dd01b3c5d308c6da83cfcaa0ab7083351a476353/azure/cosmos/execution_context/document_producer.py#L76-L88
def next(self): """ :return: The next result item. :rtype: dict :raises StopIteration: If there is no more result. """ if self._cur_item is not None: res = self._cur_item self._cur_item = None return res return next(self._ex_context)
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "_cur_item", "is", "not", "None", ":", "res", "=", "self", ".", "_cur_item", "self", ".", "_cur_item", "=", "None", "return", "res", "return", "next", "(", "self", ".", "_ex_context", ")" ]
:return: The next result item. :rtype: dict :raises StopIteration: If there is no more result.
[ ":", "return", ":", "The", "next", "result", "item", ".", ":", "rtype", ":", "dict", ":", "raises", "StopIteration", ":", "If", "there", "is", "no", "more", "result", "." ]
python
train
pypa/pipenv
pipenv/vendor/urllib3/util/retry.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/urllib3/util/retry.py#L329-L402
def increment(self, method=None, url=None, response=None, error=None, _pool=None, _stacktrace=None): """ Return a new Retry object with incremented retry counters. :param response: A response object, or None, if the server did not return a response. :type response: :class:`~urllib3.response.HTTPResponse` :param Exception error: An error encountered during the request, or None if the response was received successfully. :return: A new ``Retry`` object. """ if self.total is False and error: # Disabled, indicate to re-raise the error. raise six.reraise(type(error), error, _stacktrace) total = self.total if total is not None: total -= 1 connect = self.connect read = self.read redirect = self.redirect status_count = self.status cause = 'unknown' status = None redirect_location = None if error and self._is_connection_error(error): # Connect retry? if connect is False: raise six.reraise(type(error), error, _stacktrace) elif connect is not None: connect -= 1 elif error and self._is_read_error(error): # Read retry? if read is False or not self._is_method_retryable(method): raise six.reraise(type(error), error, _stacktrace) elif read is not None: read -= 1 elif response and response.get_redirect_location(): # Redirect retry? if redirect is not None: redirect -= 1 cause = 'too many redirects' redirect_location = response.get_redirect_location() status = response.status else: # Incrementing because of a server error like a 500 in # status_forcelist and a the given method is in the whitelist cause = ResponseError.GENERIC_ERROR if response and response.status: if status_count is not None: status_count -= 1 cause = ResponseError.SPECIFIC_ERROR.format( status_code=response.status) status = response.status history = self.history + (RequestHistory(method, url, error, status, redirect_location),) new_retry = self.new( total=total, connect=connect, read=read, redirect=redirect, status=status_count, history=history) if new_retry.is_exhausted(): raise MaxRetryError(_pool, url, error or ResponseError(cause)) log.debug("Incremented Retry for (url='%s'): %r", url, new_retry) return new_retry
[ "def", "increment", "(", "self", ",", "method", "=", "None", ",", "url", "=", "None", ",", "response", "=", "None", ",", "error", "=", "None", ",", "_pool", "=", "None", ",", "_stacktrace", "=", "None", ")", ":", "if", "self", ".", "total", "is", "False", "and", "error", ":", "# Disabled, indicate to re-raise the error.", "raise", "six", ".", "reraise", "(", "type", "(", "error", ")", ",", "error", ",", "_stacktrace", ")", "total", "=", "self", ".", "total", "if", "total", "is", "not", "None", ":", "total", "-=", "1", "connect", "=", "self", ".", "connect", "read", "=", "self", ".", "read", "redirect", "=", "self", ".", "redirect", "status_count", "=", "self", ".", "status", "cause", "=", "'unknown'", "status", "=", "None", "redirect_location", "=", "None", "if", "error", "and", "self", ".", "_is_connection_error", "(", "error", ")", ":", "# Connect retry?", "if", "connect", "is", "False", ":", "raise", "six", ".", "reraise", "(", "type", "(", "error", ")", ",", "error", ",", "_stacktrace", ")", "elif", "connect", "is", "not", "None", ":", "connect", "-=", "1", "elif", "error", "and", "self", ".", "_is_read_error", "(", "error", ")", ":", "# Read retry?", "if", "read", "is", "False", "or", "not", "self", ".", "_is_method_retryable", "(", "method", ")", ":", "raise", "six", ".", "reraise", "(", "type", "(", "error", ")", ",", "error", ",", "_stacktrace", ")", "elif", "read", "is", "not", "None", ":", "read", "-=", "1", "elif", "response", "and", "response", ".", "get_redirect_location", "(", ")", ":", "# Redirect retry?", "if", "redirect", "is", "not", "None", ":", "redirect", "-=", "1", "cause", "=", "'too many redirects'", "redirect_location", "=", "response", ".", "get_redirect_location", "(", ")", "status", "=", "response", ".", "status", "else", ":", "# Incrementing because of a server error like a 500 in", "# status_forcelist and a the given method is in the whitelist", "cause", "=", "ResponseError", ".", "GENERIC_ERROR", "if", "response", "and", "response", ".", "status", ":", "if", "status_count", "is", "not", "None", ":", "status_count", "-=", "1", "cause", "=", "ResponseError", ".", "SPECIFIC_ERROR", ".", "format", "(", "status_code", "=", "response", ".", "status", ")", "status", "=", "response", ".", "status", "history", "=", "self", ".", "history", "+", "(", "RequestHistory", "(", "method", ",", "url", ",", "error", ",", "status", ",", "redirect_location", ")", ",", ")", "new_retry", "=", "self", ".", "new", "(", "total", "=", "total", ",", "connect", "=", "connect", ",", "read", "=", "read", ",", "redirect", "=", "redirect", ",", "status", "=", "status_count", ",", "history", "=", "history", ")", "if", "new_retry", ".", "is_exhausted", "(", ")", ":", "raise", "MaxRetryError", "(", "_pool", ",", "url", ",", "error", "or", "ResponseError", "(", "cause", ")", ")", "log", ".", "debug", "(", "\"Incremented Retry for (url='%s'): %r\"", ",", "url", ",", "new_retry", ")", "return", "new_retry" ]
Return a new Retry object with incremented retry counters.

        :param response: A response object, or None, if the server did not
            return a response.
        :type response: :class:`~urllib3.response.HTTPResponse`
        :param Exception error: An error encountered during the request, or
            None if the response was received successfully.

        :return: A new ``Retry`` object.
[ "Return", "a", "new", "Retry", "object", "with", "incremented", "retry", "counters", "." ]
python
train
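A short, hedged usage sketch for increment(): it assumes urllib3 is installed and that a ConnectTimeoutError counts as a connection error (so the connect counter is the one decremented); the URL and counter values are illustrative only.

from urllib3.util.retry import Retry
from urllib3.exceptions import ConnectTimeoutError, MaxRetryError

retry = Retry(total=3, connect=2, read=2)
retry = retry.increment(method='GET', url='/index.html',
                        error=ConnectTimeoutError('timed out'))
print(retry.total, retry.connect, retry.read)   # 2 1 2
print(retry.history[-1].error)                  # the recorded ConnectTimeoutError

# Exhausting the counters makes increment() raise instead of returning:
try:
    Retry(total=0).increment(method='GET', url='/index.html',
                             error=ConnectTimeoutError('timed out'))
except MaxRetryError as exc:
    print('gave up:', exc.reason)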
pyviz/holoviews
holoviews/plotting/util.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/util.py#L282-L306
def get_nested_plot_frame(obj, key_map, cached=False):
    """Extracts a single frame from a nested object.

    Replaces any HoloMap or DynamicMap in the nested data structure,
    with the item corresponding to the supplied key.

    Args:
        obj: Nested Dimensioned object
        key_map: Dictionary mapping between dimensions and key value
        cached: Whether to allow looking up key in cache

    Returns:
        Nested datastructure where maps are replaced with single frames
    """
    clone = obj.map(lambda x: x)

    # Ensure that DynamicMaps in the cloned frame have
    # identical callback inputs to allow memoization to work
    for it1, it2 in zip(obj.traverse(lambda x: x), clone.traverse(lambda x: x)):
        if isinstance(it1, DynamicMap):
            with disable_constant(it2.callback):
                it2.callback.inputs = it1.callback.inputs
    with item_check(False):
        return clone.map(lambda x: get_plot_frame(x, key_map, cached=cached),
                         [DynamicMap, HoloMap], clone=False)
[ "def", "get_nested_plot_frame", "(", "obj", ",", "key_map", ",", "cached", "=", "False", ")", ":", "clone", "=", "obj", ".", "map", "(", "lambda", "x", ":", "x", ")", "# Ensure that DynamicMaps in the cloned frame have", "# identical callback inputs to allow memoization to work", "for", "it1", ",", "it2", "in", "zip", "(", "obj", ".", "traverse", "(", "lambda", "x", ":", "x", ")", ",", "clone", ".", "traverse", "(", "lambda", "x", ":", "x", ")", ")", ":", "if", "isinstance", "(", "it1", ",", "DynamicMap", ")", ":", "with", "disable_constant", "(", "it2", ".", "callback", ")", ":", "it2", ".", "callback", ".", "inputs", "=", "it1", ".", "callback", ".", "inputs", "with", "item_check", "(", "False", ")", ":", "return", "clone", ".", "map", "(", "lambda", "x", ":", "get_plot_frame", "(", "x", ",", "key_map", ",", "cached", "=", "cached", ")", ",", "[", "DynamicMap", ",", "HoloMap", "]", ",", "clone", "=", "False", ")" ]
Extracts a single frame from a nested object.

    Replaces any HoloMap or DynamicMap in the nested data structure,
    with the item corresponding to the supplied key.

    Args:
        obj: Nested Dimensioned object
        key_map: Dictionary mapping between dimensions and key value
        cached: Whether to allow looking up key in cache

    Returns:
        Nested datastructure where maps are replaced with single frames
[ "Extracts", "a", "single", "frame", "from", "a", "nested", "object", "." ]
python
train
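A rough usage sketch for get_nested_plot_frame(), assuming HoloViews is installed; the HoloMap and the selected key are made up here, the key_map is keyed by the map's own kdim object so the lookup inside get_plot_frame can find it, and details may need adjusting to the installed HoloViews version.

import holoviews as hv
from holoviews.plotting.util import get_nested_plot_frame

hmap = hv.HoloMap({i: hv.Curve([(x, x * i) for x in range(5)]) for i in range(3)},
                  kdims='i')
key_map = {hmap.kdims[0]: 2}            # dimension object -> selected key value
frame = get_nested_plot_frame(hmap, key_map)
print(frame)                            # the single Curve stored under i=2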
inasafe/inasafe
safe/messaging/item/text.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/messaging/item/text.py#L151-L170
def to_html(self):
        """Render as html

        Args:
            None

        Returns:
            Str the html representation

        Raises:
            Errors are propagated
        """
        icon = self.html_icon()
        attributes = self.html_attributes()
        # Deal with long file names that prevent wrapping
        wrappable_text = self.to_text().replace(os.sep, '<wbr>' + os.sep)
        if icon is not '' and attributes is not '':
            return '<span%s>%s%s</span>' % (attributes, icon, wrappable_text)
        else:
            return self.to_text()
[ "def", "to_html", "(", "self", ")", ":", "icon", "=", "self", ".", "html_icon", "(", ")", "attributes", "=", "self", ".", "html_attributes", "(", ")", "# Deal with long file names that prevent wrapping", "wrappable_text", "=", "self", ".", "to_text", "(", ")", ".", "replace", "(", "os", ".", "sep", ",", "'<wbr>'", "+", "os", ".", "sep", ")", "if", "icon", "is", "not", "''", "and", "attributes", "is", "not", "''", ":", "return", "'<span%s>%s%s</span>'", "%", "(", "attributes", ",", "icon", ",", "wrappable_text", ")", "else", ":", "return", "self", ".", "to_text", "(", ")" ]
Render as html

        Args:
            None

        Returns:
            Str the html representation

        Raises:
            Errors are propagated
[ "Render", "as", "html" ]
python
train
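The only non-obvious part of to_html() above is the <wbr> insertion; this tiny standalone snippet shows the same string transformation on a bare path, without the message Item class.

import os

path = os.sep.join(['very', 'long', 'nested', 'folder', 'exposure.tif'])
wrappable = path.replace(os.sep, '<wbr>' + os.sep)
print(wrappable)   # on POSIX: very<wbr>/long<wbr>/nested<wbr>/folder<wbr>/exposure.tif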
codelv/enaml-native
src/enamlnative/android/android_tab_layout.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_tab_layout.py#L90-L98
def init_widget(self):
        """ Initialize the underlying widget.

        """
        super(AndroidTabLayout, self).init_widget()
        w = self.widget
        w.addOnTabSelectedListener(w.getId())
        w.onTabSelected.connect(self.on_tab_selected)
        w.onTabUnselected.connect(self.on_tab_unselected)
[ "def", "init_widget", "(", "self", ")", ":", "super", "(", "AndroidTabLayout", ",", "self", ")", ".", "init_widget", "(", ")", "w", "=", "self", ".", "widget", "w", ".", "addOnTabSelectedListener", "(", "w", ".", "getId", "(", ")", ")", "w", ".", "onTabSelected", ".", "connect", "(", "self", ".", "on_tab_selected", ")", "w", ".", "onTabUnselected", ".", "connect", "(", "self", ".", "on_tab_unselected", ")" ]
Initialize the underlying widget.
[ "Initialize", "the", "underlying", "widget", "." ]
python
train
carlcarl/grabflickr
grabflickr/grabflickr.py
https://github.com/carlcarl/grabflickr/blob/e9cb2365de80c1819cfd5083c032d0d985f3c614/grabflickr/grabflickr.py#L53-L69
def enter_api_key(parser=None):
    """Prompt for API key and secret
    Then write them to CONFIG_PATH(Default: ~/.grabflickr.conf)

    :param parser: Config parser
    :type parser: SafeConfigParser
    """
    if parser is None:
        parser = SafeConfigParser()
        parser.add_section('flickr')
    global api_key, api_secret
    api_key = raw_input('Enter your API key: ')
    api_secret = raw_input('Enter your API secret: ')
    parser.set('flickr', 'API_KEY', api_key)
    parser.set('flickr', 'API_SECRET', api_secret)
    with open(CONFIG_PATH, 'wb') as f:
        parser.write(f)
[ "def", "enter_api_key", "(", "parser", "=", "None", ")", ":", "if", "parser", "is", "None", ":", "parser", "=", "SafeConfigParser", "(", ")", "parser", ".", "add_section", "(", "'flickr'", ")", "global", "api_key", ",", "api_secret", "api_key", "=", "raw_input", "(", "'Enter your API key: '", ")", "api_secret", "=", "raw_input", "(", "'Enter your API secret: '", ")", "parser", ".", "set", "(", "'flickr'", ",", "'API_KEY'", ",", "api_key", ")", "parser", ".", "set", "(", "'flickr'", ",", "'API_SECRET'", ",", "api_secret", ")", "with", "open", "(", "CONFIG_PATH", ",", "'wb'", ")", "as", "f", ":", "parser", ".", "write", "(", "f", ")" ]
Prompt for API key and secret
    Then write them to CONFIG_PATH(Default: ~/.grabflickr.conf)

    :param parser: Config parser
    :type parser: SafeConfigParser
[ "Prompt", "for", "API", "key", "and", "secret", "Then", "write", "them", "to", "CONFIG_PATH", "(", "Default", ":", "~", "/", ".", "grabflickr", ".", "conf", ")" ]
python
train
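A hedged companion sketch for enter_api_key(): reading the credentials back out of the file it writes. It assumes Python 2 (to match raw_input/SafeConfigParser above) and that CONFIG_PATH is the ~/.grabflickr.conf default named in the docstring.

import os
from ConfigParser import SafeConfigParser  # Python 2, as in the record above

CONFIG_PATH = os.path.expanduser('~/.grabflickr.conf')  # assumed default

parser = SafeConfigParser()
parser.read(CONFIG_PATH)
print(parser.get('flickr', 'API_KEY'))
print(parser.get('flickr', 'API_SECRET'))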
googleapis/dialogflow-python-client-v2
samples/knowledge_base_management.py
https://github.com/googleapis/dialogflow-python-client-v2/blob/8c9c8709222efe427b76c9c8fcc04a0c4a0760b5/samples/knowledge_base_management.py#L74-L89
def get_knowledge_base(project_id, knowledge_base_id):
    """Gets a specific Knowledge base.

    Args:
        project_id: The GCP project linked with the agent.
        knowledge_base_id: Id of the Knowledge base."""
    import dialogflow_v2beta1 as dialogflow

    client = dialogflow.KnowledgeBasesClient()

    knowledge_base_path = client.knowledge_base_path(
        project_id, knowledge_base_id)

    response = client.get_knowledge_base(knowledge_base_path)

    print('Got Knowledge Base:')
    print(' - Display Name: {}'.format(response.display_name))
    print(' - Knowledge ID: {}'.format(response.name))
[ "def", "get_knowledge_base", "(", "project_id", ",", "knowledge_base_id", ")", ":", "import", "dialogflow_v2beta1", "as", "dialogflow", "client", "=", "dialogflow", ".", "KnowledgeBasesClient", "(", ")", "knowledge_base_path", "=", "client", ".", "knowledge_base_path", "(", "project_id", ",", "knowledge_base_id", ")", "response", "=", "client", ".", "get_knowledge_base", "(", "knowledge_base_path", ")", "print", "(", "'Got Knowledge Base:'", ")", "print", "(", "' - Display Name: {}'", ".", "format", "(", "response", ".", "display_name", ")", ")", "print", "(", "' - Knowledge ID: {}'", ".", "format", "(", "response", ".", "name", ")", ")" ]
Gets a specific Knowledge base.

    Args:
        project_id: The GCP project linked with the agent.
        knowledge_base_id: Id of the Knowledge base.
[ "Gets", "a", "specific", "Knowledge", "base", "." ]
python
train
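A minimal usage sketch for get_knowledge_base(); both IDs are placeholders, and it assumes the dialogflow_v2beta1 client library is installed and GCP credentials are configured.

project_id = 'my-gcp-project'            # placeholder
knowledge_base_id = 'MTE2MDg4NDk3Nzg'    # placeholder: last segment of
                                         # projects/<project>/knowledgeBases/<id>
get_knowledge_base(project_id, knowledge_base_id)
# Got Knowledge Base:
#  - Display Name: my FAQ
#  - Knowledge ID: projects/my-gcp-project/knowledgeBases/MTE2MDg4NDk3Nzg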