Dataset schema (column name, viewer type, and observed value lengths / class counts):

    repo                stringlengths    7 - 54
    path                stringlengths    4 - 192
    url                 stringlengths    87 - 284
    code                stringlengths    78 - 104k
    code_tokens         list
    docstring           stringlengths    1 - 46.9k
    docstring_tokens    list
    language            stringclasses    1 value
    partition           stringclasses    3 values
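Records with this schema can be consumed straight from a JSON Lines dump. A minimal reading sketch; the file name is hypothetical, only the field names come from the header above:

    # Read one record from a JSON Lines dump with the schema above.
    # "python_train.jsonl" is a hypothetical file name.
    import json

    with open("python_train.jsonl") as fh:
        for line in fh:
            row = json.loads(line)
            print(row["repo"], row["path"], row["partition"])
            print(row["docstring"])
            break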
tony-landis/datomic-py
datomic/datomic.py
https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L129-L137
def retract(self, e, a, v):
    """ retract the value of an attribute """
    ta = datetime.datetime.now()
    ret = u"[:db/retract %i :%s %s]" % (e, a, dump_edn_val(v))
    rs = self.tx(ret)
    tb = datetime.datetime.now() - ta
    print cl('<<< retracted %s,%s,%s in %sms' % (e, a, v, tb.microseconds / 1000.0), 'cyan')
    return rs
[ "def", "retract", "(", "self", ",", "e", ",", "a", ",", "v", ")", ":", "ta", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "ret", "=", "u\"[:db/retract %i :%s %s]\"", "%", "(", "e", ",", "a", ",", "dump_edn_val", "(", "v", ")", ")", "rs", "=", "self", ".", "tx", "(", "ret", ")", "tb", "=", "datetime", ".", "datetime", ".", "now", "(", ")", "-", "ta", "print", "cl", "(", "'<<< retracted %s,%s,%s in %sms'", "%", "(", "e", ",", "a", ",", "v", ",", "tb", ".", "microseconds", "/", "1000.0", ")", ",", "'cyan'", ")", "return", "rs" ]
retract the value of an attribute
[ "redact", "the", "value", "of", "an", "attribute" ]
python
train
f3at/feat
src/feat/agencies/agency.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/agencies/agency.py#L1477-L1526
def _start_host_agent(self):
    '''
    This method saves the host agent descriptor and runs it.
    To make this happen the following conditions need to be fulfilled:
    - it is a master agency,
    - we are not already starting a host agent,
    - we are not terminating,
    - and last but not least, we don't have a host agent running.
    '''

    def set_flag(value):
        self._starting_host = value

    if not self._can_start_host_agent():
        return

    def handle_error_on_get(fail, connection, doc_id):
        fail.trap(NotFoundError)
        factory = serialization.lookup('host_agent')
        desc = factory(shard=u'lobby', doc_id=doc_id,
                       agency_id=self.agency_id)
        self.info("Host Agent descriptor not found in database, "
                  "creating a brand new instance.")
        return connection.save_document(desc)

    def handle_success_on_get(desc):
        msg = ("Descriptor of host agent has been found in "
               "database (hostname: %s). I will wait patiently "
               "until it disappears. It should happen any minute now "
               "provided there is still a monitor agent in the "
               "cluster who will take care of it. Otherwise "
               "you would have to clean up the database. In development "
               "the --force-host-restart feat script option is your "
               "only friend. " % desc.doc_id)
        raise error.FeatError(msg)

    set_flag(True)
    self.info('Starting host agent.')
    conn = self._database.get_connection()
    doc_id = self._get_host_agent_id()

    d = defer.Deferred()
    d.addCallback(defer.drop_param, self._database.wait_connected)
    d.addCallback(defer.drop_param, conn.get_document, doc_id)
    d.addCallbacks(handle_success_on_get, handle_error_on_get,
                   errbackArgs=(conn, doc_id, ))
    d.addCallback(self.start_agent, hostdef=self._hostdef)
    d.addBoth(defer.bridge_param, set_flag, False)
    d.addErrback(self._host_restart_failed)
    time.callLater(0, d.callback, None)
[ "def", "_start_host_agent", "(", "self", ")", ":", "def", "set_flag", "(", "value", ")", ":", "self", ".", "_starting_host", "=", "value", "if", "not", "self", ".", "_can_start_host_agent", "(", ")", ":", "return", "def", "handle_error_on_get", "(", "fail", ",", "connection", ",", "doc_id", ")", ":", "fail", ".", "trap", "(", "NotFoundError", ")", "factory", "=", "serialization", ".", "lookup", "(", "'host_agent'", ")", "desc", "=", "factory", "(", "shard", "=", "u'lobby'", ",", "doc_id", "=", "doc_id", ",", "agency_id", "=", "self", ".", "agency_id", ")", "self", ".", "info", "(", "\"Host Agent descriptor not found in database, \"", "\"creating a brand new instance.\"", ")", "return", "connection", ".", "save_document", "(", "desc", ")", "def", "handle_success_on_get", "(", "desc", ")", ":", "msg", "=", "(", "\"Descriptor of host agent has been found in \"", "\"database (hostname: %s). I will wait patiently \"", "\"until in disappears. It should happened any minute now \"", "\"providing there is still a monitor agent in the \"", "\"cluster who will take care of it. In other case \"", "\"you would have to cleanup the database. In development \"", "\"the --force-host-restart feat script option is your \"", "\"only friend. \"", "%", "desc", ".", "doc_id", ")", "raise", "error", ".", "FeatError", "(", "msg", ")", "set_flag", "(", "True", ")", "self", ".", "info", "(", "'Starting host agent.'", ")", "conn", "=", "self", ".", "_database", ".", "get_connection", "(", ")", "doc_id", "=", "self", ".", "_get_host_agent_id", "(", ")", "d", "=", "defer", ".", "Deferred", "(", ")", "d", ".", "addCallback", "(", "defer", ".", "drop_param", ",", "self", ".", "_database", ".", "wait_connected", ")", "d", ".", "addCallback", "(", "defer", ".", "drop_param", ",", "conn", ".", "get_document", ",", "doc_id", ")", "d", ".", "addCallbacks", "(", "handle_success_on_get", ",", "handle_error_on_get", ",", "errbackArgs", "=", "(", "conn", ",", "doc_id", ",", ")", ")", "d", ".", "addCallback", "(", "self", ".", "start_agent", ",", "hostdef", "=", "self", ".", "_hostdef", ")", "d", ".", "addBoth", "(", "defer", ".", "bridge_param", ",", "set_flag", ",", "False", ")", "d", ".", "addErrback", "(", "self", ".", "_host_restart_failed", ")", "time", ".", "callLater", "(", "0", ",", "d", ".", "callback", ",", "None", ")" ]
This method saves the host agent descriptor and runs it.
To make this happen the following conditions need to be fulfilled:
- it is a master agency,
- we are not already starting a host agent,
- we are not terminating,
- and last but not least, we don't have a host agent running.
[ "This", "method", "starts", "saves", "the", "host", "agent", "descriptor", "and", "runs", "it", ".", "To", "make", "this", "happen", "following", "conditions", "needs", "to", "be", "fulfilled", ":", "-", "it", "is", "a", "master", "agency", "-", "we", "are", "not", "starting", "a", "host", "agent", "already", "-", "we", "are", "not", "terminating", "-", "and", "last", "but", "not", "least", "we", "dont", "have", "a", "host", "agent", "running", "." ]
python
train
nicferrier/md
src/mdlib/pull.py
https://github.com/nicferrier/md/blob/302ca8882dae060fb15bd5ae470d8e661fb67ec4/src/mdlib/pull.py#L144-L152
def _filter(msgdata, mailparser, mdfolder, mailfilters):
    """Filter msgdata by mailfilters"""
    if mailfilters:
        for f in mailfilters:
            msg = mailparser.parse(StringIO(msgdata))
            rule = f(msg, folder=mdfolder)
            if rule:
                yield rule
    return
[ "def", "_filter", "(", "msgdata", ",", "mailparser", ",", "mdfolder", ",", "mailfilters", ")", ":", "if", "mailfilters", ":", "for", "f", "in", "mailfilters", ":", "msg", "=", "mailparser", ".", "parse", "(", "StringIO", "(", "msgdata", ")", ")", "rule", "=", "f", "(", "msg", ",", "folder", "=", "mdfolder", ")", "if", "rule", ":", "yield", "rule", "return" ]
Filter msgdata by mailfilters
[ "Filter", "msgdata", "by", "mailfilters" ]
python
train
CZ-NIC/yangson
yangson/statement.py
https://github.com/CZ-NIC/yangson/blob/a4b9464041fa8b28f6020a420ababf18fddf5d4a/yangson/statement.py#L291-L320
def argument(self) -> bool:
    """Parse statement argument.

    Return ``True`` if the argument is followed by a block of substatements.
    """
    next = self.peek()
    if next == "'":
        quoted = True
        self.sq_argument()
    elif next == '"':
        quoted = True
        self.dq_argument()
    elif self._arg == "":
        quoted = False
        self.unq_argument()
    else:
        raise UnexpectedInput(self, "single or double quote")
    self.opt_separator()
    next = self.peek()
    if next == ";":
        return False
    if next == "{":
        return True
    elif quoted and next == "+":
        self.offset += 1
        self.opt_separator()
        return self.argument()
    else:
        raise UnexpectedInput(self, "';', '{'" + (" or '+'" if quoted else ""))
[ "def", "argument", "(", "self", ")", "->", "bool", ":", "next", "=", "self", ".", "peek", "(", ")", "if", "next", "==", "\"'\"", ":", "quoted", "=", "True", "self", ".", "sq_argument", "(", ")", "elif", "next", "==", "'\"'", ":", "quoted", "=", "True", "self", ".", "dq_argument", "(", ")", "elif", "self", ".", "_arg", "==", "\"\"", ":", "quoted", "=", "False", "self", ".", "unq_argument", "(", ")", "else", ":", "raise", "UnexpectedInput", "(", "self", ",", "\"single or double quote\"", ")", "self", ".", "opt_separator", "(", ")", "next", "=", "self", ".", "peek", "(", ")", "if", "next", "==", "\";\"", ":", "return", "False", "if", "next", "==", "\"{\"", ":", "return", "True", "elif", "quoted", "and", "next", "==", "\"+\"", ":", "self", ".", "offset", "+=", "1", "self", ".", "opt_separator", "(", ")", "return", "self", ".", "argument", "(", ")", "else", ":", "raise", "UnexpectedInput", "(", "self", ",", "\"';', '{'\"", "+", "(", "\" or '+'\"", "if", "quoted", "else", "\"\"", ")", ")" ]
Parse statement argument.

Return ``True`` if the argument is followed by a block of substatements.
[ "Parse", "statement", "argument", "." ]
python
train
ganguli-lab/proxalgs
proxalgs/operators.py
https://github.com/ganguli-lab/proxalgs/blob/74f54467ad072d3229edea93fa84ddd98dd77c67/proxalgs/operators.py#L214-L244
def nucnorm(x0, rho, gamma):
    """
    Proximal operator for the nuclear norm (sum of the singular values of a matrix)

    Parameters
    ----------
    x0 : array_like
        The starting or initial point used in the proximal update step

    rho : float
        Momentum parameter for the proximal step (larger value -> stays closer to x0)

    gamma : float
        A constant that weights how strongly to enforce the constraint

    Returns
    -------
    theta : array_like
        The parameter vector found after running the proximal update step
    """

    # compute SVD
    u, s, v = np.linalg.svd(x0, full_matrices=False)

    # soft threshold the singular values
    sthr = np.maximum(s - (gamma / float(rho)), 0)

    # reconstruct
    x_out = (u.dot(np.diag(sthr)).dot(v))

    return x_out
[ "def", "nucnorm", "(", "x0", ",", "rho", ",", "gamma", ")", ":", "# compute SVD", "u", ",", "s", ",", "v", "=", "np", ".", "linalg", ".", "svd", "(", "x0", ",", "full_matrices", "=", "False", ")", "# soft threshold the singular values", "sthr", "=", "np", ".", "maximum", "(", "s", "-", "(", "gamma", "/", "float", "(", "rho", ")", ")", ",", "0", ")", "# reconstruct", "x_out", "=", "(", "u", ".", "dot", "(", "np", ".", "diag", "(", "sthr", ")", ")", ".", "dot", "(", "v", ")", ")", "return", "x_out" ]
Proximal operator for the nuclear norm (sum of the singular values of a matrix)

Parameters
----------
x0 : array_like
    The starting or initial point used in the proximal update step

rho : float
    Momentum parameter for the proximal step (larger value -> stays closer to x0)

gamma : float
    A constant that weights how strongly to enforce the constraint

Returns
-------
theta : array_like
    The parameter vector found after running the proximal update step
[ "Proximal", "operator", "for", "the", "nuclear", "norm", "(", "sum", "of", "the", "singular", "values", "of", "a", "matrix", ")" ]
python
train
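The nucnorm operator above soft-thresholds the singular values of its input. A minimal usage sketch; the matrix shape and parameter values are illustrative, and the import path is assumed from the file location in the repo:

    # Shrink the singular values of a random matrix by gamma/rho.
    import numpy as np
    from proxalgs.operators import nucnorm  # path assumed from proxalgs/operators.py

    x0 = np.random.randn(20, 10)
    x = nucnorm(x0, rho=1.0, gamma=0.5)
    # Each singular value s of x0 becomes max(s - 0.5, 0) in x.
    print(np.linalg.svd(x, compute_uv=False))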
cloudera/impyla
impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py
https://github.com/cloudera/impyla/blob/547fa2ba3b6151e2a98b3544301471a643212dc3/impala/_thrift_gen/hive_metastore/ThriftHiveMetastore.py#L3481-L3490
def get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts):
    """
    Parameters:
     - db_name
     - tbl_name
     - filter
     - max_parts
    """
    self.send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts)
    return self.recv_get_partitions_by_filter()
[ "def", "get_partitions_by_filter", "(", "self", ",", "db_name", ",", "tbl_name", ",", "filter", ",", "max_parts", ")", ":", "self", ".", "send_get_partitions_by_filter", "(", "db_name", ",", "tbl_name", ",", "filter", ",", "max_parts", ")", "return", "self", ".", "recv_get_partitions_by_filter", "(", ")" ]
Parameters:
 - db_name
 - tbl_name
 - filter
 - max_parts
[ "Parameters", ":", "-", "db_name", "-", "tbl_name", "-", "filter", "-", "max_parts" ]
python
train
xtuml/pyxtuml
xtuml/meta.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/xtuml/meta.py#L632-L644
def clone(self, instance):
    '''
    Create a shallow clone of an *instance*.

    **Note:** the clone and the original instance **do not** have to be
    part of the same metaclass.
    '''
    args = list()
    for name, _ in get_metaclass(instance).attributes:
        value = getattr(instance, name)
        args.append(value)

    return self.new(*args)
[ "def", "clone", "(", "self", ",", "instance", ")", ":", "args", "=", "list", "(", ")", "for", "name", ",", "_", "in", "get_metaclass", "(", "instance", ")", ".", "attributes", ":", "value", "=", "getattr", "(", "instance", ",", "name", ")", "args", ".", "append", "(", "value", ")", "return", "self", ".", "new", "(", "*", "args", ")" ]
Create a shallow clone of an *instance*. **Note:** the clone and the original instance **do not** have to be part of the same metaclass.
[ "Create", "a", "shallow", "clone", "of", "an", "*", "instance", "*", ".", "**", "Note", ":", "**", "the", "clone", "and", "the", "original", "instance", "**", "does", "not", "**", "have", "to", "be", "part", "of", "the", "same", "metaclass", "." ]
python
test
log2timeline/dfvfs
dfvfs/lib/sqlite_database.py
https://github.com/log2timeline/dfvfs/blob/2b3ccd115f9901d89f383397d4a1376a873c83c4/dfvfs/lib/sqlite_database.py#L61-L93
def GetNumberOfRows(self, table_name):
  """Retrieves the number of rows in the table.

  Args:
    table_name (str): name of the table.

  Returns:
    int: number of rows.

  Raises:
    IOError: if the file-like object has not been opened.
    OSError: if the file-like object has not been opened.
  """
  if not self._connection:
    raise IOError('Not opened.')

  self._cursor.execute(self._NUMBER_OF_ROWS_QUERY.format(table_name))

  row = self._cursor.fetchone()
  if not row:
    raise IOError(
        'Unable to retrieve number of rows of table: {0:s}'.format(
            table_name))

  number_of_rows = row[0]
  if isinstance(number_of_rows, py2to3.STRING_TYPES):
    try:
      number_of_rows = int(number_of_rows, 10)
    except ValueError as exception:
      raise IOError((
          'Unable to determine number of rows of table: {0:s} '
          'with error: {1!s}').format(table_name, exception))

  return number_of_rows
[ "def", "GetNumberOfRows", "(", "self", ",", "table_name", ")", ":", "if", "not", "self", ".", "_connection", ":", "raise", "IOError", "(", "'Not opened.'", ")", "self", ".", "_cursor", ".", "execute", "(", "self", ".", "_NUMBER_OF_ROWS_QUERY", ".", "format", "(", "table_name", ")", ")", "row", "=", "self", ".", "_cursor", ".", "fetchone", "(", ")", "if", "not", "row", ":", "raise", "IOError", "(", "'Unable to retrieve number of rows of table: {0:s}'", ".", "format", "(", "table_name", ")", ")", "number_of_rows", "=", "row", "[", "0", "]", "if", "isinstance", "(", "number_of_rows", ",", "py2to3", ".", "STRING_TYPES", ")", ":", "try", ":", "number_of_rows", "=", "int", "(", "number_of_rows", ",", "10", ")", "except", "ValueError", "as", "exception", ":", "raise", "IOError", "(", "(", "'Unable to determine number of rows of table: {0:s} '", "'with error: {1!s}'", ")", ".", "format", "(", "table_name", ",", "exception", ")", ")", "return", "number_of_rows" ]
Retrieves the number of rows in the table.

Args:
  table_name (str): name of the table.

Returns:
  int: number of rows.

Raises:
  IOError: if the file-like object has not been opened.
  OSError: if the file-like object has not been opened.
[ "Retrieves", "the", "number", "of", "rows", "in", "the", "table", "." ]
python
train
summa-tx/riemann
riemann/tx/tx.py
https://github.com/summa-tx/riemann/blob/04ae336dfd4007ceaed748daadc91cc32fa278ec/riemann/tx/tx.py#L604-L614
def _sighash_final_hashing(self, copy_tx, sighash_type):
    '''
    Tx, int -> bytes
    Returns the hash that should be signed
    https://en.bitcoin.it/wiki/OP_CHECKSIG#Procedure_for_Hashtype_SIGHASH_ANYONECANPAY
    '''
    sighash = ByteData()
    sighash += copy_tx.to_bytes()
    sighash += utils.i2le_padded(sighash_type, 4)
    return utils.hash256(sighash.to_bytes())
[ "def", "_sighash_final_hashing", "(", "self", ",", "copy_tx", ",", "sighash_type", ")", ":", "sighash", "=", "ByteData", "(", ")", "sighash", "+=", "copy_tx", ".", "to_bytes", "(", ")", "sighash", "+=", "utils", ".", "i2le_padded", "(", "sighash_type", ",", "4", ")", "return", "utils", ".", "hash256", "(", "sighash", ".", "to_bytes", "(", ")", ")" ]
Tx, int -> bytes
Returns the hash that should be signed
https://en.bitcoin.it/wiki/OP_CHECKSIG#Procedure_for_Hashtype_SIGHASH_ANYONECANPAY
[ "Tx", "int", "-", ">", "bytes", "Returns", "the", "hash", "that", "should", "be", "signed", "https", ":", "//", "en", ".", "bitcoin", ".", "it", "/", "wiki", "/", "OP_CHECKSIG#Procedure_for_Hashtype_SIGHASH_ANYONECANPAY" ]
python
train
numenta/nupic
src/nupic/database/client_jobs_dao.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/database/client_jobs_dao.py#L2782-L2825
def modelSetCompleted(self, modelID, completionReason, completionMsg,
                      cpuTime=0, useConnectionID=True):
  """ Mark a model as completed, with the given completionReason and
  completionMsg. This will fail if the model does not currently belong to
  this client (connection_id doesn't match).

  Parameters:
  ----------------------------------------------------------------
  modelID:            model ID of model to modify
  completionReason:   completionReason string
  completionMsg:      completionMsg string
  cpuTime:            amount of CPU time spent on this model
  useConnectionID:    True if the connection id of the calling function
                      must be the same as the connection that created
                      the job. Set to True for hypersearch workers, which
                      use this mechanism for orphaned model detection.
  """
  if completionMsg is None:
    completionMsg = ''

  query = 'UPDATE %s SET status=%%s, ' \
          '          completion_reason=%%s, ' \
          '          completion_msg=%%s, ' \
          '          end_time=UTC_TIMESTAMP(), ' \
          '          cpu_time=%%s, ' \
          '          _eng_last_update_time=UTC_TIMESTAMP(), ' \
          '          update_counter=update_counter+1 ' \
          '      WHERE model_id=%%s' \
          % (self.modelsTableName,)

  sqlParams = [self.STATUS_COMPLETED, completionReason, completionMsg,
               cpuTime, modelID]

  if useConnectionID:
    query += " AND _eng_worker_conn_id=%s"
    sqlParams.append(self._connectionID)

  with ConnectionFactory.get() as conn:
    numRowsAffected = conn.cursor.execute(query, sqlParams)

    if numRowsAffected != 1:
      raise InvalidConnectionException(
          ("Tried to set modelID=%r using connectionID=%r, but this model "
           "belongs to some other worker or modelID not found; "
           "numRowsAffected=%r") % (modelID, self._connectionID,
                                    numRowsAffected))
[ "def", "modelSetCompleted", "(", "self", ",", "modelID", ",", "completionReason", ",", "completionMsg", ",", "cpuTime", "=", "0", ",", "useConnectionID", "=", "True", ")", ":", "if", "completionMsg", "is", "None", ":", "completionMsg", "=", "''", "query", "=", "'UPDATE %s SET status=%%s, '", "' completion_reason=%%s, '", "' completion_msg=%%s, '", "' end_time=UTC_TIMESTAMP(), '", "' cpu_time=%%s, '", "' _eng_last_update_time=UTC_TIMESTAMP(), '", "' update_counter=update_counter+1 '", "' WHERE model_id=%%s'", "%", "(", "self", ".", "modelsTableName", ",", ")", "sqlParams", "=", "[", "self", ".", "STATUS_COMPLETED", ",", "completionReason", ",", "completionMsg", ",", "cpuTime", ",", "modelID", "]", "if", "useConnectionID", ":", "query", "+=", "\" AND _eng_worker_conn_id=%s\"", "sqlParams", ".", "append", "(", "self", ".", "_connectionID", ")", "with", "ConnectionFactory", ".", "get", "(", ")", "as", "conn", ":", "numRowsAffected", "=", "conn", ".", "cursor", ".", "execute", "(", "query", ",", "sqlParams", ")", "if", "numRowsAffected", "!=", "1", ":", "raise", "InvalidConnectionException", "(", "(", "\"Tried to set modelID=%r using connectionID=%r, but this model \"", "\"belongs to some other worker or modelID not found; \"", "\"numRowsAffected=%r\"", ")", "%", "(", "modelID", ",", "self", ".", "_connectionID", ",", "numRowsAffected", ")", ")" ]
Mark a model as completed, with the given completionReason and
completionMsg. This will fail if the model does not currently belong to
this client (connection_id doesn't match).

Parameters:
----------------------------------------------------------------
modelID:            model ID of model to modify
completionReason:   completionReason string
completionMsg:      completionMsg string
cpuTime:            amount of CPU time spent on this model
useConnectionID:    True if the connection id of the calling function
                    must be the same as the connection that created the job.
                    Set to True for hypersearch workers, which use this
                    mechanism for orphaned model detection.
[ "Mark", "a", "model", "as", "completed", "with", "the", "given", "completionReason", "and", "completionMsg", ".", "This", "will", "fail", "if", "the", "model", "does", "not", "currently", "belong", "to", "this", "client", "(", "connection_id", "doesn", "t", "match", ")", "." ]
python
valid
apache/incubator-heron
heronpy/streamlet/streamlet.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heronpy/streamlet/streamlet.py#L233-L244
def _default_stage_name_calculator(self, prefix, existing_stage_names):
    """This is the method that's implemented by the operators to get the name of the Streamlet

    :return: The name of the operator
    """
    index = 1
    calculated_name = ""
    while True:
        calculated_name = prefix + "-" + str(index)
        if calculated_name not in existing_stage_names:
            return calculated_name
        index = index + 1
    return "Should Never Get Here"
[ "def", "_default_stage_name_calculator", "(", "self", ",", "prefix", ",", "existing_stage_names", ")", ":", "index", "=", "1", "calculated_name", "=", "\"\"", "while", "True", ":", "calculated_name", "=", "prefix", "+", "\"-\"", "+", "str", "(", "index", ")", "if", "calculated_name", "not", "in", "existing_stage_names", ":", "return", "calculated_name", "index", "=", "index", "+", "1", "return", "\"Should Never Got Here\"" ]
This is the method that's implemented by the operators to get the name of the Streamlet

:return: The name of the operator
[ "This", "is", "the", "method", "that", "s", "implemented", "by", "the", "operators", "to", "get", "the", "name", "of", "the", "Streamlet", ":", "return", ":", "The", "name", "of", "the", "operator" ]
python
valid
ZeitOnline/briefkasten
application/briefkasten/__init__.py
https://github.com/ZeitOnline/briefkasten/blob/ce6b6eeb89196014fe21d68614c20059d02daa11/application/briefkasten/__init__.py#L51-L62
def is_equal(a, b):
    """ a constant time comparison implementation taken from
    http://codahale.com/a-lesson-in-timing-attacks/ and
    Django's `util` module
    https://github.com/django/django/blob/master/django/utils/crypto.py#L82
    """
    if len(a) != len(b):
        return False

    result = 0
    for x, y in zip(a, b):
        result |= ord(x) ^ ord(y)
    return result == 0
[ "def", "is_equal", "(", "a", ",", "b", ")", ":", "if", "len", "(", "a", ")", "!=", "len", "(", "b", ")", ":", "return", "False", "result", "=", "0", "for", "x", ",", "y", "in", "zip", "(", "a", ",", "b", ")", ":", "result", "|=", "ord", "(", "x", ")", "^", "ord", "(", "y", ")", "return", "result", "==", "0" ]
a constant time comparison implementation taken from http://codahale.com/a-lesson-in-timing-attacks/ and Django's `util` module https://github.com/django/django/blob/master/django/utils/crypto.py#L82
[ "a", "constant", "time", "comparison", "implementation", "taken", "from", "http", ":", "//", "codahale", ".", "com", "/", "a", "-", "lesson", "-", "in", "-", "timing", "-", "attacks", "/", "and", "Django", "s", "util", "module", "https", ":", "//", "github", ".", "com", "/", "django", "/", "django", "/", "blob", "/", "master", "/", "django", "/", "utils", "/", "crypto", ".", "py#L82" ]
python
valid
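The same constant-time guarantee is available in the standard library since Python 2.7.7/3.3; a sketch for comparison, not part of the original code:

    # Standard-library constant-time comparison.
    import hmac

    def is_equal_stdlib(a, b):
        # Both arguments should be bytes (or both ASCII str).
        return hmac.compare_digest(a, b)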
sporsh/carnifex
fabfile.py
https://github.com/sporsh/carnifex/blob/82dd3bd2bc134dfb69a78f43171e227f2127060b/fabfile.py#L22-L30
def trial(path=TESTS_PATH, coverage=False):
    """Run tests using trial
    """
    args = ['trial']
    if coverage:
        args.append('--coverage')
    args.append(path)
    print args
    local(' '.join(args))
[ "def", "trial", "(", "path", "=", "TESTS_PATH", ",", "coverage", "=", "False", ")", ":", "args", "=", "[", "'trial'", "]", "if", "coverage", ":", "args", ".", "append", "(", "'--coverage'", ")", "args", ".", "append", "(", "path", ")", "print", "args", "local", "(", "' '", ".", "join", "(", "args", ")", ")" ]
Run tests using trial
[ "Run", "tests", "using", "trial" ]
python
train
kxxoling/flask-decorators
flask_decorators/__init__.py
https://github.com/kxxoling/flask-decorators/blob/e0bf4fc1a5260548063ef8b8adbb782151cd72cc/flask_decorators/__init__.py#L46-L64
def gen(mimetype):
    """``gen`` is a decorator factory function, you just need to set a
    mimetype before using::

        @app.route('/')
        @gen('')
        def index():
            pass

    A full demo for creating an image stream is available on
    `GitHub <https://github.com/kxxoling/flask-video-streaming>`__ .
    """
    def streaming(func, *args, **kwargs):
        @wraps(func)
        def _():
            return Response(func(*args, **kwargs), mimetype=mimetype)
        return _
    return streaming
[ "def", "gen", "(", "mimetype", ")", ":", "def", "streaming", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "@", "wraps", "(", "func", ")", "def", "_", "(", ")", ":", "return", "Response", "(", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", ",", "mimetype", "=", "mimetype", ")", "return", "_", "return", "streaming" ]
``gen`` is a decorator factory function, you just need to set a mimetype before using::

    @app.route('/')
    @gen('')
    def index():
        pass

A full demo for creating an image stream is available on
`GitHub <https://github.com/kxxoling/flask-video-streaming>`__ .
[ "gen", "is", "a", "decorator", "factory", "function", "you", "just", "need", "to", "set", "a", "mimetype", "before", "using", "::" ]
python
train
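A slightly fuller usage sketch of the decorator, assuming a plain Flask app; the route and mimetype are illustrative, and the import path is assumed from the repo layout:

    from flask import Flask
    from flask_decorators import gen  # import path assumed from flask_decorators/__init__.py

    app = Flask(__name__)

    @app.route('/numbers')
    @gen('text/plain')
    def numbers():
        # The decorated generator is wrapped in a streaming Response.
        for i in range(3):
            yield '%d\n' % i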
indico/indico-plugins
piwik/indico_piwik/queries/metrics.py
https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/piwik/indico_piwik/queries/metrics.py#L69-L81
def _get_cumulative_results(self, results):
    """
    Returns a dictionary of {'total': x, 'unique': y} for the date range.
    """
    hits = {'total': 0, 'unique': 0}
    day_hits = list(hits[0] for hits in results.values() if hits)
    for metrics in day_hits:
        hits['total'] += metrics['nb_hits']
        hits['unique'] += metrics['nb_uniq_visitors']
    return hits
[ "def", "_get_cumulative_results", "(", "self", ",", "results", ")", ":", "hits", "=", "{", "'total'", ":", "0", ",", "'unique'", ":", "0", "}", "day_hits", "=", "list", "(", "hits", "[", "0", "]", "for", "hits", "in", "results", ".", "values", "(", ")", "if", "hits", ")", "for", "metrics", "in", "day_hits", ":", "hits", "[", "'total'", "]", "+=", "metrics", "[", "'nb_hits'", "]", "hits", "[", "'unique'", "]", "+=", "metrics", "[", "'nb_uniq_visitors'", "]", "return", "hits" ]
Returns a dictionary of {'total': x, 'unique': y} for the date range.
[ "Returns", "a", "dictionary", "of", "{", "total", ":", "x", "unique", ":", "y", "}", "for", "the", "date", "range", "." ]
python
train
django-danceschool/django-danceschool
danceschool/core/forms.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/forms.py#L942-L947
def save(self, commit=True):
    '''
    If the staff member is an instructor, also update the
    availableForPrivates field on the Instructor record.
    '''
    if getattr(self.instance, 'instructor', None):
        self.instance.instructor.availableForPrivates = self.cleaned_data.pop(
            'availableForPrivates', self.instance.instructor.availableForPrivates)
        self.instance.instructor.save(update_fields=['availableForPrivates', ])
    super(StaffMemberBioChangeForm, self).save(commit=True)
[ "def", "save", "(", "self", ",", "commit", "=", "True", ")", ":", "if", "getattr", "(", "self", ".", "instance", ",", "'instructor'", ",", "None", ")", ":", "self", ".", "instance", ".", "instructor", ".", "availableForPrivates", "=", "self", ".", "cleaned_data", ".", "pop", "(", "'availableForPrivates'", ",", "self", ".", "instance", ".", "instructor", ".", "availableForPrivates", ")", "self", ".", "instance", ".", "instructor", ".", "save", "(", "update_fields", "=", "[", "'availableForPrivates'", ",", "]", ")", "super", "(", "StaffMemberBioChangeForm", ",", "self", ")", ".", "save", "(", "commit", "=", "True", ")" ]
If the staff member is an instructor, also update the availableForPrivates field on the Instructor record.
[ "If", "the", "staff", "member", "is", "an", "instructor", "also", "update", "the", "availableForPrivates", "field", "on", "the", "Instructor", "record", "." ]
python
train
Kane610/deconz
pydeconz/group.py
https://github.com/Kane610/deconz/blob/8a9498dbbc8c168d4a081173ad6c3b1e17fffdf6/pydeconz/group.py#L46-L61
async def async_set_state(self, data):
    """Set state of light group.

    {
        "on": true,
        "bri": 180,
        "hue": 43680,
        "sat": 255,
        "transitiontime": 10
    }

    Also update local values of group since websockets doesn't.
    """
    field = self.deconz_id + '/action'
    await self._async_set_state_callback(field, data)
    self.async_update({'state': data})
[ "async", "def", "async_set_state", "(", "self", ",", "data", ")", ":", "field", "=", "self", ".", "deconz_id", "+", "'/action'", "await", "self", ".", "_async_set_state_callback", "(", "field", ",", "data", ")", "self", ".", "async_update", "(", "{", "'state'", ":", "data", "}", ")" ]
Set state of light group.

{
    "on": true,
    "bri": 180,
    "hue": 43680,
    "sat": 255,
    "transitiontime": 10
}

Also update local values of group since websockets doesn't.
[ "Set", "state", "of", "light", "group", "." ]
python
train
Qiskit/qiskit-terra
qiskit/visualization/counts_visualization.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/visualization/counts_visualization.py#L45-L184
def plot_histogram(data, figsize=(7, 5), color=None, number_to_keep=None,
                   sort='asc', target_string=None, legend=None,
                   bar_labels=True, title=None):
    """Plot a histogram of data.

    Args:
        data (list or dict): This is either a list of dictionaries or a single
            dict containing the values to represent (ex {'001': 130})
        figsize (tuple): Figure size in inches.
        color (list or str): String or list of strings for histogram bar colors.
        number_to_keep (int): The number of terms to plot and rest
            is made into a single bar called 'rest'.
        sort (string): Could be 'asc', 'desc', or 'hamming'.
        target_string (str): Target string if 'sort' is a distance measure.
        legend(list): A list of strings to use for labels of the data.
            The number of entries must match the length of data (if data is a
            list or 1 if it's a dict)
        bar_labels (bool): Label each bar in histogram with probability value.
        title (str): A string to use for the plot title

    Returns:
        matplotlib.Figure: A figure for the rendered histogram.

    Raises:
        ImportError: Matplotlib not available.
        VisualizationError: When legend is provided and the length doesn't
            match the input data.
    """
    if not HAS_MATPLOTLIB:
        raise ImportError('Must have Matplotlib installed.')
    if sort not in VALID_SORTS:
        raise VisualizationError("Value of sort option, %s, isn't a "
                                 "valid choice. Must be 'asc', "
                                 "'desc', or 'hamming'")
    elif sort in DIST_MEAS.keys() and target_string is None:
        err_msg = 'Must define target_string when using distance measure.'
        raise VisualizationError(err_msg)

    if isinstance(data, dict):
        data = [data]

    if legend and len(legend) != len(data):
        raise VisualizationError("Length of legend (%s) doesn't match "
                                 "number of input executions: %s" %
                                 (len(legend), len(data)))

    fig, ax = plt.subplots(figsize=figsize)
    labels = list(sorted(
        functools.reduce(lambda x, y: x.union(y.keys()), data, set())))
    if number_to_keep is not None:
        labels.append('rest')
    if sort in DIST_MEAS.keys():
        dist = []
        for item in labels:
            dist.append(DIST_MEAS[sort](item, target_string))
        labels = [list(x) for x in zip(*sorted(zip(dist, labels),
                                               key=lambda pair: pair[0]))][1]

    labels_dict = OrderedDict()

    # Set bar colors
    if color is None:
        color = ['#648fff', '#dc267f', '#785ef0', '#ffb000', '#fe6100']
    elif isinstance(color, str):
        color = [color]

    all_pvalues = []
    length = len(data)
    for item, execution in enumerate(data):
        if number_to_keep is not None:
            data_temp = dict(Counter(execution).most_common(number_to_keep))
            data_temp["rest"] = sum(execution.values()) - sum(data_temp.values())
            execution = data_temp
        values = []
        for key in labels:
            if key not in execution:
                if number_to_keep is None:
                    labels_dict[key] = 1
                    values.append(0)
                else:
                    values.append(-1)
            else:
                labels_dict[key] = 1
                values.append(execution[key])
        values = np.array(values, dtype=float)
        where_idx = np.where(values >= 0)[0]
        pvalues = values[where_idx] / sum(values[where_idx])
        for value in pvalues:
            all_pvalues.append(value)
        numelem = len(values[where_idx])
        ind = np.arange(numelem)  # the x locations for the groups
        width = 1 / (len(data) + 1)  # the width of the bars
        rects = []
        for idx, val in enumerate(pvalues):
            label = None
            if not idx and legend:
                label = legend[item]
            if val >= 0:
                rects.append(ax.bar(idx + item * width, val, width,
                                    label=label,
                                    color=color[item % len(color)],
                                    zorder=2))
        bar_center = (width / 2) * (length - 1)
        ax.set_xticks(ind + bar_center)
        ax.set_xticklabels(labels_dict.keys(), fontsize=14, rotation=70)
        # attach some text labels
        if bar_labels:
            for rect in rects:
                for rec in rect:
                    height = rec.get_height()
                    if height >= 1e-3:
                        ax.text(rec.get_x() + rec.get_width() / 2.,
                                1.05 * height, '%.3f' % float(height),
                                ha='center', va='bottom', zorder=3)
                    else:
                        ax.text(rec.get_x() + rec.get_width() / 2.,
                                1.05 * height, '0',
                                ha='center', va='bottom', zorder=3)

    # add some text for labels, title, and axes ticks
    ax.set_ylabel('Probabilities', fontsize=14)
    ax.set_ylim([0., min([1.2, max([1.2 * val for val in all_pvalues])])])
    if sort == 'desc':
        ax.invert_xaxis()
    ax.yaxis.set_major_locator(MaxNLocator(5))
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(14)
    ax.set_facecolor('#eeeeee')
    plt.grid(which='major', axis='y', zorder=0, linestyle='--')
    if title:
        plt.title(title)
    if legend:
        ax.legend(loc='upper left', bbox_to_anchor=(1.01, 1.0), ncol=1,
                  borderaxespad=0, frameon=True, fontsize=12)
    if fig:
        plt.close(fig)
    return fig
[ "def", "plot_histogram", "(", "data", ",", "figsize", "=", "(", "7", ",", "5", ")", ",", "color", "=", "None", ",", "number_to_keep", "=", "None", ",", "sort", "=", "'asc'", ",", "target_string", "=", "None", ",", "legend", "=", "None", ",", "bar_labels", "=", "True", ",", "title", "=", "None", ")", ":", "if", "not", "HAS_MATPLOTLIB", ":", "raise", "ImportError", "(", "'Must have Matplotlib installed.'", ")", "if", "sort", "not", "in", "VALID_SORTS", ":", "raise", "VisualizationError", "(", "\"Value of sort option, %s, isn't a \"", "\"valid choice. Must be 'asc', \"", "\"'desc', or 'hamming'\"", ")", "elif", "sort", "in", "DIST_MEAS", ".", "keys", "(", ")", "and", "target_string", "is", "None", ":", "err_msg", "=", "'Must define target_string when using distance measure.'", "raise", "VisualizationError", "(", "err_msg", ")", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "data", "=", "[", "data", "]", "if", "legend", "and", "len", "(", "legend", ")", "!=", "len", "(", "data", ")", ":", "raise", "VisualizationError", "(", "\"Length of legendL (%s) doesn't match \"", "\"number of input executions: %s\"", "%", "(", "len", "(", "legend", ")", ",", "len", "(", "data", ")", ")", ")", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", "figsize", "=", "figsize", ")", "labels", "=", "list", "(", "sorted", "(", "functools", ".", "reduce", "(", "lambda", "x", ",", "y", ":", "x", ".", "union", "(", "y", ".", "keys", "(", ")", ")", ",", "data", ",", "set", "(", ")", ")", ")", ")", "if", "number_to_keep", "is", "not", "None", ":", "labels", ".", "append", "(", "'rest'", ")", "if", "sort", "in", "DIST_MEAS", ".", "keys", "(", ")", ":", "dist", "=", "[", "]", "for", "item", "in", "labels", ":", "dist", ".", "append", "(", "DIST_MEAS", "[", "sort", "]", "(", "item", ",", "target_string", ")", ")", "labels", "=", "[", "list", "(", "x", ")", "for", "x", "in", "zip", "(", "*", "sorted", "(", "zip", "(", "dist", ",", "labels", ")", ",", "key", "=", "lambda", "pair", ":", "pair", "[", "0", "]", ")", ")", "]", "[", "1", "]", "labels_dict", "=", "OrderedDict", "(", ")", "# Set bar colors", "if", "color", "is", "None", ":", "color", "=", "[", "'#648fff'", ",", "'#dc267f'", ",", "'#785ef0'", ",", "'#ffb000'", ",", "'#fe6100'", "]", "elif", "isinstance", "(", "color", ",", "str", ")", ":", "color", "=", "[", "color", "]", "all_pvalues", "=", "[", "]", "length", "=", "len", "(", "data", ")", "for", "item", ",", "execution", "in", "enumerate", "(", "data", ")", ":", "if", "number_to_keep", "is", "not", "None", ":", "data_temp", "=", "dict", "(", "Counter", "(", "execution", ")", ".", "most_common", "(", "number_to_keep", ")", ")", "data_temp", "[", "\"rest\"", "]", "=", "sum", "(", "execution", ".", "values", "(", ")", ")", "-", "sum", "(", "data_temp", ".", "values", "(", ")", ")", "execution", "=", "data_temp", "values", "=", "[", "]", "for", "key", "in", "labels", ":", "if", "key", "not", "in", "execution", ":", "if", "number_to_keep", "is", "None", ":", "labels_dict", "[", "key", "]", "=", "1", "values", ".", "append", "(", "0", ")", "else", ":", "values", ".", "append", "(", "-", "1", ")", "else", ":", "labels_dict", "[", "key", "]", "=", "1", "values", ".", "append", "(", "execution", "[", "key", "]", ")", "values", "=", "np", ".", "array", "(", "values", ",", "dtype", "=", "float", ")", "where_idx", "=", "np", ".", "where", "(", "values", ">=", "0", ")", "[", "0", "]", "pvalues", "=", "values", "[", "where_idx", "]", "/", "sum", "(", "values", "[", "where_idx", "]", ")", "for", "value", "in", 
"pvalues", ":", "all_pvalues", ".", "append", "(", "value", ")", "numelem", "=", "len", "(", "values", "[", "where_idx", "]", ")", "ind", "=", "np", ".", "arange", "(", "numelem", ")", "# the x locations for the groups", "width", "=", "1", "/", "(", "len", "(", "data", ")", "+", "1", ")", "# the width of the bars", "rects", "=", "[", "]", "for", "idx", ",", "val", "in", "enumerate", "(", "pvalues", ")", ":", "label", "=", "None", "if", "not", "idx", "and", "legend", ":", "label", "=", "legend", "[", "item", "]", "if", "val", ">=", "0", ":", "rects", ".", "append", "(", "ax", ".", "bar", "(", "idx", "+", "item", "*", "width", ",", "val", ",", "width", ",", "label", "=", "label", ",", "color", "=", "color", "[", "item", "%", "len", "(", "color", ")", "]", ",", "zorder", "=", "2", ")", ")", "bar_center", "=", "(", "width", "/", "2", ")", "*", "(", "length", "-", "1", ")", "ax", ".", "set_xticks", "(", "ind", "+", "bar_center", ")", "ax", ".", "set_xticklabels", "(", "labels_dict", ".", "keys", "(", ")", ",", "fontsize", "=", "14", ",", "rotation", "=", "70", ")", "# attach some text labels", "if", "bar_labels", ":", "for", "rect", "in", "rects", ":", "for", "rec", "in", "rect", ":", "height", "=", "rec", ".", "get_height", "(", ")", "if", "height", ">=", "1e-3", ":", "ax", ".", "text", "(", "rec", ".", "get_x", "(", ")", "+", "rec", ".", "get_width", "(", ")", "/", "2.", ",", "1.05", "*", "height", ",", "'%.3f'", "%", "float", "(", "height", ")", ",", "ha", "=", "'center'", ",", "va", "=", "'bottom'", ",", "zorder", "=", "3", ")", "else", ":", "ax", ".", "text", "(", "rec", ".", "get_x", "(", ")", "+", "rec", ".", "get_width", "(", ")", "/", "2.", ",", "1.05", "*", "height", ",", "'0'", ",", "ha", "=", "'center'", ",", "va", "=", "'bottom'", ",", "zorder", "=", "3", ")", "# add some text for labels, title, and axes ticks", "ax", ".", "set_ylabel", "(", "'Probabilities'", ",", "fontsize", "=", "14", ")", "ax", ".", "set_ylim", "(", "[", "0.", ",", "min", "(", "[", "1.2", ",", "max", "(", "[", "1.2", "*", "val", "for", "val", "in", "all_pvalues", "]", ")", "]", ")", "]", ")", "if", "sort", "==", "'desc'", ":", "ax", ".", "invert_xaxis", "(", ")", "ax", ".", "yaxis", ".", "set_major_locator", "(", "MaxNLocator", "(", "5", ")", ")", "for", "tick", "in", "ax", ".", "yaxis", ".", "get_major_ticks", "(", ")", ":", "tick", ".", "label", ".", "set_fontsize", "(", "14", ")", "ax", ".", "set_facecolor", "(", "'#eeeeee'", ")", "plt", ".", "grid", "(", "which", "=", "'major'", ",", "axis", "=", "'y'", ",", "zorder", "=", "0", ",", "linestyle", "=", "'--'", ")", "if", "title", ":", "plt", ".", "title", "(", "title", ")", "if", "legend", ":", "ax", ".", "legend", "(", "loc", "=", "'upper left'", ",", "bbox_to_anchor", "=", "(", "1.01", ",", "1.0", ")", ",", "ncol", "=", "1", ",", "borderaxespad", "=", "0", ",", "frameon", "=", "True", ",", "fontsize", "=", "12", ")", "if", "fig", ":", "plt", ".", "close", "(", "fig", ")", "return", "fig" ]
Plot a histogram of data.

Args:
    data (list or dict): This is either a list of dictionaries or a single
        dict containing the values to represent (ex {'001': 130})
    figsize (tuple): Figure size in inches.
    color (list or str): String or list of strings for histogram bar colors.
    number_to_keep (int): The number of terms to plot and rest
        is made into a single bar called 'rest'.
    sort (string): Could be 'asc', 'desc', or 'hamming'.
    target_string (str): Target string if 'sort' is a distance measure.
    legend(list): A list of strings to use for labels of the data.
        The number of entries must match the length of data (if data is a
        list or 1 if it's a dict)
    bar_labels (bool): Label each bar in histogram with probability value.
    title (str): A string to use for the plot title

Returns:
    matplotlib.Figure: A figure for the rendered histogram.

Raises:
    ImportError: Matplotlib not available.
    VisualizationError: When legend is provided and the length doesn't
        match the input data.
[ "Plot", "a", "histogram", "of", "data", "." ]
python
test
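An illustrative call with made-up counts; `plot_histogram` is exposed under `qiskit.visualization` in Terra, and the output file name is arbitrary:

    from qiskit.visualization import plot_histogram

    counts_a = {'00': 512, '11': 488}
    counts_b = {'00': 480, '01': 20, '11': 500}
    fig = plot_histogram([counts_a, counts_b], legend=['run A', 'run B'],
                         sort='desc', title='Measurement outcomes')
    fig.savefig('hist.png')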
basecrm/basecrm-python
basecrm/services.py
https://github.com/basecrm/basecrm-python/blob/7c1cf97dbaba8aeb9ff89f8a54f945a8702349f6/basecrm/services.py#L528-L541
def list(self, **params):
    """
    Retrieve all deal unqualified reasons

    Returns all deal unqualified reasons available to the user
    according to the parameters provided

    :calls: ``get /deal_unqualified_reasons``
    :param dict params: (optional) Search options.
    :return: List of dictionaries that support attribute-style access,
        which represent collection of DealUnqualifiedReasons.
    :rtype: list
    """
    _, _, deal_unqualified_reasons = self.http_client.get("/deal_unqualified_reasons", params=params)
    return deal_unqualified_reasons
[ "def", "list", "(", "self", ",", "*", "*", "params", ")", ":", "_", ",", "_", ",", "deal_unqualified_reasons", "=", "self", ".", "http_client", ".", "get", "(", "\"/deal_unqualified_reasons\"", ",", "params", "=", "params", ")", "return", "deal_unqualified_reasons" ]
Retrieve all deal unqualified reasons

Returns all deal unqualified reasons available to the user according to the parameters provided

:calls: ``get /deal_unqualified_reasons``
:param dict params: (optional) Search options.
:return: List of dictionaries that support attribute-style access, which represent collection of DealUnqualifiedReasons.
:rtype: list
[ "Retrieve", "all", "deal", "unqualified", "reasons" ]
python
train
AltSchool/dynamic-rest
dynamic_rest/routers.py
https://github.com/AltSchool/dynamic-rest/blob/5b0338c3dd8bc638d60c3bb92645857c5b89c920/dynamic_rest/routers.py#L108-L154
def register(self, prefix, viewset, base_name=None):
    """Add any registered route into a global API directory.

    If the prefix includes a path separator,
    store the URL in the directory under the first path segment.
    Otherwise, store it as-is.

    For example, if there are two registered prefixes,
    'v1/users' and 'groups', `directory` will look like:

    {
        'v1': {
            'users': {
                '_url': 'users-list'
                '_viewset': <class 'UserViewSet'>
            },
        }
        'groups': {
            '_url': 'groups-list'
            '_viewset': <class 'GroupViewSet'>
        }
    }
    """
    if base_name is None:
        base_name = prefix

    super(DynamicRouter, self).register(prefix, viewset, base_name)

    prefix_parts = prefix.split('/')
    if len(prefix_parts) > 1:
        prefix = prefix_parts[0]
        endpoint = '/'.join(prefix_parts[1:])
    else:
        endpoint = prefix
        prefix = None

    if prefix and prefix not in directory:
        current = directory[prefix] = {}
    else:
        current = directory.get(prefix, directory)

    list_name = self.routes[0].name
    url_name = list_name.format(basename=base_name)
    if endpoint not in current:
        current[endpoint] = {}
    current[endpoint]['_url'] = url_name
    current[endpoint]['_viewset'] = viewset
[ "def", "register", "(", "self", ",", "prefix", ",", "viewset", ",", "base_name", "=", "None", ")", ":", "if", "base_name", "is", "None", ":", "base_name", "=", "prefix", "super", "(", "DynamicRouter", ",", "self", ")", ".", "register", "(", "prefix", ",", "viewset", ",", "base_name", ")", "prefix_parts", "=", "prefix", ".", "split", "(", "'/'", ")", "if", "len", "(", "prefix_parts", ")", ">", "1", ":", "prefix", "=", "prefix_parts", "[", "0", "]", "endpoint", "=", "'/'", ".", "join", "(", "prefix_parts", "[", "1", ":", "]", ")", "else", ":", "endpoint", "=", "prefix", "prefix", "=", "None", "if", "prefix", "and", "prefix", "not", "in", "directory", ":", "current", "=", "directory", "[", "prefix", "]", "=", "{", "}", "else", ":", "current", "=", "directory", ".", "get", "(", "prefix", ",", "directory", ")", "list_name", "=", "self", ".", "routes", "[", "0", "]", ".", "name", "url_name", "=", "list_name", ".", "format", "(", "basename", "=", "base_name", ")", "if", "endpoint", "not", "in", "current", ":", "current", "[", "endpoint", "]", "=", "{", "}", "current", "[", "endpoint", "]", "[", "'_url'", "]", "=", "url_name", "current", "[", "endpoint", "]", "[", "'_viewset'", "]", "=", "viewset" ]
Add any registered route into a global API directory.

If the prefix includes a path separator,
store the URL in the directory under the first path segment.
Otherwise, store it as-is.

For example, if there are two registered prefixes,
'v1/users' and 'groups', `directory` will look like:

{
    'v1': {
        'users': {
            '_url': 'users-list'
            '_viewset': <class 'UserViewSet'>
        },
    }
    'groups': {
        '_url': 'groups-list'
        '_viewset': <class 'GroupViewSet'>
    }
}
[ "Add", "any", "registered", "route", "into", "a", "global", "API", "directory", "." ]
python
train
twilio/twilio-python
twilio/rest/ip_messaging/v2/service/channel/message.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/ip_messaging/v2/service/channel/message.py#L230-L244
def get_instance(self, payload):
    """
    Build an instance of MessageInstance

    :param dict payload: Payload response from the API

    :returns: twilio.rest.chat.v2.service.channel.message.MessageInstance
    :rtype: twilio.rest.chat.v2.service.channel.message.MessageInstance
    """
    return MessageInstance(
        self._version,
        payload,
        service_sid=self._solution['service_sid'],
        channel_sid=self._solution['channel_sid'],
    )
[ "def", "get_instance", "(", "self", ",", "payload", ")", ":", "return", "MessageInstance", "(", "self", ".", "_version", ",", "payload", ",", "service_sid", "=", "self", ".", "_solution", "[", "'service_sid'", "]", ",", "channel_sid", "=", "self", ".", "_solution", "[", "'channel_sid'", "]", ",", ")" ]
Build an instance of MessageInstance

:param dict payload: Payload response from the API

:returns: twilio.rest.chat.v2.service.channel.message.MessageInstance
:rtype: twilio.rest.chat.v2.service.channel.message.MessageInstance
[ "Build", "an", "instance", "of", "MessageInstance" ]
python
train
dillonhicks/rekt
rekt/service.py
https://github.com/dillonhicks/rekt/blob/3848b272726c78214cb96b906f9b9f289497f27e/rekt/service.py#L227-L249
def create_rest_client_class(name, apis, BaseClass=RestClient):
    """
    Generate the api call functions and attach them to the
    generated RestClient subclass with the name <Service>Client.
    """
    apis_with_actions = list(itertools.chain.from_iterable([
        zip([api] * len(api.actions), api.actions) for api in apis]))

    api_funcs = [create_api_call_func(api, verb) for api, verb in apis_with_actions]
    api_funcs.extend([create_async_api_call_func(api, verb) for api, verb in apis_with_actions])
    api_mapper = dict([(f.__name__, f) for f in api_funcs])

    # Adapted from :
    # http://stackoverflow.com/questions/15247075/how-can-i-dynamically-create-derived-classes-from-a-base-class
    def __init__(self, thread_count=_ASYNC_WORKER_THREAD_COUNT, **reqargs):
        BaseClass.__init__(self)
        setattr(self, 'reqargs', read_only_dict(reqargs))
        self._executor = concurrent.futures.ThreadPoolExecutor(thread_count)

    api_mapper['__init__'] = __init__

    ClientClass = type(_CLIENT_NAME_FMT.format(name), (BaseClass,), api_mapper)
    return ClientClass
[ "def", "create_rest_client_class", "(", "name", ",", "apis", ",", "BaseClass", "=", "RestClient", ")", ":", "apis_with_actions", "=", "list", "(", "itertools", ".", "chain", ".", "from_iterable", "(", "[", "zip", "(", "[", "api", "]", "*", "len", "(", "api", ".", "actions", ")", ",", "api", ".", "actions", ")", "for", "api", "in", "apis", "]", ")", ")", "api_funcs", "=", "[", "create_api_call_func", "(", "api", ",", "verb", ")", "for", "api", ",", "verb", "in", "apis_with_actions", "]", "api_funcs", ".", "extend", "(", "[", "create_async_api_call_func", "(", "api", ",", "verb", ")", "for", "api", ",", "verb", "in", "apis_with_actions", "]", ")", "api_mapper", "=", "dict", "(", "[", "(", "f", ".", "__name__", ",", "f", ")", "for", "f", "in", "api_funcs", "]", ")", "# Adapted from :", "# http://stackoverflow.com/questions/15247075/how-can-i-dynamically-create-derived-classes-from-a-base-class", "def", "__init__", "(", "self", ",", "thread_count", "=", "_ASYNC_WORKER_THREAD_COUNT", ",", "*", "*", "reqargs", ")", ":", "BaseClass", ".", "__init__", "(", "self", ")", "setattr", "(", "self", ",", "'reqargs'", ",", "read_only_dict", "(", "reqargs", ")", ")", "self", ".", "_executor", "=", "concurrent", ".", "futures", ".", "ThreadPoolExecutor", "(", "thread_count", ")", "api_mapper", "[", "'__init__'", "]", "=", "__init__", "ClientClass", "=", "type", "(", "_CLIENT_NAME_FMT", ".", "format", "(", "name", ")", ",", "(", "BaseClass", ",", ")", ",", "api_mapper", ")", "return", "ClientClass" ]
Generate the api call functions and attach them to the generated RestClient subclass with the name <Service>Client.
[ "Generate", "the", "api", "call", "functions", "and", "attach", "them", "to", "the", "generated", "RestClient", "subclass", "with", "the", "name", "<Service", ">", "Client", "." ]
python
train
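The core trick above is building a subclass at runtime with the three-argument form of type(); a self-contained sketch of that pattern, with illustrative names not taken from the rekt codebase:

    # type(name, bases, namespace) returns a new class object.
    def ping(self):
        return 'pong'

    DemoClient = type('DemoClient', (object,), {'ping': ping})
    print(DemoClient().ping())  # -> pong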
cuihantao/andes
andes/utils/time.py
https://github.com/cuihantao/andes/blob/7067898d4f26ce7534e968b8486c4aa8fe3a511a/andes/utils/time.py#L5-L19
def elapsed(t0=0.0):
    """get elapsed time from the given time

    Returns:
        now: the absolute time now
        dt_str: elapsed time in string
    """
    now = time()
    dt = now - t0
    dt_sec = Decimal(str(dt)).quantize(Decimal('.0001'), rounding=ROUND_DOWN)
    if dt_sec <= 1:
        dt_str = str(dt_sec) + ' second'
    else:
        dt_str = str(dt_sec) + ' seconds'
    return now, dt_str
[ "def", "elapsed", "(", "t0", "=", "0.0", ")", ":", "now", "=", "time", "(", ")", "dt", "=", "now", "-", "t0", "dt_sec", "=", "Decimal", "(", "str", "(", "dt", ")", ")", ".", "quantize", "(", "Decimal", "(", "'.0001'", ")", ",", "rounding", "=", "ROUND_DOWN", ")", "if", "dt_sec", "<=", "1", ":", "dt_str", "=", "str", "(", "dt_sec", ")", "+", "' second'", "else", ":", "dt_str", "=", "str", "(", "dt_sec", ")", "+", "' seconds'", "return", "now", ",", "dt_str" ]
get elapsed time from the given time

Returns:
    now: the absolute time now
    dt_str: elapsed time in string
[ "get", "elapsed", "time", "from", "the", "give", "time" ]
python
train
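The two return values imply a call-twice pattern; a usage sketch with a hypothetical workload, import path assumed from andes/utils/time.py:

    from andes.utils.time import elapsed
    import time as _time

    t0, _ = elapsed()         # first call: capture the current absolute time
    _time.sleep(0.25)         # hypothetical workload
    _, dt_str = elapsed(t0)   # second call: format the elapsed interval
    print('done in', dt_str)  # e.g. "done in 0.2500 second"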
senaite/senaite.core
bika/lims/browser/analysisrequest/analysisrequests.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/analysisrequest/analysisrequests.py#L434-L517
def update(self):
    """Called before the listing renders
    """
    super(AnalysisRequestsView, self).update()

    self.workflow = api.get_tool("portal_workflow")
    self.member = self.mtool.getAuthenticatedMember()
    self.roles = self.member.getRoles()
    setup = api.get_bika_setup()

    # remove `to_be_sampled` filter
    if not setup.getSamplingWorkflowEnabled():
        self.review_states = filter(
            lambda x: x.get("id") != "to_be_sampled", self.review_states)

    # remove `scheduled_sampling` filter
    if not setup.getScheduleSamplingEnabled():
        self.review_states = filter(
            lambda x: x.get("id") != "scheduled_sampling",
            self.review_states)

    # remove `to_be_preserved` filter
    if not setup.getSamplePreservationEnabled():
        self.review_states = filter(
            lambda x: x.get("id") != "to_be_preserved", self.review_states)

    # remove `rejected` filter
    if not setup.getRejectionReasons():
        self.review_states = filter(
            lambda x: x.get("id") != "rejected", self.review_states)

    self.hideclientlink = "RegulatoryInspector" in self.roles \
        and "Manager" not in self.roles \
        and "LabManager" not in self.roles \
        and "LabClerk" not in self.roles

    if self.context.portal_type == "AnalysisRequestsFolder" and \
            (self.mtool.checkPermission(AddAnalysisRequest, self.context)):
        self.context_actions[_("Add")] = \
            {"url": "ar_add?ar_count=1",
             'permission': 'Add portal content',
             "icon": "++resource++bika.lims.images/add.png"}

    self.editresults = -1
    self.clients = {}

    # self.user_is_preserver = "Preserver" in self.roles

    # Printing workflow enabled?
    # If not, remove the Column
    self.printwfenabled = \
        self.context.bika_setup.getPrintingWorkflowEnabled()
    printed_colname = "Printed"
    if not self.printwfenabled and printed_colname in self.columns:
        # Remove "Printed" columns
        del self.columns[printed_colname]
        tmprvs = []
        for rs in self.review_states:
            tmprs = rs
            tmprs["columns"] = [c for c in rs.get("columns", [])
                                if c != printed_colname]
            tmprvs.append(tmprs)
        self.review_states = tmprvs
    elif self.printwfenabled:
        # Print button to choose multiple ARs and print them.
        review_states = []
        for review_state in self.review_states:
            review_state.get("custom_transitions", []).extend(
                [{"id": "print_sample",
                  "title": _("Print"),
                  "url": "workflow_action?action=print_sample"}, ])
            review_states.append(review_state)
        self.review_states = review_states

    # Only "senaite.core: ManageAnalysisRequests" may see the copy to new button.
    # elsewhere it is hacked in where required.
    if self.copy_to_new_allowed:
        review_states = []
        for review_state in self.review_states:
            review_state.get("custom_transitions", []).extend(
                [{"id": "copy_to_new",
                  "title": _("Copy to new"),
                  "url": "workflow_action?action=copy_to_new"}, ])
            review_states.append(review_state)
        self.review_states = review_states
[ "def", "update", "(", "self", ")", ":", "super", "(", "AnalysisRequestsView", ",", "self", ")", ".", "update", "(", ")", "self", ".", "workflow", "=", "api", ".", "get_tool", "(", "\"portal_workflow\"", ")", "self", ".", "member", "=", "self", ".", "mtool", ".", "getAuthenticatedMember", "(", ")", "self", ".", "roles", "=", "self", ".", "member", ".", "getRoles", "(", ")", "setup", "=", "api", ".", "get_bika_setup", "(", ")", "# remove `to_be_sampled` filter", "if", "not", "setup", ".", "getSamplingWorkflowEnabled", "(", ")", ":", "self", ".", "review_states", "=", "filter", "(", "lambda", "x", ":", "x", ".", "get", "(", "\"id\"", ")", "!=", "\"to_be_sampled\"", ",", "self", ".", "review_states", ")", "# remove `scheduled_sampling` filter", "if", "not", "setup", ".", "getScheduleSamplingEnabled", "(", ")", ":", "self", ".", "review_states", "=", "filter", "(", "lambda", "x", ":", "x", ".", "get", "(", "\"id\"", ")", "!=", "\"scheduled_sampling\"", ",", "self", ".", "review_states", ")", "# remove `to_be_preserved` filter", "if", "not", "setup", ".", "getSamplePreservationEnabled", "(", ")", ":", "self", ".", "review_states", "=", "filter", "(", "lambda", "x", ":", "x", ".", "get", "(", "\"id\"", ")", "!=", "\"to_be_preserved\"", ",", "self", ".", "review_states", ")", "# remove `rejected` filter", "if", "not", "setup", ".", "getRejectionReasons", "(", ")", ":", "self", ".", "review_states", "=", "filter", "(", "lambda", "x", ":", "x", ".", "get", "(", "\"id\"", ")", "!=", "\"rejected\"", ",", "self", ".", "review_states", ")", "self", ".", "hideclientlink", "=", "\"RegulatoryInspector\"", "in", "self", ".", "roles", "and", "\"Manager\"", "not", "in", "self", ".", "roles", "and", "\"LabManager\"", "not", "in", "self", ".", "roles", "and", "\"LabClerk\"", "not", "in", "self", ".", "roles", "if", "self", ".", "context", ".", "portal_type", "==", "\"AnalysisRequestsFolder\"", "and", "(", "self", ".", "mtool", ".", "checkPermission", "(", "AddAnalysisRequest", ",", "self", ".", "context", ")", ")", ":", "self", ".", "context_actions", "[", "_", "(", "\"Add\"", ")", "]", "=", "{", "\"url\"", ":", "\"ar_add?ar_count=1\"", ",", "'permission'", ":", "'Add portal content'", ",", "\"icon\"", ":", "\"++resource++bika.lims.images/add.png\"", "}", "self", ".", "editresults", "=", "-", "1", "self", ".", "clients", "=", "{", "}", "# self.user_is_preserver = \"Preserver\" in self.roles", "# Printing workflow enabled?", "# If not, remove the Column", "self", ".", "printwfenabled", "=", "self", ".", "context", ".", "bika_setup", ".", "getPrintingWorkflowEnabled", "(", ")", "printed_colname", "=", "\"Printed\"", "if", "not", "self", ".", "printwfenabled", "and", "printed_colname", "in", "self", ".", "columns", ":", "# Remove \"Printed\" columns", "del", "self", ".", "columns", "[", "printed_colname", "]", "tmprvs", "=", "[", "]", "for", "rs", "in", "self", ".", "review_states", ":", "tmprs", "=", "rs", "tmprs", "[", "\"columns\"", "]", "=", "[", "c", "for", "c", "in", "rs", ".", "get", "(", "\"columns\"", ",", "[", "]", ")", "if", "c", "!=", "printed_colname", "]", "tmprvs", ".", "append", "(", "tmprs", ")", "self", ".", "review_states", "=", "tmprvs", "elif", "self", ".", "printwfenabled", ":", "# Print button to choose multiple ARs and print them.", "review_states", "=", "[", "]", "for", "review_state", "in", "self", ".", "review_states", ":", "review_state", ".", "get", "(", "\"custom_transitions\"", ",", "[", "]", ")", ".", "extend", "(", "[", "{", "\"id\"", ":", "\"print_sample\"", ",", "\"title\"", 
":", "_", "(", "\"Print\"", ")", ",", "\"url\"", ":", "\"workflow_action?action=print_sample\"", "}", ",", "]", ")", "review_states", ".", "append", "(", "review_state", ")", "self", ".", "review_states", "=", "review_states", "# Only \"senaite.core: ManageAnalysisRequests\" may see the copy to new button.", "# elsewhere it is hacked in where required.", "if", "self", ".", "copy_to_new_allowed", ":", "review_states", "=", "[", "]", "for", "review_state", "in", "self", ".", "review_states", ":", "review_state", ".", "get", "(", "\"custom_transitions\"", ",", "[", "]", ")", ".", "extend", "(", "[", "{", "\"id\"", ":", "\"copy_to_new\"", ",", "\"title\"", ":", "_", "(", "\"Copy to new\"", ")", ",", "\"url\"", ":", "\"workflow_action?action=copy_to_new\"", "}", ",", "]", ")", "review_states", ".", "append", "(", "review_state", ")", "self", ".", "review_states", "=", "review_states" ]
Called before the listing renders
[ "Called", "before", "the", "listing", "renders" ]
python
train
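The filtering above repeats one pattern: drop a review state by its ``id`` whenever a setup flag is off. A minimal, self-contained sketch of that pattern (plain data, not the senaite API; a single set of disabled ids replaces the four separate passes)::

    review_states = [{"id": "default"}, {"id": "to_be_sampled"}, {"id": "rejected"}]
    disabled = {"to_be_sampled", "rejected"}  # flags assumed to be off
    review_states = [rs for rs in review_states if rs.get("id") not in disabled]
    print([rs["id"] for rs in review_states])  # ['default']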
pypa/pipenv
pipenv/vendor/click/utils.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/click/utils.py#L105-L121
def open(self): """Opens the file if it's not yet open. This call might fail with a :exc:`FileError`. Not handling this error will produce an error that Click shows. """ if self._f is not None: return self._f try: rv, self.should_close = open_stream(self.name, self.mode, self.encoding, self.errors, atomic=self.atomic) except (IOError, OSError) as e: from .exceptions import FileError raise FileError(self.name, hint=get_streerror(e)) self._f = rv return rv
[ "def", "open", "(", "self", ")", ":", "if", "self", ".", "_f", "is", "not", "None", ":", "return", "self", ".", "_f", "try", ":", "rv", ",", "self", ".", "should_close", "=", "open_stream", "(", "self", ".", "name", ",", "self", ".", "mode", ",", "self", ".", "encoding", ",", "self", ".", "errors", ",", "atomic", "=", "self", ".", "atomic", ")", "except", "(", "IOError", ",", "OSError", ")", "as", "e", ":", "from", ".", "exceptions", "import", "FileError", "raise", "FileError", "(", "self", ".", "name", ",", "hint", "=", "get_streerror", "(", "e", ")", ")", "self", ".", "_f", "=", "rv", "return", "rv" ]
Opens the file if it's not yet open. This call might fail with a :exc:`FileError`. Not handling this error will produce an error that Click shows.
[ "Opens", "the", "file", "if", "it", "s", "not", "yet", "open", ".", "This", "call", "might", "fail", "with", "a", ":", "exc", ":", "FileError", ".", "Not", "handling", "this", "error", "will", "produce", "an", "error", "that", "Click", "shows", "." ]
python
train
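The record shows the lazy-open idiom: cache the handle and open only on first use. A stripped-down sketch of the same idea without Click's stream helpers or error wrapping (``LazyHandle`` is a hypothetical name)::

    class LazyHandle(object):
        def __init__(self, name, mode='r'):
            self.name, self.mode, self._f = name, mode, None

        def open(self):
            if self._f is None:  # open once, on first access
                self._f = open(self.name, self.mode)
            return self._f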
mixmastamyk/fr
fr/utils.py
https://github.com/mixmastamyk/fr/blob/f96df8ed7210a033b9e711bbed768d4116213bfb/fr/utils.py#L67-L74
def run(cmd, shell=False, debug=False): 'Run a command and return the output.' proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=shell) (out, _) = proc.communicate() # no need for stderr if debug: print(cmd) print(out) return out
[ "def", "run", "(", "cmd", ",", "shell", "=", "False", ",", "debug", "=", "False", ")", ":", "proc", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "shell", ")", "(", "out", ",", "_", ")", "=", "proc", ".", "communicate", "(", ")", "# no need for stderr", "if", "debug", ":", "print", "(", "cmd", ")", "print", "(", "out", ")", "return", "out" ]
Run a command and return the output.
[ "Run", "a", "command", "and", "return", "the", "output", "." ]
python
train
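Going by the signature alone, a call might look like the following; with the default ``shell=False`` the command must be an argument list, and the captured output is bytes on Python 3::

    out = run(['echo', 'hello'], debug=True)  # prints the command, then b'hello\n'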
scottrice/pysteam
pysteam/_crc_algorithms.py
https://github.com/scottrice/pysteam/blob/1eb2254b5235a053a953e596fa7602d0b110245d/pysteam/_crc_algorithms.py#L190-L212
def gen_table(self): """ This function generates the CRC table used for the table_driven CRC algorithm. The Python version cannot handle tables of an index width other than 8. See the generated C code for tables with different sizes instead. """ table_length = 1 << self.TableIdxWidth tbl = [0] * table_length for i in range(table_length): register = i if self.ReflectIn: register = self.reflect(register, self.TableIdxWidth) register = register << (self.Width - self.TableIdxWidth + self.CrcShift) for j in range(self.TableIdxWidth): if register & (self.MSB_Mask << self.CrcShift) != 0: register = (register << 1) ^ (self.Poly << self.CrcShift) else: register = (register << 1) if self.ReflectIn: register = self.reflect(register >> self.CrcShift, self.Width) << self.CrcShift tbl[i] = register & (self.Mask << self.CrcShift) return tbl
[ "def", "gen_table", "(", "self", ")", ":", "table_length", "=", "1", "<<", "self", ".", "TableIdxWidth", "tbl", "=", "[", "0", "]", "*", "table_length", "for", "i", "in", "range", "(", "table_length", ")", ":", "register", "=", "i", "if", "self", ".", "ReflectIn", ":", "register", "=", "self", ".", "reflect", "(", "register", ",", "self", ".", "TableIdxWidth", ")", "register", "=", "register", "<<", "(", "self", ".", "Width", "-", "self", ".", "TableIdxWidth", "+", "self", ".", "CrcShift", ")", "for", "j", "in", "range", "(", "self", ".", "TableIdxWidth", ")", ":", "if", "register", "&", "(", "self", ".", "MSB_Mask", "<<", "self", ".", "CrcShift", ")", "!=", "0", ":", "register", "=", "(", "register", "<<", "1", ")", "^", "(", "self", ".", "Poly", "<<", "self", ".", "CrcShift", ")", "else", ":", "register", "=", "(", "register", "<<", "1", ")", "if", "self", ".", "ReflectIn", ":", "register", "=", "self", ".", "reflect", "(", "register", ">>", "self", ".", "CrcShift", ",", "self", ".", "Width", ")", "<<", "self", ".", "CrcShift", "tbl", "[", "i", "]", "=", "register", "&", "(", "self", ".", "Mask", "<<", "self", ".", "CrcShift", ")", "return", "tbl" ]
This function generates the CRC table used for the table_driven CRC algorithm. The Python version cannot handle tables of an index width other than 8. See the generated C code for tables with different sizes instead.
[ "This", "function", "generates", "the", "CRC", "table", "used", "for", "the", "table_driven", "CRC", "algorithm", ".", "The", "Python", "version", "cannot", "handle", "tables", "of", "an", "index", "width", "other", "than", "8", ".", "See", "the", "generated", "C", "code", "for", "tables", "with", "different", "sizes", "instead", "." ]
python
train
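As a concrete instance of the table-driven idea, here is a minimal reflected CRC-32 sketch (standard polynomial; a fixed-width special case, not the generalized algorithm of the class above)::

    def crc32_table():
        poly = 0xEDB88320  # reflected CRC-32 polynomial
        table = []
        for i in range(256):
            reg = i
            for _ in range(8):
                reg = (reg >> 1) ^ poly if reg & 1 else reg >> 1
            table.append(reg)
        return table

    def crc32(data, table=crc32_table()):
        crc = 0xFFFFFFFF
        for b in bytearray(data):  # bytearray iterates ints on Python 2 and 3
            crc = (crc >> 8) ^ table[(crc ^ b) & 0xFF]
        return crc ^ 0xFFFFFFFF

    assert crc32(b'123456789') == 0xCBF43926  # standard check value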
tyarkoni/pliers
pliers/external/tensorflow/classify_image.py
https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/external/tensorflow/classify_image.py#L130-L167
def run_inference_on_image(image): """Runs inference on an image. Args: image: Image file name. Returns: Nothing """ if not tf.gfile.Exists(image): tf.logging.fatal('File does not exist %s', image) image_data = tf.gfile.FastGFile(image, 'rb').read() # Creates graph from saved GraphDef. create_graph() with tf.Session() as sess: # Some useful tensors: # 'softmax:0': A tensor containing the normalized prediction across # 1000 labels. # 'pool_3:0': A tensor containing the next-to-last layer containing 2048 # float description of the image. # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG # encoding of the image. # Runs the softmax tensor by feeding the image_data as input to the graph. softmax_tensor = sess.graph.get_tensor_by_name('softmax:0') predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data}) predictions = np.squeeze(predictions) # Creates node ID --> English string lookup. node_lookup = NodeLookup() top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1] for node_id in top_k: human_string = node_lookup.id_to_string(node_id) score = predictions[node_id] print('%s (score = %.5f)' % (human_string, score))
[ "def", "run_inference_on_image", "(", "image", ")", ":", "if", "not", "tf", ".", "gfile", ".", "Exists", "(", "image", ")", ":", "tf", ".", "logging", ".", "fatal", "(", "'File does not exist %s'", ",", "image", ")", "image_data", "=", "tf", ".", "gfile", ".", "FastGFile", "(", "image", ",", "'rb'", ")", ".", "read", "(", ")", "# Creates graph from saved GraphDef.", "create_graph", "(", ")", "with", "tf", ".", "Session", "(", ")", "as", "sess", ":", "# Some useful tensors:", "# 'softmax:0': A tensor containing the normalized prediction across", "# 1000 labels.", "# 'pool_3:0': A tensor containing the next-to-last layer containing 2048", "# float description of the image.", "# 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG", "# encoding of the image.", "# Runs the softmax tensor by feeding the image_data as input to the graph.", "softmax_tensor", "=", "sess", ".", "graph", ".", "get_tensor_by_name", "(", "'softmax:0'", ")", "predictions", "=", "sess", ".", "run", "(", "softmax_tensor", ",", "{", "'DecodeJpeg/contents:0'", ":", "image_data", "}", ")", "predictions", "=", "np", ".", "squeeze", "(", "predictions", ")", "# Creates node ID --> English string lookup.", "node_lookup", "=", "NodeLookup", "(", ")", "top_k", "=", "predictions", ".", "argsort", "(", ")", "[", "-", "FLAGS", ".", "num_top_predictions", ":", "]", "[", ":", ":", "-", "1", "]", "for", "node_id", "in", "top_k", ":", "human_string", "=", "node_lookup", ".", "id_to_string", "(", "node_id", ")", "score", "=", "predictions", "[", "node_id", "]", "print", "(", "'%s (score = %.5f)'", "%", "(", "human_string", ",", "score", ")", ")" ]
Runs inference on an image. Args: image: Image file name. Returns: Nothing
[ "Runs", "inference", "on", "an", "image", "." ]
python
train
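The top-k selection at the end of the function is plain NumPy and can be verified in isolation::

    import numpy as np
    predictions = np.array([0.1, 0.7, 0.05, 0.15])
    top_k = predictions.argsort()[-2:][::-1]  # indices of the two largest scores
    print(top_k)  # [1 3]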
MagicStack/asyncpg
asyncpg/pool.py
https://github.com/MagicStack/asyncpg/blob/92c2d81256a1efd8cab12c0118d74ccd1c18131b/asyncpg/pool.py#L276-L293
def _release(self): """Release this connection holder.""" if self._in_use is None: # The holder is not checked out. return if not self._in_use.done(): self._in_use.set_result(None) self._in_use = None # Deinitialize the connection proxy. All subsequent # operations on it will fail. if self._proxy is not None: self._proxy._detach() self._proxy = None # Put ourselves back to the pool queue. self._pool._queue.put_nowait(self)
[ "def", "_release", "(", "self", ")", ":", "if", "self", ".", "_in_use", "is", "None", ":", "# The holder is not checked out.", "return", "if", "not", "self", ".", "_in_use", ".", "done", "(", ")", ":", "self", ".", "_in_use", ".", "set_result", "(", "None", ")", "self", ".", "_in_use", "=", "None", "# Deinitialize the connection proxy. All subsequent", "# operations on it will fail.", "if", "self", ".", "_proxy", "is", "not", "None", ":", "self", ".", "_proxy", ".", "_detach", "(", ")", "self", ".", "_proxy", "=", "None", "# Put ourselves back to the pool queue.", "self", ".", "_pool", ".", "_queue", ".", "put_nowait", "(", "self", ")" ]
Release this connection holder.
[ "Release", "this", "connection", "holder", "." ]
python
train
pletzer/pnumpy
src/pnGhostedDistArray.py
https://github.com/pletzer/pnumpy/blob/9e6d308be94a42637466b91ab1a7b4d64b4c29ae/src/pnGhostedDistArray.py#L161-L172
def gmdaZeros(shape, dtype, mask=None, numGhosts=1):
    """
    ghosted distributed array zero constructor
    @param shape the shape of the array
    @param dtype the numpy data type
    @param numGhosts the number of ghosts (>= 0)
    """
    res = GhostedMaskedDistArray(shape, dtype)
    res.mask = mask
    res.setNumberOfGhosts(numGhosts)
    res[:] = 0
    return res
[ "def", "gmdaZeros", "(", "shape", ",", "dtype", ",", "mask", "=", "None", ",", "numGhosts", "=", "1", ")", ":", "res", "=", "GhostedMaskedDistArray", "(", "shape", ",", "dtype", ")", "res", ".", "mask", "=", "mask", "res", ".", "setNumberOfGhosts", "(", "numGhosts", ")", "res", "[", ":", "]", "=", "0", "return", "res" ]
ghosted distributed array zero constructor @param shape the shape of the array @param dtype the numpy data type @param numGhosts the number of ghosts (>= 0)
[ "ghosted", "distributed", "array", "zero", "constructor" ]
python
train
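Going by the docstring alone, usage would presumably look like this (not tested against pnumpy here)::

    import numpy
    arr = gmdaZeros((4, 4), numpy.float64, numGhosts=1)  # zero-filled, one ghost layer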
outini/python-pylls
pylls/cachet.py
https://github.com/outini/python-pylls/blob/f9fa220594bc1974469097d9bad690a42d0d0f0f/pylls/cachet.py#L303-L334
def update(self, incident_id, name=None, message=None, status=None,
               visible=None, component_id=None, component_status=None,
               notify=None, created_at=None, template=None, tpl_vars=None):
        """Update an Incident

        :param int incident_id: Incident ID
        :param str name: Name of the incident
        :param str message: Incident explanation message
        :param int status: Status of the incident
        :param int visible: Whether the incident is publicly visible
        :param int component_id: Component to update
        :param int component_status: The status to update the given component
        :param bool notify: Whether to notify subscribers
        :param str created_at: When the incident was created
        :param str template: The template slug to use
        :param list tpl_vars: The variables to pass to the template
        :return: Updated incident data (:class:`dict`)

        .. seealso:: https://docs.cachethq.io/reference#update-an-incident
        """
        data = ApiParams()
        data['name'] = name
        data['message'] = message
        data['status'] = status
        data['visible'] = visible
        data['component_id'] = component_id
        data['component_status'] = component_status
        data['notify'] = notify
        data['created_at'] = created_at
        data['template'] = template
        data['vars'] = tpl_vars
        return self._put('incidents/%s' % incident_id, data=data)['data']
[ "def", "update", "(", "self", ",", "incident_id", ",", "name", "=", "None", ",", "message", "=", "None", ",", "status", "=", "None", ",", "visible", "=", "None", ",", "component_id", "=", "None", ",", "component_status", "=", "None", ",", "notify", "=", "None", ",", "created_at", "=", "None", ",", "template", "=", "None", ",", "tpl_vars", "=", "None", ")", ":", "data", "=", "ApiParams", "(", ")", "data", "[", "'name'", "]", "=", "name", "data", "[", "'message'", "]", "=", "message", "data", "[", "'status'", "]", "=", "status", "data", "[", "'visible'", "]", "=", "visible", "data", "[", "'component_id'", "]", "=", "component_id", "data", "[", "'component_status'", "]", "=", "component_status", "data", "[", "'notify'", "]", "=", "notify", "data", "[", "'created_at'", "]", "=", "created_at", "data", "[", "'template'", "]", "=", "template", "data", "[", "'vars'", "]", "=", "tpl_vars", "return", "self", ".", "_put", "(", "'incidents/%s'", "%", "incident_id", ",", "data", "=", "data", ")", "[", "'data'", "]" ]
Update an Incident

        :param int incident_id: Incident ID
        :param str name: Name of the incident
        :param str message: Incident explanation message
        :param int status: Status of the incident
        :param int visible: Whether the incident is publicly visible
        :param int component_id: Component to update
        :param int component_status: The status to update the given component
        :param bool notify: Whether to notify subscribers
        :param str created_at: When the incident was created
        :param str template: The template slug to use
        :param list tpl_vars: The variables to pass to the template
        :return: Updated incident data (:class:`dict`)

        .. seealso:: https://docs.cachethq.io/reference#update-an-incident
[ "Update", "an", "Incident" ]
python
train
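Because every parameter except ``incident_id`` defaults to ``None``, a caller only names the fields to change; a hypothetical call (the ``client.incidents`` handle is assumed, not part of the record)::

    incident = client.incidents.update(42, status=4, notify=True)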
liampauling/betfair
betfairlightweight/resources/baseresource.py
https://github.com/liampauling/betfair/blob/8479392eb4849c525d78d43497c32c0bb108e977/betfairlightweight/resources/baseresource.py#L26-L39
def strip_datetime(value): """ Converts value to datetime if string or int. """ if isinstance(value, basestring): try: return parse_datetime(value) except ValueError: return elif isinstance(value, integer_types): try: return datetime.datetime.utcfromtimestamp(value / 1e3) except (ValueError, OverflowError, OSError): return
[ "def", "strip_datetime", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "basestring", ")", ":", "try", ":", "return", "parse_datetime", "(", "value", ")", "except", "ValueError", ":", "return", "elif", "isinstance", "(", "value", ",", "integer_types", ")", ":", "try", ":", "return", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "value", "/", "1e3", ")", "except", "(", "ValueError", ",", "OverflowError", ",", "OSError", ")", ":", "return" ]
Converts value to datetime if string or int.
[ "Converts", "value", "to", "datetime", "if", "string", "or", "int", "." ]
python
train
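The integer branch treats the value as epoch milliseconds, hence the division by ``1e3``; the conversion can be checked directly::

    import datetime
    ms = 1489843200000
    print(datetime.datetime.utcfromtimestamp(ms / 1e3))  # 2017-03-18 13:20:00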
maxharp3r/archive-rotator
archive_rotator/rotator.py
https://github.com/maxharp3r/archive-rotator/blob/40b8e571461c54717cee7daead04dbc9751062c8/archive_rotator/rotator.py#L85-L92
def _locate_files_to_delete(algorithm, rotated_files, next_rotation_id): """Looks for hanoi_rotator generated files that occupy the same slot that will be given to rotation_id. """ rotation_slot = algorithm.id_to_slot(next_rotation_id) for a_path, a_rotation_id in rotated_files: if rotation_slot == algorithm.id_to_slot(a_rotation_id): yield a_path
[ "def", "_locate_files_to_delete", "(", "algorithm", ",", "rotated_files", ",", "next_rotation_id", ")", ":", "rotation_slot", "=", "algorithm", ".", "id_to_slot", "(", "next_rotation_id", ")", "for", "a_path", ",", "a_rotation_id", "in", "rotated_files", ":", "if", "rotation_slot", "==", "algorithm", ".", "id_to_slot", "(", "a_rotation_id", ")", ":", "yield", "a_path" ]
Looks for hanoi_rotator generated files that occupy the same slot that will be given to rotation_id.
[ "Looks", "for", "hanoi_rotator", "generated", "files", "that", "occupy", "the", "same", "slot", "that", "will", "be", "given", "to", "rotation_id", "." ]
python
train
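A toy ``algorithm`` makes the slot-collision logic visible; ``ModAlgorithm`` below is hypothetical (ids map to slots modulo 3), not one of archive-rotator's real algorithms::

    class ModAlgorithm(object):
        def id_to_slot(self, rotation_id):
            return rotation_id % 3

    rotated = [('backup.0', 0), ('backup.1', 1), ('backup.2', 2)]
    doomed = list(_locate_files_to_delete(ModAlgorithm(), rotated, 3))
    print(doomed)  # ['backup.0'] -- rotation id 3 reuses slot 0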
fhs/pyhdf
pyhdf/V.py
https://github.com/fhs/pyhdf/blob/dbdc1810a74a38df50dcad81fe903e239d2b388d/pyhdf/V.py#L1039-L1059
def delete(self, tag, ref):
        """Delete from the vgroup the member identified by its tag
        and reference number.

        Args::

          tag     tag of the member to delete
          ref     reference number of the member to delete

        Returns::

          None

        Only the link of the member with the vgroup is deleted.
        The member object is not deleted.

        C library equivalent : Vdeletetagref
        """

        _checkErr('delete', _C.Vdeletetagref(self._id, tag, ref),
                  "error deleting member")
[ "def", "delete", "(", "self", ",", "tag", ",", "ref", ")", ":", "_checkErr", "(", "'delete'", ",", "_C", ".", "Vdeletetagref", "(", "self", ".", "_id", ",", "tag", ",", "ref", ")", ",", "\"error deleting member\"", ")" ]
Delete from the vgroup the member identified by its tag
        and reference number.

        Args::

          tag     tag of the member to delete
          ref     reference number of the member to delete

        Returns::

          None

        Only the link of the member with the vgroup is deleted.
        The member object is not deleted.

        C library equivalent : Vdeletetagref
[ "Delete", "from", "the", "vgroup", "the", "member", "identified", "by", "its", "tag", "and", "reference", "number", "." ]
python
train
fedora-infra/fedora-messaging
fedora_messaging/config.py
https://github.com/fedora-infra/fedora-messaging/blob/be3e88534e2b15d579bcd24f9c4b7e795cb7e0b7/fedora_messaging/config.py#L391-L418
def validate_queues(queues): """ Validate the queues configuration. Raises: exceptions.ConfigurationException: If the configuration provided is of an invalid format. """ if not isinstance(queues, dict): raise exceptions.ConfigurationException( "'queues' must be a dictionary mapping queue names to settings." ) for queue, settings in queues.items(): if not isinstance(settings, dict): raise exceptions.ConfigurationException( "the {} queue in the 'queues' setting has a value of type {}, but it " "should be a dictionary of settings.".format(queue, type(settings)) ) missing_keys = [] for key in ("durable", "auto_delete", "exclusive", "arguments"): if key not in settings: missing_keys.append(key) if missing_keys: raise exceptions.ConfigurationException( "the {} queue is missing the following keys from its settings " "value: {}".format(queue, missing_keys) )
[ "def", "validate_queues", "(", "queues", ")", ":", "if", "not", "isinstance", "(", "queues", ",", "dict", ")", ":", "raise", "exceptions", ".", "ConfigurationException", "(", "\"'queues' must be a dictionary mapping queue names to settings.\"", ")", "for", "queue", ",", "settings", "in", "queues", ".", "items", "(", ")", ":", "if", "not", "isinstance", "(", "settings", ",", "dict", ")", ":", "raise", "exceptions", ".", "ConfigurationException", "(", "\"the {} queue in the 'queues' setting has a value of type {}, but it \"", "\"should be a dictionary of settings.\"", ".", "format", "(", "queue", ",", "type", "(", "settings", ")", ")", ")", "missing_keys", "=", "[", "]", "for", "key", "in", "(", "\"durable\"", ",", "\"auto_delete\"", ",", "\"exclusive\"", ",", "\"arguments\"", ")", ":", "if", "key", "not", "in", "settings", ":", "missing_keys", ".", "append", "(", "key", ")", "if", "missing_keys", ":", "raise", "exceptions", ".", "ConfigurationException", "(", "\"the {} queue is missing the following keys from its settings \"", "\"value: {}\"", ".", "format", "(", "queue", ",", "missing_keys", ")", ")" ]
Validate the queues configuration. Raises: exceptions.ConfigurationException: If the configuration provided is of an invalid format.
[ "Validate", "the", "queues", "configuration", "." ]
python
train
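A configuration that passes all the checks above is a dict of dicts, each entry carrying all four required keys::

    queues = {
        'my_queue': {
            'durable': True,
            'auto_delete': False,
            'exclusive': False,
            'arguments': {},
        },
    }
    validate_queues(queues)  # returns None; raises ConfigurationException otherwise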
ouroboroscoding/format-oc-python
FormatOC/__init__.py
https://github.com/ouroboroscoding/format-oc-python/blob/c160b46fe4ff2c92333c776991c712de23991225/FormatOC/__init__.py#L2217-L2228
def keys(self): """Keys Returns a list of the node names in the parent Returns: list """ if hasattr(self._nodes, 'iterkeys'): return self._nodes.keys() else: return tuple(self._nodes.keys())
[ "def", "keys", "(", "self", ")", ":", "if", "hasattr", "(", "self", ".", "_nodes", ",", "'iterkeys'", ")", ":", "return", "self", ".", "_nodes", ".", "keys", "(", ")", "else", ":", "return", "tuple", "(", "self", ".", "_nodes", ".", "keys", "(", ")", ")" ]
Keys Returns a list of the node names in the parent Returns: list
[ "Keys" ]
python
train
alimanfoo/csvvalidator
csvvalidator.py
https://github.com/alimanfoo/csvvalidator/blob/50a86eefdc549c48f65a91a5c0a66099010ee65d/csvvalidator.py#L883-L905
def _apply_skips(self, i, r, summarize=False, report_unexpected_exceptions=True, context=None): """Apply skip functions on `r`.""" for skip in self._skips: try: result = skip(r) if result is True: yield True except Exception as e: if report_unexpected_exceptions: p = {'code': UNEXPECTED_EXCEPTION} if not summarize: p['message'] = MESSAGES[UNEXPECTED_EXCEPTION] % (e.__class__.__name__, e) p['row'] = i + 1 p['record'] = r p['exception'] = e p['function'] = '%s: %s' % (skip.__name__, skip.__doc__) if context is not None: p['context'] = context yield p
[ "def", "_apply_skips", "(", "self", ",", "i", ",", "r", ",", "summarize", "=", "False", ",", "report_unexpected_exceptions", "=", "True", ",", "context", "=", "None", ")", ":", "for", "skip", "in", "self", ".", "_skips", ":", "try", ":", "result", "=", "skip", "(", "r", ")", "if", "result", "is", "True", ":", "yield", "True", "except", "Exception", "as", "e", ":", "if", "report_unexpected_exceptions", ":", "p", "=", "{", "'code'", ":", "UNEXPECTED_EXCEPTION", "}", "if", "not", "summarize", ":", "p", "[", "'message'", "]", "=", "MESSAGES", "[", "UNEXPECTED_EXCEPTION", "]", "%", "(", "e", ".", "__class__", ".", "__name__", ",", "e", ")", "p", "[", "'row'", "]", "=", "i", "+", "1", "p", "[", "'record'", "]", "=", "r", "p", "[", "'exception'", "]", "=", "e", "p", "[", "'function'", "]", "=", "'%s: %s'", "%", "(", "skip", ".", "__name__", ",", "skip", ".", "__doc__", ")", "if", "context", "is", "not", "None", ":", "p", "[", "'context'", "]", "=", "context", "yield", "p" ]
Apply skip functions on `r`.
[ "Apply", "skip", "functions", "on", "r", "." ]
python
valid
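A skip is any callable that takes a record and returns ``True`` when the row should be skipped; a plausible example (hypothetical, for illustration)::

    def skip_comments(record):
        # skip rows whose first field starts with '#'
        return bool(record) and str(record[0]).startswith('#')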
poppy-project/pypot
pypot/primitive/primitive.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/primitive/primitive.py#L116-L124
def start(self): """ Start or restart (the :meth:`~pypot.primitive.primitive.Primitive.stop` method will automatically be called) the primitive. """ if not self.robot._primitive_manager.running: raise RuntimeError('Cannot run a primitive when the sync is stopped!') StoppableThread.start(self) self.wait_to_start() logger.info("Primitive %s started.", self)
[ "def", "start", "(", "self", ")", ":", "if", "not", "self", ".", "robot", ".", "_primitive_manager", ".", "running", ":", "raise", "RuntimeError", "(", "'Cannot run a primitive when the sync is stopped!'", ")", "StoppableThread", ".", "start", "(", "self", ")", "self", ".", "wait_to_start", "(", ")", "logger", ".", "info", "(", "\"Primitive %s started.\"", ",", "self", ")" ]
Start or restart (the :meth:`~pypot.primitive.primitive.Primitive.stop` method will automatically be called) the primitive.
[ "Start", "or", "restart", "(", "the", ":", "meth", ":", "~pypot", ".", "primitive", ".", "primitive", ".", "Primitive", ".", "stop", "method", "will", "automatically", "be", "called", ")", "the", "primitive", "." ]
python
train
pypa/pipenv
pipenv/vendor/pyparsing.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pyparsing.py#L3067-L3094
def sub(self, repl): """ Return Regex with an attached parse action to transform the parsed result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_. Example:: make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>") print(make_html.transformString("h1:main title:")) # prints "<h1>main title</h1>" """ if self.asGroupList: warnings.warn("cannot use sub() with Regex(asGroupList=True)", SyntaxWarning, stacklevel=2) raise SyntaxError() if self.asMatch and callable(repl): warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)", SyntaxWarning, stacklevel=2) raise SyntaxError() if self.asMatch: def pa(tokens): return tokens[0].expand(repl) else: def pa(tokens): return self.re.sub(repl, tokens[0]) return self.addParseAction(pa)
[ "def", "sub", "(", "self", ",", "repl", ")", ":", "if", "self", ".", "asGroupList", ":", "warnings", ".", "warn", "(", "\"cannot use sub() with Regex(asGroupList=True)\"", ",", "SyntaxWarning", ",", "stacklevel", "=", "2", ")", "raise", "SyntaxError", "(", ")", "if", "self", ".", "asMatch", "and", "callable", "(", "repl", ")", ":", "warnings", ".", "warn", "(", "\"cannot use sub() with a callable with Regex(asMatch=True)\"", ",", "SyntaxWarning", ",", "stacklevel", "=", "2", ")", "raise", "SyntaxError", "(", ")", "if", "self", ".", "asMatch", ":", "def", "pa", "(", "tokens", ")", ":", "return", "tokens", "[", "0", "]", ".", "expand", "(", "repl", ")", "else", ":", "def", "pa", "(", "tokens", ")", ":", "return", "self", ".", "re", ".", "sub", "(", "repl", ",", "tokens", "[", "0", "]", ")", "return", "self", ".", "addParseAction", "(", "pa", ")" ]
Return Regex with an attached parse action to transform the parsed result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_. Example:: make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>") print(make_html.transformString("h1:main title:")) # prints "<h1>main title</h1>"
[ "Return", "Regex", "with", "an", "attached", "parse", "action", "to", "transform", "the", "parsed", "result", "as", "if", "called", "using", "re", ".", "sub", "(", "expr", "repl", "string", ")", "<https", ":", "//", "docs", ".", "python", ".", "org", "/", "3", "/", "library", "/", "re", ".", "html#re", ".", "sub", ">", "_", "." ]
python
train
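The non-``asMatch`` branch reduces to ``re.sub`` applied to the matched token, so the docstring example can be reproduced with the standard library alone::

    import re
    print(re.sub(r"(\w+):(.*?):", r"<\1>\2</\1>", "h1:main title:"))  # <h1>main title</h1>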
paolodragone/pymzn
pymzn/mzn/minizinc.py
https://github.com/paolodragone/pymzn/blob/35b04cfb244918551649b9bb8a0ab65d37c31fe4/pymzn/mzn/minizinc.py#L502-L658
def minizinc( mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None, globals_dir=None, declare_enums=True, allow_multiple_assignments=False, keep=False, output_vars=None, output_base=None, output_mode='dict', solver=None, timeout=None, two_pass=None, pre_passes=None, output_objective=False, non_unique=False, all_solutions=False, num_solutions=None, free_search=False, parallel=None, seed=None, rebase_arrays=True, keep_solutions=True, return_enums=False, **kwargs ): """Implements the workflow for solving a CSP problem encoded with MiniZinc. Parameters ---------- mzn : str The minizinc model. This can be either the path to the ``.mzn`` file or the content of the model itself. *dzn_files A list of paths to dzn files to attach to the minizinc execution, provided as positional arguments; by default no data file is attached. args : dict Arguments for the template engine. data : dict Additional data as a dictionary of variables assignments to supply to the minizinc executable. The dictionary is automatically converted to dzn format by the ``pymzn.dict2dzn`` function. include : str or list One or more additional paths to search for included ``.mzn`` files. stdlib_dir : str The path to the MiniZinc standard library. Provide it only if it is different from the default one. globals_dir : str The path to the MiniZinc globals directory. Provide it only if it is different from the default one. declare_enums : bool Whether to declare enum types when converting inline data into dzn format. If the enum types are declared elsewhere this option should be False. Default is ``True``. allow_multiple_assignments : bool Whether to allow multiple assignments of variables. Sometimes is convenient to simply let the data file override the value already assigned in the minizinc file. Default is ``False``. keep : bool Whether to keep the generated ``.mzn``, ``.dzn``, ``.fzn`` and ``.ozn`` files or not. If False, the generated files are created as temporary files which will be deleted right after the problem is solved. Though files generated by PyMzn are not intended to be kept, this property can be used for debugging purpose. Note that in case of error the files are not deleted even if this parameter is ``False``. Default is ``False``. output_vars : list of str A list of output variables. These variables will be the ones included in the output dictionary. Only available if ``ouptut_mode='dict'``. output_base : str Output directory for the files generated by PyMzn. The default (``None``) is the temporary directory of your OS (if ``keep=False``) or the current working directory (if ``keep=True``). output_mode : {'dict', 'item', 'dzn', 'json', 'raw'} The desired output format. The default is ``'dict'`` which returns a stream of solutions decoded as python dictionaries. The ``'item'`` format outputs a stream of strings as returned by the ``solns2out`` tool, formatted according to the output statement of the MiniZinc model. The ``'dzn'`` and ``'json'`` formats output a stream of strings formatted in dzn of json respectively. The ``'raw'`` format, instead returns the whole solution stream, without parsing. solver : Solver The ``Solver`` instance to use. The default solver is ``gecode``. timeout : int The timeout in seconds for the flattening + solving process. two_pass : bool or int If ``two_pass`` is True, then it is equivalent to the ``--two-pass`` option for the ``minizinc`` executable. If ``two_pass`` is an integer ``<n>``, instead, it is equivalent to the ``-O<n>`` option for the ``minizinc`` executable. 
pre_passes : int Equivalent to the ``--pre-passes`` option for the ``minizinc`` executable. output_objective : bool Equivalent to the ``--output-objective`` option for the ``minizinc`` executable. Adds a field ``_objective`` to all solutions. non_unique : bool Equivalent to the ``--non-unique`` option for the ``minizinc`` executable. all_solutions : bool Whether all the solutions must be returned. This option might not work if the solver does not support it. Default is ``False``. num_solutions : int The upper bound on the number of solutions to be returned. This option might not work if the solver does not support it. Default is ``1``. free_search : bool If ``True``, instruct the solver to perform free search. parallel : int The number of parallel threads the solver can utilize for the solving. seed : int The random number generator seed to pass to the solver. rebase_arrays : bool Whether to "rebase" parsed arrays (see the `Dzn files <http://paolodragone.com/pymzn/reference/dzn>`__ section). Default is True. keep_solutions : bool Whether to store the solutions in memory after solving is done. If ``keep_solutions`` is ``False``, the returned solution stream can only be iterated once and cannot be addressed as a list. return_enums : bool Wheter to return enum types along with the variable assignments in the solutions. Only used if ``output_mode='dict'``. Default is ``False``. **kwargs Additional arguments to pass to the solver, provided as additional keyword arguments to this function. Check the solver documentation for the available arguments. Returns ------- Solutions or str If ``output_mode`` is not ``'raw'``, returns a list-like object containing the solutions found by the solver. The format of the solution depends on the specified ``output_mode``. If ``keep_solutions=False``, the returned object cannot be addressed as a list and can only be iterated once. If ``output_mode='raw'``, the function returns the whole solution stream as a single string. """ mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \ _minizinc_preliminaries( mzn, *dzn_files, args=args, data=data, include=include, stdlib_dir=stdlib_dir, globals_dir=globals_dir, output_vars=output_vars, keep=keep, output_base=output_base, output_mode=output_mode, declare_enums=declare_enums, allow_multiple_assignments=allow_multiple_assignments ) if not solver: solver = config.get('solver', gecode) solver_args = {**kwargs, **config.get('solver_args', {})} proc = solve( solver, mzn_file, *dzn_files, data=data, include=include, stdlib_dir=stdlib_dir, globals_dir=globals_dir, output_mode=_output_mode, timeout=timeout, two_pass=two_pass, pre_passes=pre_passes, output_objective=output_objective, non_unique=non_unique, all_solutions=all_solutions, num_solutions=num_solutions, free_search=free_search, parallel=parallel, seed=seed, allow_multiple_assignments=allow_multiple_assignments, **solver_args ) if not keep: _cleanup([mzn_file, data_file]) if output_mode == 'raw': return proc.stdout_data parser = SolutionParser( solver, output_mode=output_mode, rebase_arrays=rebase_arrays, types=types, keep_solutions=keep_solutions, return_enums=return_enums ) solns = parser.parse(proc) return solns
[ "def", "minizinc", "(", "mzn", ",", "*", "dzn_files", ",", "args", "=", "None", ",", "data", "=", "None", ",", "include", "=", "None", ",", "stdlib_dir", "=", "None", ",", "globals_dir", "=", "None", ",", "declare_enums", "=", "True", ",", "allow_multiple_assignments", "=", "False", ",", "keep", "=", "False", ",", "output_vars", "=", "None", ",", "output_base", "=", "None", ",", "output_mode", "=", "'dict'", ",", "solver", "=", "None", ",", "timeout", "=", "None", ",", "two_pass", "=", "None", ",", "pre_passes", "=", "None", ",", "output_objective", "=", "False", ",", "non_unique", "=", "False", ",", "all_solutions", "=", "False", ",", "num_solutions", "=", "None", ",", "free_search", "=", "False", ",", "parallel", "=", "None", ",", "seed", "=", "None", ",", "rebase_arrays", "=", "True", ",", "keep_solutions", "=", "True", ",", "return_enums", "=", "False", ",", "*", "*", "kwargs", ")", ":", "mzn_file", ",", "dzn_files", ",", "data_file", ",", "data", ",", "keep", ",", "_output_mode", ",", "types", "=", "_minizinc_preliminaries", "(", "mzn", ",", "*", "dzn_files", ",", "args", "=", "args", ",", "data", "=", "data", ",", "include", "=", "include", ",", "stdlib_dir", "=", "stdlib_dir", ",", "globals_dir", "=", "globals_dir", ",", "output_vars", "=", "output_vars", ",", "keep", "=", "keep", ",", "output_base", "=", "output_base", ",", "output_mode", "=", "output_mode", ",", "declare_enums", "=", "declare_enums", ",", "allow_multiple_assignments", "=", "allow_multiple_assignments", ")", "if", "not", "solver", ":", "solver", "=", "config", ".", "get", "(", "'solver'", ",", "gecode", ")", "solver_args", "=", "{", "*", "*", "kwargs", ",", "*", "*", "config", ".", "get", "(", "'solver_args'", ",", "{", "}", ")", "}", "proc", "=", "solve", "(", "solver", ",", "mzn_file", ",", "*", "dzn_files", ",", "data", "=", "data", ",", "include", "=", "include", ",", "stdlib_dir", "=", "stdlib_dir", ",", "globals_dir", "=", "globals_dir", ",", "output_mode", "=", "_output_mode", ",", "timeout", "=", "timeout", ",", "two_pass", "=", "two_pass", ",", "pre_passes", "=", "pre_passes", ",", "output_objective", "=", "output_objective", ",", "non_unique", "=", "non_unique", ",", "all_solutions", "=", "all_solutions", ",", "num_solutions", "=", "num_solutions", ",", "free_search", "=", "free_search", ",", "parallel", "=", "parallel", ",", "seed", "=", "seed", ",", "allow_multiple_assignments", "=", "allow_multiple_assignments", ",", "*", "*", "solver_args", ")", "if", "not", "keep", ":", "_cleanup", "(", "[", "mzn_file", ",", "data_file", "]", ")", "if", "output_mode", "==", "'raw'", ":", "return", "proc", ".", "stdout_data", "parser", "=", "SolutionParser", "(", "solver", ",", "output_mode", "=", "output_mode", ",", "rebase_arrays", "=", "rebase_arrays", ",", "types", "=", "types", ",", "keep_solutions", "=", "keep_solutions", ",", "return_enums", "=", "return_enums", ")", "solns", "=", "parser", ".", "parse", "(", "proc", ")", "return", "solns" ]
Implements the workflow for solving a CSP problem encoded with MiniZinc.

    Parameters
    ----------
    mzn : str
        The minizinc model. This can be either the path to the ``.mzn`` file
        or the content of the model itself.
    *dzn_files
        A list of paths to dzn files to attach to the minizinc execution,
        provided as positional arguments; by default no data file is
        attached.
    args : dict
        Arguments for the template engine.
    data : dict
        Additional data as a dictionary of variables assignments to supply
        to the minizinc executable. The dictionary is automatically
        converted to dzn format by the ``pymzn.dict2dzn`` function.
    include : str or list
        One or more additional paths to search for included ``.mzn`` files.
    stdlib_dir : str
        The path to the MiniZinc standard library. Provide it only if it is
        different from the default one.
    globals_dir : str
        The path to the MiniZinc globals directory. Provide it only if it is
        different from the default one.
    declare_enums : bool
        Whether to declare enum types when converting inline data into dzn
        format. If the enum types are declared elsewhere this option should
        be False. Default is ``True``.
    allow_multiple_assignments : bool
        Whether to allow multiple assignments of variables. Sometimes it is
        convenient to simply let the data file override the value already
        assigned in the minizinc file. Default is ``False``.
    keep : bool
        Whether to keep the generated ``.mzn``, ``.dzn``, ``.fzn`` and
        ``.ozn`` files or not. If False, the generated files are created as
        temporary files which will be deleted right after the problem is
        solved. Though files generated by PyMzn are not intended to be kept,
        this property can be used for debugging purposes. Note that in case
        of error the files are not deleted even if this parameter is
        ``False``. Default is ``False``.
    output_vars : list of str
        A list of output variables. These variables will be the ones
        included in the output dictionary. Only available if
        ``output_mode='dict'``.
    output_base : str
        Output directory for the files generated by PyMzn. The default
        (``None``) is the temporary directory of your OS (if ``keep=False``)
        or the current working directory (if ``keep=True``).
    output_mode : {'dict', 'item', 'dzn', 'json', 'raw'}
        The desired output format. The default is ``'dict'`` which returns a
        stream of solutions decoded as python dictionaries. The ``'item'``
        format outputs a stream of strings as returned by the ``solns2out``
        tool, formatted according to the output statement of the MiniZinc
        model. The ``'dzn'`` and ``'json'`` formats output a stream of
        strings formatted in dzn or json respectively. The ``'raw'`` format,
        instead, returns the whole solution stream without parsing.
    solver : Solver
        The ``Solver`` instance to use. The default solver is ``gecode``.
    timeout : int
        The timeout in seconds for the flattening + solving process.
    two_pass : bool or int
        If ``two_pass`` is True, then it is equivalent to the ``--two-pass``
        option for the ``minizinc`` executable. If ``two_pass`` is an
        integer ``<n>``, instead, it is equivalent to the ``-O<n>`` option
        for the ``minizinc`` executable.
    pre_passes : int
        Equivalent to the ``--pre-passes`` option for the ``minizinc``
        executable.
    output_objective : bool
        Equivalent to the ``--output-objective`` option for the ``minizinc``
        executable. Adds a field ``_objective`` to all solutions.
    non_unique : bool
        Equivalent to the ``--non-unique`` option for the ``minizinc``
        executable.
    all_solutions : bool
        Whether all the solutions must be returned. This option might not
        work if the solver does not support it. Default is ``False``.
    num_solutions : int
        The upper bound on the number of solutions to be returned. This
        option might not work if the solver does not support it. Default is
        ``1``.
    free_search : bool
        If ``True``, instruct the solver to perform free search.
    parallel : int
        The number of parallel threads the solver can utilize for the
        solving.
    seed : int
        The random number generator seed to pass to the solver.
    rebase_arrays : bool
        Whether to "rebase" parsed arrays (see the `Dzn files
        <http://paolodragone.com/pymzn/reference/dzn>`__ section). Default
        is True.
    keep_solutions : bool
        Whether to store the solutions in memory after solving is done. If
        ``keep_solutions`` is ``False``, the returned solution stream can
        only be iterated once and cannot be addressed as a list.
    return_enums : bool
        Whether to return enum types along with the variable assignments in
        the solutions. Only used if ``output_mode='dict'``. Default is
        ``False``.
    **kwargs
        Additional arguments to pass to the solver, provided as additional
        keyword arguments to this function. Check the solver documentation
        for the available arguments.

    Returns
    -------
    Solutions or str
        If ``output_mode`` is not ``'raw'``, returns a list-like object
        containing the solutions found by the solver. The format of the
        solution depends on the specified ``output_mode``. If
        ``keep_solutions=False``, the returned object cannot be addressed as
        a list and can only be iterated once. If ``output_mode='raw'``, the
        function returns the whole solution stream as a single string.
[ "Implements", "the", "workflow", "for", "solving", "a", "CSP", "problem", "encoded", "with", "MiniZinc", "." ]
python
train
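Based on the docstring (not re-tested here), a minimal call could pass the model content inline and read solutions back as dictionaries::

    import pymzn
    solns = pymzn.minizinc('var 1..5: x; solve maximize x;')
    print(solns)  # e.g. [{'x': 5}]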
pywbem/pywbem
pywbem/cim_obj.py
https://github.com/pywbem/pywbem/blob/e54ecb82c2211e289a268567443d60fdd489f1e4/pywbem/cim_obj.py#L4063-L4074
def path(self, path): """Setter method; for a description see the getter method.""" # pylint: disable=attribute-defined-outside-init self._path = copy_.copy(path) # The provided path is shallow copied; it does not have any attributes # with mutable types. # We perform this check after the initialization to avoid errors # in test tools that show the object with repr(). assert isinstance(path, CIMClassName) or path is None
[ "def", "path", "(", "self", ",", "path", ")", ":", "# pylint: disable=attribute-defined-outside-init", "self", ".", "_path", "=", "copy_", ".", "copy", "(", "path", ")", "# The provided path is shallow copied; it does not have any attributes", "# with mutable types.", "# We perform this check after the initialization to avoid errors", "# in test tools that show the object with repr().", "assert", "isinstance", "(", "path", ",", "CIMClassName", ")", "or", "path", "is", "None" ]
Setter method; for a description see the getter method.
[ "Setter", "method", ";", "for", "a", "description", "see", "the", "getter", "method", "." ]
python
train
mozilla-services/python-dockerflow
src/dockerflow/flask/app.py
https://github.com/mozilla-services/python-dockerflow/blob/43703c5e8934ba6901b0a1520d6da4ed6457208c/src/dockerflow/flask/app.py#L354-L391
def check(self, func=None, name=None):
        """
        A decorator to register a new Dockerflow check to be run
        when the /__heartbeat__ endpoint is called, e.g.::

            from dockerflow.flask import checks

            @dockerflow.check
            def storage_reachable():
                try:
                    acme.storage.ping()
                except SlowConnectionException as exc:
                    return [checks.Warning(exc.msg, id='acme.health.0002')]
                except StorageException as exc:
                    return [checks.Error(exc.msg, id='acme.health.0001')]

        or using a custom name::

            @dockerflow.check(name='acme-storage-check')
            def storage_reachable():
                # ...

        """
        if func is None:
            return functools.partial(self.check, name=name)

        if name is None:
            name = func.__name__

        self.logger.info('Registered Dockerflow check %s', name)

        @functools.wraps(func)
        def decorated_function(*args, **kwargs):
            self.logger.info('Called Dockerflow check %s', name)
            return func(*args, **kwargs)

        self.checks[name] = decorated_function
        return decorated_function
[ "def", "check", "(", "self", ",", "func", "=", "None", ",", "name", "=", "None", ")", ":", "if", "func", "is", "None", ":", "return", "functools", ".", "partial", "(", "self", ".", "check", ",", "name", "=", "name", ")", "if", "name", "is", "None", ":", "name", "=", "func", ".", "__name__", "self", ".", "logger", ".", "info", "(", "'Registered Dockerflow check %s'", ",", "name", ")", "@", "functools", ".", "wraps", "(", "func", ")", "def", "decorated_function", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "logger", ".", "info", "(", "'Called Dockerflow check %s'", ",", "name", ")", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "self", ".", "checks", "[", "name", "]", "=", "decorated_function", "return", "decorated_function" ]
A decorator to register a new Dockerflow check to be run
        when the /__heartbeat__ endpoint is called, e.g.::

            from dockerflow.flask import checks

            @dockerflow.check
            def storage_reachable():
                try:
                    acme.storage.ping()
                except SlowConnectionException as exc:
                    return [checks.Warning(exc.msg, id='acme.health.0002')]
                except StorageException as exc:
                    return [checks.Error(exc.msg, id='acme.health.0001')]

        or using a custom name::

            @dockerflow.check(name='acme-storage-check')
            def storage_reachable():
                # ...
[ "A", "decorator", "to", "register", "a", "new", "Dockerflow", "check", "to", "be", "run", "when", "the", "/", "__heartbeat__", "endpoint", "is", "called", ".", "e", ".", "g", ".", "::" ]
python
train
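The ``func is None`` guard is the standard trick that lets one decorator work both bare and with arguments; a stripped-down, framework-free version::

    import functools

    def check(func=None, name=None):
        if func is None:  # called as @check(name=...): return the real decorator
            return functools.partial(check, name=name)
        func.check_name = name or func.__name__
        return func

    @check
    def a(): pass

    @check(name='custom')
    def b(): pass

    print(a.check_name, b.check_name)  # a custom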
llazzaro/analyzerstrategies
analyzerstrategies/periodStrategy.py
https://github.com/llazzaro/analyzerstrategies/blob/3c647802f582bf2f06c6793f282bee0d26514cd6/analyzerstrategies/periodStrategy.py#L27-L34
def increase_and_check_counter(self):
        ''' increase counter by one and check whether a period has ended '''
        self.counter += 1
        self.counter %= self.period
        if not self.counter:
            return True
        else:
            return False
[ "def", "increase_and_check_counter", "(", "self", ")", ":", "self", ".", "counter", "+=", "1", "self", ".", "counter", "%=", "self", ".", "period", "if", "not", "self", ".", "counter", ":", "return", "True", "else", ":", "return", "False" ]
increase counter by one and check whether a period has ended
[ "increase", "counter", "by", "one", "and", "check", "whether", "a", "period", "has", "ended" ]
python
train
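With ``period = 3`` the counter cycles 1, 2, 0, so every third call reports a period boundary; the same arithmetic stands alone::

    counter, period, hits = 0, 3, []
    for _ in range(6):
        counter = (counter + 1) % period
        hits.append(counter == 0)
    print(hits)  # [False, False, True, False, False, True]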
jwodder/javaproperties
javaproperties/propclass.py
https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/propclass.py#L155-L167
def store(self, out, comments=None): """ Write the `Properties` object's entries (in unspecified order) in ``.properties`` format to ``out``, including the current timestamp. :param out: A file-like object to write the properties to. It must have been opened as a text file with a Latin-1-compatible encoding. :param comments: If non-`None`, ``comments`` will be written to ``out`` as a comment before any other content :type comments: text string or `None` :return: `None` """ dump(self.data, out, comments=comments)
[ "def", "store", "(", "self", ",", "out", ",", "comments", "=", "None", ")", ":", "dump", "(", "self", ".", "data", ",", "out", ",", "comments", "=", "comments", ")" ]
Write the `Properties` object's entries (in unspecified order) in ``.properties`` format to ``out``, including the current timestamp. :param out: A file-like object to write the properties to. It must have been opened as a text file with a Latin-1-compatible encoding. :param comments: If non-`None`, ``comments`` will be written to ``out`` as a comment before any other content :type comments: text string or `None` :return: `None`
[ "Write", "the", "Properties", "object", "s", "entries", "(", "in", "unspecified", "order", ")", "in", ".", "properties", "format", "to", "out", "including", "the", "current", "timestamp", "." ]
python
train
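Per the docstring, ``out`` must be a text file opened with a Latin-1-compatible encoding; a plausible call (file name hypothetical)::

    with open('app.properties', 'w', encoding='latin-1') as fp:
        props.store(fp, comments='generated settings')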
ionelmc/python-hunter
src/hunter/event.py
https://github.com/ionelmc/python-hunter/blob/b3a1310b0593d2c6b6ef430883843896e17d6a81/src/hunter/event.py#L247-L258
def source(self, getline=linecache.getline): """ A string with the sourcecode for the current line (from ``linecache`` - failures are ignored). Fast but sometimes incomplete. :type: str """ try: return getline(self.filename, self.lineno) except Exception as exc: return "??? NO SOURCE: {!r}".format(exc)
[ "def", "source", "(", "self", ",", "getline", "=", "linecache", ".", "getline", ")", ":", "try", ":", "return", "getline", "(", "self", ".", "filename", ",", "self", ".", "lineno", ")", "except", "Exception", "as", "exc", ":", "return", "\"??? NO SOURCE: {!r}\"", ".", "format", "(", "exc", ")" ]
A string with the sourcecode for the current line (from ``linecache`` - failures are ignored). Fast but sometimes incomplete. :type: str
[ "A", "string", "with", "the", "sourcecode", "for", "the", "current", "line", "(", "from", "linecache", "-", "failures", "are", "ignored", ")", "." ]
python
train
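``linecache.getline`` returns an empty string rather than raising for most bad inputs, which is why the ``except`` fallback above triggers only on unusual failures::

    import linecache
    print(repr(linecache.getline(__file__, 1)))      # first source line of this file
    print(repr(linecache.getline('missing.py', 1)))  # '' -- no exception raised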
Rapptz/discord.py
discord/ext/commands/help.py
https://github.com/Rapptz/discord.py/blob/05d4f7f9620ef33635d6ac965b26528e09cdaf5b/discord/ext/commands/help.py#L865-L869
def shorten_text(self, text): """Shortens text to fit into the :attr:`width`.""" if len(text) > self.width: return text[:self.width - 3] + '...' return text
[ "def", "shorten_text", "(", "self", ",", "text", ")", ":", "if", "len", "(", "text", ")", ">", "self", ".", "width", ":", "return", "text", "[", ":", "self", ".", "width", "-", "3", "]", "+", "'...'", "return", "text" ]
Shortens text to fit into the :attr:`width`.
[ "Shortens", "text", "to", "fit", "into", "the", ":", "attr", ":", "width", "." ]
python
train
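Worked example with ``width = 10``: an 11-character string is cut to ``width - 3`` characters plus the ellipsis, so the result lands exactly at the width::

    text, width = 'hello world', 10
    out = text[:width - 3] + '...' if len(text) > width else text
    print(out)  # hello w...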
tempodb/tempodb-python
tempodb/protocol/objects.py
https://github.com/tempodb/tempodb-python/blob/8ce45231bd728c6c97ef799cf0f1513ea3a9a7d3/tempodb/protocol/objects.py#L475-L502
def from_json(self, json_text): """Deserialize a JSON object into this object. This method will check that the JSON object has the required keys and will set each of the keys in that JSON object as an instance attribute of this object. :param json_text: the JSON text or object to deserialize from :type json_text: dict or string :raises ValueError: if the JSON object lacks an expected key :rtype: None""" if type(json_text) in [str, unicode]: j = json.loads(json_text) else: j = json_text try: for p in self.properties: if p == 't': t = convert_iso_stamp(j[p], self.tz) setattr(self, 't', t) else: setattr(self, p, j[p]) #overriding this exception allows us to handle optional values like #id and key which are only present during particular API calls like #multi writes except KeyError: pass
[ "def", "from_json", "(", "self", ",", "json_text", ")", ":", "if", "type", "(", "json_text", ")", "in", "[", "str", ",", "unicode", "]", ":", "j", "=", "json", ".", "loads", "(", "json_text", ")", "else", ":", "j", "=", "json_text", "try", ":", "for", "p", "in", "self", ".", "properties", ":", "if", "p", "==", "'t'", ":", "t", "=", "convert_iso_stamp", "(", "j", "[", "p", "]", ",", "self", ".", "tz", ")", "setattr", "(", "self", ",", "'t'", ",", "t", ")", "else", ":", "setattr", "(", "self", ",", "p", ",", "j", "[", "p", "]", ")", "#overriding this exception allows us to handle optional values like", "#id and key which are only present during particular API calls like", "#multi writes", "except", "KeyError", ":", "pass" ]
Deserialize a JSON object into this object. This method will check that the JSON object has the required keys and will set each of the keys in that JSON object as an instance attribute of this object. :param json_text: the JSON text or object to deserialize from :type json_text: dict or string :raises ValueError: if the JSON object lacks an expected key :rtype: None
[ "Deserialize", "a", "JSON", "object", "into", "this", "object", ".", "This", "method", "will", "check", "that", "the", "JSON", "object", "has", "the", "required", "keys", "and", "will", "set", "each", "of", "the", "keys", "in", "that", "JSON", "object", "as", "an", "instance", "attribute", "of", "this", "object", "." ]
python
train
pyblish/pyblish-qml
pyblish_qml/vendor/mock.py
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/vendor/mock.py#L1879-L1886
def mock_add_spec(self, spec, spec_set=False): """Add a spec to a mock. `spec` can either be an object or a list of strings. Only attributes on the `spec` can be fetched as attributes from the mock. If `spec_set` is True then only attributes on the spec can be set.""" self._mock_add_spec(spec, spec_set) self._mock_set_magics()
[ "def", "mock_add_spec", "(", "self", ",", "spec", ",", "spec_set", "=", "False", ")", ":", "self", ".", "_mock_add_spec", "(", "spec", ",", "spec_set", ")", "self", ".", "_mock_set_magics", "(", ")" ]
Add a spec to a mock. `spec` can either be an object or a list of strings. Only attributes on the `spec` can be fetched as attributes from the mock. If `spec_set` is True then only attributes on the spec can be set.
[ "Add", "a", "spec", "to", "a", "mock", ".", "spec", "can", "either", "be", "an", "object", "or", "a", "list", "of", "strings", ".", "Only", "attributes", "on", "the", "spec", "can", "be", "fetched", "as", "attributes", "from", "the", "mock", "." ]
python
train
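With the standard ``mock`` library this restricts which attributes the mock exposes after creation::

    from unittest import mock  # or 'import mock' for the standalone package
    m = mock.Mock()
    m.mock_add_spec(['ping'], spec_set=True)
    m.ping()   # allowed: 'ping' is on the spec
    # m.pong   # would raise AttributeError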
data-8/datascience
datascience/tables.py
https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L2765-L2776
def _is_array_integer(arr): """Returns True if an array contains integers (integer type or near-int float values) and False otherwise. >>> _is_array_integer(np.arange(10)) True >>> _is_array_integer(np.arange(7.0, 20.0, 1.0)) True >>> _is_array_integer(np.arange(0, 1, 0.1)) False """ return issubclass(arr.dtype.type, np.integer) or np.allclose(arr, np.round(arr))
[ "def", "_is_array_integer", "(", "arr", ")", ":", "return", "issubclass", "(", "arr", ".", "dtype", ".", "type", ",", "np", ".", "integer", ")", "or", "np", ".", "allclose", "(", "arr", ",", "np", ".", "round", "(", "arr", ")", ")" ]
Returns True if an array contains integers (integer type or near-int float values) and False otherwise. >>> _is_array_integer(np.arange(10)) True >>> _is_array_integer(np.arange(7.0, 20.0, 1.0)) True >>> _is_array_integer(np.arange(0, 1, 0.1)) False
[ "Returns", "True", "if", "an", "array", "contains", "integers", "(", "integer", "type", "or", "near", "-", "int", "float", "values", ")", "and", "False", "otherwise", "." ]
python
train
Erotemic/utool
utool/util_latex.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_latex.py#L727-L875
def get_latex_figure_str(fpath_list, caption_str=None, label_str=None,
                         width_str=r'\textwidth', height_str=None, nCols=None,
                         dpath=None, colpos_sep=' ', nlsep='',
                         use_sublbls=None, use_frame=False):
    r"""
    Args:
        fpath_list (list):
        dpath (str): directory relative to main tex file

    Returns:
        str: figure_str

    CommandLine:
        python -m utool.util_latex --test-get_latex_figure_str

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_latex import *  # NOQA
        >>> fpath_list = ['figures/foo.png']
        >>> figure_str = get_latex_figure_str(fpath_list)
        >>> result = str(figure_str)
        >>> print(result)
    """
    import utool as ut
    if nCols is None:
        nCols = len(fpath_list)
    USE_SUBFIGURE = True
    if width_str is not None:
        colwidth = (1.0 / nCols)
        if USE_SUBFIGURE:
            colwidth *= .95
            graphics_sizestr = ('%.2f' % (colwidth,)) + width_str
        else:
            graphics_sizestr = '[width=%.1f%s]' % (colwidth, width_str)
    elif height_str is not None:
        graphics_sizestr = '[height=%s]' % (height_str)
    else:
        graphics_sizestr = ''

    if dpath is not None:
        fpath_list = [ut.relpath_unix(fpath_, dpath) for fpath_ in fpath_list]

    if USE_SUBFIGURE:
        # References: https://en.wikibooks.org/wiki/LaTeX/Floats,_Figures_and_Captions#Subfloats
        # TODO ? http://tex.stackexchange.com/questions/159290/how-can-i-place-a-vertical-rule-between-subfigures
        # Use subfigures
        graphics_list = []
        sublbl_prefix = label_str if label_str is not None else ''
        for count, fpath in enumerate(fpath_list):
            """
            print(', '.join([str(x) + ':' + chr(x) for x in range(65, 123)]))
            print(', '.join([str(x) + ':' + chr(x) for x in range(97, 123)]))
            """
            CHRLBLS = True
            if CHRLBLS:
                #subchar = chr(97 + count)
                subchar = chr(65 + count)
            else:
                subchar = str(count)
            parts = []
            subfigure_str = ''
            if len(fpath_list) > 1:
                parts.append('\\begin{subfigure}[h]{' + graphics_sizestr + '}')
                parts.append('\\centering')
            graphics_part = '\\includegraphics[width=%s]{%s}' % (width_str, fpath,)
            if use_frame:
                parts.append('\\fbox{%s}' % (graphics_part,))
            else:
                parts.append(graphics_part)
            if use_sublbls is True or use_sublbls is None and len(fpath_list) > 1:
                parts.append('\\caption{}\\label{sub:' + sublbl_prefix + subchar + '}')
            if len(fpath_list) > 1:
                parts.append('\\end{subfigure}')
            subfigure_str = ''.join(parts)
            graphics_list.append(subfigure_str)
    else:
        if True:
            graphics_list = [
                r'\includegraphics%s{%s}\captionof{figure}{%s}' % (
                    graphics_sizestr, fpath, 'fd',
                    #'(' + str(count) + ')'
                    #'(' + chr(97 + count) + ')'
                )
                for count, fpath in enumerate(fpath_list)]
        else:
            graphics_list = [r'\includegraphics%s{%s}' % (graphics_sizestr, fpath,)
                             for fpath in fpath_list]
    #graphics_list = [r'\includegraphics%s{%s}' % (graphics_sizestr, fpath,) ]
    #nRows = len(graphics_list) // nCols
    # Add separators
    NL = '\n'
    if USE_SUBFIGURE:
        col_spacer_mid = NL + '~~' + '% --' + NL
        col_spacer_end = NL + r'\\' + '% --' + NL
    else:
        col_spacer_mid = NL + '&' + NL
        col_spacer_end = NL + r'\\' + nlsep + NL
    sep_list = [
        col_spacer_mid if count % nCols > 0 else col_spacer_end
        for count in range(1, len(graphics_list) + 1)
    ]
    if len(sep_list) > 0:
        sep_list[-1] = ''
    graphics_list_ = [graphstr + sep for graphstr, sep in zip(graphics_list, sep_list)]
    #graphics_body = '\n&\n'.join(graphics_list)
    graphics_body = ''.join(graphics_list_)
    header_str = colpos_sep.join(['c'] * nCols)
    if USE_SUBFIGURE:
        figure_body = graphics_body
    else:
        figure_body = ut.codeblock(
            r'''
            \begin{tabular}{%s}
            %s
            \end{tabular}
            '''
        ) % (header_str, graphics_body)
    if caption_str is not None:
        #tabular_body += '\n\caption{\\footnotesize{%s}}' % (caption_str,)
        if label_str is not None:
            figure_body += '\n\caption[%s]{%s}' % (label_str, caption_str,)
        else:
            figure_body += '\n\caption{%s}' % (caption_str,)
    if label_str is not None:
        figure_body += '\n\label{fig:%s}' % (label_str,)
    #figure_fmtstr = ut.codeblock(
    #    r'''
    #    \begin{figure*}
    #    \begin{center}
    #    %s
    #    \end{center}
    #    \end{figure*}
    #    '''
    #)
    figure_fmtstr = ut.codeblock(
        r'''
        \begin{figure}[ht!]
        \centering
        %s
        \end{figure}
        '''
    )
    figure_str = figure_fmtstr % (figure_body)
    return figure_str
[ "def", "get_latex_figure_str", "(", "fpath_list", ",", "caption_str", "=", "None", ",", "label_str", "=", "None", ",", "width_str", "=", "r'\\textwidth'", ",", "height_str", "=", "None", ",", "nCols", "=", "None", ",", "dpath", "=", "None", ",", "colpos_sep", "=", "' '", ",", "nlsep", "=", "''", ",", "use_sublbls", "=", "None", ",", "use_frame", "=", "False", ")", ":", "import", "utool", "as", "ut", "if", "nCols", "is", "None", ":", "nCols", "=", "len", "(", "fpath_list", ")", "USE_SUBFIGURE", "=", "True", "if", "width_str", "is", "not", "None", ":", "colwidth", "=", "(", "1.0", "/", "nCols", ")", "if", "USE_SUBFIGURE", ":", "colwidth", "*=", ".95", "graphics_sizestr", "=", "(", "'%.2f'", "%", "(", "colwidth", ",", ")", ")", "+", "width_str", "else", ":", "graphics_sizestr", "=", "'[width=%.1f%s]'", "%", "(", "colwidth", ",", "width_str", ")", "elif", "height_str", "is", "not", "None", ":", "graphics_sizestr", "=", "'[height=%s]'", "%", "(", "height_str", ")", "else", ":", "graphics_sizestr", "=", "''", "if", "dpath", "is", "not", "None", ":", "fpath_list", "=", "[", "ut", ".", "relpath_unix", "(", "fpath_", ",", "dpath", ")", "for", "fpath_", "in", "fpath_list", "]", "if", "USE_SUBFIGURE", ":", "# References: https://en.wikibooks.org/wiki/LaTeX/Floats,_Figures_and_Captions#Subfloats", "# TODO ? http://tex.stackexchange.com/questions/159290/how-can-i-place-a-vertical-rule-between-subfigures", "# Use subfigures", "graphics_list", "=", "[", "]", "sublbl_prefix", "=", "label_str", "if", "label_str", "is", "not", "None", "else", "''", "for", "count", ",", "fpath", "in", "enumerate", "(", "fpath_list", ")", ":", "\"\"\"\n print(', '.join([str(x) + ':' + chr(x) for x in range(65, 123)]))\n print(', '.join([str(x) + ':' + chr(x) for x in range(97, 123)]))\n \"\"\"", "CHRLBLS", "=", "True", "if", "CHRLBLS", ":", "#subchar = chr(97 + count)", "subchar", "=", "chr", "(", "65", "+", "count", ")", "else", ":", "subchar", "=", "str", "(", "count", ")", "parts", "=", "[", "]", "subfigure_str", "=", "''", "if", "len", "(", "fpath_list", ")", ">", "1", ":", "parts", ".", "append", "(", "'\\\\begin{subfigure}[h]{'", "+", "graphics_sizestr", "+", "'}'", ")", "parts", ".", "append", "(", "'\\\\centering'", ")", "graphics_part", "=", "'\\\\includegraphics[width=%s]{%s}'", "%", "(", "width_str", ",", "fpath", ",", ")", "if", "use_frame", ":", "parts", ".", "append", "(", "'\\\\fbox{%s}'", "%", "(", "graphics_part", ",", ")", ")", "else", ":", "parts", ".", "append", "(", "graphics_part", ")", "if", "use_sublbls", "is", "True", "or", "use_sublbls", "is", "None", "and", "len", "(", "fpath_list", ")", ">", "1", ":", "parts", ".", "append", "(", "'\\\\caption{}\\\\label{sub:'", "+", "sublbl_prefix", "+", "subchar", "+", "'}'", ")", "if", "len", "(", "fpath_list", ")", ">", "1", ":", "parts", ".", "append", "(", "'\\\\end{subfigure}'", ")", "subfigure_str", "=", "''", ".", "join", "(", "parts", ")", "graphics_list", ".", "append", "(", "subfigure_str", ")", "else", ":", "if", "True", ":", "graphics_list", "=", "[", "r'\\includegraphics%s{%s}\\captionof{figure}{%s}'", "%", "(", "graphics_sizestr", ",", "fpath", ",", "'fd'", ",", "#'(' + str(count) + ')'", "#'(' + chr(97 + count) + ')'", ")", "for", "count", ",", "fpath", "in", "enumerate", "(", "fpath_list", ")", "]", "else", ":", "graphics_list", "=", "[", "r'\\includegraphics%s{%s}'", "%", "(", "graphics_sizestr", ",", "fpath", ",", ")", "for", "fpath", "in", "fpath_list", "]", "#graphics_list = [r'\\includegraphics%s{%s}' % (graphics_sizestr, fpath,) ]", 
"#nRows = len(graphics_list) // nCols", "# Add separators", "NL", "=", "'\\n'", "if", "USE_SUBFIGURE", ":", "col_spacer_mid", "=", "NL", "+", "'~~'", "+", "'% --'", "+", "NL", "col_spacer_end", "=", "NL", "+", "r'\\\\'", "+", "'% --'", "+", "NL", "else", ":", "col_spacer_mid", "=", "NL", "+", "'&'", "+", "NL", "col_spacer_end", "=", "NL", "+", "r'\\\\'", "+", "nlsep", "+", "NL", "sep_list", "=", "[", "col_spacer_mid", "if", "count", "%", "nCols", ">", "0", "else", "col_spacer_end", "for", "count", "in", "range", "(", "1", ",", "len", "(", "graphics_list", ")", "+", "1", ")", "]", "if", "len", "(", "sep_list", ")", ">", "0", ":", "sep_list", "[", "-", "1", "]", "=", "''", "graphics_list_", "=", "[", "graphstr", "+", "sep", "for", "graphstr", ",", "sep", "in", "zip", "(", "graphics_list", ",", "sep_list", ")", "]", "#graphics_body = '\\n&\\n'.join(graphics_list)", "graphics_body", "=", "''", ".", "join", "(", "graphics_list_", ")", "header_str", "=", "colpos_sep", ".", "join", "(", "[", "'c'", "]", "*", "nCols", ")", "if", "USE_SUBFIGURE", ":", "figure_body", "=", "graphics_body", "else", ":", "figure_body", "=", "ut", ".", "codeblock", "(", "r'''\n \\begin{tabular}{%s}\n %s\n \\end{tabular}\n '''", ")", "%", "(", "header_str", ",", "graphics_body", ")", "if", "caption_str", "is", "not", "None", ":", "#tabular_body += '\\n\\caption{\\\\footnotesize{%s}}' % (caption_str,)", "if", "label_str", "is", "not", "None", ":", "figure_body", "+=", "'\\n\\caption[%s]{%s}'", "%", "(", "label_str", ",", "caption_str", ",", ")", "else", ":", "figure_body", "+=", "'\\n\\caption{%s}'", "%", "(", "caption_str", ",", ")", "if", "label_str", "is", "not", "None", ":", "figure_body", "+=", "'\\n\\label{fig:%s}'", "%", "(", "label_str", ",", ")", "#figure_fmtstr = ut.codeblock(", "# r'''", "# \\begin{figure*}", "# \\begin{center}", "# %s", "# \\end{center}", "# \\end{figure*}", "# '''", "#)", "figure_fmtstr", "=", "ut", ".", "codeblock", "(", "r'''\n \\begin{figure}[ht!]\n \\centering\n %s\n \\end{figure}\n '''", ")", "figure_str", "=", "figure_fmtstr", "%", "(", "figure_body", ")", "return", "figure_str" ]
r""" Args: fpath_list (list): dpath (str): directory relative to main tex file Returns: str: figure_str CommandLine: python -m utool.util_latex --test-get_latex_figure_str Example: >>> # DISABLE_DOCTEST >>> from utool.util_latex import * # NOQA >>> fpath_list = ['figures/foo.png'] >>> figure_str = get_latex_figure_str(fpath_list) >>> result = str(figure_str) >>> print(result)
[ "r", "Args", ":", "fpath_list", "(", "list", ")", ":", "dpath", "(", "str", ")", ":", "directory", "relative", "to", "main", "tex", "file" ]
python
train
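The doctest above is disabled, so here is a hedged usage sketch of the same function; it assumes the utool package is installed, and the file names, caption, and label are illustrative.

# Hedged usage sketch; assumes utool is installed. Output abridged in comments.
from utool.util_latex import get_latex_figure_str

figure_str = get_latex_figure_str(
    ['figures/a.png', 'figures/b.png'],   # two paths -> two subfigures
    nCols=2, caption_str='Two panels', label_str='panels')
print(figure_str)   # a \begin{figure}[ht!] block wrapping both subfigures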
xtuml/pyxtuml
xtuml/load.py
https://github.com/xtuml/pyxtuml/blob/7dd9343b9a0191d1db1887ab9288d0a026608d9a/xtuml/load.py#L274-L282
def populate_unique_identifiers(self, metamodel):
    '''
    Populate a *metamodel* with class unique identifiers previously
    encountered from input.
    '''
    for stmt in self.statements:
        if isinstance(stmt, CreateUniqueStmt):
            metamodel.define_unique_identifier(stmt.kind, stmt.name,
                                               *stmt.attributes)
[ "def", "populate_unique_identifiers", "(", "self", ",", "metamodel", ")", ":", "for", "stmt", "in", "self", ".", "statements", ":", "if", "isinstance", "(", "stmt", ",", "CreateUniqueStmt", ")", ":", "metamodel", ".", "define_unique_identifier", "(", "stmt", ".", "kind", ",", "stmt", ".", "name", ",", "*", "stmt", ".", "attributes", ")" ]
Populate a *metamodel* with class unique identifiers previously encountered from input.
[ "Populate", "a", "*", "metamodel", "*", "with", "class", "unique", "identifiers", "previously", "encountered", "from", "input", "." ]
python
test
ThreshingFloor/libtf
libtf/logparsers/tf_log_base.py
https://github.com/ThreshingFloor/libtf/blob/f1a8710f750639c9b9e2a468ece0d2923bf8c3df/libtf/logparsers/tf_log_base.py#L80-L92
def reduce(self, show_noisy=False):
    """
    Yield the reduced log lines

    :param show_noisy: If this is true, shows the lines that were removed as noise.
        If this is false, it shows the reduced (quiet) log.
    """
    if not show_noisy:
        for log in self.quiet_logs:
            yield log['raw'].strip()
    else:
        for log in self.noisy_logs:
            yield log['raw'].strip()
[ "def", "reduce", "(", "self", ",", "show_noisy", "=", "False", ")", ":", "if", "not", "show_noisy", ":", "for", "log", "in", "self", ".", "quiet_logs", ":", "yield", "log", "[", "'raw'", "]", ".", "strip", "(", ")", "else", ":", "for", "log", "in", "self", ".", "noisy_logs", ":", "yield", "log", "[", "'raw'", "]", ".", "strip", "(", ")" ]
Yield the reduced log lines

:param show_noisy: If this is true, shows the lines that were removed as noise.
    If this is false, it shows the reduced (quiet) log.
[ "Yield", "the", "reduced", "log", "lines" ]
python
train
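A minimal sketch of the generator's contract: reduce() only touches the quiet_logs/noisy_logs attributes, each a list of {'raw': ...} dicts, so a stand-in object (not the real parser, whose constructor is not shown above) is enough to exercise the equivalent logic.

class FakeLog(object):
    quiet_logs = [{'raw': 'kept line\n'}]
    noisy_logs = [{'raw': 'dropped line\n'}]

    def reduce(self, show_noisy=False):   # equivalent to the method above
        logs = self.noisy_logs if show_noisy else self.quiet_logs
        for log in logs:
            yield log['raw'].strip()

print(list(FakeLog().reduce()))                 # ['kept line']
print(list(FakeLog().reduce(show_noisy=True)))  # ['dropped line']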
mitsei/dlkit
dlkit/json_/hierarchy/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/hierarchy/sessions.py#L555-L570
def remove_root(self, id_):
    """Removes a root node.

    arg:    id (osid.id.Id): the ``Id`` of the node
    raise:  NotFound - ``id`` was not found or not in hierarchy
    raise:  NullArgument - ``id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    result = self._rls.get_relationships_by_genus_type_for_peers(self._phantom_root_id,
                                                                 id_,
                                                                 self._relationship_type)
    if not bool(result.available()):
        raise errors.NotFound()
    self._ras.delete_relationship(result.get_next_relationship().get_id())
    self._adopt_orphans(id_)
[ "def", "remove_root", "(", "self", ",", "id_", ")", ":", "result", "=", "self", ".", "_rls", ".", "get_relationships_by_genus_type_for_peers", "(", "self", ".", "_phantom_root_id", ",", "id_", ",", "self", ".", "_relationship_type", ")", "if", "not", "bool", "(", "result", ".", "available", "(", ")", ")", ":", "raise", "errors", ".", "NotFound", "(", ")", "self", ".", "_ras", ".", "delete_relationship", "(", "result", ".", "get_next_relationship", "(", ")", ".", "get_id", "(", ")", ")", "self", ".", "_adopt_orphans", "(", "id_", ")" ]
Removes a root node.

arg:    id (osid.id.Id): the ``Id`` of the node
raise:  NotFound - ``id`` was not found or not in hierarchy
raise:  NullArgument - ``id`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
[ "Removes", "a", "root", "node", "." ]
python
train
uber/doubles
doubles/class_double.py
https://github.com/uber/doubles/blob/15e68dcf98f709b19a581915fa6af5ef49ebdd8a/doubles/class_double.py#L7-L20
def patch_class(input_class):
    """Create a new class based on the input_class.

    :param class input_class: The class to patch.
    :rtype class:
    """
    class Instantiator(object):
        @classmethod
        def _doubles__new__(self, *args, **kwargs):
            pass

    new_class = type(input_class.__name__, (input_class, Instantiator), {})
    return new_class
[ "def", "patch_class", "(", "input_class", ")", ":", "class", "Instantiator", "(", "object", ")", ":", "@", "classmethod", "def", "_doubles__new__", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "pass", "new_class", "=", "type", "(", "input_class", ".", "__name__", ",", "(", "input_class", ",", "Instantiator", ")", ",", "{", "}", ")", "return", "new_class" ]
Create a new class based on the input_class. :param class input_class: The class to patch. :rtype class:
[ "Create", "a", "new", "class", "based", "on", "the", "input_class", "." ]
python
train
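The behavior is easy to verify with a plain class; this sketch assumes only that the doubles package is importable.

from doubles.class_double import patch_class

class Widget(object):
    pass

Patched = patch_class(Widget)
assert Patched.__name__ == 'Widget'           # original class name is kept
assert issubclass(Patched, Widget)            # still usable as a Widget
assert hasattr(Patched, '_doubles__new__')    # Instantiator hook is mixed in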
titusjan/argos
argos/repo/rtiplugins/pillowio.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/repo/rtiplugins/pillowio.py#L135-L153
def dimensionNames(self): """ Returns ['Y', 'X', 'Band']. The underlying array is expected to be 3-dimensional. If this is not the case we fall back on the default dimension names ['Dim-0', 'Dim-1', ...] """ if self._array is None: return [] if self._array.ndim == 2: return ['Y', 'X'] elif self._array.ndim == 3: return ['Y', 'X', 'Band'] else: # Defensive programming: fall back on default names msg = "Expected 3D image. Got: {}".format(self._array.ndim) if DEBUGGING: raise ValueError(msg) logger.warn(msg) return super(PillowFileRti, self).dimensionNames
[ "def", "dimensionNames", "(", "self", ")", ":", "if", "self", ".", "_array", "is", "None", ":", "return", "[", "]", "if", "self", ".", "_array", ".", "ndim", "==", "2", ":", "return", "[", "'Y'", ",", "'X'", "]", "elif", "self", ".", "_array", ".", "ndim", "==", "3", ":", "return", "[", "'Y'", ",", "'X'", ",", "'Band'", "]", "else", ":", "# Defensive programming: fall back on default names", "msg", "=", "\"Expected 3D image. Got: {}\"", ".", "format", "(", "self", ".", "_array", ".", "ndim", ")", "if", "DEBUGGING", ":", "raise", "ValueError", "(", "msg", ")", "logger", ".", "warn", "(", "msg", ")", "return", "super", "(", "PillowFileRti", ",", "self", ")", ".", "dimensionNames" ]
Returns ['Y', 'X', 'Band']. The underlying array is expected to be 3-dimensional. If this is not the case we fall back on the default dimension names ['Dim-0', 'Dim-1', ...]
[ "Returns", "[", "Y", "X", "Band", "]", ".", "The", "underlying", "array", "is", "expected", "to", "be", "3", "-", "dimensional", ".", "If", "this", "is", "not", "the", "case", "we", "fall", "back", "on", "the", "default", "dimension", "names", "[", "Dim", "-", "0", "Dim", "-", "1", "...", "]" ]
python
train
pallets/werkzeug
examples/simplewiki/actions.py
https://github.com/pallets/werkzeug/blob/a220671d66755a94630a212378754bb432811158/examples/simplewiki/actions.py#L45-L85
def on_edit(request, page_name): """Edit the current revision of a page.""" change_note = error = "" revision = ( Revision.query.filter( (Page.name == page_name) & (Page.page_id == Revision.page_id) ) .order_by(Revision.revision_id.desc()) .first() ) if revision is None: page = None else: page = revision.page if request.method == "POST": text = request.form.get("text") if request.form.get("cancel") or revision and revision.text == text: return redirect(href(page.name)) elif not text: error = "You cannot save empty revisions." else: change_note = request.form.get("change_note", "") if page is None: page = Page(page_name) session.add(page) session.add(Revision(page, text, change_note)) session.commit() return redirect(href(page.name)) return Response( generate_template( "action_edit.html", revision=revision, page=page, new=page is None, page_name=page_name, change_note=change_note, error=error, ) )
[ "def", "on_edit", "(", "request", ",", "page_name", ")", ":", "change_note", "=", "error", "=", "\"\"", "revision", "=", "(", "Revision", ".", "query", ".", "filter", "(", "(", "Page", ".", "name", "==", "page_name", ")", "&", "(", "Page", ".", "page_id", "==", "Revision", ".", "page_id", ")", ")", ".", "order_by", "(", "Revision", ".", "revision_id", ".", "desc", "(", ")", ")", ".", "first", "(", ")", ")", "if", "revision", "is", "None", ":", "page", "=", "None", "else", ":", "page", "=", "revision", ".", "page", "if", "request", ".", "method", "==", "\"POST\"", ":", "text", "=", "request", ".", "form", ".", "get", "(", "\"text\"", ")", "if", "request", ".", "form", ".", "get", "(", "\"cancel\"", ")", "or", "revision", "and", "revision", ".", "text", "==", "text", ":", "return", "redirect", "(", "href", "(", "page", ".", "name", ")", ")", "elif", "not", "text", ":", "error", "=", "\"You cannot save empty revisions.\"", "else", ":", "change_note", "=", "request", ".", "form", ".", "get", "(", "\"change_note\"", ",", "\"\"", ")", "if", "page", "is", "None", ":", "page", "=", "Page", "(", "page_name", ")", "session", ".", "add", "(", "page", ")", "session", ".", "add", "(", "Revision", "(", "page", ",", "text", ",", "change_note", ")", ")", "session", ".", "commit", "(", ")", "return", "redirect", "(", "href", "(", "page", ".", "name", ")", ")", "return", "Response", "(", "generate_template", "(", "\"action_edit.html\"", ",", "revision", "=", "revision", ",", "page", "=", "page", ",", "new", "=", "page", "is", "None", ",", "page_name", "=", "page_name", ",", "change_note", "=", "change_note", ",", "error", "=", "error", ",", ")", ")" ]
Edit the current revision of a page.
[ "Edit", "the", "current", "revision", "of", "a", "page", "." ]
python
train
cloud-custodian/cloud-custodian
tools/sandbox/c7n_sphere11/c7n_sphere11/controller.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/tools/sandbox/c7n_sphere11/c7n_sphere11/controller.py#L50-L60
def get_session(self, account_id):
    """Get an active session in the target account."""
    if account_id not in self.account_sessions:
        if account_id not in self.config['accounts']:
            raise AccountNotFound("account:%s is unknown" % account_id)
        self.account_sessions[account_id] = s = assumed_session(
            self.config['accounts'][account_id]['role'], "Sphere11")
        s._session.user_agent_name = "Sphere11"
        s._session.user_agent_version = "0.07"
    return self.account_sessions[account_id]
[ "def", "get_session", "(", "self", ",", "account_id", ")", ":", "if", "account_id", "not", "in", "self", ".", "account_sessions", ":", "if", "account_id", "not", "in", "self", ".", "config", "[", "'accounts'", "]", ":", "raise", "AccountNotFound", "(", "\"account:%s is unknown\"", "%", "account_id", ")", "self", ".", "account_sessions", "[", "account_id", "]", "=", "s", "=", "assumed_session", "(", "self", ".", "config", "[", "'accounts'", "]", "[", "account_id", "]", "[", "'role'", "]", ",", "\"Sphere11\"", ")", "s", ".", "_session", ".", "user_agent_name", "=", "\"Sphere11\"", "s", ".", "_session", ".", "user_agent_version", "=", "\"0.07\"", "return", "self", ".", "account_sessions", "[", "account_id", "]" ]
Get an active session in the target account.
[ "Get", "an", "active", "session", "in", "the", "target", "account", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/trax/models/transformer.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/trax/models/transformer.py#L123-L151
def DecoderLayer(feature_depth, feedforward_depth, num_heads, dropout, mode): """Transformer decoder layer. Args: feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) mode: str: 'train' or 'eval' Returns: the layer. """ return layers.Serial( layers.Residual( # Self-attention block. layers.LayerNorm(), layers.Branch(), layers.Parallel(layers.Identity(), # activation for (q, k, v) layers.CausalMask(axis=-2)), # attention mask layers.MultiHeadedAttention(feature_depth, num_heads=num_heads, dropout=dropout, mode=mode), layers.Dropout(rate=dropout, mode=mode) ), ResidualFeedForward(feature_depth, feedforward_depth, dropout, mode=mode) )
[ "def", "DecoderLayer", "(", "feature_depth", ",", "feedforward_depth", ",", "num_heads", ",", "dropout", ",", "mode", ")", ":", "return", "layers", ".", "Serial", "(", "layers", ".", "Residual", "(", "# Self-attention block.", "layers", ".", "LayerNorm", "(", ")", ",", "layers", ".", "Branch", "(", ")", ",", "layers", ".", "Parallel", "(", "layers", ".", "Identity", "(", ")", ",", "# activation for (q, k, v)", "layers", ".", "CausalMask", "(", "axis", "=", "-", "2", ")", ")", ",", "# attention mask", "layers", ".", "MultiHeadedAttention", "(", "feature_depth", ",", "num_heads", "=", "num_heads", ",", "dropout", "=", "dropout", ",", "mode", "=", "mode", ")", ",", "layers", ".", "Dropout", "(", "rate", "=", "dropout", ",", "mode", "=", "mode", ")", ")", ",", "ResidualFeedForward", "(", "feature_depth", ",", "feedforward_depth", ",", "dropout", ",", "mode", "=", "mode", ")", ")" ]
Transformer decoder layer. Args: feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) mode: str: 'train' or 'eval' Returns: the layer.
[ "Transformer", "decoder", "layer", "." ]
python
train
Shapeways/coyote_framework
coyote_framework/util/apps/objects.py
https://github.com/Shapeways/coyote_framework/blob/cb29899b984a21d56bf65d0b1d907073948fe16c/coyote_framework/util/apps/objects.py#L6-L12
def objectify(dictionary, name='Object'):
    """Converts a dictionary into a named tuple (shallow)
    """
    o = namedtuple(name, dictionary.keys())(*dictionary.values())
    return o
[ "def", "objectify", "(", "dictionary", ",", "name", "=", "'Object'", ")", ":", "o", "=", "namedtuple", "(", "name", ",", "dictionary", ".", "keys", "(", ")", ")", "(", "*", "dictionary", ".", "values", "(", ")", ")", "return", "o" ]
Converts a dictionary into a named tuple (shallow)
[ "Converts", "a", "dictionary", "into", "a", "named", "tuple", "(", "shallow", ")" ]
python
train
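A quick demonstration of the shallow conversion; it relies on dict keys and values iterating in matching order, which Python 3.7+ guarantees, and assumes the module is importable.

from coyote_framework.util.apps.objects import objectify

point = objectify({'x': 1, 'y': 2}, name='Point')
print(point.x, point.y)    # 1 2
print(point)               # Point(x=1, y=2)

cfg = objectify({'db': {'host': 'localhost'}})
print(cfg.db['host'])      # shallow: nested dicts stay plain dicts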
rq/Flask-RQ2
src/flask_rq2/functions.py
https://github.com/rq/Flask-RQ2/blob/58eedf6f0cd7bcde4ccd787074762ea08f531337/src/flask_rq2/functions.py#L223-L279
def cron(self, pattern, name, *args, **kwargs):
    """
    A function to set up an RQ job as a cronjob::

        @rq.job('low', timeout=60)
        def add(x, y):
            return x + y

        add.cron('* * * * *', 'add-some-numbers', 1, 2, timeout=10)

    :param \\*args: The positional arguments to pass to the queued job.
    :param \\*\\*kwargs: The keyword arguments to pass to the queued job.
    :param pattern: A Crontab pattern.
    :type pattern: str
    :param name: The name of the cronjob.
    :type name: str
    :param queue: Name of the queue to queue in, defaults to the queue of
        the job or :attr:`~flask_rq2.RQ.default_queue`.
    :type queue: str
    :param timeout: The job timeout in seconds. If not provided, uses
        the job's timeout or :attr:`~flask_rq2.RQ.default_timeout`.
    :type timeout: int
    :param description: Description of the job.
    :type description: str
    :param repeat: The number of times the job needs to be repeatedly
        queued via the cronjob. Take care only using this for cronjobs
        that don't already repeat themselves natively due to their
        crontab.
    :type repeat: int
    :return: An RQ job instance.
    :rtype: ~flask_rq2.job.FlaskJob
    """
    queue_name = kwargs.pop('queue', self.queue_name)
    timeout = kwargs.pop('timeout', self.timeout)
    description = kwargs.pop('description', None)
    repeat = kwargs.pop('repeat', None)
    return self.rq.get_scheduler().cron(
        pattern,
        self.wrapped,
        args=args,
        kwargs=kwargs,
        repeat=repeat,
        queue_name=queue_name,
        id='cron-%s' % name,
        timeout=timeout,
        description=description,
    )
[ "def", "cron", "(", "self", ",", "pattern", ",", "name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "queue_name", "=", "kwargs", ".", "pop", "(", "'queue'", ",", "self", ".", "queue_name", ")", "timeout", "=", "kwargs", ".", "pop", "(", "'timeout'", ",", "self", ".", "timeout", ")", "description", "=", "kwargs", ".", "pop", "(", "'description'", ",", "None", ")", "repeat", "=", "kwargs", ".", "pop", "(", "'repeat'", ",", "None", ")", "return", "self", ".", "rq", ".", "get_scheduler", "(", ")", ".", "cron", "(", "pattern", ",", "self", ".", "wrapped", ",", "args", "=", "args", ",", "kwargs", "=", "kwargs", ",", "repeat", "=", "repeat", ",", "queue_name", "=", "queue_name", ",", "id", "=", "'cron-%s'", "%", "name", ",", "timeout", "=", "timeout", ",", "description", "=", "description", ",", ")" ]
A function to set up an RQ job as a cronjob::

    @rq.job('low', timeout=60)
    def add(x, y):
        return x + y

    add.cron('* * * * *', 'add-some-numbers', 1, 2, timeout=10)

:param \\*args: The positional arguments to pass to the queued job.
:param \\*\\*kwargs: The keyword arguments to pass to the queued job.
:param pattern: A Crontab pattern.
:type pattern: str
:param name: The name of the cronjob.
:type name: str
:param queue: Name of the queue to queue in, defaults to the queue of
    the job or :attr:`~flask_rq2.RQ.default_queue`.
:type queue: str
:param timeout: The job timeout in seconds. If not provided, uses
    the job's timeout or :attr:`~flask_rq2.RQ.default_timeout`.
:type timeout: int
:param description: Description of the job.
:type description: str
:param repeat: The number of times the job needs to be repeatedly
    queued via the cronjob. Take care only using this for cronjobs
    that don't already repeat themselves natively due to their crontab.
:type repeat: int
:return: An RQ job instance.
:rtype: ~flask_rq2.job.FlaskJob
[ "A", "function", "to", "setup", "a", "RQ", "job", "as", "a", "cronjob", "::" ]
python
train
3DLIRIOUS/MeshLabXML
meshlabxml/transform.py
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/transform.py#L623-L655
def wrap2cylinder(script, radius=1, pitch=0, taper=0, pitch_func=None, taper_func=None): """Deform mesh around cylinder of radius and axis z y = 0 will be on the surface of radius "radius" pitch != 0 will create a helix, with distance "pitch" traveled in z for each rotation taper = change in r over z. E.g. a value of 0.5 will shrink r by 0.5 for every z length of 1 """ """vert_function(s=s, x='(%s+y-taper)*sin(x/(%s+y))' % (radius, radius), y='(%s+y)*cos(x/(%s+y))' % (radius, radius), z='z-%s*x/(2*%s*(%s+y))' % (pitch, pi, radius))""" if pitch_func is None: pitch_func = '-(pitch)*x/(2*pi*(radius))' pitch_func = pitch_func.replace( 'pitch', str(pitch)).replace( 'pi', str(math.pi)).replace( 'radius', str(radius)) if taper_func is None: taper_func = '-(taper)*(pitch_func)' taper_func = taper_func.replace( 'taper', str(taper)).replace( 'pitch_func', str(pitch_func)).replace( 'pi', str(math.pi)) x_func = '(y+(radius)+(taper_func))*sin(x/(radius))'.replace( 'radius', str(radius)).replace('taper_func', str(taper_func)) y_func = '(y+(radius)+(taper_func))*cos(x/(radius))'.replace( 'radius', str(radius)).replace('taper_func', str(taper_func)) z_func = 'z+(pitch_func)'.replace('pitch_func', str(pitch_func)) vert_function(script, x_func, y_func, z_func) return None
[ "def", "wrap2cylinder", "(", "script", ",", "radius", "=", "1", ",", "pitch", "=", "0", ",", "taper", "=", "0", ",", "pitch_func", "=", "None", ",", "taper_func", "=", "None", ")", ":", "\"\"\"vert_function(s=s, x='(%s+y-taper)*sin(x/(%s+y))' % (radius, radius),\n y='(%s+y)*cos(x/(%s+y))' % (radius, radius),\n z='z-%s*x/(2*%s*(%s+y))' % (pitch, pi, radius))\"\"\"", "if", "pitch_func", "is", "None", ":", "pitch_func", "=", "'-(pitch)*x/(2*pi*(radius))'", "pitch_func", "=", "pitch_func", ".", "replace", "(", "'pitch'", ",", "str", "(", "pitch", ")", ")", ".", "replace", "(", "'pi'", ",", "str", "(", "math", ".", "pi", ")", ")", ".", "replace", "(", "'radius'", ",", "str", "(", "radius", ")", ")", "if", "taper_func", "is", "None", ":", "taper_func", "=", "'-(taper)*(pitch_func)'", "taper_func", "=", "taper_func", ".", "replace", "(", "'taper'", ",", "str", "(", "taper", ")", ")", ".", "replace", "(", "'pitch_func'", ",", "str", "(", "pitch_func", ")", ")", ".", "replace", "(", "'pi'", ",", "str", "(", "math", ".", "pi", ")", ")", "x_func", "=", "'(y+(radius)+(taper_func))*sin(x/(radius))'", ".", "replace", "(", "'radius'", ",", "str", "(", "radius", ")", ")", ".", "replace", "(", "'taper_func'", ",", "str", "(", "taper_func", ")", ")", "y_func", "=", "'(y+(radius)+(taper_func))*cos(x/(radius))'", ".", "replace", "(", "'radius'", ",", "str", "(", "radius", ")", ")", ".", "replace", "(", "'taper_func'", ",", "str", "(", "taper_func", ")", ")", "z_func", "=", "'z+(pitch_func)'", ".", "replace", "(", "'pitch_func'", ",", "str", "(", "pitch_func", ")", ")", "vert_function", "(", "script", ",", "x_func", ",", "y_func", ",", "z_func", ")", "return", "None" ]
Deform mesh around cylinder of radius and axis z y = 0 will be on the surface of radius "radius" pitch != 0 will create a helix, with distance "pitch" traveled in z for each rotation taper = change in r over z. E.g. a value of 0.5 will shrink r by 0.5 for every z length of 1
[ "Deform", "mesh", "around", "cylinder", "of", "radius", "and", "axis", "z" ]
python
test
cloud9ers/gurumate
environment/lib/python2.7/site-packages/nose/plugins/prof.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/nose/plugins/prof.py#L33-L54
def options(self, parser, env): """Register commandline options. """ if not self.available(): return Plugin.options(self, parser, env) parser.add_option('--profile-sort', action='store', dest='profile_sort', default=env.get('NOSE_PROFILE_SORT', 'cumulative'), metavar="SORT", help="Set sort order for profiler output") parser.add_option('--profile-stats-file', action='store', dest='profile_stats_file', metavar="FILE", default=env.get('NOSE_PROFILE_STATS_FILE'), help='Profiler stats file; default is a new ' 'temp file on each run') parser.add_option('--profile-restrict', action='append', dest='profile_restrict', metavar="RESTRICT", default=env.get('NOSE_PROFILE_RESTRICT'), help="Restrict profiler output. See help for " "pstats.Stats for details")
[ "def", "options", "(", "self", ",", "parser", ",", "env", ")", ":", "if", "not", "self", ".", "available", "(", ")", ":", "return", "Plugin", ".", "options", "(", "self", ",", "parser", ",", "env", ")", "parser", ".", "add_option", "(", "'--profile-sort'", ",", "action", "=", "'store'", ",", "dest", "=", "'profile_sort'", ",", "default", "=", "env", ".", "get", "(", "'NOSE_PROFILE_SORT'", ",", "'cumulative'", ")", ",", "metavar", "=", "\"SORT\"", ",", "help", "=", "\"Set sort order for profiler output\"", ")", "parser", ".", "add_option", "(", "'--profile-stats-file'", ",", "action", "=", "'store'", ",", "dest", "=", "'profile_stats_file'", ",", "metavar", "=", "\"FILE\"", ",", "default", "=", "env", ".", "get", "(", "'NOSE_PROFILE_STATS_FILE'", ")", ",", "help", "=", "'Profiler stats file; default is a new '", "'temp file on each run'", ")", "parser", ".", "add_option", "(", "'--profile-restrict'", ",", "action", "=", "'append'", ",", "dest", "=", "'profile_restrict'", ",", "metavar", "=", "\"RESTRICT\"", ",", "default", "=", "env", ".", "get", "(", "'NOSE_PROFILE_RESTRICT'", ")", ",", "help", "=", "\"Restrict profiler output. See help for \"", "\"pstats.Stats for details\"", ")" ]
Register commandline options.
[ "Register", "commandline", "options", "." ]
python
test
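The env-var-default pattern used above also works outside nose; this standalone optparse sketch reuses one option name purely for illustration.

import os
from optparse import OptionParser

parser = OptionParser()
parser.add_option('--profile-sort', action='store', dest='profile_sort',
                  default=os.environ.get('NOSE_PROFILE_SORT', 'cumulative'),
                  help='Set sort order for profiler output')

opts, _ = parser.parse_args(['--profile-sort', 'time'])
assert opts.profile_sort == 'time'   # an explicit flag beats the env default
opts, _ = parser.parse_args([])      # falls back to $NOSE_PROFILE_SORT, else 'cumulative'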
polyaxon/rhea
rhea/manager.py
https://github.com/polyaxon/rhea/blob/f47b59777cd996d834a0497a1ab442541aaa8a62/rhea/manager.py#L231-L285
def get_dict(self,
             key,
             is_list=False,
             is_optional=False,
             is_secret=False,
             is_local=False,
             default=None,
             options=None):
    """
    Get the value corresponding to the key and convert it to `dict`.

    Args:
        key: the dict key.
        is_list: If this is one element or a list of elements.
        is_optional: To raise an error if key was not found.
        is_secret: If the key is a secret.
        is_local: If the key is local to this service.
        default: default value if is_optional is True.
        options: list/tuple if provided, the value must be one of these values.

    Returns:
        `dict`: value corresponding to the key.
    """
    def convert_to_dict(x):
        x = json.loads(x)
        if not isinstance(x, Mapping):
            raise RheaError("Cannot convert value `{}` (key: `{}`) to `dict`".format(x, key))
        return x

    if is_list:
        return self._get_typed_list_value(key=key,
                                          target_type=Mapping,
                                          type_convert=convert_to_dict,
                                          is_optional=is_optional,
                                          is_secret=is_secret,
                                          is_local=is_local,
                                          default=default,
                                          options=options)
    value = self._get_typed_value(key=key,
                                  target_type=Mapping,
                                  type_convert=convert_to_dict,
                                  is_optional=is_optional,
                                  is_secret=is_secret,
                                  is_local=is_local,
                                  default=default,
                                  options=options)

    if not value:
        return default

    if not isinstance(value, Mapping):
        raise RheaError("Cannot convert value `{}` (key: `{}`) "
                        "to `dict`".format(value, key))
    return value
[ "def", "get_dict", "(", "self", ",", "key", ",", "is_list", "=", "False", ",", "is_optional", "=", "False", ",", "is_secret", "=", "False", ",", "is_local", "=", "False", ",", "default", "=", "None", ",", "options", "=", "None", ")", ":", "def", "convert_to_dict", "(", "x", ")", ":", "x", "=", "json", ".", "loads", "(", "x", ")", "if", "not", "isinstance", "(", "x", ",", "Mapping", ")", ":", "raise", "RheaError", "(", "\"Cannot convert value `{}` (key: `{}`) to `dict`\"", ".", "format", "(", "x", ",", "key", ")", ")", "return", "x", "if", "is_list", ":", "return", "self", ".", "_get_typed_list_value", "(", "key", "=", "key", ",", "target_type", "=", "Mapping", ",", "type_convert", "=", "convert_to_dict", ",", "is_optional", "=", "is_optional", ",", "is_secret", "=", "is_secret", ",", "is_local", "=", "is_local", ",", "default", "=", "default", ",", "options", "=", "options", ")", "value", "=", "self", ".", "_get_typed_value", "(", "key", "=", "key", ",", "target_type", "=", "Mapping", ",", "type_convert", "=", "convert_to_dict", ",", "is_optional", "=", "is_optional", ",", "is_secret", "=", "is_secret", ",", "is_local", "=", "is_local", ",", "default", "=", "default", ",", "options", "=", "options", ")", "if", "not", "value", ":", "return", "default", "if", "not", "isinstance", "(", "value", ",", "Mapping", ")", ":", "raise", "RheaError", "(", "\"Cannot convert value `{}` (key: `{}`) \"", "\"to `dict`\"", ".", "format", "(", "value", ",", "key", ")", ")", "return", "value" ]
Get the value corresponding to the key and convert it to `dict`.

Args:
    key: the dict key.
    is_list: If this is one element or a list of elements.
    is_optional: To raise an error if key was not found.
    is_secret: If the key is a secret.
    is_local: If the key is local to this service.
    default: default value if is_optional is True.
    options: list/tuple if provided, the value must be one of these values.

Returns:
    `dict`: value corresponding to the key.
[ "Get", "a", "the", "value", "corresponding", "to", "the", "key", "and", "converts", "it", "to", "dict", "." ]
python
train
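The inner converter above is self-contained enough to sketch on its own; the trailing get_dict call shape assumes a configured manager instance named config, which is an assumption.

import json
from collections.abc import Mapping

def convert_to_dict(x, key='demo'):
    # mirrors the nested helper above: parse JSON, insist on a mapping
    x = json.loads(x)
    if not isinstance(x, Mapping):
        raise ValueError("Cannot convert value `{}` (key: `{}`) to `dict`".format(x, key))
    return x

print(convert_to_dict('{"host": "localhost", "port": 5432}'))
# persistence = config.get_dict('PERSISTENCE', is_optional=True, default={})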
google-research/batch-ppo
agents/algorithms/ppo/ppo.py
https://github.com/google-research/batch-ppo/blob/3d09705977bae4e7c3eb20339a3b384d2a5531e4/agents/algorithms/ppo/ppo.py#L382-L415
def _update_step(self, sequence): """Compute the current combined loss and perform a gradient update step. The sequences must be a dict containing the keys `length` and `sequence`, where the latter is a tuple containing observations, actions, parameters of the behavioral policy, rewards, and advantages. Args: sequence: Sequences of episodes or chunks of episodes. Returns: Tuple of value loss, policy loss, and summary tensor. """ observ, action, old_policy_params, reward, advantage = sequence['sequence'] length = sequence['length'] old_policy = self._policy_type(**old_policy_params) value_loss, value_summary = self._value_loss(observ, reward, length) network = self._network(observ, length) policy_loss, policy_summary = self._policy_loss( old_policy, network.policy, action, advantage, length) network_loss = network.get('loss', 0.0) loss = policy_loss + value_loss + tf.reduce_mean(network_loss) gradients, variables = ( zip(*self._optimizer.compute_gradients(loss))) optimize = self._optimizer.apply_gradients( zip(gradients, variables)) summary = tf.summary.merge([ value_summary, policy_summary, tf.summary.histogram('network_loss', network_loss), tf.summary.scalar('avg_network_loss', tf.reduce_mean(network_loss)), tf.summary.scalar('gradient_norm', tf.global_norm(gradients)), utility.gradient_summaries(zip(gradients, variables))]) with tf.control_dependencies([optimize]): return [tf.identity(x) for x in (value_loss, policy_loss, summary)]
[ "def", "_update_step", "(", "self", ",", "sequence", ")", ":", "observ", ",", "action", ",", "old_policy_params", ",", "reward", ",", "advantage", "=", "sequence", "[", "'sequence'", "]", "length", "=", "sequence", "[", "'length'", "]", "old_policy", "=", "self", ".", "_policy_type", "(", "*", "*", "old_policy_params", ")", "value_loss", ",", "value_summary", "=", "self", ".", "_value_loss", "(", "observ", ",", "reward", ",", "length", ")", "network", "=", "self", ".", "_network", "(", "observ", ",", "length", ")", "policy_loss", ",", "policy_summary", "=", "self", ".", "_policy_loss", "(", "old_policy", ",", "network", ".", "policy", ",", "action", ",", "advantage", ",", "length", ")", "network_loss", "=", "network", ".", "get", "(", "'loss'", ",", "0.0", ")", "loss", "=", "policy_loss", "+", "value_loss", "+", "tf", ".", "reduce_mean", "(", "network_loss", ")", "gradients", ",", "variables", "=", "(", "zip", "(", "*", "self", ".", "_optimizer", ".", "compute_gradients", "(", "loss", ")", ")", ")", "optimize", "=", "self", ".", "_optimizer", ".", "apply_gradients", "(", "zip", "(", "gradients", ",", "variables", ")", ")", "summary", "=", "tf", ".", "summary", ".", "merge", "(", "[", "value_summary", ",", "policy_summary", ",", "tf", ".", "summary", ".", "histogram", "(", "'network_loss'", ",", "network_loss", ")", ",", "tf", ".", "summary", ".", "scalar", "(", "'avg_network_loss'", ",", "tf", ".", "reduce_mean", "(", "network_loss", ")", ")", ",", "tf", ".", "summary", ".", "scalar", "(", "'gradient_norm'", ",", "tf", ".", "global_norm", "(", "gradients", ")", ")", ",", "utility", ".", "gradient_summaries", "(", "zip", "(", "gradients", ",", "variables", ")", ")", "]", ")", "with", "tf", ".", "control_dependencies", "(", "[", "optimize", "]", ")", ":", "return", "[", "tf", ".", "identity", "(", "x", ")", "for", "x", "in", "(", "value_loss", ",", "policy_loss", ",", "summary", ")", "]" ]
Compute the current combined loss and perform a gradient update step. The sequences must be a dict containing the keys `length` and `sequence`, where the latter is a tuple containing observations, actions, parameters of the behavioral policy, rewards, and advantages. Args: sequence: Sequences of episodes or chunks of episodes. Returns: Tuple of value loss, policy loss, and summary tensor.
[ "Compute", "the", "current", "combined", "loss", "and", "perform", "a", "gradient", "update", "step", "." ]
python
train
9wfox/tornadoweb
tornadoweb/utility.py
https://github.com/9wfox/tornadoweb/blob/2286b66fbe10e4d9f212b979664c15fa17adf378/tornadoweb/utility.py#L28-L40
def get_modules(pkg_name, module_filter = None):
    """
        Return all modules in the package that satisfy the given filter.

        Args:
            pkg_name        package name
            module_filter   module-name filter, def (module_name)
    """
    path = app_path(pkg_name)
    #py_filter = lambda f: all((fnmatch(f, "*.py"), not f.startswith("__"), module_filter and module_filter(f) or True))
    py_filter = lambda f: all((fnmatch(f, "*.pyc") or fnmatch(f, "*.py"), not f.startswith("__"), module_filter and module_filter(f) or True))
    names = [splitext(n)[0] for n in os.listdir(path) if py_filter(n)]
    return [__import__("{0}.{1}".format(pkg_name, n)).__dict__[n] for n in names]
[ "def", "get_modules", "(", "pkg_name", ",", "module_filter", "=", "None", ")", ":", "path", "=", "app_path", "(", "pkg_name", ")", "#py_filter = lambda f: all((fnmatch(f, \"*.py\"), not f.startswith(\"__\"), module_filter and module_filter(f) or True))\r", "py_filter", "=", "lambda", "f", ":", "all", "(", "(", "fnmatch", "(", "f", ",", "\"*.pyc\"", ")", "or", "fnmatch", "(", "f", ",", "\"*.py\"", ")", ",", "not", "f", ".", "startswith", "(", "\"__\"", ")", ",", "module_filter", "and", "module_filter", "(", "f", ")", "or", "True", ")", ")", "names", "=", "[", "splitext", "(", "n", ")", "[", "0", "]", "for", "n", "in", "os", ".", "listdir", "(", "path", ")", "if", "py_filter", "(", "n", ")", "]", "return", "[", "__import__", "(", "\"{0}.{1}\"", ".", "format", "(", "pkg_name", ",", "n", ")", ")", ".", "__dict__", "[", "n", "]", "for", "n", "in", "names", "]" ]
Return all modules in the package that satisfy the given filter.

Args:
    pkg_name        package name
    module_filter   module-name filter, def (module_name)
[ "返回包中所有符合条件的模块。", "参数", ":", "pkg_name", "包名称", "module_filter", "模块名过滤器", "def", "(", "module_name", ")" ]
python
train
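A hedged usage sketch; the handlers package name and the filter are illustrative assumptions, and only get_modules itself comes from the code above.

from tornadoweb.utility import get_modules

# load every non-test module from the `handlers` package (hypothetical layout)
modules = get_modules('handlers', module_filter=lambda f: not f.startswith('test'))
for module in modules:
    print(module.__name__)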
SuryaSankar/flask-sqlalchemy-booster
flask_sqlalchemy_booster/model_booster/queryable_mixin.py
https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/model_booster/queryable_mixin.py#L203-L235
def update(self, **kwargs): """Updates an instance. Args: **kwargs : Arbitrary keyword arguments. Column names are keywords and their new values are the values. Examples: >>> customer.update(email="[email protected]", name="new") """ kwargs = self._preprocess_params(kwargs) kwargs = self.preprocess_kwargs_before_update(kwargs) for key, value in kwargs.iteritems(): cls = type(self) if not hasattr(cls, key) or isinstance(getattr(cls, key), property): continue if key not in self._no_overwrite_: setattr(self, key, value) if isinstance(getattr(self, key), OrderingList): getattr(self, key).reorder() elif isinstance(getattr(cls, key), AssociationProxyInstance): target_name = getattr(cls, key).target_collection target_rel = getattr(self, target_name) if isinstance(target_rel, OrderingList): target_rel.reorder() try: self.session.commit() return self except Exception as e: self.session.rollback() raise e
[ "def", "update", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "self", ".", "_preprocess_params", "(", "kwargs", ")", "kwargs", "=", "self", ".", "preprocess_kwargs_before_update", "(", "kwargs", ")", "for", "key", ",", "value", "in", "kwargs", ".", "iteritems", "(", ")", ":", "cls", "=", "type", "(", "self", ")", "if", "not", "hasattr", "(", "cls", ",", "key", ")", "or", "isinstance", "(", "getattr", "(", "cls", ",", "key", ")", ",", "property", ")", ":", "continue", "if", "key", "not", "in", "self", ".", "_no_overwrite_", ":", "setattr", "(", "self", ",", "key", ",", "value", ")", "if", "isinstance", "(", "getattr", "(", "self", ",", "key", ")", ",", "OrderingList", ")", ":", "getattr", "(", "self", ",", "key", ")", ".", "reorder", "(", ")", "elif", "isinstance", "(", "getattr", "(", "cls", ",", "key", ")", ",", "AssociationProxyInstance", ")", ":", "target_name", "=", "getattr", "(", "cls", ",", "key", ")", ".", "target_collection", "target_rel", "=", "getattr", "(", "self", ",", "target_name", ")", "if", "isinstance", "(", "target_rel", ",", "OrderingList", ")", ":", "target_rel", ".", "reorder", "(", ")", "try", ":", "self", ".", "session", ".", "commit", "(", ")", "return", "self", "except", "Exception", "as", "e", ":", "self", ".", "session", ".", "rollback", "(", ")", "raise", "e" ]
Updates an instance. Args: **kwargs : Arbitrary keyword arguments. Column names are keywords and their new values are the values. Examples: >>> customer.update(email="[email protected]", name="new")
[ "Updates", "an", "instance", "." ]
python
train
LEMS/pylems
lems/model/simulation.py
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/model/simulation.py#L367-L397
def toxml(self): """ Exports this object into a LEMS XML object """ chxmlstr = '' for run in self.runs: chxmlstr += run.toxml() for record in self.records: chxmlstr += record.toxml() for event_record in self.event_records: chxmlstr += event_record.toxml() for data_display in self.data_displays: chxmlstr += data_display.toxml() for data_writer in self.data_writers: chxmlstr += data_writer.toxml() for event_writer in self.event_writers: chxmlstr += event_writer.toxml() if chxmlstr: xmlstr = '<Simulation>' + chxmlstr + '</Simulation>' else: xmlstr = '' return xmlstr
[ "def", "toxml", "(", "self", ")", ":", "chxmlstr", "=", "''", "for", "run", "in", "self", ".", "runs", ":", "chxmlstr", "+=", "run", ".", "toxml", "(", ")", "for", "record", "in", "self", ".", "records", ":", "chxmlstr", "+=", "record", ".", "toxml", "(", ")", "for", "event_record", "in", "self", ".", "event_records", ":", "chxmlstr", "+=", "event_record", ".", "toxml", "(", ")", "for", "data_display", "in", "self", ".", "data_displays", ":", "chxmlstr", "+=", "data_display", ".", "toxml", "(", ")", "for", "data_writer", "in", "self", ".", "data_writers", ":", "chxmlstr", "+=", "data_writer", ".", "toxml", "(", ")", "for", "event_writer", "in", "self", ".", "event_writers", ":", "chxmlstr", "+=", "event_writer", ".", "toxml", "(", ")", "if", "chxmlstr", ":", "xmlstr", "=", "'<Simulation>'", "+", "chxmlstr", "+", "'</Simulation>'", "else", ":", "xmlstr", "=", "''", "return", "xmlstr" ]
Exports this object into a LEMS XML object
[ "Exports", "this", "object", "into", "a", "LEMS", "XML", "object" ]
python
train
fermiPy/fermipy
fermipy/jobs/batch.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/batch.py#L17-L41
def get_batch_job_args(job_time=1500): """ Get the correct set of batch jobs arguments. Parameters ---------- job_time : int Expected max length of the job, in seconds. This is used to select the batch queue and set the job_check_sleep parameter that sets how often we check for job completion. Returns ------- job_args : dict Dictionary of arguments used to submit a batch job """ if DEFAULT_JOB_TYPE == 'slac': from fermipy.jobs.slac_impl import get_slac_default_args return get_slac_default_args(job_time) elif DEFAULT_JOB_TYPE == 'native': from fermipy.jobs.native_impl import get_native_default_args return get_native_default_args() return None
[ "def", "get_batch_job_args", "(", "job_time", "=", "1500", ")", ":", "if", "DEFAULT_JOB_TYPE", "==", "'slac'", ":", "from", "fermipy", ".", "jobs", ".", "slac_impl", "import", "get_slac_default_args", "return", "get_slac_default_args", "(", "job_time", ")", "elif", "DEFAULT_JOB_TYPE", "==", "'native'", ":", "from", "fermipy", ".", "jobs", ".", "native_impl", "import", "get_native_default_args", "return", "get_native_default_args", "(", ")", "return", "None" ]
Get the correct set of batch jobs arguments. Parameters ---------- job_time : int Expected max length of the job, in seconds. This is used to select the batch queue and set the job_check_sleep parameter that sets how often we check for job completion. Returns ------- job_args : dict Dictionary of arguments used to submit a batch job
[ "Get", "the", "correct", "set", "of", "batch", "jobs", "arguments", "." ]
python
train
tnkteja/myhelp
virtualEnvironment/lib/python2.7/site-packages/coverage/control.py
https://github.com/tnkteja/myhelp/blob/fb3a4809d448ad14d5b2e6ddf2e7e89ad52b71cb/virtualEnvironment/lib/python2.7/site-packages/coverage/control.py#L495-L510
def combine(self):
    """Combine together a number of similarly-named coverage data files.

    All coverage data files whose name starts with `data_file` (from the
    coverage() constructor) will be read, and combined together into the
    current measurements.

    """
    aliases = None
    if self.config.paths:
        aliases = PathAliases(self.file_locator)
        for paths in self.config.paths.values():
            result = paths[0]
            for pattern in paths[1:]:
                aliases.add(pattern, result)
    self.data.combine_parallel_data(aliases=aliases)
[ "def", "combine", "(", "self", ")", ":", "aliases", "=", "None", "if", "self", ".", "config", ".", "paths", ":", "aliases", "=", "PathAliases", "(", "self", ".", "file_locator", ")", "for", "paths", "in", "self", ".", "config", ".", "paths", ".", "values", "(", ")", ":", "result", "=", "paths", "[", "0", "]", "for", "pattern", "in", "paths", "[", "1", ":", "]", ":", "aliases", ".", "add", "(", "pattern", ",", "result", ")", "self", ".", "data", ".", "combine_parallel_data", "(", "aliases", "=", "aliases", ")" ]
Combine together a number of similarly-named coverage data files. All coverage data files whose name starts with `data_file` (from the coverage() constructor) will be read, and combined together into the current measurements.
[ "Combine", "together", "a", "number", "of", "similarly", "-", "named", "coverage", "data", "files", "." ]
python
test
rueckstiess/mtools
mtools/util/logevent.py
https://github.com/rueckstiess/mtools/blob/a6a22910c3569c0c8a3908660ca218a4557e4249/mtools/util/logevent.py#L618-L624
def w(self):
    """Extract write lock (w) counter if available (lazy)."""
    if not self._counters_calculated:
        self._counters_calculated = True
        self._extract_counters()
    return self._w
[ "def", "w", "(", "self", ")", ":", "if", "not", "self", ".", "_counters_calculated", ":", "self", ".", "_counters_calculated", "=", "True", "self", ".", "_extract_counters", "(", ")", "return", "self", ".", "_w" ]
Extract write lock (w) counter if available (lazy).
[ "Extract", "write", "lock", "(", "w", ")", "counter", "if", "available", "(", "lazy", ")", "." ]
python
train
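The lazy pattern above generalizes: compute once on first access, then serve the cached field. A standalone stand-in (not mtools' real class; the parse is faked with a simple count):

class LazyEvent(object):
    def __init__(self, line):
        self._line = line
        self._counters_calculated = False
        self._w = None

    def _extract_counters(self):
        self._w = self._line.count('w')   # stands in for the real log parse

    @property
    def w(self):
        if not self._counters_calculated:   # parse at most once
            self._counters_calculated = True
            self._extract_counters()
        return self._w

event = LazyEvent('write write')
assert event.w == 2   # first access triggers _extract_counters
assert event.w == 2   # later accesses reuse the cached value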
StagPython/StagPy
stagpy/args.py
https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/args.py#L65-L103
def parse_args(arglist=None): """Parse cmd line arguments. Update :attr:`stagpy.conf` accordingly. Args: arglist (list of str): the list of cmd line arguments. If set to None, the arguments are taken from :attr:`sys.argv`. Returns: function: the function implementing the sub command to be executed. """ climan = CLIManager(conf, **SUB_CMDS) create_complete_files(climan, CONFIG_DIR, 'stagpy', 'stagpy-git', zsh_sourceable=True) cmd_args, all_subs = climan.parse_args(arglist) sub_cmd = cmd_args.loam_sub_name if sub_cmd is None: return cmd_args.func if sub_cmd != 'config': commands.report_parsing_problems(PARSING_OUT) if conf.common.set: set_conf_str(conf, conf.common.set) if conf.common.config: commands.config_pp(all_subs) load_mplstyle() try: _steps_to_slices() except AttributeError: pass return cmd_args.func
[ "def", "parse_args", "(", "arglist", "=", "None", ")", ":", "climan", "=", "CLIManager", "(", "conf", ",", "*", "*", "SUB_CMDS", ")", "create_complete_files", "(", "climan", ",", "CONFIG_DIR", ",", "'stagpy'", ",", "'stagpy-git'", ",", "zsh_sourceable", "=", "True", ")", "cmd_args", ",", "all_subs", "=", "climan", ".", "parse_args", "(", "arglist", ")", "sub_cmd", "=", "cmd_args", ".", "loam_sub_name", "if", "sub_cmd", "is", "None", ":", "return", "cmd_args", ".", "func", "if", "sub_cmd", "!=", "'config'", ":", "commands", ".", "report_parsing_problems", "(", "PARSING_OUT", ")", "if", "conf", ".", "common", ".", "set", ":", "set_conf_str", "(", "conf", ",", "conf", ".", "common", ".", "set", ")", "if", "conf", ".", "common", ".", "config", ":", "commands", ".", "config_pp", "(", "all_subs", ")", "load_mplstyle", "(", ")", "try", ":", "_steps_to_slices", "(", ")", "except", "AttributeError", ":", "pass", "return", "cmd_args", ".", "func" ]
Parse cmd line arguments. Update :attr:`stagpy.conf` accordingly. Args: arglist (list of str): the list of cmd line arguments. If set to None, the arguments are taken from :attr:`sys.argv`. Returns: function: the function implementing the sub command to be executed.
[ "Parse", "cmd", "line", "arguments", "." ]
python
train
jonathf/chaospy
chaospy/distributions/operators/trunkation.py
https://github.com/jonathf/chaospy/blob/25ecfa7bf5608dc10c0b31d142ded0e3755f5d74/chaospy/distributions/operators/trunkation.py#L163-L194
def _ppf(self, q, left, right, cache): """ Point percentile function. Example: >>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9])) [0.1 0.2 0.9] >>> print(chaospy.Trunc(chaospy.Uniform(), 0.4).inv([0.1, 0.2, 0.9])) [0.04 0.08 0.36] >>> print(chaospy.Trunc(0.6, chaospy.Uniform()).inv([0.1, 0.2, 0.9])) [0.64 0.68 0.96] """ if isinstance(left, Dist) and left in cache: left = cache[left] if isinstance(right, Dist) and right in cache: right = cache[right] if isinstance(left, Dist): if isinstance(right, Dist): raise StochasticallyDependentError( "under-defined distribution {} or {}".format(left, right)) elif not isinstance(right, Dist): raise StochasticallyDependentError( "truncated variable indirectly depends on underlying variable") else: left = (numpy.array(left).T*numpy.ones(q.shape).T).T uloc = evaluation.evaluate_forward(right, left) return evaluation.evaluate_inverse(right, q*(1-uloc)+uloc, cache=cache) right = (numpy.array(right).T*numpy.ones(q.shape).T).T uloc = evaluation.evaluate_forward(left, right, cache=cache.copy()) return evaluation.evaluate_inverse(left, q*uloc, cache=cache)
[ "def", "_ppf", "(", "self", ",", "q", ",", "left", ",", "right", ",", "cache", ")", ":", "if", "isinstance", "(", "left", ",", "Dist", ")", "and", "left", "in", "cache", ":", "left", "=", "cache", "[", "left", "]", "if", "isinstance", "(", "right", ",", "Dist", ")", "and", "right", "in", "cache", ":", "right", "=", "cache", "[", "right", "]", "if", "isinstance", "(", "left", ",", "Dist", ")", ":", "if", "isinstance", "(", "right", ",", "Dist", ")", ":", "raise", "StochasticallyDependentError", "(", "\"under-defined distribution {} or {}\"", ".", "format", "(", "left", ",", "right", ")", ")", "elif", "not", "isinstance", "(", "right", ",", "Dist", ")", ":", "raise", "StochasticallyDependentError", "(", "\"truncated variable indirectly depends on underlying variable\"", ")", "else", ":", "left", "=", "(", "numpy", ".", "array", "(", "left", ")", ".", "T", "*", "numpy", ".", "ones", "(", "q", ".", "shape", ")", ".", "T", ")", ".", "T", "uloc", "=", "evaluation", ".", "evaluate_forward", "(", "right", ",", "left", ")", "return", "evaluation", ".", "evaluate_inverse", "(", "right", ",", "q", "*", "(", "1", "-", "uloc", ")", "+", "uloc", ",", "cache", "=", "cache", ")", "right", "=", "(", "numpy", ".", "array", "(", "right", ")", ".", "T", "*", "numpy", ".", "ones", "(", "q", ".", "shape", ")", ".", "T", ")", ".", "T", "uloc", "=", "evaluation", ".", "evaluate_forward", "(", "left", ",", "right", ",", "cache", "=", "cache", ".", "copy", "(", ")", ")", "return", "evaluation", ".", "evaluate_inverse", "(", "left", ",", "q", "*", "uloc", ",", "cache", "=", "cache", ")" ]
Point percentile function. Example: >>> print(chaospy.Uniform().inv([0.1, 0.2, 0.9])) [0.1 0.2 0.9] >>> print(chaospy.Trunc(chaospy.Uniform(), 0.4).inv([0.1, 0.2, 0.9])) [0.04 0.08 0.36] >>> print(chaospy.Trunc(0.6, chaospy.Uniform()).inv([0.1, 0.2, 0.9])) [0.64 0.68 0.96]
[ "Point", "percentile", "function", "." ]
python
train
user-cont/conu
conu/utils/probes.py
https://github.com/user-cont/conu/blob/08caae7bb6bdd265b55bb106c3da6a7946a5a352/conu/utils/probes.py#L92-L116
def _wrapper(self, q, start):
    """
    _wrapper checks the return status of Probe.fnc and provides the result for process management

    :param q: Queue for function results
    :param start: Time of function run (used for logging)
    :return: Return value or Exception
    """
    try:
        func_name = self.fnc.__name__
    except AttributeError:
        func_name = str(self.fnc)
    logger.debug("Running \"%s\" with parameters: \"%s\":\t%s/%s" % (func_name,
                                                                     str(self.kwargs),
                                                                     round(time.time() - start),
                                                                     self.timeout))
    try:
        result = self.fnc(**self.kwargs)
        # let's log only first 50 characters of the response
        logger.debug("callback result = %s", str(result)[:50])
        q.put(result)
    except self.expected_exceptions as ex:
        logger.debug("expected exception was caught: %s", ex)
        q.put(False)
    except Exception as ex:
        logger.debug("adding exception %s to queue", ex)
        q.put(ex)
[ "def", "_wrapper", "(", "self", ",", "q", ",", "start", ")", ":", "try", ":", "func_name", "=", "self", ".", "fnc", ".", "__name__", "except", "AttributeError", ":", "func_name", "=", "str", "(", "self", ".", "fnc", ")", "logger", ".", "debug", "(", "\"Running \\\"%s\\\" with parameters: \\\"%s\\\":\\t%s/%s\"", "%", "(", "func_name", ",", "str", "(", "self", ".", "kwargs", ")", ",", "round", "(", "time", ".", "time", "(", ")", "-", "start", ")", ",", "self", ".", "timeout", ")", ")", "try", ":", "result", "=", "self", ".", "fnc", "(", "*", "*", "self", ".", "kwargs", ")", "# let's log only first 50 characters of the response", "logger", ".", "debug", "(", "\"callback result = %s\"", ",", "str", "(", "result", ")", "[", ":", "50", "]", ")", "q", ".", "put", "(", "result", ")", "except", "self", ".", "expected_exceptions", "as", "ex", ":", "logger", ".", "debug", "(", "\"expected exception was caught: %s\"", ",", "ex", ")", "q", ".", "put", "(", "False", ")", "except", "Exception", "as", "ex", ":", "logger", ".", "debug", "(", "\"adding exception %s to queue\"", ",", "ex", ")", "q", ".", "put", "(", "ex", ")" ]
_wrapper checks the return status of Probe.fnc and provides the result for process management

:param q: Queue for function results
:param start: Time of function run (used for logging)
:return: Return value or Exception
[ "_wrapper", "checks", "return", "status", "of", "Probe", ".", "fnc", "and", "provides", "the", "result", "for", "process", "managing" ]
python
train
pydata/xarray
xarray/core/dataset.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/dataset.py#L923-L933
def _level_coords(self):
    """Return a mapping of all MultiIndex levels and their corresponding
    coordinate name.
    """
    level_coords = OrderedDict()
    for name, index in self.indexes.items():
        if isinstance(index, pd.MultiIndex):
            level_names = index.names
            (dim,) = self.variables[name].dims
            level_coords.update({lname: dim for lname in level_names})
    return level_coords
[ "def", "_level_coords", "(", "self", ")", ":", "level_coords", "=", "OrderedDict", "(", ")", "for", "name", ",", "index", "in", "self", ".", "indexes", ".", "items", "(", ")", ":", "if", "isinstance", "(", "index", ",", "pd", ".", "MultiIndex", ")", ":", "level_names", "=", "index", ".", "names", "(", "dim", ",", ")", "=", "self", ".", "variables", "[", "name", "]", ".", "dims", "level_coords", ".", "update", "(", "{", "lname", ":", "dim", "for", "lname", "in", "level_names", "}", ")", "return", "level_coords" ]
Return a mapping of all MultiIndex levels and their corresponding coordinate name.
[ "Return", "a", "mapping", "of", "all", "MultiIndex", "levels", "and", "their", "corresponding", "coordinate", "name", "." ]
python
train
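The mapping this method builds can be sketched with plain pandas. In the real method the dimension comes from self.variables[name].dims; the simplification below (using the coordinate name as the dimension) holds for a dimension coordinate and is an assumption of this sketch.

from collections import OrderedDict
import pandas as pd

# stand-in for Dataset.indexes: one MultiIndex coordinate over dimension "x"
indexes = {"x": pd.MultiIndex.from_product([["a", "b"], [1, 2]],
                                           names=["letter", "number"])}

level_coords = OrderedDict()
for name, index in indexes.items():
    if isinstance(index, pd.MultiIndex):
        # each named level points back at the dimension it indexes
        level_coords.update({lname: name for lname in index.names})

print(level_coords)  # OrderedDict([('letter', 'x'), ('number', 'x')])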
cltk/cltk
cltk/inflection/old_norse/phonemic_rules.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/inflection/old_norse/phonemic_rules.py#L122-L213
def add_r_ending_to_syllable(last_syllable: str, is_first=True) -> str:
    """
    Adds the -r ending to the last syllable of an Old Norse word.
    In some cases, it really adds an -r. In other cases, it only doubles the last character
    or leaves the syllable unchanged.

    >>> add_r_ending_to_syllable("arm", True)
    'armr'
    >>> add_r_ending_to_syllable("ás", True)
    'áss'
    >>> add_r_ending_to_syllable("stól", True)
    'stóll'
    >>> "jö"+add_r_ending_to_syllable("kul", False)
    'jökull'
    >>> add_r_ending_to_syllable("stein", True)
    'steinn'
    >>> 'mi'+add_r_ending_to_syllable('kil', False)
    'mikill'
    >>> add_r_ending_to_syllable('sæl', True)
    'sæll'
    >>> 'li'+add_r_ending_to_syllable('til', False)
    'litill'
    >>> add_r_ending_to_syllable('vænn', True)
    'vænn'
    >>> add_r_ending_to_syllable('lauss', True)
    'lauss'
    >>> add_r_ending_to_syllable("vin", True)
    'vinr'
    >>> add_r_ending_to_syllable("sel", True)
    'selr'
    >>> add_r_ending_to_syllable('fagr', True)
    'fagr'
    >>> add_r_ending_to_syllable('vitr', True)
    'vitr'
    >>> add_r_ending_to_syllable('vetr', True)
    'vetr'
    >>> add_r_ending_to_syllable('akr', True)
    'akr'
    >>> add_r_ending_to_syllable('Björn', True)
    'Björn'
    >>> add_r_ending_to_syllable('þurs', True)
    'þurs'
    >>> add_r_ending_to_syllable('karl', True)
    'karl'
    >>> add_r_ending_to_syllable('hrafn', True)
    'hrafn'

    :param last_syllable: last syllable of the word
    :param is_first: is it the first syllable of the word?
    :return: inflected syllable
    """
    if len(last_syllable) >= 2:
        if last_syllable[-1] in ['l', 'n', 's', 'r']:
            if last_syllable[-2] in CONSONANTS:
                # Apocope of r
                return last_syllable
            else:
                # Assimilation of r
                if len(last_syllable) >= 3 and last_syllable[-3:-1] in DIPHTHONGS:
                    return apply_raw_r_assimilation(last_syllable)
                elif last_syllable[-2] in SHORT_VOWELS and is_first:
                    # No assimilation when r is supposed to be added to a stressed syllable
                    # whose last letter is l, n or s and the penultimate letter is a short vowel
                    return last_syllable + "r"
                elif last_syllable[-2] in SHORT_VOWELS:
                    return apply_raw_r_assimilation(last_syllable)
                elif last_syllable[-2] in LONG_VOWELS:
                    return apply_raw_r_assimilation(last_syllable)
                return apply_raw_r_assimilation(last_syllable)
        else:
            return last_syllable + "r"
    else:
        return last_syllable + "r"
[ "def", "add_r_ending_to_syllable", "(", "last_syllable", ":", "str", ",", "is_first", "=", "True", ")", "->", "str", ":", "if", "len", "(", "last_syllable", ")", ">=", "2", ":", "if", "last_syllable", "[", "-", "1", "]", "in", "[", "'l'", ",", "'n'", ",", "'s'", ",", "'r'", "]", ":", "if", "last_syllable", "[", "-", "2", "]", "in", "CONSONANTS", ":", "# Apocope of r", "return", "last_syllable", "else", ":", "# Assimilation of r", "if", "len", "(", "last_syllable", ")", ">=", "3", "and", "last_syllable", "[", "-", "3", ":", "-", "1", "]", "in", "DIPHTHONGS", ":", "return", "apply_raw_r_assimilation", "(", "last_syllable", ")", "elif", "last_syllable", "[", "-", "2", "]", "in", "SHORT_VOWELS", "and", "is_first", ":", "# No assimilation when r is supposed to be added to a stressed syllable", "# whose last letter is l, n or s and the penultimate letter is a short vowel", "return", "last_syllable", "+", "\"r\"", "elif", "last_syllable", "[", "-", "2", "]", "in", "SHORT_VOWELS", ":", "return", "apply_raw_r_assimilation", "(", "last_syllable", ")", "elif", "last_syllable", "[", "-", "2", "]", "in", "LONG_VOWELS", ":", "return", "apply_raw_r_assimilation", "(", "last_syllable", ")", "return", "apply_raw_r_assimilation", "(", "last_syllable", ")", "else", ":", "return", "last_syllable", "+", "\"r\"", "else", ":", "return", "last_syllable", "+", "\"r\"" ]
Adds the -r ending to the last syllable of an Old Norse word.
In some cases, it really adds an -r. In other cases, it only doubles the last character
or leaves the syllable unchanged.

>>> add_r_ending_to_syllable("arm", True)
'armr'
>>> add_r_ending_to_syllable("ás", True)
'áss'
>>> add_r_ending_to_syllable("stól", True)
'stóll'
>>> "jö"+add_r_ending_to_syllable("kul", False)
'jökull'
>>> add_r_ending_to_syllable("stein", True)
'steinn'
>>> 'mi'+add_r_ending_to_syllable('kil', False)
'mikill'
>>> add_r_ending_to_syllable('sæl', True)
'sæll'
>>> 'li'+add_r_ending_to_syllable('til', False)
'litill'
>>> add_r_ending_to_syllable('vænn', True)
'vænn'
>>> add_r_ending_to_syllable('lauss', True)
'lauss'
>>> add_r_ending_to_syllable("vin", True)
'vinr'
>>> add_r_ending_to_syllable("sel", True)
'selr'
>>> add_r_ending_to_syllable('fagr', True)
'fagr'
>>> add_r_ending_to_syllable('vitr', True)
'vitr'
>>> add_r_ending_to_syllable('vetr', True)
'vetr'
>>> add_r_ending_to_syllable('akr', True)
'akr'
>>> add_r_ending_to_syllable('Björn', True)
'Björn'
>>> add_r_ending_to_syllable('þurs', True)
'þurs'
>>> add_r_ending_to_syllable('karl', True)
'karl'
>>> add_r_ending_to_syllable('hrafn', True)
'hrafn'

:param last_syllable: last syllable of the word
:param is_first: is it the first syllable of the word?
:return: inflected syllable
[ "Adds", "an", "the", "-", "r", "ending", "to", "the", "last", "syllable", "of", "an", "Old", "Norse", "word", ".", "In", "some", "cases", "it", "really", "adds", "an", "-", "r", ".", "In", "other", "cases", "it", "on", "doubles", "the", "last", "character", "or", "left", "the", "syllable", "unchanged", "." ]
python
train
mitsei/dlkit
dlkit/json_/grading/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/grading/sessions.py#L705-L747
def create_grade_system(self, grade_system_form): """Creates a new ``GradeSystem``. arg: grade_system_form (osid.grading.GradeSystemForm): the form for this ``GradeSystem`` return: (osid.grading.GradeSystem) - the new ``GradeSystem`` raise: IllegalState - ``grade_system_form`` already used in a create transaction raise: InvalidArgument - one or more of the form elements is invalid raise: NullArgument - ``grade_system_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``grade_system_form`` did not originate from ``get_grade_system_form_for_create()`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.create_resource_template collection = JSONClientValidated('grading', collection='GradeSystem', runtime=self._runtime) if not isinstance(grade_system_form, ABCGradeSystemForm): raise errors.InvalidArgument('argument type is not an GradeSystemForm') if grade_system_form.is_for_update(): raise errors.InvalidArgument('the GradeSystemForm is for update only, not create') try: if self._forms[grade_system_form.get_id().get_identifier()] == CREATED: raise errors.IllegalState('grade_system_form already used in a create transaction') except KeyError: raise errors.Unsupported('grade_system_form did not originate from this session') if not grade_system_form.is_valid(): raise errors.InvalidArgument('one or more of the form elements is invalid') insert_result = collection.insert_one(grade_system_form._my_map) self._forms[grade_system_form.get_id().get_identifier()] = CREATED result = objects.GradeSystem( osid_object_map=collection.find_one({'_id': insert_result.inserted_id}), runtime=self._runtime, proxy=self._proxy) return result
[ "def", "create_grade_system", "(", "self", ",", "grade_system_form", ")", ":", "# Implemented from template for", "# osid.resource.ResourceAdminSession.create_resource_template", "collection", "=", "JSONClientValidated", "(", "'grading'", ",", "collection", "=", "'GradeSystem'", ",", "runtime", "=", "self", ".", "_runtime", ")", "if", "not", "isinstance", "(", "grade_system_form", ",", "ABCGradeSystemForm", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'argument type is not an GradeSystemForm'", ")", "if", "grade_system_form", ".", "is_for_update", "(", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'the GradeSystemForm is for update only, not create'", ")", "try", ":", "if", "self", ".", "_forms", "[", "grade_system_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "==", "CREATED", ":", "raise", "errors", ".", "IllegalState", "(", "'grade_system_form already used in a create transaction'", ")", "except", "KeyError", ":", "raise", "errors", ".", "Unsupported", "(", "'grade_system_form did not originate from this session'", ")", "if", "not", "grade_system_form", ".", "is_valid", "(", ")", ":", "raise", "errors", ".", "InvalidArgument", "(", "'one or more of the form elements is invalid'", ")", "insert_result", "=", "collection", ".", "insert_one", "(", "grade_system_form", ".", "_my_map", ")", "self", ".", "_forms", "[", "grade_system_form", ".", "get_id", "(", ")", ".", "get_identifier", "(", ")", "]", "=", "CREATED", "result", "=", "objects", ".", "GradeSystem", "(", "osid_object_map", "=", "collection", ".", "find_one", "(", "{", "'_id'", ":", "insert_result", ".", "inserted_id", "}", ")", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ")", "return", "result" ]
Creates a new ``GradeSystem``. arg: grade_system_form (osid.grading.GradeSystemForm): the form for this ``GradeSystem`` return: (osid.grading.GradeSystem) - the new ``GradeSystem`` raise: IllegalState - ``grade_system_form`` already used in a create transaction raise: InvalidArgument - one or more of the form elements is invalid raise: NullArgument - ``grade_system_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``grade_system_form`` did not originate from ``get_grade_system_form_for_create()`` *compliance: mandatory -- This method must be implemented.*
[ "Creates", "a", "new", "GradeSystem", "." ]
python
train
GoogleCloudPlatform/appengine-gcs-client
python/src/cloudstorage/storage_api.py
https://github.com/GoogleCloudPlatform/appengine-gcs-client/blob/d11078331ecd915d753c886e96a80133599f3f98/python/src/cloudstorage/storage_api.py#L332-L372
def readline(self, size=-1): """Read one line delimited by '\n' from the file. A trailing newline character is kept in the string. It may be absent when a file ends with an incomplete line. If the size argument is non-negative, it specifies the maximum string size (counting the newline) to return. A negative size is the same as unspecified. Empty string is returned only when EOF is encountered immediately. Args: size: Maximum number of bytes to read. If not specified, readline stops only on '\n' or EOF. Returns: The data read as a string. Raises: IOError: When this buffer is closed. """ self._check_open() if size == 0 or not self._remaining(): return '' data_list = [] newline_offset = self._buffer.find_newline(size) while newline_offset < 0: data = self._buffer.read(size) size -= len(data) self._offset += len(data) data_list.append(data) if size == 0 or not self._remaining(): return ''.join(data_list) self._buffer.reset(self._buffer_future.get_result()) self._request_next_buffer() newline_offset = self._buffer.find_newline(size) data = self._buffer.read_to_offset(newline_offset + 1) self._offset += len(data) data_list.append(data) return ''.join(data_list)
[ "def", "readline", "(", "self", ",", "size", "=", "-", "1", ")", ":", "self", ".", "_check_open", "(", ")", "if", "size", "==", "0", "or", "not", "self", ".", "_remaining", "(", ")", ":", "return", "''", "data_list", "=", "[", "]", "newline_offset", "=", "self", ".", "_buffer", ".", "find_newline", "(", "size", ")", "while", "newline_offset", "<", "0", ":", "data", "=", "self", ".", "_buffer", ".", "read", "(", "size", ")", "size", "-=", "len", "(", "data", ")", "self", ".", "_offset", "+=", "len", "(", "data", ")", "data_list", ".", "append", "(", "data", ")", "if", "size", "==", "0", "or", "not", "self", ".", "_remaining", "(", ")", ":", "return", "''", ".", "join", "(", "data_list", ")", "self", ".", "_buffer", ".", "reset", "(", "self", ".", "_buffer_future", ".", "get_result", "(", ")", ")", "self", ".", "_request_next_buffer", "(", ")", "newline_offset", "=", "self", ".", "_buffer", ".", "find_newline", "(", "size", ")", "data", "=", "self", ".", "_buffer", ".", "read_to_offset", "(", "newline_offset", "+", "1", ")", "self", ".", "_offset", "+=", "len", "(", "data", ")", "data_list", ".", "append", "(", "data", ")", "return", "''", ".", "join", "(", "data_list", ")" ]
Read one line delimited by '\n' from the file. A trailing newline character is kept in the string. It may be absent when a file ends with an incomplete line. If the size argument is non-negative, it specifies the maximum string size (counting the newline) to return. A negative size is the same as unspecified. Empty string is returned only when EOF is encountered immediately. Args: size: Maximum number of bytes to read. If not specified, readline stops only on '\n' or EOF. Returns: The data read as a string. Raises: IOError: When this buffer is closed.
[ "Read", "one", "line", "delimited", "by", "\\", "n", "from", "the", "file", "." ]
python
train
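The documented semantics (trailing newline kept, size caps the read, empty string only at EOF) match Python's standard file protocol, so they can be checked against an in-memory stream; this is a verification sketch, not the GCS buffer itself.

import io

buf = io.BytesIO(b"first line\nsecond line without newline")

print(buf.readline())   # b'first line\n'            -- trailing newline kept
print(buf.readline(6))  # b'second'                  -- size caps the read mid-line
print(buf.readline())   # b' line without newline'   -- incomplete last line, no '\n'
print(buf.readline())   # b''                        -- empty string only at EOF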
jic-dtool/dtoolcore
dtoolcore/storagebroker.py
https://github.com/jic-dtool/dtoolcore/blob/eeb9a924dc8fcf543340653748a7877be1f98e0f/dtoolcore/storagebroker.py#L372-L400
def list_dataset_uris(cls, base_uri, config_path): """Return list containing URIs in location given by base_uri.""" parsed_uri = generous_parse_uri(base_uri) uri_list = [] path = parsed_uri.path if IS_WINDOWS: path = unix_to_windows_path(parsed_uri.path, parsed_uri.netloc) for d in os.listdir(path): dir_path = os.path.join(path, d) if not os.path.isdir(dir_path): continue storage_broker = cls(dir_path, config_path) if not storage_broker.has_admin_metadata(): continue uri = storage_broker.generate_uri( name=d, uuid=None, base_uri=base_uri ) uri_list.append(uri) return uri_list
[ "def", "list_dataset_uris", "(", "cls", ",", "base_uri", ",", "config_path", ")", ":", "parsed_uri", "=", "generous_parse_uri", "(", "base_uri", ")", "uri_list", "=", "[", "]", "path", "=", "parsed_uri", ".", "path", "if", "IS_WINDOWS", ":", "path", "=", "unix_to_windows_path", "(", "parsed_uri", ".", "path", ",", "parsed_uri", ".", "netloc", ")", "for", "d", "in", "os", ".", "listdir", "(", "path", ")", ":", "dir_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "d", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "dir_path", ")", ":", "continue", "storage_broker", "=", "cls", "(", "dir_path", ",", "config_path", ")", "if", "not", "storage_broker", ".", "has_admin_metadata", "(", ")", ":", "continue", "uri", "=", "storage_broker", ".", "generate_uri", "(", "name", "=", "d", ",", "uuid", "=", "None", ",", "base_uri", "=", "base_uri", ")", "uri_list", ".", "append", "(", "uri", ")", "return", "uri_list" ]
Return list containing URIs in location given by base_uri.
[ "Return", "list", "containing", "URIs", "in", "location", "given", "by", "base_uri", "." ]
python
train
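A rough sketch of the same directory scan with plain os calls; the marker-file check and the ".dtool" name are assumptions standing in for the broker's has_admin_metadata() test, and the "file://" prefix is illustrative.

import os

def list_dataset_dirs(base_path, marker=".dtool"):
    # keep only subdirectories that look like datasets (marker present)
    uris = []
    for d in os.listdir(base_path):
        dir_path = os.path.join(base_path, d)
        if not os.path.isdir(dir_path):
            continue
        if not os.path.exists(os.path.join(dir_path, marker)):
            continue  # plays the role of has_admin_metadata()
        uris.append("file://" + os.path.abspath(dir_path))
    return uris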
xgfs/NetLSD
netlsd/kernels.py
https://github.com/xgfs/NetLSD/blob/54820b3669a94852bd9653be23b09e126e901ab3/netlsd/kernels.py#L208-L247
def _wkt(eivals, timescales, normalization, normalized_laplacian):
    """
    Computes wave kernel trace from given eigenvalues, timescales, and normalization.

    For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.

    Parameters
    ----------
    eivals : numpy.ndarray
        Eigenvalue vector
    timescales : numpy.ndarray
        Vector of discrete timesteps for the kernel computation
    normalization : str or numpy.ndarray
        Either 'empty', 'complete' or None.
        If None or any other value, return unnormalized wave kernel trace.
        For the details on how 'empty' and 'complete' are computed, please refer to the paper.
        If np.ndarray, they are treated as exact normalization constants
    normalized_laplacian: bool
        Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.

    Returns
    -------
    numpy.ndarray
        Wave kernel trace signature

    """
    nv = eivals.shape[0]
    wkt = np.zeros(timescales.shape)
    for idx, t in enumerate(timescales):
        wkt[idx] = np.sum(np.exp(-1j * t * eivals))
    if isinstance(normalization, np.ndarray):
        return wkt / normalization
    if normalization == 'empty' or normalization == True:
        return wkt / nv
    if normalization == 'complete':
        if normalized_laplacian:
            return wkt / (1 + (nv - 1) * np.cos(timescales))
        else:
            return wkt / (1 + (nv - 1) * np.cos(nv * timescales))
    return wkt
[ "def", "_wkt", "(", "eivals", ",", "timescales", ",", "normalization", ",", "normalized_laplacian", ")", ":", "nv", "=", "eivals", ".", "shape", "[", "0", "]", "wkt", "=", "np", ".", "zeros", "(", "timescales", ".", "shape", ")", "for", "idx", ",", "t", "in", "enumerate", "(", "timescales", ")", ":", "wkt", "[", "idx", "]", "=", "np", ".", "sum", "(", "np", ".", "exp", "(", "-", "1j", "*", "t", "*", "eivals", ")", ")", "if", "isinstance", "(", "normalization", ",", "np", ".", "ndarray", ")", ":", "return", "hkt", "/", "normalization", "if", "normalization", "==", "'empty'", "or", "normalization", "==", "True", ":", "return", "wkt", "/", "nv", "if", "normalization", "==", "'complete'", ":", "if", "normalized_laplacian", ":", "return", "wkt", "/", "(", "1", "+", "(", "nv", "-", "1", ")", "*", "np", ".", "cos", "(", "timescales", ")", ")", "else", ":", "return", "wkt", "/", "(", "1", "+", "(", "nv", "-", "1", ")", "*", "np", ".", "cos", "(", "nv", "*", "timescales", ")", ")", "return", "wkt" ]
Computes wave kernel trace from given eigenvalues, timescales, and normalization.

For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.

Parameters
----------
eivals : numpy.ndarray
    Eigenvalue vector
timescales : numpy.ndarray
    Vector of discrete timesteps for the kernel computation
normalization : str or numpy.ndarray
    Either 'empty', 'complete' or None.
    If None or any other value, return unnormalized wave kernel trace.
    For the details on how 'empty' and 'complete' are computed, please refer to the paper.
    If np.ndarray, they are treated as exact normalization constants
normalized_laplacian: bool
    Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.

Returns
-------
numpy.ndarray
    Wave kernel trace signature
[ "Computes", "wave", "kernel", "trace", "from", "given", "eigenvalues", "timescales", "and", "normalization", "." ]
python
train
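The trace itself is just h(t) = sum_j exp(-i * t * lambda_j) over the Laplacian spectrum. A small standalone check under an assumed toy input, the path graph on three nodes, whose Laplacian eigenvalues are 0, 1 and 3:

import numpy as np

def wave_trace(eivals, timescales):
    # h(t) = sum_j exp(-1j * t * lambda_j); complex-valued in general
    return np.array([np.sum(np.exp(-1j * t * eivals)) for t in timescales])

eivals = np.array([0.0, 1.0, 3.0])         # Laplacian spectrum of P3
timescales = np.linspace(0, np.pi, 5)
h = wave_trace(eivals, timescales)
print(h / eivals.shape[0])                 # 'empty'-style normalization by node count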
bfontaine/p7magma
magma/courses.py
https://github.com/bfontaine/p7magma/blob/713647aa9e3187c93c2577ef812f33ec42ae5494/magma/courses.py#L105-L125
def _populate_large_table(self, trs): """ Populate the list, given that ``trs`` is a ``BeautifulSoup`` elements list from a large table (8 columns). """ for tr in trs: tds = tr.select('td') cs = Course( code=coursecode(tds[0]), title=text(tds[1]), semester=parseint(tds[2]), status=text(tds[3]), ects=parsefloat(tds[4]), followed=parsebool(tds[5]), ) followed = cs['followed'] cs['session'] = text(tds[7]) if followed else None cs['result'] = parseresult(tds[6]) if followed else None self.append(cs)
[ "def", "_populate_large_table", "(", "self", ",", "trs", ")", ":", "for", "tr", "in", "trs", ":", "tds", "=", "tr", ".", "select", "(", "'td'", ")", "cs", "=", "Course", "(", "code", "=", "coursecode", "(", "tds", "[", "0", "]", ")", ",", "title", "=", "text", "(", "tds", "[", "1", "]", ")", ",", "semester", "=", "parseint", "(", "tds", "[", "2", "]", ")", ",", "status", "=", "text", "(", "tds", "[", "3", "]", ")", ",", "ects", "=", "parsefloat", "(", "tds", "[", "4", "]", ")", ",", "followed", "=", "parsebool", "(", "tds", "[", "5", "]", ")", ",", ")", "followed", "=", "cs", "[", "'followed'", "]", "cs", "[", "'session'", "]", "=", "text", "(", "tds", "[", "7", "]", ")", "if", "followed", "else", "None", "cs", "[", "'result'", "]", "=", "parseresult", "(", "tds", "[", "6", "]", ")", "if", "followed", "else", "None", "self", ".", "append", "(", "cs", ")" ]
Populate the list, given that ``trs`` is a ``BeautifulSoup`` elements list from a large table (8 columns).
[ "Populate", "the", "list", "given", "that", "trs", "is", "a", "BeautifulSoup", "elements", "list", "from", "a", "large", "table", "(", "8", "columns", ")", "." ]
python
train
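A hedged reconstruction of that row parsing with BeautifulSoup; the HTML snippet, the column order, and the inline parsing stand-ins (int/float casts, the 'Oui' bool check) are assumptions matching the 8-column layout described above, not Magma's real markup or helpers.

from bs4 import BeautifulSoup

html = """<table><tr>
<td>IF101</td><td>Intro</td><td>1</td><td>Obl.</td><td>6.0</td>
<td>Oui</td><td>ABI</td><td>1</td>
</tr></table>"""

soup = BeautifulSoup(html, "html.parser")
for tr in soup.select("tr"):
    tds = [td.get_text(strip=True) for td in tr.select("td")]
    course = {
        "code": tds[0], "title": tds[1], "semester": int(tds[2]),
        "status": tds[3], "ects": float(tds[4]),
        # 'oui' stands in for whatever parsebool() accepts in the real code
        "followed": tds[5].lower() in ("oui", "yes", "x"),
    }
    course["session"] = tds[7] if course["followed"] else None
    course["result"] = tds[6] if course["followed"] else None
    print(course)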
PyPSA/PyPSA
pypsa/pf.py
https://github.com/PyPSA/PyPSA/blob/46954b1b3c21460550f7104681517065279a53b7/pypsa/pf.py#L155-L185
def newton_raphson_sparse(f, guess, dfdx, x_tol=1e-10, lim_iter=100): """Solve f(x) = 0 with initial guess for x and dfdx(x). dfdx(x) should return a sparse Jacobian. Terminate if error on norm of f(x) is < x_tol or there were more than lim_iter iterations. """ converged = False n_iter = 0 F = f(guess) diff = norm(F,np.Inf) logger.debug("Error at iteration %d: %f", n_iter, diff) while diff > x_tol and n_iter < lim_iter: n_iter +=1 guess = guess - spsolve(dfdx(guess),F) F = f(guess) diff = norm(F,np.Inf) logger.debug("Error at iteration %d: %f", n_iter, diff) if diff > x_tol: logger.warning("Warning, we didn't reach the required tolerance within %d iterations, error is at %f. See the section \"Troubleshooting\" in the documentation for tips to fix this. ", n_iter, diff) elif not np.isnan(diff): converged = True return guess, n_iter, diff, converged
[ "def", "newton_raphson_sparse", "(", "f", ",", "guess", ",", "dfdx", ",", "x_tol", "=", "1e-10", ",", "lim_iter", "=", "100", ")", ":", "converged", "=", "False", "n_iter", "=", "0", "F", "=", "f", "(", "guess", ")", "diff", "=", "norm", "(", "F", ",", "np", ".", "Inf", ")", "logger", ".", "debug", "(", "\"Error at iteration %d: %f\"", ",", "n_iter", ",", "diff", ")", "while", "diff", ">", "x_tol", "and", "n_iter", "<", "lim_iter", ":", "n_iter", "+=", "1", "guess", "=", "guess", "-", "spsolve", "(", "dfdx", "(", "guess", ")", ",", "F", ")", "F", "=", "f", "(", "guess", ")", "diff", "=", "norm", "(", "F", ",", "np", ".", "Inf", ")", "logger", ".", "debug", "(", "\"Error at iteration %d: %f\"", ",", "n_iter", ",", "diff", ")", "if", "diff", ">", "x_tol", ":", "logger", ".", "warning", "(", "\"Warning, we didn't reach the required tolerance within %d iterations, error is at %f. See the section \\\"Troubleshooting\\\" in the documentation for tips to fix this. \"", ",", "n_iter", ",", "diff", ")", "elif", "not", "np", ".", "isnan", "(", "diff", ")", ":", "converged", "=", "True", "return", "guess", ",", "n_iter", ",", "diff", ",", "converged" ]
Solve f(x) = 0 with initial guess for x and dfdx(x). dfdx(x) should return a sparse Jacobian. Terminate if error on norm of f(x) is < x_tol or there were more than lim_iter iterations.
[ "Solve", "f", "(", "x", ")", "=", "0", "with", "initial", "guess", "for", "x", "and", "dfdx", "(", "x", ")", ".", "dfdx", "(", "x", ")", "should", "return", "a", "sparse", "Jacobian", ".", "Terminate", "if", "error", "on", "norm", "of", "f", "(", "x", ")", "is", "<", "x_tol", "or", "there", "were", "more", "than", "lim_iter", "iterations", "." ]
python
train
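The same loop works on any f/dfdx pair. A self-contained sketch on an assumed toy system of two equations with a sparse Jacobian, converging to the root (1, 1):

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import spsolve

# f(x) = 0 for f1 = x0^2 + x1 - 2 and f2 = x0 + x1^2 - 2
f = lambda x: np.array([x[0]**2 + x[1] - 2.0, x[0] + x[1]**2 - 2.0])
dfdx = lambda x: csr_matrix([[2.0 * x[0], 1.0], [1.0, 2.0 * x[1]]])

guess, x_tol = np.array([2.0, 0.5]), 1e-10
for n_iter in range(100):
    F = f(guess)
    diff = np.linalg.norm(F, np.inf)
    if diff < x_tol:
        break
    guess = guess - spsolve(dfdx(guess), F)  # one Newton step per iteration

print(guess, n_iter, diff)  # ~[1. 1.] after a handful of iterations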
ciena/afkak
afkak/producer.py
https://github.com/ciena/afkak/blob/6f5e05ba6f135ea3c29cdb80efda009f7845569a/afkak/producer.py#L235-L252
def stop(self): """ Terminate any outstanding requests. :returns: :class:``Deferred` which fires when fully stopped. """ self.stopping = True # Cancel any outstanding request to our client if self._batch_send_d: self._batch_send_d.cancel() # Do we have to worry about our looping call? if self.batch_every_t is not None: # Stop our looping call, and wait for the deferred to be called if self._sendLooper is not None: self._sendLooper.stop() # Make sure requests that wasn't cancelled above are now self._cancel_outstanding() return self._sendLooperD or succeed(None)
[ "def", "stop", "(", "self", ")", ":", "self", ".", "stopping", "=", "True", "# Cancel any outstanding request to our client", "if", "self", ".", "_batch_send_d", ":", "self", ".", "_batch_send_d", ".", "cancel", "(", ")", "# Do we have to worry about our looping call?", "if", "self", ".", "batch_every_t", "is", "not", "None", ":", "# Stop our looping call, and wait for the deferred to be called", "if", "self", ".", "_sendLooper", "is", "not", "None", ":", "self", ".", "_sendLooper", ".", "stop", "(", ")", "# Make sure requests that wasn't cancelled above are now", "self", ".", "_cancel_outstanding", "(", ")", "return", "self", ".", "_sendLooperD", "or", "succeed", "(", "None", ")" ]
Terminate any outstanding requests. :returns: :class:``Deferred` which fires when fully stopped.
[ "Terminate", "any", "outstanding", "requests", "." ]
python
train
log2timeline/plaso
plaso/analysis/windows_services.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/analysis/windows_services.py#L230-L253
def CompileReport(self, mediator): """Compiles an analysis report. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. Returns: AnalysisReport: report. """ # TODO: move YAML representation out of plugin and into serialization. lines_of_text = [] if self._output_format == 'yaml': lines_of_text.append( yaml.safe_dump_all(self._service_collection.services)) else: lines_of_text.append('Listing Windows Services') for service in self._service_collection.services: lines_of_text.append(self._FormatServiceText(service)) lines_of_text.append('') lines_of_text.append('') report_text = '\n'.join(lines_of_text) return reports.AnalysisReport(plugin_name=self.NAME, text=report_text)
[ "def", "CompileReport", "(", "self", ",", "mediator", ")", ":", "# TODO: move YAML representation out of plugin and into serialization.", "lines_of_text", "=", "[", "]", "if", "self", ".", "_output_format", "==", "'yaml'", ":", "lines_of_text", ".", "append", "(", "yaml", ".", "safe_dump_all", "(", "self", ".", "_service_collection", ".", "services", ")", ")", "else", ":", "lines_of_text", ".", "append", "(", "'Listing Windows Services'", ")", "for", "service", "in", "self", ".", "_service_collection", ".", "services", ":", "lines_of_text", ".", "append", "(", "self", ".", "_FormatServiceText", "(", "service", ")", ")", "lines_of_text", ".", "append", "(", "''", ")", "lines_of_text", ".", "append", "(", "''", ")", "report_text", "=", "'\\n'", ".", "join", "(", "lines_of_text", ")", "return", "reports", ".", "AnalysisReport", "(", "plugin_name", "=", "self", ".", "NAME", ",", "text", "=", "report_text", ")" ]
Compiles an analysis report. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. Returns: AnalysisReport: report.
[ "Compiles", "an", "analysis", "report", "." ]
python
train
sorgerlab/indra
rest_api/api.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/rest_api/api.py#L534-L543
def map_grounding(): """Map grounding on a list of INDRA Statements.""" if request.method == 'OPTIONS': return {} response = request.body.read().decode('utf-8') body = json.loads(response) stmts_json = body.get('statements') stmts = stmts_from_json(stmts_json) stmts_out = ac.map_grounding(stmts) return _return_stmts(stmts_out)
[ "def", "map_grounding", "(", ")", ":", "if", "request", ".", "method", "==", "'OPTIONS'", ":", "return", "{", "}", "response", "=", "request", ".", "body", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "body", "=", "json", ".", "loads", "(", "response", ")", "stmts_json", "=", "body", ".", "get", "(", "'statements'", ")", "stmts", "=", "stmts_from_json", "(", "stmts_json", ")", "stmts_out", "=", "ac", ".", "map_grounding", "(", "stmts", ")", "return", "_return_stmts", "(", "stmts_out", ")" ]
Map grounding on a list of INDRA Statements.
[ "Map", "grounding", "on", "a", "list", "of", "INDRA", "Statements", "." ]
python
train
scizzorz/bumpy
bumpy.py
https://github.com/scizzorz/bumpy/blob/99ed5c5ccaa61842cafe9faf8b082de44bdf01f9/bumpy.py#L270-L293
def require(*reqs): '''Require tasks or files at runtime.''' for req in reqs: if type(req) is str: # does not exist and unknown generator if not os.path.exists(req) and req not in GENERATES: abort(LOCALE['abort_bad_file'].format(req)) # exists but unknown generator if req not in GENERATES: return # exists and known generator if req in GENERATES: req = GENERATES[req] if req.valid is None: if len(req.args): abort(LOCALE['abort_bad_args'], req, len(req.args)) req() if req.valid is False: abort(LOCALE['abort_bad_task'], req)
[ "def", "require", "(", "*", "reqs", ")", ":", "for", "req", "in", "reqs", ":", "if", "type", "(", "req", ")", "is", "str", ":", "# does not exist and unknown generator", "if", "not", "os", ".", "path", ".", "exists", "(", "req", ")", "and", "req", "not", "in", "GENERATES", ":", "abort", "(", "LOCALE", "[", "'abort_bad_file'", "]", ".", "format", "(", "req", ")", ")", "# exists but unknown generator", "if", "req", "not", "in", "GENERATES", ":", "return", "# exists and known generator", "if", "req", "in", "GENERATES", ":", "req", "=", "GENERATES", "[", "req", "]", "if", "req", ".", "valid", "is", "None", ":", "if", "len", "(", "req", ".", "args", ")", ":", "abort", "(", "LOCALE", "[", "'abort_bad_args'", "]", ",", "req", ",", "len", "(", "req", ".", "args", ")", ")", "req", "(", ")", "if", "req", ".", "valid", "is", "False", ":", "abort", "(", "LOCALE", "[", "'abort_bad_task'", "]", ",", "req", ")" ]
Require tasks or files at runtime.
[ "Require", "tasks", "or", "files", "at", "runtime", "." ]
python
train
phoebe-project/phoebe2
phoebe/frontend/bundle.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/frontend/bundle.py#L3320-L3330
def remove_model(self, model, **kwargs):
    """
    Remove a 'model' from the bundle

    :parameter str model: twig to filter for the model
    :parameter **kwargs: any other tags to do the filter
        (except twig or context)
    """
    kwargs['model'] = model
    kwargs['context'] = 'model'
    self.remove_parameters_all(**kwargs)
[ "def", "remove_model", "(", "self", ",", "model", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'model'", "]", "=", "model", "kwargs", "[", "'context'", "]", "=", "'model'", "self", ".", "remove_parameters_all", "(", "*", "*", "kwargs", ")" ]
Remove a 'model' from the bundle

:parameter str model: twig to filter for the model
:parameter **kwargs: any other tags to do the filter
    (except twig or context)
[ "Remove", "a", "model", "from", "the", "bundle" ]
python
train
waqasbhatti/astrobase
astrobase/lcmodels/eclipses.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/lcmodels/eclipses.py#L19-L46
def _gaussian(x, amp, loc, std): '''This is a simple gaussian. Parameters ---------- x : np.array The items at which the Gaussian is evaluated. amp : float The amplitude of the Gaussian. loc : float The central value of the Gaussian. std : float The standard deviation of the Gaussian. Returns ------- np.array Returns the Gaussian evaluated at the items in `x`, using the provided parameters of `amp`, `loc`, and `std`. ''' return amp * np.exp(-((x - loc)*(x - loc))/(2.0*std*std))
[ "def", "_gaussian", "(", "x", ",", "amp", ",", "loc", ",", "std", ")", ":", "return", "amp", "*", "np", ".", "exp", "(", "-", "(", "(", "x", "-", "loc", ")", "*", "(", "x", "-", "loc", ")", ")", "/", "(", "2.0", "*", "std", "*", "std", ")", ")" ]
This is a simple gaussian. Parameters ---------- x : np.array The items at which the Gaussian is evaluated. amp : float The amplitude of the Gaussian. loc : float The central value of the Gaussian. std : float The standard deviation of the Gaussian. Returns ------- np.array Returns the Gaussian evaluated at the items in `x`, using the provided parameters of `amp`, `loc`, and `std`.
[ "This", "is", "a", "simple", "gaussian", "." ]
python
valid
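A quick numeric check of the formula amp * exp(-(x - loc)^2 / (2 * std^2)): the peak equals amp at x == loc, and one standard deviation away the value falls to amp * exp(-1/2).

import numpy as np

def gaussian(x, amp, loc, std):
    # identical formula to the record above, reproduced for a standalone check
    return amp * np.exp(-((x - loc) * (x - loc)) / (2.0 * std * std))

x = np.array([-1.0, 0.0, 1.0])
print(gaussian(x, amp=2.0, loc=0.0, std=1.0))
# [1.21306132 2.         1.21306132]  i.e. 2*exp(-0.5) at +/- one std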
spyder-ide/spyder
spyder/utils/introspection/rope_patch.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/utils/introspection/rope_patch.py#L30-L211
def apply():
    """Monkey patching rope

    See [1], [2], [3], [4] and [5] in module docstring."""
    from spyder.utils.programs import is_module_installed
    if is_module_installed('rope', '<0.9.4'):
        import rope
        raise ImportError("rope %s can't be patched" % rope.VERSION)

    # [1] Patching project.Project for compatibility with py2exe/cx_Freeze
    # distributions
    from spyder.config.base import is_py2exe_or_cx_Freeze
    if is_py2exe_or_cx_Freeze():
        from rope.base import project
        class PatchedProject(project.Project):
            def _default_config(self):
                # py2exe/cx_Freeze distribution
                from spyder.config.base import get_module_source_path
                fname = get_module_source_path('spyder', 'default_config.py')
                return open(fname, 'rb').read()
        project.Project = PatchedProject

    # Patching pycore.PyCore...
    from rope.base import pycore
    class PatchedPyCore(pycore.PyCore):
        # [2] ...so that forced builtin modules (i.e. modules that were
        # declared as 'extension_modules' in rope preferences) will be indeed
        # recognized as builtins by rope, as expected
        #
        # This patch is included in rope 0.9.4+ but applying it anyway is ok
        def get_module(self, name, folder=None):
            """Returns a `PyObject` if the module was found."""
            # check if this is a builtin module
            pymod = self._builtin_module(name)
            if pymod is not None:
                return pymod
            module = self.find_module(name, folder)
            if module is None:
                raise pycore.ModuleNotFoundError(
                    'Module %s not found' % name)
            return self.resource_to_pyobject(module)
        # [3] ...to avoid considering folders without __init__.py as Python
        # packages
        def _find_module_in_folder(self, folder, modname):
            module = folder
            packages = modname.split('.')
            for pkg in packages[:-1]:
                if module.is_folder() and module.has_child(pkg):
                    module = module.get_child(pkg)
                else:
                    return None
            if module.is_folder():
                if module.has_child(packages[-1]) and \
                   module.get_child(packages[-1]).is_folder() and \
                   module.get_child(packages[-1]).has_child('__init__.py'):
                    return module.get_child(packages[-1])
                elif module.has_child(packages[-1] + '.py') and \
                     not module.get_child(packages[-1] + '.py').is_folder():
                    return module.get_child(packages[-1] + '.py')
    pycore.PyCore = PatchedPyCore

    # [2] Patching BuiltinName for the go to definition feature to simply work
    # with forced builtins
    from rope.base import builtins, libutils, pyobjects
    import inspect
    import os.path as osp
    class PatchedBuiltinName(builtins.BuiltinName):
        def _pycore(self):
            p = self.pyobject
            while p.parent is not None:
                p = p.parent
            if isinstance(p, builtins.BuiltinModule) and p.pycore is not None:
                return p.pycore
        def get_definition_location(self):
            if not inspect.isbuiltin(self.pyobject):
                _lines, lineno = inspect.getsourcelines(self.pyobject.builtin)
                path = inspect.getfile(self.pyobject.builtin)
                if path.endswith('pyc') and osp.isfile(path[:-1]):
                    path = path[:-1]
                pycore = self._pycore()
                if pycore and pycore.project:
                    resource = libutils.path_to_resource(pycore.project, path)
                    module = pyobjects.PyModule(pycore, None, resource)
                    return (module, lineno)
            return (None, None)
    builtins.BuiltinName = PatchedBuiltinName

    # [4] Patching several PyDocExtractor methods:
    # 1. get_doc:
    # To force rope to return the docstring of any object which has one, even
    # if it's not an instance of AbstractFunction, AbstractClass, or
    # AbstractModule.
    # Also, to use utils.dochelpers.getdoc to get docs from forced builtins.
    #
    # 2. _get_class_docstring and _get_single_function_docstring:
    # To not let rope add a 2 spaces indentation to every docstring, which was
    # breaking our rich text mode. The only value that we are modifying is the
    # 'indents' keyword of those methods, from 2 to 0.
    #
    # 3. get_calltip
    # To easily get calltips of forced builtins
    from rope.contrib import codeassist
    from spyder_kernels.utils.dochelpers import getdoc
    from rope.base import exceptions
    class PatchedPyDocExtractor(codeassist.PyDocExtractor):
        def get_builtin_doc(self, pyobject):
            buitin = pyobject.builtin
            return getdoc(buitin)

        def get_doc(self, pyobject):
            if hasattr(pyobject, 'builtin'):
                doc = self.get_builtin_doc(pyobject)
                return doc
            elif isinstance(pyobject, builtins.BuiltinModule):
                docstring = pyobject.get_doc()
                if docstring is not None:
                    docstring = self._trim_docstring(docstring)
                else:
                    docstring = ''
                # TODO: Add a module_name key, so that the name could appear
                # on the OI text filed but not be used by sphinx to render
                # the page
                doc = {'name': '',
                       'argspec': '',
                       'note': '',
                       'docstring': docstring
                       }
                return doc
            elif isinstance(pyobject, pyobjects.AbstractFunction):
                return self._get_function_docstring(pyobject)
            elif isinstance(pyobject, pyobjects.AbstractClass):
                return self._get_class_docstring(pyobject)
            elif isinstance(pyobject, pyobjects.AbstractModule):
                return self._trim_docstring(pyobject.get_doc())
            elif pyobject.get_doc() is not None:  # Spyder patch
                return self._trim_docstring(pyobject.get_doc())
            return None

        def get_calltip(self, pyobject, ignore_unknown=False, remove_self=False):
            if hasattr(pyobject, 'builtin'):
                doc = self.get_builtin_doc(pyobject)
                return doc['name'] + doc['argspec']
            try:
                if isinstance(pyobject, pyobjects.AbstractClass):
                    pyobject = pyobject['__init__'].get_object()
                if not isinstance(pyobject, pyobjects.AbstractFunction):
                    pyobject = pyobject['__call__'].get_object()
            except exceptions.AttributeNotFoundError:
                return None
            if ignore_unknown and not isinstance(pyobject, pyobjects.PyFunction):
                return
            if isinstance(pyobject, pyobjects.AbstractFunction):
                result = self._get_function_signature(pyobject,
                                                      add_module=True)
                if remove_self and self._is_method(pyobject):
                    return result.replace('(self)', '()').replace('(self, ', '(')
                return result

        def _get_class_docstring(self, pyclass):
            contents = self._trim_docstring(pyclass.get_doc(), indents=0)
            supers = [super.get_name() for super in pyclass.get_superclasses()]
            doc = 'class %s(%s):\n\n' % (pyclass.get_name(), ', '.join(supers)) + contents
            if '__init__' in pyclass:
                init = pyclass['__init__'].get_object()
                if isinstance(init, pyobjects.AbstractFunction):
                    doc += '\n\n' + self._get_single_function_docstring(init)
            return doc

        def _get_single_function_docstring(self, pyfunction):
            docs = pyfunction.get_doc()
            docs = self._trim_docstring(docs, indents=0)
            return docs
    codeassist.PyDocExtractor = PatchedPyDocExtractor

    # [5] Get the right matplotlib docstrings for Help
    try:
        import matplotlib as mpl
        mpl.rcParams['docstring.hardcopy'] = True
    except:
        pass
[ "def", "apply", "(", ")", ":", "from", "spyder", ".", "utils", ".", "programs", "import", "is_module_installed", "if", "is_module_installed", "(", "'rope'", ",", "'<0.9.4'", ")", ":", "import", "rope", "raise", "ImportError", "(", "\"rope %s can't be patched\"", "%", "rope", ".", "VERSION", ")", "# [1] Patching project.Project for compatibility with py2exe/cx_Freeze\r", "# distributions\r", "from", "spyder", ".", "config", ".", "base", "import", "is_py2exe_or_cx_Freeze", "if", "is_py2exe_or_cx_Freeze", "(", ")", ":", "from", "rope", ".", "base", "import", "project", "class", "PatchedProject", "(", "project", ".", "Project", ")", ":", "def", "_default_config", "(", "self", ")", ":", "# py2exe/cx_Freeze distribution\r", "from", "spyder", ".", "config", ".", "base", "import", "get_module_source_path", "fname", "=", "get_module_source_path", "(", "'spyder'", ",", "'default_config.py'", ")", "return", "open", "(", "fname", ",", "'rb'", ")", ".", "read", "(", ")", "project", ".", "Project", "=", "PatchedProject", "# Patching pycore.PyCore...\r", "from", "rope", ".", "base", "import", "pycore", "class", "PatchedPyCore", "(", "pycore", ".", "PyCore", ")", ":", "# [2] ...so that forced builtin modules (i.e. modules that were \r", "# declared as 'extension_modules' in rope preferences) will be indeed\r", "# recognized as builtins by rope, as expected\r", "# \r", "# This patch is included in rope 0.9.4+ but applying it anyway is ok\r", "def", "get_module", "(", "self", ",", "name", ",", "folder", "=", "None", ")", ":", "\"\"\"Returns a `PyObject` if the module was found.\"\"\"", "# check if this is a builtin module\r", "pymod", "=", "self", ".", "_builtin_module", "(", "name", ")", "if", "pymod", "is", "not", "None", ":", "return", "pymod", "module", "=", "self", ".", "find_module", "(", "name", ",", "folder", ")", "if", "module", "is", "None", ":", "raise", "pycore", ".", "ModuleNotFoundError", "(", "'Module %s not found'", "%", "name", ")", "return", "self", ".", "resource_to_pyobject", "(", "module", ")", "# [3] ...to avoid considering folders without __init__.py as Python\r", "# packages\r", "def", "_find_module_in_folder", "(", "self", ",", "folder", ",", "modname", ")", ":", "module", "=", "folder", "packages", "=", "modname", ".", "split", "(", "'.'", ")", "for", "pkg", "in", "packages", "[", ":", "-", "1", "]", ":", "if", "module", ".", "is_folder", "(", ")", "and", "module", ".", "has_child", "(", "pkg", ")", ":", "module", "=", "module", ".", "get_child", "(", "pkg", ")", "else", ":", "return", "None", "if", "module", ".", "is_folder", "(", ")", ":", "if", "module", ".", "has_child", "(", "packages", "[", "-", "1", "]", ")", "and", "module", ".", "get_child", "(", "packages", "[", "-", "1", "]", ")", ".", "is_folder", "(", ")", "and", "module", ".", "get_child", "(", "packages", "[", "-", "1", "]", ")", ".", "has_child", "(", "'__init__.py'", ")", ":", "return", "module", ".", "get_child", "(", "packages", "[", "-", "1", "]", ")", "elif", "module", ".", "has_child", "(", "packages", "[", "-", "1", "]", "+", "'.py'", ")", "and", "not", "module", ".", "get_child", "(", "packages", "[", "-", "1", "]", "+", "'.py'", ")", ".", "is_folder", "(", ")", ":", "return", "module", ".", "get_child", "(", "packages", "[", "-", "1", "]", "+", "'.py'", ")", "pycore", ".", "PyCore", "=", "PatchedPyCore", "# [2] Patching BuiltinName for the go to definition feature to simply work \r", "# with forced builtins\r", "from", "rope", ".", "base", "import", "builtins", ",", "libutils", ",", "pyobjects", "import", 
"inspect", "import", "os", ".", "path", "as", "osp", "class", "PatchedBuiltinName", "(", "builtins", ".", "BuiltinName", ")", ":", "def", "_pycore", "(", "self", ")", ":", "p", "=", "self", ".", "pyobject", "while", "p", ".", "parent", "is", "not", "None", ":", "p", "=", "p", ".", "parent", "if", "isinstance", "(", "p", ",", "builtins", ".", "BuiltinModule", ")", "and", "p", ".", "pycore", "is", "not", "None", ":", "return", "p", ".", "pycore", "def", "get_definition_location", "(", "self", ")", ":", "if", "not", "inspect", ".", "isbuiltin", "(", "self", ".", "pyobject", ")", ":", "_lines", ",", "lineno", "=", "inspect", ".", "getsourcelines", "(", "self", ".", "pyobject", ".", "builtin", ")", "path", "=", "inspect", ".", "getfile", "(", "self", ".", "pyobject", ".", "builtin", ")", "if", "path", ".", "endswith", "(", "'pyc'", ")", "and", "osp", ".", "isfile", "(", "path", "[", ":", "-", "1", "]", ")", ":", "path", "=", "path", "[", ":", "-", "1", "]", "pycore", "=", "self", ".", "_pycore", "(", ")", "if", "pycore", "and", "pycore", ".", "project", ":", "resource", "=", "libutils", ".", "path_to_resource", "(", "pycore", ".", "project", ",", "path", ")", "module", "=", "pyobjects", ".", "PyModule", "(", "pycore", ",", "None", ",", "resource", ")", "return", "(", "module", ",", "lineno", ")", "return", "(", "None", ",", "None", ")", "builtins", ".", "BuiltinName", "=", "PatchedBuiltinName", "# [4] Patching several PyDocExtractor methods:\r", "# 1. get_doc:\r", "# To force rope to return the docstring of any object which has one, even\r", "# if it's not an instance of AbstractFunction, AbstractClass, or\r", "# AbstractModule.\r", "# Also, to use utils.dochelpers.getdoc to get docs from forced builtins.\r", "#\r", "# 2. _get_class_docstring and _get_single_function_docstring:\r", "# To not let rope add a 2 spaces indentation to every docstring, which was\r", "# breaking our rich text mode. The only value that we are modifying is the\r", "# 'indents' keyword of those methods, from 2 to 0.\r", "#\r", "# 3. 
get_calltip\r", "# To easily get calltips of forced builtins\r", "from", "rope", ".", "contrib", "import", "codeassist", "from", "spyder_kernels", ".", "utils", ".", "dochelpers", "import", "getdoc", "from", "rope", ".", "base", "import", "exceptions", "class", "PatchedPyDocExtractor", "(", "codeassist", ".", "PyDocExtractor", ")", ":", "def", "get_builtin_doc", "(", "self", ",", "pyobject", ")", ":", "buitin", "=", "pyobject", ".", "builtin", "return", "getdoc", "(", "buitin", ")", "def", "get_doc", "(", "self", ",", "pyobject", ")", ":", "if", "hasattr", "(", "pyobject", ",", "'builtin'", ")", ":", "doc", "=", "self", ".", "get_builtin_doc", "(", "pyobject", ")", "return", "doc", "elif", "isinstance", "(", "pyobject", ",", "builtins", ".", "BuiltinModule", ")", ":", "docstring", "=", "pyobject", ".", "get_doc", "(", ")", "if", "docstring", "is", "not", "None", ":", "docstring", "=", "self", ".", "_trim_docstring", "(", "docstring", ")", "else", ":", "docstring", "=", "''", "# TODO: Add a module_name key, so that the name could appear\r", "# on the OI text filed but not be used by sphinx to render\r", "# the page\r", "doc", "=", "{", "'name'", ":", "''", ",", "'argspec'", ":", "''", ",", "'note'", ":", "''", ",", "'docstring'", ":", "docstring", "}", "return", "doc", "elif", "isinstance", "(", "pyobject", ",", "pyobjects", ".", "AbstractFunction", ")", ":", "return", "self", ".", "_get_function_docstring", "(", "pyobject", ")", "elif", "isinstance", "(", "pyobject", ",", "pyobjects", ".", "AbstractClass", ")", ":", "return", "self", ".", "_get_class_docstring", "(", "pyobject", ")", "elif", "isinstance", "(", "pyobject", ",", "pyobjects", ".", "AbstractModule", ")", ":", "return", "self", ".", "_trim_docstring", "(", "pyobject", ".", "get_doc", "(", ")", ")", "elif", "pyobject", ".", "get_doc", "(", ")", "is", "not", "None", ":", "# Spyder patch\r", "return", "self", ".", "_trim_docstring", "(", "pyobject", ".", "get_doc", "(", ")", ")", "return", "None", "def", "get_calltip", "(", "self", ",", "pyobject", ",", "ignore_unknown", "=", "False", ",", "remove_self", "=", "False", ")", ":", "if", "hasattr", "(", "pyobject", ",", "'builtin'", ")", ":", "doc", "=", "self", ".", "get_builtin_doc", "(", "pyobject", ")", "return", "doc", "[", "'name'", "]", "+", "doc", "[", "'argspec'", "]", "try", ":", "if", "isinstance", "(", "pyobject", ",", "pyobjects", ".", "AbstractClass", ")", ":", "pyobject", "=", "pyobject", "[", "'__init__'", "]", ".", "get_object", "(", ")", "if", "not", "isinstance", "(", "pyobject", ",", "pyobjects", ".", "AbstractFunction", ")", ":", "pyobject", "=", "pyobject", "[", "'__call__'", "]", ".", "get_object", "(", ")", "except", "exceptions", ".", "AttributeNotFoundError", ":", "return", "None", "if", "ignore_unknown", "and", "not", "isinstance", "(", "pyobject", ",", "pyobjects", ".", "PyFunction", ")", ":", "return", "if", "isinstance", "(", "pyobject", ",", "pyobjects", ".", "AbstractFunction", ")", ":", "result", "=", "self", ".", "_get_function_signature", "(", "pyobject", ",", "add_module", "=", "True", ")", "if", "remove_self", "and", "self", ".", "_is_method", "(", "pyobject", ")", ":", "return", "result", ".", "replace", "(", "'(self)'", ",", "'()'", ")", ".", "replace", "(", "'(self, '", ",", "'('", ")", "return", "result", "def", "_get_class_docstring", "(", "self", ",", "pyclass", ")", ":", "contents", "=", "self", ".", "_trim_docstring", "(", "pyclass", ".", "get_doc", "(", ")", ",", "indents", "=", "0", ")", "supers", "=", "[", "super", ".", "get_name", "(", 
")", "for", "super", "in", "pyclass", ".", "get_superclasses", "(", ")", "]", "doc", "=", "'class %s(%s):\\n\\n'", "%", "(", "pyclass", ".", "get_name", "(", ")", ",", "', '", ".", "join", "(", "supers", ")", ")", "+", "contents", "if", "'__init__'", "in", "pyclass", ":", "init", "=", "pyclass", "[", "'__init__'", "]", ".", "get_object", "(", ")", "if", "isinstance", "(", "init", ",", "pyobjects", ".", "AbstractFunction", ")", ":", "doc", "+=", "'\\n\\n'", "+", "self", ".", "_get_single_function_docstring", "(", "init", ")", "return", "doc", "def", "_get_single_function_docstring", "(", "self", ",", "pyfunction", ")", ":", "docs", "=", "pyfunction", ".", "get_doc", "(", ")", "docs", "=", "self", ".", "_trim_docstring", "(", "docs", ",", "indents", "=", "0", ")", "return", "docs", "codeassist", ".", "PyDocExtractor", "=", "PatchedPyDocExtractor", "# [5] Get the right matplotlib docstrings for Help\r", "try", ":", "import", "matplotlib", "as", "mpl", "mpl", ".", "rcParams", "[", "'docstring.hardcopy'", "]", "=", "True", "except", ":", "pass" ]
Monkey patching rope See [1], [2], [3], [4] and [5] in module docstring.
[ "Monkey", "patching", "rope", "See", "[", "1", "]", "[", "2", "]", "[", "3", "]", "[", "4", "]", "and", "[", "5", "]", "in", "module", "docstring", "." ]
python
train
pdkit/pdkit
pdkit/finger_tapping_processor.py
https://github.com/pdkit/pdkit/blob/c7120263da2071bb139815fbdb56ca77b544f340/pdkit/finger_tapping_processor.py#L209-L229
def extract_features(self, data_frame, pre=''):
    """
    This method extracts all the features available to the Finger Tapping Processor class.

    :param data_frame: the data frame
    :type data_frame: pandas.DataFrame
    :return: 'frequency', 'mean_moving_time', 'incoordination_score', \
            'mean_alnt_target_distance', 'kinesia_scores', 'akinesia_times', 'dysmetria_score'
    :rtype: dict
    """
    try:
        return {pre+'frequency': self.frequency(data_frame)[0],
                pre+'mean_moving_time': self.mean_moving_time(data_frame)[0],
                pre+'incoordination_score': self.incoordination_score(data_frame)[0],
                pre+'mean_alnt_target_distance': self.mean_alnt_target_distance(data_frame)[0],
                pre+'kinesia_scores': self.kinesia_scores(data_frame)[0],
                pre+'akinesia_times': self.akinesia_times(data_frame)[0],
                pre+'dysmetria_score': self.dysmetria_score(data_frame)[0]}
    except:
        logging.error("Error on FingerTappingProcessor process, extract features: %s", sys.exc_info()[0])
[ "def", "extract_features", "(", "self", ",", "data_frame", ",", "pre", "=", "''", ")", ":", "try", ":", "return", "{", "pre", "+", "'frequency'", ":", "self", ".", "frequency", "(", "data_frame", ")", "[", "0", "]", ",", "pre", "+", "'mean_moving_time'", ":", "self", ".", "mean_moving_time", "(", "data_frame", ")", "[", "0", "]", ",", "pre", "+", "'incoordination_score'", ":", "self", ".", "incoordination_score", "(", "data_frame", ")", "[", "0", "]", ",", "pre", "+", "'mean_alnt_target_distance'", ":", "self", ".", "mean_alnt_target_distance", "(", "data_frame", ")", "[", "0", "]", ",", "pre", "+", "'kinesia_scores'", ":", "self", ".", "kinesia_scores", "(", "data_frame", ")", "[", "0", "]", ",", "pre", "+", "'akinesia_times'", ":", "self", ".", "akinesia_times", "(", "data_frame", ")", "[", "0", "]", ",", "pre", "+", "'dysmetria_score'", ":", "self", ".", "dysmetria_score", "(", "data_frame", ")", "[", "0", "]", "}", "except", ":", "logging", ".", "error", "(", "\"Error on FingerTappingProcessor process, extract features: %s\"", ",", "sys", ".", "exc_info", "(", ")", "[", "0", "]", ")" ]
This method extracts all the features available to the Finger Tapping Processor class.

:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return: 'frequency', 'mean_moving_time', 'incoordination_score', \
        'mean_alnt_target_distance', 'kinesia_scores', 'akinesia_times', 'dysmetria_score'
:rtype: dict
[ "This", "method", "extracts", "all", "the", "features", "available", "to", "the", "Finger", "Tapping", "Processor", "class", "." ]
python
train
dropbox/stone
stone/ir/data_types.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/ir/data_types.py#L1979-L2001
def unwrap(data_type): """ Convenience method to unwrap all Aliases and Nullables from around a DataType. This checks for nullable wrapping aliases, as well as aliases wrapping nullables. Args: data_type (DataType): The target to unwrap. Return: Tuple[DataType, bool, bool]: The underlying data type; a bool that is set if a nullable was present; a bool that is set if an alias was present. """ unwrapped_nullable = False unwrapped_alias = False while is_alias(data_type) or is_nullable_type(data_type): if is_nullable_type(data_type): unwrapped_nullable = True if is_alias(data_type): unwrapped_alias = True data_type = data_type.data_type return data_type, unwrapped_nullable, unwrapped_alias
[ "def", "unwrap", "(", "data_type", ")", ":", "unwrapped_nullable", "=", "False", "unwrapped_alias", "=", "False", "while", "is_alias", "(", "data_type", ")", "or", "is_nullable_type", "(", "data_type", ")", ":", "if", "is_nullable_type", "(", "data_type", ")", ":", "unwrapped_nullable", "=", "True", "if", "is_alias", "(", "data_type", ")", ":", "unwrapped_alias", "=", "True", "data_type", "=", "data_type", ".", "data_type", "return", "data_type", ",", "unwrapped_nullable", ",", "unwrapped_alias" ]
Convenience method to unwrap all Aliases and Nullables from around a DataType. This checks for nullable wrapping aliases, as well as aliases wrapping nullables. Args: data_type (DataType): The target to unwrap. Return: Tuple[DataType, bool, bool]: The underlying data type; a bool that is set if a nullable was present; a bool that is set if an alias was present.
[ "Convenience", "method", "to", "unwrap", "all", "Aliases", "and", "Nullables", "from", "around", "a", "DataType", ".", "This", "checks", "for", "nullable", "wrapping", "aliases", "as", "well", "as", "aliases", "wrapping", "nullables", "." ]
python
train
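The loop generalizes to any pair of wrapper types. A self-contained sketch with stand-in Alias/Nullable classes (minimal placeholders, not stone's real data types):

class Alias:
    def __init__(self, data_type):
        self.data_type = data_type

class Nullable:
    def __init__(self, data_type):
        self.data_type = data_type

def unwrap(data_type):
    # peel wrappers until a bare type remains, remembering what was seen
    saw_nullable = saw_alias = False
    while isinstance(data_type, (Alias, Nullable)):
        if isinstance(data_type, Nullable):
            saw_nullable = True
        else:
            saw_alias = True
        data_type = data_type.data_type
    return data_type, saw_nullable, saw_alias

print(unwrap(Alias(Nullable("String"))))  # ('String', True, True)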
rpcope1/HackerNewsAPI-Py
HackerNewsAPI/API.py
https://github.com/rpcope1/HackerNewsAPI-Py/blob/b231aed24ec59fc32af320bbef27d48cc4b69914/HackerNewsAPI/API.py#L108-L120
def get_top_stories(self):
    """
    Get the item numbers for the current top stories.
    Will raise a requests.HTTPError if we got a non-200 response back.
    :return: A list with the top story item numbers.
    """
    suburl = "v0/topstories.json"
    try:
        top_stories = self._make_request(suburl)
    except requests.HTTPError as e:
        hn_logger.exception('Faulted on getting top stories, with status {}'.format(e.errno))
        raise e
    return top_stories
[ "def", "get_top_stories", "(", "self", ")", ":", "suburl", "=", "\"v0/topstories.json\"", "try", ":", "top_stories", "=", "self", ".", "_make_request", "(", "suburl", ")", "except", "requests", ".", "HTTPError", "as", "e", ":", "hn_logger", ".", "exception", "(", "'Faulted on getting top stories, with status {}'", ".", "format", "(", "e", ".", "errno", ")", ")", "raise", "e", "return", "top_stories" ]
Get the item numbers for the current top stories.
Will raise a requests.HTTPError if we got a non-200 response back.
:return: A list with the top story item numbers.
[ "Get", "the", "item", "numbers", "for", "the", "current", "top", "stories", ".", "Will", "raise", "an", "requests", ".", "HTTPError", "if", "we", "got", "a", "non", "-", "200", "response", "back", ".", ":", "return", ":", "A", "list", "with", "the", "top", "story", "item", "numbers", "." ]
python
train
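Against the public Hacker News API the same call can be made directly; the base URL below is the documented Firebase endpoint, and raise_for_status() plays the role of the HTTPError check in the wrapper above.

import requests

url = "https://hacker-news.firebaseio.com/v0/topstories.json"
resp = requests.get(url, timeout=10)
resp.raise_for_status()       # raises requests.HTTPError on a non-200 response
top_stories = resp.json()     # list of item ids, highest-ranked first
print(top_stories[:5])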
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_threshold_monitor.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_threshold_monitor.py#L263-L278
def threshold_monitor_hidden_threshold_monitor_security_policy_area_sec_area_value(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor") threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor") security = ET.SubElement(threshold_monitor, "security") policy = ET.SubElement(security, "policy") sec_policy_name_key = ET.SubElement(policy, "sec_policy_name") sec_policy_name_key.text = kwargs.pop('sec_policy_name') area = ET.SubElement(policy, "area") sec_area_value = ET.SubElement(area, "sec_area_value") sec_area_value.text = kwargs.pop('sec_area_value') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "threshold_monitor_hidden_threshold_monitor_security_policy_area_sec_area_value", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "threshold_monitor_hidden", "=", "ET", ".", "SubElement", "(", "config", ",", "\"threshold-monitor-hidden\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-threshold-monitor\"", ")", "threshold_monitor", "=", "ET", ".", "SubElement", "(", "threshold_monitor_hidden", ",", "\"threshold-monitor\"", ")", "security", "=", "ET", ".", "SubElement", "(", "threshold_monitor", ",", "\"security\"", ")", "policy", "=", "ET", ".", "SubElement", "(", "security", ",", "\"policy\"", ")", "sec_policy_name_key", "=", "ET", ".", "SubElement", "(", "policy", ",", "\"sec_policy_name\"", ")", "sec_policy_name_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'sec_policy_name'", ")", "area", "=", "ET", ".", "SubElement", "(", "policy", ",", "\"area\"", ")", "sec_area_value", "=", "ET", ".", "SubElement", "(", "area", ",", "\"sec_area_value\"", ")", "sec_area_value", ".", "text", "=", "kwargs", ".", "pop", "(", "'sec_area_value'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
RetailMeNotSandbox/acky
acky/ec2.py
https://github.com/RetailMeNotSandbox/acky/blob/fcd4d092c42892ede7c924cafc41e9cf4be3fb9f/acky/ec2.py#L485-L508
def create(self, az, size_or_snap, volume_type=None, iops=None, encrypted=True): """Create an EBS Volume using an availability-zone and size_or_snap parameter, encrypted by default. If the volume is created from a snapshot, (str)size_or_snap denotes the snapshot id. Otherwise, (int)size_or_snap denotes the amount of GiB's to allocate. iops must be set if the volume type is io1. """ kwargs = {} kwargs['Encrypted'] = encrypted if volume_type: kwargs['VolumeType'] = volume_type if iops: kwargs['Iops'] = iops is_snapshot_id = False try: size_or_snap = int(size_or_snap) except ValueError: is_snapshot_id = True if is_snapshot_id: return self.call("CreateVolume", AvailabilityZone=az, SnapshotId=size_or_snap, **kwargs) return self.call("CreateVolume", AvailabilityZone=az, Size=size_or_snap, **kwargs)
[ "def", "create", "(", "self", ",", "az", ",", "size_or_snap", ",", "volume_type", "=", "None", ",", "iops", "=", "None", ",", "encrypted", "=", "True", ")", ":", "kwargs", "=", "{", "}", "kwargs", "[", "'encrypted'", "]", "=", "encrypted", "if", "volume_type", ":", "kwargs", "[", "'VolumeType'", "]", "=", "volume_type", "if", "iops", ":", "kwargs", "[", "'Iops'", "]", "=", "iops", "is_snapshot_id", "=", "False", "try", ":", "size_or_snap", "=", "int", "(", "size_or_snap", ")", "except", "ValueError", ":", "is_snapshot_id", "=", "True", "if", "is_snapshot_id", ":", "return", "self", ".", "call", "(", "\"CreateVolume\"", ",", "AvailabilityZone", "=", "az", ",", "SnapshotId", "=", "size_or_snap", ",", "*", "*", "kwargs", ")", "return", "self", ".", "call", "(", "\"CreateVolume\"", ",", "AvailabilityZone", "=", "az", ",", "Size", "=", "size_or_snap", ",", "*", "*", "kwargs", ")" ]
Create an EBS Volume using an availability-zone and size_or_snap parameter, encrypted by default. If the volume is created from a snapshot, (str)size_or_snap denotes the snapshot id. Otherwise, (int)size_or_snap denotes the amount of GiB's to allocate. iops must be set if the volume type is io1.
[ "Create", "an", "EBS", "Volume", "using", "an", "availability", "-", "zone", "and", "size_or_snap", "parameter", "encrypted", "by", "default", ".", "If", "the", "volume", "is", "crated", "from", "a", "snapshot", "(", "str", ")", "size_or_snap", "denotes", "the", "snapshot", "id", ".", "Otherwise", "(", "int", ")", "size_or_snap", "denotes", "the", "amount", "of", "GiB", "s", "to", "allocate", ".", "iops", "must", "be", "set", "if", "the", "volume", "type", "is", "io1", "." ]
python
train
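A hedged sketch of the calling conventions the docstring describes. The `AWS` entry point and the `aws.ec2.Volumes` handle are assumptions about acky's surface; the `create` arguments themselves follow the entry above:

from acky.aws import AWS  # assumed top-level entry point

aws = AWS("us-east-1")
volumes = aws.ec2.Volumes  # assumed handle exposing the create() method above

# 100 GiB gp2 volume; encryption is on by default
vol = volumes.create("us-east-1a", 100, volume_type="gp2")

# iops is required when volume_type is "io1"
fast = volumes.create("us-east-1a", 500, volume_type="io1", iops=4000)

# a value that does not parse as an int is treated as a snapshot id
restored = volumes.create("us-east-1a", "snap-0123456789abcdef0")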
xolox/python-update-dotdee
update_dotdee/__init__.py
https://github.com/xolox/python-update-dotdee/blob/04d5836f0d217e32778745b533beeb8159d80c32/update_dotdee/__init__.py#L251-L265
def available_files(self): """ The filenames of the available configuration files (a list of strings). The value of :attr:`available_files` is computed the first time it's needed by searching for available configuration files that match :attr:`filename_patterns` using :func:`~glob.glob()`. If you set :attr:`available_files` this effectively disables searching for configuration files. """ matches = [] for pattern in self.filename_patterns: logger.debug("Matching filename pattern: %s", pattern) matches.extend(natsort(glob.glob(parse_path(pattern)))) return matches
[ "def", "available_files", "(", "self", ")", ":", "matches", "=", "[", "]", "for", "pattern", "in", "self", ".", "filename_patterns", ":", "logger", ".", "debug", "(", "\"Matching filename pattern: %s\"", ",", "pattern", ")", "matches", ".", "extend", "(", "natsort", "(", "glob", ".", "glob", "(", "parse_path", "(", "pattern", ")", ")", ")", ")", "return", "matches" ]
The filenames of the available configuration files (a list of strings). The value of :attr:`available_files` is computed the first time it's needed by searching for available configuration files that match :attr:`filename_patterns` using :func:`~glob.glob()`. If you set :attr:`available_files` this effectively disables searching for configuration files.
[ "The", "filenames", "of", "the", "available", "configuration", "files", "(", "a", "list", "of", "strings", ")", "." ]
python
train
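A sketch of the property in use. `ConfigLoader` and its `program_name` keyword are assumed to be the host class for this property (the entry shows only the property body):

from update_dotdee import ConfigLoader  # assumed host class for available_files

loader = ConfigLoader(program_name="myprogram")
# First access runs the glob() search over filename_patterns;
# assigning a list to available_files would skip the search entirely.
for filename in loader.available_files:
    print(filename)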
yinkaisheng/Python-UIAutomation-for-Windows
uiautomation/uiautomation.py
https://github.com/yinkaisheng/Python-UIAutomation-for-Windows/blob/2cc91060982cc8b777152e698d677cc2989bf263/uiautomation/uiautomation.py#L7703-L7719
def WaitHotKeyReleased(hotkey: tuple) -> None: """hotkey: a tuple of two ints, (modifierKey, key)""" mod = {ModifierKey.Alt: Keys.VK_MENU, ModifierKey.Control: Keys.VK_CONTROL, ModifierKey.Shift: Keys.VK_SHIFT, ModifierKey.Win: Keys.VK_LWIN } while True: time.sleep(0.05) if IsKeyPressed(hotkey[1]): continue for k, v in mod.items(): if k & hotkey[0]: if IsKeyPressed(v): break else: break
[ "def", "WaitHotKeyReleased", "(", "hotkey", ":", "tuple", ")", "->", "None", ":", "mod", "=", "{", "ModifierKey", ".", "Alt", ":", "Keys", ".", "VK_MENU", ",", "ModifierKey", ".", "Control", ":", "Keys", ".", "VK_CONTROL", ",", "ModifierKey", ".", "Shift", ":", "Keys", ".", "VK_SHIFT", ",", "ModifierKey", ".", "Win", ":", "Keys", ".", "VK_LWIN", "}", "while", "True", ":", "time", ".", "sleep", "(", "0.05", ")", "if", "IsKeyPressed", "(", "hotkey", "[", "1", "]", ")", ":", "continue", "for", "k", ",", "v", "in", "mod", ".", "items", "(", ")", ":", "if", "k", "&", "hotkey", "[", "0", "]", ":", "if", "IsKeyPressed", "(", "v", ")", ":", "break", "else", ":", "break" ]
hotkey: a tuple of two ints, (modifierKey, key)
[ "hotkey", ":", "tuple", "two", "ints", "tuple", "(", "modifierKey", "key", ")" ]
python
valid
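A short calling sketch; `ModifierKey` and `Keys` are the enums referenced in the entry's own code, and the `uiautomation` import alias is an assumption:

import uiautomation as auto

# Block until Ctrl+D is fully released, i.e. neither the key
# nor any matching modifier is still held down.
auto.WaitHotKeyReleased((auto.ModifierKey.Control, auto.Keys.VK_D))
print("hotkey released")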
mpg-age-bioinformatics/AGEpy
AGEpy/kegg.py
https://github.com/mpg-age-bioinformatics/AGEpy/blob/887808a7a2c1504f39ce8d8cb36c15c1721cd29f/AGEpy/kegg.py#L83-L105
def ensembl_to_kegg(organism,kegg_db): """ Looks up KEGG mappings of KEGG ids to ensembl ids :param organism: an organism as listed in organismsKEGG() :param kegg_db: a matching KEGG db as reported in databasesKEGG :returns: a Pandas dataframe with columns 'KEGGid' and 'ENSid'. """ print("KEGG API: http://rest.genome.jp/link/"+kegg_db+"/"+organism) sys.stdout.flush() kegg_ens=urlopen("http://rest.genome.jp/link/"+kegg_db+"/"+organism).read().decode() kegg_ens=kegg_ens.split("\n") final=[] for i in kegg_ens: final.append(i.split("\t")) df=pd.DataFrame(final[0:len(final)-1])[[0,1]] ens_id=pd.DataFrame(df[1].str.split(":").tolist())[1] df=pd.concat([df,ens_id],axis=1) df.columns=['KEGGid','ensDB','ENSid'] df=df[['KEGGid','ENSid']] return df
[ "def", "ensembl_to_kegg", "(", "organism", ",", "kegg_db", ")", ":", "print", "(", "\"KEGG API: http://rest.genome.jp/link/\"", "+", "kegg_db", "+", "\"/\"", "+", "organism", ")", "sys", ".", "stdout", ".", "flush", "(", ")", "kegg_ens", "=", "urlopen", "(", "\"http://rest.genome.jp/link/\"", "+", "kegg_db", "+", "\"/\"", "+", "organism", ")", ".", "read", "(", ")", "kegg_ens", "=", "kegg_ens", ".", "split", "(", "\"\\n\"", ")", "final", "=", "[", "]", "for", "i", "in", "kegg_ens", ":", "final", ".", "append", "(", "i", ".", "split", "(", "\"\\t\"", ")", ")", "df", "=", "pd", ".", "DataFrame", "(", "final", "[", "0", ":", "len", "(", "final", ")", "-", "1", "]", ")", "[", "[", "0", ",", "1", "]", "]", "ens_id", "=", "pd", ".", "DataFrame", "(", "df", "[", "1", "]", ".", "str", ".", "split", "(", "\":\"", ")", ".", "tolist", "(", ")", ")", "[", "1", "]", "df", "=", "pd", ".", "concat", "(", "[", "df", ",", "ens_id", "]", ",", "axis", "=", "1", ")", "df", ".", "columns", "=", "[", "'KEGGid'", ",", "'ensDB'", ",", "'ENSid'", "]", "df", "=", "df", "[", "[", "'KEGGid'", ",", "'ENSid'", "]", "]", "return", "df" ]
Looks up KEGG mappings of KEGG ids to ensembl ids :param organism: an organism as listed in organismsKEGG() :param kegg_db: a matching KEGG db as reported in databasesKEGG :returns: a Pandas dataframe with columns 'KEGGid' and 'ENSid'.
[ "Looks", "up", "KEGG", "mappings", "of", "KEGG", "ids", "to", "ensembl", "ids" ]
python
train
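A hedged usage sketch; the import path mirrors the entry's file path, and the organism/database codes below are placeholders to be looked up via organismsKEGG() and databasesKEGG as the docstring instructs:

from AGEpy.kegg import ensembl_to_kegg

# 'hsa' and 'T01001' are illustrative codes only
mapping = ensembl_to_kegg("hsa", "T01001")
print(mapping.head())  # two columns: 'KEGGid' and 'ENSid'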
sorgerlab/indra
indra/sources/bel/rdf_processor.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/sources/bel/rdf_processor.py#L21-L35
def namespace_from_uri(uri): """Return the entity namespace from the URI. Examples: http://www.openbel.org/bel/p_HGNC_RAF1 -> HGNC http://www.openbel.org/bel/p_RGD_Raf1 -> RGD http://www.openbel.org/bel/p_PFH_MEK1/2_Family -> PFH """ patterns = ['http://www.openbel.org/bel/[pragm]_([A-Za-z]+)_.*', 'http://www.openbel.org/bel/[a-z]+_[pr]_([A-Za-z]+)_.*', 'http://www.openbel.org/bel/[a-z]+_complex_([A-Za-z]+)_.*', 'http://www.openbel.org/bel/complex_([A-Za-z]+)_.*'] for pr in patterns: match = re.match(pr, uri) if match is not None: return match.groups()[0] return None
[ "def", "namespace_from_uri", "(", "uri", ")", ":", "patterns", "=", "[", "'http://www.openbel.org/bel/[pragm]_([A-Za-z]+)_.*'", ",", "'http://www.openbel.org/bel/[a-z]+_[pr]_([A-Za-z]+)_.*'", ",", "'http://www.openbel.org/bel/[a-z]+_complex_([A-Za-z]+)_.*'", ",", "'http://www.openbel.org/bel/complex_([A-Za-z]+)_.*'", "]", "for", "pr", "in", "patterns", ":", "match", "=", "re", ".", "match", "(", "pr", ",", "uri", ")", "if", "match", "is", "not", "None", ":", "return", "match", ".", "groups", "(", ")", "[", "0", "]", "return", "None" ]
Return the entity namespace from the URI. Examples: http://www.openbel.org/bel/p_HGNC_RAF1 -> HGNC http://www.openbel.org/bel/p_RGD_Raf1 -> RGD http://www.openbel.org/bel/p_PFH_MEK1/2_Family -> PFH
[ "Return", "the", "entity", "namespace", "from", "the", "URI", ".", "Examples", ":", "http", ":", "//", "www", ".", "openbel", ".", "org", "/", "bel", "/", "p_HGNC_RAF1", "-", ">", "HGNC", "http", ":", "//", "www", ".", "openbel", ".", "org", "/", "bel", "/", "p_RGD_Raf1", "-", ">", "RGD", "http", ":", "//", "www", ".", "openbel", ".", "org", "/", "bel", "/", "p_PFH_MEK1", "/", "2_Family", "-", ">", "PFH" ]
python
train
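The docstring's own examples, replayed as a quick check (the import path follows the entry's file path):

from indra.sources.bel.rdf_processor import namespace_from_uri

assert namespace_from_uri('http://www.openbel.org/bel/p_HGNC_RAF1') == 'HGNC'
assert namespace_from_uri('http://www.openbel.org/bel/p_RGD_Raf1') == 'RGD'
assert namespace_from_uri('http://www.openbel.org/bel/p_PFH_MEK1/2_Family') == 'PFH'
assert namespace_from_uri('http://example.org/not-a-bel-uri') is None  # no pattern matches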
ContextLab/hypertools
hypertools/tools/reduce.py
https://github.com/ContextLab/hypertools/blob/b76c7ac8061998b560e969ff8e4f4c915088e7a0/hypertools/tools/reduce.py#L36-L158
def reduce(x, reduce='IncrementalPCA', ndims=None, normalize=None, align=None, model=None, model_params=None, internal=False, format_data=True): """ Reduces dimensionality of an array, or list of arrays Parameters ---------- x : Numpy array or list of arrays Dimensionality reduction using PCA is performed on this array. reduce : str or dict Decomposition/manifold learning model to use. Models supported: PCA, IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA, FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning, TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, MDS and UMAP. Can be passed as a string, but for finer control of the model parameters, pass as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}. See scikit-learn specific model docs for details on parameters supported for each model. ndims : int Number of dimensions to reduce format_data : bool Whether or not to first call the format_data function (default: True). model : None Deprecated argument. Please use reduce. model_params : None Deprecated argument. Please use reduce. align : None Deprecated argument. Please use new analyze function to perform combinations of transformations normalize : None Deprecated argument. Please use new analyze function to perform combinations of transformations Returns ---------- x_reduced : Numpy array or list of arrays The reduced data with ndims dimensionality is returned. If the input is a list, a list is returned. """ # deprecated warning if (model is not None) or (model_params is not None): warnings.warn('Model and model params will be deprecated. Please use the \ reduce keyword. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.tools.reduce.html#hypertools.tools.reduce') reduce = {} reduce['model'] = model reduce['params'] = model_params # if model is None, just return data if reduce is None: return x else: # common format if format_data: x = formatter(x, ppca=True) if np.vstack([i for i in x]).shape[0]==1: warnings.warn('Cannot reduce the dimensionality of a single row of' ' data. Return zeros length of ndims') return [np.zeros((1, ndims))] if ndims: if np.vstack([i for i in x]).shape[0]<ndims: warnings.warn('The number of rows in your data is less than ndims.' ' The data will be reduced to the number of rows.') # deprecation warnings if normalize is not None: warnings.warn('The normalize argument will be deprecated for this function. Please use the \ analyze function to perform combinations of these transformations. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.analyze.html#hypertools.analyze') x = normalizer(x, normalize=normalize) if align is not None: warnings.warn('The align argument will be deprecated for this function. Please use the \ analyze function to perform combinations of these transformations. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.analyze.html#hypertools.analyze') x = aligner(x, align=align) # if the shape of the data is already less than ndims, just return it if ndims is None: return x elif all([i.shape[1]<=ndims for i in x]): return x # if reduce is a string, find the corresponding model if type(reduce) in [str, np.string_]: model = models[reduce] model_params = { 'n_components' : ndims } # if its a dict, use custom params elif type(reduce) is dict: if isinstance((reduce['model']), six.string_types): model = models[reduce['model']] if reduce['params'] is None: model_params = { 'n_components' : ndims } else: model_params = reduce['params'] if ndims: model_params = { 'n_components' : ndims } # initialize model model = model(**model_params) # reduce data x_reduced = reduce_list(x, model) # return data if internal or len(x_reduced)>1: return x_reduced else: return x_reduced[0]
[ "def", "reduce", "(", "x", ",", "reduce", "=", "'IncrementalPCA'", ",", "ndims", "=", "None", ",", "normalize", "=", "None", ",", "align", "=", "None", ",", "model", "=", "None", ",", "model_params", "=", "None", ",", "internal", "=", "False", ",", "format_data", "=", "True", ")", ":", "# deprecated warning", "if", "(", "model", "is", "not", "None", ")", "or", "(", "model_params", "is", "not", "None", ")", ":", "warnings", ".", "warn", "(", "'Model and model params will be deprecated. Please use the \\\n reduce keyword. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.tools.reduce.html#hypertools.tools.reduce'", ")", "reduce", "=", "{", "}", "reduce", "[", "'model'", "]", "=", "model", "reduce", "[", "'params'", "]", "=", "model_params", "# if model is None, just return data", "if", "reduce", "is", "None", ":", "return", "x", "else", ":", "# common format", "if", "format_data", ":", "x", "=", "formatter", "(", "x", ",", "ppca", "=", "True", ")", "if", "np", ".", "vstack", "(", "[", "i", "for", "i", "in", "x", "]", ")", ".", "shape", "[", "0", "]", "==", "1", ":", "warnings", ".", "warn", "(", "'Cannot reduce the dimensionality of a single row of'", "' data. Return zeros length of ndims'", ")", "return", "[", "np", ".", "zeros", "(", "(", "1", ",", "ndims", ")", ")", "]", "if", "ndims", ":", "if", "np", ".", "vstack", "(", "[", "i", "for", "i", "in", "x", "]", ")", ".", "shape", "[", "0", "]", "<", "ndims", ":", "warnings", ".", "warn", "(", "'The number of rows in your data is less than ndims.'", "' The data will be reduced to the number of rows.'", ")", "# deprecation warnings", "if", "normalize", "is", "not", "None", ":", "warnings", ".", "warn", "(", "'The normalize argument will be deprecated for this function. Please use the \\\n analyze function to perform combinations of these transformations. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.analyze.html#hypertools.analyze'", ")", "x", "=", "normalizer", "(", "x", ",", "normalize", "=", "normalize", ")", "if", "align", "is", "not", "None", ":", "warnings", ".", "warn", "(", "'The align argument will be deprecated for this function. Please use the \\\n analyze function to perform combinations of these transformations. 
See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.analyze.html#hypertools.analyze'", ")", "x", "=", "aligner", "(", "x", ",", "align", "=", "align", ")", "# if the shape of the data is already less than ndims, just return it", "if", "ndims", "is", "None", ":", "return", "x", "elif", "all", "(", "[", "i", ".", "shape", "[", "1", "]", "<=", "ndims", "for", "i", "in", "x", "]", ")", ":", "return", "x", "# if reduce is a string, find the corresponding model", "if", "type", "(", "reduce", ")", "in", "[", "str", ",", "np", ".", "string_", "]", ":", "model", "=", "models", "[", "reduce", "]", "model_params", "=", "{", "'n_components'", ":", "ndims", "}", "# if its a dict, use custom params", "elif", "type", "(", "reduce", ")", "is", "dict", ":", "if", "isinstance", "(", "(", "reduce", "[", "'model'", "]", ")", ",", "six", ".", "string_types", ")", ":", "model", "=", "models", "[", "reduce", "[", "'model'", "]", "]", "if", "reduce", "[", "'params'", "]", "is", "None", ":", "model_params", "=", "{", "'n_components'", ":", "ndims", "}", "else", ":", "model_params", "=", "reduce", "[", "'params'", "]", "if", "ndims", ":", "model_params", "=", "{", "'n_components'", ":", "ndims", "}", "# initialize model", "model", "=", "model", "(", "*", "*", "model_params", ")", "# reduce data", "x_reduced", "=", "reduce_list", "(", "x", ",", "model", ")", "# return data", "if", "internal", "or", "len", "(", "x_reduced", ")", ">", "1", ":", "return", "x_reduced", "else", ":", "return", "x_reduced", "[", "0", "]" ]
Reduces dimensionality of an array, or list of arrays Parameters ---------- x : Numpy array or list of arrays Dimensionality reduction using PCA is performed on this array. reduce : str or dict Decomposition/manifold learning model to use. Models supported: PCA, IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA, FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning, TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, MDS and UMAP. Can be passed as a string, but for finer control of the model parameters, pass as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}. See scikit-learn specific model docs for details on parameters supported for each model. ndims : int Number of dimensions to reduce format_data : bool Whether or not to first call the format_data function (default: True). model : None Deprecated argument. Please use reduce. model_params : None Deprecated argument. Please use reduce. align : None Deprecated argument. Please use new analyze function to perform combinations of transformations normalize : None Deprecated argument. Please use new analyze function to perform combinations of transformations Returns ---------- x_reduced : Numpy array or list of arrays The reduced data with ndims dimensionality is returned. If the input is a list, a list is returned.
[ "Reduces", "dimensionality", "of", "an", "array", "or", "list", "of", "arrays" ]
python
train
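A brief sketch of the two calling conventions the docstring describes; random data stands in for real features, and the import follows the entry's file path:

import numpy as np
from hypertools.tools.reduce import reduce as hyp_reduce

x = [np.random.rand(20, 10), np.random.rand(20, 10)]

# string form: select a model by name
reduced = hyp_reduce(x, reduce='IncrementalPCA', ndims=3)

# dict form from the docstring; note that, as the code is written,
# an explicit ndims overwrites custom params with {'n_components': ndims}
reduced_pca = hyp_reduce(x, reduce={'model': 'PCA', 'params': {'whiten': True}}, ndims=3)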
jobovy/galpy
galpy/orbit/Orbit.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/Orbit.py#L794-L825
def E(self,*args,**kwargs): """ NAME: E PURPOSE: calculate the energy INPUT: t - (optional) time at which to get the energy (can be Quantity) pot= Potential instance or list of such instances vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity) use_physical= use to override Object-wide default for using a physical scale for output OUTPUT: energy HISTORY: 2010-09-15 - Written - Bovy (NYU) """ if not kwargs.get('pot',None) is None: kwargs['pot']= flatten_potential(kwargs.get('pot')) _check_consistent_units(self,kwargs.get('pot',None)) return self._orb.E(*args,**kwargs)
[ "def", "E", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "kwargs", ".", "get", "(", "'pot'", ",", "None", ")", "is", "None", ":", "kwargs", "[", "'pot'", "]", "=", "flatten_potential", "(", "kwargs", ".", "get", "(", "'pot'", ")", ")", "_check_consistent_units", "(", "self", ",", "kwargs", ".", "get", "(", "'pot'", ",", "None", ")", ")", "return", "self", ".", "_orb", ".", "E", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
NAME: E PURPOSE: calculate the energy INPUT: t - (optional) time at which to get the energy (can be Quantity) pot= Potential instance or list of such instances vo= (Object-wide default) physical scale for velocities to use to convert (can be Quantity) use_physical= use to override Object-wide default for using a physical scale for output OUTPUT: energy HISTORY: 2010-09-15 - Written - Bovy (NYU)
[ "NAME", ":" ]
python
train
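A minimal sketch in galpy's usual style; the initial conditions and the potential choice are illustrative, not taken from the entry:

from galpy.orbit import Orbit
from galpy.potential import MWPotential2014  # illustrative potential choice

o = Orbit(vxvv=[1., 0.1, 1.1, 0., 0.1, 0.])  # [R, vR, vT, z, vz, phi] in internal units
print(o.E(pot=MWPotential2014))  # energy of the initial conditions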
pavlin-policar/openTSNE
openTSNE/affinity.py
https://github.com/pavlin-policar/openTSNE/blob/28513a0d669f2f20e7b971c0c6373dc375f72771/openTSNE/affinity.py#L139-L178
def set_perplexity(self, new_perplexity): """Change the perplexity of the affinity matrix. Note that we only allow lowering the perplexity or restoring it to its original value. This restriction exists because setting a higher perplexity value requires recomputing all the nearest neighbors, which can take a long time. To avoid potential confusion as to why execution time is slow, this is not allowed. If you would like to increase the perplexity above the initial value, simply create a new instance. Parameters ---------- new_perplexity: float The new perplexity. """ # If the value hasn't changed, there's nothing to do if new_perplexity == self.perplexity: return # Verify that the perplexity isn't too large new_perplexity = self.check_perplexity(new_perplexity) # Recompute the affinity matrix k_neighbors = min(self.n_samples - 1, int(3 * new_perplexity)) if k_neighbors > self.__neighbors.shape[1]: raise RuntimeError( "The desired perplexity `%.2f` is larger than the initial one " "used. This would need to recompute the nearest neighbors, " "which is not efficient. Please create a new `%s` instance " "with the increased perplexity." % (new_perplexity, self.__class__.__name__) ) self.perplexity = new_perplexity self.P = joint_probabilities_nn( self.__neighbors[:, :k_neighbors], self.__distances[:, :k_neighbors], [self.perplexity], symmetrize=True, n_jobs=self.n_jobs, )
[ "def", "set_perplexity", "(", "self", ",", "new_perplexity", ")", ":", "# If the value hasn't changed, there's nothing to do", "if", "new_perplexity", "==", "self", ".", "perplexity", ":", "return", "# Verify that the perplexity isn't too large", "new_perplexity", "=", "self", ".", "check_perplexity", "(", "new_perplexity", ")", "# Recompute the affinity matrix", "k_neighbors", "=", "min", "(", "self", ".", "n_samples", "-", "1", ",", "int", "(", "3", "*", "new_perplexity", ")", ")", "if", "k_neighbors", ">", "self", ".", "__neighbors", ".", "shape", "[", "1", "]", ":", "raise", "RuntimeError", "(", "\"The desired perplexity `%.2f` is larger than the initial one \"", "\"used. This would need to recompute the nearest neighbors, \"", "\"which is not efficient. Please create a new `%s` instance \"", "\"with the increased perplexity.\"", "%", "(", "new_perplexity", ",", "self", ".", "__class__", ".", "__name__", ")", ")", "self", ".", "perplexity", "=", "new_perplexity", "self", ".", "P", "=", "joint_probabilities_nn", "(", "self", ".", "__neighbors", "[", ":", ",", ":", "k_neighbors", "]", ",", "self", ".", "__distances", "[", ":", ",", ":", "k_neighbors", "]", ",", "[", "self", ".", "perplexity", "]", ",", "symmetrize", "=", "True", ",", "n_jobs", "=", "self", ".", "n_jobs", ",", ")" ]
Change the perplexity of the affinity matrix. Note that we only allow lowering the perplexity or restoring it to its original value. This restriction exists because setting a higher perplexity value requires recomputing all the nearest neighbors, which can take a long time. To avoid potential confusion as to why execution time is slow, this is not allowed. If you would like to increase the perplexity above the initial value, simply create a new instance. Parameters ---------- new_perplexity: float The new perplexity.
[ "Change", "the", "perplexity", "of", "the", "affinity", "matrix", "." ]
python
train
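A sketch of the lowering-only rule the docstring spells out; `PerplexityBasedNN` is assumed to be the openTSNE affinity class that carries this method:

import numpy as np
from openTSNE.affinity import PerplexityBasedNN  # assumed host class for set_perplexity

x = np.random.rand(500, 20)
affinities = PerplexityBasedNN(x, perplexity=30)

affinities.set_perplexity(15)   # lowering reuses the stored nearest neighbors
affinities.set_perplexity(30)   # restoring the original value is also allowed
# affinities.set_perplexity(60) would raise RuntimeError: more neighbors are needed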