Dataset schema (one record per row below; column name, type, and observed value range):

column            type    notes
repo              string  lengths 7 to 54
path              string  lengths 4 to 192
url               string  lengths 87 to 284
code              string  lengths 78 to 104k
code_tokens       list    tokenized form of code
docstring         string  lengths 1 to 46.9k
docstring_tokens  list    tokenized form of docstring
language          string  1 distinct value
partition         string  3 distinct values
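The records below follow this schema, one field per line. As a minimal sketch of inspecting such a dump programmatically, assuming the rows were exported as JSON Lines (the file name is hypothetical):

import json

# Hypothetical file containing one JSON object per line with the schema
# above (repo, path, url, code, code_tokens, docstring, ...).
with open("code_search_records.jsonl") as f:
    for line in f:
        record = json.loads(line)
        # Each record pairs a function's source with its docstring.
        print(record["repo"], record["path"], record["partition"])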
twilio/twilio-python
twilio/rest/taskrouter/v1/workspace/task_queue/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/task_queue/__init__.py#L617-L641
def update(self, friendly_name=values.unset, target_workers=values.unset,
           reservation_activity_sid=values.unset,
           assignment_activity_sid=values.unset,
           max_reserved_workers=values.unset, task_order=values.unset):
    """
    Update the TaskQueueInstance

    :param unicode friendly_name: Human readable description of this TaskQueue
    :param unicode target_workers: A string describing the Worker selection criteria for any Tasks that enter this TaskQueue.
    :param unicode reservation_activity_sid: ActivitySID that will be assigned to Workers when they are reserved for a task from this TaskQueue.
    :param unicode assignment_activity_sid: ActivitySID that will be assigned to Workers when they are assigned a task from this TaskQueue.
    :param unicode max_reserved_workers: The maximum amount of workers to create reservations for the assignment of a task while in this queue.
    :param TaskQueueInstance.TaskOrder task_order: TaskOrder will determine which order the Tasks will be assigned to Workers.

    :returns: Updated TaskQueueInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueInstance
    """
    return self._proxy.update(
        friendly_name=friendly_name,
        target_workers=target_workers,
        reservation_activity_sid=reservation_activity_sid,
        assignment_activity_sid=assignment_activity_sid,
        max_reserved_workers=max_reserved_workers,
        task_order=task_order,
    )
[ "def", "update", "(", "self", ",", "friendly_name", "=", "values", ".", "unset", ",", "target_workers", "=", "values", ".", "unset", ",", "reservation_activity_sid", "=", "values", ".", "unset", ",", "assignment_activity_sid", "=", "values", ".", "unset", ",", "max_reserved_workers", "=", "values", ".", "unset", ",", "task_order", "=", "values", ".", "unset", ")", ":", "return", "self", ".", "_proxy", ".", "update", "(", "friendly_name", "=", "friendly_name", ",", "target_workers", "=", "target_workers", ",", "reservation_activity_sid", "=", "reservation_activity_sid", ",", "assignment_activity_sid", "=", "assignment_activity_sid", ",", "max_reserved_workers", "=", "max_reserved_workers", ",", "task_order", "=", "task_order", ",", ")" ]
Update the TaskQueueInstance :param unicode friendly_name: Human readable description of this TaskQueue :param unicode target_workers: A string describing the Worker selection criteria for any Tasks that enter this TaskQueue. :param unicode reservation_activity_sid: ActivitySID that will be assigned to Workers when they are reserved for a task from this TaskQueue. :param unicode assignment_activity_sid: ActivitySID that will be assigned to Workers when they are assigned a task from this TaskQueue. :param unicode max_reserved_workers: The maximum amount of workers to create reservations for the assignment of a task while in this queue. :param TaskQueueInstance.TaskOrder task_order: TaskOrder will determine which order the Tasks will be assigned to Workers. :returns: Updated TaskQueueInstance :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueInstance
[ "Update", "the", "TaskQueueInstance" ]
python
train
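A brief usage sketch for the record above, assuming twilio-python's standard client; the credentials and SIDs are placeholders:

from twilio.rest import Client

# Placeholder credentials and SIDs; substitute real values.
client = Client("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "your_auth_token")

queue = client.taskrouter \
    .workspaces("WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
    .task_queues("WQXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
    .fetch()

# Keyword arguments left at values.unset are not modified on the server.
updated = queue.update(friendly_name="Support Queue", max_reserved_workers=3)
print(updated.friendly_name)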
gitenberg-dev/gitberg
gitenberg/workflow.py
https://github.com/gitenberg-dev/gitberg/blob/3f6db8b5a22ccdd2110d3199223c30db4e558b5c/gitenberg/workflow.py#L43-L57
def upload_list(book_id_list, rdf_library=None):
    """ Uses the fetch, make, push subcommands to add a list of pg books
    """
    with open(book_id_list, 'r') as f:
        cache = {}
        for book_id in f:
            book_id = book_id.strip()
            try:
                if int(book_id) in missing_pgid:
                    print(u'missing\t{}'.format(book_id))
                    continue
                upload_book(book_id, rdf_library=rdf_library, cache=cache)
            except Exception as e:
                print(u'error\t{}'.format(book_id))
                logger.error(u"Error processing: {}\r{}".format(book_id, e))
[ "def", "upload_list", "(", "book_id_list", ",", "rdf_library", "=", "None", ")", ":", "with", "open", "(", "book_id_list", ",", "'r'", ")", "as", "f", ":", "cache", "=", "{", "}", "for", "book_id", "in", "f", ":", "book_id", "=", "book_id", ".", "strip", "(", ")", "try", ":", "if", "int", "(", "book_id", ")", "in", "missing_pgid", ":", "print", "(", "u'missing\\t{}'", ".", "format", "(", "book_id", ")", ")", "continue", "upload_book", "(", "book_id", ",", "rdf_library", "=", "rdf_library", ",", "cache", "=", "cache", ")", "except", "Exception", "as", "e", ":", "print", "(", "u'error\\t{}'", ".", "format", "(", "book_id", ")", ")", "logger", ".", "error", "(", "u\"Error processing: {}\\r{}\"", ".", "format", "(", "book_id", ",", "e", ")", ")" ]
Uses the fetch, make, push subcommands to add a list of pg books
[ "Uses", "the", "fetch", "make", "push", "subcommands", "to", "add", "a", "list", "of", "pg", "books" ]
python
train
DeepHorizons/iarm
iarm/arm_instructions/arithmetic.py
https://github.com/DeepHorizons/iarm/blob/b913c9fd577b793a6bbced78b78a5d8d7cd88de4/iarm/arm_instructions/arithmetic.py#L151-L177
def CMP(self, params):
    """
    CMP Rm, Rn
    CMP Rm, #imm8

    Subtract Rn or imm8 from Rm, set the NZCV flags, and discard the result

    Rm and Rn can be R0-R14
    """
    Rm, Rn = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)

    if self.is_register(Rn):
        # CMP Rm, Rn
        self.check_arguments(R0_thru_R14=(Rm, Rn))

        def CMP_func():
            self.set_NZCV_flags(self.register[Rm], self.register[Rn],
                                self.register[Rm] - self.register[Rn], 'sub')
    else:
        # CMP Rm, #imm8
        self.check_arguments(R0_thru_R14=(Rm,), imm8=(Rn,))

        def CMP_func():
            tmp = self.convert_to_integer(Rn[1:])
            self.set_NZCV_flags(self.register[Rm], tmp,
                                self.register[Rm] - tmp, 'sub')

    return CMP_func
[ "def", "CMP", "(", "self", ",", "params", ")", ":", "Rm", ",", "Rn", "=", "self", ".", "get_two_parameters", "(", "self", ".", "TWO_PARAMETER_COMMA_SEPARATED", ",", "params", ")", "if", "self", ".", "is_register", "(", "Rn", ")", ":", "# CMP Rm, Rn", "self", ".", "check_arguments", "(", "R0_thru_R14", "=", "(", "Rm", ",", "Rn", ")", ")", "def", "CMP_func", "(", ")", ":", "self", ".", "set_NZCV_flags", "(", "self", ".", "register", "[", "Rm", "]", ",", "self", ".", "register", "[", "Rn", "]", ",", "self", ".", "register", "[", "Rm", "]", "-", "self", ".", "register", "[", "Rn", "]", ",", "'sub'", ")", "else", ":", "# CMP Rm, #imm8", "self", ".", "check_arguments", "(", "R0_thru_R14", "=", "(", "Rm", ",", ")", ",", "imm8", "=", "(", "Rn", ",", ")", ")", "def", "CMP_func", "(", ")", ":", "tmp", "=", "self", ".", "convert_to_integer", "(", "Rn", "[", "1", ":", "]", ")", "self", ".", "set_NZCV_flags", "(", "self", ".", "register", "[", "Rm", "]", ",", "tmp", ",", "self", ".", "register", "[", "Rm", "]", "-", "tmp", ",", "'sub'", ")", "return", "CMP_func" ]
CMP Rm, Rn
CMP Rm, #imm8

Subtract Rn or imm8 from Rm, set the NZCV flags, and discard the result

Rm and Rn can be R0-R14
[ "CMP", "Rm", "Rn", "CMP", "Rm", "#imm8" ]
python
train
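The CMP record above defers the flag math to set_NZCV_flags; as a generic illustration of how NZCV flags are conventionally derived for a 32-bit subtraction (not iarm's actual implementation), a self-contained sketch:

def nzcv_for_sub(a, b, bits=32):
    """Return (N, Z, C, V) flags for the subtraction a - b on `bits`-bit values."""
    def sign(x):
        return bool(x >> (bits - 1))

    mask = (1 << bits) - 1
    a, b = a & mask, b & mask
    result = (a - b) & mask
    n = sign(result)                 # negative: top bit of the result set
    z = result == 0                  # zero result
    c = a >= b                       # carry set when no unsigned borrow occurred
    v = sign(a) != sign(b) and sign(result) != sign(a)  # signed overflow
    return n, z, c, v

print(nzcv_for_sub(5, 7))   # (True, False, False, False)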
chemlab/chemlab
chemlab/core/spacegroup/spacegroup.py
https://github.com/chemlab/chemlab/blob/c8730966316d101e24f39ac3b96b51282aba0abe/chemlab/core/spacegroup/spacegroup.py#L256-L276
def symmetry_normalised_reflections(self, hkl):
    """Returns an array of same size as *hkl*, containing the
    corresponding symmetry-equivalent reflections of lowest
    indices.

    Example:

    >>> from ase.lattice.spacegroup import Spacegroup
    >>> sg = Spacegroup(225)  # fcc
    >>> sg.symmetry_normalised_reflections([[2, 0, 0], [0, 2, 0]])
    array([[ 0,  0, -2],
           [ 0,  0, -2]])
    """
    hkl = np.array(hkl, dtype=int, ndmin=2)
    normalised = np.empty(hkl.shape, int)
    R = self.get_rotations().transpose(0, 2, 1)
    for i, g in enumerate(hkl):
        gsym = np.dot(R, g)
        j = np.lexsort(gsym.T)[0]
        normalised[i, :] = gsym[j]
    return normalised
[ "def", "symmetry_normalised_reflections", "(", "self", ",", "hkl", ")", ":", "hkl", "=", "np", ".", "array", "(", "hkl", ",", "dtype", "=", "int", ",", "ndmin", "=", "2", ")", "normalised", "=", "np", ".", "empty", "(", "hkl", ".", "shape", ",", "int", ")", "R", "=", "self", ".", "get_rotations", "(", ")", ".", "transpose", "(", "0", ",", "2", ",", "1", ")", "for", "i", ",", "g", "in", "enumerate", "(", "hkl", ")", ":", "gsym", "=", "np", ".", "dot", "(", "R", ",", "g", ")", "j", "=", "np", ".", "lexsort", "(", "gsym", ".", "T", ")", "[", "0", "]", "normalised", "[", "i", ",", ":", "]", "=", "gsym", "[", "j", "]", "return", "normalised" ]
Returns an array of same size as *hkl*, containing the corresponding
symmetry-equivalent reflections of lowest indices.

Example:

>>> from ase.lattice.spacegroup import Spacegroup
>>> sg = Spacegroup(225)  # fcc
>>> sg.symmetry_normalised_reflections([[2, 0, 0], [0, 2, 0]])
array([[ 0,  0, -2],
       [ 0,  0, -2]])
[ "Returns", "an", "array", "of", "same", "size", "as", "*", "hkl", "*", "containing", "the", "corresponding", "symmetry", "-", "equivalent", "reflections", "of", "lowest", "indices", "." ]
python
train
signalfx/signalfx-python
examples/signalflow/dataframe.py
https://github.com/signalfx/signalfx-python/blob/650eb9a2b301bcc795e4e3a8c031574ade69849d/examples/signalflow/dataframe.py#L20-L42
def get_data_frame(client, program, start, stop, resolution=None):
    """Executes the given program across the given time range (expressed
    in millisecond timestamps since Epoch), and returns a Pandas
    DataFrame containing the results, indexed by output timestamp.

    If the program contains multiple publish() calls, their outputs are
    merged into the returned DataFrame."""
    data = {}
    metadata = {}
    c = client.execute(program, start=start, stop=stop, resolution=resolution)
    for msg in c.stream():
        if isinstance(msg, messages.DataMessage):
            if msg.logical_timestamp_ms in data:
                data[msg.logical_timestamp_ms].update(msg.data)
            else:
                data[msg.logical_timestamp_ms] = msg.data
        elif isinstance(msg, messages.MetadataMessage):
            metadata[msg.tsid] = msg.properties
    df = pandas.DataFrame.from_dict(data, orient='index')
    df.metadata = metadata
    return df
[ "def", "get_data_frame", "(", "client", ",", "program", ",", "start", ",", "stop", ",", "resolution", "=", "None", ")", ":", "data", "=", "{", "}", "metadata", "=", "{", "}", "c", "=", "client", ".", "execute", "(", "program", ",", "start", "=", "start", ",", "stop", "=", "stop", ",", "resolution", "=", "resolution", ")", "for", "msg", "in", "c", ".", "stream", "(", ")", ":", "if", "isinstance", "(", "msg", ",", "messages", ".", "DataMessage", ")", ":", "if", "msg", ".", "logical_timestamp_ms", "in", "data", ":", "data", "[", "msg", ".", "logical_timestamp_ms", "]", ".", "update", "(", "msg", ".", "data", ")", "else", ":", "data", "[", "msg", ".", "logical_timestamp_ms", "]", "=", "msg", ".", "data", "elif", "isinstance", "(", "msg", ",", "messages", ".", "MetadataMessage", ")", ":", "metadata", "[", "msg", ".", "tsid", "]", "=", "msg", ".", "properties", "df", "=", "pandas", ".", "DataFrame", ".", "from_dict", "(", "data", ",", "orient", "=", "'index'", ")", "df", ".", "metadata", "=", "metadata", "return", "df" ]
Executes the given program across the given time range (expressed in millisecond timestamps since Epoch), and returns a Pandas DataFrame containing the results, indexed by output timestamp. If the program contains multiple publish() calls, their outputs are merged into the returned DataFrame.
[ "Executes", "the", "given", "program", "across", "the", "given", "time", "range", "(", "expressed", "in", "millisecond", "timestamps", "since", "Epoch", ")", "and", "returns", "a", "Pandas", "DataFrame", "containing", "the", "results", "indexed", "by", "output", "timestamp", "." ]
python
train
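A hedged usage sketch for this example helper, following the SignalFlow client setup from signalfx-python's examples; the access token is a placeholder:

import time

import signalfx

sfx = signalfx.SignalFx()
flow = sfx.signalflow("MY_ACCESS_TOKEN")   # placeholder token
try:
    now = int(time.time()) * 1000
    # Query the last 15 minutes of a simple SignalFlow program.
    df = get_data_frame(flow,
                        "data('cpu.utilization').publish()",
                        start=now - 15 * 60 * 1000,
                        stop=now)
    print(df.head())
finally:
    flow.close()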
voxpupuli/pypuppetdb
pypuppetdb/api.py
https://github.com/voxpupuli/pypuppetdb/blob/cedeecf48014b4ad5b8e2513ca8230c814f45603/pypuppetdb/api.py#L643-L653
def catalog(self, node):
    """Get the available catalog for a given node.

    :param node: (Required) The name of the PuppetDB node.
    :type: :obj:`string`

    :returns: An instance of Catalog
    :rtype: :class:`pypuppetdb.types.Catalog`
    """
    catalogs = self.catalogs(path=node)
    return next(x for x in catalogs)
[ "def", "catalog", "(", "self", ",", "node", ")", ":", "catalogs", "=", "self", ".", "catalogs", "(", "path", "=", "node", ")", "return", "next", "(", "x", "for", "x", "in", "catalogs", ")" ]
Get the available catalog for a given node. :param node: (Required) The name of the PuppetDB node. :type: :obj:`string` :returns: An instance of Catalog :rtype: :class:`pypuppetdb.types.Catalog`
[ "Get", "the", "available", "catalog", "for", "a", "given", "node", "." ]
python
valid
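A brief usage sketch, assuming a PuppetDB instance reachable with pypuppetdb's connect() defaults; the node name is a placeholder:

from pypuppetdb import connect

# connect() defaults to localhost; pass host/port for a remote PuppetDB.
db = connect()

# catalog() returns the first catalog matching the node name.
catalog = db.catalog('node.example.com')
print(catalog)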
ioos/compliance-checker
compliance_checker/cfutil.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/cfutil.py#L135-L149
def get_sea_names():
    '''
    Returns a list of NODC sea names

    source of list: https://www.nodc.noaa.gov/General/NODC-Archive/seanamelist.txt
    '''
    global _SEA_NAMES
    if _SEA_NAMES is None:
        buf = {}
        with open(resource_filename('compliance_checker', 'data/seanames.csv'), 'r') as f:
            reader = csv.reader(f)
            for code, sea_name in reader:
                buf[sea_name] = code
        _SEA_NAMES = buf
    return _SEA_NAMES
[ "def", "get_sea_names", "(", ")", ":", "global", "_SEA_NAMES", "if", "_SEA_NAMES", "is", "None", ":", "buf", "=", "{", "}", "with", "open", "(", "resource_filename", "(", "'compliance_checker'", ",", "'data/seanames.csv'", ")", ",", "'r'", ")", "as", "f", ":", "reader", "=", "csv", ".", "reader", "(", "f", ")", "for", "code", ",", "sea_name", "in", "reader", ":", "buf", "[", "sea_name", "]", "=", "code", "_SEA_NAMES", "=", "buf", "return", "_SEA_NAMES" ]
Returns a list of NODC sea names source of list: https://www.nodc.noaa.gov/General/NODC-Archive/seanamelist.txt
[ "Returns", "a", "list", "of", "NODC", "sea", "names" ]
python
train
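Note that the record's function actually builds a name-to-code dict, so membership tests run against sea names. A self-contained sketch of the same lazy module-level caching pattern (the CSV path here is hypothetical):

import csv

_SEA_NAMES = None

def get_sea_names_cached(path="seanames.csv"):
    """Lazily load and cache a name -> code mapping (same pattern as above)."""
    global _SEA_NAMES
    if _SEA_NAMES is None:
        with open(path, "r") as f:
            # Each CSV row is (code, sea_name); invert to name -> code.
            _SEA_NAMES = {sea_name: code for code, sea_name in csv.reader(f)}
    return _SEA_NAMES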
KieranWynn/pyquaternion
pyquaternion/quaternion.py
https://github.com/KieranWynn/pyquaternion/blob/d2aad7f3fb0d4b9cc23aa72b390e9b2e1273eae9/pyquaternion/quaternion.py#L159-L233
def _from_matrix(cls, matrix):
    """Initialise from matrix representation

    Create a Quaternion by specifying the 3x3 rotation or 4x4 transformation matrix
    (as a numpy array) from which the quaternion's rotation should be created.
    """
    try:
        shape = matrix.shape
    except AttributeError:
        raise TypeError("Invalid matrix type: Input must be a 3x3 or 4x4 numpy array or matrix")

    if shape == (3, 3):
        R = matrix
    elif shape == (4, 4):
        R = matrix[:-1][:, :-1]  # Upper left 3x3 sub-matrix
    else:
        raise ValueError("Invalid matrix shape: Input must be a 3x3 or 4x4 numpy array or matrix")

    # Check matrix properties
    if not np.allclose(np.dot(R, R.conj().transpose()), np.eye(3)):
        raise ValueError("Matrix must be orthogonal, i.e. its transpose should be its inverse")
    if not np.isclose(np.linalg.det(R), 1.0):
        raise ValueError("Matrix must be special orthogonal i.e. its determinant must be +1.0")

    def decomposition_method(matrix):
        """ Method supposedly able to deal with non-orthogonal matrices - NON-FUNCTIONAL!
        Based on this method: http://arc.aiaa.org/doi/abs/10.2514/2.4654
        """
        x, y, z = 0, 1, 2  # indices
        K = np.array([
            [R[x, x]-R[y, y]-R[z, z], R[y, x]+R[x, y], R[z, x]+R[x, z], R[y, z]-R[z, y]],
            [R[y, x]+R[x, y], R[y, y]-R[x, x]-R[z, z], R[z, y]+R[y, z], R[z, x]-R[x, z]],
            [R[z, x]+R[x, z], R[z, y]+R[y, z], R[z, z]-R[x, x]-R[y, y], R[x, y]-R[y, x]],
            [R[y, z]-R[z, y], R[z, x]-R[x, z], R[x, y]-R[y, x], R[x, x]+R[y, y]+R[z, z]]
        ])
        K = K / 3.0
        e_vals, e_vecs = np.linalg.eig(K)
        print('Eigenvalues:', e_vals)
        print('Eigenvectors:', e_vecs)
        max_index = np.argmax(e_vals)
        principal_component = e_vecs[max_index]
        return principal_component

    def trace_method(matrix):
        """
        This code uses a modification of the algorithm described in:
        https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf
        which is itself based on the method described here:
        http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/

        Altered to work with the column vector convention instead of row vectors
        """
        m = matrix.conj().transpose()  # This method assumes row-vector and postmultiplication of that vector
        if m[2, 2] < 0:
            if m[0, 0] > m[1, 1]:
                t = 1 + m[0, 0] - m[1, 1] - m[2, 2]
                q = [m[1, 2]-m[2, 1], t, m[0, 1]+m[1, 0], m[2, 0]+m[0, 2]]
            else:
                t = 1 - m[0, 0] + m[1, 1] - m[2, 2]
                q = [m[2, 0]-m[0, 2], m[0, 1]+m[1, 0], t, m[1, 2]+m[2, 1]]
        else:
            if m[0, 0] < -m[1, 1]:
                t = 1 - m[0, 0] - m[1, 1] + m[2, 2]
                q = [m[0, 1]-m[1, 0], m[2, 0]+m[0, 2], m[1, 2]+m[2, 1], t]
            else:
                t = 1 + m[0, 0] + m[1, 1] + m[2, 2]
                q = [t, m[1, 2]-m[2, 1], m[2, 0]-m[0, 2], m[0, 1]-m[1, 0]]

        q = np.array(q)
        q *= 0.5 / sqrt(t)
        return q

    return cls(array=trace_method(R))
[ "def", "_from_matrix", "(", "cls", ",", "matrix", ")", ":", "try", ":", "shape", "=", "matrix", ".", "shape", "except", "AttributeError", ":", "raise", "TypeError", "(", "\"Invalid matrix type: Input must be a 3x3 or 4x4 numpy array or matrix\"", ")", "if", "shape", "==", "(", "3", ",", "3", ")", ":", "R", "=", "matrix", "elif", "shape", "==", "(", "4", ",", "4", ")", ":", "R", "=", "matrix", "[", ":", "-", "1", "]", "[", ":", ",", ":", "-", "1", "]", "# Upper left 3x3 sub-matrix", "else", ":", "raise", "ValueError", "(", "\"Invalid matrix shape: Input must be a 3x3 or 4x4 numpy array or matrix\"", ")", "# Check matrix properties", "if", "not", "np", ".", "allclose", "(", "np", ".", "dot", "(", "R", ",", "R", ".", "conj", "(", ")", ".", "transpose", "(", ")", ")", ",", "np", ".", "eye", "(", "3", ")", ")", ":", "raise", "ValueError", "(", "\"Matrix must be orthogonal, i.e. its transpose should be its inverse\"", ")", "if", "not", "np", ".", "isclose", "(", "np", ".", "linalg", ".", "det", "(", "R", ")", ",", "1.0", ")", ":", "raise", "ValueError", "(", "\"Matrix must be special orthogonal i.e. its determinant must be +1.0\"", ")", "def", "decomposition_method", "(", "matrix", ")", ":", "\"\"\" Method supposedly able to deal with non-orthogonal matrices - NON-FUNCTIONAL!\n Based on this method: http://arc.aiaa.org/doi/abs/10.2514/2.4654\n \"\"\"", "x", ",", "y", ",", "z", "=", "0", ",", "1", ",", "2", "# indices", "K", "=", "np", ".", "array", "(", "[", "[", "R", "[", "x", ",", "x", "]", "-", "R", "[", "y", ",", "y", "]", "-", "R", "[", "z", ",", "z", "]", ",", "R", "[", "y", ",", "x", "]", "+", "R", "[", "x", ",", "y", "]", ",", "R", "[", "z", ",", "x", "]", "+", "R", "[", "x", ",", "z", "]", ",", "R", "[", "y", ",", "z", "]", "-", "R", "[", "z", ",", "y", "]", "]", ",", "[", "R", "[", "y", ",", "x", "]", "+", "R", "[", "x", ",", "y", "]", ",", "R", "[", "y", ",", "y", "]", "-", "R", "[", "x", ",", "x", "]", "-", "R", "[", "z", ",", "z", "]", ",", "R", "[", "z", ",", "y", "]", "+", "R", "[", "y", ",", "z", "]", ",", "R", "[", "z", ",", "x", "]", "-", "R", "[", "x", ",", "z", "]", "]", ",", "[", "R", "[", "z", ",", "x", "]", "+", "R", "[", "x", ",", "z", "]", ",", "R", "[", "z", ",", "y", "]", "+", "R", "[", "y", ",", "z", "]", ",", "R", "[", "z", ",", "z", "]", "-", "R", "[", "x", ",", "x", "]", "-", "R", "[", "y", ",", "y", "]", ",", "R", "[", "x", ",", "y", "]", "-", "R", "[", "y", ",", "x", "]", "]", ",", "[", "R", "[", "y", ",", "z", "]", "-", "R", "[", "z", ",", "y", "]", ",", "R", "[", "z", ",", "x", "]", "-", "R", "[", "x", ",", "z", "]", ",", "R", "[", "x", ",", "y", "]", "-", "R", "[", "y", ",", "x", "]", ",", "R", "[", "x", ",", "x", "]", "+", "R", "[", "y", ",", "y", "]", "+", "R", "[", "z", ",", "z", "]", "]", "]", ")", "K", "=", "K", "/", "3.0", "e_vals", ",", "e_vecs", "=", "np", ".", "linalg", ".", "eig", "(", "K", ")", "print", "(", "'Eigenvalues:'", ",", "e_vals", ")", "print", "(", "'Eigenvectors:'", ",", "e_vecs", ")", "max_index", "=", "np", ".", "argmax", "(", "e_vals", ")", "principal_component", "=", "e_vecs", "[", "max_index", "]", "return", "principal_component", "def", "trace_method", "(", "matrix", ")", ":", "\"\"\"\n This code uses a modification of the algorithm described in:\n https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf\n which is itself based on the method described here:\n http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/\n\n Altered to work with the column vector convention 
instead of row vectors\n \"\"\"", "m", "=", "matrix", ".", "conj", "(", ")", ".", "transpose", "(", ")", "# This method assumes row-vector and postmultiplication of that vector", "if", "m", "[", "2", ",", "2", "]", "<", "0", ":", "if", "m", "[", "0", ",", "0", "]", ">", "m", "[", "1", ",", "1", "]", ":", "t", "=", "1", "+", "m", "[", "0", ",", "0", "]", "-", "m", "[", "1", ",", "1", "]", "-", "m", "[", "2", ",", "2", "]", "q", "=", "[", "m", "[", "1", ",", "2", "]", "-", "m", "[", "2", ",", "1", "]", ",", "t", ",", "m", "[", "0", ",", "1", "]", "+", "m", "[", "1", ",", "0", "]", ",", "m", "[", "2", ",", "0", "]", "+", "m", "[", "0", ",", "2", "]", "]", "else", ":", "t", "=", "1", "-", "m", "[", "0", ",", "0", "]", "+", "m", "[", "1", ",", "1", "]", "-", "m", "[", "2", ",", "2", "]", "q", "=", "[", "m", "[", "2", ",", "0", "]", "-", "m", "[", "0", ",", "2", "]", ",", "m", "[", "0", ",", "1", "]", "+", "m", "[", "1", ",", "0", "]", ",", "t", ",", "m", "[", "1", ",", "2", "]", "+", "m", "[", "2", ",", "1", "]", "]", "else", ":", "if", "m", "[", "0", ",", "0", "]", "<", "-", "m", "[", "1", ",", "1", "]", ":", "t", "=", "1", "-", "m", "[", "0", ",", "0", "]", "-", "m", "[", "1", ",", "1", "]", "+", "m", "[", "2", ",", "2", "]", "q", "=", "[", "m", "[", "0", ",", "1", "]", "-", "m", "[", "1", ",", "0", "]", ",", "m", "[", "2", ",", "0", "]", "+", "m", "[", "0", ",", "2", "]", ",", "m", "[", "1", ",", "2", "]", "+", "m", "[", "2", ",", "1", "]", ",", "t", "]", "else", ":", "t", "=", "1", "+", "m", "[", "0", ",", "0", "]", "+", "m", "[", "1", ",", "1", "]", "+", "m", "[", "2", ",", "2", "]", "q", "=", "[", "t", ",", "m", "[", "1", ",", "2", "]", "-", "m", "[", "2", ",", "1", "]", ",", "m", "[", "2", ",", "0", "]", "-", "m", "[", "0", ",", "2", "]", ",", "m", "[", "0", ",", "1", "]", "-", "m", "[", "1", ",", "0", "]", "]", "q", "=", "np", ".", "array", "(", "q", ")", "q", "*=", "0.5", "/", "sqrt", "(", "t", ")", "return", "q", "return", "cls", "(", "array", "=", "trace_method", "(", "R", ")", ")" ]
Initialise from matrix representation Create a Quaternion by specifying the 3x3 rotation or 4x4 transformation matrix (as a numpy array) from which the quaternion's rotation should be created.
[ "Initialise", "from", "matrix", "representation" ]
python
train
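A usage sketch for the record above; in pyquaternion this classmethod is reached through the public constructor's matrix keyword. A 90-degree rotation about the z axis serves as a quick check:

import numpy as np
from pyquaternion import Quaternion

# A 90-degree rotation about the z axis; special orthogonal by construction.
Rz = np.array([[0.0, -1.0, 0.0],
               [1.0,  0.0, 0.0],
               [0.0,  0.0, 1.0]])

q = Quaternion(matrix=Rz)          # dispatches to _from_matrix internally
print(q)                           # roughly 0.707 +0.000i +0.000j +0.707k
print(q.rotate([1.0, 0.0, 0.0]))   # the x axis maps onto the y axis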
wrongwaycn/ssdb-py
ssdb/client.py
https://github.com/wrongwaycn/ssdb-py/blob/ce7b1542f0faa06fe71a60c667fe15992af0f621/ssdb/client.py#L495-L521
def setbit(self, name, offset, val):
    """
    Flag the ``offset`` in ``name`` as ``value``. Returns a boolean
    indicating the previous value of ``offset``.

    Like **Redis.SETBIT**

    :param string name: the key name
    :param int offset: the bit position
    :param bool val: the bit value
    :return: the previous bit (False or True) at the ``offset``
    :rtype: bool

    >>> ssdb.set('bit_test', 1)
    True
    >>> ssdb.setbit('bit_test', 1, 1)
    False
    >>> ssdb.get('bit_test')
    3
    >>> ssdb.setbit('bit_test', 2, 1)
    False
    >>> ssdb.get('bit_test')
    7
    """
    val = int(get_boolean('val', val))
    offset = get_positive_integer('offset', offset)
    return self.execute_command('setbit', name, offset, val)
[ "def", "setbit", "(", "self", ",", "name", ",", "offset", ",", "val", ")", ":", "val", "=", "int", "(", "get_boolean", "(", "'val'", ",", "val", ")", ")", "offset", "=", "get_positive_integer", "(", "'offset'", ",", "offset", ")", "return", "self", ".", "execute_command", "(", "'setbit'", ",", "name", ",", "offset", ",", "val", ")" ]
Flag the ``offset`` in ``name`` as ``value``. Returns a boolean
indicating the previous value of ``offset``.

Like **Redis.SETBIT**

:param string name: the key name
:param int offset: the bit position
:param bool val: the bit value
:return: the previous bit (False or True) at the ``offset``
:rtype: bool

>>> ssdb.set('bit_test', 1)
True
>>> ssdb.setbit('bit_test', 1, 1)
False
>>> ssdb.get('bit_test')
3
>>> ssdb.setbit('bit_test', 2, 1)
False
>>> ssdb.get('bit_test')
7
[ "Flag", "the", "offset", "in", "name", "as", "value", ".", "Returns", "a", "boolean", "indicating", "the", "previous", "value", "of", "offset", "." ]
python
train
senaite/senaite.core
bika/lims/browser/workflow/analysisrequest.py
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/workflow/analysisrequest.py#L445-L450
def is_hidden(self, service):
    """Returns whether the request Hidden param for the given obj is True
    """
    uid = api.get_uid(service)
    hidden_ans = self.request.form.get("Hidden", {})
    return hidden_ans.get(uid, "") == "on"
[ "def", "is_hidden", "(", "self", ",", "service", ")", ":", "uid", "=", "api", ".", "get_uid", "(", "service", ")", "hidden_ans", "=", "self", ".", "request", ".", "form", ".", "get", "(", "\"Hidden\"", ",", "{", "}", ")", "return", "hidden_ans", ".", "get", "(", "uid", ",", "\"\"", ")", "==", "\"on\"" ]
Returns whether the request Hidden param for the given obj is True
[ "Returns", "whether", "the", "request", "Hidden", "param", "for", "the", "given", "obj", "is", "True" ]
python
train
stefanfoulis/django-sendsms
sendsms/backends/smsglobal.py
https://github.com/stefanfoulis/django-sendsms/blob/375f469789866853253eceba936ebcff98e83c07/sendsms/backends/smsglobal.py#L109-L125
def _parse_response(self, result_page):
    """
    Takes a result page of sending the sms, returns an extracted tuple:
    ('numeric_err_code', '<sent_queued_message_id>', '<smsglobalmsgid>')

    Returns None if unable to extract info from result_page, it should
    be safe to assume that it was either a failed result or worse, the
    interface contract has changed.
    """
    # Sample result_page, single line -> "OK: 0; Sent queued message ID: 2063619577732703 SMSGlobalMsgID:6171799108850954"
    resultline = result_page.splitlines()[0]  # get result line
    if resultline.startswith('ERROR:'):
        raise Exception(resultline.replace('ERROR: ', ''))
    patt = re.compile(r'^.+?:\s*(.+?)\s*;\s*Sent queued message ID:\s*(.+?)\s*SMSGlobalMsgID:(.+?)$', re.IGNORECASE)
    m = patt.match(resultline)
    if m:
        return (m.group(1), m.group(2), m.group(3))
    return None
[ "def", "_parse_response", "(", "self", ",", "result_page", ")", ":", "# Sample result_page, single line -> \"OK: 0; Sent queued message ID: 2063619577732703 SMSGlobalMsgID:6171799108850954\"", "resultline", "=", "result_page", ".", "splitlines", "(", ")", "[", "0", "]", "# get result line", "if", "resultline", ".", "startswith", "(", "'ERROR:'", ")", ":", "raise", "Exception", "(", "resultline", ".", "replace", "(", "'ERROR: '", ",", "''", ")", ")", "patt", "=", "re", ".", "compile", "(", "r'^.+?:\\s*(.+?)\\s*;\\s*Sent queued message ID:\\s*(.+?)\\s*SMSGlobalMsgID:(.+?)$'", ",", "re", ".", "IGNORECASE", ")", "m", "=", "patt", ".", "match", "(", "resultline", ")", "if", "m", ":", "return", "(", "m", ".", "group", "(", "1", ")", ",", "m", ".", "group", "(", "2", ")", ",", "m", ".", "group", "(", "3", ")", ")", "return", "None" ]
Takes a result page of sending the sms, returns an extracted tuple: ('numeric_err_code', '<sent_queued_message_id>', '<smsglobalmsgid>') Returns None if unable to extract info from result_page, it should be safe to assume that it was either a failed result or worse, the interface contract has changed.
[ "Takes", "a", "result", "page", "of", "sending", "the", "sms", "returns", "an", "extracted", "tuple", ":", "(", "numeric_err_code", "<sent_queued_message_id", ">", "<smsglobalmsgid", ">", ")", "Returns", "None", "if", "unable", "to", "extract", "info", "from", "result_page", "it", "should", "be", "safe", "to", "assume", "that", "it", "was", "either", "a", "failed", "result", "or", "worse", "the", "interface", "contract", "has", "changed", "." ]
python
train
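The parsing logic in this record is self-contained enough to demonstrate directly; the sketch below applies the same regex to the sample line quoted in the record's comment:

import re

sample = ("OK: 0; Sent queued message ID: 2063619577732703 "
          "SMSGlobalMsgID:6171799108850954")

patt = re.compile(
    r'^.+?:\s*(.+?)\s*;\s*Sent queued message ID:\s*(.+?)\s*SMSGlobalMsgID:(.+?)$',
    re.IGNORECASE)

m = patt.match(sample)
print(m.groups())
# ('0', '2063619577732703', '6171799108850954')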
waqasbhatti/astrobase
astrobase/varbase/autocorr.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varbase/autocorr.py#L20-L57
def _autocorr_func1(mags, lag, maglen, magmed, magstd):
    '''Calculates the autocorr of mag series for specific lag.

    This version of the function is taken from: Kim et al. (`2011
    <https://dx.doi.org/10.1088/0004-637X/735/2/68>`_)

    Parameters
    ----------
    mags : np.array
        This is the magnitudes array. MUST NOT have any nans.

    lag : float
        The specific lag value to calculate the auto-correlation for. This MUST
        be less than total number of observations in `mags`.

    maglen : int
        The number of elements in the `mags` array.

    magmed : float
        The median of the `mags` array.

    magstd : float
        The standard deviation of the `mags` array.

    Returns
    -------
    float
        The auto-correlation at this specific `lag` value.

    '''
    lagindex = nparange(1, maglen-lag)
    products = (mags[lagindex] - magmed) * (mags[lagindex+lag] - magmed)
    acorr = (1.0/((maglen - lag)*magstd)) * npsum(products)

    return acorr
[ "def", "_autocorr_func1", "(", "mags", ",", "lag", ",", "maglen", ",", "magmed", ",", "magstd", ")", ":", "lagindex", "=", "nparange", "(", "1", ",", "maglen", "-", "lag", ")", "products", "=", "(", "mags", "[", "lagindex", "]", "-", "magmed", ")", "*", "(", "mags", "[", "lagindex", "+", "lag", "]", "-", "magmed", ")", "acorr", "=", "(", "1.0", "/", "(", "(", "maglen", "-", "lag", ")", "*", "magstd", ")", ")", "*", "npsum", "(", "products", ")", "return", "acorr" ]
Calculates the autocorr of mag series for specific lag.

This version of the function is taken from: Kim et al. (`2011
<https://dx.doi.org/10.1088/0004-637X/735/2/68>`_)

Parameters
----------
mags : np.array
    This is the magnitudes array. MUST NOT have any nans.
lag : float
    The specific lag value to calculate the auto-correlation for. This MUST
    be less than total number of observations in `mags`.
maglen : int
    The number of elements in the `mags` array.
magmed : float
    The median of the `mags` array.
magstd : float
    The standard deviation of the `mags` array.

Returns
-------
float
    The auto-correlation at this specific `lag` value.
[ "Calculates", "the", "autocorr", "of", "mag", "series", "for", "specific", "lag", "." ]
python
valid
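A self-contained numpy sketch of the same Kim et al. (2011) formula; the record's nparange and npsum appear to be astrobase's aliases for numpy.arange and numpy.sum, so plain numpy names are used here:

import numpy as np

def autocorr_at_lag(mags, lag):
    """Kim et al. (2011) style autocorrelation of a magnitude series at one lag."""
    maglen, magmed, magstd = len(mags), np.median(mags), np.std(mags)
    lagindex = np.arange(1, maglen - lag)
    products = (mags[lagindex] - magmed) * (mags[lagindex + lag] - magmed)
    return np.sum(products) / ((maglen - lag) * magstd)

rng = np.random.default_rng(42)
mags = np.sin(np.linspace(0, 20, 500)) + 0.1 * rng.standard_normal(500)
print(autocorr_at_lag(mags, 5))   # strong positive correlation at a short lag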
msmbuilder/msmbuilder
msmbuilder/decomposition/tica.py
https://github.com/msmbuilder/msmbuilder/blob/556a93a170782f47be53f4a1e9d740fb1c8272b3/msmbuilder/decomposition/tica.py#L261-L290
def fit(self, sequences, y=None):
    """Fit the model with a collection of sequences.

    This method is not online. Any state accumulated from previous calls to
    fit() or partial_fit() will be cleared. For online learning, use
    `partial_fit`.

    Parameters
    ----------
    sequences: list of array-like, each of shape (n_samples_i, n_features)
        Training data, where n_samples_i in the number of samples
        in sequence i and n_features is the number of features.
    y : None
        Ignored

    Returns
    -------
    self : object
        Returns the instance itself.
    """
    self._initialized = False
    check_iter_of_sequences(sequences, max_iter=3)  # we might be lazy-loading
    for X in sequences:
        self._fit(X)

    if self.n_sequences_ == 0:
        raise ValueError('All sequences were shorter than '
                         'the lag time, %d' % self.lag_time)

    return self
[ "def", "fit", "(", "self", ",", "sequences", ",", "y", "=", "None", ")", ":", "self", ".", "_initialized", "=", "False", "check_iter_of_sequences", "(", "sequences", ",", "max_iter", "=", "3", ")", "# we might be lazy-loading", "for", "X", "in", "sequences", ":", "self", ".", "_fit", "(", "X", ")", "if", "self", ".", "n_sequences_", "==", "0", ":", "raise", "ValueError", "(", "'All sequences were shorter than '", "'the lag time, %d'", "%", "self", ".", "lag_time", ")", "return", "self" ]
Fit the model with a collection of sequences.

This method is not online. Any state accumulated from previous calls to
fit() or partial_fit() will be cleared. For online learning, use
`partial_fit`.

Parameters
----------
sequences: list of array-like, each of shape (n_samples_i, n_features)
    Training data, where n_samples_i in the number of samples
    in sequence i and n_features is the number of features.
y : None
    Ignored

Returns
-------
self : object
    Returns the instance itself.
[ "Fit", "the", "model", "with", "a", "collection", "of", "sequences", "." ]
python
train
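A hedged usage sketch for this fit method via msmbuilder's documented tICA estimator; the random sequences stand in for real trajectory features:

import numpy as np
from msmbuilder.decomposition import tICA

# Three toy "trajectories", each of shape (n_samples_i, n_features).
rng = np.random.RandomState(0)
sequences = [rng.randn(200, 4) for _ in range(3)]

tica = tICA(n_components=2, lag_time=10)
tica.fit(sequences)              # clears prior state, then folds in each sequence
transformed = tica.transform(sequences)
print(transformed[0].shape)      # (200, 2)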
FocusLab/Albertson
albertson/base.py
https://github.com/FocusLab/Albertson/blob/a42f9873559df9188c40c34fdffb079d78eaa3fe/albertson/base.py#L102-L117
def create_table(self):
    '''
    Hook point for overriding how the CounterPool creates a new table
    in DynamooDB
    '''
    table = self.conn.create_table(
        name=self.get_table_name(),
        schema=self.get_schema(),
        read_units=self.get_read_units(),
        write_units=self.get_write_units(),
    )

    if table.status != 'ACTIVE':
        table.refresh(wait_for_active=True, retry_seconds=1)

    return table
[ "def", "create_table", "(", "self", ")", ":", "table", "=", "self", ".", "conn", ".", "create_table", "(", "name", "=", "self", ".", "get_table_name", "(", ")", ",", "schema", "=", "self", ".", "get_schema", "(", ")", ",", "read_units", "=", "self", ".", "get_read_units", "(", ")", ",", "write_units", "=", "self", ".", "get_write_units", "(", ")", ",", ")", "if", "table", ".", "status", "!=", "'ACTIVE'", ":", "table", ".", "refresh", "(", "wait_for_active", "=", "True", ",", "retry_seconds", "=", "1", ")", "return", "table" ]
Hook point for overriding how the CounterPool creates a new table in DynamooDB
[ "Hook", "point", "for", "overriding", "how", "the", "CounterPool", "creates", "a", "new", "table", "in", "DynamooDB" ]
python
valid
StackStorm/pybind
pybind/slxos/v17s_1_02/interface_vlan/vlan/ip/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/interface_vlan/vlan/ip/__init__.py#L199-L220
def _set_igmpVlan(self, v, load=False):
    """
    Setter method for igmpVlan, mapped from YANG variable /interface_vlan/vlan/ip/igmpVlan (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_igmpVlan is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_igmpVlan() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=igmpVlan.igmpVlan, is_container='container', presence=False, yang_name="igmpVlan", rest_name="igmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Internet Group Management Protocol (IGMP)', u'cli-incomplete-no': None, u'callpoint': u'IgmpsVlan', u'sort-priority': u'129', u'alt-name': u'igmp'}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """igmpVlan must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=igmpVlan.igmpVlan, is_container='container', presence=False, yang_name="igmpVlan", rest_name="igmp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Internet Group Management Protocol (IGMP)', u'cli-incomplete-no': None, u'callpoint': u'IgmpsVlan', u'sort-priority': u'129', u'alt-name': u'igmp'}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='container', is_config=True)""",
        })

    self.__igmpVlan = t
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_igmpVlan", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "igmpVlan", ".", "igmpVlan", ",", "is_container", "=", "'container'", ",", "presence", "=", "False", ",", "yang_name", "=", "\"igmpVlan\"", ",", "rest_name", "=", "\"igmp\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Internet Group Management Protocol (IGMP)'", ",", "u'cli-incomplete-no'", ":", "None", ",", "u'callpoint'", ":", "u'IgmpsVlan'", ",", "u'sort-priority'", ":", "u'129'", ",", "u'alt-name'", ":", "u'igmp'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-igmp-snooping'", ",", "defining_module", "=", "'brocade-igmp-snooping'", ",", "yang_type", "=", "'container'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"igmpVlan must be of a type compatible with container\"\"\"", ",", "'defined-type'", ":", "\"container\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=igmpVlan.igmpVlan, is_container='container', presence=False, yang_name=\"igmpVlan\", rest_name=\"igmp\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Internet Group Management Protocol (IGMP)', u'cli-incomplete-no': None, u'callpoint': u'IgmpsVlan', u'sort-priority': u'129', u'alt-name': u'igmp'}}, namespace='urn:brocade.com:mgmt:brocade-igmp-snooping', defining_module='brocade-igmp-snooping', yang_type='container', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__igmpVlan", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for igmpVlan, mapped from YANG variable /interface_vlan/vlan/ip/igmpVlan (container) If this variable is read-only (config: false) in the source YANG file, then _set_igmpVlan is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_igmpVlan() directly.
[ "Setter", "method", "for", "igmpVlan", "mapped", "from", "YANG", "variable", "/", "interface_vlan", "/", "vlan", "/", "ip", "/", "igmpVlan", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_igmpVlan", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_igmpVlan", "()", "directly", "." ]
python
train
ziwenxie/netease-dl
netease/download.py
https://github.com/ziwenxie/netease-dl/blob/84b226fc07b10f7f66580f0fc69f10356f66b5c3/netease/download.py#L222-L234
def download_user_playlists_by_id(self, user_id):
    """Download user's playlists by his/her id.

    :params user_id: user id.
    """
    try:
        playlist = self.crawler.get_user_playlists(user_id)
    except RequestException as exception:
        click.echo(exception)
    else:
        self.download_playlist_by_id(
            playlist.playlist_id, playlist.playlist_name)
[ "def", "download_user_playlists_by_id", "(", "self", ",", "user_id", ")", ":", "try", ":", "playlist", "=", "self", ".", "crawler", ".", "get_user_playlists", "(", "user_id", ")", "except", "RequestException", "as", "exception", ":", "click", ".", "echo", "(", "exception", ")", "else", ":", "self", ".", "download_playlist_by_id", "(", "playlist", ".", "playlist_id", ",", "playlist", ".", "playlist_name", ")" ]
Download user's playlists by his/her id. :params user_id: user id.
[ "Download", "user", "s", "playlists", "by", "his", "/", "her", "id", "." ]
python
train
wuher/devil
devil/resource.py
https://github.com/wuher/devil/blob/a8834d4f88d915a21754c6b96f99d0ad9123ad4d/devil/resource.py#L255-L270
def _validate_input_data(self, data, request):
    """ Validate input data.

    :param request: the HTTP request
    :param data: the parsed data
    :return: if validation is performed and succeeds the data is converted
             into whatever format the validation uses (by default Django's
             Forms) If not, the data is returned unchanged.
    :raises: HttpStatusCodeError if data is not valid
    """

    validator = self._get_input_validator(request)
    if isinstance(data, (list, tuple)):
        return map(validator.validate, data)
    else:
        return validator.validate(data)
[ "def", "_validate_input_data", "(", "self", ",", "data", ",", "request", ")", ":", "validator", "=", "self", ".", "_get_input_validator", "(", "request", ")", "if", "isinstance", "(", "data", ",", "(", "list", ",", "tuple", ")", ")", ":", "return", "map", "(", "validator", ".", "validate", ",", "data", ")", "else", ":", "return", "validator", ".", "validate", "(", "data", ")" ]
Validate input data. :param request: the HTTP request :param data: the parsed data :return: if validation is performed and succeeds the data is converted into whatever format the validation uses (by default Django's Forms) If not, the data is returned unchanged. :raises: HttpStatusCodeError if data is not valid
[ "Validate", "input", "data", "." ]
python
train
jameshilliard/hlk-sw16
hlk_sw16/protocol.py
https://github.com/jameshilliard/hlk-sw16/blob/4f0c5a7b76b42167f4dc9d2aa6312c7518a8cd56/hlk_sw16/protocol.py#L235-L239
def register_status_callback(self, callback, switch):
    """Register a callback which will fire when state changes."""
    if self.status_callbacks.get(switch, None) is None:
        self.status_callbacks[switch] = []
    self.status_callbacks[switch].append(callback)
[ "def", "register_status_callback", "(", "self", ",", "callback", ",", "switch", ")", ":", "if", "self", ".", "status_callbacks", ".", "get", "(", "switch", ",", "None", ")", "is", "None", ":", "self", ".", "status_callbacks", "[", "switch", "]", "=", "[", "]", "self", ".", "status_callbacks", "[", "switch", "]", ".", "append", "(", "callback", ")" ]
Register a callback which will fire when state changes.
[ "Register", "a", "callback", "which", "will", "fire", "when", "state", "changes", "." ]
python
train
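The record above is a compact observer-pattern registry; a generic self-contained sketch of the same register-and-fire flow (illustration only, not hlk_sw16's API):

from collections import defaultdict

class StatusNotifier:
    def __init__(self):
        # One callback list per switch, created on first registration.
        self.status_callbacks = defaultdict(list)

    def register_status_callback(self, callback, switch):
        self.status_callbacks[switch].append(callback)

    def notify(self, switch, state):
        for callback in self.status_callbacks[switch]:
            callback(state)

notifier = StatusNotifier()
notifier.register_status_callback(lambda state: print("switch 1 ->", state), 1)
notifier.notify(1, "on")   # prints: switch 1 -> on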
tdryer/hangups
hangups/client.py
https://github.com/tdryer/hangups/blob/85c0bf0a57698d077461283895707260f9dbf931/hangups/client.py#L400-L436
async def _pb_request(self, endpoint, request_pb, response_pb):
    """Send a Protocol Buffer formatted chat API request.

    Args:
        endpoint (str): The chat API endpoint to use.
        request_pb: The request body as a Protocol Buffer message.
        response_pb: The response body as a Protocol Buffer message.

    Raises:
        NetworkError: If the request fails.
    """
    logger.debug('Sending Protocol Buffer request %s:\n%s', endpoint, request_pb)
    res = await self._base_request(
        'https://clients6.google.com/chat/v1/{}'.format(endpoint),
        'application/x-protobuf',  # Request body is Protocol Buffer.
        'proto',  # Response body is Protocol Buffer.
        request_pb.SerializeToString()
    )
    try:
        response_pb.ParseFromString(base64.b64decode(res.body))
    except binascii.Error as e:
        raise exceptions.NetworkError(
            'Failed to decode base64 response: {}'.format(e)
        )
    except google.protobuf.message.DecodeError as e:
        raise exceptions.NetworkError(
            'Failed to decode Protocol Buffer response: {}'.format(e)
        )
    logger.debug('Received Protocol Buffer response:\n%s', response_pb)
    status = response_pb.response_header.status
    if status != hangouts_pb2.RESPONSE_STATUS_OK:
        description = response_pb.response_header.error_description
        raise exceptions.NetworkError(
            'Request failed with status {}: \'{}\''
            .format(status, description)
        )
[ "async", "def", "_pb_request", "(", "self", ",", "endpoint", ",", "request_pb", ",", "response_pb", ")", ":", "logger", ".", "debug", "(", "'Sending Protocol Buffer request %s:\\n%s'", ",", "endpoint", ",", "request_pb", ")", "res", "=", "await", "self", ".", "_base_request", "(", "'https://clients6.google.com/chat/v1/{}'", ".", "format", "(", "endpoint", ")", ",", "'application/x-protobuf'", ",", "# Request body is Protocol Buffer.", "'proto'", ",", "# Response body is Protocol Buffer.", "request_pb", ".", "SerializeToString", "(", ")", ")", "try", ":", "response_pb", ".", "ParseFromString", "(", "base64", ".", "b64decode", "(", "res", ".", "body", ")", ")", "except", "binascii", ".", "Error", "as", "e", ":", "raise", "exceptions", ".", "NetworkError", "(", "'Failed to decode base64 response: {}'", ".", "format", "(", "e", ")", ")", "except", "google", ".", "protobuf", ".", "message", ".", "DecodeError", "as", "e", ":", "raise", "exceptions", ".", "NetworkError", "(", "'Failed to decode Protocol Buffer response: {}'", ".", "format", "(", "e", ")", ")", "logger", ".", "debug", "(", "'Received Protocol Buffer response:\\n%s'", ",", "response_pb", ")", "status", "=", "response_pb", ".", "response_header", ".", "status", "if", "status", "!=", "hangouts_pb2", ".", "RESPONSE_STATUS_OK", ":", "description", "=", "response_pb", ".", "response_header", ".", "error_description", "raise", "exceptions", ".", "NetworkError", "(", "'Request failed with status {}: \\'{}\\''", ".", "format", "(", "status", ",", "description", ")", ")" ]
Send a Protocol Buffer formatted chat API request. Args: endpoint (str): The chat API endpoint to use. request_pb: The request body as a Protocol Buffer message. response_pb: The response body as a Protocol Buffer message. Raises: NetworkError: If the request fails.
[ "Send", "a", "Protocol", "Buffer", "formatted", "chat", "API", "request", "." ]
python
valid
rhayes777/PyAutoFit
autofit/optimize/optimizer.py
https://github.com/rhayes777/PyAutoFit/blob/a9e6144abb08edfc6a6906c4030d7119bf8d3e14/autofit/optimize/optimizer.py#L1-L29
def grid(fitness_function, no_dimensions, step_size):
    """
    Grid search using a fitness function over a given number of dimensions and a given step size between inclusive
    limits of 0 and 1.

    Parameters
    ----------
    fitness_function: function
        A function that takes a tuple of floats as an argument
    no_dimensions: int
        The number of dimensions of the grid search
    step_size: float
        The step size of the grid search

    Returns
    -------
    best_arguments: tuple[float]
        The tuple of arguments that gave the highest fitness
    """
    best_fitness = float("-inf")
    best_arguments = None

    for arguments in make_lists(no_dimensions, step_size):
        fitness = fitness_function(tuple(arguments))
        if fitness > best_fitness:
            best_fitness = fitness
            best_arguments = tuple(arguments)

    return best_arguments
[ "def", "grid", "(", "fitness_function", ",", "no_dimensions", ",", "step_size", ")", ":", "best_fitness", "=", "float", "(", "\"-inf\"", ")", "best_arguments", "=", "None", "for", "arguments", "in", "make_lists", "(", "no_dimensions", ",", "step_size", ")", ":", "fitness", "=", "fitness_function", "(", "tuple", "(", "arguments", ")", ")", "if", "fitness", ">", "best_fitness", ":", "best_fitness", "=", "fitness", "best_arguments", "=", "tuple", "(", "arguments", ")", "return", "best_arguments" ]
Grid search using a fitness function over a given number of dimensions and a
given step size between inclusive limits of 0 and 1.

Parameters
----------
fitness_function: function
    A function that takes a tuple of floats as an argument
no_dimensions: int
    The number of dimensions of the grid search
step_size: float
    The step size of the grid search

Returns
-------
best_arguments: tuple[float]
    The tuple of arguments that gave the highest fitness
[ "Grid", "search", "using", "a", "fitness", "function", "over", "a", "given", "number", "of", "dimensions", "and", "a", "given", "step", "size", "between", "inclusive", "limits", "of", "0", "and", "1", "." ]
python
train
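The record depends on a make_lists helper that is not shown; a self-contained sketch with a plausible stand-in (the itertools-based make_lists below is an assumption, not PyAutoFit's implementation):

import itertools

def make_lists(no_dimensions, step_size):
    """Hypothetical stand-in: every grid point in [0, 1] ** no_dimensions."""
    steps = int(round(1 / step_size)) + 1
    axis = [i * step_size for i in range(steps)]
    return [list(point) for point in itertools.product(axis, repeat=no_dimensions)]

def fitness(args):
    # Peak at (0.5, 0.5); the grid search should land there.
    return -sum((x - 0.5) ** 2 for x in args)

best = None
best_fitness = float("-inf")
for arguments in make_lists(2, 0.1):
    f = fitness(tuple(arguments))
    if f > best_fitness:
        best_fitness, best = f, tuple(arguments)
print(best)   # (0.5, 0.5)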
KeithSSmith/switcheo-python
switcheo/authenticated_client.py
https://github.com/KeithSSmith/switcheo-python/blob/22f943dea1ad7d692b2bfcd9f0822ec80f4641a6/switcheo/authenticated_client.py#L595-L651
def execute_order(self, order_params, private_key):
    """
    This function executes the order created before it and signs the transaction to be submitted to the blockchain.
    Execution of this function is as follows::

        execute_order(order_params=create_order, private_key=kp)

    The expected return result for this function is the same as the execute_order function::

        {
            'id': '4e6a59fd-d750-4332-aaf0-f2babfa8ad67',
            'blockchain': 'neo',
            'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
            'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
            'side': 'buy',
            'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
            'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
            'offer_amount': '2000000',
            'want_amount': '10000000000',
            'transfer_amount': '0',
            'priority_gas_amount': '0',
            'use_native_token': True,
            'native_fee_transfer_amount': 0,
            'deposit_txn': None,
            'created_at': '2018-08-05T10:38:37.714Z',
            'status': 'processed',
            'fills': [],
            'makes': [
                {
                    'id': 'e30a7fdf-779c-4623-8f92-8a961450d843',
                    'offer_hash': 'b45ddfb97ade5e0363d9e707dac9ad1c530448db263e86494225a0025006f968',
                    'available_amount': '2000000',
                    'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
                    'offer_amount': '2000000',
                    'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
                    'want_amount': '10000000000',
                    'filled_amount': '0.0',
                    'txn': None,
                    'cancel_txn': None,
                    'price': '0.0002',
                    'status': 'confirming',
                    'created_at': '2018-08-05T10:38:37.731Z',
                    'transaction_hash': '5c4cb1e73b9f2e608b6e768e0654649a4d15e08a7fe63fc536c454fa563a2f0f',
                    'trades': []
                }
            ]
        }

    :param order_params: Dictionary generated from the create order function.
    :type order_params: dict
    :param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign deposit message.
    :type private_key: KeyPair or str
    :return: Dictionary of the transaction on the order book.
    """
    order_id = order_params['id']
    api_params = self.sign_execute_order_function[self.blockchain](order_params, private_key)
    return self.request.post(path='/orders/{}/broadcast'.format(order_id), json_data=api_params)
[ "def", "execute_order", "(", "self", ",", "order_params", ",", "private_key", ")", ":", "order_id", "=", "order_params", "[", "'id'", "]", "api_params", "=", "self", ".", "sign_execute_order_function", "[", "self", ".", "blockchain", "]", "(", "order_params", ",", "private_key", ")", "return", "self", ".", "request", ".", "post", "(", "path", "=", "'/orders/{}/broadcast'", ".", "format", "(", "order_id", ")", ",", "json_data", "=", "api_params", ")" ]
This function executes the order created before it and signs the transaction to be submitted to the blockchain.
Execution of this function is as follows::

    execute_order(order_params=create_order, private_key=kp)

The expected return result for this function is the same as the execute_order function::

    {
        'id': '4e6a59fd-d750-4332-aaf0-f2babfa8ad67',
        'blockchain': 'neo',
        'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82',
        'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59',
        'side': 'buy',
        'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
        'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
        'offer_amount': '2000000',
        'want_amount': '10000000000',
        'transfer_amount': '0',
        'priority_gas_amount': '0',
        'use_native_token': True,
        'native_fee_transfer_amount': 0,
        'deposit_txn': None,
        'created_at': '2018-08-05T10:38:37.714Z',
        'status': 'processed',
        'fills': [],
        'makes': [
            {
                'id': 'e30a7fdf-779c-4623-8f92-8a961450d843',
                'offer_hash': 'b45ddfb97ade5e0363d9e707dac9ad1c530448db263e86494225a0025006f968',
                'available_amount': '2000000',
                'offer_asset_id': 'c56f33fc6ecfcd0c225c4ab356fee59390af8560be0e930faebe74a6daff7c9b',
                'offer_amount': '2000000',
                'want_asset_id': 'ab38352559b8b203bde5fddfa0b07d8b2525e132',
                'want_amount': '10000000000',
                'filled_amount': '0.0',
                'txn': None,
                'cancel_txn': None,
                'price': '0.0002',
                'status': 'confirming',
                'created_at': '2018-08-05T10:38:37.731Z',
                'transaction_hash': '5c4cb1e73b9f2e608b6e768e0654649a4d15e08a7fe63fc536c454fa563a2f0f',
                'trades': []
            }
        ]
    }

:param order_params: Dictionary generated from the create order function.
:type order_params: dict
:param private_key: The Private Key (ETH) or KeyPair (NEO) for the wallet being used to sign deposit message.
:type private_key: KeyPair or str
:return: Dictionary of the transaction on the order book.
[ "This", "function", "executes", "the", "order", "created", "before", "it", "and", "signs", "the", "transaction", "to", "be", "submitted", "to", "the", "blockchain", ".", "Execution", "of", "this", "function", "is", "as", "follows", "::" ]
python
train
pazz/alot
alot/settings/utils.py
https://github.com/pazz/alot/blob/d0297605c0ec1c6b65f541d0fd5b69ac5a0f4ded/alot/settings/utils.py#L14-L82
def read_config(configpath=None, specpath=None, checks=None,
                report_extra=False):
    """
    get a (validated) config object for given config file path.

    :param configpath: path to config-file or a list of lines as its content
    :type configpath: str or list(str)
    :param specpath: path to spec-file
    :type specpath: str
    :param checks: custom checks to use for validator.
        see `validate docs <http://www.voidspace.org.uk/python/validate.html>`_
    :type checks: dict str->callable,
    :param report_extra: log if a setting is not present in the spec file
    :type report_extra: boolean
    :raises: :class:`~alot.settings.errors.ConfigError`
    :rtype: `configobj.ConfigObj`
    """
    checks = checks or {}

    try:
        config = ConfigObj(infile=configpath, configspec=specpath,
                           file_error=True, encoding='UTF8')
    except ConfigObjError as e:
        msg = 'Error when parsing `%s`:\n%s' % (configpath, e)
        logging.error(msg)
        raise ConfigError(msg)
    except IOError:
        raise ConfigError('Could not read %s and/or %s'
                          % (configpath, specpath))
    except UnboundLocalError:
        # this works around a bug in configobj
        msg = '%s is malformed. Check for sections without parents..'
        raise ConfigError(msg % configpath)

    if specpath:
        validator = Validator()
        validator.functions.update(checks)
        try:
            results = config.validate(validator, preserve_errors=True)
        except ConfigObjError as e:
            raise ConfigError(str(e))

        if results is not True:
            error_msg = ''
            for (section_list, key, res) in flatten_errors(config, results):
                if key is not None:
                    if res is False:
                        msg = 'key "%s" in section "%s" is missing.'
                        msg = msg % (key, ', '.join(section_list))
                    else:
                        msg = 'key "%s" in section "%s" failed validation: %s'
                        msg = msg % (key, ', '.join(section_list), res)
                else:
                    msg = 'section "%s" is missing' % '.'.join(section_list)
                error_msg += msg + '\n'
            raise ConfigError(error_msg)

        extra_values = get_extra_values(config) if report_extra else None
        if extra_values:
            msg = ['Unknown values were found in `%s`. Please check for '
                   'typos if a specified setting does not seem to work:'
                   % configpath]
            for sections, val in extra_values:
                if sections:
                    msg.append('%s: %s' % ('->'.join(sections), val))
                else:
                    msg.append(str(val))
            logging.info('\n'.join(msg))
    return config
[ "def", "read_config", "(", "configpath", "=", "None", ",", "specpath", "=", "None", ",", "checks", "=", "None", ",", "report_extra", "=", "False", ")", ":", "checks", "=", "checks", "or", "{", "}", "try", ":", "config", "=", "ConfigObj", "(", "infile", "=", "configpath", ",", "configspec", "=", "specpath", ",", "file_error", "=", "True", ",", "encoding", "=", "'UTF8'", ")", "except", "ConfigObjError", "as", "e", ":", "msg", "=", "'Error when parsing `%s`:\\n%s'", "%", "(", "configpath", ",", "e", ")", "logging", ".", "error", "(", "msg", ")", "raise", "ConfigError", "(", "msg", ")", "except", "IOError", ":", "raise", "ConfigError", "(", "'Could not read %s and/or %s'", "%", "(", "configpath", ",", "specpath", ")", ")", "except", "UnboundLocalError", ":", "# this works around a bug in configobj", "msg", "=", "'%s is malformed. Check for sections without parents..'", "raise", "ConfigError", "(", "msg", "%", "configpath", ")", "if", "specpath", ":", "validator", "=", "Validator", "(", ")", "validator", ".", "functions", ".", "update", "(", "checks", ")", "try", ":", "results", "=", "config", ".", "validate", "(", "validator", ",", "preserve_errors", "=", "True", ")", "except", "ConfigObjError", "as", "e", ":", "raise", "ConfigError", "(", "str", "(", "e", ")", ")", "if", "results", "is", "not", "True", ":", "error_msg", "=", "''", "for", "(", "section_list", ",", "key", ",", "res", ")", "in", "flatten_errors", "(", "config", ",", "results", ")", ":", "if", "key", "is", "not", "None", ":", "if", "res", "is", "False", ":", "msg", "=", "'key \"%s\" in section \"%s\" is missing.'", "msg", "=", "msg", "%", "(", "key", ",", "', '", ".", "join", "(", "section_list", ")", ")", "else", ":", "msg", "=", "'key \"%s\" in section \"%s\" failed validation: %s'", "msg", "=", "msg", "%", "(", "key", ",", "', '", ".", "join", "(", "section_list", ")", ",", "res", ")", "else", ":", "msg", "=", "'section \"%s\" is missing'", "%", "'.'", ".", "join", "(", "section_list", ")", "error_msg", "+=", "msg", "+", "'\\n'", "raise", "ConfigError", "(", "error_msg", ")", "extra_values", "=", "get_extra_values", "(", "config", ")", "if", "report_extra", "else", "None", "if", "extra_values", ":", "msg", "=", "[", "'Unknown values were found in `%s`. Please check for '", "'typos if a specified setting does not seem to work:'", "%", "configpath", "]", "for", "sections", ",", "val", "in", "extra_values", ":", "if", "sections", ":", "msg", ".", "append", "(", "'%s: %s'", "%", "(", "'->'", ".", "join", "(", "sections", ")", ",", "val", ")", ")", "else", ":", "msg", ".", "append", "(", "str", "(", "val", ")", ")", "logging", ".", "info", "(", "'\\n'", ".", "join", "(", "msg", ")", ")", "return", "config" ]
Get a (validated) config object for a given config file path.

:param configpath: path to config-file or a list of lines as its content
:type configpath: str or list(str)
:param specpath: path to spec-file
:type specpath: str
:param checks: custom checks to use for validator.
    see `validate docs <http://www.voidspace.org.uk/python/validate.html>`_
:type checks: dict str->callable
:param report_extra: log if a setting is not present in the spec file
:type report_extra: boolean
:raises: :class:`~alot.settings.errors.ConfigError`
:rtype: `configobj.ConfigObj`
[ "get", "a", "(", "validated", ")", "config", "object", "for", "given", "config", "file", "path", "." ]
python
train
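A minimal usage sketch for the record above, assuming alot is installed; the in-memory list-of-lines form is the one the signature documents, and the module path is inferred from the `:raises:` reference, so treat it as an assumption.

```python
# Sketch only: import paths inferred from the docstring's :raises: target.
from alot.settings.utils import read_config
from alot.settings.errors import ConfigError

lines = ["theme = default", "prefer_plaintext = True"]
try:
    config = read_config(configpath=lines)  # no specpath -> no validation pass
    print(dict(config))
except ConfigError as e:
    print("rejected:", e)
```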
shawnsilva/steamwebapi
steamwebapi/profiles.py
https://github.com/shawnsilva/steamwebapi/blob/dc16538ebe985cc7ea170f660169ebc2366efbf2/steamwebapi/profiles.py#L76-L84
def personastate(self):
    """Return the Persona State of the User's Profile"""
    if self._personastate is None:
        return None
    elif self._personastate in self.PersonaState:
        return self.PersonaState[self._personastate]
    else:
        # Invalid State
        return None
[ "def", "personastate", "(", "self", ")", ":", "if", "self", ".", "_personastate", "==", "None", ":", "return", "None", "elif", "self", ".", "_personastate", "in", "self", ".", "PersonaState", ":", "return", "self", ".", "PersonaState", "[", "self", ".", "_personastate", "]", "else", ":", "#Invalid State", "return", "None" ]
Return the Persona State of the Users Profile
[ "Return", "the", "Persona", "State", "of", "the", "Users", "Profile" ]
python
train
tensorflow/probability
tensorflow_probability/python/vi/csiszar_divergence.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/vi/csiszar_divergence.py#L550-L585
def jeffreys(logu, name=None): """The Jeffreys Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Jeffreys Csiszar-function is: ```none f(u) = 0.5 ( u log(u) - log(u) ) = 0.5 kl_forward + 0.5 kl_reverse = symmetrized_csiszar_function(kl_reverse) = symmetrized_csiszar_function(kl_forward) ``` This Csiszar-function induces a symmetric f-Divergence, i.e., `D_f[p, q] = D_f[q, p]`. Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. name: Python `str` name prefixed to Ops created by this function. Returns: jeffreys_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`. """ with tf.compat.v1.name_scope(name, "jeffreys", [logu]): logu = tf.convert_to_tensor(value=logu, name="logu") return 0.5 * tf.math.expm1(logu) * logu
[ "def", "jeffreys", "(", "logu", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "compat", ".", "v1", ".", "name_scope", "(", "name", ",", "\"jeffreys\"", ",", "[", "logu", "]", ")", ":", "logu", "=", "tf", ".", "convert_to_tensor", "(", "value", "=", "logu", ",", "name", "=", "\"logu\"", ")", "return", "0.5", "*", "tf", ".", "math", ".", "expm1", "(", "logu", ")", "*", "logu" ]
The Jeffreys Csiszar-function in log-space. A Csiszar-function is a member of, ```none F = { f:R_+ to R : f convex }. ``` The Jeffreys Csiszar-function is: ```none f(u) = 0.5 ( u log(u) - log(u) ) = 0.5 kl_forward + 0.5 kl_reverse = symmetrized_csiszar_function(kl_reverse) = symmetrized_csiszar_function(kl_forward) ``` This Csiszar-function induces a symmetric f-Divergence, i.e., `D_f[p, q] = D_f[q, p]`. Warning: this function makes non-log-space calculations and may therefore be numerically unstable for `|logu| >> 0`. Args: logu: `float`-like `Tensor` representing `log(u)` from above. name: Python `str` name prefixed to Ops created by this function. Returns: jeffreys_of_u: `float`-like `Tensor` of the Csiszar-function evaluated at `u = exp(logu)`.
[ "The", "Jeffreys", "Csiszar", "-", "function", "in", "log", "-", "space", "." ]
python
test
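The one-line body relies on the identity 0.5·(u·log u − log u) = 0.5·expm1(log u)·log u. A quick numeric check of that algebra, using NumPy as a stand-in for TensorFlow:

```python
import numpy as np

logu = np.linspace(-3.0, 3.0, 13)
u = np.exp(logu)

f_direct = 0.5 * (u * np.log(u) - np.log(u))  # f(u) from the definition
f_impl = 0.5 * np.expm1(logu) * logu          # the form used in the snippet

assert np.allclose(f_direct, f_impl)
print(f_impl[::4])
```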
pepkit/peppy
peppy/utils.py
https://github.com/pepkit/peppy/blob/f0f725e1557936b81c86573a77400e6f8da78f05/peppy/utils.py#L393-L409
def standard_stream_redirector(stream): """ Temporarily redirect stdout and stderr to another stream. This can be useful for capturing messages for easier inspection, or for rerouting and essentially ignoring them, with the destination as something like an opened os.devnull. :param FileIO[str] stream: temporary proxy for standard streams """ import sys genuine_stdout, genuine_stderr = sys.stdout, sys.stderr sys.stdout, sys.stderr = stream, stream try: yield finally: sys.stdout, sys.stderr = genuine_stdout, genuine_stderr
[ "def", "standard_stream_redirector", "(", "stream", ")", ":", "import", "sys", "genuine_stdout", ",", "genuine_stderr", "=", "sys", ".", "stdout", ",", "sys", ".", "stderr", "sys", ".", "stdout", ",", "sys", ".", "stderr", "=", "stream", ",", "stream", "try", ":", "yield", "finally", ":", "sys", ".", "stdout", ",", "sys", ".", "stderr", "=", "genuine_stdout", ",", "genuine_stderr" ]
Temporarily redirect stdout and stderr to another stream. This can be useful for capturing messages for easier inspection, or for rerouting and essentially ignoring them, with the destination as something like an opened os.devnull. :param FileIO[str] stream: temporary proxy for standard streams
[ "Temporarily", "redirect", "stdout", "and", "stderr", "to", "another", "stream", "." ]
python
train
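Typical use of the redirector above; the bare `yield` implies it is wrapped with `contextlib.contextmanager` upstream, which is an assumption about code not shown in the snippet.

```python
import io
from peppy.utils import standard_stream_redirector  # path as in the record

buf = io.StringIO()
with standard_stream_redirector(buf):
    print("goes into the buffer, not the console")
print("captured:", buf.getvalue().strip())
```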
tonybaloney/wily
wily/decorators.py
https://github.com/tonybaloney/wily/blob/bae259354a91b57d56603f0ca7403186f086a84c/wily/decorators.py#L11-L20
def add_version(f): """ Add the version of wily to the help heading. :param f: function to decorate :return: decorated function """ doc = f.__doc__ f.__doc__ = "Version: " + __version__ + "\n\n" + doc return f
[ "def", "add_version", "(", "f", ")", ":", "doc", "=", "f", ".", "__doc__", "f", ".", "__doc__", "=", "\"Version: \"", "+", "__version__", "+", "\"\\n\\n\"", "+", "doc", "return", "f" ]
Add the version of wily to the help heading. :param f: function to decorate :return: decorated function
[ "Add", "the", "version", "of", "wily", "to", "the", "help", "heading", "." ]
python
train
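A hypothetical decoration showing the effect. Note the snippet concatenates onto `f.__doc__`, so it assumes the wrapped function already has a docstring (a `None` docstring would make the `+` raise a TypeError).

```python
from wily.decorators import add_version  # path as in the record

@add_version
def cli():
    """Inspect code complexity trends."""

print(cli.__doc__)
# Version: <wily __version__>
#
# Inspect code complexity trends.
```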
acutesoftware/virtual-AI-simulator
vais/character.py
https://github.com/acutesoftware/virtual-AI-simulator/blob/57de679a5b1a58c38fefe6aea58af1f3a7e79c58/vais/character.py#L198-L219
def _parse_char_line_to_self(self, k, v):
    """
    Takes a line from a saved file, split into key and value,
    and updates the appropriate attributes of the character.
    """
    k = k.strip(' ').strip('\n')
    v = v.strip(' ').strip('\n')
    # print('_parse_char_line_to_self(self, k,v): ' , k, v)
    if k == 'CHARACTER':
        self.name = v
    elif k == 'Race':
        self.race = v
    elif k == 'Class':
        self.ch_class = v
    elif k == 'STATS':
        self.stats = self._extract_stats_from_line(v)
    elif k == 'Story':
        self.story = v.strip(' ').strip('\n')
    elif k == 'SKILLS':
        self.skills = v.split(', ')
    elif k == 'INVENTORY':
        self.inventory = v.split(', ')
[ "def", "_parse_char_line_to_self", "(", "self", ",", "k", ",", "v", ")", ":", "k", "=", "k", ".", "strip", "(", "' '", ")", ".", "strip", "(", "'\\n'", ")", "v", "=", "v", ".", "strip", "(", "' '", ")", ".", "strip", "(", "'\\n'", ")", "# print('_parse_char_line_to_self(self, k,v): ' , k, v)", "if", "k", "==", "'CHARACTER'", ":", "self", ".", "name", "=", "v", "elif", "k", "==", "'Race'", ":", "self", ".", "race", "=", "v", "elif", "k", "==", "'Class'", ":", "self", ".", "ch_class", "=", "v", "elif", "k", "==", "'STATS'", ":", "self", ".", "stats", "=", "self", ".", "_extract_stats_from_line", "(", "v", ")", "elif", "k", "==", "'Story'", ":", "self", ".", "story", "=", "v", ".", "strip", "(", "' '", ")", ".", "strip", "(", "'\\n'", ")", "elif", "k", "==", "'SKILLS'", ":", "self", ".", "skills", "=", "v", ".", "split", "(", "', '", ")", "elif", "k", "==", "'INVENTORY'", ":", "self", ".", "inventory", "=", "v", ".", "split", "(", "', '", ")" ]
Takes a line from a saved file, split into key and value, and updates the appropriate attributes of the character.
[ "takes", "a", "line", "from", "a", "saved", "file", "split", "into", "key", "and", "values", "and", "updates", "the", "appropriate", "self", "parameters", "of", "character", "." ]
python
train
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/python_message.py#L761-L770
def _IsPresent(item): """Given a (FieldDescriptor, value) tuple from _fields, return true if the value should be included in the list returned by ListFields().""" if item[0].label == _FieldDescriptor.LABEL_REPEATED: return bool(item[1]) elif item[0].cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE: return item[1]._is_present_in_parent else: return True
[ "def", "_IsPresent", "(", "item", ")", ":", "if", "item", "[", "0", "]", ".", "label", "==", "_FieldDescriptor", ".", "LABEL_REPEATED", ":", "return", "bool", "(", "item", "[", "1", "]", ")", "elif", "item", "[", "0", "]", ".", "cpp_type", "==", "_FieldDescriptor", ".", "CPPTYPE_MESSAGE", ":", "return", "item", "[", "1", "]", ".", "_is_present_in_parent", "else", ":", "return", "True" ]
Given a (FieldDescriptor, value) tuple from _fields, return true if the value should be included in the list returned by ListFields().
[ "Given", "a", "(", "FieldDescriptor", "value", ")", "tuple", "from", "_fields", "return", "true", "if", "the", "value", "should", "be", "included", "in", "the", "list", "returned", "by", "ListFields", "()", "." ]
python
train
JarryShaw/PyPCAPKit
src/const/hip/registration.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/const/hip/registration.py#L17-L23
def get(key, default=-1): """Backport support for original codes.""" if isinstance(key, int): return Registration(key) if key not in Registration._member_map_: extend_enum(Registration, key, default) return Registration[key]
[ "def", "get", "(", "key", ",", "default", "=", "-", "1", ")", ":", "if", "isinstance", "(", "key", ",", "int", ")", ":", "return", "Registration", "(", "key", ")", "if", "key", "not", "in", "Registration", ".", "_member_map_", ":", "extend_enum", "(", "Registration", ",", "key", ",", "default", ")", "return", "Registration", "[", "key", "]" ]
Backport support for original codes.
[ "Backport", "support", "for", "original", "codes", "." ]
python
train
peterbrittain/asciimatics
asciimatics/screen.py
https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/asciimatics/screen.py#L83-L91
def set(self, x, y, value):
    """
    Set the cell value at the specified location

    :param x: The column (x coord) of the character.
    :param y: The row (y coord) of the character.
    :param value: A 5-tuple of (unicode, foreground, attributes, background, width).
    """
    self._double_buffer[y][x] = value
[ "def", "set", "(", "self", ",", "x", ",", "y", ",", "value", ")", ":", "self", ".", "_double_buffer", "[", "y", "]", "[", "x", "]", "=", "value" ]
Set the cell value at the specified location

:param x: The column (x coord) of the character.
:param y: The row (y coord) of the character.
:param value: A 5-tuple of (unicode, foreground, attributes, background, width).
[ "Set", "the", "cell", "value", "from", "the", "specified", "location" ]
python
train
ui/django-post_office
post_office/utils.py
https://github.com/ui/django-post_office/blob/03e1ffb69829b475402f0f3ecd9f8a90af7da4bd/post_office/utils.py#L122-L141
def parse_emails(emails): """ A function that returns a list of valid email addresses. This function will also convert a single email address into a list of email addresses. None value is also converted into an empty list. """ if isinstance(emails, string_types): emails = [emails] elif emails is None: emails = [] for email in emails: try: validate_email_with_name(email) except ValidationError: raise ValidationError('%s is not a valid email address' % email) return emails
[ "def", "parse_emails", "(", "emails", ")", ":", "if", "isinstance", "(", "emails", ",", "string_types", ")", ":", "emails", "=", "[", "emails", "]", "elif", "emails", "is", "None", ":", "emails", "=", "[", "]", "for", "email", "in", "emails", ":", "try", ":", "validate_email_with_name", "(", "email", ")", "except", "ValidationError", ":", "raise", "ValidationError", "(", "'%s is not a valid email address'", "%", "email", ")", "return", "emails" ]
A function that returns a list of valid email addresses. This function will also convert a single email address into a list of email addresses. None value is also converted into an empty list.
[ "A", "function", "that", "returns", "a", "list", "of", "valid", "email", "addresses", ".", "This", "function", "will", "also", "convert", "a", "single", "email", "address", "into", "a", "list", "of", "email", "addresses", ".", "None", "value", "is", "also", "converted", "into", "an", "empty", "list", "." ]
python
train
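Illustrative calls, assuming django-post_office (and a configured Django settings module) is importable; the name-plus-address form is an assumption based on the `validate_email_with_name` helper the snippet delegates to.

```python
from post_office.utils import parse_emails  # path as in the record

print(parse_emails("alice@example.com"))   # ['alice@example.com']
print(parse_emails(None))                  # []
print(parse_emails(["a@example.com", "Bob <bob@example.com>"]))
# parse_emails("not-an-address") raises ValidationError
```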
casacore/python-casacore
casacore/functionals/functional.py
https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/functionals/functional.py#L122-L141
def fdf(self, x): """Calculate the value of the functional for the specified arguments, and the derivatives with respect to the parameters (taking any specified mask into account). :param x: the value(s) to evaluate at """ x = self._flatten(x) n = 1 if hasattr(x, "__len__"): n = len(x) if self._dtype == 0: retval = _functional._fdf(self, x) else: retval = _functional._fdfc(self, x) if len(retval) == n: return numpy.array(retval) return numpy.array(retval).reshape(self.npar() + 1, n // self.ndim()).transpose()
[ "def", "fdf", "(", "self", ",", "x", ")", ":", "x", "=", "self", ".", "_flatten", "(", "x", ")", "n", "=", "1", "if", "hasattr", "(", "x", ",", "\"__len__\"", ")", ":", "n", "=", "len", "(", "x", ")", "if", "self", ".", "_dtype", "==", "0", ":", "retval", "=", "_functional", ".", "_fdf", "(", "self", ",", "x", ")", "else", ":", "retval", "=", "_functional", ".", "_fdfc", "(", "self", ",", "x", ")", "if", "len", "(", "retval", ")", "==", "n", ":", "return", "numpy", ".", "array", "(", "retval", ")", "return", "numpy", ".", "array", "(", "retval", ")", ".", "reshape", "(", "self", ".", "npar", "(", ")", "+", "1", ",", "n", "//", "self", ".", "ndim", "(", ")", ")", ".", "transpose", "(", ")" ]
Calculate the value of the functional for the specified arguments, and the derivatives with respect to the parameters (taking any specified mask into account). :param x: the value(s) to evaluate at
[ "Calculate", "the", "value", "of", "the", "functional", "for", "the", "specified", "arguments", "and", "the", "derivatives", "with", "respect", "to", "the", "parameters", "(", "taking", "any", "specified", "mask", "into", "account", ")", "." ]
python
train
boppreh/keyboard
keyboard/__init__.py
https://github.com/boppreh/keyboard/blob/dbb73dfff484f733d5fed8dbc53301af5b6c7f50/keyboard/__init__.py#L794-L809
def restore_state(scan_codes):
    """
    Given a list of scan_codes, ensures these keys, and only these keys,
    are pressed. Pairs well with `stash_state`, alternative to
    `restore_modifiers`.
    """
    _listener.is_replaying = True

    with _pressed_events_lock:
        current = set(_pressed_events)
    target = set(scan_codes)
    for scan_code in current - target:
        _os_keyboard.release(scan_code)
    for scan_code in target - current:
        _os_keyboard.press(scan_code)

    _listener.is_replaying = False
[ "def", "restore_state", "(", "scan_codes", ")", ":", "_listener", ".", "is_replaying", "=", "True", "with", "_pressed_events_lock", ":", "current", "=", "set", "(", "_pressed_events", ")", "target", "=", "set", "(", "scan_codes", ")", "for", "scan_code", "in", "current", "-", "target", ":", "_os_keyboard", ".", "release", "(", "scan_code", ")", "for", "scan_code", "in", "target", "-", "current", ":", "_os_keyboard", ".", "press", "(", "scan_code", ")", "_listener", ".", "is_replaying", "=", "False" ]
Given a list of scan_codes, ensures these keys, and only these keys, are pressed. Pairs well with `stash_state`, alternative to `restore_modifiers`.
[ "Given", "a", "list", "of", "scan_codes", "ensures", "these", "keys", "and", "only", "these", "keys", "are", "pressed", ".", "Pairs", "well", "with", "stash_state", "alternative", "to", "restore_modifiers", "." ]
python
train
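The natural companion is `keyboard.stash_state()`, which releases everything and returns the scan codes that were down; a round trip looks like this (requires the keyboard package and, on Linux, root privileges).

```python
import keyboard

saved = keyboard.stash_state()   # release all keys, remember what was down
# ... run something that needs a clean keyboard state ...
keyboard.restore_state(saved)    # press exactly the remembered set again
```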
wreckage/django-happenings
happenings/utils/common.py
https://github.com/wreckage/django-happenings/blob/7bca5576efa6cd4c4e87356bf9e5b8cd538ae91d/happenings/utils/common.py#L30-L34
def _inc_day(year, month, day, net): """Increments the day by converting to a datetime.date().""" d = date(year, month, day) new_d = d + timezone.timedelta(days=net) return new_d.year, new_d.month, new_d.day
[ "def", "_inc_day", "(", "year", ",", "month", ",", "day", ",", "net", ")", ":", "d", "=", "date", "(", "year", ",", "month", ",", "day", ")", "new_d", "=", "d", "+", "timezone", ".", "timedelta", "(", "days", "=", "net", ")", "return", "new_d", ".", "year", ",", "new_d", ".", "month", ",", "new_d", ".", "day" ]
Increments the day by converting to a datetime.date().
[ "Increments", "the", "day", "by", "converting", "to", "a", "datetime", ".", "date", "()", "." ]
python
test
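Because the helper defers to date arithmetic, month and year rollover come for free; the same behaviour can be checked standalone with the stdlib (in the Django versions this app targets, `timezone.timedelta` resolves to `datetime.timedelta`).

```python
from datetime import date, timedelta

d = date(2014, 12, 30) + timedelta(days=3)
print((d.year, d.month, d.day))  # (2015, 1, 2) -- year rollover handled
```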
zabertech/python-swampyer
swampyer/__init__.py
https://github.com/zabertech/python-swampyer/blob/31b040e7570455718709a496d6d9faacfb372a00/swampyer/__init__.py#L692-L700
def handle_challenge(self,data): """ Executed when the server requests additional authentication """ # Send challenge response self.send_message(AUTHENTICATE( signature = self.password, extra = {} ))
[ "def", "handle_challenge", "(", "self", ",", "data", ")", ":", "# Send challenge response", "self", ".", "send_message", "(", "AUTHENTICATE", "(", "signature", "=", "self", ".", "password", ",", "extra", "=", "{", "}", ")", ")" ]
Executed when the server requests additional authentication
[ "Executed", "when", "the", "server", "requests", "additional", "authentication" ]
python
train
inasafe/inasafe
safe/report/extractors/action_notes.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/report/extractors/action_notes.py#L204-L235
def action_checklist_report_extractor(impact_report, component_metadata): """Extracting action checklist of the impact layer to its own report. :param impact_report: the impact report that acts as a proxy to fetch all the data that extractor needed :type impact_report: safe.report.impact_report.ImpactReport :param component_metadata: the component metadata. Used to obtain information about the component we want to render :type component_metadata: safe.report.report_metadata. ReportComponentsMetadata :return: context for rendering phase :rtype: dict .. versionadded:: 4.1 """ context = {} extra_args = component_metadata.extra_args components_list = resolve_from_dictionary( extra_args, 'components_list') context['brand_logo'] = resource_url( resources_path('img', 'logos', 'inasafe-logo-white.png')) for key, component in list(components_list.items()): context[key] = jinja2_output_as_string( impact_report, component['key']) context['inasafe_resources_base_dir'] = resources_path() return context
[ "def", "action_checklist_report_extractor", "(", "impact_report", ",", "component_metadata", ")", ":", "context", "=", "{", "}", "extra_args", "=", "component_metadata", ".", "extra_args", "components_list", "=", "resolve_from_dictionary", "(", "extra_args", ",", "'components_list'", ")", "context", "[", "'brand_logo'", "]", "=", "resource_url", "(", "resources_path", "(", "'img'", ",", "'logos'", ",", "'inasafe-logo-white.png'", ")", ")", "for", "key", ",", "component", "in", "list", "(", "components_list", ".", "items", "(", ")", ")", ":", "context", "[", "key", "]", "=", "jinja2_output_as_string", "(", "impact_report", ",", "component", "[", "'key'", "]", ")", "context", "[", "'inasafe_resources_base_dir'", "]", "=", "resources_path", "(", ")", "return", "context" ]
Extracting action checklist of the impact layer to its own report. :param impact_report: the impact report that acts as a proxy to fetch all the data that extractor needed :type impact_report: safe.report.impact_report.ImpactReport :param component_metadata: the component metadata. Used to obtain information about the component we want to render :type component_metadata: safe.report.report_metadata. ReportComponentsMetadata :return: context for rendering phase :rtype: dict .. versionadded:: 4.1
[ "Extracting", "action", "checklist", "of", "the", "impact", "layer", "to", "its", "own", "report", "." ]
python
train
openspending/babbage
babbage/model/aggregate.py
https://github.com/openspending/babbage/blob/9e03efe62e0be0cceabafd4de2a09cb8ec794b92/babbage/model/aggregate.py#L22-L32
def bind(self, cube): """ When one column needs to match, use the key. """ if self.measure: table, column = self.measure.bind(cube) else: table, column = cube.fact_table, cube.fact_pk # apply the SQL aggregation function: column = getattr(func, self.function)(column) column = column.label(self.ref) column.quote = True return table, column
[ "def", "bind", "(", "self", ",", "cube", ")", ":", "if", "self", ".", "measure", ":", "table", ",", "column", "=", "self", ".", "measure", ".", "bind", "(", "cube", ")", "else", ":", "table", ",", "column", "=", "cube", ".", "fact_table", ",", "cube", ".", "fact_pk", "# apply the SQL aggregation function:", "column", "=", "getattr", "(", "func", ",", "self", ".", "function", ")", "(", "column", ")", "column", "=", "column", ".", "label", "(", "self", ".", "ref", ")", "column", ".", "quote", "=", "True", "return", "table", ",", "column" ]
When one column needs to match, use the key.
[ "When", "one", "column", "needs", "to", "match", "use", "the", "key", "." ]
python
train
DataBiosphere/dsub
dsub/providers/local.py
https://github.com/DataBiosphere/dsub/blob/443ce31daa6023dc2fd65ef2051796e19d18d5a7/dsub/providers/local.py#L566-L580
def _datetime_in_range(self, dt, dt_min=None, dt_max=None): """Determine if the provided time is within the range, inclusive.""" # The pipelines API stores operation create-time with second granularity. # We mimic this behavior in the local provider by truncating to seconds. dt = dt.replace(microsecond=0) if dt_min: dt_min = dt_min.replace(microsecond=0) else: dt_min = dsub_util.replace_timezone(datetime.datetime.min, pytz.utc) if dt_max: dt_max = dt_max.replace(microsecond=0) else: dt_max = dsub_util.replace_timezone(datetime.datetime.max, pytz.utc) return dt_min <= dt <= dt_max
[ "def", "_datetime_in_range", "(", "self", ",", "dt", ",", "dt_min", "=", "None", ",", "dt_max", "=", "None", ")", ":", "# The pipelines API stores operation create-time with second granularity.", "# We mimic this behavior in the local provider by truncating to seconds.", "dt", "=", "dt", ".", "replace", "(", "microsecond", "=", "0", ")", "if", "dt_min", ":", "dt_min", "=", "dt_min", ".", "replace", "(", "microsecond", "=", "0", ")", "else", ":", "dt_min", "=", "dsub_util", ".", "replace_timezone", "(", "datetime", ".", "datetime", ".", "min", ",", "pytz", ".", "utc", ")", "if", "dt_max", ":", "dt_max", "=", "dt_max", ".", "replace", "(", "microsecond", "=", "0", ")", "else", ":", "dt_max", "=", "dsub_util", ".", "replace_timezone", "(", "datetime", ".", "datetime", ".", "max", ",", "pytz", ".", "utc", ")", "return", "dt_min", "<=", "dt", "<=", "dt_max" ]
Determine if the provided time is within the range, inclusive.
[ "Determine", "if", "the", "provided", "time", "is", "within", "the", "range", "inclusive", "." ]
python
valid
pyviz/param
param/parameterized.py
https://github.com/pyviz/param/blob/8f0dafa78defa883247b40635f96cc6d5c1b3481/param/parameterized.py#L1231-L1255
def params(self_, parameter_name=None): """ Return the Parameters of this class as the dictionary {name: parameter_object} Includes Parameters from this class and its superclasses. """ if self_.self is not None and self_.self._instance__params: self_.warning('The Parameterized instance has instance ' 'parameters created using new-style param ' 'APIs, which are incompatible with .params. ' 'Use the new more explicit APIs on the ' '.param accessor to query parameter instances.' 'To query all parameter instances use ' '.param.objects with the option to return ' 'either class or instance parameter objects. ' 'Alternatively use .param[name] indexing to ' 'access a specific parameter object by name.') pdict = self_.objects(instance='existing') if parameter_name is None: return pdict else: return pdict[parameter_name]
[ "def", "params", "(", "self_", ",", "parameter_name", "=", "None", ")", ":", "if", "self_", ".", "self", "is", "not", "None", "and", "self_", ".", "self", ".", "_instance__params", ":", "self_", ".", "warning", "(", "'The Parameterized instance has instance '", "'parameters created using new-style param '", "'APIs, which are incompatible with .params. '", "'Use the new more explicit APIs on the '", "'.param accessor to query parameter instances.'", "'To query all parameter instances use '", "'.param.objects with the option to return '", "'either class or instance parameter objects. '", "'Alternatively use .param[name] indexing to '", "'access a specific parameter object by name.'", ")", "pdict", "=", "self_", ".", "objects", "(", "instance", "=", "'existing'", ")", "if", "parameter_name", "is", "None", ":", "return", "pdict", "else", ":", "return", "pdict", "[", "parameter_name", "]" ]
Return the Parameters of this class as the dictionary {name: parameter_object} Includes Parameters from this class and its superclasses.
[ "Return", "the", "Parameters", "of", "this", "class", "as", "the", "dictionary", "{", "name", ":", "parameter_object", "}" ]
python
train
wakatime/wakatime
wakatime/packages/pygments/util.py
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/pygments/util.py#L170-L180
def doctype_matches(text, regex): """Check if the doctype matches a regular expression (if present). Note that this method only checks the first part of a DOCTYPE. eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"' """ m = doctype_lookup_re.match(text) if m is None: return False doctype = m.group(2) return re.compile(regex, re.I).match(doctype.strip()) is not None
[ "def", "doctype_matches", "(", "text", ",", "regex", ")", ":", "m", "=", "doctype_lookup_re", ".", "match", "(", "text", ")", "if", "m", "is", "None", ":", "return", "False", "doctype", "=", "m", ".", "group", "(", "2", ")", "return", "re", ".", "compile", "(", "regex", ",", "re", ".", "I", ")", ".", "match", "(", "doctype", ".", "strip", "(", ")", ")", "is", "not", "None" ]
Check if the doctype matches a regular expression (if present). Note that this method only checks the first part of a DOCTYPE. eg: 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'
[ "Check", "if", "the", "doctype", "matches", "a", "regular", "expression", "(", "if", "present", ")", "." ]
python
train
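A quick check against a real doctype, using the same module path as the vendored snippet; the regex semantics shown follow this pygments vintage, and newer releases may differ.

```python
from pygments.util import doctype_matches  # vendored path may differ

text = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN">\n<html>'
print(doctype_matches(text, r'html'))          # True
print(doctype_matches('plain text', r'html'))  # False
```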
raamana/mrivis
mrivis/workflow.py
https://github.com/raamana/mrivis/blob/199ad096b8a1d825f69109e7218a81b2f1cec756/mrivis/workflow.py#L714-L740
def _get_checkers(slice_shape, patch_size): """Creates checkerboard of a given tile size, filling a given slice.""" if patch_size is not None: patch_size = check_patch_size(patch_size) else: # 7 patches in each axis, min voxels/patch = 3 # TODO make 7 a user settable parameter patch_size = np.round(np.array(slice_shape) / 7).astype('int16') patch_size = np.maximum(patch_size, np.array([3, 3])) black = np.zeros(patch_size) white = np.ones(patch_size) tile = np.vstack((np.hstack([black, white]), np.hstack([white, black]))) # using ceil so we can clip the extra portions num_tiles = np.ceil(np.divide(slice_shape, tile.shape)).astype(int) checkers = np.tile(tile, num_tiles) # clipping any extra columns or rows if any(np.greater(checkers.shape, slice_shape)): if checkers.shape[0] > slice_shape[0]: checkers = np.delete(checkers, np.s_[slice_shape[0]:], axis=0) if checkers.shape[1] > slice_shape[1]: checkers = np.delete(checkers, np.s_[slice_shape[1]:], axis=1) return checkers
[ "def", "_get_checkers", "(", "slice_shape", ",", "patch_size", ")", ":", "if", "patch_size", "is", "not", "None", ":", "patch_size", "=", "check_patch_size", "(", "patch_size", ")", "else", ":", "# 7 patches in each axis, min voxels/patch = 3", "# TODO make 7 a user settable parameter", "patch_size", "=", "np", ".", "round", "(", "np", ".", "array", "(", "slice_shape", ")", "/", "7", ")", ".", "astype", "(", "'int16'", ")", "patch_size", "=", "np", ".", "maximum", "(", "patch_size", ",", "np", ".", "array", "(", "[", "3", ",", "3", "]", ")", ")", "black", "=", "np", ".", "zeros", "(", "patch_size", ")", "white", "=", "np", ".", "ones", "(", "patch_size", ")", "tile", "=", "np", ".", "vstack", "(", "(", "np", ".", "hstack", "(", "[", "black", ",", "white", "]", ")", ",", "np", ".", "hstack", "(", "[", "white", ",", "black", "]", ")", ")", ")", "# using ceil so we can clip the extra portions", "num_tiles", "=", "np", ".", "ceil", "(", "np", ".", "divide", "(", "slice_shape", ",", "tile", ".", "shape", ")", ")", ".", "astype", "(", "int", ")", "checkers", "=", "np", ".", "tile", "(", "tile", ",", "num_tiles", ")", "# clipping any extra columns or rows", "if", "any", "(", "np", ".", "greater", "(", "checkers", ".", "shape", ",", "slice_shape", ")", ")", ":", "if", "checkers", ".", "shape", "[", "0", "]", ">", "slice_shape", "[", "0", "]", ":", "checkers", "=", "np", ".", "delete", "(", "checkers", ",", "np", ".", "s_", "[", "slice_shape", "[", "0", "]", ":", "]", ",", "axis", "=", "0", ")", "if", "checkers", ".", "shape", "[", "1", "]", ">", "slice_shape", "[", "1", "]", ":", "checkers", "=", "np", ".", "delete", "(", "checkers", ",", "np", ".", "s_", "[", "slice_shape", "[", "1", "]", ":", "]", ",", "axis", "=", "1", ")", "return", "checkers" ]
Creates checkerboard of a given tile size, filling a given slice.
[ "Creates", "checkerboard", "of", "a", "given", "tile", "size", "filling", "a", "given", "slice", "." ]
python
train
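A self-contained sketch of the same tile-and-clip idea, with the patch-size validation (`check_patch_size`) inlined away; slicing replaces the `np.delete` calls but yields the same clipped board.

```python
import numpy as np

def make_checkers(slice_shape, patch_size=(3, 3)):
    black = np.zeros(patch_size)
    white = np.ones(patch_size)
    # one 2x2 super-tile of alternating patches
    tile = np.vstack((np.hstack([black, white]), np.hstack([white, black])))
    # cover the slice with whole tiles, then clip the overhang
    num_tiles = np.ceil(np.divide(slice_shape, tile.shape)).astype(int)
    return np.tile(tile, num_tiles)[:slice_shape[0], :slice_shape[1]]

print(make_checkers((7, 10)).shape)  # (7, 10)
```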
quodlibet/mutagen
mutagen/_senf/_temp.py
https://github.com/quodlibet/mutagen/blob/e393df5971ba41ba5a50de9c2c9e7e5484d82c4e/mutagen/_senf/_temp.py#L54-L75
def mkstemp(suffix=None, prefix=None, dir=None, text=False): """ Args: suffix (`pathlike` or `None`): suffix or `None` to use the default prefix (`pathlike` or `None`): prefix or `None` to use the default dir (`pathlike` or `None`): temp dir or `None` to use the default text (bool): if the file should be opened in text mode Returns: Tuple[`int`, `fsnative`]: A tuple containing the file descriptor and the file path Raises: EnvironmentError Like :func:`python3:tempfile.mkstemp` but always returns a `fsnative` path. """ suffix = fsnative() if suffix is None else path2fsn(suffix) prefix = gettempprefix() if prefix is None else path2fsn(prefix) dir = gettempdir() if dir is None else path2fsn(dir) return tempfile.mkstemp(suffix, prefix, dir, text)
[ "def", "mkstemp", "(", "suffix", "=", "None", ",", "prefix", "=", "None", ",", "dir", "=", "None", ",", "text", "=", "False", ")", ":", "suffix", "=", "fsnative", "(", ")", "if", "suffix", "is", "None", "else", "path2fsn", "(", "suffix", ")", "prefix", "=", "gettempprefix", "(", ")", "if", "prefix", "is", "None", "else", "path2fsn", "(", "prefix", ")", "dir", "=", "gettempdir", "(", ")", "if", "dir", "is", "None", "else", "path2fsn", "(", "dir", ")", "return", "tempfile", ".", "mkstemp", "(", "suffix", ",", "prefix", ",", "dir", ",", "text", ")" ]
Args: suffix (`pathlike` or `None`): suffix or `None` to use the default prefix (`pathlike` or `None`): prefix or `None` to use the default dir (`pathlike` or `None`): temp dir or `None` to use the default text (bool): if the file should be opened in text mode Returns: Tuple[`int`, `fsnative`]: A tuple containing the file descriptor and the file path Raises: EnvironmentError Like :func:`python3:tempfile.mkstemp` but always returns a `fsnative` path.
[ "Args", ":", "suffix", "(", "pathlike", "or", "None", ")", ":", "suffix", "or", "None", "to", "use", "the", "default", "prefix", "(", "pathlike", "or", "None", ")", ":", "prefix", "or", "None", "to", "use", "the", "default", "dir", "(", "pathlike", "or", "None", ")", ":", "temp", "dir", "or", "None", "to", "use", "the", "default", "text", "(", "bool", ")", ":", "if", "the", "file", "should", "be", "opened", "in", "text", "mode", "Returns", ":", "Tuple", "[", "int", "fsnative", "]", ":", "A", "tuple", "containing", "the", "file", "descriptor", "and", "the", "file", "path", "Raises", ":", "EnvironmentError" ]
python
train
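Usage mirrors `tempfile.mkstemp`; the top-level import below assumes the vendored senf package re-exports `mkstemp` from `_temp`, so adjust the path if it does not.

```python
import os
from mutagen._senf import mkstemp  # re-export assumed; else use ._senf._temp

fd, path = mkstemp(suffix=".probe")
try:
    os.write(fd, b"hello")
finally:
    os.close(fd)
    os.remove(path)
```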
nickjj/ansigenome
ansigenome/utils.py
https://github.com/nickjj/ansigenome/blob/70cd98d7a23d36c56f4e713ea820cfb4c485c81c/ansigenome/utils.py#L59-L68
def file_to_string(path): """ Return the contents of a file when given a path. """ if not os.path.exists(path): ui.error(c.MESSAGES["path_missing"], path) sys.exit(1) with codecs.open(path, "r", "UTF-8") as contents: return contents.read()
[ "def", "file_to_string", "(", "path", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "ui", ".", "error", "(", "c", ".", "MESSAGES", "[", "\"path_missing\"", "]", ",", "path", ")", "sys", ".", "exit", "(", "1", ")", "with", "codecs", ".", "open", "(", "path", ",", "\"r\"", ",", "\"UTF-8\"", ")", "as", "contents", ":", "return", "contents", ".", "read", "(", ")" ]
Return the contents of a file when given a path.
[ "Return", "the", "contents", "of", "a", "file", "when", "given", "a", "path", "." ]
python
train
robert-b-clarke/nre-darwin-py
nredarwin/webservice.py
https://github.com/robert-b-clarke/nre-darwin-py/blob/6b0b181770e085dc7f71fbd2eb3fe779f653da62/nredarwin/webservice.py#L601-L612
def previous_calling_points(self): """ A list of CallingPoint objects. This is the list of all previous calling points for the service, including all associated services if multiple services join together to form this service. """ calling_points = list() for cpl in self._previous_calling_point_lists: calling_points += cpl.calling_points return calling_points
[ "def", "previous_calling_points", "(", "self", ")", ":", "calling_points", "=", "list", "(", ")", "for", "cpl", "in", "self", ".", "_previous_calling_point_lists", ":", "calling_points", "+=", "cpl", ".", "calling_points", "return", "calling_points" ]
A list of CallingPoint objects. This is the list of all previous calling points for the service, including all associated services if multiple services join together to form this service.
[ "A", "list", "of", "CallingPoint", "objects", "." ]
python
train
klen/graphite-beacon
graphite_beacon/handlers/cli.py
https://github.com/klen/graphite-beacon/blob/c1f071e9f557693bc90f6acbc314994985dc3b77/graphite_beacon/handlers/cli.py#L41-L57
def substitute_variables(command, level, name, value, target=None, **kwargs): """Substitute variables in command fragments by values e.g. ${level} => 'warning'.""" rule = kwargs.get('rule', {}) rule_value = rule.get('value', '') if rule else '' substitutes = { '${level}': str(level), '${target}': str(target), '${name}': '"' + str(name) + '"', '${value}': str(value), '${limit_value}': str(rule_value), } result = command for pattern, value in substitutes.items(): result = result.replace(pattern, value) return result
[ "def", "substitute_variables", "(", "command", ",", "level", ",", "name", ",", "value", ",", "target", "=", "None", ",", "*", "*", "kwargs", ")", ":", "rule", "=", "kwargs", ".", "get", "(", "'rule'", ",", "{", "}", ")", "rule_value", "=", "rule", ".", "get", "(", "'value'", ",", "''", ")", "if", "rule", "else", "''", "substitutes", "=", "{", "'${level}'", ":", "str", "(", "level", ")", ",", "'${target}'", ":", "str", "(", "target", ")", ",", "'${name}'", ":", "'\"'", "+", "str", "(", "name", ")", "+", "'\"'", ",", "'${value}'", ":", "str", "(", "value", ")", ",", "'${limit_value}'", ":", "str", "(", "rule_value", ")", ",", "}", "result", "=", "command", "for", "pattern", ",", "value", "in", "substitutes", ".", "items", "(", ")", ":", "result", "=", "result", ".", "replace", "(", "pattern", ",", "value", ")", "return", "result" ]
Substitute variables in command fragments by values e.g. ${level} => 'warning'.
[ "Substitute", "variables", "in", "command", "fragments", "by", "values", "e", ".", "g", ".", "$", "{", "level", "}", "=", ">", "warning", "." ]
python
train
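A worked expansion; note the function quotes `${name}` itself, so the template should not add its own quotes around it (module path as in the record).

```python
from graphite_beacon.handlers.cli import substitute_variables

cmd = 'notify-send ${name} "${level}: ${value} (limit ${limit_value})"'
out = substitute_variables(cmd, 'warning', 'cpu', 93,
                           target='servers.web1.cpu', rule={'value': 90})
print(out)
# notify-send "cpu" "warning: 93 (limit 90)"
```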
jmcgeheeiv/pyfakefs
pyfakefs/fake_scandir.py
https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/fake_scandir.py#L176-L200
def _classify_directory_contents(filesystem, root): """Classify contents of a directory as files/directories. Args: filesystem: The fake filesystem used for implementation root: (str) Directory to examine. Returns: (tuple) A tuple consisting of three values: the directory examined, a list containing all of the directory entries, and a list containing all of the non-directory entries. (This is the same format as returned by the `os.walk` generator.) Raises: Nothing on its own, but be ready to catch exceptions generated by underlying mechanisms like `os.listdir`. """ dirs = [] files = [] for entry in filesystem.listdir(root): if filesystem.isdir(filesystem.joinpaths(root, entry)): dirs.append(entry) else: files.append(entry) return root, dirs, files
[ "def", "_classify_directory_contents", "(", "filesystem", ",", "root", ")", ":", "dirs", "=", "[", "]", "files", "=", "[", "]", "for", "entry", "in", "filesystem", ".", "listdir", "(", "root", ")", ":", "if", "filesystem", ".", "isdir", "(", "filesystem", ".", "joinpaths", "(", "root", ",", "entry", ")", ")", ":", "dirs", ".", "append", "(", "entry", ")", "else", ":", "files", ".", "append", "(", "entry", ")", "return", "root", ",", "dirs", ",", "files" ]
Classify contents of a directory as files/directories. Args: filesystem: The fake filesystem used for implementation root: (str) Directory to examine. Returns: (tuple) A tuple consisting of three values: the directory examined, a list containing all of the directory entries, and a list containing all of the non-directory entries. (This is the same format as returned by the `os.walk` generator.) Raises: Nothing on its own, but be ready to catch exceptions generated by underlying mechanisms like `os.listdir`.
[ "Classify", "contents", "of", "a", "directory", "as", "files", "/", "directories", "." ]
python
train
vertexproject/synapse
synapse/lib/provenance.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/provenance.py#L169-L181
def commit(self): ''' Writes the current provenance stack to storage if it wasn't already there and returns it Returns (Tuple[bool, str, List[]]): Whether the stack was not cached, the iden of the prov stack, and the provstack ''' providen, provstack = get() wasnew = (providen is None) if wasnew: providen = self.getProvIden(provstack) setiden(providen) return wasnew, s_common.ehex(providen), provstack
[ "def", "commit", "(", "self", ")", ":", "providen", ",", "provstack", "=", "get", "(", ")", "wasnew", "=", "(", "providen", "is", "None", ")", "if", "wasnew", ":", "providen", "=", "self", ".", "getProvIden", "(", "provstack", ")", "setiden", "(", "providen", ")", "return", "wasnew", ",", "s_common", ".", "ehex", "(", "providen", ")", ",", "provstack" ]
Writes the current provenance stack to storage if it wasn't already there and returns it Returns (Tuple[bool, str, List[]]): Whether the stack was not cached, the iden of the prov stack, and the provstack
[ "Writes", "the", "current", "provenance", "stack", "to", "storage", "if", "it", "wasn", "t", "already", "there", "and", "returns", "it" ]
python
train
aio-libs/aioredis
aioredis/commands/set.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/set.py#L79-L90
def isscan(self, key, *, match=None, count=None): """Incrementally iterate set elements using async for. Usage example: >>> async for val in redis.isscan(key, match='something*'): ... print('Matched:', val) """ return _ScanIter(lambda cur: self.sscan(key, cur, match=match, count=count))
[ "def", "isscan", "(", "self", ",", "key", ",", "*", ",", "match", "=", "None", ",", "count", "=", "None", ")", ":", "return", "_ScanIter", "(", "lambda", "cur", ":", "self", ".", "sscan", "(", "key", ",", "cur", ",", "match", "=", "match", ",", "count", "=", "count", ")", ")" ]
Incrementally iterate set elements using async for. Usage example: >>> async for val in redis.isscan(key, match='something*'): ... print('Matched:', val)
[ "Incrementally", "iterate", "set", "elements", "using", "async", "for", "." ]
python
train
portfors-lab/sparkle
sparkle/gui/stim/stimulusview.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/stim/stimulusview.py#L139-L156
def splitAt(self, point): """Gets the nearest index to *point*, *point* does not have to be over an item. index can be +1 more in row and/or column than existing items :param point: any point within the view, in view coordinates :type point: :qtdoc:`QPoint` :returns: (int, int) -- (row, column) of the nearest index """ wx = point.x() + self.horizontalScrollBar().value() wy = point.y() + self.verticalScrollBar().value() row = wy/(ROW_HEIGHT + ROW_SPACE) if row > self.model().rowCount(self.rootIndex()) - 1: row = self.model().rowCount(self.rootIndex()) for col in range(self.model().columnCountForRow(row)): if self._rects[row][col].contains(wx, wy): return (row, col) return row, self.model().columnCountForRow(row)
[ "def", "splitAt", "(", "self", ",", "point", ")", ":", "wx", "=", "point", ".", "x", "(", ")", "+", "self", ".", "horizontalScrollBar", "(", ")", ".", "value", "(", ")", "wy", "=", "point", ".", "y", "(", ")", "+", "self", ".", "verticalScrollBar", "(", ")", ".", "value", "(", ")", "row", "=", "wy", "/", "(", "ROW_HEIGHT", "+", "ROW_SPACE", ")", "if", "row", ">", "self", ".", "model", "(", ")", ".", "rowCount", "(", "self", ".", "rootIndex", "(", ")", ")", "-", "1", ":", "row", "=", "self", ".", "model", "(", ")", ".", "rowCount", "(", "self", ".", "rootIndex", "(", ")", ")", "for", "col", "in", "range", "(", "self", ".", "model", "(", ")", ".", "columnCountForRow", "(", "row", ")", ")", ":", "if", "self", ".", "_rects", "[", "row", "]", "[", "col", "]", ".", "contains", "(", "wx", ",", "wy", ")", ":", "return", "(", "row", ",", "col", ")", "return", "row", ",", "self", ".", "model", "(", ")", ".", "columnCountForRow", "(", "row", ")" ]
Gets the nearest index to *point*, *point* does not have to be over an item. index can be +1 more in row and/or column than existing items :param point: any point within the view, in view coordinates :type point: :qtdoc:`QPoint` :returns: (int, int) -- (row, column) of the nearest index
[ "Gets", "the", "nearest", "index", "to", "*", "point", "*", "*", "point", "*", "does", "not", "have", "to", "be", "over", "an", "item", ".", "index", "can", "be", "+", "1", "more", "in", "row", "and", "/", "or", "column", "than", "existing", "items" ]
python
train
alefnula/tea
tea/shell/__init__.py
https://github.com/alefnula/tea/blob/f5a0a724a425ec4f9dd2c7fe966ef06faf3a15a3/tea/shell/__init__.py#L25-L37
def split(s, posix=True): """Split the string s using shell-like syntax. Args: s (str): String to split posix (bool): Use posix split Returns: list of str: List of string parts """ if isinstance(s, six.binary_type): s = s.decode("utf-8") return shlex.split(s, posix=posix)
[ "def", "split", "(", "s", ",", "posix", "=", "True", ")", ":", "if", "isinstance", "(", "s", ",", "six", ".", "binary_type", ")", ":", "s", "=", "s", ".", "decode", "(", "\"utf-8\"", ")", "return", "shlex", ".", "split", "(", "s", ",", "posix", "=", "posix", ")" ]
Split the string s using shell-like syntax. Args: s (str): String to split posix (bool): Use posix split Returns: list of str: List of string parts
[ "Split", "the", "string", "s", "using", "shell", "-", "like", "syntax", "." ]
python
train
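Two quick calls; `posix=False` keeps shlex's non-POSIX behaviour, where quotes stay attached to the token (import path as in the record).

```python
from tea.shell import split

print(split('ls -la "My Documents"'))      # ['ls', '-la', 'My Documents']
print(split('copy "a b" c', posix=False))  # ['copy', '"a b"', 'c']
```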
tanghaibao/jcvi
jcvi/assembly/coverage.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/coverage.py#L96-L160
def bed_to_bedpe(bedfile, bedpefile, pairsbedfile=None, matesfile=None, ca=False, strand=False): """ This converts the bedfile to bedpefile, assuming the reads are from CA. """ fp = must_open(bedfile) fw = must_open(bedpefile, "w") if pairsbedfile: fwpairs = must_open(pairsbedfile, "w") clones = defaultdict(list) for row in fp: b = BedLine(row) name = b.accn clonename = clone_name(name, ca=ca) clones[clonename].append(b) if matesfile: fp = open(matesfile) libraryline = next(fp) # 'library bes 37896 126916' lib, name, smin, smax = libraryline.split() assert lib == "library" smin, smax = int(smin), int(smax) logging.debug("Happy mates for lib {0} fall between {1} - {2}".\ format(name, smin, smax)) nbedpe = 0 nspan = 0 for clonename, blines in clones.items(): nlines = len(blines) if nlines == 2: a, b = blines aseqid, astart, aend = a.seqid, a.start, a.end bseqid, bstart, bend = b.seqid, b.start, b.end outcols = [aseqid, astart - 1, aend, bseqid, bstart - 1, bend, clonename] if strand: outcols.extend([0, a.strand, b.strand]) print("\t".join(str(x) for x in outcols), file=fw) nbedpe += 1 elif nlines == 1: a, = blines aseqid, astart, aend = a.seqid, a.start, a.end bseqid, bstart, bend = 0, 0, 0 else: # More than two lines per pair pass if pairsbedfile: start = min(astart, bstart) if bstart > 0 else astart end = max(aend, bend) if bend > 0 else aend if aseqid != bseqid: continue span = end - start + 1 if (not matesfile) or (smin <= span <= smax): print("\t".join(str(x) for x in \ (aseqid, start - 1, end, clonename)), file=fwpairs) nspan += 1 fw.close() logging.debug("A total of {0} bedpe written to `{1}`.".\ format(nbedpe, bedpefile)) if pairsbedfile: fwpairs.close() logging.debug("A total of {0} spans written to `{1}`.".\ format(nspan, pairsbedfile))
[ "def", "bed_to_bedpe", "(", "bedfile", ",", "bedpefile", ",", "pairsbedfile", "=", "None", ",", "matesfile", "=", "None", ",", "ca", "=", "False", ",", "strand", "=", "False", ")", ":", "fp", "=", "must_open", "(", "bedfile", ")", "fw", "=", "must_open", "(", "bedpefile", ",", "\"w\"", ")", "if", "pairsbedfile", ":", "fwpairs", "=", "must_open", "(", "pairsbedfile", ",", "\"w\"", ")", "clones", "=", "defaultdict", "(", "list", ")", "for", "row", "in", "fp", ":", "b", "=", "BedLine", "(", "row", ")", "name", "=", "b", ".", "accn", "clonename", "=", "clone_name", "(", "name", ",", "ca", "=", "ca", ")", "clones", "[", "clonename", "]", ".", "append", "(", "b", ")", "if", "matesfile", ":", "fp", "=", "open", "(", "matesfile", ")", "libraryline", "=", "next", "(", "fp", ")", "# 'library bes 37896 126916'", "lib", ",", "name", ",", "smin", ",", "smax", "=", "libraryline", ".", "split", "(", ")", "assert", "lib", "==", "\"library\"", "smin", ",", "smax", "=", "int", "(", "smin", ")", ",", "int", "(", "smax", ")", "logging", ".", "debug", "(", "\"Happy mates for lib {0} fall between {1} - {2}\"", ".", "format", "(", "name", ",", "smin", ",", "smax", ")", ")", "nbedpe", "=", "0", "nspan", "=", "0", "for", "clonename", ",", "blines", "in", "clones", ".", "items", "(", ")", ":", "nlines", "=", "len", "(", "blines", ")", "if", "nlines", "==", "2", ":", "a", ",", "b", "=", "blines", "aseqid", ",", "astart", ",", "aend", "=", "a", ".", "seqid", ",", "a", ".", "start", ",", "a", ".", "end", "bseqid", ",", "bstart", ",", "bend", "=", "b", ".", "seqid", ",", "b", ".", "start", ",", "b", ".", "end", "outcols", "=", "[", "aseqid", ",", "astart", "-", "1", ",", "aend", ",", "bseqid", ",", "bstart", "-", "1", ",", "bend", ",", "clonename", "]", "if", "strand", ":", "outcols", ".", "extend", "(", "[", "0", ",", "a", ".", "strand", ",", "b", ".", "strand", "]", ")", "print", "(", "\"\\t\"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "outcols", ")", ",", "file", "=", "fw", ")", "nbedpe", "+=", "1", "elif", "nlines", "==", "1", ":", "a", ",", "=", "blines", "aseqid", ",", "astart", ",", "aend", "=", "a", ".", "seqid", ",", "a", ".", "start", ",", "a", ".", "end", "bseqid", ",", "bstart", ",", "bend", "=", "0", ",", "0", ",", "0", "else", ":", "# More than two lines per pair", "pass", "if", "pairsbedfile", ":", "start", "=", "min", "(", "astart", ",", "bstart", ")", "if", "bstart", ">", "0", "else", "astart", "end", "=", "max", "(", "aend", ",", "bend", ")", "if", "bend", ">", "0", "else", "aend", "if", "aseqid", "!=", "bseqid", ":", "continue", "span", "=", "end", "-", "start", "+", "1", "if", "(", "not", "matesfile", ")", "or", "(", "smin", "<=", "span", "<=", "smax", ")", ":", "print", "(", "\"\\t\"", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "(", "aseqid", ",", "start", "-", "1", ",", "end", ",", "clonename", ")", ")", ",", "file", "=", "fwpairs", ")", "nspan", "+=", "1", "fw", ".", "close", "(", ")", "logging", ".", "debug", "(", "\"A total of {0} bedpe written to `{1}`.\"", ".", "format", "(", "nbedpe", ",", "bedpefile", ")", ")", "if", "pairsbedfile", ":", "fwpairs", ".", "close", "(", ")", "logging", ".", "debug", "(", "\"A total of {0} spans written to `{1}`.\"", ".", "format", "(", "nspan", ",", "pairsbedfile", ")", ")" ]
This converts the bedfile to bedpefile, assuming the reads are from CA.
[ "This", "converts", "the", "bedfile", "to", "bedpefile", "assuming", "the", "reads", "are", "from", "CA", "." ]
python
train
ray-project/ray
python/ray/experimental/state.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/experimental/state.py#L528-L596
def chrome_tracing_dump(self, filename=None):
    """Return a list of profiling events that can be viewed as a timeline.

    To view this information as a timeline, simply dump it as a json file
    by passing in "filename" or using json.dump, and then go to
    chrome://tracing in the Chrome web browser and load the dumped file.
    Make sure to enable "Flow events" in the "View Options" menu.

    Args:
        filename: If a filename is provided, the timeline is dumped to
            that file.

    Returns:
        If filename is not provided, this returns a list of profiling
            events. Each profile event is a dictionary.
    """
    # TODO(rkn): Support including the task specification data in the
    # timeline.
    # TODO(rkn): This should support viewing just a window of time or a
    # limited number of events.
    profile_table = self.profile_table()
    all_events = []

    for component_id_hex, component_events in profile_table.items():
        # Only consider workers and drivers.
        component_type = component_events[0]["component_type"]
        if component_type not in ["worker", "driver"]:
            continue

        for event in component_events:
            new_event = {
                # The category of the event.
                "cat": event["event_type"],
                # The string displayed on the event.
                "name": event["event_type"],
                # The identifier for the group of rows that the event
                # appears in.
                "pid": event["node_ip_address"],
                # The identifier for the row that the event appears in.
                "tid": event["component_type"] + ":" +
                       event["component_id"],
                # The start time in microseconds.
                "ts": self._seconds_to_microseconds(event["start_time"]),
                # The duration in microseconds.
                "dur": self._seconds_to_microseconds(event["end_time"] -
                                                     event["start_time"]),
                # What is this?
                "ph": "X",
                # This is the name of the color to display the box in.
                "cname": self._default_color_mapping[event["event_type"]],
                # The extra user-defined data.
                "args": event["extra_data"],
            }

            # Modify the json with the additional user-defined extra data.
            # This can be used to add fields or override existing fields.
            if "cname" in event["extra_data"]:
                new_event["cname"] = event["extra_data"]["cname"]
            if "name" in event["extra_data"]:
                new_event["name"] = event["extra_data"]["name"]

            all_events.append(new_event)

    if filename is not None:
        with open(filename, "w") as outfile:
            json.dump(all_events, outfile)
    else:
        return all_events
[ "def", "chrome_tracing_dump", "(", "self", ",", "filename", "=", "None", ")", ":", "# TODO(rkn): Support including the task specification data in the", "# timeline.", "# TODO(rkn): This should support viewing just a window of time or a", "# limited number of events.", "profile_table", "=", "self", ".", "profile_table", "(", ")", "all_events", "=", "[", "]", "for", "component_id_hex", ",", "component_events", "in", "profile_table", ".", "items", "(", ")", ":", "# Only consider workers and drivers.", "component_type", "=", "component_events", "[", "0", "]", "[", "\"component_type\"", "]", "if", "component_type", "not", "in", "[", "\"worker\"", ",", "\"driver\"", "]", ":", "continue", "for", "event", "in", "component_events", ":", "new_event", "=", "{", "# The category of the event.", "\"cat\"", ":", "event", "[", "\"event_type\"", "]", ",", "# The string displayed on the event.", "\"name\"", ":", "event", "[", "\"event_type\"", "]", ",", "# The identifier for the group of rows that the event", "# appears in.", "\"pid\"", ":", "event", "[", "\"node_ip_address\"", "]", ",", "# The identifier for the row that the event appears in.", "\"tid\"", ":", "event", "[", "\"component_type\"", "]", "+", "\":\"", "+", "event", "[", "\"component_id\"", "]", ",", "# The start time in microseconds.", "\"ts\"", ":", "self", ".", "_seconds_to_microseconds", "(", "event", "[", "\"start_time\"", "]", ")", ",", "# The duration in microseconds.", "\"dur\"", ":", "self", ".", "_seconds_to_microseconds", "(", "event", "[", "\"end_time\"", "]", "-", "event", "[", "\"start_time\"", "]", ")", ",", "# What is this?", "\"ph\"", ":", "\"X\"", ",", "# This is the name of the color to display the box in.", "\"cname\"", ":", "self", ".", "_default_color_mapping", "[", "event", "[", "\"event_type\"", "]", "]", ",", "# The extra user-defined data.", "\"args\"", ":", "event", "[", "\"extra_data\"", "]", ",", "}", "# Modify the json with the additional user-defined extra data.", "# This can be used to add fields or override existing fields.", "if", "\"cname\"", "in", "event", "[", "\"extra_data\"", "]", ":", "new_event", "[", "\"cname\"", "]", "=", "event", "[", "\"extra_data\"", "]", "[", "\"cname\"", "]", "if", "\"name\"", "in", "event", "[", "\"extra_data\"", "]", ":", "new_event", "[", "\"name\"", "]", "=", "event", "[", "\"extra_data\"", "]", "[", "\"name\"", "]", "all_events", ".", "append", "(", "new_event", ")", "if", "filename", "is", "not", "None", ":", "with", "open", "(", "filename", ",", "\"w\"", ")", "as", "outfile", ":", "json", ".", "dump", "(", "all_events", ",", "outfile", ")", "else", ":", "return", "all_events" ]
Return a list of profiling events that can be viewed as a timeline.

To view this information as a timeline, simply dump it as a json file by
passing in "filename" or using json.dump, and then go to chrome://tracing
in the Chrome web browser and load the dumped file. Make sure to enable
"Flow events" in the "View Options" menu.

Args:
    filename: If a filename is provided, the timeline is dumped to that
        file.

Returns:
    If filename is not provided, this returns a list of profiling
        events. Each profile event is a dictionary.
[ "Return", "a", "list", "of", "profiling", "events", "that", "can", "viewed", "as", "a", "timeline", "." ]
python
train
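A hypothetical end-to-end driver for the Ray vintage this snippet comes from, where the GlobalState instance was exposed as `ray.global_state` (an assumption; later releases replaced this path with `ray.timeline()`).

```python
import ray

ray.init()

@ray.remote
def work(i):
    return i * i

ray.get([work.remote(i) for i in range(8)])

# Dump the events, then open chrome://tracing and load timeline.json.
# ray.global_state is assumed to be the GlobalState singleton of this era.
ray.global_state.chrome_tracing_dump(filename="timeline.json")
```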
saltstack/salt
salt/utils/schedule.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/schedule.py#L201-L246
def _check_max_running(self, func, data, opts, now): ''' Return the schedule data structure ''' # Check to see if there are other jobs with this # signature running. If there are more than maxrunning # jobs present then don't start another. # If jid_include is False for this job we can ignore all this # NOTE--jid_include defaults to True, thus if it is missing from the data # dict we treat it like it was there and is True # Check if we're able to run if not data['run']: return data if 'jid_include' not in data or data['jid_include']: jobcount = 0 if self.opts['__role'] == 'master': current_jobs = salt.utils.master.get_running_jobs(self.opts) else: current_jobs = salt.utils.minion.running(self.opts) for job in current_jobs: if 'schedule' in job: log.debug( 'schedule.handle_func: Checking job against fun ' '%s: %s', func, job ) if data['name'] == job['schedule'] \ and salt.utils.process.os_is_running(job['pid']): jobcount += 1 log.debug( 'schedule.handle_func: Incrementing jobcount, ' 'now %s, maxrunning is %s', jobcount, data['maxrunning'] ) if jobcount >= data['maxrunning']: log.debug( 'schedule.handle_func: The scheduled job ' '%s was not started, %s already running', data['name'], data['maxrunning'] ) data['_skip_reason'] = 'maxrunning' data['_skipped'] = True data['_skipped_time'] = now data['run'] = False return data return data
[ "def", "_check_max_running", "(", "self", ",", "func", ",", "data", ",", "opts", ",", "now", ")", ":", "# Check to see if there are other jobs with this", "# signature running. If there are more than maxrunning", "# jobs present then don't start another.", "# If jid_include is False for this job we can ignore all this", "# NOTE--jid_include defaults to True, thus if it is missing from the data", "# dict we treat it like it was there and is True", "# Check if we're able to run", "if", "not", "data", "[", "'run'", "]", ":", "return", "data", "if", "'jid_include'", "not", "in", "data", "or", "data", "[", "'jid_include'", "]", ":", "jobcount", "=", "0", "if", "self", ".", "opts", "[", "'__role'", "]", "==", "'master'", ":", "current_jobs", "=", "salt", ".", "utils", ".", "master", ".", "get_running_jobs", "(", "self", ".", "opts", ")", "else", ":", "current_jobs", "=", "salt", ".", "utils", ".", "minion", ".", "running", "(", "self", ".", "opts", ")", "for", "job", "in", "current_jobs", ":", "if", "'schedule'", "in", "job", ":", "log", ".", "debug", "(", "'schedule.handle_func: Checking job against fun '", "'%s: %s'", ",", "func", ",", "job", ")", "if", "data", "[", "'name'", "]", "==", "job", "[", "'schedule'", "]", "and", "salt", ".", "utils", ".", "process", ".", "os_is_running", "(", "job", "[", "'pid'", "]", ")", ":", "jobcount", "+=", "1", "log", ".", "debug", "(", "'schedule.handle_func: Incrementing jobcount, '", "'now %s, maxrunning is %s'", ",", "jobcount", ",", "data", "[", "'maxrunning'", "]", ")", "if", "jobcount", ">=", "data", "[", "'maxrunning'", "]", ":", "log", ".", "debug", "(", "'schedule.handle_func: The scheduled job '", "'%s was not started, %s already running'", ",", "data", "[", "'name'", "]", ",", "data", "[", "'maxrunning'", "]", ")", "data", "[", "'_skip_reason'", "]", "=", "'maxrunning'", "data", "[", "'_skipped'", "]", "=", "True", "data", "[", "'_skipped_time'", "]", "=", "now", "data", "[", "'run'", "]", "=", "False", "return", "data", "return", "data" ]
Return the schedule data structure
[ "Return", "the", "schedule", "data", "structure" ]
python
train
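A hedged sketch of the contract described above: the data dict keys (run, name, maxrunning, jid_include) are exactly the ones _check_max_running reads, while the surrounding Schedule instance and opts are assumed rather than constructed.

import datetime

# Keys mirror those consumed by _check_max_running; the values are illustrative.
data = {
    'run': True,          # set by earlier scheduling checks
    'name': 'backup',     # compared against job['schedule'] of running jobs
    'maxrunning': 1,      # concurrency cap for this scheduled job
    'jid_include': True,  # the default; include this job in the jid check
}
now = datetime.datetime.now()
# With a real Schedule instance `sched`, the call would be:
#   data = sched._check_max_running('test.ping', data, sched.opts, now)
# data['run'] flips to False (and _skip_reason becomes 'maxrunning') once
# `maxrunning` jobs with the same name are already executing.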
roclark/sportsreference
sportsreference/ncaab/roster.py
https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaab/roster.py#L120-L138
def _retrieve_html_page(self): """ Download the requested player's stats page. Download the requested page and strip all of the comment tags before returning a pyquery object which will be used to parse the data. Returns ------- PyQuery object The requested page is returned as a queriable PyQuery object with the comment tags removed. """ url = PLAYER_URL % self._player_id try: url_data = pq(url) except HTTPError: return None return pq(utils._remove_html_comment_tags(url_data))
[ "def", "_retrieve_html_page", "(", "self", ")", ":", "url", "=", "PLAYER_URL", "%", "self", ".", "_player_id", "try", ":", "url_data", "=", "pq", "(", "url", ")", "except", "HTTPError", ":", "return", "None", "return", "pq", "(", "utils", ".", "_remove_html_comment_tags", "(", "url_data", ")", ")" ]
Download the requested player's stats page. Download the requested page and strip all of the comment tags before returning a pyquery object which will be used to parse the data. Returns ------- PyQuery object The requested page is returned as a queriable PyQuery object with the comment tags removed.
[ "Download", "the", "requested", "player", "s", "stats", "page", "." ]
python
train
materialsproject/pymatgen
pymatgen/io/abinit/flows.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/flows.py#L1190-L1218
def show_tricky_tasks(self, verbose=0): """ Print list of tricky tasks i.e. tasks that have been restarted or launched more than once or tasks with corrections. Args: verbose: Verbosity level. If > 0, task history and corrections (if any) are printed. """ nids, tasks = [], [] for task in self.iflat_tasks(): if task.num_launches > 1 or any(n > 0 for n in (task.num_restarts, task.num_corrections)): nids.append(task.node_id) tasks.append(task) if not nids: cprint("Everything's fine, no tricky tasks found", color="green") else: self.show_status(nids=nids) if not verbose: print("Use --verbose to print task history.") return for nid, task in zip(nids, tasks): cprint(repr(task), **task.status.color_opts) self.show_history(nids=[nid], full_history=False, metadata=False) #if task.num_restarts: # self.show_restarts(nids=[nid]) if task.num_corrections: self.show_corrections(nids=[nid])
[ "def", "show_tricky_tasks", "(", "self", ",", "verbose", "=", "0", ")", ":", "nids", ",", "tasks", "=", "[", "]", ",", "[", "]", "for", "task", "in", "self", ".", "iflat_tasks", "(", ")", ":", "if", "task", ".", "num_launches", ">", "1", "or", "any", "(", "n", ">", "0", "for", "n", "in", "(", "task", ".", "num_restarts", ",", "task", ".", "num_corrections", ")", ")", ":", "nids", ".", "append", "(", "task", ".", "node_id", ")", "tasks", ".", "append", "(", "task", ")", "if", "not", "nids", ":", "cprint", "(", "\"Everything's fine, no tricky tasks found\"", ",", "color", "=", "\"green\"", ")", "else", ":", "self", ".", "show_status", "(", "nids", "=", "nids", ")", "if", "not", "verbose", ":", "print", "(", "\"Use --verbose to print task history.\"", ")", "return", "for", "nid", ",", "task", "in", "zip", "(", "nids", ",", "tasks", ")", ":", "cprint", "(", "repr", "(", "task", ")", ",", "*", "*", "task", ".", "status", ".", "color_opts", ")", "self", ".", "show_history", "(", "nids", "=", "[", "nid", "]", ",", "full_history", "=", "False", ",", "metadata", "=", "False", ")", "#if task.num_restarts:", "# self.show_restarts(nids=[nid])", "if", "task", ".", "num_corrections", ":", "self", ".", "show_corrections", "(", "nids", "=", "[", "nid", "]", ")" ]
Print list of tricky tasks i.e. tasks that have been restarted or launched more than once or tasks with corrections. Args: verbose: Verbosity level. If > 0, task history and corrections (if any) are printed.
[ "Print", "list", "of", "tricky", "tasks", "i", ".", "e", ".", "tasks", "that", "have", "been", "restarted", "or", "launched", "more", "than", "once", "or", "tasks", "with", "corrections", "." ]
python
train
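A hedged usage sketch for show_tricky_tasks; the Flow.pickle_load entry point and the workdir path are assumptions about how an existing flow is reloaded.

from pymatgen.io.abinit.flows import Flow  # assumption: loader lives next to this method

flow = Flow.pickle_load('flow_workdir')  # placeholder path of a previously saved flow
flow.show_tricky_tasks()                 # summary of restarted/relaunched/corrected tasks
flow.show_tricky_tasks(verbose=1)        # also prints task history and corrections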
pypyr/pypyr-cli
pypyr/steps/py.py
https://github.com/pypyr/pypyr-cli/blob/4003f999cd5eb030b4c7407317de728f5115a80f/pypyr/steps/py.py#L11-L35
def run_step(context): """Executes dynamic python code. Context is a dictionary or dictionary-like. Context must contain key 'pycode' Will exec context['pycode'] as dynamically interpreted python statements. context is mandatory. When you execute the pipeline, it should look something like this: pipeline-runner [name here] 'pycode=print(1+1)'. """ logger.debug("started") context.assert_key_has_value(key='pycode', caller=__name__) logger.debug(f"Executing python string: {context['pycode']}") locals_dictionary = locals() exec(context['pycode'], globals(), locals_dictionary) # It looks like this dance might be unnecessary in python 3.6 logger.debug("looking for context update in exec") exec_context = locals_dictionary['context'] context.update(exec_context) logger.debug("exec output context merged with pipeline context") logger.debug("done")
[ "def", "run_step", "(", "context", ")", ":", "logger", ".", "debug", "(", "\"started\"", ")", "context", ".", "assert_key_has_value", "(", "key", "=", "'pycode'", ",", "caller", "=", "__name__", ")", "logger", ".", "debug", "(", "f\"Executing python string: {context['pycode']}\"", ")", "locals_dictionary", "=", "locals", "(", ")", "exec", "(", "context", "[", "'pycode'", "]", ",", "globals", "(", ")", ",", "locals_dictionary", ")", "# It looks like this dance might be unnecessary in python 3.6", "logger", ".", "debug", "(", "\"looking for context update in exec\"", ")", "exec_context", "=", "locals_dictionary", "[", "'context'", "]", "context", ".", "update", "(", "exec_context", ")", "logger", ".", "debug", "(", "\"exec output context merged with pipeline context\"", ")", "logger", ".", "debug", "(", "\"done\"", ")" ]
Executes dynamic python code. Context is a dictionary or dictionary-like. Context must contain key 'pycode' Will exec context['pycode'] as dynamically interpreted python statements. context is mandatory. When you execute the pipeline, it should look something like this: pipeline-runner [name here] 'pycode=print(1+1)'.
[ "Executes", "dynamic", "python", "code", "." ]
python
train
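A minimal sketch of the step above, assuming pypyr's Context (a dict subclass that provides assert_key_has_value): pycode runs under exec with the live context in scope, and the local name context is merged back afterwards.

from pypyr.context import Context  # assumption: dict subclass with assert_key_has_value
from pypyr.steps.py import run_step

context = Context({'pycode': "context['sum'] = 1 + 1"})
run_step(context)
print(context['sum'])  # -> 2, merged back from the exec'd locals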
salimm/pylods
pylods/backend/pylodsc/mapper.py
https://github.com/salimm/pylods/blob/d089e2a9afb1fa8cb6c754933fc574b512757c40/pylods/backend/pylodsc/mapper.py#L95-L107
def copy(self): ''' Makes a clone copy of the mapper. It won't clone the serializers or deserializers, and it won't copy the events. ''' try: tmp = self.__class__() except Exception: tmp = self.__class__(self._pdict) tmp._serializers = self._serializers tmp.__deserializers = self.__deserializers return tmp
[ "def", "copy", "(", "self", ")", ":", "try", ":", "tmp", "=", "self", ".", "__class__", "(", ")", "except", "Exception", ":", "tmp", "=", "self", ".", "__class__", "(", "self", ".", "_pdict", ")", "tmp", ".", "_serializers", "=", "self", ".", "_serializers", "tmp", ".", "__deserializers", "=", "self", ".", "__deserializers", "return", "tmp" ]
Makes a clone copy of the mapper. It won't clone the serializers or deserializers, and it won't copy the events.
[ "Makes", "a", "clone", "copy", "of", "the", "mapper", ".", "It", "won", "t", "clone", "the", "serializers", "or", "deserializers", ",", "and", "it", "won", "t", "copy", "the", "events", "." ]
python
train
SoCo/SoCo
soco/core.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/soco/core.py#L1531-L1581
def __get_favorites(self, favorite_type, start=0, max_items=100): """ Helper method for `get_favorite_radio_*` methods. Args: favorite_type (str): Specify either `RADIO_STATIONS` or `RADIO_SHOWS`. start (int): Which number to start the retrieval from. Used for paging. max_items (int): The total number of results to return. """ if favorite_type not in (RADIO_SHOWS, RADIO_STATIONS): favorite_type = SONOS_FAVORITES response = self.contentDirectory.Browse([ ('ObjectID', 'FV:2' if favorite_type is SONOS_FAVORITES else 'R:0/{0}'.format(favorite_type)), ('BrowseFlag', 'BrowseDirectChildren'), ('Filter', '*'), ('StartingIndex', start), ('RequestedCount', max_items), ('SortCriteria', '') ]) result = {} favorites = [] results_xml = response['Result'] if results_xml != '': # Favorites are returned in DIDL-Lite format metadata = XML.fromstring(really_utf8(results_xml)) for item in metadata.findall( '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container' if favorite_type == RADIO_SHOWS else '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item'): favorite = {} favorite['title'] = item.findtext( '{http://purl.org/dc/elements/1.1/}title') favorite['uri'] = item.findtext( '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res') if favorite_type == SONOS_FAVORITES: favorite['meta'] = item.findtext( '{urn:schemas-rinconnetworks-com:metadata-1-0/}resMD') favorites.append(favorite) result['total'] = response['TotalMatches'] result['returned'] = len(favorites) result['favorites'] = favorites return result
[ "def", "__get_favorites", "(", "self", ",", "favorite_type", ",", "start", "=", "0", ",", "max_items", "=", "100", ")", ":", "if", "favorite_type", "not", "in", "(", "RADIO_SHOWS", ",", "RADIO_STATIONS", ")", ":", "favorite_type", "=", "SONOS_FAVORITES", "response", "=", "self", ".", "contentDirectory", ".", "Browse", "(", "[", "(", "'ObjectID'", ",", "'FV:2'", "if", "favorite_type", "is", "SONOS_FAVORITES", "else", "'R:0/{0}'", ".", "format", "(", "favorite_type", ")", ")", ",", "(", "'BrowseFlag'", ",", "'BrowseDirectChildren'", ")", ",", "(", "'Filter'", ",", "'*'", ")", ",", "(", "'StartingIndex'", ",", "start", ")", ",", "(", "'RequestedCount'", ",", "max_items", ")", ",", "(", "'SortCriteria'", ",", "''", ")", "]", ")", "result", "=", "{", "}", "favorites", "=", "[", "]", "results_xml", "=", "response", "[", "'Result'", "]", "if", "results_xml", "!=", "''", ":", "# Favorites are returned in DIDL-Lite format", "metadata", "=", "XML", ".", "fromstring", "(", "really_utf8", "(", "results_xml", ")", ")", "for", "item", "in", "metadata", ".", "findall", "(", "'{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container'", "if", "favorite_type", "==", "RADIO_SHOWS", "else", "'{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item'", ")", ":", "favorite", "=", "{", "}", "favorite", "[", "'title'", "]", "=", "item", ".", "findtext", "(", "'{http://purl.org/dc/elements/1.1/}title'", ")", "favorite", "[", "'uri'", "]", "=", "item", ".", "findtext", "(", "'{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res'", ")", "if", "favorite_type", "==", "SONOS_FAVORITES", ":", "favorite", "[", "'meta'", "]", "=", "item", ".", "findtext", "(", "'{urn:schemas-rinconnetworks-com:metadata-1-0/}resMD'", ")", "favorites", ".", "append", "(", "favorite", ")", "result", "[", "'total'", "]", "=", "response", "[", "'TotalMatches'", "]", "result", "[", "'returned'", "]", "=", "len", "(", "favorites", ")", "result", "[", "'favorites'", "]", "=", "favorites", "return", "result" ]
Helper method for `get_favorite_radio_*` methods. Args: favorite_type (str): Specify either `RADIO_STATIONS` or `RADIO_SHOWS`. start (int): Which number to start the retrieval from. Used for paging. max_items (int): The total number of results to return.
[ "Helper", "method", "for", "get_favorite_radio_", "*", "methods", "." ]
python
train
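__get_favorites is name-mangled, so a hedged sketch goes through the public wrappers that supply favorite_type; the wrapper name and the speaker IP are assumptions based on the docstring.

import soco

zone = soco.SoCo('192.168.1.68')  # placeholder speaker IP
result = zone.get_favorite_radio_stations(start=0, max_items=10)  # assumed wrapper over __get_favorites
print(result['total'], result['returned'])
for fav in result['favorites']:
    print(fav['title'], fav['uri'])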
bloomreach/s4cmd
s4cmd.py
https://github.com/bloomreach/s4cmd/blob/bb51075bf43703e7cd95aa39288cf7732ec13a6d/s4cmd.py#L854-L864
def get_single_file(self, pool, source, target): '''Download a single file or a directory by adding a task into the queue.''' if source[-1] == PATH_SEP: if self.opt.recursive: basepath = S3URL(source).path for f in (f for f in self.s3walk(source) if not f['is_dir']): pool.download(f['name'], os.path.join(target, os.path.relpath(S3URL(f['name']).path, basepath))) else: message('omitting directory "%s".' % source) else: pool.download(source, target)
[ "def", "get_single_file", "(", "self", ",", "pool", ",", "source", ",", "target", ")", ":", "if", "source", "[", "-", "1", "]", "==", "PATH_SEP", ":", "if", "self", ".", "opt", ".", "recursive", ":", "basepath", "=", "S3URL", "(", "source", ")", ".", "path", "for", "f", "in", "(", "f", "for", "f", "in", "self", ".", "s3walk", "(", "source", ")", "if", "not", "f", "[", "'is_dir'", "]", ")", ":", "pool", ".", "download", "(", "f", "[", "'name'", "]", ",", "os", ".", "path", ".", "join", "(", "target", ",", "os", ".", "path", ".", "relpath", "(", "S3URL", "(", "f", "[", "'name'", "]", ")", ".", "path", ",", "basepath", ")", ")", ")", "else", ":", "message", "(", "'omitting directory \"%s\".'", "%", "source", ")", "else", ":", "pool", ".", "download", "(", "source", ",", "target", ")" ]
Download a single file or a directory by adding a task into the queue.
[ "Download", "a", "single", "file", "or", "a", "directory", "by", "adding", "a", "task", "into", "the", "queue", "." ]
python
test
eandersson/amqpstorm
amqpstorm/management/virtual_host.py
https://github.com/eandersson/amqpstorm/blob/38330906c0af19eea482f43c5ce79bab98a1e064/amqpstorm/management/virtual_host.py#L10-L21
def get(self, virtual_host): """Get Virtual Host details. :param str virtual_host: Virtual host name :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict """ virtual_host = quote(virtual_host, '') return self.http_client.get(API_VIRTUAL_HOST % virtual_host)
[ "def", "get", "(", "self", ",", "virtual_host", ")", ":", "virtual_host", "=", "quote", "(", "virtual_host", ",", "''", ")", "return", "self", ".", "http_client", ".", "get", "(", "API_VIRTUAL_HOST", "%", "virtual_host", ")" ]
Get Virtual Host details. :param str virtual_host: Virtual host name :raises ApiError: Raises if the remote server encountered an error. :raises ApiConnectionError: Raises if there was a connectivity issue. :rtype: dict
[ "Get", "Virtual", "Host", "details", "." ]
python
train
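A hedged sketch of reaching VirtualHost.get through amqpstorm's management client; the ManagementApi entry point, URL, and credentials are assumptions.

from amqpstorm.management import ManagementApi  # assumption: exposes .virtual_host

api = ManagementApi('http://localhost:15672', 'guest', 'guest')
details = api.virtual_host.get('/')  # '/' is quoted to %2F before the HTTP call
print(details)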
maximtrp/scikit-posthocs
scikit_posthocs/_outliers.py
https://github.com/maximtrp/scikit-posthocs/blob/5476b09e2a325cd4e31c0b0bc6906ab5cd77fc5d/scikit_posthocs/_outliers.py#L115-L191
def outliers_tietjen(x, k, hypo = False, alpha = 0.05): """ Tietjen-Moore test [1]_ to detect multiple outliers in a univariate data set that follows an approximately normal distribution. The Tietjen-Moore test [2]_ is a generalization of the Grubbs' test to the case of multiple outliers. If testing for a single outlier, the Tietjen-Moore test is equivalent to the Grubbs' test. The null hypothesis implies that there are no outliers in the data set. Parameters ---------- x : array_like or ndarray, 1d An array, any object exposing the array interface, containing data to test for an outlier in. k : int Number of potential outliers to test for. Function tests for outliers in both tails. hypo : bool, optional Specifies whether to return a bool value of a hypothesis test result. Returns True when we can reject the null hypothesis. Otherwise, False. Available options are: 1) True - return a hypothesis test result 2) False - return a filtered array without outliers (default) alpha : float, optional Significance level for a hypothesis test. Default is 0.05. Returns ------- Numpy array if hypo is False or a bool value of a hypothesis test result. Notes ----- .. [1] Tietjen and Moore (August 1972), Some Grubbs-Type Statistics for the Detection of Outliers, Technometrics, 14(3), pp. 583-597. .. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35h2.htm Examples -------- >>> x = np.array([-1.40, -0.44, -0.30, -0.24, -0.22, -0.13, -0.05, 0.06, 0.10, 0.18, 0.20, 0.39, 0.48, 0.63, 1.01]) >>> outliers_tietjen(x, 2) array([-0.44, -0.3 , -0.24, -0.22, -0.13, -0.05, 0.06, 0.1 , 0.18, 0.2 , 0.39, 0.48, 0.63]) """ n = x.size def tietjen(x_, k_): x_mean = x_.mean() r = np.abs(x_ - x_mean) z = x_[r.argsort()] E = np.sum((z[:-k_] - z[:-k_].mean()) ** 2) / np.sum((z - x_mean) ** 2) return E E_x = tietjen(x, k) E_norm = np.zeros(10000) for i in np.arange(10000): norm = np.random.normal(size=n) E_norm[i] = tietjen(norm, k) CV = np.percentile(E_norm, alpha * 100) result = E_x < CV if hypo: return result else: if result: ind = np.argpartition(np.abs(x - x.mean()), -k)[-k:] return np.delete(x, ind) else: return x
[ "def", "outliers_tietjen", "(", "x", ",", "k", ",", "hypo", "=", "False", ",", "alpha", "=", "0.05", ")", ":", "n", "=", "x", ".", "size", "def", "tietjen", "(", "x_", ",", "k_", ")", ":", "x_mean", "=", "x_", ".", "mean", "(", ")", "r", "=", "np", ".", "abs", "(", "x_", "-", "x_mean", ")", "z", "=", "x_", "[", "r", ".", "argsort", "(", ")", "]", "E", "=", "np", ".", "sum", "(", "(", "z", "[", ":", "-", "k_", "]", "-", "z", "[", ":", "-", "k_", "]", ".", "mean", "(", ")", ")", "**", "2", ")", "/", "np", ".", "sum", "(", "(", "z", "-", "x_mean", ")", "**", "2", ")", "return", "E", "E_x", "=", "tietjen", "(", "x", ",", "k", ")", "E_norm", "=", "np", ".", "zeros", "(", "10000", ")", "for", "i", "in", "np", ".", "arange", "(", "10000", ")", ":", "norm", "=", "np", ".", "random", ".", "normal", "(", "size", "=", "n", ")", "E_norm", "[", "i", "]", "=", "tietjen", "(", "norm", ",", "k", ")", "CV", "=", "np", ".", "percentile", "(", "E_norm", ",", "alpha", "*", "100", ")", "result", "=", "E_x", "<", "CV", "if", "hypo", ":", "return", "result", "else", ":", "if", "result", ":", "ind", "=", "np", ".", "argpartition", "(", "np", ".", "abs", "(", "x", "-", "x", ".", "mean", "(", ")", ")", ",", "-", "k", ")", "[", "-", "k", ":", "]", "return", "np", ".", "delete", "(", "x", ",", "ind", ")", "else", ":", "return", "x" ]
Tietjen-Moore test [1]_ to detect multiple outliers in a univariate data set that follows an approximately normal distribution. The Tietjen-Moore test [2]_ is a generalization of the Grubbs' test to the case of multiple outliers. If testing for a single outlier, the Tietjen-Moore test is equivalent to the Grubbs' test. The null hypothesis implies that there are no outliers in the data set. Parameters ---------- x : array_like or ndarray, 1d An array, any object exposing the array interface, containing data to test for an outlier in. k : int Number of potential outliers to test for. Function tests for outliers in both tails. hypo : bool, optional Specifies whether to return a bool value of a hypothesis test result. Returns True when we can reject the null hypothesis. Otherwise, False. Available options are: 1) True - return a hypothesis test result 2) False - return a filtered array without outliers (default) alpha : float, optional Significance level for a hypothesis test. Default is 0.05. Returns ------- Numpy array if hypo is False or a bool value of a hypothesis test result. Notes ----- .. [1] Tietjen and Moore (August 1972), Some Grubbs-Type Statistics for the Detection of Outliers, Technometrics, 14(3), pp. 583-597. .. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35h2.htm Examples -------- >>> x = np.array([-1.40, -0.44, -0.30, -0.24, -0.22, -0.13, -0.05, 0.06, 0.10, 0.18, 0.20, 0.39, 0.48, 0.63, 1.01]) >>> outliers_tietjen(x, 2) array([-0.44, -0.3 , -0.24, -0.22, -0.13, -0.05, 0.06, 0.1 , 0.18, 0.2 , 0.39, 0.48, 0.63])
[ "Tietjen", "-", "Moore", "test", "[", "1", "]", "_", "to", "detect", "multiple", "outliers", "in", "a", "univariate", "data", "set", "that", "follows", "an", "approximately", "normal", "distribution", ".", "The", "Tietjen", "-", "Moore", "test", "[", "2", "]", "_", "is", "a", "generalization", "of", "the", "Grubbs", "test", "to", "the", "case", "of", "multiple", "outliers", ".", "If", "testing", "for", "a", "single", "outlier", "the", "Tietjen", "-", "Moore", "test", "is", "equivalent", "to", "the", "Grubbs", "test", "." ]
python
train
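A short follow-up to the docstring's example showing the hypo=True branch, assuming the package re-exports the helper. The critical value is estimated from 10000 simulated standard-normal samples, so the decision is Monte Carlo and can wobble near the boundary.

import numpy as np
from scikit_posthocs import outliers_tietjen  # assumption: re-exported at package level

x = np.array([-1.40, -0.44, -0.30, -0.24, -0.22, -0.13, -0.05,
              0.06, 0.10, 0.18, 0.20, 0.39, 0.48, 0.63, 1.01])
print(outliers_tietjen(x, 2, hypo=True))  # True -> reject H0; the two extremes are outliers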
inveniosoftware-contrib/invenio-workflows
invenio_workflows/engine.py
https://github.com/inveniosoftware-contrib/invenio-workflows/blob/9c09fd29509a3db975ac2aba337e6760d8cfd3c2/invenio_workflows/engine.py#L368-L391
def WaitProcessing(obj, eng, callbacks, exc_info): """Take actions when WaitProcessing is raised. ..note:: We're essentially doing HaltProcessing, plus `obj.set_action` and object status `WAITING` instead of `HALTED`. This is not present in TransitionActions so that's why it is not calling super in this case. """ e = exc_info[1] obj.set_action(e.action, e.message) obj.save(status=eng.object_status.WAITING, callback_pos=eng.state.callback_pos, id_workflow=eng.uuid) eng.save(WorkflowStatus.HALTED) eng.log.warning("Workflow '%s' waiting at task %s with message: %s", eng.name, eng.current_taskname or "Unknown", e.message) db.session.commit() # Call super which will reraise TransitionActions.HaltProcessing( obj, eng, callbacks, exc_info )
[ "def", "WaitProcessing", "(", "obj", ",", "eng", ",", "callbacks", ",", "exc_info", ")", ":", "e", "=", "exc_info", "[", "1", "]", "obj", ".", "set_action", "(", "e", ".", "action", ",", "e", ".", "message", ")", "obj", ".", "save", "(", "status", "=", "eng", ".", "object_status", ".", "WAITING", ",", "callback_pos", "=", "eng", ".", "state", ".", "callback_pos", ",", "id_workflow", "=", "eng", ".", "uuid", ")", "eng", ".", "save", "(", "WorkflowStatus", ".", "HALTED", ")", "eng", ".", "log", ".", "warning", "(", "\"Workflow '%s' waiting at task %s with message: %s\"", ",", "eng", ".", "name", ",", "eng", ".", "current_taskname", "or", "\"Unknown\"", ",", "e", ".", "message", ")", "db", ".", "session", ".", "commit", "(", ")", "# Call super which will reraise", "TransitionActions", ".", "HaltProcessing", "(", "obj", ",", "eng", ",", "callbacks", ",", "exc_info", ")" ]
Take actions when WaitProcessing is raised. .. note:: We're essentially doing HaltProcessing, plus `obj.set_action` and object status `WAITING` instead of `HALTED`. This is not present in TransitionActions, which is why it is not calling super in this case.
[ "Take", "actions", "when", "WaitProcessing", "is", "raised", "." ]
python
train
StellarCN/py-stellar-base
stellar_base/operation.py
https://github.com/StellarCN/py-stellar-base/blob/cce2e782064fb3955c85e1696e630d67b1010848/stellar_base/operation.py#L51-L60
def to_xdr_object(self): """Creates an XDR Operation object that represents this :class:`Operation`. """ try: source_account = [account_xdr_object(self.source)] except StellarAddressInvalidError: source_account = [] return Xdr.types.Operation(source_account, self.body)
[ "def", "to_xdr_object", "(", "self", ")", ":", "try", ":", "source_account", "=", "[", "account_xdr_object", "(", "self", ".", "source", ")", "]", "except", "StellarAddressInvalidError", ":", "source_account", "=", "[", "]", "return", "Xdr", ".", "types", ".", "Operation", "(", "source_account", ",", "self", ".", "body", ")" ]
Creates an XDR Operation object that represents this :class:`Operation`.
[ "Creates", "an", "XDR", "Operation", "object", "that", "represents", "this", ":", "class", ":", "Operation", "." ]
python
train
angr/angr
angr/factory.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/factory.py#L41-L54
def successors(self, *args, **kwargs): """ Perform execution using any applicable engine. Enumerate the current engines and use the first one that works. Return a SimSuccessors object classifying the results of the run. :param state: The state to analyze :param addr: optional, an address to execute at instead of the state's ip :param jumpkind: optional, the jumpkind of the previous exit :param inline: This is an inline execution. Do not bother copying the state. Additional keyword arguments will be passed directly into each engine's process method. """ return self.project.engines.successors(*args, **kwargs)
[ "def", "successors", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "project", ".", "engines", ".", "successors", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Perform execution using any applicable engine. Enumerate the current engines and use the first one that works. Return a SimSuccessors object classifying the results of the run. :param state: The state to analyze :param addr: optional, an address to execute at instead of the state's ip :param jumpkind: optional, the jumpkind of the previous exit :param inline: This is an inline execution. Do not bother copying the state. Additional keyword arguments will be passed directly into each engine's process method.
[ "Perform", "execution", "using", "any", "applicable", "engine", ".", "Enumerate", "the", "current", "engines", "and", "use", "the", "first", "one", "that", "works", ".", "Return", "a", "SimSuccessors", "object", "classifying", "the", "results", "of", "the", "run", "." ]
python
train
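A brief sketch of driving a single execution step through the factory shown above; the binary path is a placeholder.

import angr

proj = angr.Project('/bin/true', auto_load_libs=False)  # placeholder binary
state = proj.factory.entry_state()
succ = proj.factory.successors(state)  # the first applicable engine performs the step
print(succ.flat_successors)            # successor states classified by the run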
andymccurdy/redis-py
redis/client.py
https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/client.py#L1641-L1643
def lset(self, name, index, value): "Set ``index`` of list ``name`` to ``value``" return self.execute_command('LSET', name, index, value)
[ "def", "lset", "(", "self", ",", "name", ",", "index", ",", "value", ")", ":", "return", "self", ".", "execute_command", "(", "'LSET'", ",", "name", ",", "index", ",", "value", ")" ]
Set ``index`` of list ``name`` to ``value``
[ "Set", "index", "of", "list", "name", "to", "value" ]
python
train
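A short usage sketch of LSET, assuming a local Redis server: it overwrites the element at an existing index and errors on an out-of-range one.

import redis

r = redis.Redis()                 # assumes a server on localhost:6379
r.delete('mylist')
r.rpush('mylist', 'a', 'b', 'c')
r.lset('mylist', 1, 'B')          # list is now ['a', 'B', 'c']
print(r.lrange('mylist', 0, -1))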
insilichem/ommprotocol
ommprotocol/io.py
https://github.com/insilichem/ommprotocol/blob/7283fddba7203e5ac3542fdab41fc1279d3b444e/ommprotocol/io.py#L434-L441
def write_pdb(self, path): """ Outputs a PDB file with the current contents of the system """ if self.master is None and self.positions is None: raise ValueError('Topology and positions are needed to write output files.') with open(path, 'w') as f: PDBFile.writeFile(self.topology, self.positions, f)
[ "def", "write_pdb", "(", "self", ",", "path", ")", ":", "if", "self", ".", "master", "is", "None", "and", "self", ".", "positions", "is", "None", ":", "raise", "ValueError", "(", "'Topology and positions are needed to write output files.'", ")", "with", "open", "(", "path", ",", "'w'", ")", "as", "f", ":", "PDBFile", ".", "writeFile", "(", "self", ".", "topology", ",", "self", ".", "positions", ",", "f", ")" ]
Outputs a PDB file with the current contents of the system
[ "Outputs", "a", "PDB", "file", "with", "the", "current", "contents", "of", "the", "system" ]
python
train
NikolayDachev/jadm
lib/paramiko-1.14.1/paramiko/util.py
https://github.com/NikolayDachev/jadm/blob/12bb550445edfcd87506f7cba7a6a35d413c5511/lib/paramiko-1.14.1/paramiko/util.py#L39-L56
def inflate_long(s, always_positive=False): """turns a normalized byte string into a long-int (adapted from Crypto.Util.number)""" out = long(0) negative = 0 if not always_positive and (len(s) > 0) and (byte_ord(s[0]) >= 0x80): negative = 1 if len(s) % 4: filler = zero_byte if negative: filler = max_byte # never convert this to ``s +=`` because this is a string, not a number # noinspection PyAugmentAssignment s = filler * (4 - len(s) % 4) + s for i in range(0, len(s), 4): out = (out << 32) + struct.unpack('>I', s[i:i+4])[0] if negative: out -= (long(1) << (8 * len(s))) return out
[ "def", "inflate_long", "(", "s", ",", "always_positive", "=", "False", ")", ":", "out", "=", "long", "(", "0", ")", "negative", "=", "0", "if", "not", "always_positive", "and", "(", "len", "(", "s", ")", ">", "0", ")", "and", "(", "byte_ord", "(", "s", "[", "0", "]", ")", ">=", "0x80", ")", ":", "negative", "=", "1", "if", "len", "(", "s", ")", "%", "4", ":", "filler", "=", "zero_byte", "if", "negative", ":", "filler", "=", "max_byte", "# never convert this to ``s +=`` because this is a string, not a number", "# noinspection PyAugmentAssignment", "s", "=", "filler", "*", "(", "4", "-", "len", "(", "s", ")", "%", "4", ")", "+", "s", "for", "i", "in", "range", "(", "0", ",", "len", "(", "s", ")", ",", "4", ")", ":", "out", "=", "(", "out", "<<", "32", ")", "+", "struct", ".", "unpack", "(", "'>I'", ",", "s", "[", "i", ":", "i", "+", "4", "]", ")", "[", "0", "]", "if", "negative", ":", "out", "-=", "(", "long", "(", "1", ")", "<<", "(", "8", "*", "len", "(", "s", ")", ")", ")", "return", "out" ]
turns a normalized byte string into a long-int (adapted from Crypto.Util.number)
[ "turns", "a", "normalized", "byte", "string", "into", "a", "long", "-", "int", "(", "adapted", "from", "Crypto", ".", "Util", ".", "number", ")" ]
python
train
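A sketch of inflate_long's two's-complement behaviour; on Python 3 the result matches int.from_bytes(s, 'big', signed=not always_positive) for non-empty input.

from paramiko.util import inflate_long  # same helper as the vendored copy above

print(inflate_long(b'\x00\xff'))                    # 255: high bit of first byte clear
print(inflate_long(b'\xff'))                        # -1: high bit set, two's complement
print(inflate_long(b'\xff', always_positive=True))  # 255: padded with zero bytes instead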
kakwa/ldapcherry
ldapcherry/__init__.py
https://github.com/kakwa/ldapcherry/blob/b5e7cb6a44065abc30d164e72981b3713a172dda/ldapcherry/__init__.py#L1010-L1025
def searchadmin(self, searchstring=None): """ search user page """ self._check_auth(must_admin=True) is_admin = self._check_admin() if searchstring is not None: res = self._search(searchstring) else: res = None attrs_list = self.attributes.get_search_attributes() return self.temp['searchadmin.tmpl'].render( searchresult=res, attrs_list=attrs_list, is_admin=is_admin, custom_js=self.custom_js, notifications=self._empty_notification(), )
[ "def", "searchadmin", "(", "self", ",", "searchstring", "=", "None", ")", ":", "self", ".", "_check_auth", "(", "must_admin", "=", "True", ")", "is_admin", "=", "self", ".", "_check_admin", "(", ")", "if", "searchstring", "is", "not", "None", ":", "res", "=", "self", ".", "_search", "(", "searchstring", ")", "else", ":", "res", "=", "None", "attrs_list", "=", "self", ".", "attributes", ".", "get_search_attributes", "(", ")", "return", "self", ".", "temp", "[", "'searchadmin.tmpl'", "]", ".", "render", "(", "searchresult", "=", "res", ",", "attrs_list", "=", "attrs_list", ",", "is_admin", "=", "is_admin", ",", "custom_js", "=", "self", ".", "custom_js", ",", "notifications", "=", "self", ".", "_empty_notification", "(", ")", ",", ")" ]
search user page
[ "search", "user", "page" ]
python
train
smarie/python-valid8
valid8/entry_points_annotations.py
https://github.com/smarie/python-valid8/blob/5e15d1de11602933c5114eb9f73277ad91d97800/valid8/entry_points_annotations.py#L352-L369
def validate_field(cls, field_name, *validation_func, # type: ValidationFuncs **kwargs): # type: (...) -> Callable """ A class decorator. It goes through all class variables and for all of those that are descriptors with a __set__, it wraps the descriptors' setter function with a `validate_arg` annotation :param field_name: :param validation_func: :param help_msg: :param error_type: :param none_policy: :param kw_context_args: :return """ return decorate_cls_with_validation(cls, field_name, *validation_func, **kwargs)
[ "def", "validate_field", "(", "cls", ",", "field_name", ",", "*", "validation_func", ",", "# type: ValidationFuncs", "*", "*", "kwargs", ")", ":", "# type: (...) -> Callable", "return", "decorate_cls_with_validation", "(", "cls", ",", "field_name", ",", "*", "validation_func", ",", "*", "*", "kwargs", ")" ]
A class decorator. It goes through all class variables and for all of those that are descriptors with a __set__, it wraps the descriptors' setter function with a `validate_arg` annotation :param field_name: :param validation_func: :param help_msg: :param error_type: :param none_policy: :param kw_context_args: :return
[ "A", "class", "decorator", ".", "It", "goes", "through", "all", "class", "variables", "and", "for", "all", "of", "those", "that", "are", "descriptors", "with", "a", "__set__", "it", "wraps", "the", "descriptors", "setter", "function", "with", "a", "validate_arg", "annotation" ]
python
train
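A hedged sketch of validate_field on a descriptor field; the top-level import and the location of the gt helper are assumptions, and per the docstring only descriptors with a __set__ (such as the property below) get their setter wrapped.

from valid8 import validate_field     # assumption: exported at package level
from valid8.validation_lib import gt  # assumption: comparison helpers live here

@validate_field('x', gt(0), help_msg='x must be strictly positive')
class Point:
    def __init__(self, x):
        self.x = x  # routed through the wrapped property setter

    @property
    def x(self):
        return self._x

    @x.setter
    def x(self, value):
        self._x = value

Point(1)      # passes validation
# Point(-1)   # would raise a validation error from the wrapped setter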
vmware/pyvmomi
pyVim/connect.py
https://github.com/vmware/pyvmomi/blob/3ffcb23bf77d757175c0d5216ba9a25345d824cd/pyVim/connect.py#L453-L463
def __Logout(si): """ Disconnect (logout) service instance @param si: Service instance (returned from Connect) """ try: if si: content = si.RetrieveContent() content.sessionManager.Logout() except Exception as e: pass
[ "def", "__Logout", "(", "si", ")", ":", "try", ":", "if", "si", ":", "content", "=", "si", ".", "RetrieveContent", "(", ")", "content", ".", "sessionManager", ".", "Logout", "(", ")", "except", "Exception", "as", "e", ":", "pass" ]
Disconnect (logout) service instance @param si: Service instance (returned from Connect)
[ "Disconnect", "(", "logout", ")", "service", "instance" ]
python
train
project-rig/rig
setup.py
https://github.com/project-rig/rig/blob/3a3e053d3214899b6d68758685835de0afd5542b/setup.py#L13-L55
def replace_local_hyperlinks( text, base_url="https://github.com/project-rig/rig/blob/master/"): """Replace local hyperlinks in RST with absolute addresses using the given base URL. This is used to make links in the long description function correctly outside of the repository (e.g. when published on PyPi). NOTE: This may need adjusting if further syntax is used. """ def get_new_url(url): return base_url + url[2:] # Deal with anonymous URLS for match in re.finditer(r"^__ (?P<url>\./.*)", text, re.MULTILINE): orig_url = match.groupdict()["url"] url = get_new_url(orig_url) text = re.sub("^__ {}".format(orig_url), "__ {}".format(url), text, flags=re.MULTILINE) # Deal with named URLS for match in re.finditer(r"^\.\. _(?P<identifier>[^:]*): (?P<url>\./.*)", text, re.MULTILINE): identifier = match.groupdict()["identifier"] orig_url = match.groupdict()["url"] url = get_new_url(orig_url) text = re.sub( "^\.\. _{}: {}".format(identifier, orig_url), ".. _{}: {}".format(identifier, url), text, flags=re.MULTILINE) # Deal with image URLS for match in re.finditer(r"^\.\. image:: (?P<url>\./.*)", text, re.MULTILINE): orig_url = match.groupdict()["url"] url = get_new_url(orig_url) text = text.replace(".. image:: {}".format(orig_url), ".. image:: {}".format(url)) return text
[ "def", "replace_local_hyperlinks", "(", "text", ",", "base_url", "=", "\"https://github.com/project-rig/rig/blob/master/\"", ")", ":", "def", "get_new_url", "(", "url", ")", ":", "return", "base_url", "+", "url", "[", "2", ":", "]", "# Deal with anonymous URLS", "for", "match", "in", "re", ".", "finditer", "(", "r\"^__ (?P<url>\\./.*)\"", ",", "text", ",", "re", ".", "MULTILINE", ")", ":", "orig_url", "=", "match", ".", "groupdict", "(", ")", "[", "\"url\"", "]", "url", "=", "get_new_url", "(", "orig_url", ")", "text", "=", "re", ".", "sub", "(", "\"^__ {}\"", ".", "format", "(", "orig_url", ")", ",", "\"__ {}\"", ".", "format", "(", "url", ")", ",", "text", ",", "flags", "=", "re", ".", "MULTILINE", ")", "# Deal with named URLS", "for", "match", "in", "re", ".", "finditer", "(", "r\"^\\.\\. _(?P<identifier>[^:]*): (?P<url>\\./.*)\"", ",", "text", ",", "re", ".", "MULTILINE", ")", ":", "identifier", "=", "match", ".", "groupdict", "(", ")", "[", "\"identifier\"", "]", "orig_url", "=", "match", ".", "groupdict", "(", ")", "[", "\"url\"", "]", "url", "=", "get_new_url", "(", "orig_url", ")", "text", "=", "re", ".", "sub", "(", "\"^\\.\\. _{}: {}\"", ".", "format", "(", "identifier", ",", "orig_url", ")", ",", "\".. _{}: {}\"", ".", "format", "(", "identifier", ",", "url", ")", ",", "text", ",", "flags", "=", "re", ".", "MULTILINE", ")", "# Deal with image URLS", "for", "match", "in", "re", ".", "finditer", "(", "r\"^\\.\\. image:: (?P<url>\\./.*)\"", ",", "text", ",", "re", ".", "MULTILINE", ")", ":", "orig_url", "=", "match", ".", "groupdict", "(", ")", "[", "\"url\"", "]", "url", "=", "get_new_url", "(", "orig_url", ")", "text", "=", "text", ".", "replace", "(", "\".. image:: {}\"", ".", "format", "(", "orig_url", ")", ",", "\".. image:: {}\"", ".", "format", "(", "url", ")", ")", "return", "text" ]
Replace local hyperlinks in RST with absolute addresses using the given base URL. This is used to make links in the long description function correctly outside of the repository (e.g. when published on PyPi). NOTE: This may need adjusting if further syntax is used.
[ "Replace", "local", "hyperlinks", "in", "RST", "with", "absolute", "addresses", "using", "the", "given", "base", "URL", "." ]
python
train
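A quick before/after for replace_local_hyperlinks as defined above; relative './' targets are rewritten to absolute URLs under the default base.

text = (
    '.. _docs: ./docs/index.rst\n'
    '.. image:: ./logo.png\n'
)
print(replace_local_hyperlinks(text))
# .. _docs: https://github.com/project-rig/rig/blob/master/docs/index.rst
# .. image:: https://github.com/project-rig/rig/blob/master/logo.png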
celiao/rtsimple
rtsimple/lists.py
https://github.com/celiao/rtsimple/blob/91f82cbd61a745bbe3a2cca54dfbb6b0ac123b86/rtsimple/lists.py#L92-L106
def movies_opening(self, **kwargs): """Gets the current opening movies from the API. Args: limit (optional): limits the number of movies returned, default=10 country (optional): localized data for selected country, default="us" Returns: A dict representation of the JSON returned from the API. """ path = self._get_path('movies_opening') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
[ "def", "movies_opening", "(", "self", ",", "*", "*", "kwargs", ")", ":", "path", "=", "self", ".", "_get_path", "(", "'movies_opening'", ")", "response", "=", "self", ".", "_GET", "(", "path", ",", "kwargs", ")", "self", ".", "_set_attrs_to_values", "(", "response", ")", "return", "response" ]
Gets the current opening movies from the API. Args: limit (optional): limits the number of movies returned, default=10 country (optional): localized data for selected country, default="us" Returns: A dict representation of the JSON returned from the API.
[ "Gets", "the", "current", "opening", "movies", "from", "the", "API", "." ]
python
train
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py#L781-L792
def get_vnetwork_vswitches_output_instance_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_vnetwork_vswitches = ET.Element("get_vnetwork_vswitches") config = get_vnetwork_vswitches output = ET.SubElement(get_vnetwork_vswitches, "output") instance_id = ET.SubElement(output, "instance-id") instance_id.text = kwargs.pop('instance_id') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_vnetwork_vswitches_output_instance_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_vnetwork_vswitches", "=", "ET", ".", "Element", "(", "\"get_vnetwork_vswitches\"", ")", "config", "=", "get_vnetwork_vswitches", "output", "=", "ET", ".", "SubElement", "(", "get_vnetwork_vswitches", ",", "\"output\"", ")", "instance_id", "=", "ET", ".", "SubElement", "(", "output", ",", "\"instance-id\"", ")", "instance_id", ".", "text", "=", "kwargs", ".", "pop", "(", "'instance_id'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
dmlc/gluon-nlp
scripts/word_embeddings/extract_vocab.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/word_embeddings/extract_vocab.py#L32-L44
def parse_args(): """Parse command line arguments.""" parser = argparse.ArgumentParser( description='Vocabulary extractor.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--max-size', type=int, default=None) parser.add_argument('--min-freq', type=int, default=5) parser.add_argument('--max-word-length', type=int, default=50) parser.add_argument('files', type=str, nargs='+') parser.add_argument('--vocab-output', type=str, default='vocab.json') parser.add_argument('--counts-output', type=str, default='counts.json') args = parser.parse_args() return args
[ "def", "parse_args", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "'Vocabulary extractor.'", ",", "formatter_class", "=", "argparse", ".", "ArgumentDefaultsHelpFormatter", ")", "parser", ".", "add_argument", "(", "'--max-size'", ",", "type", "=", "int", ",", "default", "=", "None", ")", "parser", ".", "add_argument", "(", "'--min-freq'", ",", "type", "=", "int", ",", "default", "=", "5", ")", "parser", ".", "add_argument", "(", "'--max-word-length'", ",", "type", "=", "int", ",", "default", "=", "50", ")", "parser", ".", "add_argument", "(", "'files'", ",", "type", "=", "str", ",", "nargs", "=", "'+'", ")", "parser", ".", "add_argument", "(", "'--vocab-output'", ",", "type", "=", "str", ",", "default", "=", "'vocab.json'", ")", "parser", ".", "add_argument", "(", "'--counts-output'", ",", "type", "=", "str", ",", "default", "=", "'counts.json'", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "return", "args" ]
Parse command line arguments.
[ "Parse", "command", "line", "arguments", "." ]
python
train
Kozea/pygal
pygal/graph/graph.py
https://github.com/Kozea/pygal/blob/5e25c98a59a0642eecd9fcc5dbfeeb2190fbb5e7/pygal/graph/graph.py#L555-L596
def _static_value( self, serie_node, value, x, y, metadata, align_text='left', classes=None ): """Write the print value""" label = metadata and metadata.get('label') classes = classes and [classes] or [] if self.print_labels and label: label_cls = classes + ['label'] if self.print_values: y -= self.style.value_font_size / 2 self.svg.node( serie_node['text_overlay'], 'text', class_=' '.join(label_cls), x=x, y=y + self.style.value_font_size / 3 ).text = label y += self.style.value_font_size if self.print_values or self.dynamic_print_values: val_cls = classes + ['value'] if self.dynamic_print_values: val_cls.append('showable') self.svg.node( serie_node['text_overlay'], 'text', class_=' '.join(val_cls), x=x, y=y + self.style.value_font_size / 3, attrib={ 'text-anchor': align_text } ).text = value if self.print_zeroes or value != '0' else ''
[ "def", "_static_value", "(", "self", ",", "serie_node", ",", "value", ",", "x", ",", "y", ",", "metadata", ",", "align_text", "=", "'left'", ",", "classes", "=", "None", ")", ":", "label", "=", "metadata", "and", "metadata", ".", "get", "(", "'label'", ")", "classes", "=", "classes", "and", "[", "classes", "]", "or", "[", "]", "if", "self", ".", "print_labels", "and", "label", ":", "label_cls", "=", "classes", "+", "[", "'label'", "]", "if", "self", ".", "print_values", ":", "y", "-=", "self", ".", "style", ".", "value_font_size", "/", "2", "self", ".", "svg", ".", "node", "(", "serie_node", "[", "'text_overlay'", "]", ",", "'text'", ",", "class_", "=", "' '", ".", "join", "(", "label_cls", ")", ",", "x", "=", "x", ",", "y", "=", "y", "+", "self", ".", "style", ".", "value_font_size", "/", "3", ")", ".", "text", "=", "label", "y", "+=", "self", ".", "style", ".", "value_font_size", "if", "self", ".", "print_values", "or", "self", ".", "dynamic_print_values", ":", "val_cls", "=", "classes", "+", "[", "'value'", "]", "if", "self", ".", "dynamic_print_values", ":", "val_cls", ".", "append", "(", "'showable'", ")", "self", ".", "svg", ".", "node", "(", "serie_node", "[", "'text_overlay'", "]", ",", "'text'", ",", "class_", "=", "' '", ".", "join", "(", "val_cls", ")", ",", "x", "=", "x", ",", "y", "=", "y", "+", "self", ".", "style", ".", "value_font_size", "/", "3", ",", "attrib", "=", "{", "'text-anchor'", ":", "align_text", "}", ")", ".", "text", "=", "value", "if", "self", ".", "print_zeroes", "or", "value", "!=", "'0'", "else", "''" ]
Write the print value
[ "Write", "the", "print", "value" ]
python
train
deepmipt/DeepPavlov
deeppavlov/core/layers/tf_layers.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/core/layers/tf_layers.py#L371-L430
def character_embedding_network(char_placeholder: tf.Tensor, n_characters: int = None, emb_mat: np.array = None, char_embedding_dim: int = None, filter_widths=(3, 4, 5, 7), highway_on_top=False): """ Characters to vector. Every sequence of characters (token) is embedded to vector space with dimensionality char_embedding_dim Convolution plus max_pooling is used to obtain vector representations of words. Args: char_placeholder: placeholder of int32 type with dimensionality [B, T, C] B - batch size (can be None) T - Number of tokens (can be None) C - number of characters (can be None) n_characters: total number of unique characters emb_mat: if n_characters is not provided the emb_mat should be provided it is a numpy array with dimensions [V, E], where V - vocabulary size and E - embeddings dimension char_embedding_dim: dimensionality of characters embeddings filter_widths: array of width of kernel in convolutional embedding network used in parallel Returns: embeddings: tf.Tensor with dimensionality [B, T, F], where F is dimensionality of embeddings """ if emb_mat is None: emb_mat = np.random.randn(n_characters, char_embedding_dim).astype(np.float32) / np.sqrt(char_embedding_dim) else: char_embedding_dim = emb_mat.shape[1] char_emb_var = tf.Variable(emb_mat, trainable=True) with tf.variable_scope('Char_Emb_Network'): # Character embedding layer c_emb = tf.nn.embedding_lookup(char_emb_var, char_placeholder) # Character embedding network conv_results_list = [] for filter_width in filter_widths: conv_results_list.append(tf.layers.conv2d(c_emb, char_embedding_dim, (1, filter_width), padding='same', kernel_initializer=INITIALIZER)) units = tf.concat(conv_results_list, axis=3) units = tf.reduce_max(units, axis=2) if highway_on_top: sigmoid_gate = tf.layers.dense(units, 1, activation=tf.sigmoid, kernel_initializer=INITIALIZER, kernel_regularizer=tf.nn.l2_loss) deeper_units = tf.layers.dense(units, tf.shape(units)[-1], kernel_initializer=INITIALIZER, kernel_regularizer=tf.nn.l2_loss) units = sigmoid_gate * units + (1 - sigmoid_gate) * deeper_units units = tf.nn.relu(units) return units
[ "def", "character_embedding_network", "(", "char_placeholder", ":", "tf", ".", "Tensor", ",", "n_characters", ":", "int", "=", "None", ",", "emb_mat", ":", "np", ".", "array", "=", "None", ",", "char_embedding_dim", ":", "int", "=", "None", ",", "filter_widths", "=", "(", "3", ",", "4", ",", "5", ",", "7", ")", ",", "highway_on_top", "=", "False", ")", ":", "if", "emb_mat", "is", "None", ":", "emb_mat", "=", "np", ".", "random", ".", "randn", "(", "n_characters", ",", "char_embedding_dim", ")", ".", "astype", "(", "np", ".", "float32", ")", "/", "np", ".", "sqrt", "(", "char_embedding_dim", ")", "else", ":", "char_embedding_dim", "=", "emb_mat", ".", "shape", "[", "1", "]", "char_emb_var", "=", "tf", ".", "Variable", "(", "emb_mat", ",", "trainable", "=", "True", ")", "with", "tf", ".", "variable_scope", "(", "'Char_Emb_Network'", ")", ":", "# Character embedding layer", "c_emb", "=", "tf", ".", "nn", ".", "embedding_lookup", "(", "char_emb_var", ",", "char_placeholder", ")", "# Character embedding network", "conv_results_list", "=", "[", "]", "for", "filter_width", "in", "filter_widths", ":", "conv_results_list", ".", "append", "(", "tf", ".", "layers", ".", "conv2d", "(", "c_emb", ",", "char_embedding_dim", ",", "(", "1", ",", "filter_width", ")", ",", "padding", "=", "'same'", ",", "kernel_initializer", "=", "INITIALIZER", ")", ")", "units", "=", "tf", ".", "concat", "(", "conv_results_list", ",", "axis", "=", "3", ")", "units", "=", "tf", ".", "reduce_max", "(", "units", ",", "axis", "=", "2", ")", "if", "highway_on_top", ":", "sigmoid_gate", "=", "tf", ".", "layers", ".", "dense", "(", "units", ",", "1", ",", "activation", "=", "tf", ".", "sigmoid", ",", "kernel_initializer", "=", "INITIALIZER", ",", "kernel_regularizer", "=", "tf", ".", "nn", ".", "l2_loss", ")", "deeper_units", "=", "tf", ".", "layers", ".", "dense", "(", "units", ",", "tf", ".", "shape", "(", "units", ")", "[", "-", "1", "]", ",", "kernel_initializer", "=", "INITIALIZER", ",", "kernel_regularizer", "=", "tf", ".", "nn", ".", "l2_loss", ")", "units", "=", "sigmoid_gate", "*", "units", "+", "(", "1", "-", "sigmoid_gate", ")", "*", "deeper_units", "units", "=", "tf", ".", "nn", ".", "relu", "(", "units", ")", "return", "units" ]
Characters to vector. Every sequence of characters (token) is embedded into a vector space with dimensionality char_embedding_dim. Convolution plus max_pooling is used to obtain vector representations of words. Args: char_placeholder: placeholder of int32 type with dimensionality [B, T, C] B - batch size (can be None) T - Number of tokens (can be None) C - number of characters (can be None) n_characters: total number of unique characters emb_mat: if n_characters is not provided, the emb_mat should be provided; it is a numpy array with dimensions [V, E], where V - vocabulary size and E - embeddings dimension char_embedding_dim: dimensionality of character embeddings filter_widths: array of kernel widths used in parallel in the convolutional embedding network Returns: embeddings: tf.Tensor with dimensionality [B, T, F], where F is dimensionality of embeddings
[ "Characters", "to", "vector", ".", "Every", "sequence", "of", "characters", "(", "token", ")", "is", "embedded", "into", "a", "vector", "space", "with", "dimensionality", "char_embedding_dim", ".", "Convolution", "plus", "max_pooling", "is", "used", "to", "obtain", "vector", "representations", "of", "words", "." ]
python
test
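A hedged, TF1-style sketch of the expected shapes for the function above; the placeholder construction is an assumption, and the output width follows from concatenating one char_embedding_dim-wide max-pooled map per filter width (4 * 32 = 128 here).

import tensorflow as tf  # TF1 graph API, matching the code above

char_ids = tf.placeholder(tf.int32, shape=[None, None, None])  # [B, T, C]
emb = character_embedding_network(char_ids,
                                  n_characters=100,
                                  char_embedding_dim=32)
# emb: [B, T, 128] -- filter widths (3, 4, 5, 7) each contribute 32 channels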
osrg/ryu
ryu/lib/packet/bfd.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/lib/packet/bfd.py#L262-L274
def pack(self): """ Encode a BFD Control packet without authentication section. """ diag = (self.ver << 5) + self.diag flags = (self.state << 6) + self.flags length = len(self) return struct.pack(self._PACK_STR, diag, flags, self.detect_mult, length, self.my_discr, self.your_discr, self.desired_min_tx_interval, self.required_min_rx_interval, self.required_min_echo_rx_interval)
[ "def", "pack", "(", "self", ")", ":", "diag", "=", "(", "self", ".", "ver", "<<", "5", ")", "+", "self", ".", "diag", "flags", "=", "(", "self", ".", "state", "<<", "6", ")", "+", "self", ".", "flags", "length", "=", "len", "(", "self", ")", "return", "struct", ".", "pack", "(", "self", ".", "_PACK_STR", ",", "diag", ",", "flags", ",", "self", ".", "detect_mult", ",", "length", ",", "self", ".", "my_discr", ",", "self", ".", "your_discr", ",", "self", ".", "desired_min_tx_interval", ",", "self", ".", "required_min_rx_interval", ",", "self", ".", "required_min_echo_rx_interval", ")" ]
Encode a BFD Control packet without authentication section.
[ "Encode", "a", "BFD", "Control", "packet", "without", "authentication", "section", "." ]
python
train
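A small arithmetic sketch of the first two packed octets: ver occupies the top 3 bits of byte 0 and state the top 2 bits of byte 1, exactly as the shifts in pack compute them; the Up = 3 state encoding is an assumption from RFC 5880.

ver, diag = 1, 0         # BFD version 1, diagnostic 0 (no diagnostic)
state, flags = 3, 0      # 3 = Up per RFC 5880's state encoding (assumption)
byte0 = (ver << 5) + diag     # 0b00100000 -> 0x20
byte1 = (state << 6) + flags  # 0b11000000 -> 0xc0
print(hex(byte0), hex(byte1))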
deepmipt/DeepPavlov
deeppavlov/models/elmo/elmo2tfhub.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/models/elmo/elmo2tfhub.py#L190-L206
def export2hub(weight_file, hub_dir, options): """Exports a TF-Hub module """ spec = make_module_spec(options, str(weight_file)) try: with tf.Graph().as_default(): module = hub.Module(spec) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) if hub_dir.exists(): shutil.rmtree(hub_dir) module.export(str(hub_dir), sess) finally: pass
[ "def", "export2hub", "(", "weight_file", ",", "hub_dir", ",", "options", ")", ":", "spec", "=", "make_module_spec", "(", "options", ",", "str", "(", "weight_file", ")", ")", "try", ":", "with", "tf", ".", "Graph", "(", ")", ".", "as_default", "(", ")", ":", "module", "=", "hub", ".", "Module", "(", "spec", ")", "with", "tf", ".", "Session", "(", ")", "as", "sess", ":", "sess", ".", "run", "(", "tf", ".", "global_variables_initializer", "(", ")", ")", "if", "hub_dir", ".", "exists", "(", ")", ":", "shutil", ".", "rmtree", "(", "hub_dir", ")", "module", ".", "export", "(", "str", "(", "hub_dir", ")", ",", "sess", ")", "finally", ":", "pass" ]
Exports a TF-Hub module
[ "Exports", "a", "TF", "-", "Hub", "module" ]
python
test
jart/fabulous
fabulous/term.py
https://github.com/jart/fabulous/blob/19903cf0a980b82f5928c3bec1f28b6bdd3785bd/fabulous/term.py#L517-L530
def _split_attributes(self, attrs): """Split attribute code Takes an attribute code and returns a tuple containing foreground (fg), foreground intensity (fgi), background (bg), and background intensity (bgi) Attributes can be joined using ``fg | fgi | bg | bgi`` """ fg = attrs & self.FG_ALL fgi = attrs & self.FG_INTENSITY bg = attrs & self.BG_ALL bgi = attrs & self.BG_INTENSITY return fg, fgi, bg, bgi
[ "def", "_split_attributes", "(", "self", ",", "attrs", ")", ":", "fg", "=", "attrs", "&", "self", ".", "FG_ALL", "fgi", "=", "attrs", "&", "self", ".", "FG_INTENSITY", "bg", "=", "attrs", "&", "self", ".", "BG_ALL", "bgi", "=", "attrs", "&", "self", ".", "BG_INTENSITY", "return", "fg", ",", "fgi", ",", "bg", ",", "bgi" ]
Split attribute code Takes an attribute code and returns a tuple containing foreground (fg), foreground intensity (fgi), background (bg), and background intensity (bgi) Attributes can be joined using ``fg | fgi | bg | bgi``
[ "Split", "attribute", "code", "Takes", "an", "attribute", "code", "and", "returns", "a", "tuple", "containing", "foreground", "(", "fg", ")", "foreground", "intensity", "(", "fgi", ")", "background", "(", "bg", ")", "and", "background", "intensity", "(", "bgi", ")", "Attributes", "can", "be", "joined", "using", "fg", "|", "fgi", "|", "bg", "|", "bgi" ]
python
train
almarklein/pyelastix
pyelastix.py
https://github.com/almarklein/pyelastix/blob/971a677ce9a3ef8eb0b95ae393db8e2506d2f8a4/pyelastix.py#L272-L337
def _system3(cmd, verbose=False): """ Execute the given command in a subprocess and wait for it to finish. A thread is run that prints output of the process if verbose is True. """ # Init flag interrupted = False # Create progress if verbose > 0: progress = Progress() stdout = [] def poll_process(p): while not interrupted: msg = p.stdout.readline().decode() if msg: stdout.append(msg) if 'error' in msg.lower(): print(msg.rstrip()) if verbose == 1: progress.reset() elif verbose > 1: print(msg.rstrip()) elif verbose == 1: progress.update(msg) else: break time.sleep(0.01) #print("thread exit") # Start process that runs the command p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) # Keep reading stdout from it # thread.start_new_thread(poll_process, (p,)) Python 2.x my_thread = threading.Thread(target=poll_process, args=(p,)) my_thread.setDaemon(True) my_thread.start() # Wait here try: while p.poll() is None: time.sleep(0.01) except KeyboardInterrupt: # Set flag interrupted = True # Kill subprocess pid = p.pid if hasattr(os,'kill'): import signal os.kill(pid, signal.SIGKILL) elif sys.platform.startswith('win'): kernel32 = ctypes.windll.kernel32 handle = kernel32.OpenProcess(1, 0, pid) kernel32.TerminateProcess(handle, 0) #os.system("TASKKILL /PID " + str(pid) + " /F") # All good? if interrupted: raise RuntimeError('Registration process interrupted by the user.') if p.returncode: stdout.append(p.stdout.read().decode()) print(''.join(stdout)) raise RuntimeError('An error occured during the registration.')
[ "def", "_system3", "(", "cmd", ",", "verbose", "=", "False", ")", ":", "# Init flag", "interrupted", "=", "False", "# Create progress", "if", "verbose", ">", "0", ":", "progress", "=", "Progress", "(", ")", "stdout", "=", "[", "]", "def", "poll_process", "(", "p", ")", ":", "while", "not", "interrupted", ":", "msg", "=", "p", ".", "stdout", ".", "readline", "(", ")", ".", "decode", "(", ")", "if", "msg", ":", "stdout", ".", "append", "(", "msg", ")", "if", "'error'", "in", "msg", ".", "lower", "(", ")", ":", "print", "(", "msg", ".", "rstrip", "(", ")", ")", "if", "verbose", "==", "1", ":", "progress", ".", "reset", "(", ")", "elif", "verbose", ">", "1", ":", "print", "(", "msg", ".", "rstrip", "(", ")", ")", "elif", "verbose", "==", "1", ":", "progress", ".", "update", "(", "msg", ")", "else", ":", "break", "time", ".", "sleep", "(", "0.01", ")", "#print(\"thread exit\")", "# Start process that runs the command", "p", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "# Keep reading stdout from it", "# thread.start_new_thread(poll_process, (p,)) Python 2.x", "my_thread", "=", "threading", ".", "Thread", "(", "target", "=", "poll_process", ",", "args", "=", "(", "p", ",", ")", ")", "my_thread", ".", "setDaemon", "(", "True", ")", "my_thread", ".", "start", "(", ")", "# Wait here", "try", ":", "while", "p", ".", "poll", "(", ")", "is", "None", ":", "time", ".", "sleep", "(", "0.01", ")", "except", "KeyboardInterrupt", ":", "# Set flag", "interrupted", "=", "True", "# Kill subprocess", "pid", "=", "p", ".", "pid", "if", "hasattr", "(", "os", ",", "'kill'", ")", ":", "import", "signal", "os", ".", "kill", "(", "pid", ",", "signal", ".", "SIGKILL", ")", "elif", "sys", ".", "platform", ".", "startswith", "(", "'win'", ")", ":", "kernel32", "=", "ctypes", ".", "windll", ".", "kernel32", "handle", "=", "kernel32", ".", "OpenProcess", "(", "1", ",", "0", ",", "pid", ")", "kernel32", ".", "TerminateProcess", "(", "handle", ",", "0", ")", "#os.system(\"TASKKILL /PID \" + str(pid) + \" /F\")", "# All good?", "if", "interrupted", ":", "raise", "RuntimeError", "(", "'Registration process interrupted by the user.'", ")", "if", "p", ".", "returncode", ":", "stdout", ".", "append", "(", "p", ".", "stdout", ".", "read", "(", ")", ".", "decode", "(", ")", ")", "print", "(", "''", ".", "join", "(", "stdout", ")", ")", "raise", "RuntimeError", "(", "'An error occured during the registration.'", ")" ]
Execute the given command in a subprocess and wait for it to finish. A thread is run that prints output of the process if verbose is True.
[ "Execute", "the", "given", "command", "in", "a", "subprocess", "and", "wait", "for", "it", "to", "finish", ".", "A", "thread", "is", "run", "that", "prints", "output", "of", "the", "process", "if", "verbose", "is", "True", "." ]
python
train
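A minimal standalone sketch of the pattern this record implements: stream a subprocess's output from a daemon thread while the main thread waits. The helper name and the plain print-based reporting are assumptions, not part of the original module.

import subprocess
import threading
import time

def run_and_stream(cmd):
    # Hypothetical helper: run `cmd` in a shell, echo each output line
    # from a background thread, and return the exit code.
    p = subprocess.Popen(cmd, shell=True,
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    def pump():
        for line in p.stdout:
            print(line.decode().rstrip())

    threading.Thread(target=pump, daemon=True).start()
    while p.poll() is None:   # poll so KeyboardInterrupt stays responsive
        time.sleep(0.01)
    return p.returncode

# run_and_stream("echo hello")   # assumes a shell with `echo` is available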
apache/incubator-mxnet
example/rnn/word_lm/module.py
https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/rnn/word_lm/module.py#L78-L90
def forward(self, data_batch, is_train=None, carry_state=True): """Forward computation. States from previous forward computation are carried to the current iteration if `carry_state` is set to `True`. """ # propagate states from the previous iteration if carry_state: if isinstance(self._next_states, (int, float)): self._module.set_states(value=self._next_states) else: self._module.set_states(states=self._next_states) self._module.forward(data_batch, is_train=is_train) outputs = self._module.get_outputs(merge_multi_context=False) self._next_states = outputs[:-1]
[ "def", "forward", "(", "self", ",", "data_batch", ",", "is_train", "=", "None", ",", "carry_state", "=", "True", ")", ":", "# propagate states from the previous iteration", "if", "carry_state", ":", "if", "isinstance", "(", "self", ".", "_next_states", ",", "(", "int", ",", "float", ")", ")", ":", "self", ".", "_module", ".", "set_states", "(", "value", "=", "self", ".", "_next_states", ")", "else", ":", "self", ".", "_module", ".", "set_states", "(", "states", "=", "self", ".", "_next_states", ")", "self", ".", "_module", ".", "forward", "(", "data_batch", ",", "is_train", "=", "is_train", ")", "outputs", "=", "self", ".", "_module", ".", "get_outputs", "(", "merge_multi_context", "=", "False", ")", "self", ".", "_next_states", "=", "outputs", "[", ":", "-", "1", "]" ]
Forward computation. States from previous forward computation are carried to the current iteration if `carry_state` is set to `True`.
[ "Forward", "computation", ".", "States", "from", "previous", "forward", "computation", "are", "carried", "to", "the", "current", "iteration", "if", "carry_state", "is", "set", "to", "True", "." ]
python
train
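The state-carrying idea does not need MXNet to demonstrate. A sketch with a stand-in module -- all names below are hypothetical, and the "state" is a plain number rather than NDArrays:

class StatefulModule:
    # Stand-in for the wrapped module: output = accumulated state + input.
    def __init__(self):
        self._state = 0
    def set_states(self, value):
        self._state = value
    def forward(self, x):
        self._state += x
        return self._state

def forward(module, batch, next_states, carry_state=True):
    if carry_state:
        module.set_states(next_states)   # propagate previous iteration's state
    out = module.forward(batch)
    return out, out                      # output, plus the state to carry

m = StatefulModule()
state = 0
for batch in [1, 2, 3]:
    out, state = forward(m, batch, state)
print(out)   # 6 -- the state survived across iterations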
MDAnalysis/GridDataFormats
gridData/core.py
https://github.com/MDAnalysis/GridDataFormats/blob/3eeb0432f8cf856912436e4f3e7aba99d3c916be/gridData/core.py#L389-L394
def _load_cpp4(self, filename): """Initializes Grid from a CCP4 file.""" ccp4 = CCP4.CCP4() ccp4.read(filename) grid, edges = ccp4.histogramdd() self.__init__(grid=grid, edges=edges, metadata=self.metadata)
[ "def", "_load_cpp4", "(", "self", ",", "filename", ")", ":", "ccp4", "=", "CCP4", ".", "CCP4", "(", ")", "ccp4", ".", "read", "(", "filename", ")", "grid", ",", "edges", "=", "ccp4", ".", "histogramdd", "(", ")", "self", ".", "__init__", "(", "grid", "=", "grid", ",", "edges", "=", "edges", ",", "metadata", "=", "self", ".", "metadata", ")" ]
Initializes Grid from a CCP4 file.
[ "Initializes", "Grid", "from", "a", "CCP4", "file", "." ]
python
valid
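In practice the private loader is reached through the public Grid constructor, which dispatches on the file extension. A hedged usage sketch -- it assumes GridDataFormats is installed and that a CCP4 file named "density.ccp4" exists:

from gridData import Grid

g = Grid("density.ccp4")    # extension-based dispatch to the CCP4 reader
print(g.grid.shape)         # the histogram array
print(len(g.edges))         # one edge array per dimension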
quantmind/pulsar
pulsar/utils/pylib/events.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/utils/pylib/events.py#L56-L66
def unbind(self, callback): """Remove a callback from the list """ handlers = self._handlers if handlers: filtered_callbacks = [f for f in handlers if f != callback] removed_count = len(handlers) - len(filtered_callbacks) if removed_count: self._handlers = filtered_callbacks return removed_count return 0
[ "def", "unbind", "(", "self", ",", "callback", ")", ":", "handlers", "=", "self", ".", "_handlers", "if", "handlers", ":", "filtered_callbacks", "=", "[", "f", "for", "f", "in", "handlers", "if", "f", "!=", "callback", "]", "removed_count", "=", "len", "(", "handlers", ")", "-", "len", "(", "filtered_callbacks", ")", "if", "removed_count", ":", "self", ".", "_handlers", "=", "filtered_callbacks", "return", "removed_count", "return", "0" ]
Remove a callback from the list
[ "Remove", "a", "callback", "from", "the", "list" ]
python
train
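A self-contained sketch of the handler-list pattern; the Event class below is illustrative, not pulsar's actual event type:

class Event:
    def __init__(self):
        self._handlers = []
    def bind(self, callback):
        self._handlers.append(callback)
    def unbind(self, callback):
        # Same filtering logic as the record: drop every matching handler.
        before = len(self._handlers)
        self._handlers = [f for f in self._handlers if f != callback]
        return before - len(self._handlers)

def on_fire():
    print("fired")

ev = Event()
ev.bind(on_fire)
ev.bind(on_fire)
print(ev.unbind(on_fire))   # 2 -- both registrations removed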
tensorflow/mesh
mesh_tensorflow/ops.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/ops.py#L1254-L1268
def convert_args_to_laid_out_tensors(xs): """Convert list elements to laid-out-tensors when possible. Args: xs: a list Returns: a list """ ret = [] for x in xs: if hasattr(x, "to_laid_out_tensor"): ret.append(x.to_laid_out_tensor()) else: ret.append(x) return ret
[ "def", "convert_args_to_laid_out_tensors", "(", "xs", ")", ":", "ret", "=", "[", "]", "for", "x", "in", "xs", ":", "if", "hasattr", "(", "x", ",", "\"to_laid_out_tensor\"", ")", ":", "ret", ".", "append", "(", "x", ".", "to_laid_out_tensor", "(", ")", ")", "else", ":", "ret", ".", "append", "(", "x", ")", "return", "ret" ]
Convert list elements to laid-out-tensors when possible. Args: xs: a list Returns: a list
[ "Convert", "list", "elements", "to", "laid", "-", "out", "-", "tensors", "when", "possible", "." ]
python
train
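The conversion is pure duck typing, so a dummy class is enough to exercise it; nothing below depends on Mesh TensorFlow:

class FakeLaidOut:
    # Any object exposing to_laid_out_tensor() is converted; others pass through.
    def to_laid_out_tensor(self):
        return "laid-out"

def convert_args_to_laid_out_tensors(xs):
    return [x.to_laid_out_tensor() if hasattr(x, "to_laid_out_tensor") else x
            for x in xs]

print(convert_args_to_laid_out_tensors([FakeLaidOut(), 42]))
# ['laid-out', 42]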
Carreau/warn
warn/warn.py
https://github.com/Carreau/warn/blob/251ed08bc13b536c47392ba577f86e1f96bdad6b/warn/warn.py#L28-L115
def warn_explicit(message, category, filename, lineno, module=None, registry=None, module_globals=None, emit_module=None): """ Low level implementation of the warning functionality. Duplicate of the standard library `warnings.warn_explicit`, except it accepts the following arguments: `emit_module`: regular expression that should match the module the warnings are emitted from. """ lineno = int(lineno) if module is None: module = filename or "<unknown>" if module[-3:].lower() == ".py": module = module[:-3] # XXX What about leading pathname? if registry is None: registry = {} if registry.get('version', 0) != warnings._filters_version: registry.clear() registry['version'] = warnings._filters_version if isinstance(message, Warning): text = str(message) category = message.__class__ else: text = message message = category(message) key = (text, category, lineno) # Quick test for common case if registry.get(key): return # Search the filters for item in warnings.filters: item = _get_proxy_filter(item) if len(item) == 5: action, msg, cat, mod, ln = item emod = None else: action, msg, cat, mod, ln, emod = item if ((msg is None or msg.match(text)) and issubclass(category, cat) and (mod is None or mod.match(module)) and (emod is None or emod.match(emit_module)) and (ln == 0 or lineno == ln)): break else: action = defaultaction # Early exit actions if action == "ignore": registry[key] = 1 return # Prime the linecache for formatting, in case the # "file" is actually in a zipfile or something. import linecache linecache.getlines(filename, module_globals) if action == "error": raise message # Other actions if action == "once": registry[key] = 1 oncekey = (text, category) if onceregistry.get(oncekey): return onceregistry[oncekey] = 1 elif action == "always": pass elif action == "module": registry[key] = 1 altkey = (text, category, 0) if registry.get(altkey): return registry[altkey] = 1 elif action == "default": registry[key] = 1 else: # Unrecognized actions are errors raise RuntimeError( "Unrecognized action (%r) in warnings.filters:\n %s" % (action, item)) if not callable(warnings.showwarning): raise TypeError("warnings.showwarning() must be set to a " "function or method") # Print message and context warnings.showwarning(message, category, filename, lineno)
[ "def", "warn_explicit", "(", "message", ",", "category", ",", "filename", ",", "lineno", ",", "module", "=", "None", ",", "registry", "=", "None", ",", "module_globals", "=", "None", ",", "emit_module", "=", "None", ")", ":", "lineno", "=", "int", "(", "lineno", ")", "if", "module", "is", "None", ":", "module", "=", "filename", "or", "\"<unknown>\"", "if", "module", "[", "-", "3", ":", "]", ".", "lower", "(", ")", "==", "\".py\"", ":", "module", "=", "module", "[", ":", "-", "3", "]", "# XXX What about leading pathname?", "if", "registry", "is", "None", ":", "registry", "=", "{", "}", "if", "registry", ".", "get", "(", "'version'", ",", "0", ")", "!=", "warnings", ".", "_filters_version", ":", "registry", ".", "clear", "(", ")", "registry", "[", "'version'", "]", "=", "warnings", ".", "_filters_version", "if", "isinstance", "(", "message", ",", "Warning", ")", ":", "text", "=", "str", "(", "message", ")", "category", "=", "message", ".", "__class__", "else", ":", "text", "=", "message", "message", "=", "category", "(", "message", ")", "key", "=", "(", "text", ",", "category", ",", "lineno", ")", "# Quick test for common case", "if", "registry", ".", "get", "(", "key", ")", ":", "return", "# Search the filters", "for", "item", "in", "warnings", ".", "filters", ":", "item", "=", "_get_proxy_filter", "(", "item", ")", "if", "len", "(", "item", ")", "==", "5", ":", "action", ",", "msg", ",", "cat", ",", "mod", ",", "ln", "=", "item", "emod", "=", "None", "else", ":", "action", ",", "msg", ",", "cat", ",", "mod", ",", "ln", ",", "emod", "=", "item", "if", "(", "(", "msg", "is", "None", "or", "msg", ".", "match", "(", "text", ")", ")", "and", "issubclass", "(", "category", ",", "cat", ")", "and", "(", "mod", "is", "None", "or", "mod", ".", "match", "(", "module", ")", ")", "and", "(", "emod", "is", "None", "or", "emod", ".", "match", "(", "emit_module", ")", ")", "and", "(", "ln", "==", "0", "or", "lineno", "==", "ln", ")", ")", ":", "break", "else", ":", "action", "=", "defaultaction", "# Early exit actions", "if", "action", "==", "\"ignore\"", ":", "registry", "[", "key", "]", "=", "1", "return", "# Prime the linecache for formatting, in case the", "# \"file\" is actually in a zipfile or something.", "import", "linecache", "linecache", ".", "getlines", "(", "filename", ",", "module_globals", ")", "if", "action", "==", "\"error\"", ":", "raise", "message", "# Other actions", "if", "action", "==", "\"once\"", ":", "registry", "[", "key", "]", "=", "1", "oncekey", "=", "(", "text", ",", "category", ")", "if", "onceregistry", ".", "get", "(", "oncekey", ")", ":", "return", "onceregistry", "[", "oncekey", "]", "=", "1", "elif", "action", "==", "\"always\"", ":", "pass", "elif", "action", "==", "\"module\"", ":", "registry", "[", "key", "]", "=", "1", "altkey", "=", "(", "text", ",", "category", ",", "0", ")", "if", "registry", ".", "get", "(", "altkey", ")", ":", "return", "registry", "[", "altkey", "]", "=", "1", "elif", "action", "==", "\"default\"", ":", "registry", "[", "key", "]", "=", "1", "else", ":", "# Unrecognized actions are errors", "raise", "RuntimeError", "(", "\"Unrecognized action (%r) in warnings.filters:\\n %s\"", "%", "(", "action", ",", "item", ")", ")", "if", "not", "callable", "(", "warnings", ".", "showwarning", ")", ":", "raise", "TypeError", "(", "\"warnings.showwarning() must be set to a \"", "\"function or method\"", ")", "# Print message and context", "warnings", ".", "showwarning", "(", "message", ",", "category", ",", "filename", ",", "lineno", ")" ]
Low level implementation of the warning functionality. Duplicate of the standard library `warnings.warn_explicit`, except it accepts the following arguments: `emit_module`: regular expression that should match the module the warnings are emitted from.
[ "Low", "level", "implementation", "of", "the", "warning", "functionality", ".", "Duplicate", "of", "the", "standard", "library", "warnings", ".", "warn_explicit", "except", "it", "accepts", "the", "following", "arguments", ":" ]
python
train
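The standard-library function this record extends can be exercised directly; the module filename and line number below are made up for the demonstration:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    warnings.warn_explicit("something looks off", UserWarning,
                           filename="fake_module.py", lineno=42)

w = caught[0]
print(w.filename, w.lineno, w.message)
# fake_module.py 42 something looks off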
mikekatz04/BOWIE
snr_calculator_folder/gwsnrcalc/utils/waveforms.py
https://github.com/mikekatz04/BOWIE/blob/a941342a3536cb57c817a1643896d99a3f354a86/snr_calculator_folder/gwsnrcalc/utils/waveforms.py#L534-L539
def _hcn_func(self): """Eq. 56 from Barack and Cutler 2004 """ self.hc = 1./(np.pi*self.dist)*np.sqrt(2.*self._dEndfr()) return
[ "def", "_hcn_func", "(", "self", ")", ":", "self", ".", "hc", "=", "1.", "/", "(", "np", ".", "pi", "*", "self", ".", "dist", ")", "*", "np", ".", "sqrt", "(", "2.", "*", "self", ".", "_dEndfr", "(", ")", ")", "return" ]
Eq. 56 from Barack and Cutler 2004
[ "Eq", ".", "56", "from", "Barack", "and", "Cutler", "2004" ]
python
train
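Numerically, Eq. 56 is a one-liner once the distance and the per-harmonic energy spectrum dE_n/df are known; both values below are placeholders in assumed-consistent geometric units:

import numpy as np

dist = 1.0e22     # placeholder luminosity distance
dEndfr = 5.0e30   # placeholder dE_n/df for one harmonic

hc = 1. / (np.pi * dist) * np.sqrt(2. * dEndfr)
print(hc)         # characteristic strain of this harmonic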
CalebBell/fpi
fpi/drag.py
https://github.com/CalebBell/fpi/blob/6e6da3b9d0c17e10cc0886c97bc1bb8aeba2cca5/fpi/drag.py#L834-L885
def Almedeij(Re): r'''Calculates drag coefficient of a smooth sphere using the method in [1]_ as described in [2]_. .. math:: C_D = \left[\frac{1}{(\phi_1 + \phi_2)^{-1} + (\phi_3)^{-1}} + \phi_4\right]^{0.1} \phi_1 = (24Re^{-1})^{10} + (21Re^{-0.67})^{10} + (4Re^{-0.33})^{10} + 0.4^{10} \phi_2 = \left[(0.148Re^{0.11})^{-10} + (0.5)^{-10}\right]^{-1} \phi_3 = (1.57\times10^8Re^{-1.625})^{10} \phi_4 = \left[(6\times10^{-17}Re^{2.63})^{-10} + (0.2)^{-10}\right]^{-1} Parameters ---------- Re : float Reynolds number of the sphere, [-] Returns ------- Cd : float Drag coefficient [-] Notes ----- Range is Re <= 1E6. Original work has been reviewed. Examples -------- >>> Almedeij(200.) 0.7114768646813396 References ---------- .. [1] Almedeij, Jaber. "Drag Coefficient of Flow around a Sphere: Matching Asymptotically the Wide Trend." Powder Technology 186, no. 3 (September 10, 2008): 218-23. doi:10.1016/j.powtec.2007.12.006. .. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz Ahmadi. "Development of Empirical Models with High Accuracy for Estimation of Drag Coefficient of Flow around a Smooth Sphere: An Evolutionary Approach." Powder Technology 257 (May 2014): 11-19. doi:10.1016/j.powtec.2014.02.045. ''' phi4 = ((6E-17*Re**2.63)**-10 + 0.2**-10)**-1 phi3 = (1.57E8*Re**-1.625)**10 phi2 = ((0.148*Re**0.11)**-10 + 0.5**-10)**-1 phi1 = (24*Re**-1)**10 + (21*Re**-0.67)**10 + (4*Re**-0.33)**10 + 0.4**10 Cd = (1/((phi1 + phi2)**-1 + phi3**-1) + phi4)**0.1 return Cd
[ "def", "Almedeij", "(", "Re", ")", ":", "phi4", "=", "(", "(", "6E-17", "*", "Re", "**", "2.63", ")", "**", "-", "10", "+", "0.2", "**", "-", "10", ")", "**", "-", "1", "phi3", "=", "(", "1.57E8", "*", "Re", "**", "-", "1.625", ")", "**", "10", "phi2", "=", "(", "(", "0.148", "*", "Re", "**", "0.11", ")", "**", "-", "10", "+", "0.5", "**", "-", "10", ")", "**", "-", "1", "phi1", "=", "(", "24", "*", "Re", "**", "-", "1", ")", "**", "10", "+", "(", "21", "*", "Re", "**", "-", "0.67", ")", "**", "10", "+", "(", "4", "*", "Re", "**", "-", "0.33", ")", "**", "10", "+", "0.4", "**", "10", "Cd", "=", "(", "1", "/", "(", "(", "phi1", "+", "phi2", ")", "**", "-", "1", "+", "phi3", "**", "-", "1", ")", "+", "phi4", ")", "**", "0.1", "return", "Cd" ]
Calculates drag coefficient of a smooth sphere using the method in
[1]_ as described in [2]_.

.. math::
    C_D = \left[\frac{1}{(\phi_1 + \phi_2)^{-1} + (\phi_3)^{-1}} + \phi_4\right]^{0.1}

    \phi_1 = (24Re^{-1})^{10} + (21Re^{-0.67})^{10} + (4Re^{-0.33})^{10} + 0.4^{10}

    \phi_2 = \left[(0.148Re^{0.11})^{-10} + (0.5)^{-10}\right]^{-1}

    \phi_3 = (1.57\times10^8Re^{-1.625})^{10}

    \phi_4 = \left[(6\times10^{-17}Re^{2.63})^{-10} + (0.2)^{-10}\right]^{-1}

Parameters
----------
Re : float
    Reynolds number of the sphere, [-]

Returns
-------
Cd : float
    Drag coefficient [-]

Notes
-----
Range is Re <= 1E6.
Original work has been reviewed.

Examples
--------
>>> Almedeij(200.)
0.7114768646813396

References
----------
.. [1] Almedeij, Jaber. "Drag Coefficient of Flow around a Sphere: Matching
   Asymptotically the Wide Trend." Powder Technology 186, no. 3
   (September 10, 2008): 218-23. doi:10.1016/j.powtec.2007.12.006.
.. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz Ahmadi.
   "Development of Empirical Models with High Accuracy for Estimation of
   Drag Coefficient of Flow around a Smooth Sphere: An Evolutionary
   Approach." Powder Technology 257 (May 2014): 11-19.
   doi:10.1016/j.powtec.2014.02.045.
[ "r", "Calculates", "drag", "coefficient", "of", "a", "smooth", "sphere", "using", "the", "method", "in", "[", "1", "]", "_", "as", "described", "in", "[", "2", "]", "_", "." ]
python
train
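Since the record carries the full function body and a doctest, the quickest check is to call it; the import assumes the fpi package is installed (otherwise paste the function from the record):

from fpi.drag import Almedeij

print(Almedeij(200.))   # 0.7114768646813396, matching the docstring doctest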
spdx/tools-python
spdx/parsers/lexers/tagvalue.py
https://github.com/spdx/tools-python/blob/301d72f6ae57c832c1da7f6402fa49b192de6810/spdx/parsers/lexers/tagvalue.py#L104-L112
def t_text_end(self, t): r'</text>\s*' t.type = 'TEXT' t.value = t.lexer.lexdata[ t.lexer.text_start:t.lexer.lexpos] t.lexer.lineno += t.value.count('\n') t.value = t.value.strip() t.lexer.begin('INITIAL') return t
[ "def", "t_text_end", "(", "self", ",", "t", ")", ":", "t", ".", "type", "=", "'TEXT'", "t", ".", "value", "=", "t", ".", "lexer", ".", "lexdata", "[", "t", ".", "lexer", ".", "text_start", ":", "t", ".", "lexer", ".", "lexpos", "]", "t", ".", "lexer", ".", "lineno", "+=", "t", ".", "value", ".", "count", "(", "'\\n'", ")", "t", ".", "value", "=", "t", ".", "value", ".", "strip", "(", ")", "t", ".", "lexer", ".", "begin", "(", "'INITIAL'", ")", "return", "t" ]
</text>\s*
[ "r", "<", "/", "text", ">", "\\", "s", "*" ]
python
valid
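Stripped of PLY, the capture step is a slice of the raw input between a remembered start offset and the lexer's current position; the offsets below are computed by hand for illustration:

lexdata = "<text>hello\nworld</text>   trailing"
text_start = 0                                  # remembered when <text> was seen
lexpos = lexdata.index("</text>") + len("</text>")

value = lexdata[text_start:lexpos]              # includes the closing tag
print(repr(value.strip()), value.count("\n"))   # one newline consumed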
UCL-INGI/INGInious
inginious/frontend/arch_helper.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/arch_helper.py#L33-L47
def _run_asyncio(loop, zmq_context): """ Run asyncio (should be called in a thread) and close the loop and the zmq context when the thread ends :param loop: :param zmq_context: :return: """ try: asyncio.set_event_loop(loop) loop.run_forever() except: pass finally: loop.close() zmq_context.destroy(1000)
[ "def", "_run_asyncio", "(", "loop", ",", "zmq_context", ")", ":", "try", ":", "asyncio", ".", "set_event_loop", "(", "loop", ")", "loop", ".", "run_forever", "(", ")", "except", ":", "pass", "finally", ":", "loop", ".", "close", "(", ")", "zmq_context", ".", "destroy", "(", "1000", ")" ]
Run asyncio (should be called in a thread) and close the loop and the zmq context when the thread ends :param loop: :param zmq_context: :return:
[ "Run", "asyncio", "(", "should", "be", "called", "in", "a", "thread", ")", "and", "close", "the", "loop", "and", "the", "zmq", "context", "when", "the", "thread", "ends", ":", "param", "loop", ":", ":", "param", "zmq_context", ":", ":", "return", ":" ]
python
train
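The thread-hosted loop pattern works without ZeroMQ; in this sketch the zmq teardown is replaced by a comment and the loop is stopped from the main thread:

import asyncio
import threading
import time

def run_loop(loop):
    try:
        asyncio.set_event_loop(loop)
        loop.run_forever()
    finally:
        loop.close()
        # a real version would also destroy the zmq context here

loop = asyncio.new_event_loop()
t = threading.Thread(target=run_loop, args=(loop,), daemon=True)
t.start()

loop.call_soon_threadsafe(lambda: print("running in the loop thread"))
time.sleep(0.1)
loop.call_soon_threadsafe(loop.stop)
t.join()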
ajyoon/blur
examples/waves/waves.py
https://github.com/ajyoon/blur/blob/25fcf083af112bb003956a7a7e1c6ff7d8fef279/examples/waves/waves.py#L182-L221
def build_chunk(oscillators): """ Build an audio chunk and progress the oscillator states. Args: oscillators (list): A list of oscillator.Oscillator objects to build chunks from Returns: str: a string of audio sample bytes ready to be written to a wave file """ step_random_processes(oscillators) subchunks = [] for osc in oscillators: osc.amplitude.step_amp() osc_chunk = osc.get_samples(config.CHUNK_SIZE) if osc_chunk is not None: subchunks.append(osc_chunk) if len(subchunks): new_chunk = sum(subchunks) else: new_chunk = numpy.zeros(config.CHUNK_SIZE) # If we exceed the maximum amplitude, handle it gracefully chunk_amplitude = amplitude.find_amplitude(new_chunk) if chunk_amplitude > config.MAX_AMPLITUDE: # Normalize the amplitude chunk to mitigate immediate clipping new_chunk = amplitude.normalize_amplitude(new_chunk, config.MAX_AMPLITUDE) # Pick some of the offending oscillators (and some random others) # and lower their drift targets avg_amp = (sum(osc.amplitude.value for osc in oscillators) / len(oscillators)) for osc in oscillators: if (osc.amplitude.value > avg_amp and rand.prob_bool(0.1) or rand.prob_bool(0.01)): osc.amplitude.drift_target = rand.weighted_rand( [(-5, 1), (0, 10)]) osc.amplitude.change_rate = rand.weighted_rand( osc.amplitude.change_rate_weights) return new_chunk.astype(config.SAMPLE_DATA_TYPE).tostring()
[ "def", "build_chunk", "(", "oscillators", ")", ":", "step_random_processes", "(", "oscillators", ")", "subchunks", "=", "[", "]", "for", "osc", "in", "oscillators", ":", "osc", ".", "amplitude", ".", "step_amp", "(", ")", "osc_chunk", "=", "osc", ".", "get_samples", "(", "config", ".", "CHUNK_SIZE", ")", "if", "osc_chunk", "is", "not", "None", ":", "subchunks", ".", "append", "(", "osc_chunk", ")", "if", "len", "(", "subchunks", ")", ":", "new_chunk", "=", "sum", "(", "subchunks", ")", "else", ":", "new_chunk", "=", "numpy", ".", "zeros", "(", "config", ".", "CHUNK_SIZE", ")", "# If we exceed the maximum amplitude, handle it gracefully", "chunk_amplitude", "=", "amplitude", ".", "find_amplitude", "(", "new_chunk", ")", "if", "chunk_amplitude", ">", "config", ".", "MAX_AMPLITUDE", ":", "# Normalize the amplitude chunk to mitigate immediate clipping", "new_chunk", "=", "amplitude", ".", "normalize_amplitude", "(", "new_chunk", ",", "config", ".", "MAX_AMPLITUDE", ")", "# Pick some of the offending oscillators (and some random others)", "# and lower their drift targets", "avg_amp", "=", "(", "sum", "(", "osc", ".", "amplitude", ".", "value", "for", "osc", "in", "oscillators", ")", "/", "len", "(", "oscillators", ")", ")", "for", "osc", "in", "oscillators", ":", "if", "(", "osc", ".", "amplitude", ".", "value", ">", "avg_amp", "and", "rand", ".", "prob_bool", "(", "0.1", ")", "or", "rand", ".", "prob_bool", "(", "0.01", ")", ")", ":", "osc", ".", "amplitude", ".", "drift_target", "=", "rand", ".", "weighted_rand", "(", "[", "(", "-", "5", ",", "1", ")", ",", "(", "0", ",", "10", ")", "]", ")", "osc", ".", "amplitude", ".", "change_rate", "=", "rand", ".", "weighted_rand", "(", "osc", ".", "amplitude", ".", "change_rate_weights", ")", "return", "new_chunk", ".", "astype", "(", "config", ".", "SAMPLE_DATA_TYPE", ")", ".", "tostring", "(", ")" ]
Build an audio chunk and progress the oscillator states. Args: oscillators (list): A list of oscillator.Oscillator objects to build chunks from Returns: str: a string of audio sample bytes ready to be written to a wave file
[ "Build", "an", "audio", "chunk", "and", "progress", "the", "oscillator", "states", "." ]
python
train
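The mix-then-normalize core reduces to a few numpy lines; the chunk size and amplitude cap below are assumed stand-ins for the config module:

import numpy as np

CHUNK_SIZE = 1024      # assumed config.CHUNK_SIZE
MAX_AMPLITUDE = 0.9    # assumed config.MAX_AMPLITUDE

subchunks = [np.sin(np.linspace(0, 50, CHUNK_SIZE)),
             np.sin(np.linspace(0, 77, CHUNK_SIZE))]
chunk = sum(subchunks) if subchunks else np.zeros(CHUNK_SIZE)

peak = np.max(np.abs(chunk))
if peak > MAX_AMPLITUDE:
    chunk *= MAX_AMPLITUDE / peak   # scale down to avoid clipping
print(np.max(np.abs(chunk)))        # <= 0.9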
Telefonica/toolium
toolium/behave/env_utils.py
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/behave/env_utils.py#L237-L247
def execute_before_scenario_steps(self, context): """ actions before each scenario :param context: It’s a clever place where you and behave can store information to share around, automatically managed by behave. """ if not self.feature_error: self.__execute_steps_by_action(context, ACTIONS_BEFORE_SCENARIO) if context.dyn_env.scenario_error: # Mark this Scenario as skipped. Steps will not be executed. context.scenario.mark_skipped()
[ "def", "execute_before_scenario_steps", "(", "self", ",", "context", ")", ":", "if", "not", "self", ".", "feature_error", ":", "self", ".", "__execute_steps_by_action", "(", "context", ",", "ACTIONS_BEFORE_SCENARIO", ")", "if", "context", ".", "dyn_env", ".", "scenario_error", ":", "# Mark this Scenario as skipped. Steps will not be executed.", "context", ".", "scenario", ".", "mark_skipped", "(", ")" ]
actions before each scenario :param context: It’s a clever place where you and behave can store information to share around, automatically managed by behave.
[ "actions", "before", "each", "scenario", ":", "param", "context", ":", "It’s", "a", "clever", "place", "where", "you", "and", "behave", "can", "store", "information", "to", "share", "around", "automatically", "managed", "by", "behave", "." ]
python
train
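In a plain behave project the same skip-on-earlier-error idea lives in environment.py; this is a sketch following behave's hook API, with the error flag as an assumption rather than toolium's actual attribute layout:

# environment.py (sketch) -- behave invokes these hooks itself.
def before_scenario(context, scenario):
    # If an earlier setup action failed, skip this scenario's steps.
    if getattr(context, "feature_error", False):
        scenario.mark_skipped()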
barryp/py-amqplib
amqplib/client_0_8/method_framing.py
https://github.com/barryp/py-amqplib/blob/2b3a47de34b4712c111d0a55d7ff109dffc2a7b2/amqplib/client_0_8/method_framing.py#L174-L193
def _process_content_header(self, channel, payload): """ Process Content Header frames """ partial = self.partial_messages[channel] partial.add_header(payload) if partial.complete: # # a bodyless message, we're done # self.queue.put((channel, partial.method_sig, partial.args, partial.msg)) del self.partial_messages[channel] self.expected_types[channel] = 1 else: # # wait for the content-body # self.expected_types[channel] = 3
[ "def", "_process_content_header", "(", "self", ",", "channel", ",", "payload", ")", ":", "partial", "=", "self", ".", "partial_messages", "[", "channel", "]", "partial", ".", "add_header", "(", "payload", ")", "if", "partial", ".", "complete", ":", "#", "# a bodyless message, we're done", "#", "self", ".", "queue", ".", "put", "(", "(", "channel", ",", "partial", ".", "method_sig", ",", "partial", ".", "args", ",", "partial", ".", "msg", ")", ")", "del", "self", ".", "partial_messages", "[", "channel", "]", "self", ".", "expected_types", "[", "channel", "]", "=", "1", "else", ":", "#", "# wait for the content-body", "#", "self", ".", "expected_types", "[", "channel", "]", "=", "3" ]
Process Content Header frames
[ "Process", "Content", "Header", "frames" ]
python
train
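The per-channel bookkeeping behind this method is a small state machine over frame-type codes (1 = method, 2 = header, 3 = body, following the record); a self-contained sketch:

expected_types = {}              # channel -> next frame type to accept
partial_complete = {1: False}    # channel -> did the header carry body-size 0?

def process_content_header(channel):
    if partial_complete[channel]:
        expected_types[channel] = 1   # bodyless message: back to method frames
    else:
        expected_types[channel] = 3   # otherwise wait for the content body

process_content_header(1)
print(expected_types)   # {1: 3}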
blackecho/Deep-Learning-TensorFlow
yadlt/models/convolutional/conv_net.py
https://github.com/blackecho/Deep-Learning-TensorFlow/blob/ddeb1f2848da7b7bee166ad2152b4afc46bb2086/yadlt/models/convolutional/conv_net.py#L279-L283
def max_pool(x, dim): """Max pooling operation.""" return tf.nn.max_pool( x, ksize=[1, dim, dim, 1], strides=[1, dim, dim, 1], padding='SAME')
[ "def", "max_pool", "(", "x", ",", "dim", ")", ":", "return", "tf", ".", "nn", ".", "max_pool", "(", "x", ",", "ksize", "=", "[", "1", ",", "dim", ",", "dim", ",", "1", "]", ",", "strides", "=", "[", "1", ",", "dim", ",", "dim", ",", "1", "]", ",", "padding", "=", "'SAME'", ")" ]
Max pooling operation.
[ "Max", "pooling", "operation", "." ]
python
train
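The same non-overlapping pooling can be checked in plain numpy (a sketch, not the TF kernel; it assumes the input sides are divisible by dim):

import numpy as np

def max_pool(x, dim):
    # x: (H, W); dim x dim windows with stride dim, as in the record.
    h, w = x.shape
    return x.reshape(h // dim, dim, w // dim, dim).max(axis=(1, 3))

x = np.arange(16).reshape(4, 4)
print(max_pool(x, 2))
# [[ 5  7]
#  [13 15]]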
elehcimd/pynb
fabfile.py
https://github.com/elehcimd/pynb/blob/a32af1f0e574f880eccda4a46aede6d65151f8c9/fabfile.py#L68-L87
def git_push(): """ Push new version and corresponding tag to origin :return: """ # get current version new_version = version.__version__ values = list(map(lambda x: int(x), new_version.split('.'))) # Push to origin new version and corresponding tag: # * commit new version # * create tag # * push version,tag to origin local('git add pynb/version.py version.py') local('git commit -m "updated version"') local('git tag {}.{}.{}'.format(values[0], values[1], values[2])) local('git push origin --tags') local('git push')
[ "def", "git_push", "(", ")", ":", "# get current version", "new_version", "=", "version", ".", "__version__", "values", "=", "list", "(", "map", "(", "lambda", "x", ":", "int", "(", "x", ")", ",", "new_version", ".", "split", "(", "'.'", ")", ")", ")", "# Push to origin new version and corresponding tag:", "# * commit new version", "# * create tag", "# * push version,tag to origin", "local", "(", "'git add pynb/version.py version.py'", ")", "local", "(", "'git commit -m \"updated version\"'", ")", "local", "(", "'git tag {}.{}.{}'", ".", "format", "(", "values", "[", "0", "]", ",", "values", "[", "1", "]", ",", "values", "[", "2", "]", ")", ")", "local", "(", "'git push origin --tags'", ")", "local", "(", "'git push'", ")" ]
Push new version and corresponding tag to origin :return:
[ "Push", "new", "version", "and", "corresponding", "tag", "to", "origin", ":", "return", ":" ]
python
train
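Without fabric, the commit-tag-push sequence maps onto subprocess calls; the version triple is a placeholder, and since this really pushes, the function is defined here but deliberately not called:

import subprocess

def git_push(values=(0, 1, 2)):
    tag = "{}.{}.{}".format(*values)   # placeholder version triple
    for cmd in [
        ["git", "add", "pynb/version.py", "version.py"],
        ["git", "commit", "-m", "updated version"],
        ["git", "tag", tag],
        ["git", "push", "origin", "--tags"],
        ["git", "push"],
    ]:
        subprocess.run(cmd, check=True)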
dagster-io/dagster
python_modules/dagster/dagster/core/definitions/decorators.py
https://github.com/dagster-io/dagster/blob/4119f8c773089de64831b1dfb9e168e353d401dc/python_modules/dagster/dagster/core/definitions/decorators.py#L173-L271
def solid(name=None, inputs=None, outputs=None, config_field=None, description=None): '''(decorator) Create a solid with specified parameters. This shortcut simplifies the core solid API by exploding arguments into kwargs of the transform function and omitting additional parameters when they are not needed. Parameters are otherwise as in the core API, :py:class:`SolidDefinition`. The decorated function will be used as the solid's transform function. Unlike in the core API, the transform function does not have to yield :py:class:`Result` object directly. Several simpler alternatives are available: 1. Return a value. This is returned as a :py:class:`Result` for a single output solid. 2. Return a :py:class:`Result`. Works like yielding result. 3. Return an instance of :py:class:`MultipleResults`. Works like yielding several results for multiple outputs. Useful for solids that have multiple outputs. 4. Yield :py:class:`Result`. Same as default transform behaviour. Args: name (str): Name of solid. inputs (list[InputDefinition]): List of inputs. outputs (list[OutputDefinition]): List of outputs. config_field (Field): The configuration for this solid. description (str): Description of this solid. Examples: .. code-block:: python @solid def hello_world(_context): print('hello') @solid() def hello_world(_context): print('hello') @solid(outputs=[OutputDefinition()]) def hello_world(_context): return {'foo': 'bar'} @solid(outputs=[OutputDefinition()]) def hello_world(_context): return Result(value={'foo': 'bar'}) @solid(outputs=[OutputDefinition()]) def hello_world(_context): yield Result(value={'foo': 'bar'}) @solid(outputs=[ OutputDefinition(name="left"), OutputDefinition(name="right"), ]) def hello_world(_context): return MultipleResults.from_dict({ 'left': {'foo': 'left'}, 'right': {'foo': 'right'}, }) @solid( inputs=[InputDefinition(name="foo")], outputs=[OutputDefinition()] ) def hello_world(_context, foo): return foo @solid( inputs=[InputDefinition(name="foo")], outputs=[OutputDefinition()], ) def hello_world(context, foo): context.log.info('log something') return foo @solid( inputs=[InputDefinition(name="foo")], outputs=[OutputDefinition()], config_field=Field(types.Dict({'str_value' : Field(types.String)})), ) def hello_world(context, foo): # context.solid_config is a dictionary with 'str_value' key return foo + context.solid_config['str_value'] ''' # This case is for when decorator is used bare, without arguments. e.g. @solid versus @solid() if callable(name): check.invariant(inputs is None) check.invariant(outputs is None) check.invariant(description is None) check.invariant(config_field is None) return _Solid()(name) return _Solid( name=name, inputs=inputs, outputs=outputs, config_field=config_field, description=description, )
[ "def", "solid", "(", "name", "=", "None", ",", "inputs", "=", "None", ",", "outputs", "=", "None", ",", "config_field", "=", "None", ",", "description", "=", "None", ")", ":", "# This case is for when decorator is used bare, without arguments. e.g. @solid versus @solid()", "if", "callable", "(", "name", ")", ":", "check", ".", "invariant", "(", "inputs", "is", "None", ")", "check", ".", "invariant", "(", "outputs", "is", "None", ")", "check", ".", "invariant", "(", "description", "is", "None", ")", "check", ".", "invariant", "(", "config_field", "is", "None", ")", "return", "_Solid", "(", ")", "(", "name", ")", "return", "_Solid", "(", "name", "=", "name", ",", "inputs", "=", "inputs", ",", "outputs", "=", "outputs", ",", "config_field", "=", "config_field", ",", "description", "=", "description", ",", ")" ]
(decorator) Create a solid with specified parameters. This shortcut simplifies the core solid API by exploding arguments into kwargs of the transform function and omitting additional parameters when they are not needed. Parameters are otherwise as in the core API, :py:class:`SolidDefinition`. The decorated function will be used as the solid's transform function. Unlike in the core API, the transform function does not have to yield :py:class:`Result` object directly. Several simpler alternatives are available: 1. Return a value. This is returned as a :py:class:`Result` for a single output solid. 2. Return a :py:class:`Result`. Works like yielding result. 3. Return an instance of :py:class:`MultipleResults`. Works like yielding several results for multiple outputs. Useful for solids that have multiple outputs. 4. Yield :py:class:`Result`. Same as default transform behaviour. Args: name (str): Name of solid. inputs (list[InputDefinition]): List of inputs. outputs (list[OutputDefinition]): List of outputs. config_field (Field): The configuration for this solid. description (str): Description of this solid. Examples: .. code-block:: python @solid def hello_world(_context): print('hello') @solid() def hello_world(_context): print('hello') @solid(outputs=[OutputDefinition()]) def hello_world(_context): return {'foo': 'bar'} @solid(outputs=[OutputDefinition()]) def hello_world(_context): return Result(value={'foo': 'bar'}) @solid(outputs=[OutputDefinition()]) def hello_world(_context): yield Result(value={'foo': 'bar'}) @solid(outputs=[ OutputDefinition(name="left"), OutputDefinition(name="right"), ]) def hello_world(_context): return MultipleResults.from_dict({ 'left': {'foo': 'left'}, 'right': {'foo': 'right'}, }) @solid( inputs=[InputDefinition(name="foo")], outputs=[OutputDefinition()] ) def hello_world(_context, foo): return foo @solid( inputs=[InputDefinition(name="foo")], outputs=[OutputDefinition()], ) def hello_world(context, foo): context.log.info('log something') return foo @solid( inputs=[InputDefinition(name="foo")], outputs=[OutputDefinition()], config_field=Field(types.Dict({'str_value' : Field(types.String)})), ) def hello_world(context, foo): # context.solid_config is a dictionary with 'str_value' key return foo + context.solid_config['str_value']
[ "(", "decorator", ")", "Create", "a", "solid", "with", "specified", "parameters", "." ]
python
test
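The bare-versus-parenthesized decorator trick at the end of the record generalizes; a toy decorator shows the callable(name) dispatch (everything here is illustrative):

def tag(name=None):
    # Works both as @tag and as @tag("custom-name").
    if callable(name):        # used bare: `name` is actually the function
        return tag()(name)
    def decorator(fn):
        fn.tag_name = name or fn.__name__
        return fn
    return decorator

@tag
def a(): pass

@tag("custom")
def b(): pass

print(a.tag_name, b.tag_name)   # a custom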
ashmastaflash/kal-wrapper
kalibrate/fn.py
https://github.com/ashmastaflash/kal-wrapper/blob/80ee03ab7bd3172ac26b769d6b442960f3424b0e/kalibrate/fn.py#L65-L69
def to_eng(num_in):
    """Return number in engineering notation."""
    x = decimal.Decimal(str(num_in))
    eng_not = x.normalize().to_eng_string()
    return eng_not
[ "def", "to_eng", "(", "num_in", ")", ":", "x", "=", "decimal", ".", "Decimal", "(", "str", "(", "num_in", ")", ")", "eng_not", "=", "x", ".", "normalize", "(", ")", ".", "to_eng_string", "(", ")", "return", "(", "eng_not", ")" ]
Return number in engineering notation.
[ "Return", "number", "in", "engineering", "notation", "." ]
python
train
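A quick standard-library check of the behaviour (the expected outputs in the comments follow decimal's engineering-string rules):

import decimal

def to_eng(num_in):
    return decimal.Decimal(str(num_in)).normalize().to_eng_string()

print(to_eng(123450000))   # 123.45E+6 -- exponent snapped to a multiple of 3
print(to_eng(12345))       # 12345 -- small values stay in plain notation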
victorlei/smop
smop/parse.py
https://github.com/victorlei/smop/blob/bdad96b715d1dd75ce8ab4724f76b9b1bb1f61cd/smop/parse.py#L455-L460
def p_expr_stmt(p): """ expr_stmt : expr_list SEMI """ assert isinstance(p[1], node.expr_list) p[0] = node.expr_stmt(expr=p[1])
[ "def", "p_expr_stmt", "(", "p", ")", ":", "assert", "isinstance", "(", "p", "[", "1", "]", ",", "node", ".", "expr_list", ")", "p", "[", "0", "]", "=", "node", ".", "expr_stmt", "(", "expr", "=", "p", "[", "1", "]", ")" ]
expr_stmt : expr_list SEMI
[ "expr_stmt", ":", "expr_list", "SEMI" ]
python
train
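Inside ply.yacc the docstring is the grammar production and p indexes the matched symbols; the list below merely simulates that object, with a tuple standing in for smop's node.expr_stmt:

def p_expr_stmt(p):
    """expr_stmt : expr_list SEMI"""
    p[0] = ("expr_stmt", p[1])   # stand-in for node.expr_stmt(expr=p[1])

# p[1] is the parsed expr_list, p[2] the SEMI token, p[0] the result slot.
p = [None, ["x+1", "y"], ";"]
p_expr_stmt(p)
print(p[0])   # ('expr_stmt', ['x+1', 'y'])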
inasafe/inasafe
safe/gui/tools/options_dialog.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/options_dialog.py#L914-L919
def set_welcome_message(self): """Create and insert welcome message.""" string = html_header() string += welcome_message().to_html() string += html_footer() self.welcome_message.setHtml(string)
[ "def", "set_welcome_message", "(", "self", ")", ":", "string", "=", "html_header", "(", ")", "string", "+=", "welcome_message", "(", ")", ".", "to_html", "(", ")", "string", "+=", "html_footer", "(", ")", "self", ".", "welcome_message", ".", "setHtml", "(", "string", ")" ]
Create and insert welcome message.
[ "Create", "and", "insert", "welcome", "message", "." ]
python
train
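The assembly is plain string concatenation; the helpers below are stand-ins for InaSAFE's real html_header()/welcome_message()/html_footer():

def html_header():
    return "<html><body>"       # stand-in

def html_footer():
    return "</body></html>"     # stand-in

page = html_header() + "<p>Welcome to InaSAFE.</p>" + html_footer()
print(page)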
acutesoftware/AIKIF
aikif/toolbox/Toolbox.py
https://github.com/acutesoftware/AIKIF/blob/fcf1582dc5f884b9a4fa7c6e20e9de9d94d21d03/aikif/toolbox/Toolbox.py#L98-L107
def verify(self, tool): """ check that the tool exists """ if os.path.isfile(tool['file']): print('Toolbox: program exists = TOK :: ' + tool['file']) return True else: print('Toolbox: program exists = FAIL :: ' + tool['file']) return False
[ "def", "verify", "(", "self", ",", "tool", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "tool", "[", "'file'", "]", ")", ":", "print", "(", "'Toolbox: program exists = TOK :: '", "+", "tool", "[", "'file'", "]", ")", "return", "True", "else", ":", "print", "(", "'Toolbox: program exists = FAIL :: '", "+", "tool", "[", "'file'", "]", ")", "return", "False" ]
check that the tool exists
[ "check", "that", "the", "tool", "exists" ]
python
train
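The check is a single os.path call; a runnable sketch whose tool dicts mirror the shape the record expects:

import os

def verify(tool):
    exists = os.path.isfile(tool['file'])
    status = 'TOK' if exists else 'FAIL'
    print('Toolbox: program exists = {} :: {}'.format(status, tool['file']))
    return exists

verify({'file': os.__file__})       # an existing file -> True
verify({'file': '/no/such/tool'})   # -> False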
streamlink/streamlink
src/streamlink/plugin/api/validate.py
https://github.com/streamlink/streamlink/blob/c8ed1daff14ac03195870238b9b900c1109dd5c1/src/streamlink/plugin/api/validate.py#L154-L177
def get(item, default=None): """Get item from value (value[item]). If the item is not found, return the default. Handles XML elements, regex matches and anything that has __getitem__. """ def getter(value): if ET.iselement(value): value = value.attrib try: # Use .group() if this is a regex match object if _is_re_match(value): return value.group(item) else: return value[item] except (KeyError, IndexError): return default except (TypeError, AttributeError) as err: raise ValueError(err) return transform(getter)
[ "def", "get", "(", "item", ",", "default", "=", "None", ")", ":", "def", "getter", "(", "value", ")", ":", "if", "ET", ".", "iselement", "(", "value", ")", ":", "value", "=", "value", ".", "attrib", "try", ":", "# Use .group() if this is a regex match object", "if", "_is_re_match", "(", "value", ")", ":", "return", "value", ".", "group", "(", "item", ")", "else", ":", "return", "value", "[", "item", "]", "except", "(", "KeyError", ",", "IndexError", ")", ":", "return", "default", "except", "(", "TypeError", ",", "AttributeError", ")", "as", "err", ":", "raise", "ValueError", "(", "err", ")", "return", "transform", "(", "getter", ")" ]
Get item from value (value[item]). If the item is not found, return the default. Handles XML elements, regex matches and anything that has __getitem__.
[ "Get", "item", "from", "value", "(", "value", "[", "item", "]", ")", "." ]
python
test
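The getter closure handles dicts and regex matches alike; a stripped-down sketch without the XML/transform machinery (re.Match as a type needs Python 3.8+):

import re

def get(item, default=None):
    def getter(value):
        try:
            if isinstance(value, re.Match):
                return value.group(item)   # named or numbered group
            return value[item]
        except (KeyError, IndexError):
            return default
    return getter

print(get("title")({"title": "stream"}))                  # stream
print(get("id")(re.search(r"(?P<id>\d+)", "video 42")))   # 42
print(get("missing", default="n/a")({}))                  # n/a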